Dataset schema (from the viewer's column summary):

    id         int64    values 0 to 190k
    prompt     string   lengths 21 to 13.4M characters
    docstring  string   lengths 1 to 12k characters
id: 172,980
from __future__ import annotations from contextlib import contextmanager from typing import ( TYPE_CHECKING, Generator, ) from pandas.plotting._core import _get_plot_backend def _get_plot_backend(backend: str | None = None): """ Return the plotting backend to use (e.g. `pandas.plotting._matplotlib`). The plotting system of pandas uses matplotlib by default, but the idea here is that it can also work with other third-party backends. This function returns the module which provides a top-level `.plot` method that will actually do the plotting. The backend is specified from a string, which either comes from the keyword argument `backend`, or, if not specified, from the option `pandas.options.plotting.backend`. All the rest of the code in this file uses the backend specified there for the plotting. The backend is imported lazily, as matplotlib is a soft dependency, and pandas can be used without it being installed. Notes ----- Modifies `_backends` with imported backend as a side effect. """ backend_str: str = backend or get_option("plotting.backend") if backend_str in _backends: return _backends[backend_str] module = _load_backend(backend_str) _backends[backend_str] = module return module class Figure(FigureBase): """ The top level container for all the plot elements. Attributes ---------- patch The `.Rectangle` instance representing the figure background patch. suppressComposite For multiple images, the figure will make composite images depending on the renderer option_image_nocomposite function. If *suppressComposite* is a boolean, this will override the renderer. """ # Remove the self._fig_callbacks properties on figure and subfigure # after the deprecation expires. callbacks = _api.deprecated( "3.6", alternative=("the 'resize_event' signal in " "Figure.canvas.callbacks") )(property(lambda self: self._fig_callbacks)) def __str__(self): return "Figure(%gx%g)" % tuple(self.bbox.size) def __repr__(self): return "<{clsname} size {h:g}x{w:g} with {naxes} Axes>".format( clsname=self.__class__.__name__, h=self.bbox.size[0], w=self.bbox.size[1], naxes=len(self.axes), ) def __init__(self, figsize=None, dpi=None, facecolor=None, edgecolor=None, linewidth=0.0, frameon=None, subplotpars=None, # rc figure.subplot.* tight_layout=None, # rc figure.autolayout constrained_layout=None, # rc figure.constrained_layout.use *, layout=None, **kwargs ): """ Parameters ---------- figsize : 2-tuple of floats, default: :rc:`figure.figsize` Figure dimension ``(width, height)`` in inches. dpi : float, default: :rc:`figure.dpi` Dots per inch. facecolor : default: :rc:`figure.facecolor` The figure patch facecolor. edgecolor : default: :rc:`figure.edgecolor` The figure patch edge color. linewidth : float The linewidth of the frame (i.e. the edge linewidth of the figure patch). frameon : bool, default: :rc:`figure.frameon` If ``False``, suppress drawing the figure background patch. subplotpars : `SubplotParams` Subplot parameters. If not given, the default subplot parameters :rc:`figure.subplot.*` are used. tight_layout : bool or dict, default: :rc:`figure.autolayout` Whether to use the tight layout mechanism. See `.set_tight_layout`. .. admonition:: Discouraged The use of this parameter is discouraged. Please use ``layout='tight'`` instead for the common case of ``tight_layout=True`` and use `.set_tight_layout` otherwise. constrained_layout : bool, default: :rc:`figure.constrained_layout.use` This is equal to ``layout='constrained'``. .. admonition:: Discouraged The use of this parameter is discouraged. 
Please use ``layout='constrained'`` instead. layout : {'constrained', 'compressed', 'tight', 'none', `.LayoutEngine`, \ None}, default: None The layout mechanism for positioning of plot elements to avoid overlapping Axes decorations (labels, ticks, etc). Note that layout managers can have significant performance penalties. - 'constrained': The constrained layout solver adjusts axes sizes to avoid overlapping axes decorations. Can handle complex plot layouts and colorbars, and is thus recommended. See :doc:`/tutorials/intermediate/constrainedlayout_guide` for examples. - 'compressed': uses the same algorithm as 'constrained', but removes extra space between fixed-aspect-ratio Axes. Best for simple grids of axes. - 'tight': Use the tight layout mechanism. This is a relatively simple algorithm that adjusts the subplot parameters so that decorations do not overlap. See `.Figure.set_tight_layout` for further details. - 'none': Do not use a layout engine. - A `.LayoutEngine` instance. Builtin layout classes are `.ConstrainedLayoutEngine` and `.TightLayoutEngine`, more easily accessible by 'constrained' and 'tight'. Passing an instance allows third parties to provide their own layout engine. If not given, fall back to using the parameters *tight_layout* and *constrained_layout*, including their config defaults :rc:`figure.autolayout` and :rc:`figure.constrained_layout.use`. Other Parameters ---------------- **kwargs : `.Figure` properties, optional %(Figure:kwdoc)s """ super().__init__(**kwargs) self._layout_engine = None if layout is not None: if (tight_layout is not None): _api.warn_external( "The Figure parameters 'layout' and 'tight_layout' cannot " "be used together. Please use 'layout' only.") if (constrained_layout is not None): _api.warn_external( "The Figure parameters 'layout' and 'constrained_layout' " "cannot be used together. Please use 'layout' only.") self.set_layout_engine(layout=layout) elif tight_layout is not None: if constrained_layout is not None: _api.warn_external( "The Figure parameters 'tight_layout' and " "'constrained_layout' cannot be used together. Please use " "'layout' parameter") self.set_layout_engine(layout='tight') if isinstance(tight_layout, dict): self.get_layout_engine().set(**tight_layout) elif constrained_layout is not None: if isinstance(constrained_layout, dict): self.set_layout_engine(layout='constrained') self.get_layout_engine().set(**constrained_layout) elif constrained_layout: self.set_layout_engine(layout='constrained') else: # everything is None, so use default: self.set_layout_engine(layout=layout) self._fig_callbacks = cbook.CallbackRegistry(signals=["dpi_changed"]) # Callbacks traditionally associated with the canvas (and exposed with # a proxy property), but that actually need to be on the figure for # pickling. 
self._canvas_callbacks = cbook.CallbackRegistry( signals=FigureCanvasBase.events) connect = self._canvas_callbacks._connect_picklable self._mouse_key_ids = [ connect('key_press_event', backend_bases._key_handler), connect('key_release_event', backend_bases._key_handler), connect('key_release_event', backend_bases._key_handler), connect('button_press_event', backend_bases._mouse_handler), connect('button_release_event', backend_bases._mouse_handler), connect('scroll_event', backend_bases._mouse_handler), connect('motion_notify_event', backend_bases._mouse_handler), ] self._button_pick_id = connect('button_press_event', self.pick) self._scroll_pick_id = connect('scroll_event', self.pick) if figsize is None: figsize = mpl.rcParams['figure.figsize'] if dpi is None: dpi = mpl.rcParams['figure.dpi'] if facecolor is None: facecolor = mpl.rcParams['figure.facecolor'] if edgecolor is None: edgecolor = mpl.rcParams['figure.edgecolor'] if frameon is None: frameon = mpl.rcParams['figure.frameon'] if not np.isfinite(figsize).all() or (np.array(figsize) < 0).any(): raise ValueError('figure size must be positive finite not ' f'{figsize}') self.bbox_inches = Bbox.from_bounds(0, 0, *figsize) self.dpi_scale_trans = Affine2D().scale(dpi) # do not use property as it will trigger self._dpi = dpi self.bbox = TransformedBbox(self.bbox_inches, self.dpi_scale_trans) self.figbbox = self.bbox self.transFigure = BboxTransformTo(self.bbox) self.transSubfigure = self.transFigure self.patch = Rectangle( xy=(0, 0), width=1, height=1, visible=frameon, facecolor=facecolor, edgecolor=edgecolor, linewidth=linewidth, # Don't let the figure patch influence bbox calculation. in_layout=False) self._set_artist_props(self.patch) self.patch.set_antialiased(False) FigureCanvasBase(self) # Set self.canvas. if subplotpars is None: subplotpars = SubplotParams() self.subplotpars = subplotpars self._axstack = _AxesStack() # track all figure axes and current axes self.clear() def pick(self, mouseevent): if not self.canvas.widgetlock.locked(): super().pick(mouseevent) def _check_layout_engines_compat(self, old, new): """ Helper for set_layout engine If the figure has used the old engine and added a colorbar then the value of colorbar_gridspec must be the same on the new engine. """ if old is None or new is None: return True if old.colorbar_gridspec == new.colorbar_gridspec: return True # colorbar layout different, so check if any colorbars are on the # figure... for ax in self.axes: if hasattr(ax, '_colorbar'): # colorbars list themselves as a colorbar. return False return True def set_layout_engine(self, layout=None, **kwargs): """ Set the layout engine for this figure. Parameters ---------- layout: {'constrained', 'compressed', 'tight', 'none'} or \ `LayoutEngine` or None - 'constrained' will use `~.ConstrainedLayoutEngine` - 'compressed' will also use `~.ConstrainedLayoutEngine`, but with a correction that attempts to make a good layout for fixed-aspect ratio Axes. - 'tight' uses `~.TightLayoutEngine` - 'none' removes layout engine. If `None`, the behavior is controlled by :rc:`figure.autolayout` (which if `True` behaves as if 'tight' was passed) and :rc:`figure.constrained_layout.use` (which if `True` behaves as if 'constrained' was passed). If both are `True`, :rc:`figure.autolayout` takes priority. Users and libraries can define their own layout engines and pass the instance directly as well. kwargs: dict The keyword arguments are passed to the layout engine to set things like padding and margin sizes. 
Only used if *layout* is a string. """ if layout is None: if mpl.rcParams['figure.autolayout']: layout = 'tight' elif mpl.rcParams['figure.constrained_layout.use']: layout = 'constrained' else: self._layout_engine = None return if layout == 'tight': new_layout_engine = TightLayoutEngine(**kwargs) elif layout == 'constrained': new_layout_engine = ConstrainedLayoutEngine(**kwargs) elif layout == 'compressed': new_layout_engine = ConstrainedLayoutEngine(compress=True, **kwargs) elif layout == 'none': if self._layout_engine is not None: new_layout_engine = PlaceHolderLayoutEngine( self._layout_engine.adjust_compatible, self._layout_engine.colorbar_gridspec ) else: new_layout_engine = None elif isinstance(layout, LayoutEngine): new_layout_engine = layout else: raise ValueError(f"Invalid value for 'layout': {layout!r}") if self._check_layout_engines_compat(self._layout_engine, new_layout_engine): self._layout_engine = new_layout_engine else: raise RuntimeError('Colorbar layout of new layout engine not ' 'compatible with old engine, and a colorbar ' 'has been created. Engine not changed.') def get_layout_engine(self): return self._layout_engine # TODO: I'd like to dynamically add the _repr_html_ method # to the figure in the right context, but then IPython doesn't # use it, for some reason. def _repr_html_(self): # We can't use "isinstance" here, because then we'd end up importing # webagg unconditionally. if 'WebAgg' in type(self.canvas).__name__: from matplotlib.backends import backend_webagg return backend_webagg.ipython_inline_display(self) def show(self, warn=True): """ If using a GUI backend with pyplot, display the figure window. If the figure was not created using `~.pyplot.figure`, it will lack a `~.backend_bases.FigureManagerBase`, and this method will raise an AttributeError. .. warning:: This does not manage an GUI event loop. Consequently, the figure may only be shown briefly or not shown at all if you or your environment are not managing an event loop. Use cases for `.Figure.show` include running this from a GUI application (where there is persistently an event loop running) or from a shell, like IPython, that install an input hook to allow the interactive shell to accept input while the figure is also being shown and interactive. Some, but not all, GUI toolkits will register an input hook on import. See :ref:`cp_integration` for more details. If you're in a shell without input hook integration or executing a python script, you should use `matplotlib.pyplot.show` with ``block=True`` instead, which takes care of starting and running the event loop for you. Parameters ---------- warn : bool, default: True If ``True`` and we are not running headless (i.e. on Linux with an unset DISPLAY), issue warning when called on a non-GUI backend. """ if self.canvas.manager is None: raise AttributeError( "Figure.show works only for figures managed by pyplot, " "normally created by pyplot.figure()") try: self.canvas.manager.show() except NonGuiException as exc: if warn: _api.warn_external(str(exc)) def axes(self): """ List of Axes in the Figure. You can access and modify the Axes in the Figure through this list. Do not modify the list itself. Instead, use `~Figure.add_axes`, `~.Figure.add_subplot` or `~.Figure.delaxes` to add or remove an Axes. Note: The `.Figure.axes` property and `~.Figure.get_axes` method are equivalent. 
""" return self._axstack.as_list() get_axes = axes.fget def _get_renderer(self): if hasattr(self.canvas, 'get_renderer'): return self.canvas.get_renderer() else: return _get_renderer(self) def _get_dpi(self): return self._dpi def _set_dpi(self, dpi, forward=True): """ Parameters ---------- dpi : float forward : bool Passed on to `~.Figure.set_size_inches` """ if dpi == self._dpi: # We don't want to cause undue events in backends. return self._dpi = dpi self.dpi_scale_trans.clear().scale(dpi) w, h = self.get_size_inches() self.set_size_inches(w, h, forward=forward) self._fig_callbacks.process('dpi_changed', self) dpi = property(_get_dpi, _set_dpi, doc="The resolution in dots per inch.") def get_tight_layout(self): """Return whether `.tight_layout` is called when drawing.""" return isinstance(self.get_layout_engine(), TightLayoutEngine) pending=True) def set_tight_layout(self, tight): """ [*Discouraged*] Set whether and how `.tight_layout` is called when drawing. .. admonition:: Discouraged This method is discouraged in favor of `~.set_layout_engine`. Parameters ---------- tight : bool or dict with keys "pad", "w_pad", "h_pad", "rect" or None If a bool, sets whether to call `.tight_layout` upon drawing. If ``None``, use :rc:`figure.autolayout` instead. If a dict, pass it as kwargs to `.tight_layout`, overriding the default paddings. """ if tight is None: tight = mpl.rcParams['figure.autolayout'] _tight = 'tight' if bool(tight) else 'none' _tight_parameters = tight if isinstance(tight, dict) else {} self.set_layout_engine(_tight, **_tight_parameters) self.stale = True def get_constrained_layout(self): """ Return whether constrained layout is being used. See :doc:`/tutorials/intermediate/constrainedlayout_guide`. """ return isinstance(self.get_layout_engine(), ConstrainedLayoutEngine) pending=True) def set_constrained_layout(self, constrained): """ [*Discouraged*] Set whether ``constrained_layout`` is used upon drawing. If None, :rc:`figure.constrained_layout.use` value will be used. When providing a dict containing the keys ``w_pad``, ``h_pad`` the default ``constrained_layout`` paddings will be overridden. These pads are in inches and default to 3.0/72.0. ``w_pad`` is the width padding and ``h_pad`` is the height padding. .. admonition:: Discouraged This method is discouraged in favor of `~.set_layout_engine`. Parameters ---------- constrained : bool or dict or None """ if constrained is None: constrained = mpl.rcParams['figure.constrained_layout.use'] _constrained = 'constrained' if bool(constrained) else 'none' _parameters = constrained if isinstance(constrained, dict) else {} self.set_layout_engine(_constrained, **_parameters) self.stale = True "3.6", alternative="figure.get_layout_engine().set()", pending=True) def set_constrained_layout_pads(self, **kwargs): """ Set padding for ``constrained_layout``. Tip: The parameters can be passed from a dictionary by using ``fig.set_constrained_layout(**pad_dict)``. See :doc:`/tutorials/intermediate/constrainedlayout_guide`. Parameters ---------- w_pad : float, default: :rc:`figure.constrained_layout.w_pad` Width padding in inches. This is the pad around Axes and is meant to make sure there is enough room for fonts to look good. Defaults to 3 pts = 0.04167 inches h_pad : float, default: :rc:`figure.constrained_layout.h_pad` Height padding in inches. Defaults to 3 pts. wspace : float, default: :rc:`figure.constrained_layout.wspace` Width padding between subplots, expressed as a fraction of the subplot width. 
The total padding ends up being w_pad + wspace. hspace : float, default: :rc:`figure.constrained_layout.hspace` Height padding between subplots, expressed as a fraction of the subplot width. The total padding ends up being h_pad + hspace. """ if isinstance(self.get_layout_engine(), ConstrainedLayoutEngine): self.get_layout_engine().set(**kwargs) pending=True) def get_constrained_layout_pads(self, relative=False): """ Get padding for ``constrained_layout``. Returns a list of ``w_pad, h_pad`` in inches and ``wspace`` and ``hspace`` as fractions of the subplot. All values are None if ``constrained_layout`` is not used. See :doc:`/tutorials/intermediate/constrainedlayout_guide`. Parameters ---------- relative : bool If `True`, then convert from inches to figure relative. """ if not isinstance(self.get_layout_engine(), ConstrainedLayoutEngine): return None, None, None, None info = self.get_layout_engine().get_info() w_pad = info['w_pad'] h_pad = info['h_pad'] wspace = info['wspace'] hspace = info['hspace'] if relative and (w_pad is not None or h_pad is not None): renderer = self._get_renderer() dpi = renderer.dpi w_pad = w_pad * dpi / renderer.width h_pad = h_pad * dpi / renderer.height return w_pad, h_pad, wspace, hspace def set_canvas(self, canvas): """ Set the canvas that contains the figure Parameters ---------- canvas : FigureCanvas """ self.canvas = canvas def figimage(self, X, xo=0, yo=0, alpha=None, norm=None, cmap=None, vmin=None, vmax=None, origin=None, resize=False, **kwargs): """ Add a non-resampled image to the figure. The image is attached to the lower or upper left corner depending on *origin*. Parameters ---------- X The image data. This is an array of one of the following shapes: - (M, N): an image with scalar data. Color-mapping is controlled by *cmap*, *norm*, *vmin*, and *vmax*. - (M, N, 3): an image with RGB values (0-1 float or 0-255 int). - (M, N, 4): an image with RGBA values (0-1 float or 0-255 int), i.e. including transparency. xo, yo : int The *x*/*y* image offset in pixels. alpha : None or float The alpha blending value. %(cmap_doc)s This parameter is ignored if *X* is RGB(A). %(norm_doc)s This parameter is ignored if *X* is RGB(A). %(vmin_vmax_doc)s This parameter is ignored if *X* is RGB(A). origin : {'upper', 'lower'}, default: :rc:`image.origin` Indicates where the [0, 0] index of the array is in the upper left or lower left corner of the axes. resize : bool If *True*, resize the figure to match the given image size. Returns ------- `matplotlib.image.FigureImage` Other Parameters ---------------- **kwargs Additional kwargs are `.Artist` kwargs passed on to `.FigureImage`. Notes ----- figimage complements the Axes image (`~matplotlib.axes.Axes.imshow`) which will be resampled to fit the current Axes. If you want a resampled image to fill the entire figure, you can define an `~matplotlib.axes.Axes` with extent [0, 0, 1, 1]. 
Examples -------- :: f = plt.figure() nx = int(f.get_figwidth() * f.dpi) ny = int(f.get_figheight() * f.dpi) data = np.random.random((ny, nx)) f.figimage(data) plt.show() """ if resize: dpi = self.get_dpi() figsize = [x / dpi for x in (X.shape[1], X.shape[0])] self.set_size_inches(figsize, forward=True) im = mimage.FigureImage(self, cmap=cmap, norm=norm, offsetx=xo, offsety=yo, origin=origin, **kwargs) im.stale_callback = _stale_figure_callback im.set_array(X) im.set_alpha(alpha) if norm is None: im.set_clim(vmin, vmax) self.images.append(im) im._remove_method = self.images.remove self.stale = True return im def set_size_inches(self, w, h=None, forward=True): """ Set the figure size in inches. Call signatures:: fig.set_size_inches(w, h) # OR fig.set_size_inches((w, h)) Parameters ---------- w : (float, float) or float Width and height in inches (if height not specified as a separate argument) or width. h : float Height in inches. forward : bool, default: True If ``True``, the canvas size is automatically updated, e.g., you can resize the figure window from the shell. See Also -------- matplotlib.figure.Figure.get_size_inches matplotlib.figure.Figure.set_figwidth matplotlib.figure.Figure.set_figheight Notes ----- To transform from pixels to inches divide by `Figure.dpi`. """ if h is None: # Got called with a single pair as argument. w, h = w size = np.array([w, h]) if not np.isfinite(size).all() or (size < 0).any(): raise ValueError(f'figure size must be positive finite not {size}') self.bbox_inches.p1 = size if forward: manager = self.canvas.manager if manager is not None: manager.resize(*(size * self.dpi).astype(int)) self.stale = True def get_size_inches(self): """ Return the current size of the figure in inches. Returns ------- ndarray The size (width, height) of the figure in inches. See Also -------- matplotlib.figure.Figure.set_size_inches matplotlib.figure.Figure.get_figwidth matplotlib.figure.Figure.get_figheight Notes ----- The size in pixels can be obtained by multiplying with `Figure.dpi`. """ return np.array(self.bbox_inches.p1) def get_figwidth(self): """Return the figure width in inches.""" return self.bbox_inches.width def get_figheight(self): """Return the figure height in inches.""" return self.bbox_inches.height def get_dpi(self): """Return the resolution in dots per inch as a float.""" return self.dpi def set_dpi(self, val): """ Set the resolution of the figure in dots-per-inch. Parameters ---------- val : float """ self.dpi = val self.stale = True def set_figwidth(self, val, forward=True): """ Set the width of the figure in inches. Parameters ---------- val : float forward : bool See `set_size_inches`. See Also -------- matplotlib.figure.Figure.set_figheight matplotlib.figure.Figure.set_size_inches """ self.set_size_inches(val, self.get_figheight(), forward=forward) def set_figheight(self, val, forward=True): """ Set the height of the figure in inches. Parameters ---------- val : float forward : bool See `set_size_inches`. 
See Also -------- matplotlib.figure.Figure.set_figwidth matplotlib.figure.Figure.set_size_inches """ self.set_size_inches(self.get_figwidth(), val, forward=forward) def clear(self, keep_observers=False): # docstring inherited super().clear(keep_observers=keep_observers) # FigureBase.clear does not clear toolbars, as # only Figure can have toolbars toolbar = self.canvas.toolbar if toolbar is not None: toolbar.update() def draw(self, renderer): # docstring inherited # draw the figure bounding box, perhaps none for white figure if not self.get_visible(): return artists = self._get_draw_artists(renderer) try: renderer.open_group('figure', gid=self.get_gid()) if self.axes and self.get_layout_engine() is not None: try: self.get_layout_engine().execute(self) except ValueError: pass # ValueError can occur when resizing a window. self.patch.draw(renderer) mimage._draw_list_compositing_images( renderer, self, artists, self.suppressComposite) for sfig in self.subfigs: sfig.draw(renderer) renderer.close_group('figure') finally: self.stale = False DrawEvent("draw_event", self.canvas, renderer)._process() def draw_without_rendering(self): """ Draw the figure with no output. Useful to get the final size of artists that require a draw before their size is known (e.g. text). """ renderer = _get_renderer(self) with renderer._draw_disabled(): self.draw(renderer) def draw_artist(self, a): """ Draw `.Artist` *a* only. """ a.draw(self.canvas.get_renderer()) def __getstate__(self): state = super().__getstate__() # The canvas cannot currently be pickled, but this has the benefit # of meaning that a figure can be detached from one canvas, and # re-attached to another. state.pop("canvas") # discard any changes to the dpi due to pixel ratio changes state["_dpi"] = state.get('_original_dpi', state['_dpi']) # add version information to the state state['__mpl_version__'] = mpl.__version__ # check whether the figure manager (if any) is registered with pyplot from matplotlib import _pylab_helpers if self.canvas.manager in _pylab_helpers.Gcf.figs.values(): state['_restore_to_pylab'] = True return state def __setstate__(self, state): version = state.pop('__mpl_version__') restore_to_pylab = state.pop('_restore_to_pylab', False) if version != mpl.__version__: _api.warn_external( f"This figure was saved with matplotlib version {version} and " f"is unlikely to function correctly.") self.__dict__ = state # re-initialise some of the unstored state information FigureCanvasBase(self) # Set self.canvas. if restore_to_pylab: # lazy import to avoid circularity import matplotlib.pyplot as plt import matplotlib._pylab_helpers as pylab_helpers allnums = plt.get_fignums() num = max(allnums) + 1 if allnums else 1 backend = plt._get_backend_mod() mgr = backend.new_figure_manager_given_figure(num, self) pylab_helpers.Gcf._set_new_active_manager(mgr) plt.draw_if_interactive() self.stale = True def add_axobserver(self, func): """Whenever the Axes state change, ``func(self)`` will be called.""" # Connect a wrapper lambda and not func itself, to avoid it being # weakref-collected. self._axobservers.connect("_axes_change_event", lambda arg: func(arg)) def savefig(self, fname, *, transparent=None, **kwargs): """ Save the current figure. Call signature:: savefig(fname, *, dpi='figure', format=None, metadata=None, bbox_inches=None, pad_inches=0.1, facecolor='auto', edgecolor='auto', backend=None, **kwargs ) The available output formats depend on the backend being used. 
Parameters ---------- fname : str or path-like or binary file-like A path, or a Python file-like object, or possibly some backend-dependent object such as `matplotlib.backends.backend_pdf.PdfPages`. If *format* is set, it determines the output format, and the file is saved as *fname*. Note that *fname* is used verbatim, and there is no attempt to make the extension, if any, of *fname* match *format*, and no extension is appended. If *format* is not set, then the format is inferred from the extension of *fname*, if there is one. If *format* is not set and *fname* has no extension, then the file is saved with :rc:`savefig.format` and the appropriate extension is appended to *fname*. Other Parameters ---------------- dpi : float or 'figure', default: :rc:`savefig.dpi` The resolution in dots per inch. If 'figure', use the figure's dpi value. format : str The file format, e.g. 'png', 'pdf', 'svg', ... The behavior when this is unset is documented under *fname*. metadata : dict, optional Key/value pairs to store in the image metadata. The supported keys and defaults depend on the image format and backend: - 'png' with Agg backend: See the parameter ``metadata`` of `~.FigureCanvasAgg.print_png`. - 'pdf' with pdf backend: See the parameter ``metadata`` of `~.backend_pdf.PdfPages`. - 'svg' with svg backend: See the parameter ``metadata`` of `~.FigureCanvasSVG.print_svg`. - 'eps' and 'ps' with PS backend: Only 'Creator' is supported. bbox_inches : str or `.Bbox`, default: :rc:`savefig.bbox` Bounding box in inches: only the given portion of the figure is saved. If 'tight', try to figure out the tight bbox of the figure. pad_inches : float, default: :rc:`savefig.pad_inches` Amount of padding around the figure when bbox_inches is 'tight'. facecolor : color or 'auto', default: :rc:`savefig.facecolor` The facecolor of the figure. If 'auto', use the current figure facecolor. edgecolor : color or 'auto', default: :rc:`savefig.edgecolor` The edgecolor of the figure. If 'auto', use the current figure edgecolor. backend : str, optional Use a non-default backend to render the file, e.g. to render a png file with the "cairo" backend rather than the default "agg", or a pdf file with the "pgf" backend rather than the default "pdf". Note that the default backend is normally sufficient. See :ref:`the-builtin-backends` for a list of valid backends for each file format. Custom backends can be referenced as "module://...". orientation : {'landscape', 'portrait'} Currently only supported by the postscript backend. papertype : str One of 'letter', 'legal', 'executive', 'ledger', 'a0' through 'a10', 'b0' through 'b10'. Only supported for postscript output. transparent : bool If *True*, the Axes patches will all be transparent; the Figure patch will also be transparent unless *facecolor* and/or *edgecolor* are specified via kwargs. If *False* has no effect and the color of the Axes and Figure patches are unchanged (unless the Figure patch is specified via the *facecolor* and/or *edgecolor* keyword arguments in which case those colors are used). The transparency of these patches will be restored to their original values upon exit of this function. This is useful, for example, for displaying a plot on top of a colored background on a web page. bbox_extra_artists : list of `~matplotlib.artist.Artist`, optional A list of extra artists that will be considered when the tight bbox is calculated. pil_kwargs : dict, optional Additional keyword arguments that are passed to `PIL.Image.Image.save` when saving the figure. 
""" kwargs.setdefault('dpi', mpl.rcParams['savefig.dpi']) if transparent is None: transparent = mpl.rcParams['savefig.transparent'] with ExitStack() as stack: if transparent: kwargs.setdefault('facecolor', 'none') kwargs.setdefault('edgecolor', 'none') for ax in self.axes: stack.enter_context( ax.patch._cm_set(facecolor='none', edgecolor='none')) self.canvas.print_figure(fname, **kwargs) def ginput(self, n=1, timeout=30, show_clicks=True, mouse_add=MouseButton.LEFT, mouse_pop=MouseButton.RIGHT, mouse_stop=MouseButton.MIDDLE): """ Blocking call to interact with a figure. Wait until the user clicks *n* times on the figure, and return the coordinates of each click in a list. There are three possible interactions: - Add a point. - Remove the most recently added point. - Stop the interaction and return the points added so far. The actions are assigned to mouse buttons via the arguments *mouse_add*, *mouse_pop* and *mouse_stop*. Parameters ---------- n : int, default: 1 Number of mouse clicks to accumulate. If negative, accumulate clicks until the input is terminated manually. timeout : float, default: 30 seconds Number of seconds to wait before timing out. If zero or negative will never time out. show_clicks : bool, default: True If True, show a red cross at the location of each click. mouse_add : `.MouseButton` or None, default: `.MouseButton.LEFT` Mouse button used to add points. mouse_pop : `.MouseButton` or None, default: `.MouseButton.RIGHT` Mouse button used to remove the most recently added point. mouse_stop : `.MouseButton` or None, default: `.MouseButton.MIDDLE` Mouse button used to stop input. Returns ------- list of tuples A list of the clicked (x, y) coordinates. Notes ----- The keyboard can also be used to select points in case your mouse does not have one or more of the buttons. The delete and backspace keys act like right-clicking (i.e., remove last point), the enter key terminates input and any other key (not already used by the window manager) selects a point. """ clicks = [] marks = [] def handler(event): is_button = event.name == "button_press_event" is_key = event.name == "key_press_event" # Quit (even if not in infinite mode; this is consistent with # MATLAB and sometimes quite useful, but will require the user to # test how many points were actually returned before using data). if (is_button and event.button == mouse_stop or is_key and event.key in ["escape", "enter"]): self.canvas.stop_event_loop() # Pop last click. elif (is_button and event.button == mouse_pop or is_key and event.key in ["backspace", "delete"]): if clicks: clicks.pop() if show_clicks: marks.pop().remove() self.canvas.draw() # Add new click. elif (is_button and event.button == mouse_add # On macOS/gtk, some keys return None. or is_key and event.key is not None): if event.inaxes: clicks.append((event.xdata, event.ydata)) _log.info("input %i: %f, %f", len(clicks), event.xdata, event.ydata) if show_clicks: line = mpl.lines.Line2D([event.xdata], [event.ydata], marker="+", color="r") event.inaxes.add_line(line) marks.append(line) self.canvas.draw() if len(clicks) == n and n > 0: self.canvas.stop_event_loop() _blocking_input.blocking_input_loop( self, ["button_press_event", "key_press_event"], timeout, handler) # Cleanup. for mark in marks: mark.remove() self.canvas.draw() return clicks def waitforbuttonpress(self, timeout=-1): """ Blocking call to interact with the figure. 
Wait for user input and return True if a key was pressed, False if a mouse button was pressed and None if no input was given within *timeout* seconds. Negative values deactivate *timeout*. """ event = None def handler(ev): nonlocal event event = ev self.canvas.stop_event_loop() _blocking_input.blocking_input_loop( self, ["button_press_event", "key_press_event"], timeout, handler) return None if event is None else event.name == "key_press_event" def execute_constrained_layout(self, renderer=None): """ Use ``layoutgrid`` to determine pos positions within Axes. See also `.set_constrained_layout_pads`. Returns ------- layoutgrid : private debugging object """ if not isinstance(self.get_layout_engine(), ConstrainedLayoutEngine): return None return self.get_layout_engine().execute(self) def tight_layout(self, *, pad=1.08, h_pad=None, w_pad=None, rect=None): """ Adjust the padding between and around subplots. To exclude an artist on the Axes from the bounding box calculation that determines the subplot parameters (i.e. legend, or annotation), set ``a.set_in_layout(False)`` for that artist. Parameters ---------- pad : float, default: 1.08 Padding between the figure edge and the edges of subplots, as a fraction of the font size. h_pad, w_pad : float, default: *pad* Padding (height/width) between edges of adjacent subplots, as a fraction of the font size. rect : tuple (left, bottom, right, top), default: (0, 0, 1, 1) A rectangle in normalized figure coordinates into which the whole subplots area (including labels) will fit. See Also -------- .Figure.set_layout_engine .pyplot.tight_layout """ # note that here we do not permanently set the figures engine to # tight_layout but rather just perform the layout in place and remove # any previous engines. engine = TightLayoutEngine(pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect) try: previous_engine = self.get_layout_engine() self.set_layout_engine(engine) engine.execute(self) if not isinstance(previous_engine, TightLayoutEngine) \ and previous_engine is not None: _api.warn_external('The figure layout has changed to tight') finally: self.set_layout_engine(None) The provided code snippet includes necessary dependencies for implementing the `bootstrap_plot` function. Write a Python function `def bootstrap_plot( series: Series, fig: Figure | None = None, size: int = 50, samples: int = 500, **kwds, ) -> Figure` to solve the following problem: Bootstrap plot on mean, median and mid-range statistics. The bootstrap plot is used to estimate the uncertainty of a statistic by relying on random sampling with replacement [1]_. This function will generate bootstrapping plots for mean, median and mid-range statistics for the given number of samples of the given size. .. [1] "Bootstrapping (statistics)" in \ https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29 Parameters ---------- series : pandas.Series Series from where to get the samplings for the bootstrapping. fig : matplotlib.figure.Figure, default None If given, it will use the `fig` reference for plotting instead of creating a new one with default parameters. size : int, default 50 Number of data points to consider during each sampling. It must be less than or equal to the length of the `series`. samples : int, default 500 Number of times the bootstrap procedure is performed. **kwds Options to pass to matplotlib plotting method. Returns ------- matplotlib.figure.Figure Matplotlib figure. See Also -------- pandas.DataFrame.plot : Basic plotting for DataFrame objects. 
pandas.Series.plot : Basic plotting for Series objects. Examples -------- This example draws a basic bootstrap plot for a Series. .. plot:: :context: close-figs >>> s = pd.Series(np.random.uniform(size=100)) >>> pd.plotting.bootstrap_plot(s) <Figure size 640x480 with 6 Axes> Here is the function: def bootstrap_plot( series: Series, fig: Figure | None = None, size: int = 50, samples: int = 500, **kwds, ) -> Figure: """ Bootstrap plot on mean, median and mid-range statistics. The bootstrap plot is used to estimate the uncertainty of a statistic by relying on random sampling with replacement [1]_. This function will generate bootstrapping plots for mean, median and mid-range statistics for the given number of samples of the given size. .. [1] "Bootstrapping (statistics)" in \ https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29 Parameters ---------- series : pandas.Series Series from where to get the samplings for the bootstrapping. fig : matplotlib.figure.Figure, default None If given, it will use the `fig` reference for plotting instead of creating a new one with default parameters. size : int, default 50 Number of data points to consider during each sampling. It must be less than or equal to the length of the `series`. samples : int, default 500 Number of times the bootstrap procedure is performed. **kwds Options to pass to matplotlib plotting method. Returns ------- matplotlib.figure.Figure Matplotlib figure. See Also -------- pandas.DataFrame.plot : Basic plotting for DataFrame objects. pandas.Series.plot : Basic plotting for Series objects. Examples -------- This example draws a basic bootstrap plot for a Series. .. plot:: :context: close-figs >>> s = pd.Series(np.random.uniform(size=100)) >>> pd.plotting.bootstrap_plot(s) <Figure size 640x480 with 6 Axes> """ plot_backend = _get_plot_backend("matplotlib") return plot_backend.bootstrap_plot( series=series, fig=fig, size=size, samples=samples, **kwds )
Bootstrap plot on mean, median and mid-range statistics. The bootstrap plot is used to estimate the uncertainty of a statistic by relying on random sampling with replacement [1]_. This function will generate bootstrapping plots for mean, median and mid-range statistics for the given number of samples of the given size. .. [1] "Bootstrapping (statistics)" in \ https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29 Parameters ---------- series : pandas.Series Series from where to get the samplings for the bootstrapping. fig : matplotlib.figure.Figure, default None If given, it will use the `fig` reference for plotting instead of creating a new one with default parameters. size : int, default 50 Number of data points to consider during each sampling. It must be less than or equal to the length of the `series`. samples : int, default 500 Number of times the bootstrap procedure is performed. **kwds Options to pass to matplotlib plotting method. Returns ------- matplotlib.figure.Figure Matplotlib figure. See Also -------- pandas.DataFrame.plot : Basic plotting for DataFrame objects. pandas.Series.plot : Basic plotting for Series objects. Examples -------- This example draws a basic bootstrap plot for a Series. .. plot:: :context: close-figs >>> s = pd.Series(np.random.uniform(size=100)) >>> pd.plotting.bootstrap_plot(s) <Figure size 640x480 with 6 Axes>
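A minimal usage sketch for this row's target function, assuming pandas with matplotlib installed. The data mirrors the docstring's own example; the explicit seed is an illustrative addition, not from the row.

    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt

    rng = np.random.default_rng(42)  # seed is illustrative, not from the row
    s = pd.Series(rng.uniform(size=100))

    # Each of the 500 resamples draws 50 points with replacement and records
    # the mean, median and mid-range, giving six panels (one trace plot and
    # one histogram per statistic). `size` must not exceed len(s).
    fig = pd.plotting.bootstrap_plot(s, size=50, samples=500)
    plt.show()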
id: 172,981
from __future__ import annotations from contextlib import contextmanager from typing import ( TYPE_CHECKING, Generator, ) from pandas.plotting._core import _get_plot_backend def _get_plot_backend(backend: str | None = None): """ Return the plotting backend to use (e.g. `pandas.plotting._matplotlib`). The plotting system of pandas uses matplotlib by default, but the idea here is that it can also work with other third-party backends. This function returns the module which provides a top-level `.plot` method that will actually do the plotting. The backend is specified from a string, which either comes from the keyword argument `backend`, or, if not specified, from the option `pandas.options.plotting.backend`. All the rest of the code in this file uses the backend specified there for the plotting. The backend is imported lazily, as matplotlib is a soft dependency, and pandas can be used without it being installed. Notes ----- Modifies `_backends` with imported backend as a side effect. """ backend_str: str = backend or get_option("plotting.backend") if backend_str in _backends: return _backends[backend_str] module = _load_backend(backend_str) _backends[backend_str] = module return module The provided code snippet includes necessary dependencies for implementing the `parallel_coordinates` function. Write a Python function `def parallel_coordinates( frame: DataFrame, class_column: str, cols: list[str] | None = None, ax: Axes | None = None, color: list[str] | tuple[str, ...] | None = None, use_columns: bool = False, xticks: list | tuple | None = None, colormap=None, axvlines: bool = True, axvlines_kwds=None, sort_labels: bool = False, **kwargs, ) -> Axes` to solve the following problem: Parallel coordinates plotting. Parameters ---------- frame : DataFrame class_column : str Column name containing class names. cols : list, optional A list of column names to use. ax : matplotlib.axis, optional Matplotlib axis object. color : list or tuple, optional Colors to use for the different classes. use_columns : bool, optional If true, columns will be used as xticks. xticks : list or tuple, optional A list of values to use for xticks. colormap : str or matplotlib colormap, default None Colormap to use for line colors. axvlines : bool, optional If true, vertical lines will be added at each xtick. axvlines_kwds : keywords, optional Options to be passed to axvline method for vertical lines. sort_labels : bool, default False Sort class_column labels, useful when assigning colors. **kwargs Options to pass to matplotlib plotting method. Returns ------- matplotlib.axes.Axes Examples -------- .. plot:: :context: close-figs >>> df = pd.read_csv( ... 'https://raw.githubusercontent.com/pandas-dev/' ... 'pandas/main/pandas/tests/io/data/csv/iris.csv' ... ) >>> pd.plotting.parallel_coordinates( ... df, 'Name', color=('#556270', '#4ECDC4', '#C7F464') ... ) <AxesSubplot: xlabel='y(t)', ylabel='y(t + 1)'> Here is the function: def parallel_coordinates( frame: DataFrame, class_column: str, cols: list[str] | None = None, ax: Axes | None = None, color: list[str] | tuple[str, ...] | None = None, use_columns: bool = False, xticks: list | tuple | None = None, colormap=None, axvlines: bool = True, axvlines_kwds=None, sort_labels: bool = False, **kwargs, ) -> Axes: """ Parallel coordinates plotting. Parameters ---------- frame : DataFrame class_column : str Column name containing class names. cols : list, optional A list of column names to use. ax : matplotlib.axis, optional Matplotlib axis object. 
color : list or tuple, optional Colors to use for the different classes. use_columns : bool, optional If true, columns will be used as xticks. xticks : list or tuple, optional A list of values to use for xticks. colormap : str or matplotlib colormap, default None Colormap to use for line colors. axvlines : bool, optional If true, vertical lines will be added at each xtick. axvlines_kwds : keywords, optional Options to be passed to axvline method for vertical lines. sort_labels : bool, default False Sort class_column labels, useful when assigning colors. **kwargs Options to pass to matplotlib plotting method. Returns ------- matplotlib.axes.Axes Examples -------- .. plot:: :context: close-figs >>> df = pd.read_csv( ... 'https://raw.githubusercontent.com/pandas-dev/' ... 'pandas/main/pandas/tests/io/data/csv/iris.csv' ... ) >>> pd.plotting.parallel_coordinates( ... df, 'Name', color=('#556270', '#4ECDC4', '#C7F464') ... ) <AxesSubplot: xlabel='y(t)', ylabel='y(t + 1)'> """ plot_backend = _get_plot_backend("matplotlib") return plot_backend.parallel_coordinates( frame=frame, class_column=class_column, cols=cols, ax=ax, color=color, use_columns=use_columns, xticks=xticks, colormap=colormap, axvlines=axvlines, axvlines_kwds=axvlines_kwds, sort_labels=sort_labels, **kwargs, )
Parallel coordinates plotting. Parameters ---------- frame : DataFrame class_column : str Column name containing class names. cols : list, optional A list of column names to use. ax : matplotlib.axis, optional Matplotlib axis object. color : list or tuple, optional Colors to use for the different classes. use_columns : bool, optional If true, columns will be used as xticks. xticks : list or tuple, optional A list of values to use for xticks. colormap : str or matplotlib colormap, default None Colormap to use for line colors. axvlines : bool, optional If true, vertical lines will be added at each xtick. axvlines_kwds : keywords, optional Options to be passed to axvline method for vertical lines. sort_labels : bool, default False Sort class_column labels, useful when assigning colors. **kwargs Options to pass to matplotlib plotting method. Returns ------- matplotlib.axes.Axes Examples -------- .. plot:: :context: close-figs >>> df = pd.read_csv( ... 'https://raw.githubusercontent.com/pandas-dev/' ... 'pandas/main/pandas/tests/io/data/csv/iris.csv' ... ) >>> pd.plotting.parallel_coordinates( ... df, 'Name', color=('#556270', '#4ECDC4', '#C7F464') ... ) <AxesSubplot: xlabel='y(t)', ylabel='y(t + 1)'>
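The doctest in this row already sketches a complete call; spelled out as a runnable script (network access to the pandas repository is assumed for the iris CSV):

    import pandas as pd
    import matplotlib.pyplot as plt

    df = pd.read_csv(
        "https://raw.githubusercontent.com/pandas-dev/"
        "pandas/main/pandas/tests/io/data/csv/iris.csv"
    )
    # One vertical axis per feature column; each line is one row of the
    # frame, colored by its value in the 'Name' class column.
    ax = pd.plotting.parallel_coordinates(
        df, "Name", color=("#556270", "#4ECDC4", "#C7F464")
    )
    plt.show()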
id: 172,982
from __future__ import annotations from contextlib import contextmanager from typing import ( TYPE_CHECKING, Generator, ) from pandas.plotting._core import _get_plot_backend def _get_plot_backend(backend: str | None = None): """ Return the plotting backend to use (e.g. `pandas.plotting._matplotlib`). The plotting system of pandas uses matplotlib by default, but the idea here is that it can also work with other third-party backends. This function returns the module which provides a top-level `.plot` method that will actually do the plotting. The backend is specified from a string, which either comes from the keyword argument `backend`, or, if not specified, from the option `pandas.options.plotting.backend`. All the rest of the code in this file uses the backend specified there for the plotting. The backend is imported lazily, as matplotlib is a soft dependency, and pandas can be used without it being installed. Notes ----- Modifies `_backends` with imported backend as a side effect. """ backend_str: str = backend or get_option("plotting.backend") if backend_str in _backends: return _backends[backend_str] module = _load_backend(backend_str) _backends[backend_str] = module return module The provided code snippet includes necessary dependencies for implementing the `lag_plot` function. Write a Python function `def lag_plot(series: Series, lag: int = 1, ax: Axes | None = None, **kwds) -> Axes` to solve the following problem: Lag plot for time series. Parameters ---------- series : Series The time series to visualize. lag : int, default 1 Lag length of the scatter plot. ax : Matplotlib axis object, optional The matplotlib axis object to use. **kwds Matplotlib scatter method keyword arguments. Returns ------- matplotlib.axes.Axes Examples -------- Lag plots are most commonly used to look for patterns in time series data. Given the following time series .. plot:: :context: close-figs >>> np.random.seed(5) >>> x = np.cumsum(np.random.normal(loc=1, scale=5, size=50)) >>> s = pd.Series(x) >>> s.plot() <AxesSubplot: xlabel='Midrange'> A lag plot with ``lag=1`` returns .. plot:: :context: close-figs >>> pd.plotting.lag_plot(s, lag=1) <AxesSubplot: xlabel='y(t)', ylabel='y(t + 1)'> Here is the function: def lag_plot(series: Series, lag: int = 1, ax: Axes | None = None, **kwds) -> Axes: """ Lag plot for time series. Parameters ---------- series : Series The time series to visualize. lag : int, default 1 Lag length of the scatter plot. ax : Matplotlib axis object, optional The matplotlib axis object to use. **kwds Matplotlib scatter method keyword arguments. Returns ------- matplotlib.axes.Axes Examples -------- Lag plots are most commonly used to look for patterns in time series data. Given the following time series .. plot:: :context: close-figs >>> np.random.seed(5) >>> x = np.cumsum(np.random.normal(loc=1, scale=5, size=50)) >>> s = pd.Series(x) >>> s.plot() <AxesSubplot: xlabel='Midrange'> A lag plot with ``lag=1`` returns .. plot:: :context: close-figs >>> pd.plotting.lag_plot(s, lag=1) <AxesSubplot: xlabel='y(t)', ylabel='y(t + 1)'> """ plot_backend = _get_plot_backend("matplotlib") return plot_backend.lag_plot(series=series, lag=lag, ax=ax, **kwds)
Lag plot for time series. Parameters ---------- series : Series The time series to visualize. lag : int, default 1 Lag length of the scatter plot. ax : Matplotlib axis object, optional The matplotlib axis object to use. **kwds Matplotlib scatter method keyword arguments. Returns ------- matplotlib.axes.Axes Examples -------- Lag plots are most commonly used to look for patterns in time series data. Given the following time series .. plot:: :context: close-figs >>> np.random.seed(5) >>> x = np.cumsum(np.random.normal(loc=1, scale=5, size=50)) >>> s = pd.Series(x) >>> s.plot() <AxesSubplot: xlabel='Midrange'> A lag plot with ``lag=1`` returns .. plot:: :context: close-figs >>> pd.plotting.lag_plot(s, lag=1) <AxesSubplot: xlabel='y(t)', ylabel='y(t + 1)'>
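A runnable version of the docstring example. Because the series is a drifting random walk, the lag-1 scatter hugs the diagonal:

    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt

    np.random.seed(5)
    x = np.cumsum(np.random.normal(loc=1, scale=5, size=50))
    s = pd.Series(x)

    # Scatter of y(t) against y(t + 1); strong autocorrelation shows up
    # as points clustered along the diagonal.
    ax = pd.plotting.lag_plot(s, lag=1)
    plt.show()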
id: 172,983
from __future__ import annotations from contextlib import contextmanager from typing import ( TYPE_CHECKING, Generator, ) from pandas.plotting._core import _get_plot_backend def _get_plot_backend(backend: str | None = None): """ Return the plotting backend to use (e.g. `pandas.plotting._matplotlib`). The plotting system of pandas uses matplotlib by default, but the idea here is that it can also work with other third-party backends. This function returns the module which provides a top-level `.plot` method that will actually do the plotting. The backend is specified from a string, which either comes from the keyword argument `backend`, or, if not specified, from the option `pandas.options.plotting.backend`. All the rest of the code in this file uses the backend specified there for the plotting. The backend is imported lazily, as matplotlib is a soft dependency, and pandas can be used without it being installed. Notes ----- Modifies `_backends` with imported backend as a side effect. """ backend_str: str = backend or get_option("plotting.backend") if backend_str in _backends: return _backends[backend_str] module = _load_backend(backend_str) _backends[backend_str] = module return module The provided code snippet includes necessary dependencies for implementing the `autocorrelation_plot` function. Write a Python function `def autocorrelation_plot(series: Series, ax: Axes | None = None, **kwargs) -> Axes` to solve the following problem: Autocorrelation plot for time series. Parameters ---------- series : Series The time series to visualize. ax : Matplotlib axis object, optional The matplotlib axis object to use. **kwargs Options to pass to matplotlib plotting method. Returns ------- matplotlib.axes.Axes Examples -------- The horizontal lines in the plot correspond to 95% and 99% confidence bands. The dashed line is 99% confidence band. .. plot:: :context: close-figs >>> spacing = np.linspace(-9 * np.pi, 9 * np.pi, num=1000) >>> s = pd.Series(0.7 * np.random.rand(1000) + 0.3 * np.sin(spacing)) >>> pd.plotting.autocorrelation_plot(s) <AxesSubplot: title={'center': 'width'}, xlabel='Lag', ylabel='Autocorrelation'> Here is the function: def autocorrelation_plot(series: Series, ax: Axes | None = None, **kwargs) -> Axes: """ Autocorrelation plot for time series. Parameters ---------- series : Series The time series to visualize. ax : Matplotlib axis object, optional The matplotlib axis object to use. **kwargs Options to pass to matplotlib plotting method. Returns ------- matplotlib.axes.Axes Examples -------- The horizontal lines in the plot correspond to 95% and 99% confidence bands. The dashed line is 99% confidence band. .. plot:: :context: close-figs >>> spacing = np.linspace(-9 * np.pi, 9 * np.pi, num=1000) >>> s = pd.Series(0.7 * np.random.rand(1000) + 0.3 * np.sin(spacing)) >>> pd.plotting.autocorrelation_plot(s) <AxesSubplot: title={'center': 'width'}, xlabel='Lag', ylabel='Autocorrelation'> """ plot_backend = _get_plot_backend("matplotlib") return plot_backend.autocorrelation_plot(series=series, ax=ax, **kwargs)
Autocorrelation plot for time series. Parameters ---------- series : Series The time series to visualize. ax : Matplotlib axis object, optional The matplotlib axis object to use. **kwargs Options to pass to matplotlib plotting method. Returns ------- matplotlib.axes.Axes Examples -------- The horizontal lines in the plot correspond to 95% and 99% confidence bands. The dashed line is 99% confidence band. .. plot:: :context: close-figs >>> spacing = np.linspace(-9 * np.pi, 9 * np.pi, num=1000) >>> s = pd.Series(0.7 * np.random.rand(1000) + 0.3 * np.sin(spacing)) >>> pd.plotting.autocorrelation_plot(s) <AxesSubplot: title={'center': 'width'}, xlabel='Lag', ylabel='Autocorrelation'>
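The docstring example as a standalone script; the sinusoidal component keeps the autocorrelation oscillating well outside the confidence bands at short lags:

    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt

    spacing = np.linspace(-9 * np.pi, 9 * np.pi, num=1000)
    s = pd.Series(0.7 * np.random.rand(1000) + 0.3 * np.sin(spacing))

    # The solid and dashed horizontal lines mark the 95% and 99%
    # confidence bands for a white-noise null hypothesis.
    ax = pd.plotting.autocorrelation_plot(s)
    plt.show()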
id: 172,984
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from typing import ( TYPE_CHECKING, Hashable, Iterable, Literal, Sequence, ) import warnings import matplotlib as mpl from matplotlib.artist import Artist import numpy as np from pandas._typing import ( IndexLabel, PlottingOrientation, npt, ) from pandas.errors import AbstractMethodError from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_any_real_numeric_dtype, is_categorical_dtype, is_extension_array_dtype, is_float, is_float_dtype, is_hashable, is_integer, is_integer_dtype, is_iterator, is_list_like, is_number, is_numeric_dtype, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCIndex, ABCMultiIndex, ABCPeriodIndex, ABCSeries, ) from pandas.core.dtypes.missing import ( isna, notna, ) import pandas.core.common as com from pandas.core.frame import DataFrame from pandas.util.version import Version from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib import tools from pandas.plotting._matplotlib.converter import register_pandas_matplotlib_converters from pandas.plotting._matplotlib.groupby import reconstruct_data_with_by from pandas.plotting._matplotlib.misc import unpack_single_str_list from pandas.plotting._matplotlib.style import get_standard_colors from pandas.plotting._matplotlib.timeseries import ( decorate_axes, format_dateaxis, maybe_convert_index, maybe_resample, use_dynamic_x, ) from pandas.plotting._matplotlib.tools import ( create_subplots, flatten_axes, format_date_labels, get_all_lines, get_xlim, handle_shared_axes, ) The provided code snippet includes necessary dependencies for implementing the `_color_in_style` function. Write a Python function `def _color_in_style(style: str) -> bool` to solve the following problem: Check if there is a color letter in the style string. Here is the function: def _color_in_style(style: str) -> bool: """ Check if there is a color letter in the style string. """ from matplotlib.colors import BASE_COLORS return not set(BASE_COLORS).isdisjoint(style)
Check if there is a color letter in the style string.
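A self-contained check of this helper's behavior. The function body is copied from the row; the surrounding demo calls are illustrative:

    from matplotlib.colors import BASE_COLORS

    def _color_in_style(style: str) -> bool:
        """Check if there is a color letter in the style string."""
        return not set(BASE_COLORS).isdisjoint(style)

    # BASE_COLORS keys are the single letters b, g, r, c, m, y, k, w,
    # so any of those characters in a style shorthand counts as a color.
    print(_color_in_style("r--"))  # True: 'r' names a base color
    print(_color_in_style(":"))    # False: linestyle only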
id: 172,985
from __future__ import annotations from math import ceil from typing import ( TYPE_CHECKING, Iterable, Sequence, ) import warnings from matplotlib import ticker import matplotlib.table import numpy as np from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import is_list_like from pandas.core.dtypes.generic import ( ABCDataFrame, ABCIndex, ABCSeries, ) def maybe_adjust_figure(fig: Figure, *args, **kwargs) -> None: """Call fig.subplots_adjust unless fig has constrained_layout enabled.""" if do_adjust_figure(fig): fig.subplots_adjust(*args, **kwargs) def format_date_labels(ax: Axes, rot) -> None: # mini version of autofmt_xdate for label in ax.get_xticklabels(): label.set_ha("right") label.set_rotation(rot) fig = ax.get_figure() maybe_adjust_figure(fig, bottom=0.2)
docstring: null
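This row's prompt defines maybe_adjust_figure and format_date_labels but omits the do_adjust_figure helper they depend on. A plausible sketch of that helper, guarding subplots_adjust against constrained layout (an assumption, since the row does not include it):

    from matplotlib.figure import Figure

    def do_adjust_figure(fig: Figure) -> bool:
        """Whether fig.subplots_adjust can be called safely.

        subplots_adjust conflicts with constrained layout, so adjusting
        is only allowed when constrained layout is off. (Sketch only;
        not part of the dataset row.)
        """
        if not hasattr(fig, "get_constrained_layout"):
            return False
        return not fig.get_constrained_layout()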
id: 172,986
from __future__ import annotations from math import ceil from typing import ( TYPE_CHECKING, Iterable, Sequence, ) import warnings from matplotlib import ticker import matplotlib.table import numpy as np from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import is_list_like from pandas.core.dtypes.generic import ( ABCDataFrame, ABCIndex, ABCSeries, ) ABCSeries = cast( "Type[Series]", create_pandas_abc_type("ABCSeries", "_typ", ("series",)), ) ABCDataFrame = cast( "Type[DataFrame]", create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",)) ) class Table(Artist): """ A table of cells. The table consists of a grid of cells, which are indexed by (row, column). For a simple table, you'll have a full grid of cells with indices from (0, 0) to (num_rows-1, num_cols-1), in which the cell (0, 0) is positioned at the top left. However, you can also add cells with negative indices. You don't have to add a cell to every grid position, so you can create tables that have holes. *Note*: You'll usually not create an empty table from scratch. Instead use `~matplotlib.table.table` to create a table from data. """ codes = {'best': 0, 'upper right': 1, # default 'upper left': 2, 'lower left': 3, 'lower right': 4, 'center left': 5, 'center right': 6, 'lower center': 7, 'upper center': 8, 'center': 9, 'top right': 10, 'top left': 11, 'bottom left': 12, 'bottom right': 13, 'right': 14, 'left': 15, 'top': 16, 'bottom': 17, } """Possible values where to place the table relative to the Axes.""" FONTSIZE = 10 AXESPAD = 0.02 """The border between the Axes and the table edge in Axes units.""" def __init__(self, ax, loc=None, bbox=None, **kwargs): """ Parameters ---------- ax : `matplotlib.axes.Axes` The `~.axes.Axes` to plot the table into. loc : str The position of the cell with respect to *ax*. This must be one of the `~.Table.codes`. bbox : `.Bbox` or [xmin, ymin, width, height], optional A bounding box to draw the table into. If this is not *None*, this overrides *loc*. Other Parameters ---------------- **kwargs `.Artist` properties. """ super().__init__() if isinstance(loc, str): if loc not in self.codes: raise ValueError( "Unrecognized location {!r}. Valid locations are\n\t{}" .format(loc, '\n\t'.join(self.codes))) loc = self.codes[loc] self.set_figure(ax.figure) self._axes = ax self._loc = loc self._bbox = bbox # use axes coords ax._unstale_viewLim() self.set_transform(ax.transAxes) self._cells = {} self._edges = None self._autoColumns = [] self._autoFontsize = True self._internal_update(kwargs) self.set_clip_on(False) def add_cell(self, row, col, *args, **kwargs): """ Create a cell and add it to the table. Parameters ---------- row : int Row index. col : int Column index. *args, **kwargs All other parameters are passed on to `Cell`. Returns ------- `.Cell` The created cell. """ xy = (0, 0) cell = Cell(xy, visible_edges=self.edges, *args, **kwargs) self[row, col] = cell return cell def __setitem__(self, position, cell): """ Set a custom cell in a given position. 
""" _api.check_isinstance(Cell, cell=cell) try: row, col = position[0], position[1] except Exception as err: raise KeyError('Only tuples length 2 are accepted as ' 'coordinates') from err cell.set_figure(self.figure) cell.set_transform(self.get_transform()) cell.set_clip_on(False) self._cells[row, col] = cell self.stale = True def __getitem__(self, position): """Retrieve a custom cell from a given position.""" return self._cells[position] def edges(self): """ The default value of `~.Cell.visible_edges` for newly added cells using `.add_cell`. Notes ----- This setting does currently only affect newly created cells using `.add_cell`. To change existing cells, you have to set their edges explicitly:: for c in tab.get_celld().values(): c.visible_edges = 'horizontal' """ return self._edges def edges(self, value): self._edges = value self.stale = True def _approx_text_height(self): return (self.FONTSIZE / 72.0 * self.figure.dpi / self._axes.bbox.height * 1.2) def draw(self, renderer): # docstring inherited # Need a renderer to do hit tests on mouseevent; assume the last one # will do if renderer is None: renderer = self.figure._get_renderer() if renderer is None: raise RuntimeError('No renderer defined') if not self.get_visible(): return renderer.open_group('table', gid=self.get_gid()) self._update_positions(renderer) for key in sorted(self._cells): self._cells[key].draw(renderer) renderer.close_group('table') self.stale = False def _get_grid_bbox(self, renderer): """ Get a bbox, in axes coordinates for the cells. Only include those in the range (0, 0) to (maxRow, maxCol). """ boxes = [cell.get_window_extent(renderer) for (row, col), cell in self._cells.items() if row >= 0 and col >= 0] bbox = Bbox.union(boxes) return bbox.transformed(self.get_transform().inverted()) def contains(self, mouseevent): # docstring inherited inside, info = self._default_contains(mouseevent) if inside is not None: return inside, info # TODO: Return index of the cell containing the cursor so that the user # doesn't have to bind to each one individually. renderer = self.figure._get_renderer() if renderer is not None: boxes = [cell.get_window_extent(renderer) for (row, col), cell in self._cells.items() if row >= 0 and col >= 0] bbox = Bbox.union(boxes) return bbox.contains(mouseevent.x, mouseevent.y), {} else: return False, {} def get_children(self): """Return the Artists contained by the table.""" return list(self._cells.values()) def get_window_extent(self, renderer=None): # docstring inherited if renderer is None: renderer = self.figure._get_renderer() self._update_positions(renderer) boxes = [cell.get_window_extent(renderer) for cell in self._cells.values()] return Bbox.union(boxes) def _do_cell_alignment(self): """ Calculate row heights and column widths; position cells accordingly. """ # Calculate row/column widths widths = {} heights = {} for (row, col), cell in self._cells.items(): height = heights.setdefault(row, 0.0) heights[row] = max(height, cell.get_height()) width = widths.setdefault(col, 0.0) widths[col] = max(width, cell.get_width()) # work out left position for each column xpos = 0 lefts = {} for col in sorted(widths): lefts[col] = xpos xpos += widths[col] ypos = 0 bottoms = {} for row in sorted(heights, reverse=True): bottoms[row] = ypos ypos += heights[row] # set cell positions for (row, col), cell in self._cells.items(): cell.set_x(lefts[col]) cell.set_y(bottoms[row]) def auto_set_column_width(self, col): """ Automatically set the widths of given columns to optimal sizes. 
Parameters ---------- col : int or sequence of ints The indices of the columns to auto-scale. """ # check for col possibility on iteration try: iter(col) except (TypeError, AttributeError): self._autoColumns.append(col) else: for cell in col: self._autoColumns.append(cell) self.stale = True def _auto_set_column_width(self, col, renderer): """Automatically set width for column.""" cells = [cell for key, cell in self._cells.items() if key[1] == col] max_width = max((cell.get_required_width(renderer) for cell in cells), default=0) for cell in cells: cell.set_width(max_width) def auto_set_font_size(self, value=True): """Automatically set font size.""" self._autoFontsize = value self.stale = True def _auto_set_font_size(self, renderer): if len(self._cells) == 0: return fontsize = next(iter(self._cells.values())).get_fontsize() cells = [] for key, cell in self._cells.items(): # ignore auto-sized columns if key[1] in self._autoColumns: continue size = cell.auto_set_font_size(renderer) fontsize = min(fontsize, size) cells.append(cell) # now set all fontsizes equal for cell in self._cells.values(): cell.set_fontsize(fontsize) def scale(self, xscale, yscale): """Scale column widths by *xscale* and row heights by *yscale*.""" for c in self._cells.values(): c.set_width(c.get_width() * xscale) c.set_height(c.get_height() * yscale) def set_fontsize(self, size): """ Set the font size, in points, of the cell text. Parameters ---------- size : float Notes ----- As long as auto font size has not been disabled, the value will be clipped such that the text fits horizontally into the cell. You can disable this behavior using `.auto_set_font_size`. >>> the_table.auto_set_font_size(False) >>> the_table.set_fontsize(20) However, there is no automatic scaling of the row height so that the text may exceed the cell boundary. 
""" for cell in self._cells.values(): cell.set_fontsize(size) self.stale = True def _offset(self, ox, oy): """Move all the artists by ox, oy (axes coords).""" for c in self._cells.values(): x, y = c.get_x(), c.get_y() c.set_x(x + ox) c.set_y(y + oy) def _update_positions(self, renderer): # called from renderer to allow more precise estimates of # widths and heights with get_window_extent # Do any auto width setting for col in self._autoColumns: self._auto_set_column_width(col, renderer) if self._autoFontsize: self._auto_set_font_size(renderer) # Align all the cells self._do_cell_alignment() bbox = self._get_grid_bbox(renderer) l, b, w, h = bbox.bounds if self._bbox is not None: # Position according to bbox if isinstance(self._bbox, Bbox): rl, rb, rw, rh = self._bbox.bounds else: rl, rb, rw, rh = self._bbox self.scale(rw / w, rh / h) ox = rl - l oy = rb - b self._do_cell_alignment() else: # Position using loc (BEST, UR, UL, LL, LR, CL, CR, LC, UC, C, TR, TL, BL, BR, R, L, T, B) = range(len(self.codes)) # defaults for center ox = (0.5 - w / 2) - l oy = (0.5 - h / 2) - b if self._loc in (UL, LL, CL): # left ox = self.AXESPAD - l if self._loc in (BEST, UR, LR, R, CR): # right ox = 1 - (l + w + self.AXESPAD) if self._loc in (BEST, UR, UL, UC): # upper oy = 1 - (b + h + self.AXESPAD) if self._loc in (LL, LR, LC): # lower oy = self.AXESPAD - b if self._loc in (LC, UC, C): # center x ox = (0.5 - w / 2) - l if self._loc in (CL, CR, C): # center y oy = (0.5 - h / 2) - b if self._loc in (TL, BL, L): # out left ox = - (l + w) if self._loc in (TR, BR, R): # out right ox = 1.0 - l if self._loc in (TR, TL, T): # out top oy = 1.0 - b if self._loc in (BL, BR, B): # out bottom oy = - (b + h) self._offset(ox, oy) def get_celld(self): r""" Return a dict of cells in the table mapping *(row, column)* to `.Cell`\s. Notes ----- You can also directly index into the Table object to access individual cells:: cell = table[row, col] """ return self._cells def table( ax, data: DataFrame | Series, rowLabels=None, colLabels=None, **kwargs ) -> Table: if isinstance(data, ABCSeries): data = data.to_frame() elif isinstance(data, ABCDataFrame): pass else: raise ValueError("Input data must be DataFrame or Series") if rowLabels is None: rowLabels = data.index if colLabels is None: colLabels = data.columns cellText = data.values return matplotlib.table.table( ax, cellText=cellText, rowLabels=rowLabels, colLabels=colLabels, **kwargs )
null
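A hedged sketch of exercising the Table machinery above through the pandas wrapper; the public entry point is pandas.plotting.table, and the DataFrame contents are invented for illustration.

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas.plotting import table as pd_table

df = pd.DataFrame(np.arange(6).reshape(3, 2), columns=["a", "b"])
fig, ax = plt.subplots()
ax.axis("off")                 # show only the table, not an empty plot frame
tab = pd_table(ax, df, loc="center")
tab.auto_set_font_size(False)  # opt out of the auto font sizing described above
tab.set_fontsize(10)           # now applied verbatim, without clipping
tab.scale(1, 1.5)              # keep column widths, stretch row heights
plt.show()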
172,987
from __future__ import annotations from math import ceil from typing import ( TYPE_CHECKING, Iterable, Sequence, ) import warnings from matplotlib import ticker import matplotlib.table import numpy as np from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import is_list_like from pandas.core.dtypes.generic import ( ABCDataFrame, ABCIndex, ABCSeries, ) }) class Line2D(Artist): """ A line - the line can have both a solid linestyle connecting all the vertices, and a marker at each vertex. Additionally, the drawing of the solid line is influenced by the drawstyle, e.g., one can create "stepped" lines in various styles. """ lineStyles = _lineStyles = { # hidden names deprecated '-': '_draw_solid', '--': '_draw_dashed', '-.': '_draw_dash_dot', ':': '_draw_dotted', 'None': '_draw_nothing', ' ': '_draw_nothing', '': '_draw_nothing', } _drawStyles_l = { 'default': '_draw_lines', 'steps-mid': '_draw_steps_mid', 'steps-pre': '_draw_steps_pre', 'steps-post': '_draw_steps_post', } _drawStyles_s = { 'steps': '_draw_steps_pre', } # drawStyles should now be deprecated. drawStyles = {**_drawStyles_l, **_drawStyles_s} # Need a list ordered with long names first: drawStyleKeys = [*_drawStyles_l, *_drawStyles_s] # Referenced here to maintain API. These are defined in # MarkerStyle markers = MarkerStyle.markers filled_markers = MarkerStyle.filled_markers fillStyles = MarkerStyle.fillstyles zorder = 2 def __str__(self): if self._label != "": return f"Line2D({self._label})" elif self._x is None: return "Line2D()" elif len(self._x) > 3: return "Line2D((%g,%g),(%g,%g),...,(%g,%g))" % ( self._x[0], self._y[0], self._x[0], self._y[0], self._x[-1], self._y[-1]) else: return "Line2D(%s)" % ",".join( map("({:g},{:g})".format, self._x, self._y)) def __init__(self, xdata, ydata, linewidth=None, # all Nones default to rc linestyle=None, color=None, gapcolor=None, marker=None, markersize=None, markeredgewidth=None, markeredgecolor=None, markerfacecolor=None, markerfacecoloralt='none', fillstyle=None, antialiased=None, dash_capstyle=None, solid_capstyle=None, dash_joinstyle=None, solid_joinstyle=None, pickradius=5, drawstyle=None, markevery=None, **kwargs ): """ Create a `.Line2D` instance with *x* and *y* data in sequences of *xdata*, *ydata*. Additional keyword arguments are `.Line2D` properties: %(Line2D:kwdoc)s See :meth:`set_linestyle` for a description of the line styles, :meth:`set_marker` for a description of the markers, and :meth:`set_drawstyle` for a description of the draw styles. """ super().__init__() # Convert sequences to NumPy arrays. 
if not np.iterable(xdata): raise RuntimeError('xdata must be a sequence') if not np.iterable(ydata): raise RuntimeError('ydata must be a sequence') if linewidth is None: linewidth = mpl.rcParams['lines.linewidth'] if linestyle is None: linestyle = mpl.rcParams['lines.linestyle'] if marker is None: marker = mpl.rcParams['lines.marker'] if color is None: color = mpl.rcParams['lines.color'] if markersize is None: markersize = mpl.rcParams['lines.markersize'] if antialiased is None: antialiased = mpl.rcParams['lines.antialiased'] if dash_capstyle is None: dash_capstyle = mpl.rcParams['lines.dash_capstyle'] if dash_joinstyle is None: dash_joinstyle = mpl.rcParams['lines.dash_joinstyle'] if solid_capstyle is None: solid_capstyle = mpl.rcParams['lines.solid_capstyle'] if solid_joinstyle is None: solid_joinstyle = mpl.rcParams['lines.solid_joinstyle'] if drawstyle is None: drawstyle = 'default' self._dashcapstyle = None self._dashjoinstyle = None self._solidjoinstyle = None self._solidcapstyle = None self.set_dash_capstyle(dash_capstyle) self.set_dash_joinstyle(dash_joinstyle) self.set_solid_capstyle(solid_capstyle) self.set_solid_joinstyle(solid_joinstyle) self._linestyles = None self._drawstyle = None self._linewidth = linewidth self._unscaled_dash_pattern = (0, None) # offset, dash self._dash_pattern = (0, None) # offset, dash (scaled by linewidth) self.set_linewidth(linewidth) self.set_linestyle(linestyle) self.set_drawstyle(drawstyle) self._color = None self.set_color(color) if marker is None: marker = 'none' # Default. if not isinstance(marker, MarkerStyle): self._marker = MarkerStyle(marker, fillstyle) else: self._marker = marker self._gapcolor = None self.set_gapcolor(gapcolor) self._markevery = None self._markersize = None self._antialiased = None self.set_markevery(markevery) self.set_antialiased(antialiased) self.set_markersize(markersize) self._markeredgecolor = None self._markeredgewidth = None self._markerfacecolor = None self._markerfacecoloralt = None self.set_markerfacecolor(markerfacecolor) # Normalizes None to rc. self.set_markerfacecoloralt(markerfacecoloralt) self.set_markeredgecolor(markeredgecolor) # Normalizes None to rc. self.set_markeredgewidth(markeredgewidth) # update kwargs before updating data to give the caller a # chance to init axes (and hence unit support) self._internal_update(kwargs) self._pickradius = pickradius self.ind_offset = 0 if (isinstance(self._picker, Number) and not isinstance(self._picker, bool)): self._pickradius = self._picker self._xorig = np.asarray([]) self._yorig = np.asarray([]) self._invalidx = True self._invalidy = True self._x = None self._y = None self._xy = None self._path = None self._transformed_path = None self._subslice = False self._x_filled = None # used in subslicing; only x is needed self.set_data(xdata, ydata) def contains(self, mouseevent): """ Test whether *mouseevent* occurred on the line. An event is deemed to have occurred "on" the line if it is less than ``self.pickradius`` (default: 5 points) away from it. Use `~.Line2D.get_pickradius` or `~.Line2D.set_pickradius` to get or set the pick radius. Parameters ---------- mouseevent : `matplotlib.backend_bases.MouseEvent` Returns ------- contains : bool Whether any values are within the radius. details : dict A dictionary ``{'ind': pointlist}``, where *pointlist* is a list of points of the line that are within the pickradius around the event position. 
TODO: sort returned indices by distance """ inside, info = self._default_contains(mouseevent) if inside is not None: return inside, info # Make sure we have data to plot if self._invalidy or self._invalidx: self.recache() if len(self._xy) == 0: return False, {} # Convert points to pixels transformed_path = self._get_transformed_path() path, affine = transformed_path.get_transformed_path_and_affine() path = affine.transform_path(path) xy = path.vertices xt = xy[:, 0] yt = xy[:, 1] # Convert pick radius from points to pixels if self.figure is None: _log.warning('no figure set when check if mouse is on line') pixels = self._pickradius else: pixels = self.figure.dpi / 72. * self._pickradius # The math involved in checking for containment (here and inside of # segment_hits) assumes that it is OK to overflow, so temporarily set # the error flags accordingly. with np.errstate(all='ignore'): # Check for collision if self._linestyle in ['None', None]: # If no line, return the nearby point(s) ind, = np.nonzero( (xt - mouseevent.x) ** 2 + (yt - mouseevent.y) ** 2 <= pixels ** 2) else: # If line, return the nearby segment(s) ind = segment_hits(mouseevent.x, mouseevent.y, xt, yt, pixels) if self._drawstyle.startswith("steps"): ind //= 2 ind += self.ind_offset # Return the point(s) within radius return len(ind) > 0, dict(ind=ind) def get_pickradius(self): """ Return the pick radius used for containment tests. See `.contains` for more details. """ return self._pickradius def set_pickradius(self, pickradius): """ Set the pick radius used for containment tests. See `.contains` for more details. Parameters ---------- pickradius : float Pick radius, in points. """ if not isinstance(pickradius, Number) or pickradius < 0: raise ValueError("pick radius should be a distance") self._pickradius = pickradius pickradius = property(get_pickradius, set_pickradius) def get_fillstyle(self): """ Return the marker fill style. See also `~.Line2D.set_fillstyle`. """ return self._marker.get_fillstyle() def set_fillstyle(self, fs): """ Set the marker fill style. Parameters ---------- fs : {'full', 'left', 'right', 'bottom', 'top', 'none'} Possible values: - 'full': Fill the whole marker with the *markerfacecolor*. - 'left', 'right', 'bottom', 'top': Fill the marker half at the given side with the *markerfacecolor*. The other half of the marker is filled with *markerfacecoloralt*. - 'none': No filling. For examples see :ref:`marker_fill_styles`. """ self.set_marker(MarkerStyle(self._marker.get_marker(), fs)) self.stale = True def set_markevery(self, every): """ Set the markevery property to subsample the plot when using markers. e.g., if ``every=5``, every 5-th marker will be plotted. Parameters ---------- every : None or int or (int, int) or slice or list[int] or float or \ (float, float) or list[bool] Which markers to plot. - ``every=None``: every point will be plotted. - ``every=N``: every N-th marker will be plotted starting with marker 0. - ``every=(start, N)``: every N-th marker, starting at index *start*, will be plotted. - ``every=slice(start, end, N)``: every N-th marker, starting at index *start*, up to but not including index *end*, will be plotted. - ``every=[i, j, m, ...]``: only markers at the given indices will be plotted. - ``every=[True, False, True, ...]``: only positions that are True will be plotted. The list must have the same length as the data points. - ``every=0.1``, (i.e. 
a float): markers will be spaced at approximately equal visual distances along the line; the distance along the line between markers is determined by multiplying the display-coordinate distance of the axes bounding-box diagonal by the value of *every*. - ``every=(0.5, 0.1)`` (i.e. a length-2 tuple of float): similar to ``every=0.1`` but the first marker will be offset along the line by 0.5 multiplied by the display-coordinate-diagonal-distance along the line. For examples see :doc:`/gallery/lines_bars_and_markers/markevery_demo`. Notes ----- Setting *markevery* will still only draw markers at actual data points. While the float argument form aims for uniform visual spacing, it has to coerce from the ideal spacing to the nearest available data point. Depending on the number and distribution of data points, the result may still not look evenly spaced. When using a start offset to specify the first marker, the offset will be from the first data point which may be different from the first the visible data point if the plot is zoomed in. If zooming in on a plot when using float arguments then the actual data points that have markers will change because the distance between markers is always determined from the display-coordinates axes-bounding-box-diagonal regardless of the actual axes data limits. """ self._markevery = every self.stale = True def get_markevery(self): """ Return the markevery setting for marker subsampling. See also `~.Line2D.set_markevery`. """ return self._markevery def set_picker(self, p): """ Set the event picker details for the line. Parameters ---------- p : float or callable[[Artist, Event], tuple[bool, dict]] If a float, it is used as the pick radius in points. """ if not callable(p): self.set_pickradius(p) self._picker = p def get_bbox(self): """Get the bounding box of this line.""" bbox = Bbox([[0, 0], [0, 0]]) bbox.update_from_data_xy(self.get_xydata()) return bbox def get_window_extent(self, renderer=None): bbox = Bbox([[0, 0], [0, 0]]) trans_data_to_xy = self.get_transform().transform bbox.update_from_data_xy(trans_data_to_xy(self.get_xydata()), ignore=True) # correct for marker size, if any if self._marker: ms = (self._markersize / 72.0 * self.figure.dpi) * 0.5 bbox = bbox.padded(ms) return bbox def set_data(self, *args): """ Set the x and y data. 
Parameters ---------- *args : (2, N) array or two 1D arrays """ if len(args) == 1: (x, y), = args else: x, y = args self.set_xdata(x) self.set_ydata(y) def recache_always(self): self.recache(always=True) def recache(self, always=False): if always or self._invalidx: xconv = self.convert_xunits(self._xorig) x = _to_unmasked_float_array(xconv).ravel() else: x = self._x if always or self._invalidy: yconv = self.convert_yunits(self._yorig) y = _to_unmasked_float_array(yconv).ravel() else: y = self._y self._xy = np.column_stack(np.broadcast_arrays(x, y)).astype(float) self._x, self._y = self._xy.T # views self._subslice = False if (self.axes and len(x) > 1000 and self._is_sorted(x) and self.axes.name == 'rectilinear' and self.axes.get_xscale() == 'linear' and self._markevery is None and self.get_clip_on() and self.get_transform() == self.axes.transData): self._subslice = True nanmask = np.isnan(x) if nanmask.any(): self._x_filled = self._x.copy() indices = np.arange(len(x)) self._x_filled[nanmask] = np.interp( indices[nanmask], indices[~nanmask], self._x[~nanmask]) else: self._x_filled = self._x if self._path is not None: interpolation_steps = self._path._interpolation_steps else: interpolation_steps = 1 xy = STEP_LOOKUP_MAP[self._drawstyle](*self._xy.T) self._path = Path(np.asarray(xy).T, _interpolation_steps=interpolation_steps) self._transformed_path = None self._invalidx = False self._invalidy = False def _transform_path(self, subslice=None): """ Put a TransformedPath instance at self._transformed_path; all invalidation of the transform is then handled by the TransformedPath instance. """ # Masked arrays are now handled by the Path class itself if subslice is not None: xy = STEP_LOOKUP_MAP[self._drawstyle](*self._xy[subslice, :].T) _path = Path(np.asarray(xy).T, _interpolation_steps=self._path._interpolation_steps) else: _path = self._path self._transformed_path = TransformedPath(_path, self.get_transform()) def _get_transformed_path(self): """Return this line's `~matplotlib.transforms.TransformedPath`.""" if self._transformed_path is None: self._transform_path() return self._transformed_path def set_transform(self, t): # docstring inherited self._invalidx = True self._invalidy = True super().set_transform(t) def _is_sorted(self, x): """Return whether x is sorted in ascending order.""" # We don't handle the monotonically decreasing case. return _path.is_sorted(x) def draw(self, renderer): # docstring inherited if not self.get_visible(): return if self._invalidy or self._invalidx: self.recache() self.ind_offset = 0 # Needed for contains() method. 
if self._subslice and self.axes: x0, x1 = self.axes.get_xbound() i0 = self._x_filled.searchsorted(x0, 'left') i1 = self._x_filled.searchsorted(x1, 'right') subslice = slice(max(i0 - 1, 0), i1 + 1) self.ind_offset = subslice.start self._transform_path(subslice) else: subslice = None if self.get_path_effects(): from matplotlib.patheffects import PathEffectRenderer renderer = PathEffectRenderer(self.get_path_effects(), renderer) renderer.open_group('line2d', self.get_gid()) if self._lineStyles[self._linestyle] != '_draw_nothing': tpath, affine = (self._get_transformed_path() .get_transformed_path_and_affine()) if len(tpath.vertices): gc = renderer.new_gc() self._set_gc_clip(gc) gc.set_url(self.get_url()) gc.set_antialiased(self._antialiased) gc.set_linewidth(self._linewidth) if self.is_dashed(): cap = self._dashcapstyle join = self._dashjoinstyle else: cap = self._solidcapstyle join = self._solidjoinstyle gc.set_joinstyle(join) gc.set_capstyle(cap) gc.set_snap(self.get_snap()) if self.get_sketch_params() is not None: gc.set_sketch_params(*self.get_sketch_params()) # We first draw a path within the gaps if needed. if self.is_dashed() and self._gapcolor is not None: lc_rgba = mcolors.to_rgba(self._gapcolor, self._alpha) gc.set_foreground(lc_rgba, isRGBA=True) # Define the inverse pattern by moving the last gap to the # start of the sequence. dashes = self._dash_pattern[1] gaps = dashes[-1:] + dashes[:-1] # Set the offset so that this new first segment is skipped # (see backend_bases.GraphicsContextBase.set_dashes for # offset definition). offset_gaps = self._dash_pattern[0] + dashes[-1] gc.set_dashes(offset_gaps, gaps) renderer.draw_path(gc, tpath, affine.frozen()) lc_rgba = mcolors.to_rgba(self._color, self._alpha) gc.set_foreground(lc_rgba, isRGBA=True) gc.set_dashes(*self._dash_pattern) renderer.draw_path(gc, tpath, affine.frozen()) gc.restore() if self._marker and self._markersize > 0: gc = renderer.new_gc() self._set_gc_clip(gc) gc.set_url(self.get_url()) gc.set_linewidth(self._markeredgewidth) gc.set_antialiased(self._antialiased) ec_rgba = mcolors.to_rgba( self.get_markeredgecolor(), self._alpha) fc_rgba = mcolors.to_rgba( self._get_markerfacecolor(), self._alpha) fcalt_rgba = mcolors.to_rgba( self._get_markerfacecolor(alt=True), self._alpha) # If the edgecolor is "auto", it is set according to the *line* # color but inherits the alpha value of the *face* color, if any. if (cbook._str_equal(self._markeredgecolor, "auto") and not cbook._str_lower_equal( self.get_markerfacecolor(), "none")): ec_rgba = ec_rgba[:3] + (fc_rgba[3],) gc.set_foreground(ec_rgba, isRGBA=True) if self.get_sketch_params() is not None: scale, length, randomness = self.get_sketch_params() gc.set_sketch_params(scale/2, length/2, 2*randomness) marker = self._marker # Markers *must* be drawn ignoring the drawstyle (but don't pay the # recaching if drawstyle is already "default"). 
if self.get_drawstyle() != "default": with cbook._setattr_cm( self, _drawstyle="default", _transformed_path=None): self.recache() self._transform_path(subslice) tpath, affine = (self._get_transformed_path() .get_transformed_points_and_affine()) else: tpath, affine = (self._get_transformed_path() .get_transformed_points_and_affine()) if len(tpath.vertices): # subsample the markers if markevery is not None markevery = self.get_markevery() if markevery is not None: subsampled = _mark_every_path( markevery, tpath, affine, self.axes) else: subsampled = tpath snap = marker.get_snap_threshold() if isinstance(snap, Real): snap = renderer.points_to_pixels(self._markersize) >= snap gc.set_snap(snap) gc.set_joinstyle(marker.get_joinstyle()) gc.set_capstyle(marker.get_capstyle()) marker_path = marker.get_path() marker_trans = marker.get_transform() w = renderer.points_to_pixels(self._markersize) if cbook._str_equal(marker.get_marker(), ","): gc.set_linewidth(0) else: # Don't scale for pixels, and don't stroke them marker_trans = marker_trans.scale(w) renderer.draw_markers(gc, marker_path, marker_trans, subsampled, affine.frozen(), fc_rgba) alt_marker_path = marker.get_alt_path() if alt_marker_path: alt_marker_trans = marker.get_alt_transform() alt_marker_trans = alt_marker_trans.scale(w) renderer.draw_markers( gc, alt_marker_path, alt_marker_trans, subsampled, affine.frozen(), fcalt_rgba) gc.restore() renderer.close_group('line2d') self.stale = False def get_antialiased(self): """Return whether antialiased rendering is used.""" return self._antialiased def get_color(self): """ Return the line color. See also `~.Line2D.set_color`. """ return self._color def get_drawstyle(self): """ Return the drawstyle. See also `~.Line2D.set_drawstyle`. """ return self._drawstyle def get_gapcolor(self): """ Return the line gapcolor. See also `~.Line2D.set_gapcolor`. """ return self._gapcolor def get_linestyle(self): """ Return the linestyle. See also `~.Line2D.set_linestyle`. """ return self._linestyle def get_linewidth(self): """ Return the linewidth in points. See also `~.Line2D.set_linewidth`. """ return self._linewidth def get_marker(self): """ Return the line marker. See also `~.Line2D.set_marker`. """ return self._marker.get_marker() def get_markeredgecolor(self): """ Return the marker edge color. See also `~.Line2D.set_markeredgecolor`. """ mec = self._markeredgecolor if cbook._str_equal(mec, 'auto'): if mpl.rcParams['_internal.classic_mode']: if self._marker.get_marker() in ('.', ','): return self._color if (self._marker.is_filled() and self._marker.get_fillstyle() != 'none'): return 'k' # Bad hard-wired default... return self._color else: return mec def get_markeredgewidth(self): """ Return the marker edge width in points. See also `~.Line2D.set_markeredgewidth`. """ return self._markeredgewidth def _get_markerfacecolor(self, alt=False): if self._marker.get_fillstyle() == 'none': return 'none' fc = self._markerfacecoloralt if alt else self._markerfacecolor if cbook._str_lower_equal(fc, 'auto'): return self._color else: return fc def get_markerfacecolor(self): """ Return the marker face color. See also `~.Line2D.set_markerfacecolor`. """ return self._get_markerfacecolor(alt=False) def get_markerfacecoloralt(self): """ Return the alternate marker face color. See also `~.Line2D.set_markerfacecoloralt`. """ return self._get_markerfacecolor(alt=True) def get_markersize(self): """ Return the marker size in points. See also `~.Line2D.set_markersize`. 
""" return self._markersize def get_data(self, orig=True): """ Return the line data as an ``(xdata, ydata)`` pair. If *orig* is *True*, return the original data. """ return self.get_xdata(orig=orig), self.get_ydata(orig=orig) def get_xdata(self, orig=True): """ Return the xdata. If *orig* is *True*, return the original data, else the processed data. """ if orig: return self._xorig if self._invalidx: self.recache() return self._x def get_ydata(self, orig=True): """ Return the ydata. If *orig* is *True*, return the original data, else the processed data. """ if orig: return self._yorig if self._invalidy: self.recache() return self._y def get_path(self): """Return the `~matplotlib.path.Path` associated with this line.""" if self._invalidy or self._invalidx: self.recache() return self._path def get_xydata(self): """ Return the *xy* data as a Nx2 numpy array. """ if self._invalidy or self._invalidx: self.recache() return self._xy def set_antialiased(self, b): """ Set whether to use antialiased rendering. Parameters ---------- b : bool """ if self._antialiased != b: self.stale = True self._antialiased = b def set_color(self, color): """ Set the color of the line. Parameters ---------- color : color """ mcolors._check_color_like(color=color) self._color = color self.stale = True def set_drawstyle(self, drawstyle): """ Set the drawstyle of the plot. The drawstyle determines how the points are connected. Parameters ---------- drawstyle : {'default', 'steps', 'steps-pre', 'steps-mid', \ 'steps-post'}, default: 'default' For 'default', the points are connected with straight lines. The steps variants connect the points with step-like lines, i.e. horizontal lines with vertical steps. They differ in the location of the step: - 'steps-pre': The step is at the beginning of the line segment, i.e. the line will be at the y-value of point to the right. - 'steps-mid': The step is halfway between the points. - 'steps-post: The step is at the end of the line segment, i.e. the line will be at the y-value of the point to the left. - 'steps' is equal to 'steps-pre' and is maintained for backward-compatibility. For examples see :doc:`/gallery/lines_bars_and_markers/step_demo`. """ if drawstyle is None: drawstyle = 'default' _api.check_in_list(self.drawStyles, drawstyle=drawstyle) if self._drawstyle != drawstyle: self.stale = True # invalidate to trigger a recache of the path self._invalidx = True self._drawstyle = drawstyle def set_gapcolor(self, gapcolor): """ Set a color to fill the gaps in the dashed line style. .. note:: Striped lines are created by drawing two interleaved dashed lines. There can be overlaps between those two, which may result in artifacts when using transparency. This functionality is experimental and may change. Parameters ---------- gapcolor : color or None The color with which to fill the gaps. If None, the gaps are unfilled. """ if gapcolor is not None: mcolors._check_color_like(color=gapcolor) self._gapcolor = gapcolor self.stale = True def set_linewidth(self, w): """ Set the line width in points. Parameters ---------- w : float Line width, in points. """ w = float(w) if self._linewidth != w: self.stale = True self._linewidth = w self._dash_pattern = _scale_dashes(*self._unscaled_dash_pattern, w) def set_linestyle(self, ls): """ Set the linestyle of the line. 
Parameters ---------- ls : {'-', '--', '-.', ':', '', (offset, on-off-seq), ...} Possible values: - A string: ========================================== ================= linestyle description ========================================== ================= ``'-'`` or ``'solid'`` solid line ``'--'`` or ``'dashed'`` dashed line ``'-.'`` or ``'dashdot'`` dash-dotted line ``':'`` or ``'dotted'`` dotted line ``'none'``, ``'None'``, ``' '``, or ``''`` draw nothing ========================================== ================= - Alternatively a dash tuple of the following form can be provided:: (offset, onoffseq) where ``onoffseq`` is an even length tuple of on and off ink in points. See also :meth:`set_dashes`. For examples see :doc:`/gallery/lines_bars_and_markers/linestyles`. """ if isinstance(ls, str): if ls in [' ', '', 'none']: ls = 'None' _api.check_in_list([*self._lineStyles, *ls_mapper_r], ls=ls) if ls not in self._lineStyles: ls = ls_mapper_r[ls] self._linestyle = ls else: self._linestyle = '--' self._unscaled_dash_pattern = _get_dash_pattern(ls) self._dash_pattern = _scale_dashes( *self._unscaled_dash_pattern, self._linewidth) self.stale = True def set_marker(self, marker): """ Set the line marker. Parameters ---------- marker : marker style string, `~.path.Path` or `~.markers.MarkerStyle` See `~matplotlib.markers` for full description of possible arguments. """ self._marker = MarkerStyle(marker, self._marker.get_fillstyle()) self.stale = True def _set_markercolor(self, name, has_rcdefault, val): if val is None: val = mpl.rcParams[f"lines.{name}"] if has_rcdefault else "auto" attr = f"_{name}" current = getattr(self, attr) if current is None: self.stale = True else: neq = current != val # Much faster than `np.any(current != val)` if no arrays are used. if neq.any() if isinstance(neq, np.ndarray) else neq: self.stale = True setattr(self, attr, val) def set_markeredgecolor(self, ec): """ Set the marker edge color. Parameters ---------- ec : color """ self._set_markercolor("markeredgecolor", True, ec) def set_markerfacecolor(self, fc): """ Set the marker face color. Parameters ---------- fc : color """ self._set_markercolor("markerfacecolor", True, fc) def set_markerfacecoloralt(self, fc): """ Set the alternate marker face color. Parameters ---------- fc : color """ self._set_markercolor("markerfacecoloralt", False, fc) def set_markeredgewidth(self, ew): """ Set the marker edge width in points. Parameters ---------- ew : float Marker edge width, in points. """ if ew is None: ew = mpl.rcParams['lines.markeredgewidth'] if self._markeredgewidth != ew: self.stale = True self._markeredgewidth = ew def set_markersize(self, sz): """ Set the marker size in points. Parameters ---------- sz : float Marker size, in points. """ sz = float(sz) if self._markersize != sz: self.stale = True self._markersize = sz def set_xdata(self, x): """ Set the data array for x. Parameters ---------- x : 1D array """ if not np.iterable(x): # When deprecation cycle is completed # raise RuntimeError('x must be a sequence') _api.warn_deprecated( since=3.7, message="Setting data with a non sequence type " "is deprecated since %(since)s and will be " "remove %(removal)s") x = [x, ] self._xorig = copy.copy(x) self._invalidx = True self.stale = True def set_ydata(self, y): """ Set the data array for y. 
Parameters ---------- y : 1D array """ if not np.iterable(y): # When deprecation cycle is completed # raise RuntimeError('y must be a sequence') _api.warn_deprecated( since=3.7, message="Setting data with a non sequence type " "is deprecated since %(since)s and will be " "remove %(removal)s") y = [y, ] self._yorig = copy.copy(y) self._invalidy = True self.stale = True def set_dashes(self, seq): """ Set the dash sequence. The dash sequence is a sequence of floats of even length describing the length of dashes and spaces in points. For example, (5, 2, 1, 2) describes a sequence of 5 point and 1 point dashes separated by 2 point spaces. See also `~.Line2D.set_gapcolor`, which allows those spaces to be filled with a color. Parameters ---------- seq : sequence of floats (on/off ink in points) or (None, None) If *seq* is empty or ``(None, None)``, the linestyle will be set to solid. """ if seq == (None, None) or len(seq) == 0: self.set_linestyle('-') else: self.set_linestyle((0, seq)) def update_from(self, other): """Copy properties from *other* to self.""" super().update_from(other) self._linestyle = other._linestyle self._linewidth = other._linewidth self._color = other._color self._gapcolor = other._gapcolor self._markersize = other._markersize self._markerfacecolor = other._markerfacecolor self._markerfacecoloralt = other._markerfacecoloralt self._markeredgecolor = other._markeredgecolor self._markeredgewidth = other._markeredgewidth self._unscaled_dash_pattern = other._unscaled_dash_pattern self._dash_pattern = other._dash_pattern self._dashcapstyle = other._dashcapstyle self._dashjoinstyle = other._dashjoinstyle self._solidcapstyle = other._solidcapstyle self._solidjoinstyle = other._solidjoinstyle self._linestyle = other._linestyle self._marker = MarkerStyle(marker=other._marker) self._drawstyle = other._drawstyle def set_dash_joinstyle(self, s): """ How to join segments of the line if it `~Line2D.is_dashed`. The default joinstyle is :rc:`lines.dash_joinstyle`. Parameters ---------- s : `.JoinStyle` or %(JoinStyle)s """ js = JoinStyle(s) if self._dashjoinstyle != js: self.stale = True self._dashjoinstyle = js def set_solid_joinstyle(self, s): """ How to join segments if the line is solid (not `~Line2D.is_dashed`). The default joinstyle is :rc:`lines.solid_joinstyle`. Parameters ---------- s : `.JoinStyle` or %(JoinStyle)s """ js = JoinStyle(s) if self._solidjoinstyle != js: self.stale = True self._solidjoinstyle = js def get_dash_joinstyle(self): """ Return the `.JoinStyle` for dashed lines. See also `~.Line2D.set_dash_joinstyle`. """ return self._dashjoinstyle.name def get_solid_joinstyle(self): """ Return the `.JoinStyle` for solid lines. See also `~.Line2D.set_solid_joinstyle`. """ return self._solidjoinstyle.name def set_dash_capstyle(self, s): """ How to draw the end caps if the line is `~Line2D.is_dashed`. The default capstyle is :rc:`lines.dash_capstyle`. Parameters ---------- s : `.CapStyle` or %(CapStyle)s """ cs = CapStyle(s) if self._dashcapstyle != cs: self.stale = True self._dashcapstyle = cs def set_solid_capstyle(self, s): """ How to draw the end caps if the line is solid (not `~Line2D.is_dashed`) The default capstyle is :rc:`lines.solid_capstyle`. Parameters ---------- s : `.CapStyle` or %(CapStyle)s """ cs = CapStyle(s) if self._solidcapstyle != cs: self.stale = True self._solidcapstyle = cs def get_dash_capstyle(self): """ Return the `.CapStyle` for dashed lines. See also `~.Line2D.set_dash_capstyle`. 
""" return self._dashcapstyle.name def get_solid_capstyle(self): """ Return the `.CapStyle` for solid lines. See also `~.Line2D.set_solid_capstyle`. """ return self._solidcapstyle.name def is_dashed(self): """ Return whether line has a dashed linestyle. A custom linestyle is assumed to be dashed, we do not inspect the ``onoffseq`` directly. See also `~.Line2D.set_linestyle`. """ return self._linestyle in ('--', '-.', ':') def get_all_lines(ax: Axes) -> list[Line2D]: lines = ax.get_lines() if hasattr(ax, "right_ax"): lines += ax.right_ax.get_lines() if hasattr(ax, "left_ax"): lines += ax.left_ax.get_lines() return lines
null
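A minimal sketch of the Line2D features documented above: markevery subsampling, custom dash sequences, and pick-radius hit testing. The data and parameter values are illustrative.

import matplotlib.pyplot as plt
import numpy as np

x = np.linspace(0, 2 * np.pi, 200)
fig, ax = plt.subplots()
# markevery=10: the line connects all 200 points, but markers are drawn
# on every 10th point only.
(line,) = ax.plot(x, np.sin(x), marker="o", markevery=10, linestyle="--")
line.set_dashes([5, 2, 1, 2])  # 5pt dash, 2pt gap, 1pt dot, 2pt gap
line.set_pickradius(8)         # contains() hits within 8 points of the line
print(line.is_dashed())        # True: a dash sequence maps to linestyle '--'
plt.show()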
172,988
from __future__ import annotations from math import ceil from typing import ( TYPE_CHECKING, Iterable, Sequence, ) import warnings from matplotlib import ticker import matplotlib.table import numpy as np from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import is_list_like from pandas.core.dtypes.generic import ( ABCDataFrame, ABCIndex, ABCSeries, ) class Iterable(Protocol[_T_co]): def __iter__(self) -> Iterator[_T_co]: }) class Line2D(Artist): def __str__(self): def __init__(self, xdata, ydata, linewidth=None, # all Nones default to rc linestyle=None, color=None, gapcolor=None, marker=None, markersize=None, markeredgewidth=None, markeredgecolor=None, markerfacecolor=None, markerfacecoloralt='none', fillstyle=None, antialiased=None, dash_capstyle=None, solid_capstyle=None, dash_joinstyle=None, solid_joinstyle=None, pickradius=5, drawstyle=None, markevery=None, **kwargs ): def contains(self, mouseevent): def get_pickradius(self): def set_pickradius(self, pickradius): def get_fillstyle(self): def set_fillstyle(self, fs): def set_markevery(self, every): def get_markevery(self): def set_picker(self, p): def get_bbox(self): def get_window_extent(self, renderer=None): def set_data(self, *args): def recache_always(self): def recache(self, always=False): def _transform_path(self, subslice=None): def _get_transformed_path(self): def set_transform(self, t): def _is_sorted(self, x): def draw(self, renderer): def get_antialiased(self): def get_color(self): def get_drawstyle(self): def get_gapcolor(self): def get_linestyle(self): def get_linewidth(self): def get_marker(self): def get_markeredgecolor(self): def get_markeredgewidth(self): def _get_markerfacecolor(self, alt=False): def get_markerfacecolor(self): def get_markerfacecoloralt(self): def get_markersize(self): def get_data(self, orig=True): def get_xdata(self, orig=True): def get_ydata(self, orig=True): def get_path(self): def get_xydata(self): def set_antialiased(self, b): def set_color(self, color): def set_drawstyle(self, drawstyle): def set_gapcolor(self, gapcolor): def set_linewidth(self, w): def set_linestyle(self, ls): def set_marker(self, marker): def _set_markercolor(self, name, has_rcdefault, val): def set_markeredgecolor(self, ec): def set_markerfacecolor(self, fc): def set_markerfacecoloralt(self, fc): def set_markeredgewidth(self, ew): def set_markersize(self, sz): def set_xdata(self, x): def set_ydata(self, y): def set_dashes(self, seq): def update_from(self, other): def set_dash_joinstyle(self, s): def set_solid_joinstyle(self, s): def get_dash_joinstyle(self): def get_solid_joinstyle(self): def set_dash_capstyle(self, s): def set_solid_capstyle(self, s): def get_dash_capstyle(self): def get_solid_capstyle(self): def is_dashed(self): def get_xlim(lines: Iterable[Line2D]) -> tuple[float, float]: left, right = np.inf, -np.inf for line in lines: x = line.get_xdata(orig=False) left = min(np.nanmin(x), left) right = max(np.nanmax(x), right) return left, right
null
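A small sketch for the get_xlim helper at the end of the record above, which unions the NaN-aware x extents of several lines. The import path is the private pandas module and may change; the plotted data is invented.

import matplotlib.pyplot as plt
from pandas.plotting._matplotlib.tools import get_xlim

fig, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 4, 9])
ax.plot([0, 5], [2, 2])
left, right = get_xlim(ax.get_lines())  # union of both x ranges: (0.0, 5.0)
ax.set_xlim(left, right)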
172,989
from __future__ import annotations import random from typing import ( TYPE_CHECKING, Hashable, ) from matplotlib import patches import matplotlib.lines as mlines import numpy as np from pandas.core.dtypes.missing import notna from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib.style import get_standard_colors from pandas.plotting._matplotlib.tools import ( create_subplots, do_adjust_figure, maybe_adjust_figure, set_ticks_props, ) def _get_marker_compat(marker): if marker not in mlines.lineMarkers: return "o" return marker def notna(obj: Scalar) -> bool: ... def notna( obj: ArrayLike | Index | list, ) -> npt.NDArray[np.bool_]: ... def notna(obj: NDFrameT) -> NDFrameT: ... def notna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]: ... def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: ... def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: """ Detect non-missing values for an array-like object. This function takes a scalar or array-like object and indicates whether values are valid (not missing, which is ``NaN`` in numeric arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). Parameters ---------- obj : array-like or object value Object to check for *not* null or *non*-missing values. Returns ------- bool or array-like of bool For scalar input, returns a scalar boolean. For array input, returns an array of boolean indicating whether each corresponding element is valid. See Also -------- isna : Boolean inverse of pandas.notna. Series.notna : Detect valid values in a Series. DataFrame.notna : Detect valid values in a DataFrame. Index.notna : Detect valid values in an Index. Examples -------- Scalar arguments (including strings) result in a scalar boolean. >>> pd.notna('dog') True >>> pd.notna(pd.NA) False >>> pd.notna(np.nan) False ndarrays result in an ndarray of booleans. >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) >>> array array([[ 1., nan, 3.], [ 4., 5., nan]]) >>> pd.notna(array) array([[ True, False, True], [ True, True, False]]) For indexes, an ndarray of booleans is returned. >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, ... "2017-07-08"]) >>> index DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], dtype='datetime64[ns]', freq=None) >>> pd.notna(index) array([ True, True, False, True]) For Series and DataFrame, the same type is returned, containing booleans. >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df 0 1 2 0 ant bee cat 1 dog None fly >>> pd.notna(df) 0 1 2 0 True True True 1 True False True >>> pd.notna(df[1]) 0 True 1 False Name: 1, dtype: bool """ res = isna(obj) if isinstance(res, bool): return not res return ~res def maybe_adjust_figure(fig: Figure, *args, **kwargs) -> None: """Call fig.subplots_adjust unless fig has constrained_layout enabled.""" if do_adjust_figure(fig): fig.subplots_adjust(*args, **kwargs) def create_subplots( naxes: int, sharex: bool = False, sharey: bool = False, squeeze: bool = True, subplot_kw=None, ax=None, layout=None, layout_type: str = "box", **fig_kw, ): """ Create a figure with a set of subplots already made. This utility wrapper makes it convenient to create common layouts of subplots, including the enclosing figure object, in a single call. Parameters ---------- naxes : int Number of required axes. Exceeded axes are set invisible. Default is nrows * ncols. sharex : bool If True, the X axis will be shared amongst all subplots. 
sharey : bool If True, the Y axis will be shared amongst all subplots. squeeze : bool If True, extra dimensions are squeezed out from the returned axis object: - if only one subplot is constructed (nrows=ncols=1), the resulting single Axis object is returned as a scalar. - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object array of Axis objects are returned as numpy 1-d arrays. - for NxM subplots with N>1 and M>1 are returned as a 2d array. If False, no squeezing is done: the returned axis object is always a 2-d array containing Axis instances, even if it ends up being 1x1. subplot_kw : dict Dict with keywords passed to the add_subplot() call used to create each subplots. ax : Matplotlib axis object, optional layout : tuple Number of rows and columns of the subplot grid. If not specified, calculated from naxes and layout_type layout_type : {'box', 'horizontal', 'vertical'}, default 'box' Specify how to layout the subplot grid. fig_kw : Other keyword arguments to be passed to the figure() call. Note that all keywords not recognized above will be automatically included here. Returns ------- fig, ax : tuple - fig is the Matplotlib Figure object - ax can be either a single axis object or an array of axis objects if more than one subplot was created. The dimensions of the resulting array can be controlled with the squeeze keyword, see above. Examples -------- x = np.linspace(0, 2*np.pi, 400) y = np.sin(x**2) # Just a figure and one subplot f, ax = plt.subplots() ax.plot(x, y) ax.set_title('Simple plot') # Two subplots, unpack the output array immediately f, (ax1, ax2) = plt.subplots(1, 2, sharey=True) ax1.plot(x, y) ax1.set_title('Sharing Y axis') ax2.scatter(x, y) # Four polar axes plt.subplots(2, 2, subplot_kw=dict(polar=True)) """ import matplotlib.pyplot as plt if subplot_kw is None: subplot_kw = {} if ax is None: fig = plt.figure(**fig_kw) else: if is_list_like(ax): if squeeze: ax = flatten_axes(ax) if layout is not None: warnings.warn( "When passing multiple axes, layout keyword is ignored.", UserWarning, stacklevel=find_stack_level(), ) if sharex or sharey: warnings.warn( "When passing multiple axes, sharex and sharey " "are ignored. These settings must be specified when creating axes.", UserWarning, stacklevel=find_stack_level(), ) if ax.size == naxes: fig = ax.flat[0].get_figure() return fig, ax else: raise ValueError( f"The number of passed axes must be {naxes}, the " "same as the output plot" ) fig = ax.get_figure() # if ax is passed and a number of subplots is 1, return ax as it is if naxes == 1: if squeeze: return fig, ax else: return fig, flatten_axes(ax) else: warnings.warn( "To output multiple subplots, the figure containing " "the passed axes is being cleared.", UserWarning, stacklevel=find_stack_level(), ) fig.clear() nrows, ncols = _get_layout(naxes, layout=layout, layout_type=layout_type) nplots = nrows * ncols # Create empty object array to hold all axes. It's easiest to make it 1-d # so we can just append subplots upon creation, and then axarr = np.empty(nplots, dtype=object) # Create first subplot separately, so we can share it if requested ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw) if sharex: subplot_kw["sharex"] = ax0 if sharey: subplot_kw["sharey"] = ax0 axarr[0] = ax0 # Note off-by-one counting because add_subplot uses the MATLAB 1-based # convention. 
for i in range(1, nplots): kwds = subplot_kw.copy() # Set sharex and sharey to None for blank/dummy axes, these can # interfere with proper axis limits on the visible axes if # they share axes e.g. issue #7528 if i >= naxes: kwds["sharex"] = None kwds["sharey"] = None ax = fig.add_subplot(nrows, ncols, i + 1, **kwds) axarr[i] = ax if naxes != nplots: for ax in axarr[naxes:]: ax.set_visible(False) handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey) if squeeze: # Reshape the array to have the final desired dimension (nrow,ncol), # though discarding unneeded dimensions that equal 1. If we only have # one subplot, just return it instead of a 1-element array. if nplots == 1: axes = axarr[0] else: axes = axarr.reshape(nrows, ncols).squeeze() else: # returned axis array will be always 2-d, even if nrows=ncols=1 axes = axarr.reshape(nrows, ncols) return fig, axes def set_ticks_props( axes: Axes | Sequence[Axes], xlabelsize=None, xrot=None, ylabelsize=None, yrot=None, ): import matplotlib.pyplot as plt for ax in flatten_axes(axes): if xlabelsize is not None: plt.setp(ax.get_xticklabels(), fontsize=xlabelsize) if xrot is not None: plt.setp(ax.get_xticklabels(), rotation=xrot) if ylabelsize is not None: plt.setp(ax.get_yticklabels(), fontsize=ylabelsize) if yrot is not None: plt.setp(ax.get_yticklabels(), rotation=yrot) return axes def scatter_matrix( frame: DataFrame, alpha: float = 0.5, figsize=None, ax=None, grid: bool = False, diagonal: str = "hist", marker: str = ".", density_kwds=None, hist_kwds=None, range_padding: float = 0.05, **kwds, ): df = frame._get_numeric_data() n = df.columns.size naxes = n * n fig, axes = create_subplots(naxes=naxes, figsize=figsize, ax=ax, squeeze=False) # no gaps between subplots maybe_adjust_figure(fig, wspace=0, hspace=0) mask = notna(df) marker = _get_marker_compat(marker) hist_kwds = hist_kwds or {} density_kwds = density_kwds or {} # GH 14855 kwds.setdefault("edgecolors", "none") boundaries_list = [] for a in df.columns: values = df[a].values[mask[a].values] rmin_, rmax_ = np.min(values), np.max(values) rdelta_ext = (rmax_ - rmin_) * range_padding / 2 boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext)) for i, a in enumerate(df.columns): for j, b in enumerate(df.columns): ax = axes[i, j] if i == j: values = df[a].values[mask[a].values] # Deal with the diagonal by drawing a histogram there. if diagonal == "hist": ax.hist(values, **hist_kwds) elif diagonal in ("kde", "density"): from scipy.stats import gaussian_kde y = values gkde = gaussian_kde(y) ind = np.linspace(y.min(), y.max(), 1000) ax.plot(ind, gkde.evaluate(ind), **density_kwds) ax.set_xlim(boundaries_list[i]) else: common = (mask[a] & mask[b]).values ax.scatter( df[b][common], df[a][common], marker=marker, alpha=alpha, **kwds ) ax.set_xlim(boundaries_list[j]) ax.set_ylim(boundaries_list[i]) ax.set_xlabel(b) ax.set_ylabel(a) if j != 0: ax.yaxis.set_visible(False) if i != n - 1: ax.xaxis.set_visible(False) if len(df.columns) > 1: lim1 = boundaries_list[0] locs = axes[0][1].yaxis.get_majorticklocs() locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])] adj = (locs - lim1[0]) / (lim1[1] - lim1[0]) lim0 = axes[0][0].get_ylim() adj = adj * (lim0[1] - lim0[0]) + lim0[0] axes[0][0].yaxis.set_ticks(adj) if np.all(locs == locs.astype(int)): # if all ticks are int locs = locs.astype(int) axes[0][0].yaxis.set_ticklabels(locs) set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0) return axes
null
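Usage sketch for scatter_matrix above; the public entry point is pandas.plotting.scatter_matrix, and the random frame is for illustration only.

import numpy as np
import pandas as pd
from pandas.plotting import scatter_matrix

rng = np.random.default_rng(0)
df = pd.DataFrame(rng.normal(size=(200, 3)), columns=["a", "b", "c"])
# 3x3 grid: histograms on the diagonal (use diagonal="kde" for densities,
# which requires scipy), pairwise scatter plots everywhere else.
axes = scatter_matrix(df, alpha=0.5, diagonal="hist", figsize=(6, 6))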
172,990
from __future__ import annotations import random from typing import ( TYPE_CHECKING, Hashable, ) from matplotlib import patches import matplotlib.lines as mlines import numpy as np from pandas.core.dtypes.missing import notna from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib.style import get_standard_colors from pandas.plotting._matplotlib.tools import ( create_subplots, do_adjust_figure, maybe_adjust_figure, set_ticks_props, ) class Hashable(Protocol, metaclass=ABCMeta): # TODO: This is special, in that a subclass of a hashable class may not be hashable # (for example, list vs. object). It's not obvious how to represent this. This class # is currently mostly useless for static checking. def __hash__(self) -> int: ... "antialiased": ["aa"], "edgecolor": ["ec"], "facecolor": ["fc"], "linestyle": ["ls"], "linewidth": ["lw"], }) def pprint_thing( thing: Any, _nest_lvl: int = 0, escape_chars: EscapeChars | None = None, default_escapes: bool = False, quote_strings: bool = False, max_seq_items: int | None = None, ) -> str: """ This function is the sanctioned way of converting objects to a string representation and properly handles nested sequences. Parameters ---------- thing : anything to be formatted _nest_lvl : internal use only. pprint_thing() is mutually-recursive with pprint_sequence, this argument is used to keep track of the current nesting level, and limit it. escape_chars : list or dict, optional Characters to escape. If a dict is passed the values are the replacements default_escapes : bool, default False Whether the input escape characters replaces or adds to the defaults max_seq_items : int or None, default None Pass through to other pretty printers to limit sequence printing Returns ------- str """ def as_escaped_string( thing: Any, escape_chars: EscapeChars | None = escape_chars ) -> str: translate = {"\t": r"\t", "\n": r"\n", "\r": r"\r"} if isinstance(escape_chars, dict): if default_escapes: translate.update(escape_chars) else: translate = escape_chars escape_chars = list(escape_chars.keys()) else: escape_chars = escape_chars or () result = str(thing) for c in escape_chars: result = result.replace(c, translate[c]) return result if hasattr(thing, "__next__"): return str(thing) elif isinstance(thing, dict) and _nest_lvl < get_option( "display.pprint_nest_depth" ): result = _pprint_dict( thing, _nest_lvl, quote_strings=True, max_seq_items=max_seq_items ) elif is_sequence(thing) and _nest_lvl < get_option("display.pprint_nest_depth"): result = _pprint_seq( thing, _nest_lvl, escape_chars=escape_chars, quote_strings=quote_strings, max_seq_items=max_seq_items, ) elif isinstance(thing, str) and quote_strings: result = f"'{as_escaped_string(thing)}'" else: result = as_escaped_string(thing) return result def get_standard_colors( num_colors: int, colormap: Colormap | None = None, color_type: str = "default", color: dict[str, Color] | Color | Collection[Color] | None = None, ): """ Get standard colors based on `colormap`, `color_type` or `color` inputs. Parameters ---------- num_colors : int Minimum number of colors to be returned. Ignored if `color` is a dictionary. colormap : :py:class:`matplotlib.colors.Colormap`, optional Matplotlib colormap. When provided, the resulting colors will be derived from the colormap. color_type : {"default", "random"}, optional Type of colors to derive. Used if provided `color` and `colormap` are None. Ignored if either `color` or `colormap` are not None. 
color : dict or str or sequence, optional Color(s) to be used for deriving sequence of colors. Can be either be a dictionary, or a single color (single color string, or sequence of floats representing a single color), or a sequence of colors. Returns ------- dict or list Standard colors. Can either be a mapping if `color` was a dictionary, or a list of colors with a length of `num_colors` or more. Warns ----- UserWarning If both `colormap` and `color` are provided. Parameter `color` will override. """ if isinstance(color, dict): return color colors = _derive_colors( color=color, colormap=colormap, color_type=color_type, num_colors=num_colors, ) return list(_cycle_colors(colors, num_colors=num_colors)) def radviz( frame: DataFrame, class_column, ax: Axes | None = None, color=None, colormap=None, **kwds, ) -> Axes: import matplotlib.pyplot as plt def normalize(series): a = min(series) b = max(series) return (series - a) / (b - a) n = len(frame) classes = frame[class_column].drop_duplicates() class_col = frame[class_column] df = frame.drop(class_column, axis=1).apply(normalize) if ax is None: ax = plt.gca() ax.set_xlim(-1, 1) ax.set_ylim(-1, 1) to_plot: dict[Hashable, list[list]] = {} colors = get_standard_colors( num_colors=len(classes), colormap=colormap, color_type="random", color=color ) for kls in classes: to_plot[kls] = [[], []] m = len(frame.columns) - 1 s = np.array( [(np.cos(t), np.sin(t)) for t in [2 * np.pi * (i / m) for i in range(m)]] ) for i in range(n): row = df.iloc[i].values row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1) y = (s * row_).sum(axis=0) / row.sum() kls = class_col.iat[i] to_plot[kls][0].append(y[0]) to_plot[kls][1].append(y[1]) for i, kls in enumerate(classes): ax.scatter( to_plot[kls][0], to_plot[kls][1], color=colors[i], label=pprint_thing(kls), **kwds, ) ax.legend() ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor="none")) for xy, name in zip(s, df.columns): ax.add_patch(patches.Circle(xy, radius=0.025, facecolor="gray")) if xy[0] < 0.0 and xy[1] < 0.0: ax.text( xy[0] - 0.025, xy[1] - 0.025, name, ha="right", va="top", size="small" ) elif xy[0] < 0.0 <= xy[1]: ax.text( xy[0] - 0.025, xy[1] + 0.025, name, ha="right", va="bottom", size="small", ) elif xy[1] < 0.0 <= xy[0]: ax.text( xy[0] + 0.025, xy[1] - 0.025, name, ha="left", va="top", size="small" ) elif xy[0] >= 0.0 and xy[1] >= 0.0: ax.text( xy[0] + 0.025, xy[1] + 0.025, name, ha="left", va="bottom", size="small" ) ax.axis("equal") return ax
null
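A minimal usage sketch for the `radviz` record above, assuming the public `pandas.plotting.radviz` entry point; the DataFrame and its column names are purely illustrative. Each feature is min-max normalized and anchored to a point on the unit circle, and every row is plotted at the normalized weighted average of those anchors.

import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import radviz

# Hypothetical data: three numeric features plus a class column.
df = pd.DataFrame(
    {
        "f1": [1.0, 2.0, 3.0, 4.0],
        "f2": [2.0, 1.0, 4.0, 3.0],
        "f3": [0.5, 1.5, 2.5, 3.5],
        "label": ["a", "a", "b", "b"],
    }
)
ax = radviz(df, class_column="label")
plt.show()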
172,991
from __future__ import annotations import random from typing import ( TYPE_CHECKING, Hashable, ) from matplotlib import patches import matplotlib.lines as mlines import numpy as np from pandas.core.dtypes.missing import notna from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib.style import get_standard_colors from pandas.plotting._matplotlib.tools import ( create_subplots, do_adjust_figure, maybe_adjust_figure, set_ticks_props, ) def pprint_thing( thing: Any, _nest_lvl: int = 0, escape_chars: EscapeChars | None = None, default_escapes: bool = False, quote_strings: bool = False, max_seq_items: int | None = None, ) -> str: def get_standard_colors( num_colors: int, colormap: Colormap | None = None, color_type: str = "default", color: dict[str, Color] | Color | Collection[Color] | None = None, ): def andrews_curves( frame: DataFrame, class_column, ax: Axes | None = None, samples: int = 200, color=None, colormap=None, **kwds, ) -> Axes: import matplotlib.pyplot as plt def function(amplitudes): def f(t): x1 = amplitudes[0] result = x1 / np.sqrt(2.0) # Take the rest of the coefficients and resize them # appropriately. Take a copy of amplitudes as otherwise numpy # deletes the element from amplitudes itself. coeffs = np.delete(np.copy(amplitudes), 0) coeffs = np.resize(coeffs, (int((coeffs.size + 1) / 2), 2)) # Generate the harmonics and arguments for the sin and cos # functions. harmonics = np.arange(0, coeffs.shape[0]) + 1 trig_args = np.outer(harmonics, t) result += np.sum( coeffs[:, 0, np.newaxis] * np.sin(trig_args) + coeffs[:, 1, np.newaxis] * np.cos(trig_args), axis=0, ) return result return f n = len(frame) class_col = frame[class_column] classes = frame[class_column].drop_duplicates() df = frame.drop(class_column, axis=1) t = np.linspace(-np.pi, np.pi, samples) used_legends: set[str] = set() color_values = get_standard_colors( num_colors=len(classes), colormap=colormap, color_type="random", color=color ) colors = dict(zip(classes, color_values)) if ax is None: ax = plt.gca() ax.set_xlim(-np.pi, np.pi) for i in range(n): row = df.iloc[i].values f = function(row) y = f(t) kls = class_col.iat[i] label = pprint_thing(kls) if label not in used_legends: used_legends.add(label) ax.plot(t, y, color=colors[kls], label=label, **kwds) else: ax.plot(t, y, color=colors[kls], **kwds) ax.legend(loc="upper right") ax.grid() return ax
null
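A short usage sketch for `andrews_curves`, assuming the public `pandas.plotting.andrews_curves` entry point and synthetic data. Each row of numeric columns becomes the coefficient vector of the Fourier-style series built by `function` above, so rows from the same class tend to trace similar curves.

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas.plotting import andrews_curves

# Synthetic two-class data: each row maps to the curve
# f(t) = x1/sqrt(2) + x2*sin(t) + x3*cos(t) + x4*sin(2t) + ...
rng = np.random.default_rng(0)
df = pd.DataFrame(rng.normal(size=(20, 4)), columns=list("abcd"))
df["cls"] = ["u"] * 10 + ["v"] * 10

ax = andrews_curves(df, class_column="cls", samples=200)
plt.show()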
172,992
from __future__ import annotations import random from typing import ( TYPE_CHECKING, Hashable, ) from matplotlib import patches import matplotlib.lines as mlines import numpy as np from pandas.core.dtypes.missing import notna from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib.style import get_standard_colors from pandas.plotting._matplotlib.tools import ( create_subplots, do_adjust_figure, maybe_adjust_figure, set_ticks_props, ) def do_adjust_figure(fig: Figure) -> bool: """Whether fig has constrained_layout enabled.""" if not hasattr(fig, "get_constrained_layout"): return False return not fig.get_constrained_layout() class Figure(FigureBase): """ The top level container for all the plot elements. Attributes ---------- patch The `.Rectangle` instance representing the figure background patch. suppressComposite For multiple images, the figure will make composite images depending on the renderer option_image_nocomposite function. If *suppressComposite* is a boolean, this will override the renderer. """ # Remove the self._fig_callbacks properties on figure and subfigure # after the deprecation expires. callbacks = _api.deprecated( "3.6", alternative=("the 'resize_event' signal in " "Figure.canvas.callbacks") )(property(lambda self: self._fig_callbacks)) def __str__(self): return "Figure(%gx%g)" % tuple(self.bbox.size) def __repr__(self): return "<{clsname} size {h:g}x{w:g} with {naxes} Axes>".format( clsname=self.__class__.__name__, h=self.bbox.size[0], w=self.bbox.size[1], naxes=len(self.axes), ) def __init__(self, figsize=None, dpi=None, facecolor=None, edgecolor=None, linewidth=0.0, frameon=None, subplotpars=None, # rc figure.subplot.* tight_layout=None, # rc figure.autolayout constrained_layout=None, # rc figure.constrained_layout.use *, layout=None, **kwargs ): """ Parameters ---------- figsize : 2-tuple of floats, default: :rc:`figure.figsize` Figure dimension ``(width, height)`` in inches. dpi : float, default: :rc:`figure.dpi` Dots per inch. facecolor : default: :rc:`figure.facecolor` The figure patch facecolor. edgecolor : default: :rc:`figure.edgecolor` The figure patch edge color. linewidth : float The linewidth of the frame (i.e. the edge linewidth of the figure patch). frameon : bool, default: :rc:`figure.frameon` If ``False``, suppress drawing the figure background patch. subplotpars : `SubplotParams` Subplot parameters. If not given, the default subplot parameters :rc:`figure.subplot.*` are used. tight_layout : bool or dict, default: :rc:`figure.autolayout` Whether to use the tight layout mechanism. See `.set_tight_layout`. .. admonition:: Discouraged The use of this parameter is discouraged. Please use ``layout='tight'`` instead for the common case of ``tight_layout=True`` and use `.set_tight_layout` otherwise. constrained_layout : bool, default: :rc:`figure.constrained_layout.use` This is equal to ``layout='constrained'``. .. admonition:: Discouraged The use of this parameter is discouraged. Please use ``layout='constrained'`` instead. layout : {'constrained', 'compressed', 'tight', 'none', `.LayoutEngine`, \ None}, default: None The layout mechanism for positioning of plot elements to avoid overlapping Axes decorations (labels, ticks, etc). Note that layout managers can have significant performance penalties. - 'constrained': The constrained layout solver adjusts axes sizes to avoid overlapping axes decorations. Can handle complex plot layouts and colorbars, and is thus recommended. 
See :doc:`/tutorials/intermediate/constrainedlayout_guide` for examples. - 'compressed': uses the same algorithm as 'constrained', but removes extra space between fixed-aspect-ratio Axes. Best for simple grids of axes. - 'tight': Use the tight layout mechanism. This is a relatively simple algorithm that adjusts the subplot parameters so that decorations do not overlap. See `.Figure.set_tight_layout` for further details. - 'none': Do not use a layout engine. - A `.LayoutEngine` instance. Builtin layout classes are `.ConstrainedLayoutEngine` and `.TightLayoutEngine`, more easily accessible by 'constrained' and 'tight'. Passing an instance allows third parties to provide their own layout engine. If not given, fall back to using the parameters *tight_layout* and *constrained_layout*, including their config defaults :rc:`figure.autolayout` and :rc:`figure.constrained_layout.use`. Other Parameters ---------------- **kwargs : `.Figure` properties, optional %(Figure:kwdoc)s """ super().__init__(**kwargs) self._layout_engine = None if layout is not None: if (tight_layout is not None): _api.warn_external( "The Figure parameters 'layout' and 'tight_layout' cannot " "be used together. Please use 'layout' only.") if (constrained_layout is not None): _api.warn_external( "The Figure parameters 'layout' and 'constrained_layout' " "cannot be used together. Please use 'layout' only.") self.set_layout_engine(layout=layout) elif tight_layout is not None: if constrained_layout is not None: _api.warn_external( "The Figure parameters 'tight_layout' and " "'constrained_layout' cannot be used together. Please use " "'layout' parameter") self.set_layout_engine(layout='tight') if isinstance(tight_layout, dict): self.get_layout_engine().set(**tight_layout) elif constrained_layout is not None: if isinstance(constrained_layout, dict): self.set_layout_engine(layout='constrained') self.get_layout_engine().set(**constrained_layout) elif constrained_layout: self.set_layout_engine(layout='constrained') else: # everything is None, so use default: self.set_layout_engine(layout=layout) self._fig_callbacks = cbook.CallbackRegistry(signals=["dpi_changed"]) # Callbacks traditionally associated with the canvas (and exposed with # a proxy property), but that actually need to be on the figure for # pickling. 
self._canvas_callbacks = cbook.CallbackRegistry( signals=FigureCanvasBase.events) connect = self._canvas_callbacks._connect_picklable self._mouse_key_ids = [ connect('key_press_event', backend_bases._key_handler), connect('key_release_event', backend_bases._key_handler), connect('key_release_event', backend_bases._key_handler), connect('button_press_event', backend_bases._mouse_handler), connect('button_release_event', backend_bases._mouse_handler), connect('scroll_event', backend_bases._mouse_handler), connect('motion_notify_event', backend_bases._mouse_handler), ] self._button_pick_id = connect('button_press_event', self.pick) self._scroll_pick_id = connect('scroll_event', self.pick) if figsize is None: figsize = mpl.rcParams['figure.figsize'] if dpi is None: dpi = mpl.rcParams['figure.dpi'] if facecolor is None: facecolor = mpl.rcParams['figure.facecolor'] if edgecolor is None: edgecolor = mpl.rcParams['figure.edgecolor'] if frameon is None: frameon = mpl.rcParams['figure.frameon'] if not np.isfinite(figsize).all() or (np.array(figsize) < 0).any(): raise ValueError('figure size must be positive finite not ' f'{figsize}') self.bbox_inches = Bbox.from_bounds(0, 0, *figsize) self.dpi_scale_trans = Affine2D().scale(dpi) # do not use property as it will trigger self._dpi = dpi self.bbox = TransformedBbox(self.bbox_inches, self.dpi_scale_trans) self.figbbox = self.bbox self.transFigure = BboxTransformTo(self.bbox) self.transSubfigure = self.transFigure self.patch = Rectangle( xy=(0, 0), width=1, height=1, visible=frameon, facecolor=facecolor, edgecolor=edgecolor, linewidth=linewidth, # Don't let the figure patch influence bbox calculation. in_layout=False) self._set_artist_props(self.patch) self.patch.set_antialiased(False) FigureCanvasBase(self) # Set self.canvas. if subplotpars is None: subplotpars = SubplotParams() self.subplotpars = subplotpars self._axstack = _AxesStack() # track all figure axes and current axes self.clear() def pick(self, mouseevent): if not self.canvas.widgetlock.locked(): super().pick(mouseevent) def _check_layout_engines_compat(self, old, new): """ Helper for set_layout engine If the figure has used the old engine and added a colorbar then the value of colorbar_gridspec must be the same on the new engine. """ if old is None or new is None: return True if old.colorbar_gridspec == new.colorbar_gridspec: return True # colorbar layout different, so check if any colorbars are on the # figure... for ax in self.axes: if hasattr(ax, '_colorbar'): # colorbars list themselves as a colorbar. return False return True def set_layout_engine(self, layout=None, **kwargs): """ Set the layout engine for this figure. Parameters ---------- layout: {'constrained', 'compressed', 'tight', 'none'} or \ `LayoutEngine` or None - 'constrained' will use `~.ConstrainedLayoutEngine` - 'compressed' will also use `~.ConstrainedLayoutEngine`, but with a correction that attempts to make a good layout for fixed-aspect ratio Axes. - 'tight' uses `~.TightLayoutEngine` - 'none' removes layout engine. If `None`, the behavior is controlled by :rc:`figure.autolayout` (which if `True` behaves as if 'tight' was passed) and :rc:`figure.constrained_layout.use` (which if `True` behaves as if 'constrained' was passed). If both are `True`, :rc:`figure.autolayout` takes priority. Users and libraries can define their own layout engines and pass the instance directly as well. kwargs: dict The keyword arguments are passed to the layout engine to set things like padding and margin sizes. 
Only used if *layout* is a string. """ if layout is None: if mpl.rcParams['figure.autolayout']: layout = 'tight' elif mpl.rcParams['figure.constrained_layout.use']: layout = 'constrained' else: self._layout_engine = None return if layout == 'tight': new_layout_engine = TightLayoutEngine(**kwargs) elif layout == 'constrained': new_layout_engine = ConstrainedLayoutEngine(**kwargs) elif layout == 'compressed': new_layout_engine = ConstrainedLayoutEngine(compress=True, **kwargs) elif layout == 'none': if self._layout_engine is not None: new_layout_engine = PlaceHolderLayoutEngine( self._layout_engine.adjust_compatible, self._layout_engine.colorbar_gridspec ) else: new_layout_engine = None elif isinstance(layout, LayoutEngine): new_layout_engine = layout else: raise ValueError(f"Invalid value for 'layout': {layout!r}") if self._check_layout_engines_compat(self._layout_engine, new_layout_engine): self._layout_engine = new_layout_engine else: raise RuntimeError('Colorbar layout of new layout engine not ' 'compatible with old engine, and a colorbar ' 'has been created. Engine not changed.') def get_layout_engine(self): return self._layout_engine # TODO: I'd like to dynamically add the _repr_html_ method # to the figure in the right context, but then IPython doesn't # use it, for some reason. def _repr_html_(self): # We can't use "isinstance" here, because then we'd end up importing # webagg unconditionally. if 'WebAgg' in type(self.canvas).__name__: from matplotlib.backends import backend_webagg return backend_webagg.ipython_inline_display(self) def show(self, warn=True): """ If using a GUI backend with pyplot, display the figure window. If the figure was not created using `~.pyplot.figure`, it will lack a `~.backend_bases.FigureManagerBase`, and this method will raise an AttributeError. .. warning:: This does not manage an GUI event loop. Consequently, the figure may only be shown briefly or not shown at all if you or your environment are not managing an event loop. Use cases for `.Figure.show` include running this from a GUI application (where there is persistently an event loop running) or from a shell, like IPython, that install an input hook to allow the interactive shell to accept input while the figure is also being shown and interactive. Some, but not all, GUI toolkits will register an input hook on import. See :ref:`cp_integration` for more details. If you're in a shell without input hook integration or executing a python script, you should use `matplotlib.pyplot.show` with ``block=True`` instead, which takes care of starting and running the event loop for you. Parameters ---------- warn : bool, default: True If ``True`` and we are not running headless (i.e. on Linux with an unset DISPLAY), issue warning when called on a non-GUI backend. """ if self.canvas.manager is None: raise AttributeError( "Figure.show works only for figures managed by pyplot, " "normally created by pyplot.figure()") try: self.canvas.manager.show() except NonGuiException as exc: if warn: _api.warn_external(str(exc)) def axes(self): """ List of Axes in the Figure. You can access and modify the Axes in the Figure through this list. Do not modify the list itself. Instead, use `~Figure.add_axes`, `~.Figure.add_subplot` or `~.Figure.delaxes` to add or remove an Axes. Note: The `.Figure.axes` property and `~.Figure.get_axes` method are equivalent. 
""" return self._axstack.as_list() get_axes = axes.fget def _get_renderer(self): if hasattr(self.canvas, 'get_renderer'): return self.canvas.get_renderer() else: return _get_renderer(self) def _get_dpi(self): return self._dpi def _set_dpi(self, dpi, forward=True): """ Parameters ---------- dpi : float forward : bool Passed on to `~.Figure.set_size_inches` """ if dpi == self._dpi: # We don't want to cause undue events in backends. return self._dpi = dpi self.dpi_scale_trans.clear().scale(dpi) w, h = self.get_size_inches() self.set_size_inches(w, h, forward=forward) self._fig_callbacks.process('dpi_changed', self) dpi = property(_get_dpi, _set_dpi, doc="The resolution in dots per inch.") def get_tight_layout(self): """Return whether `.tight_layout` is called when drawing.""" return isinstance(self.get_layout_engine(), TightLayoutEngine) pending=True) def set_tight_layout(self, tight): """ [*Discouraged*] Set whether and how `.tight_layout` is called when drawing. .. admonition:: Discouraged This method is discouraged in favor of `~.set_layout_engine`. Parameters ---------- tight : bool or dict with keys "pad", "w_pad", "h_pad", "rect" or None If a bool, sets whether to call `.tight_layout` upon drawing. If ``None``, use :rc:`figure.autolayout` instead. If a dict, pass it as kwargs to `.tight_layout`, overriding the default paddings. """ if tight is None: tight = mpl.rcParams['figure.autolayout'] _tight = 'tight' if bool(tight) else 'none' _tight_parameters = tight if isinstance(tight, dict) else {} self.set_layout_engine(_tight, **_tight_parameters) self.stale = True def get_constrained_layout(self): """ Return whether constrained layout is being used. See :doc:`/tutorials/intermediate/constrainedlayout_guide`. """ return isinstance(self.get_layout_engine(), ConstrainedLayoutEngine) pending=True) def set_constrained_layout(self, constrained): """ [*Discouraged*] Set whether ``constrained_layout`` is used upon drawing. If None, :rc:`figure.constrained_layout.use` value will be used. When providing a dict containing the keys ``w_pad``, ``h_pad`` the default ``constrained_layout`` paddings will be overridden. These pads are in inches and default to 3.0/72.0. ``w_pad`` is the width padding and ``h_pad`` is the height padding. .. admonition:: Discouraged This method is discouraged in favor of `~.set_layout_engine`. Parameters ---------- constrained : bool or dict or None """ if constrained is None: constrained = mpl.rcParams['figure.constrained_layout.use'] _constrained = 'constrained' if bool(constrained) else 'none' _parameters = constrained if isinstance(constrained, dict) else {} self.set_layout_engine(_constrained, **_parameters) self.stale = True "3.6", alternative="figure.get_layout_engine().set()", pending=True) def set_constrained_layout_pads(self, **kwargs): """ Set padding for ``constrained_layout``. Tip: The parameters can be passed from a dictionary by using ``fig.set_constrained_layout(**pad_dict)``. See :doc:`/tutorials/intermediate/constrainedlayout_guide`. Parameters ---------- w_pad : float, default: :rc:`figure.constrained_layout.w_pad` Width padding in inches. This is the pad around Axes and is meant to make sure there is enough room for fonts to look good. Defaults to 3 pts = 0.04167 inches h_pad : float, default: :rc:`figure.constrained_layout.h_pad` Height padding in inches. Defaults to 3 pts. wspace : float, default: :rc:`figure.constrained_layout.wspace` Width padding between subplots, expressed as a fraction of the subplot width. 
The total padding ends up being w_pad + wspace. hspace : float, default: :rc:`figure.constrained_layout.hspace` Height padding between subplots, expressed as a fraction of the subplot width. The total padding ends up being h_pad + hspace. """ if isinstance(self.get_layout_engine(), ConstrainedLayoutEngine): self.get_layout_engine().set(**kwargs) pending=True) def get_constrained_layout_pads(self, relative=False): """ Get padding for ``constrained_layout``. Returns a list of ``w_pad, h_pad`` in inches and ``wspace`` and ``hspace`` as fractions of the subplot. All values are None if ``constrained_layout`` is not used. See :doc:`/tutorials/intermediate/constrainedlayout_guide`. Parameters ---------- relative : bool If `True`, then convert from inches to figure relative. """ if not isinstance(self.get_layout_engine(), ConstrainedLayoutEngine): return None, None, None, None info = self.get_layout_engine().get_info() w_pad = info['w_pad'] h_pad = info['h_pad'] wspace = info['wspace'] hspace = info['hspace'] if relative and (w_pad is not None or h_pad is not None): renderer = self._get_renderer() dpi = renderer.dpi w_pad = w_pad * dpi / renderer.width h_pad = h_pad * dpi / renderer.height return w_pad, h_pad, wspace, hspace def set_canvas(self, canvas): """ Set the canvas that contains the figure Parameters ---------- canvas : FigureCanvas """ self.canvas = canvas def figimage(self, X, xo=0, yo=0, alpha=None, norm=None, cmap=None, vmin=None, vmax=None, origin=None, resize=False, **kwargs): """ Add a non-resampled image to the figure. The image is attached to the lower or upper left corner depending on *origin*. Parameters ---------- X The image data. This is an array of one of the following shapes: - (M, N): an image with scalar data. Color-mapping is controlled by *cmap*, *norm*, *vmin*, and *vmax*. - (M, N, 3): an image with RGB values (0-1 float or 0-255 int). - (M, N, 4): an image with RGBA values (0-1 float or 0-255 int), i.e. including transparency. xo, yo : int The *x*/*y* image offset in pixels. alpha : None or float The alpha blending value. %(cmap_doc)s This parameter is ignored if *X* is RGB(A). %(norm_doc)s This parameter is ignored if *X* is RGB(A). %(vmin_vmax_doc)s This parameter is ignored if *X* is RGB(A). origin : {'upper', 'lower'}, default: :rc:`image.origin` Indicates where the [0, 0] index of the array is in the upper left or lower left corner of the axes. resize : bool If *True*, resize the figure to match the given image size. Returns ------- `matplotlib.image.FigureImage` Other Parameters ---------------- **kwargs Additional kwargs are `.Artist` kwargs passed on to `.FigureImage`. Notes ----- figimage complements the Axes image (`~matplotlib.axes.Axes.imshow`) which will be resampled to fit the current Axes. If you want a resampled image to fill the entire figure, you can define an `~matplotlib.axes.Axes` with extent [0, 0, 1, 1]. 
Examples -------- :: f = plt.figure() nx = int(f.get_figwidth() * f.dpi) ny = int(f.get_figheight() * f.dpi) data = np.random.random((ny, nx)) f.figimage(data) plt.show() """ if resize: dpi = self.get_dpi() figsize = [x / dpi for x in (X.shape[1], X.shape[0])] self.set_size_inches(figsize, forward=True) im = mimage.FigureImage(self, cmap=cmap, norm=norm, offsetx=xo, offsety=yo, origin=origin, **kwargs) im.stale_callback = _stale_figure_callback im.set_array(X) im.set_alpha(alpha) if norm is None: im.set_clim(vmin, vmax) self.images.append(im) im._remove_method = self.images.remove self.stale = True return im def set_size_inches(self, w, h=None, forward=True): """ Set the figure size in inches. Call signatures:: fig.set_size_inches(w, h) # OR fig.set_size_inches((w, h)) Parameters ---------- w : (float, float) or float Width and height in inches (if height not specified as a separate argument) or width. h : float Height in inches. forward : bool, default: True If ``True``, the canvas size is automatically updated, e.g., you can resize the figure window from the shell. See Also -------- matplotlib.figure.Figure.get_size_inches matplotlib.figure.Figure.set_figwidth matplotlib.figure.Figure.set_figheight Notes ----- To transform from pixels to inches divide by `Figure.dpi`. """ if h is None: # Got called with a single pair as argument. w, h = w size = np.array([w, h]) if not np.isfinite(size).all() or (size < 0).any(): raise ValueError(f'figure size must be positive finite not {size}') self.bbox_inches.p1 = size if forward: manager = self.canvas.manager if manager is not None: manager.resize(*(size * self.dpi).astype(int)) self.stale = True def get_size_inches(self): """ Return the current size of the figure in inches. Returns ------- ndarray The size (width, height) of the figure in inches. See Also -------- matplotlib.figure.Figure.set_size_inches matplotlib.figure.Figure.get_figwidth matplotlib.figure.Figure.get_figheight Notes ----- The size in pixels can be obtained by multiplying with `Figure.dpi`. """ return np.array(self.bbox_inches.p1) def get_figwidth(self): """Return the figure width in inches.""" return self.bbox_inches.width def get_figheight(self): """Return the figure height in inches.""" return self.bbox_inches.height def get_dpi(self): """Return the resolution in dots per inch as a float.""" return self.dpi def set_dpi(self, val): """ Set the resolution of the figure in dots-per-inch. Parameters ---------- val : float """ self.dpi = val self.stale = True def set_figwidth(self, val, forward=True): """ Set the width of the figure in inches. Parameters ---------- val : float forward : bool See `set_size_inches`. See Also -------- matplotlib.figure.Figure.set_figheight matplotlib.figure.Figure.set_size_inches """ self.set_size_inches(val, self.get_figheight(), forward=forward) def set_figheight(self, val, forward=True): """ Set the height of the figure in inches. Parameters ---------- val : float forward : bool See `set_size_inches`. 
See Also -------- matplotlib.figure.Figure.set_figwidth matplotlib.figure.Figure.set_size_inches """ self.set_size_inches(self.get_figwidth(), val, forward=forward) def clear(self, keep_observers=False): # docstring inherited super().clear(keep_observers=keep_observers) # FigureBase.clear does not clear toolbars, as # only Figure can have toolbars toolbar = self.canvas.toolbar if toolbar is not None: toolbar.update() def draw(self, renderer): # docstring inherited # draw the figure bounding box, perhaps none for white figure if not self.get_visible(): return artists = self._get_draw_artists(renderer) try: renderer.open_group('figure', gid=self.get_gid()) if self.axes and self.get_layout_engine() is not None: try: self.get_layout_engine().execute(self) except ValueError: pass # ValueError can occur when resizing a window. self.patch.draw(renderer) mimage._draw_list_compositing_images( renderer, self, artists, self.suppressComposite) for sfig in self.subfigs: sfig.draw(renderer) renderer.close_group('figure') finally: self.stale = False DrawEvent("draw_event", self.canvas, renderer)._process() def draw_without_rendering(self): """ Draw the figure with no output. Useful to get the final size of artists that require a draw before their size is known (e.g. text). """ renderer = _get_renderer(self) with renderer._draw_disabled(): self.draw(renderer) def draw_artist(self, a): """ Draw `.Artist` *a* only. """ a.draw(self.canvas.get_renderer()) def __getstate__(self): state = super().__getstate__() # The canvas cannot currently be pickled, but this has the benefit # of meaning that a figure can be detached from one canvas, and # re-attached to another. state.pop("canvas") # discard any changes to the dpi due to pixel ratio changes state["_dpi"] = state.get('_original_dpi', state['_dpi']) # add version information to the state state['__mpl_version__'] = mpl.__version__ # check whether the figure manager (if any) is registered with pyplot from matplotlib import _pylab_helpers if self.canvas.manager in _pylab_helpers.Gcf.figs.values(): state['_restore_to_pylab'] = True return state def __setstate__(self, state): version = state.pop('__mpl_version__') restore_to_pylab = state.pop('_restore_to_pylab', False) if version != mpl.__version__: _api.warn_external( f"This figure was saved with matplotlib version {version} and " f"is unlikely to function correctly.") self.__dict__ = state # re-initialise some of the unstored state information FigureCanvasBase(self) # Set self.canvas. if restore_to_pylab: # lazy import to avoid circularity import matplotlib.pyplot as plt import matplotlib._pylab_helpers as pylab_helpers allnums = plt.get_fignums() num = max(allnums) + 1 if allnums else 1 backend = plt._get_backend_mod() mgr = backend.new_figure_manager_given_figure(num, self) pylab_helpers.Gcf._set_new_active_manager(mgr) plt.draw_if_interactive() self.stale = True def add_axobserver(self, func): """Whenever the Axes state change, ``func(self)`` will be called.""" # Connect a wrapper lambda and not func itself, to avoid it being # weakref-collected. self._axobservers.connect("_axes_change_event", lambda arg: func(arg)) def savefig(self, fname, *, transparent=None, **kwargs): """ Save the current figure. Call signature:: savefig(fname, *, dpi='figure', format=None, metadata=None, bbox_inches=None, pad_inches=0.1, facecolor='auto', edgecolor='auto', backend=None, **kwargs ) The available output formats depend on the backend being used. 
Parameters ---------- fname : str or path-like or binary file-like A path, or a Python file-like object, or possibly some backend-dependent object such as `matplotlib.backends.backend_pdf.PdfPages`. If *format* is set, it determines the output format, and the file is saved as *fname*. Note that *fname* is used verbatim, and there is no attempt to make the extension, if any, of *fname* match *format*, and no extension is appended. If *format* is not set, then the format is inferred from the extension of *fname*, if there is one. If *format* is not set and *fname* has no extension, then the file is saved with :rc:`savefig.format` and the appropriate extension is appended to *fname*. Other Parameters ---------------- dpi : float or 'figure', default: :rc:`savefig.dpi` The resolution in dots per inch. If 'figure', use the figure's dpi value. format : str The file format, e.g. 'png', 'pdf', 'svg', ... The behavior when this is unset is documented under *fname*. metadata : dict, optional Key/value pairs to store in the image metadata. The supported keys and defaults depend on the image format and backend: - 'png' with Agg backend: See the parameter ``metadata`` of `~.FigureCanvasAgg.print_png`. - 'pdf' with pdf backend: See the parameter ``metadata`` of `~.backend_pdf.PdfPages`. - 'svg' with svg backend: See the parameter ``metadata`` of `~.FigureCanvasSVG.print_svg`. - 'eps' and 'ps' with PS backend: Only 'Creator' is supported. bbox_inches : str or `.Bbox`, default: :rc:`savefig.bbox` Bounding box in inches: only the given portion of the figure is saved. If 'tight', try to figure out the tight bbox of the figure. pad_inches : float, default: :rc:`savefig.pad_inches` Amount of padding around the figure when bbox_inches is 'tight'. facecolor : color or 'auto', default: :rc:`savefig.facecolor` The facecolor of the figure. If 'auto', use the current figure facecolor. edgecolor : color or 'auto', default: :rc:`savefig.edgecolor` The edgecolor of the figure. If 'auto', use the current figure edgecolor. backend : str, optional Use a non-default backend to render the file, e.g. to render a png file with the "cairo" backend rather than the default "agg", or a pdf file with the "pgf" backend rather than the default "pdf". Note that the default backend is normally sufficient. See :ref:`the-builtin-backends` for a list of valid backends for each file format. Custom backends can be referenced as "module://...". orientation : {'landscape', 'portrait'} Currently only supported by the postscript backend. papertype : str One of 'letter', 'legal', 'executive', 'ledger', 'a0' through 'a10', 'b0' through 'b10'. Only supported for postscript output. transparent : bool If *True*, the Axes patches will all be transparent; the Figure patch will also be transparent unless *facecolor* and/or *edgecolor* are specified via kwargs. If *False* has no effect and the color of the Axes and Figure patches are unchanged (unless the Figure patch is specified via the *facecolor* and/or *edgecolor* keyword arguments in which case those colors are used). The transparency of these patches will be restored to their original values upon exit of this function. This is useful, for example, for displaying a plot on top of a colored background on a web page. bbox_extra_artists : list of `~matplotlib.artist.Artist`, optional A list of extra artists that will be considered when the tight bbox is calculated. pil_kwargs : dict, optional Additional keyword arguments that are passed to `PIL.Image.Image.save` when saving the figure. 
""" kwargs.setdefault('dpi', mpl.rcParams['savefig.dpi']) if transparent is None: transparent = mpl.rcParams['savefig.transparent'] with ExitStack() as stack: if transparent: kwargs.setdefault('facecolor', 'none') kwargs.setdefault('edgecolor', 'none') for ax in self.axes: stack.enter_context( ax.patch._cm_set(facecolor='none', edgecolor='none')) self.canvas.print_figure(fname, **kwargs) def ginput(self, n=1, timeout=30, show_clicks=True, mouse_add=MouseButton.LEFT, mouse_pop=MouseButton.RIGHT, mouse_stop=MouseButton.MIDDLE): """ Blocking call to interact with a figure. Wait until the user clicks *n* times on the figure, and return the coordinates of each click in a list. There are three possible interactions: - Add a point. - Remove the most recently added point. - Stop the interaction and return the points added so far. The actions are assigned to mouse buttons via the arguments *mouse_add*, *mouse_pop* and *mouse_stop*. Parameters ---------- n : int, default: 1 Number of mouse clicks to accumulate. If negative, accumulate clicks until the input is terminated manually. timeout : float, default: 30 seconds Number of seconds to wait before timing out. If zero or negative will never time out. show_clicks : bool, default: True If True, show a red cross at the location of each click. mouse_add : `.MouseButton` or None, default: `.MouseButton.LEFT` Mouse button used to add points. mouse_pop : `.MouseButton` or None, default: `.MouseButton.RIGHT` Mouse button used to remove the most recently added point. mouse_stop : `.MouseButton` or None, default: `.MouseButton.MIDDLE` Mouse button used to stop input. Returns ------- list of tuples A list of the clicked (x, y) coordinates. Notes ----- The keyboard can also be used to select points in case your mouse does not have one or more of the buttons. The delete and backspace keys act like right-clicking (i.e., remove last point), the enter key terminates input and any other key (not already used by the window manager) selects a point. """ clicks = [] marks = [] def handler(event): is_button = event.name == "button_press_event" is_key = event.name == "key_press_event" # Quit (even if not in infinite mode; this is consistent with # MATLAB and sometimes quite useful, but will require the user to # test how many points were actually returned before using data). if (is_button and event.button == mouse_stop or is_key and event.key in ["escape", "enter"]): self.canvas.stop_event_loop() # Pop last click. elif (is_button and event.button == mouse_pop or is_key and event.key in ["backspace", "delete"]): if clicks: clicks.pop() if show_clicks: marks.pop().remove() self.canvas.draw() # Add new click. elif (is_button and event.button == mouse_add # On macOS/gtk, some keys return None. or is_key and event.key is not None): if event.inaxes: clicks.append((event.xdata, event.ydata)) _log.info("input %i: %f, %f", len(clicks), event.xdata, event.ydata) if show_clicks: line = mpl.lines.Line2D([event.xdata], [event.ydata], marker="+", color="r") event.inaxes.add_line(line) marks.append(line) self.canvas.draw() if len(clicks) == n and n > 0: self.canvas.stop_event_loop() _blocking_input.blocking_input_loop( self, ["button_press_event", "key_press_event"], timeout, handler) # Cleanup. for mark in marks: mark.remove() self.canvas.draw() return clicks def waitforbuttonpress(self, timeout=-1): """ Blocking call to interact with the figure. 
Wait for user input and return True if a key was pressed, False if a mouse button was pressed and None if no input was given within *timeout* seconds. Negative values deactivate *timeout*. """ event = None def handler(ev): nonlocal event event = ev self.canvas.stop_event_loop() _blocking_input.blocking_input_loop( self, ["button_press_event", "key_press_event"], timeout, handler) return None if event is None else event.name == "key_press_event" def execute_constrained_layout(self, renderer=None): """ Use ``layoutgrid`` to determine pos positions within Axes. See also `.set_constrained_layout_pads`. Returns ------- layoutgrid : private debugging object """ if not isinstance(self.get_layout_engine(), ConstrainedLayoutEngine): return None return self.get_layout_engine().execute(self) def tight_layout(self, *, pad=1.08, h_pad=None, w_pad=None, rect=None): """ Adjust the padding between and around subplots. To exclude an artist on the Axes from the bounding box calculation that determines the subplot parameters (i.e. legend, or annotation), set ``a.set_in_layout(False)`` for that artist. Parameters ---------- pad : float, default: 1.08 Padding between the figure edge and the edges of subplots, as a fraction of the font size. h_pad, w_pad : float, default: *pad* Padding (height/width) between edges of adjacent subplots, as a fraction of the font size. rect : tuple (left, bottom, right, top), default: (0, 0, 1, 1) A rectangle in normalized figure coordinates into which the whole subplots area (including labels) will fit. See Also -------- .Figure.set_layout_engine .pyplot.tight_layout """ # note that here we do not permanently set the figures engine to # tight_layout but rather just perform the layout in place and remove # any previous engines. engine = TightLayoutEngine(pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect) try: previous_engine = self.get_layout_engine() self.set_layout_engine(engine) engine.execute(self) if not isinstance(previous_engine, TightLayoutEngine) \ and previous_engine is not None: _api.warn_external('The figure layout has changed to tight') finally: self.set_layout_engine(None) def bootstrap_plot( series: Series, fig: Figure | None = None, size: int = 50, samples: int = 500, **kwds, ) -> Figure: import matplotlib.pyplot as plt # TODO: is the failure mentioned below still relevant? # random.sample(ndarray, int) fails on python 3.3, sigh data = list(series.values) samplings = [random.sample(data, size) for _ in range(samples)] means = np.array([np.mean(sampling) for sampling in samplings]) medians = np.array([np.median(sampling) for sampling in samplings]) midranges = np.array( [(min(sampling) + max(sampling)) * 0.5 for sampling in samplings] ) if fig is None: fig = plt.figure() x = list(range(samples)) axes = [] ax1 = fig.add_subplot(2, 3, 1) ax1.set_xlabel("Sample") axes.append(ax1) ax1.plot(x, means, **kwds) ax2 = fig.add_subplot(2, 3, 2) ax2.set_xlabel("Sample") axes.append(ax2) ax2.plot(x, medians, **kwds) ax3 = fig.add_subplot(2, 3, 3) ax3.set_xlabel("Sample") axes.append(ax3) ax3.plot(x, midranges, **kwds) ax4 = fig.add_subplot(2, 3, 4) ax4.set_xlabel("Mean") axes.append(ax4) ax4.hist(means, **kwds) ax5 = fig.add_subplot(2, 3, 5) ax5.set_xlabel("Median") axes.append(ax5) ax5.hist(medians, **kwds) ax6 = fig.add_subplot(2, 3, 6) ax6.set_xlabel("Midrange") axes.append(ax6) ax6.hist(midranges, **kwds) for axis in axes: plt.setp(axis.get_xticklabels(), fontsize=8) plt.setp(axis.get_yticklabels(), fontsize=8) if do_adjust_figure(fig): plt.tight_layout() return fig
null
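A usage sketch for `bootstrap_plot`, assuming the public `pandas.plotting.bootstrap_plot` entry point and a synthetic series. Because the implementation above uses `random.sample`, each resample is drawn without replacement, so `size` must not exceed the series length; the result is a 2x3 grid of trace plots and histograms for the mean, median, and midrange of each resample.

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas.plotting import bootstrap_plot

rng = np.random.default_rng(0)
s = pd.Series(rng.normal(loc=10.0, scale=2.0, size=500))

# 500 resamples of 50 points each, drawn without replacement.
fig = bootstrap_plot(s, size=50, samples=500)
plt.show()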
172,993
from __future__ import annotations import random from typing import ( TYPE_CHECKING, Hashable, ) from matplotlib import patches import matplotlib.lines as mlines import numpy as np from pandas.core.dtypes.missing import notna from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib.style import get_standard_colors from pandas.plotting._matplotlib.tools import ( create_subplots, do_adjust_figure, maybe_adjust_figure, set_ticks_props, ) def pprint_thing( thing: Any, _nest_lvl: int = 0, escape_chars: EscapeChars | None = None, default_escapes: bool = False, quote_strings: bool = False, max_seq_items: int | None = None, ) -> str: """ This function is the sanctioned way of converting objects to a string representation and properly handles nested sequences. Parameters ---------- thing : anything to be formatted _nest_lvl : internal use only. pprint_thing() is mutually-recursive with pprint_sequence, this argument is used to keep track of the current nesting level, and limit it. escape_chars : list or dict, optional Characters to escape. If a dict is passed the values are the replacements default_escapes : bool, default False Whether the input escape characters replaces or adds to the defaults max_seq_items : int or None, default None Pass through to other pretty printers to limit sequence printing Returns ------- str """ def as_escaped_string( thing: Any, escape_chars: EscapeChars | None = escape_chars ) -> str: translate = {"\t": r"\t", "\n": r"\n", "\r": r"\r"} if isinstance(escape_chars, dict): if default_escapes: translate.update(escape_chars) else: translate = escape_chars escape_chars = list(escape_chars.keys()) else: escape_chars = escape_chars or () result = str(thing) for c in escape_chars: result = result.replace(c, translate[c]) return result if hasattr(thing, "__next__"): return str(thing) elif isinstance(thing, dict) and _nest_lvl < get_option( "display.pprint_nest_depth" ): result = _pprint_dict( thing, _nest_lvl, quote_strings=True, max_seq_items=max_seq_items ) elif is_sequence(thing) and _nest_lvl < get_option("display.pprint_nest_depth"): result = _pprint_seq( thing, _nest_lvl, escape_chars=escape_chars, quote_strings=quote_strings, max_seq_items=max_seq_items, ) elif isinstance(thing, str) and quote_strings: result = f"'{as_escaped_string(thing)}'" else: result = as_escaped_string(thing) return result def get_standard_colors( num_colors: int, colormap: Colormap | None = None, color_type: str = "default", color: dict[str, Color] | Color | Collection[Color] | None = None, ): """ Get standard colors based on `colormap`, `color_type` or `color` inputs. Parameters ---------- num_colors : int Minimum number of colors to be returned. Ignored if `color` is a dictionary. colormap : :py:class:`matplotlib.colors.Colormap`, optional Matplotlib colormap. When provided, the resulting colors will be derived from the colormap. color_type : {"default", "random"}, optional Type of colors to derive. Used if provided `color` and `colormap` are None. Ignored if either `color` or `colormap` are not None. color : dict or str or sequence, optional Color(s) to be used for deriving sequence of colors. Can be either be a dictionary, or a single color (single color string, or sequence of floats representing a single color), or a sequence of colors. Returns ------- dict or list Standard colors. Can either be a mapping if `color` was a dictionary, or a list of colors with a length of `num_colors` or more. Warns ----- UserWarning If both `colormap` and `color` are provided. 
Parameter `color` will override. """ if isinstance(color, dict): return color colors = _derive_colors( color=color, colormap=colormap, color_type=color_type, num_colors=num_colors, ) return list(_cycle_colors(colors, num_colors=num_colors)) def parallel_coordinates( frame: DataFrame, class_column, cols=None, ax: Axes | None = None, color=None, use_columns: bool = False, xticks=None, colormap=None, axvlines: bool = True, axvlines_kwds=None, sort_labels: bool = False, **kwds, ) -> Axes: import matplotlib.pyplot as plt if axvlines_kwds is None: axvlines_kwds = {"linewidth": 1, "color": "black"} n = len(frame) classes = frame[class_column].drop_duplicates() class_col = frame[class_column] if cols is None: df = frame.drop(class_column, axis=1) else: df = frame[cols] used_legends: set[str] = set() ncols = len(df.columns) # determine values to use for xticks x: list[int] | Index if use_columns is True: if not np.all(np.isreal(list(df.columns))): raise ValueError("Columns must be numeric to be used as xticks") x = df.columns elif xticks is not None: if not np.all(np.isreal(xticks)): raise ValueError("xticks specified must be numeric") if len(xticks) != ncols: raise ValueError("Length of xticks must match number of columns") x = xticks else: x = list(range(ncols)) if ax is None: ax = plt.gca() color_values = get_standard_colors( num_colors=len(classes), colormap=colormap, color_type="random", color=color ) if sort_labels: classes = sorted(classes) color_values = sorted(color_values) colors = dict(zip(classes, color_values)) for i in range(n): y = df.iloc[i].values kls = class_col.iat[i] label = pprint_thing(kls) if label not in used_legends: used_legends.add(label) ax.plot(x, y, color=colors[kls], label=label, **kwds) else: ax.plot(x, y, color=colors[kls], **kwds) if axvlines: for i in x: ax.axvline(i, **axvlines_kwds) ax.set_xticks(x) ax.set_xticklabels(df.columns) ax.set_xlim(x[0], x[-1]) ax.legend(loc="upper right") ax.grid() return ax
null
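A usage sketch for `parallel_coordinates`, assuming the public `pandas.plotting.parallel_coordinates` entry point; the iris-style values are illustrative. Each row is drawn as one polyline across a vertical axis per column, colored by its class.

import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import parallel_coordinates

df = pd.DataFrame(
    {
        "sepal_length": [5.1, 4.9, 6.3, 5.8],
        "sepal_width": [3.5, 3.0, 3.3, 2.7],
        "petal_length": [1.4, 1.4, 6.0, 5.1],
        "species": ["setosa", "setosa", "virginica", "virginica"],
    }
)
ax = parallel_coordinates(df, class_column="species", axvlines=True)
plt.show()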
172,994
from __future__ import annotations import random from typing import ( TYPE_CHECKING, Hashable, ) from matplotlib import patches import matplotlib.lines as mlines import numpy as np from pandas.core.dtypes.missing import notna from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib.style import get_standard_colors from pandas.plotting._matplotlib.tools import ( create_subplots, do_adjust_figure, maybe_adjust_figure, set_ticks_props, ) def lag_plot(series: Series, lag: int = 1, ax: Axes | None = None, **kwds) -> Axes: # workaround because `c='b'` is hardcoded in matplotlib's scatter method import matplotlib.pyplot as plt kwds.setdefault("c", plt.rcParams["patch.facecolor"]) data = series.values y1 = data[:-lag] y2 = data[lag:] if ax is None: ax = plt.gca() ax.set_xlabel("y(t)") ax.set_ylabel(f"y(t + {lag})") ax.scatter(y1, y2, **kwds) return ax
null
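A usage sketch for `lag_plot`, assuming the public `pandas.plotting.lag_plot` entry point. An autocorrelated series shows clear structure in the scatter of y(t) against y(t + lag), whereas white noise would fill the plane.

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas.plotting import lag_plot

# Synthetic AR(1)-style series: each value leans on its predecessor.
rng = np.random.default_rng(0)
noise = rng.normal(size=200)
data = np.empty(200)
data[0] = noise[0]
for i in range(1, 200):
    data[i] = 0.8 * data[i - 1] + noise[i]

ax = lag_plot(pd.Series(data), lag=1)
plt.show()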
172,995
from __future__ import annotations import random from typing import ( TYPE_CHECKING, Hashable, ) from matplotlib import patches import matplotlib.lines as mlines import numpy as np from pandas.core.dtypes.missing import notna from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib.style import get_standard_colors from pandas.plotting._matplotlib.tools import ( create_subplots, do_adjust_figure, maybe_adjust_figure, set_ticks_props, ) def autocorrelation_plot(series: Series, ax: Axes | None = None, **kwds) -> Axes: import matplotlib.pyplot as plt n = len(series) data = np.asarray(series) if ax is None: ax = plt.gca() ax.set_xlim(1, n) ax.set_ylim(-1.0, 1.0) mean = np.mean(data) c0 = np.sum((data - mean) ** 2) / n def r(h): return ((data[: n - h] - mean) * (data[h:] - mean)).sum() / n / c0 x = np.arange(n) + 1 y = [r(loc) for loc in x] z95 = 1.959963984540054 z99 = 2.5758293035489004 ax.axhline(y=z99 / np.sqrt(n), linestyle="--", color="grey") ax.axhline(y=z95 / np.sqrt(n), color="grey") ax.axhline(y=0.0, color="black") ax.axhline(y=-z95 / np.sqrt(n), color="grey") ax.axhline(y=-z99 / np.sqrt(n), linestyle="--", color="grey") ax.set_xlabel("Lag") ax.set_ylabel("Autocorrelation") ax.plot(x, y, **kwds) if "label" in kwds: ax.legend() ax.grid() return ax
null
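A usage sketch for `autocorrelation_plot`, assuming the public `pandas.plotting.autocorrelation_plot` entry point. Per the implementation above, the solid and dashed grey lines are the 95% and 99% bands z / sqrt(n), so a sine wave's autocorrelation swings well outside them at multiples of its period.

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas.plotting import autocorrelation_plot

# A pure sine wave has strong positive and negative autocorrelation.
t = np.linspace(0, 10 * np.pi, 400)
ax = autocorrelation_plot(pd.Series(np.sin(t)))
plt.show()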
172,996
from __future__ import annotations import numpy as np from pandas._typing import ( Dict, IndexLabel, ) from pandas.core.dtypes.missing import remove_na_arraylike from pandas import ( DataFrame, MultiIndex, Series, concat, ) from pandas.plotting._matplotlib.misc import unpack_single_str_list The provided code snippet includes necessary dependencies for implementing the `create_iter_data_given_by` function. Write a Python function `def create_iter_data_given_by( data: DataFrame, kind: str = "hist" ) -> Dict[str, DataFrame | Series]` to solve the following problem: Create data for iteration given `by` is assigned or not, and it is only used in both hist and boxplot. If `by` is assigned, return a dictionary of DataFrames in which the key of dictionary is the values in groups. If `by` is not assigned, return input as is, and this preserves current status of iter_data. Parameters ---------- data : reformatted grouped data from `_compute_plot_data` method. kind : str, plot kind. This function is only used for `hist` and `box` plots. Returns ------- iter_data : DataFrame or Dictionary of DataFrames Examples -------- If `by` is assigned: >>> import numpy as np >>> tuples = [('h1', 'a'), ('h1', 'b'), ('h2', 'a'), ('h2', 'b')] >>> mi = MultiIndex.from_tuples(tuples) >>> value = [[1, 3, np.nan, np.nan], ... [3, 4, np.nan, np.nan], [np.nan, np.nan, 5, 6]] >>> data = DataFrame(value, columns=mi) >>> create_iter_data_given_by(data) {'h1': h1 a b 0 1.0 3.0 1 3.0 4.0 2 NaN NaN, 'h2': h2 a b 0 NaN NaN 1 NaN NaN 2 5.0 6.0} Here is the function: def create_iter_data_given_by( data: DataFrame, kind: str = "hist" ) -> Dict[str, DataFrame | Series]: """ Create data for iteration given `by` is assigned or not, and it is only used in both hist and boxplot. If `by` is assigned, return a dictionary of DataFrames in which the key of dictionary is the values in groups. If `by` is not assigned, return input as is, and this preserves current status of iter_data. Parameters ---------- data : reformatted grouped data from `_compute_plot_data` method. kind : str, plot kind. This function is only used for `hist` and `box` plots. Returns ------- iter_data : DataFrame or Dictionary of DataFrames Examples -------- If `by` is assigned: >>> import numpy as np >>> tuples = [('h1', 'a'), ('h1', 'b'), ('h2', 'a'), ('h2', 'b')] >>> mi = MultiIndex.from_tuples(tuples) >>> value = [[1, 3, np.nan, np.nan], ... [3, 4, np.nan, np.nan], [np.nan, np.nan, 5, 6]] >>> data = DataFrame(value, columns=mi) >>> create_iter_data_given_by(data) {'h1': h1 a b 0 1.0 3.0 1 3.0 4.0 2 NaN NaN, 'h2': h2 a b 0 NaN NaN 1 NaN NaN 2 5.0 6.0} """ # For `hist` plot, before transformation, the values in level 0 are values # in groups and subplot titles, and later used for column subselection and # iteration; For `box` plot, values in level 1 are column names to show, # and are used for iteration and as subplots titles. if kind == "hist": level = 0 else: level = 1 # Select sub-columns based on the value of level of MI, and if `by` is # assigned, data must be a MI DataFrame assert isinstance(data.columns, MultiIndex) return { col: data.loc[:, data.columns.get_level_values(level) == col] for col in data.columns.levels[level] }
Create data for iteration given `by` is assigned or not, and it is only used in both hist and boxplot. If `by` is assigned, return a dictionary of DataFrames in which the key of dictionary is the values in groups. If `by` is not assigned, return input as is, and this preserves current status of iter_data. Parameters ---------- data : reformatted grouped data from `_compute_plot_data` method. kind : str, plot kind. This function is only used for `hist` and `box` plots. Returns ------- iter_data : DataFrame or Dictionary of DataFrames Examples -------- If `by` is assigned: >>> import numpy as np >>> tuples = [('h1', 'a'), ('h1', 'b'), ('h2', 'a'), ('h2', 'b')] >>> mi = MultiIndex.from_tuples(tuples) >>> value = [[1, 3, np.nan, np.nan], ... [3, 4, np.nan, np.nan], [np.nan, np.nan, 5, 6]] >>> data = DataFrame(value, columns=mi) >>> create_iter_data_given_by(data) {'h1': h1 a b 0 1.0 3.0 1 3.0 4.0 2 NaN NaN, 'h2': h2 a b 0 NaN NaN 1 NaN NaN 2 5.0 6.0}
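A short sketch exercising `create_iter_data_given_by` directly, reusing the docstring's own MultiIndex frame; it assumes the function defined above is in scope (in the pandas tree this helper is private). For `kind="hist"` the selection runs on level 0 of the column index, so each group value yields one sub-frame.

import numpy as np
from pandas import DataFrame, MultiIndex

tuples = [("h1", "a"), ("h1", "b"), ("h2", "a"), ("h2", "b")]
mi = MultiIndex.from_tuples(tuples)
value = [
    [1, 3, np.nan, np.nan],
    [3, 4, np.nan, np.nan],
    [np.nan, np.nan, 5, 6],
]
data = DataFrame(value, columns=mi)

result = create_iter_data_given_by(data, kind="hist")
assert set(result) == {"h1", "h2"}
# Each sub-frame keeps the second level ("a", "b") as its columns.
assert list(result["h1"].columns.get_level_values(1)) == ["a", "b"]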
172,997
from __future__ import annotations import numpy as np from pandas._typing import ( Dict, IndexLabel, ) from pandas.core.dtypes.missing import remove_na_arraylike from pandas import ( DataFrame, MultiIndex, Series, concat, ) from pandas.plotting._matplotlib.misc import unpack_single_str_list IndexLabel = Union[Hashable, Sequence[Hashable]] def unpack_single_str_list(keys): # GH 42795 if isinstance(keys, list) and len(keys) == 1: keys = keys[0] return keys The provided code snippet includes necessary dependencies for implementing the `reconstruct_data_with_by` function. Write a Python function `def reconstruct_data_with_by( data: DataFrame, by: IndexLabel, cols: IndexLabel ) -> DataFrame` to solve the following problem: Internal function to group data, and reassign multiindex column names onto the result in order to let grouped data be used in _compute_plot_data method. Parameters ---------- data : Original DataFrame to plot by : grouped `by` parameter selected by users cols : columns of data set (excluding columns used in `by`) Returns ------- Output is the reconstructed DataFrame with MultiIndex columns. The first level of MI is unique values of groups, and second level of MI is the columns selected by users. Examples -------- >>> d = {'h': ['h1', 'h1', 'h2'], 'a': [1, 3, 5], 'b': [3, 4, 6]} >>> df = DataFrame(d) >>> reconstruct_data_with_by(df, by='h', cols=['a', 'b']) h1 h2 a b a b 0 1.0 3.0 NaN NaN 1 3.0 4.0 NaN NaN 2 NaN NaN 5.0 6.0 Here is the function: def reconstruct_data_with_by( data: DataFrame, by: IndexLabel, cols: IndexLabel ) -> DataFrame: """ Internal function to group data, and reassign multiindex column names onto the result in order to let grouped data be used in _compute_plot_data method. Parameters ---------- data : Original DataFrame to plot by : grouped `by` parameter selected by users cols : columns of data set (excluding columns used in `by`) Returns ------- Output is the reconstructed DataFrame with MultiIndex columns. The first level of MI is unique values of groups, and second level of MI is the columns selected by users. Examples -------- >>> d = {'h': ['h1', 'h1', 'h2'], 'a': [1, 3, 5], 'b': [3, 4, 6]} >>> df = DataFrame(d) >>> reconstruct_data_with_by(df, by='h', cols=['a', 'b']) h1 h2 a b a b 0 1.0 3.0 NaN NaN 1 3.0 4.0 NaN NaN 2 NaN NaN 5.0 6.0 """ by_modified = unpack_single_str_list(by) grouped = data.groupby(by_modified) data_list = [] for key, group in grouped: # error: List item 1 has incompatible type "Union[Hashable, # Sequence[Hashable]]"; expected "Iterable[Hashable]" columns = MultiIndex.from_product([[key], cols]) # type: ignore[list-item] sub_group = group[cols] sub_group.columns = columns data_list.append(sub_group) data = concat(data_list, axis=1) return data
Internal function to group data, and reassign multiindex column names onto the result in order to let grouped data be used in _compute_plot_data method. Parameters ---------- data : Original DataFrame to plot by : grouped `by` parameter selected by users cols : columns of data set (excluding columns used in `by`) Returns ------- Output is the reconstructed DataFrame with MultiIndex columns. The first level of MI is unique values of groups, and second level of MI is the columns selected by users. Examples -------- >>> d = {'h': ['h1', 'h1', 'h2'], 'a': [1, 3, 5], 'b': [3, 4, 6]} >>> df = DataFrame(d) >>> reconstruct_data_with_by(df, by='h', cols=['a', 'b']) h1 h2 a b a b 0 1.0 3.0 NaN NaN 1 3.0 4.0 NaN NaN 2 NaN NaN 5.0 6.0
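The docstring example for `reconstruct_data_with_by`, run end to end; it assumes the function defined above is in scope. Rows are regrouped under a two-level column index of (group value, original column), with NaN filling the cells a row does not belong to.

from pandas import DataFrame

d = {"h": ["h1", "h1", "h2"], "a": [1, 3, 5], "b": [3, 4, 6]}
df = DataFrame(d)

out = reconstruct_data_with_by(df, by="h", cols=["a", "b"])
# Columns become (h1, a), (h1, b), (h2, a), (h2, b).
assert list(out.columns.get_level_values(0)) == ["h1", "h1", "h2", "h2"]
assert out.shape == (3, 4)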
172,998
from __future__ import annotations import numpy as np from pandas._typing import ( Dict, IndexLabel, ) from pandas.core.dtypes.missing import remove_na_arraylike from pandas import ( DataFrame, MultiIndex, Series, concat, ) from pandas.plotting._matplotlib.misc import unpack_single_str_list IndexLabel = Union[Hashable, Sequence[Hashable]] def remove_na_arraylike(arr): """ Return array-like containing only true/non-NaN values, possibly empty. """ if is_extension_array_dtype(arr): return arr[notna(arr)] else: return arr[notna(np.asarray(arr))] The provided code snippet includes necessary dependencies for implementing the `reformat_hist_y_given_by` function. Write a Python function `def reformat_hist_y_given_by( y: Series | np.ndarray, by: IndexLabel | None ) -> Series | np.ndarray` to solve the following problem: Internal function to reformat y given `by` is applied or not for hist plot. If by is None, input y is 1-d with NaN removed; and if by is not None, groupby will take place and input y is multi-dimensional array. Here is the function: def reformat_hist_y_given_by( y: Series | np.ndarray, by: IndexLabel | None ) -> Series | np.ndarray: """Internal function to reformat y given `by` is applied or not for hist plot. If by is None, input y is 1-d with NaN removed; and if by is not None, groupby will take place and input y is multi-dimensional array. """ if by is not None and len(y.shape) > 1: return np.array([remove_na_arraylike(col) for col in y.T]).T return remove_na_arraylike(y)
Internal function to reformat y given `by` is applied or not for hist plot. If by is None, input y is 1-d with NaN removed; and if by is not None, groupby will take place and input y is multi-dimensional array.
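A tiny check of `reformat_hist_y_given_by`, assuming the function defined above is in scope. With `by` set and a 2-D input, NaNs are dropped column-wise (the columns here are chosen so the cleaned lengths still match); with `by=None` a 1-D input is simply cleaned.

import numpy as np

# 2-D case: one NaN per column, removed independently per column.
y = np.array([[1.0, np.nan], [2.0, 5.0], [np.nan, 6.0]])
out = reformat_hist_y_given_by(y, by="g")
assert out.tolist() == [[1.0, 5.0], [2.0, 6.0]]

# 1-D case: plain NaN removal.
out1d = reformat_hist_y_given_by(np.array([1.0, np.nan, 3.0]), by=None)
assert out1d.tolist() == [1.0, 3.0]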
172,999
from __future__ import annotations from datetime import timedelta import functools from typing import ( TYPE_CHECKING, cast, ) import numpy as np from pandas._libs.tslibs import ( BaseOffset, Period, to_offset, ) from pandas._libs.tslibs.dtypes import FreqGroup from pandas.core.dtypes.generic import ( ABCDatetimeIndex, ABCPeriodIndex, ABCTimedeltaIndex, ) from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib.converter import ( TimeSeries_DateFormatter, TimeSeries_DateLocator, TimeSeries_TimedeltaFormatter, ) from pandas.tseries.frequencies import ( get_period_alias, is_subperiod, is_superperiod, ) def _is_sub(f1: str, f2: str) -> bool: return (f1.startswith("W") and is_subperiod("D", f2)) or ( f2.startswith("W") and is_subperiod(f1, "D") ) def _is_sup(f1: str, f2: str) -> bool: return (f1.startswith("W") and is_superperiod("D", f2)) or ( f2.startswith("W") and is_superperiod(f1, "D") ) def _upsample_others(ax: Axes, freq, kwargs) -> None: legend = ax.get_legend() lines, labels = _replot_ax(ax, freq, kwargs) _replot_ax(ax, freq, kwargs) other_ax = None if hasattr(ax, "left_ax"): other_ax = ax.left_ax if hasattr(ax, "right_ax"): other_ax = ax.right_ax if other_ax is not None: rlines, rlabels = _replot_ax(other_ax, freq, kwargs) lines.extend(rlines) labels.extend(rlabels) if legend is not None and kwargs.get("legend", True) and len(lines) > 0: title = legend.get_title().get_text() if title == "None": title = None ax.legend(lines, labels, loc="best", title=title) def _get_freq(ax: Axes, series: Series): # get frequency from data freq = getattr(series.index, "freq", None) if freq is None: freq = getattr(series.index, "inferred_freq", None) freq = to_offset(freq) ax_freq = _get_ax_freq(ax) # use axes freq if no data freq if freq is None: freq = ax_freq # get the period frequency freq = _get_period_alias(freq) return freq, ax_freq ABCDatetimeIndex = cast( "Type[DatetimeIndex]", create_pandas_abc_type("ABCDatetimeIndex", "_typ", ("datetimeindex",)), ) def is_subperiod(source, target) -> bool: """ Returns True if downsampling is possible between source and target frequencies Parameters ---------- source : str or DateOffset Frequency converting from target : str or DateOffset Frequency converting to Returns ------- bool """ if target is None or source is None: return False source = _maybe_coerce_freq(source) target = _maybe_coerce_freq(target) if _is_annual(target): if _is_quarterly(source): return _quarter_months_conform( get_rule_month(source), get_rule_month(target) ) return source in {"D", "C", "B", "M", "H", "T", "S", "L", "U", "N"} elif _is_quarterly(target): return source in {"D", "C", "B", "M", "H", "T", "S", "L", "U", "N"} elif _is_monthly(target): return source in {"D", "C", "B", "H", "T", "S", "L", "U", "N"} elif _is_weekly(target): return source in {target, "D", "C", "B", "H", "T", "S", "L", "U", "N"} elif target == "B": return source in {"B", "H", "T", "S", "L", "U", "N"} elif target == "C": return source in {"C", "H", "T", "S", "L", "U", "N"} elif target == "D": return source in {"D", "H", "T", "S", "L", "U", "N"} elif target == "H": return source in {"H", "T", "S", "L", "U", "N"} elif target == "T": return source in {"T", "S", "L", "U", "N"} elif target == "S": return source in {"S", "L", "U", "N"} elif target == "L": return source in {"L", "U", "N"} elif target == "U": return source in {"U", "N"} elif target == "N": return source in {"N"} else: return False def is_superperiod(source, target) -> bool: """ Returns True if upsampling is possible between 
source and target frequencies Parameters ---------- source : str or DateOffset Frequency converting from target : str or DateOffset Frequency converting to Returns ------- bool """ if target is None or source is None: return False source = _maybe_coerce_freq(source) target = _maybe_coerce_freq(target) if _is_annual(source): if _is_annual(target): return get_rule_month(source) == get_rule_month(target) if _is_quarterly(target): smonth = get_rule_month(source) tmonth = get_rule_month(target) return _quarter_months_conform(smonth, tmonth) return target in {"D", "C", "B", "M", "H", "T", "S", "L", "U", "N"} elif _is_quarterly(source): return target in {"D", "C", "B", "M", "H", "T", "S", "L", "U", "N"} elif _is_monthly(source): return target in {"D", "C", "B", "H", "T", "S", "L", "U", "N"} elif _is_weekly(source): return target in {source, "D", "C", "B", "H", "T", "S", "L", "U", "N"} elif source == "B": return target in {"D", "C", "B", "H", "T", "S", "L", "U", "N"} elif source == "C": return target in {"D", "C", "B", "H", "T", "S", "L", "U", "N"} elif source == "D": return target in {"D", "C", "B", "H", "T", "S", "L", "U", "N"} elif source == "H": return target in {"H", "T", "S", "L", "U", "N"} elif source == "T": return target in {"T", "S", "L", "U", "N"} elif source == "S": return target in {"S", "L", "U", "N"} elif source == "L": return target in {"L", "U", "N"} elif source == "U": return target in {"U", "N"} elif source == "N": return target in {"N"} else: return False def maybe_resample(series: Series, ax: Axes, kwargs): # resample against axes freq if necessary freq, ax_freq = _get_freq(ax, series) if freq is None: # pragma: no cover raise ValueError("Cannot use dynamic axis without frequency info") # Convert DatetimeIndex to PeriodIndex if isinstance(series.index, ABCDatetimeIndex): series = series.to_period(freq=freq) if ax_freq is not None and freq != ax_freq: if is_superperiod(freq, ax_freq): # upsample input series = series.copy() # error: "Index" has no attribute "asfreq" series.index = series.index.asfreq( # type: ignore[attr-defined] ax_freq, how="s" ) freq = ax_freq elif _is_sup(freq, ax_freq): # one is weekly how = kwargs.pop("how", "last") series = getattr(series.resample("D"), how)().dropna() series = getattr(series.resample(ax_freq), how)().dropna() freq = ax_freq elif is_subperiod(freq, ax_freq) or _is_sub(freq, ax_freq): _upsample_others(ax, freq, kwargs) else: # pragma: no cover raise ValueError("Incompatible frequency conversion") return freq, series
null
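A minimal usage sketch of the frequency-compatibility checks this record builds maybe_resample on; is_subperiod and is_superperiod are the functions defined above, and the commented results follow from their membership tables:

from pandas.tseries.frequencies import is_subperiod, is_superperiod

is_subperiod("D", "M")    # True: daily data can be downsampled to monthly
is_subperiod("M", "D")    # False: monthly data cannot be downsampled to daily
is_superperiod("A", "Q")  # True when the annual and quarterly month anchors conform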
173,000
from __future__ import annotations from datetime import timedelta import functools from typing import ( TYPE_CHECKING, cast, ) import numpy as np from pandas._libs.tslibs import ( BaseOffset, Period, to_offset, ) from pandas._libs.tslibs.dtypes import FreqGroup from pandas.core.dtypes.generic import ( ABCDatetimeIndex, ABCPeriodIndex, ABCTimedeltaIndex, ) from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib.converter import ( TimeSeries_DateFormatter, TimeSeries_DateLocator, TimeSeries_TimedeltaFormatter, ) from pandas.tseries.frequencies import ( get_period_alias, is_subperiod, is_superperiod, ) def _get_ax_freq(ax: Axes): """ Get the freq attribute of the ax object if set. Also checks shared axes (eg when using secondary yaxis, sharex=True or twinx) """ ax_freq = getattr(ax, "freq", None) if ax_freq is None: # check for left/right ax in case of secondary yaxis if hasattr(ax, "left_ax"): ax_freq = getattr(ax.left_ax, "freq", None) elif hasattr(ax, "right_ax"): ax_freq = getattr(ax.right_ax, "freq", None) if ax_freq is None: # check if a shared ax (sharex/twinx) has already freq set shared_axes = ax.get_shared_x_axes().get_siblings(ax) if len(shared_axes) > 1: for shared_ax in shared_axes: ax_freq = getattr(shared_ax, "freq", None) if ax_freq is not None: break return ax_freq def _get_period_alias(freq: timedelta | BaseOffset | str) -> str | None: freqstr = to_offset(freq).rule_code return get_period_alias(freqstr) def _get_index_freq(index: Index) -> BaseOffset | None: freq = getattr(index, "freq", None) if freq is None: freq = getattr(index, "inferred_freq", None) if freq == "B": # error: "Index" has no attribute "dayofweek" weekdays = np.unique(index.dayofweek) # type: ignore[attr-defined] if (5 in weekdays) or (6 in weekdays): freq = None freq = to_offset(freq) return freq ABCDatetimeIndex = cast( "Type[DatetimeIndex]", create_pandas_abc_type("ABCDatetimeIndex", "_typ", ("datetimeindex",)), ) def use_dynamic_x(ax: Axes, data: DataFrame | Series) -> bool: freq = _get_index_freq(data.index) ax_freq = _get_ax_freq(ax) if freq is None: # convert irregular if axes has freq info freq = ax_freq else: # do not use tsplot if irregular was plotted first if (ax_freq is None) and (len(ax.get_lines()) > 0): return False if freq is None: return False freq_str = _get_period_alias(freq) if freq_str is None: return False # FIXME: hack this for 0.10.1, creating more technical debt...sigh if isinstance(data.index, ABCDatetimeIndex): # error: "BaseOffset" has no attribute "_period_dtype_code" base = to_offset(freq_str)._period_dtype_code # type: ignore[attr-defined] x = data.index if base <= FreqGroup.FR_DAY.value: return x[:1].is_normalized period = Period(x[0], freq_str) assert isinstance(period, Period) return period.to_timestamp().tz_localize(x.tz) == x[0] return True
null
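use_dynamic_x hinges on whether a frequency can be recovered from the index. A small sketch of the detection that _get_index_freq performs, using only public pandas attributes:

import pandas as pd

idx = pd.date_range("2023-01-01", periods=10, freq="D")
idx.freq           # <Day>: the explicit freq that _get_index_freq checks first
idx.inferred_freq  # 'D': the fallback used when .freq is not set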
173,001
from __future__ import annotations from datetime import timedelta import functools from typing import ( TYPE_CHECKING, cast, ) import numpy as np from pandas._libs.tslibs import ( BaseOffset, Period, to_offset, ) from pandas._libs.tslibs.dtypes import FreqGroup from pandas.core.dtypes.generic import ( ABCDatetimeIndex, ABCPeriodIndex, ABCTimedeltaIndex, ) from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib.converter import ( TimeSeries_DateFormatter, TimeSeries_DateLocator, TimeSeries_TimedeltaFormatter, ) from pandas.tseries.frequencies import ( get_period_alias, is_subperiod, is_superperiod, ) def _get_ax_freq(ax: Axes): """ Get the freq attribute of the ax object if set. Also checks shared axes (eg when using secondary yaxis, sharex=True or twinx) """ ax_freq = getattr(ax, "freq", None) if ax_freq is None: # check for left/right ax in case of secondary yaxis if hasattr(ax, "left_ax"): ax_freq = getattr(ax.left_ax, "freq", None) elif hasattr(ax, "right_ax"): ax_freq = getattr(ax.right_ax, "freq", None) if ax_freq is None: # check if a shared ax (sharex/twinx) has already freq set shared_axes = ax.get_shared_x_axes().get_siblings(ax) if len(shared_axes) > 1: for shared_ax in shared_axes: ax_freq = getattr(shared_ax, "freq", None) if ax_freq is not None: break return ax_freq def _get_period_alias(freq: timedelta | BaseOffset | str) -> str | None: freqstr = to_offset(freq).rule_code return get_period_alias(freqstr) def cast(typ: Type[_T], val: Any) -> _T: ... def cast(typ: str, val: Any) -> Any: ... def cast(typ: object, val: Any) -> Any: ... ABCDatetimeIndex = cast( "Type[DatetimeIndex]", create_pandas_abc_type("ABCDatetimeIndex", "_typ", ("datetimeindex",)), ) ABCPeriodIndex = cast( "Type[PeriodIndex]", create_pandas_abc_type("ABCPeriodIndex", "_typ", ("periodindex",)), ) def maybe_convert_index(ax: Axes, data): # tsplot converts automatically, but don't want to convert index # over and over for DataFrames if isinstance(data.index, (ABCDatetimeIndex, ABCPeriodIndex)): freq: str | BaseOffset | None = data.index.freq if freq is None: # We only get here for DatetimeIndex data.index = cast("DatetimeIndex", data.index) freq = data.index.inferred_freq freq = to_offset(freq) if freq is None: freq = _get_ax_freq(ax) if freq is None: raise ValueError("Could not get frequency alias for plotting") freq_str = _get_period_alias(freq) if isinstance(data.index, ABCDatetimeIndex): data = data.tz_localize(None).to_period(freq=freq_str) elif isinstance(data.index, ABCPeriodIndex): data.index = data.index.asfreq(freq=freq_str) return data
null
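maybe_convert_index normalizes a DatetimeIndex to a PeriodIndex before time-series plotting. A sketch of the equivalent public conversion, assuming a regular month-end index:

import pandas as pd

s = pd.Series(range(3), index=pd.date_range("2023-01-31", periods=3, freq="M"))
s.to_period("M").index  # PeriodIndex(['2023-01', '2023-02', '2023-03'], dtype='period[M]')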
173,002
from __future__ import annotations from datetime import timedelta import functools from typing import ( TYPE_CHECKING, cast, ) import numpy as np from pandas._libs.tslibs import ( BaseOffset, Period, to_offset, ) from pandas._libs.tslibs.dtypes import FreqGroup from pandas.core.dtypes.generic import ( ABCDatetimeIndex, ABCPeriodIndex, ABCTimedeltaIndex, ) from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib.converter import ( TimeSeries_DateFormatter, TimeSeries_DateLocator, TimeSeries_TimedeltaFormatter, ) from pandas.tseries.frequencies import ( get_period_alias, is_subperiod, is_superperiod, ) def _format_coord(freq, t, y) -> str: time_period = Period(ordinal=int(t), freq=freq) return f"t = {time_period} y = {y:8f}" ABCTimedeltaIndex = cast( "Type[TimedeltaIndex]", create_pandas_abc_type("ABCTimedeltaIndex", "_typ", ("timedeltaindex",)), ) ABCPeriodIndex = cast( "Type[PeriodIndex]", create_pandas_abc_type("ABCPeriodIndex", "_typ", ("periodindex",)), ) class TimeSeries_DateLocator(Locator): """ Locates the ticks along an axis controlled by a :class:`Series`. Parameters ---------- freq : BaseOffset Valid frequency specifier. minor_locator : {False, True}, optional Whether the locator is for minor ticks (True) or not. dynamic_mode : {True, False}, optional Whether the locator should work in dynamic mode. base : {int}, optional quarter : {int}, optional month : {int}, optional day : {int}, optional """ def __init__( self, freq: BaseOffset, minor_locator: bool = False, dynamic_mode: bool = True, base: int = 1, quarter: int = 1, month: int = 1, day: int = 1, plot_obj=None, ) -> None: freq = to_offset(freq) self.freq = freq self.base = base (self.quarter, self.month, self.day) = (quarter, month, day) self.isminor = minor_locator self.isdynamic = dynamic_mode self.offset = 0 self.plot_obj = plot_obj self.finder = get_finder(freq) def _get_default_locs(self, vmin, vmax): """Returns the default locations of ticks.""" if self.plot_obj.date_axis_info is None: self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq) locator = self.plot_obj.date_axis_info if self.isminor: return np.compress(locator["min"], locator["val"]) return np.compress(locator["maj"], locator["val"]) def __call__(self): """Return the locations of the ticks.""" # axis calls Locator.set_axis inside set_m<xxxx>_formatter vi = tuple(self.axis.get_view_interval()) if vi != self.plot_obj.view_interval: self.plot_obj.date_axis_info = None self.plot_obj.view_interval = vi vmin, vmax = vi if vmax < vmin: vmin, vmax = vmax, vmin if self.isdynamic: locs = self._get_default_locs(vmin, vmax) else: # pragma: no cover base = self.base (d, m) = divmod(vmin, base) vmin = (d + 1) * base locs = list(range(vmin, vmax + 1, base)) return locs def autoscale(self): """ Sets the view limits to the nearest multiples of base that contain the data. """ # requires matplotlib >= 0.98.0 (vmin, vmax) = self.axis.get_data_interval() locs = self._get_default_locs(vmin, vmax) (vmin, vmax) = locs[[0, -1]] if vmin == vmax: vmin -= 1 vmax += 1 return nonsingular(vmin, vmax) class TimeSeries_DateFormatter(Formatter): """ Formats the ticks along an axis controlled by a :class:`PeriodIndex`. Parameters ---------- freq : BaseOffset Valid frequency specifier. minor_locator : bool, default False Whether the current formatter should apply to minor ticks (True) or major ticks (False). dynamic_mode : bool, default True Whether the formatter works in dynamic mode or not. 
""" def __init__( self, freq: BaseOffset, minor_locator: bool = False, dynamic_mode: bool = True, plot_obj=None, ) -> None: freq = to_offset(freq) self.format = None self.freq = freq self.locs: list[Any] = [] # unused, for matplotlib compat self.formatdict: dict[Any, Any] | None = None self.isminor = minor_locator self.isdynamic = dynamic_mode self.offset = 0 self.plot_obj = plot_obj self.finder = get_finder(freq) def _set_default_format(self, vmin, vmax): """Returns the default ticks spacing.""" if self.plot_obj.date_axis_info is None: self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq) info = self.plot_obj.date_axis_info if self.isminor: format = np.compress(info["min"] & np.logical_not(info["maj"]), info) else: format = np.compress(info["maj"], info) self.formatdict = {x: f for (x, _, _, f) in format} return self.formatdict def set_locs(self, locs) -> None: """Sets the locations of the ticks""" # don't actually use the locs. This is just needed to work with # matplotlib. Force to use vmin, vmax self.locs = locs (vmin, vmax) = vi = tuple(self.axis.get_view_interval()) if vi != self.plot_obj.view_interval: self.plot_obj.date_axis_info = None self.plot_obj.view_interval = vi if vmax < vmin: (vmin, vmax) = (vmax, vmin) self._set_default_format(vmin, vmax) def __call__(self, x, pos: int = 0) -> str: if self.formatdict is None: return "" else: fmt = self.formatdict.pop(x, "") if isinstance(fmt, np.bytes_): fmt = fmt.decode("utf-8") period = Period(ordinal=int(x), freq=self.freq) assert isinstance(period, Period) return period.strftime(fmt) class TimeSeries_TimedeltaFormatter(Formatter): """ Formats the ticks along an axis controlled by a :class:`TimedeltaIndex`. """ def format_timedelta_ticks(x, pos, n_decimals: int) -> str: """ Convert seconds to 'D days HH:MM:SS.F' """ s, ns = divmod(x, 10**9) m, s = divmod(s, 60) h, m = divmod(m, 60) d, h = divmod(h, 24) decimals = int(ns * 10 ** (n_decimals - 9)) s = f"{int(h):02d}:{int(m):02d}:{int(s):02d}" if n_decimals > 0: s += f".{decimals:0{n_decimals}d}" if d != 0: s = f"{int(d):d} days {s}" return s def __call__(self, x, pos: int = 0) -> str: (vmin, vmax) = tuple(self.axis.get_view_interval()) n_decimals = min(int(np.ceil(np.log10(100 * 10**9 / abs(vmax - vmin)))), 9) return self.format_timedelta_ticks(x, pos, n_decimals) The provided code snippet includes necessary dependencies for implementing the `format_dateaxis` function. Write a Python function `def format_dateaxis(subplot, freq, index) -> None` to solve the following problem: Pretty-formats the date axis (x-axis). Major and minor ticks are automatically set for the frequency of the current underlying series. As the dynamic mode is activated by default, changing the limits of the x axis will intelligently change the positions of the ticks. Here is the function: def format_dateaxis(subplot, freq, index) -> None: """ Pretty-formats the date axis (x-axis). Major and minor ticks are automatically set for the frequency of the current underlying series. As the dynamic mode is activated by default, changing the limits of the x axis will intelligently change the positions of the ticks. """ from matplotlib import pylab # handle index specific formatting # Note: DatetimeIndex does not use this # interface. 
DatetimeIndex uses matplotlib.dates directly if isinstance(index, ABCPeriodIndex): majlocator = TimeSeries_DateLocator( freq, dynamic_mode=True, minor_locator=False, plot_obj=subplot ) minlocator = TimeSeries_DateLocator( freq, dynamic_mode=True, minor_locator=True, plot_obj=subplot ) subplot.xaxis.set_major_locator(majlocator) subplot.xaxis.set_minor_locator(minlocator) majformatter = TimeSeries_DateFormatter( freq, dynamic_mode=True, minor_locator=False, plot_obj=subplot ) minformatter = TimeSeries_DateFormatter( freq, dynamic_mode=True, minor_locator=True, plot_obj=subplot ) subplot.xaxis.set_major_formatter(majformatter) subplot.xaxis.set_minor_formatter(minformatter) # x and y coord info subplot.format_coord = functools.partial(_format_coord, freq) elif isinstance(index, ABCTimedeltaIndex): subplot.xaxis.set_major_formatter(TimeSeries_TimedeltaFormatter()) else: raise TypeError("index type not supported") pylab.draw_if_interactive()
Pretty-formats the date axis (x-axis). Major and minor ticks are automatically set for the frequency of the current underlying series. As the dynamic mode is activated by default, changing the limits of the x axis will intelligently change the positions of the ticks.
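A short end-to-end sketch: plotting a PeriodIndex-backed Series is the public path that exercises format_dateaxis and the locators/formatters it installs (hedged: the exact call chain is internal to the matplotlib backend):

import pandas as pd
import matplotlib.pyplot as plt

s = pd.Series(range(12), index=pd.period_range("2023-01", periods=12, freq="M"))
ax = s.plot()  # dynamic-mode ticks adjust as the x-axis limits change
plt.show()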
173,003
from __future__ import annotations import contextlib import datetime as pydt from datetime import ( datetime, timedelta, tzinfo, ) import functools from typing import ( TYPE_CHECKING, Any, Final, Generator, cast, ) from dateutil.relativedelta import relativedelta import matplotlib.dates as mdates from matplotlib.ticker import ( AutoLocator, Formatter, Locator, ) from matplotlib.transforms import nonsingular import matplotlib.units as munits import numpy as np from pandas._libs import lib from pandas._libs.tslibs import ( Timestamp, to_offset, ) from pandas._libs.tslibs.dtypes import FreqGroup from pandas._typing import F from pandas.core.dtypes.common import ( is_float, is_float_dtype, is_integer, is_integer_dtype, is_nested_list_like, ) from pandas import ( Index, Series, get_option, ) import pandas.core.common as com from pandas.core.indexes.datetimes import date_range from pandas.core.indexes.period import ( Period, PeriodIndex, period_range, ) import pandas.core.tools.datetimes as tools def pandas_converters() -> Generator[None, None, None]: """ Context manager registering pandas' converters for a plot. See Also -------- register_pandas_matplotlib_converters : Decorator that applies this. """ value = get_option("plotting.matplotlib.register_converters") if value: # register for True or "auto" register() try: yield finally: if value == "auto": # only deregister for "auto" deregister() def cast(typ: Type[_T], val: Any) -> _T: ... def cast(typ: str, val: Any) -> Any: ... def cast(typ: object, val: Any) -> Any: ... F = TypeVar("F", bound=FuncType) The provided code snippet includes necessary dependencies for implementing the `register_pandas_matplotlib_converters` function. Write a Python function `def register_pandas_matplotlib_converters(func: F) -> F` to solve the following problem: Decorator applying pandas_converters. Here is the function: def register_pandas_matplotlib_converters(func: F) -> F: """ Decorator applying pandas_converters. """ @functools.wraps(func) def wrapper(*args, **kwargs): with pandas_converters(): return func(*args, **kwargs) return cast(F, wrapper)
Decorator applying pandas_converters.
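A usage sketch of the decorator from this record; plot_prices is a hypothetical helper, not part of pandas:

@register_pandas_matplotlib_converters
def plot_prices(ax, series):  # hypothetical plotting function
    # pandas' matplotlib converters are registered for the duration of this call
    ax.plot(series.index, series.values)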
173,004
from __future__ import annotations import contextlib import datetime as pydt from datetime import ( datetime, timedelta, tzinfo, ) import functools from typing import ( TYPE_CHECKING, Any, Final, Generator, cast, ) from dateutil.relativedelta import relativedelta import matplotlib.dates as mdates from matplotlib.ticker import ( AutoLocator, Formatter, Locator, ) from matplotlib.transforms import nonsingular import matplotlib.units as munits import numpy as np from pandas._libs import lib from pandas._libs.tslibs import ( Timestamp, to_offset, ) from pandas._libs.tslibs.dtypes import FreqGroup from pandas._typing import F from pandas.core.dtypes.common import ( is_float, is_float_dtype, is_integer, is_integer_dtype, is_nested_list_like, ) from pandas import ( Index, Series, get_option, ) import pandas.core.common as com from pandas.core.indexes.datetimes import date_range from pandas.core.indexes.period import ( Period, PeriodIndex, period_range, ) import pandas.core.tools.datetimes as tools def _to_ordinalf(tm: pydt.time) -> float: tot_sec = tm.hour * 3600 + tm.minute * 60 + tm.second + tm.microsecond / 10**6 return tot_sec def time2num(d): if isinstance(d, str): parsed = Timestamp(d) return _to_ordinalf(parsed.time()) if isinstance(d, pydt.time): return _to_ordinalf(d) return d
null
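time2num (defined above) maps a time-of-day to seconds since midnight; strings are parsed through Timestamp first. Expected values, assuming the function as written:

import datetime

time2num(datetime.time(12, 30))   # 45000.0 (12*3600 + 30*60 seconds)
time2num("2023-01-01 06:00:00")   # 21600.0, via Timestamp(...).time()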
173,005
from __future__ import annotations import contextlib import datetime as pydt from datetime import ( datetime, timedelta, tzinfo, ) import functools from typing import ( TYPE_CHECKING, Any, Final, Generator, cast, ) from dateutil.relativedelta import relativedelta import matplotlib.dates as mdates from matplotlib.ticker import ( AutoLocator, Formatter, Locator, ) from matplotlib.transforms import nonsingular import matplotlib.units as munits import numpy as np from pandas._libs import lib from pandas._libs.tslibs import ( Timestamp, to_offset, ) from pandas._libs.tslibs.dtypes import FreqGroup from pandas._typing import F from pandas.core.dtypes.common import ( is_float, is_float_dtype, is_integer, is_integer_dtype, is_nested_list_like, ) from pandas import ( Index, Series, get_option, ) import pandas.core.common as com from pandas.core.indexes.datetimes import date_range from pandas.core.indexes.period import ( Period, PeriodIndex, period_range, ) import pandas.core.tools.datetimes as tools class datetime(date): min: ClassVar[datetime] max: ClassVar[datetime] resolution: ClassVar[timedelta] if sys.version_info >= (3, 6): def __new__( cls: Type[_S], year: int, month: int, day: int, hour: int = ..., minute: int = ..., second: int = ..., microsecond: int = ..., tzinfo: Optional[_tzinfo] = ..., *, fold: int = ..., ) -> _S: ... else: def __new__( cls: Type[_S], year: int, month: int, day: int, hour: int = ..., minute: int = ..., second: int = ..., microsecond: int = ..., tzinfo: Optional[_tzinfo] = ..., ) -> _S: ... def year(self) -> int: ... def month(self) -> int: ... def day(self) -> int: ... def hour(self) -> int: ... def minute(self) -> int: ... def second(self) -> int: ... def microsecond(self) -> int: ... def tzinfo(self) -> Optional[_tzinfo]: ... if sys.version_info >= (3, 6): def fold(self) -> int: ... def fromtimestamp(cls: Type[_S], t: float, tz: Optional[_tzinfo] = ...) -> _S: ... def utcfromtimestamp(cls: Type[_S], t: float) -> _S: ... def today(cls: Type[_S]) -> _S: ... def fromordinal(cls: Type[_S], n: int) -> _S: ... if sys.version_info >= (3, 8): def now(cls: Type[_S], tz: Optional[_tzinfo] = ...) -> _S: ... else: def now(cls: Type[_S], tz: None = ...) -> _S: ... def now(cls, tz: _tzinfo) -> datetime: ... def utcnow(cls: Type[_S]) -> _S: ... if sys.version_info >= (3, 6): def combine(cls, date: _date, time: _time, tzinfo: Optional[_tzinfo] = ...) -> datetime: ... else: def combine(cls, date: _date, time: _time) -> datetime: ... if sys.version_info >= (3, 7): def fromisoformat(cls: Type[_S], date_string: str) -> _S: ... def strftime(self, fmt: _Text) -> str: ... if sys.version_info >= (3,): def __format__(self, fmt: str) -> str: ... else: def __format__(self, fmt: AnyStr) -> AnyStr: ... def toordinal(self) -> int: ... def timetuple(self) -> struct_time: ... if sys.version_info >= (3, 3): def timestamp(self) -> float: ... def utctimetuple(self) -> struct_time: ... def date(self) -> _date: ... def time(self) -> _time: ... def timetz(self) -> _time: ... if sys.version_info >= (3, 6): def replace( self, year: int = ..., month: int = ..., day: int = ..., hour: int = ..., minute: int = ..., second: int = ..., microsecond: int = ..., tzinfo: Optional[_tzinfo] = ..., *, fold: int = ..., ) -> datetime: ... else: def replace( self, year: int = ..., month: int = ..., day: int = ..., hour: int = ..., minute: int = ..., second: int = ..., microsecond: int = ..., tzinfo: Optional[_tzinfo] = ..., ) -> datetime: ... 
if sys.version_info >= (3, 8): def astimezone(self: _S, tz: Optional[_tzinfo] = ...) -> _S: ... elif sys.version_info >= (3, 3): def astimezone(self, tz: Optional[_tzinfo] = ...) -> datetime: ... else: def astimezone(self, tz: _tzinfo) -> datetime: ... def ctime(self) -> str: ... if sys.version_info >= (3, 6): def isoformat(self, sep: str = ..., timespec: str = ...) -> str: ... else: def isoformat(self, sep: str = ...) -> str: ... def strptime(cls, date_string: _Text, format: _Text) -> datetime: ... def utcoffset(self) -> Optional[timedelta]: ... def tzname(self) -> Optional[str]: ... def dst(self) -> Optional[timedelta]: ... def __le__(self, other: datetime) -> bool: ... # type: ignore def __lt__(self, other: datetime) -> bool: ... # type: ignore def __ge__(self, other: datetime) -> bool: ... # type: ignore def __gt__(self, other: datetime) -> bool: ... # type: ignore if sys.version_info >= (3, 8): def __add__(self: _S, other: timedelta) -> _S: ... def __radd__(self: _S, other: timedelta) -> _S: ... else: def __add__(self, other: timedelta) -> datetime: ... def __radd__(self, other: timedelta) -> datetime: ... def __sub__(self, other: datetime) -> timedelta: ... def __sub__(self, other: timedelta) -> datetime: ... def __hash__(self) -> int: ... def weekday(self) -> int: ... def isoweekday(self) -> int: ... def isocalendar(self) -> Tuple[int, int, int]: ... def get_datevalue(date, freq): if isinstance(date, Period): return date.asfreq(freq).ordinal elif isinstance(date, (str, datetime, pydt.date, pydt.time, np.datetime64)): return Period(date, freq).ordinal elif ( is_integer(date) or is_float(date) or (isinstance(date, (np.ndarray, Index)) and (date.size == 1)) ): return date elif date is None: return None raise ValueError(f"Unrecognizable date '{date}'")
null
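get_datevalue (defined above) coerces assorted date-likes to a Period ordinal under the given freq, and passes None through. A sketch:

from pandas import Period

get_datevalue("2023-06-15", "D") == Period("2023-06-15", freq="D").ordinal  # True
get_datevalue(None, "D")  # None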
173,006
from __future__ import annotations import contextlib import datetime as pydt from datetime import ( datetime, timedelta, tzinfo, ) import functools from typing import ( TYPE_CHECKING, Any, Final, Generator, cast, ) from dateutil.relativedelta import relativedelta import matplotlib.dates as mdates from matplotlib.ticker import ( AutoLocator, Formatter, Locator, ) from matplotlib.transforms import nonsingular import matplotlib.units as munits import numpy as np from pandas._libs import lib from pandas._libs.tslibs import ( Timestamp, to_offset, ) from pandas._libs.tslibs.dtypes import FreqGroup from pandas._typing import F from pandas.core.dtypes.common import ( is_float, is_float_dtype, is_integer, is_integer_dtype, is_nested_list_like, ) from pandas import ( Index, Series, get_option, ) import pandas.core.common as com from pandas.core.indexes.datetimes import date_range from pandas.core.indexes.period import ( Period, PeriodIndex, period_range, ) import pandas.core.tools.datetimes as tools class tzinfo: def tzname(self, dt: Optional[datetime]) -> Optional[str]: ... def utcoffset(self, dt: Optional[datetime]) -> Optional[timedelta]: ... def dst(self, dt: Optional[datetime]) -> Optional[timedelta]: ... def fromutc(self, dt: datetime) -> datetime: ... class timedelta(SupportsAbs[timedelta]): min: ClassVar[timedelta] max: ClassVar[timedelta] resolution: ClassVar[timedelta] if sys.version_info >= (3, 6): def __init__( self, days: float = ..., seconds: float = ..., microseconds: float = ..., milliseconds: float = ..., minutes: float = ..., hours: float = ..., weeks: float = ..., *, fold: int = ..., ) -> None: ... else: def __init__( self, days: float = ..., seconds: float = ..., microseconds: float = ..., milliseconds: float = ..., minutes: float = ..., hours: float = ..., weeks: float = ..., ) -> None: ... def days(self) -> int: ... def seconds(self) -> int: ... def microseconds(self) -> int: ... def total_seconds(self) -> float: ... def __add__(self, other: timedelta) -> timedelta: ... def __radd__(self, other: timedelta) -> timedelta: ... def __sub__(self, other: timedelta) -> timedelta: ... def __rsub__(self, other: timedelta) -> timedelta: ... def __neg__(self) -> timedelta: ... def __pos__(self) -> timedelta: ... def __abs__(self) -> timedelta: ... def __mul__(self, other: float) -> timedelta: ... def __rmul__(self, other: float) -> timedelta: ... def __floordiv__(self, other: timedelta) -> int: ... def __floordiv__(self, other: int) -> timedelta: ... if sys.version_info >= (3,): def __truediv__(self, other: timedelta) -> float: ... def __truediv__(self, other: float) -> timedelta: ... def __mod__(self, other: timedelta) -> timedelta: ... def __divmod__(self, other: timedelta) -> Tuple[int, timedelta]: ... else: def __div__(self, other: timedelta) -> float: ... def __div__(self, other: float) -> timedelta: ... def __le__(self, other: timedelta) -> bool: ... def __lt__(self, other: timedelta) -> bool: ... def __ge__(self, other: timedelta) -> bool: ... def __gt__(self, other: timedelta) -> bool: ... def __hash__(self) -> int: ... class datetime(date): min: ClassVar[datetime] max: ClassVar[datetime] resolution: ClassVar[timedelta] if sys.version_info >= (3, 6): def __new__( cls: Type[_S], year: int, month: int, day: int, hour: int = ..., minute: int = ..., second: int = ..., microsecond: int = ..., tzinfo: Optional[_tzinfo] = ..., *, fold: int = ..., ) -> _S: ... 
else: def __new__( cls: Type[_S], year: int, month: int, day: int, hour: int = ..., minute: int = ..., second: int = ..., microsecond: int = ..., tzinfo: Optional[_tzinfo] = ..., ) -> _S: ... def year(self) -> int: ... def month(self) -> int: ... def day(self) -> int: ... def hour(self) -> int: ... def minute(self) -> int: ... def second(self) -> int: ... def microsecond(self) -> int: ... def tzinfo(self) -> Optional[_tzinfo]: ... if sys.version_info >= (3, 6): def fold(self) -> int: ... def fromtimestamp(cls: Type[_S], t: float, tz: Optional[_tzinfo] = ...) -> _S: ... def utcfromtimestamp(cls: Type[_S], t: float) -> _S: ... def today(cls: Type[_S]) -> _S: ... def fromordinal(cls: Type[_S], n: int) -> _S: ... if sys.version_info >= (3, 8): def now(cls: Type[_S], tz: Optional[_tzinfo] = ...) -> _S: ... else: def now(cls: Type[_S], tz: None = ...) -> _S: ... def now(cls, tz: _tzinfo) -> datetime: ... def utcnow(cls: Type[_S]) -> _S: ... if sys.version_info >= (3, 6): def combine(cls, date: _date, time: _time, tzinfo: Optional[_tzinfo] = ...) -> datetime: ... else: def combine(cls, date: _date, time: _time) -> datetime: ... if sys.version_info >= (3, 7): def fromisoformat(cls: Type[_S], date_string: str) -> _S: ... def strftime(self, fmt: _Text) -> str: ... if sys.version_info >= (3,): def __format__(self, fmt: str) -> str: ... else: def __format__(self, fmt: AnyStr) -> AnyStr: ... def toordinal(self) -> int: ... def timetuple(self) -> struct_time: ... if sys.version_info >= (3, 3): def timestamp(self) -> float: ... def utctimetuple(self) -> struct_time: ... def date(self) -> _date: ... def time(self) -> _time: ... def timetz(self) -> _time: ... if sys.version_info >= (3, 6): def replace( self, year: int = ..., month: int = ..., day: int = ..., hour: int = ..., minute: int = ..., second: int = ..., microsecond: int = ..., tzinfo: Optional[_tzinfo] = ..., *, fold: int = ..., ) -> datetime: ... else: def replace( self, year: int = ..., month: int = ..., day: int = ..., hour: int = ..., minute: int = ..., second: int = ..., microsecond: int = ..., tzinfo: Optional[_tzinfo] = ..., ) -> datetime: ... if sys.version_info >= (3, 8): def astimezone(self: _S, tz: Optional[_tzinfo] = ...) -> _S: ... elif sys.version_info >= (3, 3): def astimezone(self, tz: Optional[_tzinfo] = ...) -> datetime: ... else: def astimezone(self, tz: _tzinfo) -> datetime: ... def ctime(self) -> str: ... if sys.version_info >= (3, 6): def isoformat(self, sep: str = ..., timespec: str = ...) -> str: ... else: def isoformat(self, sep: str = ...) -> str: ... def strptime(cls, date_string: _Text, format: _Text) -> datetime: ... def utcoffset(self) -> Optional[timedelta]: ... def tzname(self) -> Optional[str]: ... def dst(self) -> Optional[timedelta]: ... def __le__(self, other: datetime) -> bool: ... # type: ignore def __lt__(self, other: datetime) -> bool: ... # type: ignore def __ge__(self, other: datetime) -> bool: ... # type: ignore def __gt__(self, other: datetime) -> bool: ... # type: ignore if sys.version_info >= (3, 8): def __add__(self: _S, other: timedelta) -> _S: ... def __radd__(self: _S, other: timedelta) -> _S: ... else: def __add__(self, other: timedelta) -> datetime: ... def __radd__(self, other: timedelta) -> datetime: ... def __sub__(self, other: datetime) -> timedelta: ... def __sub__(self, other: timedelta) -> datetime: ... def __hash__(self) -> int: ... def weekday(self) -> int: ... def isoweekday(self) -> int: ... def isocalendar(self) -> Tuple[int, int, int]: ... 
def _from_ordinal(x, tz: tzinfo | None = None) -> datetime: ix = int(x) dt = datetime.fromordinal(ix) remainder = float(x) - ix hour, remainder = divmod(24 * remainder, 1) minute, remainder = divmod(60 * remainder, 1) second, remainder = divmod(60 * remainder, 1) microsecond = int(1_000_000 * remainder) if microsecond < 10: microsecond = 0 # compensate for rounding errors dt = datetime( dt.year, dt.month, dt.day, int(hour), int(minute), int(second), microsecond ) if tz is not None: dt = dt.astimezone(tz) if microsecond > 999990: # compensate for rounding errors dt += timedelta(microseconds=1_000_000 - microsecond) return dt
null
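_from_ordinal (defined above) splits a fractional proleptic-Gregorian ordinal into a date part and a time-of-day part. A sketch of the round-trip:

import datetime

base = datetime.datetime.fromordinal(738885)
_from_ordinal(738885.5) == base + datetime.timedelta(hours=12)  # True: .5 of a day is noon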
173,007
from __future__ import annotations import contextlib import datetime as pydt from datetime import ( datetime, timedelta, tzinfo, ) import functools from typing import ( TYPE_CHECKING, Any, Final, Generator, cast, ) from dateutil.relativedelta import relativedelta import matplotlib.dates as mdates from matplotlib.ticker import ( AutoLocator, Formatter, Locator, ) from matplotlib.transforms import nonsingular import matplotlib.units as munits import numpy as np from pandas._libs import lib from pandas._libs.tslibs import ( Timestamp, to_offset, ) from pandas._libs.tslibs.dtypes import FreqGroup from pandas._typing import F from pandas.core.dtypes.common import ( is_float, is_float_dtype, is_integer, is_integer_dtype, is_nested_list_like, ) from pandas import ( Index, Series, get_option, ) import pandas.core.common as com from pandas.core.indexes.datetimes import date_range from pandas.core.indexes.period import ( Period, PeriodIndex, period_range, ) import pandas.core.tools.datetimes as tools def _daily_finder(vmin, vmax, freq: BaseOffset): def _monthly_finder(vmin, vmax, freq): def _quarterly_finder(vmin, vmax, freq): def _annual_finder(vmin, vmax, freq): def get_finder(freq: BaseOffset): # error: "BaseOffset" has no attribute "_period_dtype_code" dtype_code = freq._period_dtype_code # type: ignore[attr-defined] fgroup = FreqGroup.from_period_dtype_code(dtype_code) if fgroup == FreqGroup.FR_ANN: return _annual_finder elif fgroup == FreqGroup.FR_QTR: return _quarterly_finder elif fgroup == FreqGroup.FR_MTH: return _monthly_finder elif (dtype_code >= FreqGroup.FR_BUS.value) or fgroup == FreqGroup.FR_WK: return _daily_finder else: # pragma: no cover raise NotImplementedError(f"Unsupported frequency: {dtype_code}")
null
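get_finder dispatches on the frequency group of the offset; a sketch, noting that it relies on the internal _period_dtype_code attribute mentioned in the snippet:

from pandas._libs.tslibs import to_offset

get_finder(to_offset("Q"))  # -> _quarterly_finder
get_finder(to_offset("D"))  # -> _daily_finder (weekly, business-day and finer also land here)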
173,008
from __future__ import annotations from typing import ( TYPE_CHECKING, Literal, ) import numpy as np from pandas._typing import PlottingOrientation from pandas.core.dtypes.common import ( is_integer, is_list_like, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCIndex, ) from pandas.core.dtypes.missing import ( isna, remove_na_arraylike, ) from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib.core import ( LinePlot, MPLPlot, ) from pandas.plotting._matplotlib.groupby import ( create_iter_data_given_by, reformat_hist_y_given_by, ) from pandas.plotting._matplotlib.misc import unpack_single_str_list from pandas.plotting._matplotlib.tools import ( create_subplots, flatten_axes, maybe_adjust_figure, set_ticks_props, ) def _grouped_hist( data, column=None, by=None, ax=None, bins: int = 50, figsize=None, layout=None, sharex: bool = False, sharey: bool = False, rot: float = 90, grid: bool = True, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None, legend: bool = False, **kwargs, ): """ Grouped histogram Parameters ---------- data : Series/DataFrame column : object, optional by : object, optional ax : axes, optional bins : int, default 50 figsize : tuple, optional layout : optional sharex : bool, default False sharey : bool, default False rot : float, default 90 grid : bool, default True legend : bool, default False kwargs : dict, keyword arguments passed to matplotlib.Axes.hist Returns ------- collection of Matplotlib Axes """ if legend: assert "label" not in kwargs if data.ndim == 1: kwargs["label"] = data.name elif column is None: kwargs["label"] = data.columns else: kwargs["label"] = column def plot_group(group, ax) -> None: ax.hist(group.dropna().values, bins=bins, **kwargs) if legend: ax.legend() if xrot is None: xrot = rot fig, axes = _grouped_plot( plot_group, data, column=column, by=by, sharex=sharex, sharey=sharey, ax=ax, figsize=figsize, layout=layout, rot=rot, ) set_ticks_props( axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot ) maybe_adjust_figure( fig, bottom=0.15, top=0.9, left=0.1, right=0.9, hspace=0.5, wspace=0.3 ) return axes def set_ticks_props( axes: Axes | Sequence[Axes], xlabelsize=None, xrot=None, ylabelsize=None, yrot=None, ): import matplotlib.pyplot as plt for ax in flatten_axes(axes): if xlabelsize is not None: plt.setp(ax.get_xticklabels(), fontsize=xlabelsize) if xrot is not None: plt.setp(ax.get_xticklabels(), rotation=xrot) if ylabelsize is not None: plt.setp(ax.get_yticklabels(), fontsize=ylabelsize) if yrot is not None: plt.setp(ax.get_yticklabels(), rotation=yrot) return axes def hist_series( self, by=None, ax=None, grid: bool = True, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None, figsize=None, bins: int = 10, legend: bool = False, **kwds, ): import matplotlib.pyplot as plt if legend and "label" in kwds: raise ValueError("Cannot use both legend and label") if by is None: if kwds.get("layout", None) is not None: raise ValueError("The 'layout' keyword is not supported when 'by' is None") # hack until the plotting interface is a bit more unified fig = kwds.pop( "figure", plt.gcf() if plt.get_fignums() else plt.figure(figsize=figsize) ) if figsize is not None and tuple(figsize) != tuple(fig.get_size_inches()): fig.set_size_inches(*figsize, forward=True) if ax is None: ax = fig.gca() elif ax.get_figure() != fig: raise AssertionError("passed axis not bound to passed figure") values = self.dropna().values if legend: kwds["label"] = self.name ax.hist(values, bins=bins, **kwds) if legend:
ax.legend() ax.grid(grid) axes = np.array([ax]) set_ticks_props( axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot ) else: if "figure" in kwds: raise ValueError( "Cannot pass 'figure' when using the " "'by' argument, since a new 'Figure' instance will be created" ) axes = _grouped_hist( self, by=by, ax=ax, grid=grid, figsize=figsize, bins=bins, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot, legend=legend, **kwds, ) if hasattr(axes, "ndim"): if axes.ndim == 1 and len(axes) == 1: return axes[0] return axes
null
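The public entry point that reaches this backend function is Series.hist; a minimal sketch:

import pandas as pd

s = pd.Series([1, 2, 2, 3, 3, 3], name="x")
ax = s.hist(bins=3, legend=True)  # the legend label is taken from the Series name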
173,009
from __future__ import annotations from typing import ( TYPE_CHECKING, Literal, ) import numpy as np from pandas._typing import PlottingOrientation from pandas.core.dtypes.common import ( is_integer, is_list_like, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCIndex, ) from pandas.core.dtypes.missing import ( isna, remove_na_arraylike, ) from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib.core import ( LinePlot, MPLPlot, ) from pandas.plotting._matplotlib.groupby import ( create_iter_data_given_by, reformat_hist_y_given_by, ) from pandas.plotting._matplotlib.misc import unpack_single_str_list from pandas.plotting._matplotlib.tools import ( create_subplots, flatten_axes, maybe_adjust_figure, set_ticks_props, ) def _grouped_hist( data, column=None, by=None, ax=None, bins: int = 50, figsize=None, layout=None, sharex: bool = False, sharey: bool = False, rot: float = 90, grid: bool = True, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None, legend: bool = False, **kwargs, ): ABCIndex = cast( "Type[Index]", create_pandas_abc_type( "ABCIndex", "_typ", { "index", "rangeindex", "multiindex", "datetimeindex", "timedeltaindex", "periodindex", "categoricalindex", "intervalindex", }, ), ) def maybe_adjust_figure(fig: Figure, *args, **kwargs) -> None: def create_subplots( naxes: int, sharex: bool = False, sharey: bool = False, squeeze: bool = True, subplot_kw=None, ax=None, layout=None, layout_type: str = "box", **fig_kw, ): def flatten_axes(axes: Axes | Sequence[Axes]) -> np.ndarray: def set_ticks_props( axes: Axes | Sequence[Axes], xlabelsize=None, xrot=None, ylabelsize=None, yrot=None, ): def hist_frame( data, column=None, by=None, grid: bool = True, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None, ax=None, sharex: bool = False, sharey: bool = False, figsize=None, layout=None, bins: int = 10, legend: bool = False, **kwds, ): if legend and "label" in kwds: raise ValueError("Cannot use both legend and label") if by is not None: axes = _grouped_hist( data, column=column, by=by, ax=ax, grid=grid, figsize=figsize, sharex=sharex, sharey=sharey, layout=layout, bins=bins, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot, legend=legend, **kwds, ) return axes if column is not None: if not isinstance(column, (list, np.ndarray, ABCIndex)): column = [column] data = data[column] # GH32590 data = data.select_dtypes( include=(np.number, "datetime64", "datetimetz"), exclude="timedelta" ) naxes = len(data.columns) if naxes == 0: raise ValueError( "hist method requires numerical or datetime columns, nothing to plot." ) fig, axes = create_subplots( naxes=naxes, ax=ax, squeeze=False, sharex=sharex, sharey=sharey, figsize=figsize, layout=layout, ) _axes = flatten_axes(axes) can_set_label = "label" not in kwds for i, col in enumerate(data.columns): ax = _axes[i] if legend and can_set_label: kwds["label"] = col ax.hist(data[col].dropna().values, bins=bins, **kwds) ax.set_title(col) ax.grid(grid) if legend: ax.legend() set_ticks_props( axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot ) maybe_adjust_figure(fig, wspace=0.3, hspace=0.3) return axes
null
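Likewise, DataFrame.hist is the public route into hist_frame; a sketch selecting a single column:

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": np.random.randn(100), "b": np.random.randn(100)})
axes = df.hist(column="a", bins=20, grid=False)  # one subplot per selected column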
173,010
from __future__ import annotations from typing import ( TYPE_CHECKING, Collection, Literal, NamedTuple, ) import warnings from matplotlib.artist import setp import numpy as np from pandas._typing import MatplotlibColor from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import is_dict_like from pandas.core.dtypes.missing import remove_na_arraylike import pandas as pd import pandas.core.common as com from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib.core import ( LinePlot, MPLPlot, ) from pandas.plotting._matplotlib.groupby import create_iter_data_given_by from pandas.plotting._matplotlib.style import get_standard_colors from pandas.plotting._matplotlib.tools import ( create_subplots, flatten_axes, maybe_adjust_figure, ) def boxplot( data, column=None, by=None, ax=None, fontsize=None, rot: int = 0, grid: bool = True, figsize=None, layout=None, return_type=None, **kwds, ): def boxplot_frame( self, column=None, by=None, ax=None, fontsize=None, rot: int = 0, grid: bool = True, figsize=None, layout=None, return_type=None, **kwds, ): import matplotlib.pyplot as plt ax = boxplot( self, column=column, by=by, ax=ax, fontsize=fontsize, grid=grid, rot=rot, figsize=figsize, layout=layout, return_type=return_type, **kwds, ) plt.draw_if_interactive() return ax
null
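boxplot_frame backs DataFrame.boxplot; a minimal sketch:

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(50, 2), columns=["a", "b"])
ax = df.boxplot(column=["a", "b"], rot=45)  # forwards to the boxplot() helper above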
173,011
from __future__ import annotations from typing import ( TYPE_CHECKING, Collection, Literal, NamedTuple, ) import warnings from matplotlib.artist import setp import numpy as np from pandas._typing import MatplotlibColor from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import is_dict_like from pandas.core.dtypes.missing import remove_na_arraylike import pandas as pd import pandas.core.common as com from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib.core import ( LinePlot, MPLPlot, ) from pandas.plotting._matplotlib.groupby import create_iter_data_given_by from pandas.plotting._matplotlib.style import get_standard_colors from pandas.plotting._matplotlib.tools import ( create_subplots, flatten_axes, maybe_adjust_figure, ) def boxplot( data, column=None, by=None, ax=None, fontsize=None, rot: int = 0, grid: bool = True, figsize=None, layout=None, return_type=None, **kwds, ): import matplotlib.pyplot as plt # validate return_type: if return_type not in BoxPlot._valid_return_types: raise ValueError("return_type must be {'axes', 'dict', 'both'}") if isinstance(data, pd.Series): data = data.to_frame("x") column = "x" def _get_colors(): # num_colors=3 is required as method maybe_color_bp takes the colors # in positions 0 and 2. # if colors not provided, use same defaults as DataFrame.plot.box result = get_standard_colors(num_colors=3) result = np.take(result, [0, 0, 2]) result = np.append(result, "k") colors = kwds.pop("color", None) if colors: if is_dict_like(colors): # replace colors in result array with user-specified colors # taken from the colors dict parameter # "boxes" value placed in position 0, "whiskers" in 1, etc. valid_keys = ["boxes", "whiskers", "medians", "caps"] key_to_index = dict(zip(valid_keys, range(4))) for key, value in colors.items(): if key in valid_keys: result[key_to_index[key]] = value else: raise ValueError( f"color dict contains invalid key '{key}'. 
" f"The key must be either {valid_keys}" ) else: result.fill(colors) return result def maybe_color_bp(bp, **kwds) -> None: # GH 30346, when users specifying those arguments explicitly, our defaults # for these four kwargs should be overridden; if not, use Pandas settings if not kwds.get("boxprops"): setp(bp["boxes"], color=colors[0], alpha=1) if not kwds.get("whiskerprops"): setp(bp["whiskers"], color=colors[1], alpha=1) if not kwds.get("medianprops"): setp(bp["medians"], color=colors[2], alpha=1) if not kwds.get("capprops"): setp(bp["caps"], color=colors[3], alpha=1) def plot_group(keys, values, ax: Axes, **kwds): # GH 45465: xlabel/ylabel need to be popped out before plotting happens xlabel, ylabel = kwds.pop("xlabel", None), kwds.pop("ylabel", None) if xlabel: ax.set_xlabel(pprint_thing(xlabel)) if ylabel: ax.set_ylabel(pprint_thing(ylabel)) keys = [pprint_thing(x) for x in keys] values = [np.asarray(remove_na_arraylike(v), dtype=object) for v in values] bp = ax.boxplot(values, **kwds) if fontsize is not None: ax.tick_params(axis="both", labelsize=fontsize) # GH 45465: x/y are flipped when "vert" changes is_vertical = kwds.get("vert", True) ticks = ax.get_xticks() if is_vertical else ax.get_yticks() if len(ticks) != len(keys): i, remainder = divmod(len(ticks), len(keys)) assert remainder == 0, remainder keys *= i if is_vertical: ax.set_xticklabels(keys, rotation=rot) else: ax.set_yticklabels(keys, rotation=rot) maybe_color_bp(bp, **kwds) # Return axes in multiplot case, maybe revisit later # 985 if return_type == "dict": return bp elif return_type == "both": return BoxPlot.BP(ax=ax, lines=bp) else: return ax colors = _get_colors() if column is None: columns = None else: if isinstance(column, (list, tuple)): columns = column else: columns = [column] if by is not None: # Prefer array return type for 2-D plots to match the subplot layout # https://github.com/pandas-dev/pandas/pull/12216#issuecomment-241175580 result = _grouped_plot_by_column( plot_group, data, columns=columns, by=by, grid=grid, figsize=figsize, ax=ax, layout=layout, return_type=return_type, **kwds, ) else: if return_type is None: return_type = "axes" if layout is not None: raise ValueError("The 'layout' keyword is not supported when 'by' is None") if ax is None: rc = {"figure.figsize": figsize} if figsize is not None else {} with plt.rc_context(rc): ax = plt.gca() data = data._get_numeric_data() naxes = len(data.columns) if naxes == 0: raise ValueError( "boxplot method requires numerical columns, nothing to plot." ) if columns is None: columns = data.columns else: data = data[columns] result = plot_group(columns, data.values.T, ax, **kwds) ax.grid(grid) return result def pprint_thing( thing: Any, _nest_lvl: int = 0, escape_chars: EscapeChars | None = None, default_escapes: bool = False, quote_strings: bool = False, max_seq_items: int | None = None, ) -> str: """ This function is the sanctioned way of converting objects to a string representation and properly handles nested sequences. Parameters ---------- thing : anything to be formatted _nest_lvl : internal use only. pprint_thing() is mutually-recursive with pprint_sequence, this argument is used to keep track of the current nesting level, and limit it. escape_chars : list or dict, optional Characters to escape. 
If a dict is passed, the values are the replacements default_escapes : bool, default False Whether the input escape characters replace or add to the defaults max_seq_items : int or None, default None Pass through to other pretty printers to limit sequence printing Returns ------- str """ def as_escaped_string( thing: Any, escape_chars: EscapeChars | None = escape_chars ) -> str: translate = {"\t": r"\t", "\n": r"\n", "\r": r"\r"} if isinstance(escape_chars, dict): if default_escapes: translate.update(escape_chars) else: translate = escape_chars escape_chars = list(escape_chars.keys()) else: escape_chars = escape_chars or () result = str(thing) for c in escape_chars: result = result.replace(c, translate[c]) return result if hasattr(thing, "__next__"): return str(thing) elif isinstance(thing, dict) and _nest_lvl < get_option( "display.pprint_nest_depth" ): result = _pprint_dict( thing, _nest_lvl, quote_strings=True, max_seq_items=max_seq_items ) elif is_sequence(thing) and _nest_lvl < get_option("display.pprint_nest_depth"): result = _pprint_seq( thing, _nest_lvl, escape_chars=escape_chars, quote_strings=quote_strings, max_seq_items=max_seq_items, ) elif isinstance(thing, str) and quote_strings: result = f"'{as_escaped_string(thing)}'" else: result = as_escaped_string(thing) return result def maybe_adjust_figure(fig: Figure, *args, **kwargs) -> None: """Call fig.subplots_adjust unless fig has constrained_layout enabled.""" if do_adjust_figure(fig): fig.subplots_adjust(*args, **kwargs) def create_subplots( naxes: int, sharex: bool = False, sharey: bool = False, squeeze: bool = True, subplot_kw=None, ax=None, layout=None, layout_type: str = "box", **fig_kw, ): """ Create a figure with a set of subplots already made. This utility wrapper makes it convenient to create common layouts of subplots, including the enclosing figure object, in a single call. Parameters ---------- naxes : int Number of required axes. Exceeded axes are set invisible. Default is nrows * ncols. sharex : bool If True, the X axis will be shared amongst all subplots. sharey : bool If True, the Y axis will be shared amongst all subplots. squeeze : bool If True, extra dimensions are squeezed out from the returned axis object: - if only one subplot is constructed (nrows=ncols=1), the resulting single Axis object is returned as a scalar. - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object array of Axis objects. - for NxM subplots with N>1 and M>1, a 2-d array of Axis objects is returned. If False, no squeezing is done: the returned axis object is always a 2-d array containing Axis instances, even if it ends up being 1x1. subplot_kw : dict Dict with keywords passed to the add_subplot() call used to create each subplot. ax : Matplotlib axis object, optional layout : tuple Number of rows and columns of the subplot grid. If not specified, calculated from naxes and layout_type layout_type : {'box', 'horizontal', 'vertical'}, default 'box' Specify how to layout the subplot grid. fig_kw : Other keyword arguments to be passed to the figure() call. Note that all keywords not recognized above will be automatically included here. Returns ------- fig, ax : tuple - fig is the Matplotlib Figure object - ax can be either a single axis object or an array of axis objects if more than one subplot was created. The dimensions of the resulting array can be controlled with the squeeze keyword, see above.
Examples -------- x = np.linspace(0, 2*np.pi, 400) y = np.sin(x**2) # Just a figure and one subplot f, ax = plt.subplots() ax.plot(x, y) ax.set_title('Simple plot') # Two subplots, unpack the output array immediately f, (ax1, ax2) = plt.subplots(1, 2, sharey=True) ax1.plot(x, y) ax1.set_title('Sharing Y axis') ax2.scatter(x, y) # Four polar axes plt.subplots(2, 2, subplot_kw=dict(polar=True)) """ import matplotlib.pyplot as plt if subplot_kw is None: subplot_kw = {} if ax is None: fig = plt.figure(**fig_kw) else: if is_list_like(ax): if squeeze: ax = flatten_axes(ax) if layout is not None: warnings.warn( "When passing multiple axes, layout keyword is ignored.", UserWarning, stacklevel=find_stack_level(), ) if sharex or sharey: warnings.warn( "When passing multiple axes, sharex and sharey " "are ignored. These settings must be specified when creating axes.", UserWarning, stacklevel=find_stack_level(), ) if ax.size == naxes: fig = ax.flat[0].get_figure() return fig, ax else: raise ValueError( f"The number of passed axes must be {naxes}, the " "same as the output plot" ) fig = ax.get_figure() # if ax is passed and a number of subplots is 1, return ax as it is if naxes == 1: if squeeze: return fig, ax else: return fig, flatten_axes(ax) else: warnings.warn( "To output multiple subplots, the figure containing " "the passed axes is being cleared.", UserWarning, stacklevel=find_stack_level(), ) fig.clear() nrows, ncols = _get_layout(naxes, layout=layout, layout_type=layout_type) nplots = nrows * ncols # Create empty object array to hold all axes. It's easiest to make it 1-d # so we can just append subplots upon creation, and then axarr = np.empty(nplots, dtype=object) # Create first subplot separately, so we can share it if requested ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw) if sharex: subplot_kw["sharex"] = ax0 if sharey: subplot_kw["sharey"] = ax0 axarr[0] = ax0 # Note off-by-one counting because add_subplot uses the MATLAB 1-based # convention. for i in range(1, nplots): kwds = subplot_kw.copy() # Set sharex and sharey to None for blank/dummy axes, these can # interfere with proper axis limits on the visible axes if # they share axes e.g. issue #7528 if i >= naxes: kwds["sharex"] = None kwds["sharey"] = None ax = fig.add_subplot(nrows, ncols, i + 1, **kwds) axarr[i] = ax if naxes != nplots: for ax in axarr[naxes:]: ax.set_visible(False) handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey) if squeeze: # Reshape the array to have the final desired dimension (nrow,ncol), # though discarding unneeded dimensions that equal 1. If we only have # one subplot, just return it instead of a 1-element array. 
if nplots == 1: axes = axarr[0] else: axes = axarr.reshape(nrows, ncols).squeeze() else: # returned axis array will be always 2-d, even if nrows=ncols=1 axes = axarr.reshape(nrows, ncols) return fig, axes def flatten_axes(axes: Axes | Sequence[Axes]) -> np.ndarray: if not is_list_like(axes): return np.array([axes]) elif isinstance(axes, (np.ndarray, ABCIndex)): return np.asarray(axes).ravel() return np.array(axes) def boxplot_frame_groupby( grouped, subplots: bool = True, column=None, fontsize=None, rot: int = 0, grid: bool = True, ax=None, figsize=None, layout=None, sharex: bool = False, sharey: bool = True, **kwds, ): if subplots is True: naxes = len(grouped) fig, axes = create_subplots( naxes=naxes, squeeze=False, ax=ax, sharex=sharex, sharey=sharey, figsize=figsize, layout=layout, ) axes = flatten_axes(axes) ret = pd.Series(dtype=object) for (key, group), ax in zip(grouped, axes): d = group.boxplot( ax=ax, column=column, fontsize=fontsize, rot=rot, grid=grid, **kwds ) ax.set_title(pprint_thing(key)) ret.loc[key] = d maybe_adjust_figure(fig, bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2) else: keys, frames = zip(*grouped) if grouped.axis == 0: df = pd.concat(frames, keys=keys, axis=1) else: if len(frames) > 1: df = frames[0].join(frames[1::]) else: df = frames[0] # GH 16748, DataFrameGroupby fails when subplots=False and `column` argument # is assigned, and in this case, since `df` here becomes MI after groupby, # so we need to couple the keys (grouped values) and column (original df # column) together to search for subset to plot if column is not None: column = com.convert_to_list_like(column) multi_key = pd.MultiIndex.from_product([keys, column]) column = list(multi_key.values) ret = df.boxplot( column=column, fontsize=fontsize, rot=rot, grid=grid, ax=ax, figsize=figsize, layout=layout, **kwds, ) return ret
null
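boxplot_frame_groupby is reached through DataFrameGroupBy.boxplot; with subplots=True each group gets its own Axes, laid out via create_subplots. A sketch:

import numpy as np
import pandas as pd

df = pd.DataFrame({"val": np.random.randn(40), "grp": ["x", "y"] * 20})
axes = df.groupby("grp").boxplot(subplots=True, figsize=(8, 3))  # one Axes per group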
173,012
from __future__ import annotations import importlib import types from typing import ( TYPE_CHECKING, Sequence, ) from pandas._config import get_option from pandas._typing import IndexLabel from pandas.util._decorators import ( Appender, Substitution, ) from pandas.core.dtypes.common import ( is_integer, is_list_like, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.base import PandasObject def _get_plot_backend(backend: str | None = None): """ Return the plotting backend to use (e.g. `pandas.plotting._matplotlib`). The plotting system of pandas uses matplotlib by default, but the idea here is that it can also work with other third-party backends. This function returns the module which provides a top-level `.plot` method that will actually do the plotting. The backend is specified from a string, which either comes from the keyword argument `backend`, or, if not specified, from the option `pandas.options.plotting.backend`. All the rest of the code in this file uses the backend specified there for the plotting. The backend is imported lazily, as matplotlib is a soft dependency, and pandas can be used without it being installed. Notes ----- Modifies `_backends` with imported backend as a side effect. """ backend_str: str = backend or get_option("plotting.backend") if backend_str in _backends: return _backends[backend_str] module = _load_backend(backend_str) _backends[backend_str] = module return module class Sequence(_Collection[_T_co], Reversible[_T_co], Generic[_T_co]): def __getitem__(self, i: int) -> _T_co: ... def __getitem__(self, s: slice) -> Sequence[_T_co]: ... # Mixin methods def index(self, value: Any, start: int = ..., stop: int = ...) -> int: ... def count(self, value: Any) -> int: ... def __contains__(self, x: object) -> bool: ... def __iter__(self) -> Iterator[_T_co]: ... def __reversed__(self) -> Iterator[_T_co]: ... The provided code snippet includes necessary dependencies for implementing the `hist_series` function. Write a Python function `def hist_series( self, by=None, ax=None, grid: bool = True, xlabelsize: int | None = None, xrot: float | None = None, ylabelsize: int | None = None, yrot: float | None = None, figsize: tuple[int, int] | None = None, bins: int | Sequence[int] = 10, backend: str | None = None, legend: bool = False, **kwargs, )` to solve the following problem: Draw histogram of the input series using matplotlib. Parameters ---------- by : object, optional If passed, then used to form histograms for separate groups. ax : matplotlib axis object If not passed, uses gca(). grid : bool, default True Whether to show axis grid lines. xlabelsize : int, default None If specified changes the x-axis label size. xrot : float, default None Rotation of x axis labels. ylabelsize : int, default None If specified changes the y-axis label size. yrot : float, default None Rotation of y axis labels. figsize : tuple, default None Figure size in inches by default. bins : int or sequence, default 10 Number of histogram bins to be used. If an integer is given, bins + 1 bin edges are calculated and returned. If bins is a sequence, gives bin edges, including left edge of first bin and right edge of last bin. In this case, bins is returned unmodified. backend : str, default None Backend to use instead of the backend specified in the option ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to specify the ``plotting.backend`` for the whole session, set ``pd.options.plotting.backend``. 
legend : bool, default False Whether to show the legend. .. versionadded:: 1.1.0 **kwargs To be passed to the actual plotting function. Returns ------- matplotlib.AxesSubplot A histogram plot. See Also -------- matplotlib.axes.Axes.hist : Plot a histogram using matplotlib. Here is the function: def hist_series( self, by=None, ax=None, grid: bool = True, xlabelsize: int | None = None, xrot: float | None = None, ylabelsize: int | None = None, yrot: float | None = None, figsize: tuple[int, int] | None = None, bins: int | Sequence[int] = 10, backend: str | None = None, legend: bool = False, **kwargs, ): """ Draw histogram of the input series using matplotlib. Parameters ---------- by : object, optional If passed, then used to form histograms for separate groups. ax : matplotlib axis object If not passed, uses gca(). grid : bool, default True Whether to show axis grid lines. xlabelsize : int, default None If specified changes the x-axis label size. xrot : float, default None Rotation of x axis labels. ylabelsize : int, default None If specified changes the y-axis label size. yrot : float, default None Rotation of y axis labels. figsize : tuple, default None Figure size in inches by default. bins : int or sequence, default 10 Number of histogram bins to be used. If an integer is given, bins + 1 bin edges are calculated and returned. If bins is a sequence, gives bin edges, including left edge of first bin and right edge of last bin. In this case, bins is returned unmodified. backend : str, default None Backend to use instead of the backend specified in the option ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to specify the ``plotting.backend`` for the whole session, set ``pd.options.plotting.backend``. legend : bool, default False Whether to show the legend. .. versionadded:: 1.1.0 **kwargs To be passed to the actual plotting function. Returns ------- matplotlib.AxesSubplot A histogram plot. See Also -------- matplotlib.axes.Axes.hist : Plot a histogram using matplotlib. """ plot_backend = _get_plot_backend(backend) return plot_backend.hist_series( self, by=by, ax=ax, grid=grid, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot, figsize=figsize, bins=bins, legend=legend, **kwargs, )
Draw histogram of the input series using matplotlib. Parameters ---------- by : object, optional If passed, then used to form histograms for separate groups. ax : matplotlib axis object If not passed, uses gca(). grid : bool, default True Whether to show axis grid lines. xlabelsize : int, default None If specified changes the x-axis label size. xrot : float, default None Rotation of x axis labels. ylabelsize : int, default None If specified changes the y-axis label size. yrot : float, default None Rotation of y axis labels. figsize : tuple, default None Figure size in inches by default. bins : int or sequence, default 10 Number of histogram bins to be used. If an integer is given, bins + 1 bin edges are calculated and returned. If bins is a sequence, gives bin edges, including left edge of first bin and right edge of last bin. In this case, bins is returned unmodified. backend : str, default None Backend to use instead of the backend specified in the option ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to specify the ``plotting.backend`` for the whole session, set ``pd.options.plotting.backend``. legend : bool, default False Whether to show the legend. .. versionadded:: 1.1.0 **kwargs To be passed to the actual plotting function. Returns ------- matplotlib.AxesSubplot A histogram plot. See Also -------- matplotlib.axes.Axes.hist : Plot a histogram using matplotlib.
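In practice `hist_series` is reached through the bound method rather than called directly; a minimal usage sketch with hypothetical data (plot output skipped, as in the other doctests in this file):

>>> s = pd.Series([1, 2, 2, 3, 3, 3])
>>> ax = s.hist(bins=3, grid=False)  # doctest: +SKIP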
173,013
from __future__ import annotations import importlib import types from typing import ( TYPE_CHECKING, Sequence, ) from pandas._config import get_option from pandas._typing import IndexLabel from pandas.util._decorators import ( Appender, Substitution, ) from pandas.core.dtypes.common import ( is_integer, is_list_like, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.base import PandasObject def _get_plot_backend(backend: str | None = None): """ Return the plotting backend to use (e.g. `pandas.plotting._matplotlib`). The plotting system of pandas uses matplotlib by default, but the idea here is that it can also work with other third-party backends. This function returns the module which provides a top-level `.plot` method that will actually do the plotting. The backend is specified from a string, which either comes from the keyword argument `backend`, or, if not specified, from the option `pandas.options.plotting.backend`. All the rest of the code in this file uses the backend specified there for the plotting. The backend is imported lazily, as matplotlib is a soft dependency, and pandas can be used without it being installed. Notes ----- Modifies `_backends` with imported backend as a side effect. """ backend_str: str = backend or get_option("plotting.backend") if backend_str in _backends: return _backends[backend_str] module = _load_backend(backend_str) _backends[backend_str] = module return module class Sequence(_Collection[_T_co], Reversible[_T_co], Generic[_T_co]): def __getitem__(self, i: int) -> _T_co: ... def __getitem__(self, s: slice) -> Sequence[_T_co]: ... # Mixin methods def index(self, value: Any, start: int = ..., stop: int = ...) -> int: ... def count(self, value: Any) -> int: ... def __contains__(self, x: object) -> bool: ... def __iter__(self) -> Iterator[_T_co]: ... def __reversed__(self) -> Iterator[_T_co]: ... IndexLabel = Union[Hashable, Sequence[Hashable]] The provided code snippet includes necessary dependencies for implementing the `hist_frame` function. Write a Python function `def hist_frame( data: DataFrame, column: IndexLabel = None, by=None, grid: bool = True, xlabelsize: int | None = None, xrot: float | None = None, ylabelsize: int | None = None, yrot: float | None = None, ax=None, sharex: bool = False, sharey: bool = False, figsize: tuple[int, int] | None = None, layout: tuple[int, int] | None = None, bins: int | Sequence[int] = 10, backend: str | None = None, legend: bool = False, **kwargs, )` to solve the following problem: Make a histogram of the DataFrame's columns. A `histogram`_ is a representation of the distribution of data. This function calls :meth:`matplotlib.pyplot.hist`, on each series in the DataFrame, resulting in one histogram per column. .. _histogram: https://en.wikipedia.org/wiki/Histogram Parameters ---------- data : DataFrame The pandas object holding the data. column : str or sequence, optional If passed, will be used to limit data to a subset of columns. by : object, optional If passed, then used to form histograms for separate groups. grid : bool, default True Whether to show axis grid lines. xlabelsize : int, default None If specified changes the x-axis label size. xrot : float, default None Rotation of x axis labels. For example, a value of 90 displays the x labels rotated 90 degrees clockwise. ylabelsize : int, default None If specified changes the y-axis label size. yrot : float, default None Rotation of y axis labels. 
For example, a value of 90 displays the y labels rotated 90 degrees clockwise. ax : Matplotlib axes object, default None The axes to plot the histogram on. sharex : bool, default True if ax is None else False In case subplots=True, share x axis and set some x axis labels to invisible; defaults to True if ax is None otherwise False if an ax is passed in. Note that passing in both an ax and sharex=True will alter all x axis labels for all subplots in a figure. sharey : bool, default False In case subplots=True, share y axis and set some y axis labels to invisible. figsize : tuple, optional The size in inches of the figure to create. Uses the value in `matplotlib.rcParams` by default. layout : tuple, optional Tuple of (rows, columns) for the layout of the histograms. bins : int or sequence, default 10 Number of histogram bins to be used. If an integer is given, bins + 1 bin edges are calculated and returned. If bins is a sequence, gives bin edges, including left edge of first bin and right edge of last bin. In this case, bins is returned unmodified. backend : str, default None Backend to use instead of the backend specified in the option ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to specify the ``plotting.backend`` for the whole session, set ``pd.options.plotting.backend``. legend : bool, default False Whether to show the legend. .. versionadded:: 1.1.0 **kwargs All other plotting keyword arguments to be passed to :meth:`matplotlib.pyplot.hist`. Returns ------- matplotlib.AxesSubplot or numpy.ndarray of them See Also -------- matplotlib.pyplot.hist : Plot a histogram using matplotlib. Examples -------- This example draws a histogram based on the length and width of some animals, displayed in three bins .. plot:: :context: close-figs >>> df = pd.DataFrame({ ... 'length': [1.5, 0.5, 1.2, 0.9, 3], ... 'width': [0.7, 0.2, 0.15, 0.2, 1.1] ... }, index=['pig', 'rabbit', 'duck', 'chicken', 'horse']) >>> hist = df.hist(bins=3) Here is the function: def hist_frame( data: DataFrame, column: IndexLabel = None, by=None, grid: bool = True, xlabelsize: int | None = None, xrot: float | None = None, ylabelsize: int | None = None, yrot: float | None = None, ax=None, sharex: bool = False, sharey: bool = False, figsize: tuple[int, int] | None = None, layout: tuple[int, int] | None = None, bins: int | Sequence[int] = 10, backend: str | None = None, legend: bool = False, **kwargs, ): """ Make a histogram of the DataFrame's columns. A `histogram`_ is a representation of the distribution of data. This function calls :meth:`matplotlib.pyplot.hist`, on each series in the DataFrame, resulting in one histogram per column. .. _histogram: https://en.wikipedia.org/wiki/Histogram Parameters ---------- data : DataFrame The pandas object holding the data. column : str or sequence, optional If passed, will be used to limit data to a subset of columns. by : object, optional If passed, then used to form histograms for separate groups. grid : bool, default True Whether to show axis grid lines. xlabelsize : int, default None If specified changes the x-axis label size. xrot : float, default None Rotation of x axis labels. For example, a value of 90 displays the x labels rotated 90 degrees clockwise. ylabelsize : int, default None If specified changes the y-axis label size. yrot : float, default None Rotation of y axis labels. For example, a value of 90 displays the y labels rotated 90 degrees clockwise. ax : Matplotlib axes object, default None The axes to plot the histogram on. 
sharex : bool, default True if ax is None else False In case subplots=True, share x axis and set some x axis labels to invisible; defaults to True if ax is None otherwise False if an ax is passed in. Note that passing in both an ax and sharex=True will alter all x axis labels for all subplots in a figure. sharey : bool, default False In case subplots=True, share y axis and set some y axis labels to invisible. figsize : tuple, optional The size in inches of the figure to create. Uses the value in `matplotlib.rcParams` by default. layout : tuple, optional Tuple of (rows, columns) for the layout of the histograms. bins : int or sequence, default 10 Number of histogram bins to be used. If an integer is given, bins + 1 bin edges are calculated and returned. If bins is a sequence, gives bin edges, including left edge of first bin and right edge of last bin. In this case, bins is returned unmodified. backend : str, default None Backend to use instead of the backend specified in the option ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to specify the ``plotting.backend`` for the whole session, set ``pd.options.plotting.backend``. legend : bool, default False Whether to show the legend. .. versionadded:: 1.1.0 **kwargs All other plotting keyword arguments to be passed to :meth:`matplotlib.pyplot.hist`. Returns ------- matplotlib.AxesSubplot or numpy.ndarray of them See Also -------- matplotlib.pyplot.hist : Plot a histogram using matplotlib. Examples -------- This example draws a histogram based on the length and width of some animals, displayed in three bins .. plot:: :context: close-figs >>> df = pd.DataFrame({ ... 'length': [1.5, 0.5, 1.2, 0.9, 3], ... 'width': [0.7, 0.2, 0.15, 0.2, 1.1] ... }, index=['pig', 'rabbit', 'duck', 'chicken', 'horse']) >>> hist = df.hist(bins=3) """ plot_backend = _get_plot_backend(backend) return plot_backend.hist_frame( data, column=column, by=by, grid=grid, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot, ax=ax, sharex=sharex, sharey=sharey, figsize=figsize, layout=layout, legend=legend, bins=bins, **kwargs, )
Make a histogram of the DataFrame's columns. A `histogram`_ is a representation of the distribution of data. This function calls :meth:`matplotlib.pyplot.hist`, on each series in the DataFrame, resulting in one histogram per column. .. _histogram: https://en.wikipedia.org/wiki/Histogram Parameters ---------- data : DataFrame The pandas object holding the data. column : str or sequence, optional If passed, will be used to limit data to a subset of columns. by : object, optional If passed, then used to form histograms for separate groups. grid : bool, default True Whether to show axis grid lines. xlabelsize : int, default None If specified changes the x-axis label size. xrot : float, default None Rotation of x axis labels. For example, a value of 90 displays the x labels rotated 90 degrees clockwise. ylabelsize : int, default None If specified changes the y-axis label size. yrot : float, default None Rotation of y axis labels. For example, a value of 90 displays the y labels rotated 90 degrees clockwise. ax : Matplotlib axes object, default None The axes to plot the histogram on. sharex : bool, default True if ax is None else False In case subplots=True, share x axis and set some x axis labels to invisible; defaults to True if ax is None otherwise False if an ax is passed in. Note that passing in both an ax and sharex=True will alter all x axis labels for all subplots in a figure. sharey : bool, default False In case subplots=True, share y axis and set some y axis labels to invisible. figsize : tuple, optional The size in inches of the figure to create. Uses the value in `matplotlib.rcParams` by default. layout : tuple, optional Tuple of (rows, columns) for the layout of the histograms. bins : int or sequence, default 10 Number of histogram bins to be used. If an integer is given, bins + 1 bin edges are calculated and returned. If bins is a sequence, gives bin edges, including left edge of first bin and right edge of last bin. In this case, bins is returned unmodified. backend : str, default None Backend to use instead of the backend specified in the option ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to specify the ``plotting.backend`` for the whole session, set ``pd.options.plotting.backend``. legend : bool, default False Whether to show the legend. .. versionadded:: 1.1.0 **kwargs All other plotting keyword arguments to be passed to :meth:`matplotlib.pyplot.hist`. Returns ------- matplotlib.AxesSubplot or numpy.ndarray of them See Also -------- matplotlib.pyplot.hist : Plot a histogram using matplotlib. Examples -------- This example draws a histogram based on the length and width of some animals, displayed in three bins .. plot:: :context: close-figs >>> df = pd.DataFrame({ ... 'length': [1.5, 0.5, 1.2, 0.9, 3], ... 'width': [0.7, 0.2, 0.15, 0.2, 1.1] ... }, index=['pig', 'rabbit', 'duck', 'chicken', 'horse']) >>> hist = df.hist(bins=3)
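The docstring example covers the plain per-column case; a sketch of the `by` grouping path, which yields one subplot per group (hypothetical data, plot skipped):

>>> df = pd.DataFrame({"length": [1.5, 0.5, 1.2, 0.9],
...                    "animal": ["pig", "rabbit", "pig", "rabbit"]})
>>> axes = df.hist(column="length", by="animal", bins=2)  # doctest: +SKIP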
173,014
from __future__ import annotations import importlib import types from typing import ( TYPE_CHECKING, Sequence, ) from pandas._config import get_option from pandas._typing import IndexLabel from pandas.util._decorators import ( Appender, Substitution, ) from pandas.core.dtypes.common import ( is_integer, is_list_like, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.base import PandasObject def _get_plot_backend(backend: str | None = None): """ Return the plotting backend to use (e.g. `pandas.plotting._matplotlib`). The plotting system of pandas uses matplotlib by default, but the idea here is that it can also work with other third-party backends. This function returns the module which provides a top-level `.plot` method that will actually do the plotting. The backend is specified from a string, which either comes from the keyword argument `backend`, or, if not specified, from the option `pandas.options.plotting.backend`. All the rest of the code in this file uses the backend specified there for the plotting. The backend is imported lazily, as matplotlib is a soft dependency, and pandas can be used without it being installed. Notes ----- Modifies `_backends` with imported backend as a side effect. """ backend_str: str = backend or get_option("plotting.backend") if backend_str in _backends: return _backends[backend_str] module = _load_backend(backend_str) _backends[backend_str] = module return module def boxplot( data: DataFrame, column: str | list[str] | None = None, by: str | list[str] | None = None, ax: Axes | None = None, fontsize: float | str | None = None, rot: int = 0, grid: bool = True, figsize: tuple[float, float] | None = None, layout: tuple[int, int] | None = None, return_type: str | None = None, **kwargs, ): plot_backend = _get_plot_backend("matplotlib") return plot_backend.boxplot( data, column=column, by=by, ax=ax, fontsize=fontsize, rot=rot, grid=grid, figsize=figsize, layout=layout, return_type=return_type, **kwargs, )
null
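`boxplot` above is the thin dispatcher pinned to the matplotlib backend and exposed as `pandas.plotting.boxplot`; a minimal usage sketch with hypothetical data (plot skipped):

>>> import numpy as np
>>> df = pd.DataFrame(np.random.randn(10, 2), columns=["A", "B"])
>>> ax = pd.plotting.boxplot(df, column=["A", "B"], grid=False)  # doctest: +SKIP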
173,015
from __future__ import annotations import importlib import types from typing import ( TYPE_CHECKING, Sequence, ) from pandas._config import get_option from pandas._typing import IndexLabel from pandas.util._decorators import ( Appender, Substitution, ) from pandas.core.dtypes.common import ( is_integer, is_list_like, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.base import PandasObject def _get_plot_backend(backend: str | None = None): """ Return the plotting backend to use (e.g. `pandas.plotting._matplotlib`). The plotting system of pandas uses matplotlib by default, but the idea here is that it can also work with other third-party backends. This function returns the module which provides a top-level `.plot` method that will actually do the plotting. The backend is specified from a string, which either comes from the keyword argument `backend`, or, if not specified, from the option `pandas.options.plotting.backend`. All the rest of the code in this file uses the backend specified there for the plotting. The backend is imported lazily, as matplotlib is a soft dependency, and pandas can be used without it being installed. Notes ----- Modifies `_backends` with imported backend as a side effect. """ backend_str: str = backend or get_option("plotting.backend") if backend_str in _backends: return _backends[backend_str] module = _load_backend(backend_str) _backends[backend_str] = module return module def boxplot_frame( self, column=None, by=None, ax=None, fontsize=None, rot: int = 0, grid: bool = True, figsize=None, layout=None, return_type=None, backend=None, **kwargs, ): plot_backend = _get_plot_backend(backend) return plot_backend.boxplot_frame( self, column=column, by=by, ax=ax, fontsize=fontsize, rot=rot, grid=grid, figsize=figsize, layout=layout, return_type=return_type, **kwargs, )
null
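`boxplot_frame` is what `DataFrame.boxplot` resolves to after backend lookup, so the usual entry point is the method; a sketch with hypothetical data (plot skipped):

>>> df = pd.DataFrame({"val": [1, 2, 3, 4], "grp": ["a", "a", "b", "b"]})
>>> ax = df.boxplot(column="val", by="grp", rot=45)  # doctest: +SKIP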
173,016
from __future__ import annotations import importlib import types from typing import ( TYPE_CHECKING, Sequence, ) from pandas._config import get_option from pandas._typing import IndexLabel from pandas.util._decorators import ( Appender, Substitution, ) from pandas.core.dtypes.common import ( is_integer, is_list_like, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.base import PandasObject def _get_plot_backend(backend: str | None = None): """ Return the plotting backend to use (e.g. `pandas.plotting._matplotlib`). The plotting system of pandas uses matplotlib by default, but the idea here is that it can also work with other third-party backends. This function returns the module which provides a top-level `.plot` method that will actually do the plotting. The backend is specified from a string, which either comes from the keyword argument `backend`, or, if not specified, from the option `pandas.options.plotting.backend`. All the rest of the code in this file uses the backend specified there for the plotting. The backend is imported lazily, as matplotlib is a soft dependency, and pandas can be used without it being installed. Notes ----- Modifies `_backends` with imported backend as a side effect. """ backend_str: str = backend or get_option("plotting.backend") if backend_str in _backends: return _backends[backend_str] module = _load_backend(backend_str) _backends[backend_str] = module return module The provided code snippet includes necessary dependencies for implementing the `boxplot_frame_groupby` function. Write a Python function `def boxplot_frame_groupby( grouped, subplots: bool = True, column=None, fontsize=None, rot: int = 0, grid: bool = True, ax=None, figsize=None, layout=None, sharex: bool = False, sharey: bool = True, backend=None, **kwargs, )` to solve the following problem: Make box plots from DataFrameGroupBy data. Parameters ---------- grouped : Grouped DataFrame subplots : bool * ``False`` - no subplots will be used * ``True`` - create a subplot for each group. column : column name or list of names, or vector Can be any valid input to groupby. fontsize : float or str rot : label rotation angle grid : Setting this to True will show the grid ax : Matplotlib axis object, default None figsize : A tuple (width, height) in inches layout : tuple (optional) The layout of the plot: (rows, columns). sharex : bool, default False Whether x-axes will be shared among subplots. sharey : bool, default True Whether y-axes will be shared among subplots. backend : str, default None Backend to use instead of the backend specified in the option ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to specify the ``plotting.backend`` for the whole session, set ``pd.options.plotting.backend``. **kwargs All other plotting keyword arguments to be passed to matplotlib's boxplot function. Returns ------- dict of key/value = group key/DataFrame.boxplot return value or DataFrame.boxplot return value in case subplots=figures=False Examples -------- You can create boxplots for grouped data and show them as separate subplots: .. 
plot:: :context: close-figs >>> import itertools >>> tuples = [t for t in itertools.product(range(1000), range(4))] >>> index = pd.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1']) >>> data = np.random.randn(len(index),4) >>> df = pd.DataFrame(data, columns=list('ABCD'), index=index) >>> grouped = df.groupby(level='lvl1') >>> grouped.boxplot(rot=45, fontsize=12, figsize=(8,10)) # doctest: +SKIP The ``subplots=False`` option shows the boxplots in a single figure. .. plot:: :context: close-figs >>> grouped.boxplot(subplots=False, rot=45, fontsize=12) # doctest: +SKIP Here is the function: def boxplot_frame_groupby( grouped, subplots: bool = True, column=None, fontsize=None, rot: int = 0, grid: bool = True, ax=None, figsize=None, layout=None, sharex: bool = False, sharey: bool = True, backend=None, **kwargs, ): """ Make box plots from DataFrameGroupBy data. Parameters ---------- grouped : Grouped DataFrame subplots : bool * ``False`` - no subplots will be used * ``True`` - create a subplot for each group. column : column name or list of names, or vector Can be any valid input to groupby. fontsize : float or str rot : label rotation angle grid : Setting this to True will show the grid ax : Matplotlib axis object, default None figsize : A tuple (width, height) in inches layout : tuple (optional) The layout of the plot: (rows, columns). sharex : bool, default False Whether x-axes will be shared among subplots. sharey : bool, default True Whether y-axes will be shared among subplots. backend : str, default None Backend to use instead of the backend specified in the option ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to specify the ``plotting.backend`` for the whole session, set ``pd.options.plotting.backend``. **kwargs All other plotting keyword arguments to be passed to matplotlib's boxplot function. Returns ------- dict of key/value = group key/DataFrame.boxplot return value or DataFrame.boxplot return value in case subplots=figures=False Examples -------- You can create boxplots for grouped data and show them as separate subplots: .. plot:: :context: close-figs >>> import itertools >>> tuples = [t for t in itertools.product(range(1000), range(4))] >>> index = pd.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1']) >>> data = np.random.randn(len(index),4) >>> df = pd.DataFrame(data, columns=list('ABCD'), index=index) >>> grouped = df.groupby(level='lvl1') >>> grouped.boxplot(rot=45, fontsize=12, figsize=(8,10)) # doctest: +SKIP The ``subplots=False`` option shows the boxplots in a single figure. .. plot:: :context: close-figs >>> grouped.boxplot(subplots=False, rot=45, fontsize=12) # doctest: +SKIP """ plot_backend = _get_plot_backend(backend) return plot_backend.boxplot_frame_groupby( grouped, subplots=subplots, column=column, fontsize=fontsize, rot=rot, grid=grid, ax=ax, figsize=figsize, layout=layout, sharex=sharex, sharey=sharey, **kwargs, )
Make box plots from DataFrameGroupBy data. Parameters ---------- grouped : Grouped DataFrame subplots : bool * ``False`` - no subplots will be used * ``True`` - create a subplot for each group. column : column name or list of names, or vector Can be any valid input to groupby. fontsize : float or str rot : label rotation angle grid : Setting this to True will show the grid ax : Matplotlib axis object, default None figsize : A tuple (width, height) in inches layout : tuple (optional) The layout of the plot: (rows, columns). sharex : bool, default False Whether x-axes will be shared among subplots. sharey : bool, default True Whether y-axes will be shared among subplots. backend : str, default None Backend to use instead of the backend specified in the option ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to specify the ``plotting.backend`` for the whole session, set ``pd.options.plotting.backend``. **kwargs All other plotting keyword arguments to be passed to matplotlib's boxplot function. Returns ------- dict of key/value = group key/DataFrame.boxplot return value or DataFrame.boxplot return value in case subplots=figures=False Examples -------- You can create boxplots for grouped data and show them as separate subplots: .. plot:: :context: close-figs >>> import itertools >>> tuples = [t for t in itertools.product(range(1000), range(4))] >>> index = pd.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1']) >>> data = np.random.randn(len(index),4) >>> df = pd.DataFrame(data, columns=list('ABCD'), index=index) >>> grouped = df.groupby(level='lvl1') >>> grouped.boxplot(rot=45, fontsize=12, figsize=(8,10)) # doctest: +SKIP The ``subplots=False`` option shows the boxplots in a single figure. .. plot:: :context: close-figs >>> grouped.boxplot(subplots=False, rot=45, fontsize=12) # doctest: +SKIP
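One point the Returns section states tersely: with `subplots=True` the result is a Series keyed by group (one boxplot return value per group), while `subplots=False` collapses to a single `DataFrame.boxplot` return value. Continuing the docstring's `grouped` example:

>>> ret = grouped.boxplot(subplots=True)  # doctest: +SKIP
>>> list(ret.index)                       # doctest: +SKIP
[0, 1, 2, 3]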
173,017
from __future__ import annotations from contextlib import suppress import sys from typing import ( TYPE_CHECKING, Hashable, Sequence, TypeVar, cast, final, ) import warnings import numpy as np from pandas._config import using_copy_on_write from pandas._libs.indexing import NDFrameIndexerBase from pandas._libs.lib import item_from_zerodim from pandas._typing import ( Axis, AxisInt, ) from pandas.compat import PYPY from pandas.errors import ( AbstractMethodError, ChainedAssignmentError, IndexingError, InvalidIndexError, LossySetitemError, _chained_assignment_msg, ) from pandas.util._decorators import doc from pandas.core.dtypes.cast import ( can_hold_element, maybe_promote, ) from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_extension_array_dtype, is_hashable, is_integer, is_iterator, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_sequence, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( infer_fill_value, is_valid_na_for_dtype, isna, na_value_for_dtype, ) from pandas.core import algorithms as algos import pandas.core.common as com from pandas.core.construction import ( array as pd_array, extract_array, ) from pandas.core.indexers import ( check_array_indexer, is_list_like_indexer, is_scalar_indexer, length_of_indexer, ) from pandas.core.indexes.api import ( Index, MultiIndex, ) class Hashable(Protocol, metaclass=ABCMeta): # TODO: This is special, in that a subclass of a hashable class may not be hashable # (for example, list vs. object). It's not obvious how to represent this. This class # is currently mostly useless for static checking. def __hash__(self) -> int: ... The provided code snippet includes necessary dependencies for implementing the `_tuplify` function. Write a Python function `def _tuplify(ndim: int, loc: Hashable) -> tuple[Hashable | slice, ...]` to solve the following problem: Given an indexer for the first dimension, create an equivalent tuple for indexing over all dimensions. Parameters ---------- ndim : int loc : object Returns ------- tuple Here is the function: def _tuplify(ndim: int, loc: Hashable) -> tuple[Hashable | slice, ...]: """ Given an indexer for the first dimension, create an equivalent tuple for indexing over all dimensions. Parameters ---------- ndim : int loc : object Returns ------- tuple """ _tup: list[Hashable | slice] _tup = [slice(None, None) for _ in range(ndim)] _tup[0] = loc return tuple(_tup)
Given an indexer for the first dimension, create an equivalent tuple for indexing over all dimensions. Parameters ---------- ndim : int loc : object Returns ------- tuple
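A worked example of the padding behaviour — the label fills axis 0 and every remaining axis gets a full slice (assuming `_tuplify` is in scope as defined above):

>>> _tuplify(3, "a")
('a', slice(None, None, None), slice(None, None, None))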
173,018
from __future__ import annotations from contextlib import suppress import sys from typing import ( TYPE_CHECKING, Hashable, Sequence, TypeVar, cast, final, ) import warnings import numpy as np from pandas._config import using_copy_on_write from pandas._libs.indexing import NDFrameIndexerBase from pandas._libs.lib import item_from_zerodim from pandas._typing import ( Axis, AxisInt, ) from pandas.compat import PYPY from pandas.errors import ( AbstractMethodError, ChainedAssignmentError, IndexingError, InvalidIndexError, LossySetitemError, _chained_assignment_msg, ) from pandas.util._decorators import doc from pandas.core.dtypes.cast import ( can_hold_element, maybe_promote, ) from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_extension_array_dtype, is_hashable, is_integer, is_iterator, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_sequence, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( infer_fill_value, is_valid_na_for_dtype, isna, na_value_for_dtype, ) from pandas.core import algorithms as algos import pandas.core.common as com from pandas.core.construction import ( array as pd_array, extract_array, ) from pandas.core.indexers import ( check_array_indexer, is_list_like_indexer, is_scalar_indexer, length_of_indexer, ) from pandas.core.indexes.api import ( Index, MultiIndex, ) AxisInt = int The provided code snippet includes necessary dependencies for implementing the `_tupleize_axis_indexer` function. Write a Python function `def _tupleize_axis_indexer(ndim: int, axis: AxisInt, key) -> tuple` to solve the following problem: If we have an axis, adapt the given key to be axis-independent. Here is the function: def _tupleize_axis_indexer(ndim: int, axis: AxisInt, key) -> tuple: """ If we have an axis, adapt the given key to be axis-independent. """ new_key = [slice(None)] * ndim new_key[axis] = key return tuple(new_key)
If we have an axis, adapt the given key to be axis-independent.
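A worked example: the key lands on the requested axis and every other axis gets `slice(None)`:

>>> _tupleize_axis_indexer(2, 1, [0, 2])
(slice(None, None, None), [0, 2])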
173,019
from __future__ import annotations from contextlib import suppress import sys from typing import ( TYPE_CHECKING, Hashable, Sequence, TypeVar, cast, final, ) import warnings import numpy as np from pandas._config import using_copy_on_write from pandas._libs.indexing import NDFrameIndexerBase from pandas._libs.lib import item_from_zerodim from pandas._typing import ( Axis, AxisInt, ) from pandas.compat import PYPY from pandas.errors import ( AbstractMethodError, ChainedAssignmentError, IndexingError, InvalidIndexError, LossySetitemError, _chained_assignment_msg, ) from pandas.util._decorators import doc from pandas.core.dtypes.cast import ( can_hold_element, maybe_promote, ) from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_extension_array_dtype, is_hashable, is_integer, is_iterator, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_sequence, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( infer_fill_value, is_valid_na_for_dtype, isna, na_value_for_dtype, ) from pandas.core import algorithms as algos import pandas.core.common as com from pandas.core.construction import ( array as pd_array, extract_array, ) from pandas.core.indexers import ( check_array_indexer, is_list_like_indexer, is_scalar_indexer, length_of_indexer, ) from pandas.core.indexes.api import ( Index, MultiIndex, ) class IndexingError(Exception): """ Exception is raised when trying to index and there is a mismatch in dimensions. Examples -------- >>> df = pd.DataFrame({'A': [1, 1, 1]}) >>> df.loc[..., ..., 'A'] # doctest: +SKIP ... # IndexingError: indexer may only contain one '...' entry >>> df = pd.DataFrame({'A': [1, 1, 1]}) >>> df.loc[1, ..., ...] # doctest: +SKIP ... # IndexingError: Too many indexers >>> df[pd.Series([True], dtype=bool)] # doctest: +SKIP ... # IndexingError: Unalignable boolean Series provided as indexer... >>> s = pd.Series(range(2), ... index = pd.MultiIndex.from_product([["a", "b"], ["c"]])) >>> s.loc["a", "c", "d"] # doctest: +SKIP ... # IndexingError: Too many indexers """ def is_object_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of the object dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the object dtype. Examples -------- >>> from pandas.api.types import is_object_dtype >>> is_object_dtype(object) True >>> is_object_dtype(int) False >>> is_object_dtype(np.array([], dtype=object)) True >>> is_object_dtype(np.array([], dtype=int)) False >>> is_object_dtype([1, 2, 3]) False """ return _is_dtype_type(arr_or_dtype, classes(np.object_)) def is_extension_array_dtype(arr_or_dtype) -> bool: """ Check if an object is a pandas extension array type. See the :ref:`Use Guide <extending.extension-types>` for more. Parameters ---------- arr_or_dtype : object For array-like input, the ``.dtype`` attribute will be extracted. Returns ------- bool Whether the `arr_or_dtype` is an extension array type. Notes ----- This checks whether an object implements the pandas extension array interface. In pandas, this includes: * Categorical * Sparse * Interval * Period * DatetimeArray * TimedeltaArray Third-party libraries may implement arrays or types satisfying this interface as well. 
Examples -------- >>> from pandas.api.types import is_extension_array_dtype >>> arr = pd.Categorical(['a', 'b']) >>> is_extension_array_dtype(arr) True >>> is_extension_array_dtype(arr.dtype) True >>> arr = np.array(['a', 'b']) >>> is_extension_array_dtype(arr.dtype) False """ dtype = getattr(arr_or_dtype, "dtype", arr_or_dtype) if isinstance(dtype, ExtensionDtype): return True elif isinstance(dtype, np.dtype): return False else: return registry.find(dtype) is not None ABCSeries = cast( "Type[Series]", create_pandas_abc_type("ABCSeries", "_typ", ("series",)), ) The provided code snippet includes necessary dependencies for implementing the `check_bool_indexer` function. Write a Python function `def check_bool_indexer(index: Index, key) -> np.ndarray` to solve the following problem: Check if key is a valid boolean indexer for an object with such index and perform reindexing or conversion if needed. This function assumes that is_bool_indexer(key) == True. Parameters ---------- index : Index Index of the object on which the indexing is done. key : list-like Boolean indexer to check. Returns ------- np.array Resulting key. Raises ------ IndexError If the key does not have the same length as index. IndexingError If the index of the key is unalignable to index. Here is the function: def check_bool_indexer(index: Index, key) -> np.ndarray: """ Check if key is a valid boolean indexer for an object with such index and perform reindexing or conversion if needed. This function assumes that is_bool_indexer(key) == True. Parameters ---------- index : Index Index of the object on which the indexing is done. key : list-like Boolean indexer to check. Returns ------- np.array Resulting key. Raises ------ IndexError If the key does not have the same length as index. IndexingError If the index of the key is unalignable to index. """ result = key if isinstance(key, ABCSeries) and not key.index.equals(index): indexer = result.index.get_indexer_for(index) if -1 in indexer: raise IndexingError( "Unalignable boolean Series provided as " "indexer (index of the boolean Series and of " "the indexed object do not match)." ) result = result.take(indexer) # fall through for boolean if not is_extension_array_dtype(result.dtype): return result.astype(bool)._values if is_object_dtype(key): # key might be object-dtype bool, check_array_indexer needs bool array result = np.asarray(result, dtype=bool) elif not is_array_like(result): # GH 33924 # key may contain nan elements, check_array_indexer needs bool array result = pd_array(result, dtype=bool) return check_array_indexer(index, result)
Check if key is a valid boolean indexer for an object with such index and perform reindexing or conversion if needed. This function assumes that is_bool_indexer(key) == True. Parameters ---------- index : Index Index of the object on which the indexing is done. key : list-like Boolean indexer to check. Returns ------- np.array Resulting key. Raises ------ IndexError If the key does not have the same length as index. IndexingError If the index of the key is unalignable to index.
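A sketch of the alignment branch, assuming the function is in scope as defined above: a boolean Series whose index is a permutation of the target index is realigned before being converted to a plain bool array:

>>> idx = pd.Index(["a", "b", "c"])
>>> mask = pd.Series([True, False, True], index=["c", "a", "b"])
>>> check_bool_indexer(idx, mask)
array([False,  True,  True])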
173,020
from __future__ import annotations from contextlib import suppress import sys from typing import ( TYPE_CHECKING, Hashable, Sequence, TypeVar, cast, final, ) import warnings import numpy as np from pandas._config import using_copy_on_write from pandas._libs.indexing import NDFrameIndexerBase from pandas._libs.lib import item_from_zerodim from pandas._typing import ( Axis, AxisInt, ) from pandas.compat import PYPY from pandas.errors import ( AbstractMethodError, ChainedAssignmentError, IndexingError, InvalidIndexError, LossySetitemError, _chained_assignment_msg, ) from pandas.util._decorators import doc from pandas.core.dtypes.cast import ( can_hold_element, maybe_promote, ) from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_extension_array_dtype, is_hashable, is_integer, is_iterator, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_sequence, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( infer_fill_value, is_valid_na_for_dtype, isna, na_value_for_dtype, ) from pandas.core import algorithms as algos import pandas.core.common as com from pandas.core.construction import ( array as pd_array, extract_array, ) from pandas.core.indexers import ( check_array_indexer, is_list_like_indexer, is_scalar_indexer, length_of_indexer, ) from pandas.core.indexes.api import ( Index, MultiIndex, ) The provided code snippet includes necessary dependencies for implementing the `convert_missing_indexer` function. Write a Python function `def convert_missing_indexer(indexer)` to solve the following problem: Reverse convert a missing indexer, which is a dict return the scalar indexer and a boolean indicating if we converted Here is the function: def convert_missing_indexer(indexer): """ Reverse convert a missing indexer, which is a dict return the scalar indexer and a boolean indicating if we converted """ if isinstance(indexer, dict): # a missing key (but not a tuple indexer) indexer = indexer["key"] if isinstance(indexer, bool): raise KeyError("cannot use a single bool to index into setitem") return indexer, True return indexer, False
Reverse convert a missing indexer, which is a dict return the scalar indexer and a boolean indicating if we converted
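Worked examples of the two branches — a dict marks a missing key and is unwrapped (with the converted flag set), while anything else passes through:

>>> convert_missing_indexer({"key": "new_label"})
('new_label', True)
>>> convert_missing_indexer(3)
(3, False)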
173,021
from __future__ import annotations from contextlib import suppress import sys from typing import ( TYPE_CHECKING, Hashable, Sequence, TypeVar, cast, final, ) import warnings import numpy as np from pandas._config import using_copy_on_write from pandas._libs.indexing import NDFrameIndexerBase from pandas._libs.lib import item_from_zerodim from pandas._typing import ( Axis, AxisInt, ) from pandas.compat import PYPY from pandas.errors import ( AbstractMethodError, ChainedAssignmentError, IndexingError, InvalidIndexError, LossySetitemError, _chained_assignment_msg, ) from pandas.util._decorators import doc from pandas.core.dtypes.cast import ( can_hold_element, maybe_promote, ) from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_extension_array_dtype, is_hashable, is_integer, is_iterator, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_sequence, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( infer_fill_value, is_valid_na_for_dtype, isna, na_value_for_dtype, ) from pandas.core import algorithms as algos import pandas.core.common as com from pandas.core.construction import ( array as pd_array, extract_array, ) from pandas.core.indexers import ( check_array_indexer, is_list_like_indexer, is_scalar_indexer, length_of_indexer, ) from pandas.core.indexes.api import ( Index, MultiIndex, ) The provided code snippet includes necessary dependencies for implementing the `convert_from_missing_indexer_tuple` function. Write a Python function `def convert_from_missing_indexer_tuple(indexer, axes)` to solve the following problem: Create a filtered indexer that doesn't have any missing indexers. Here is the function: def convert_from_missing_indexer_tuple(indexer, axes): """ Create a filtered indexer that doesn't have any missing indexers. """ def get_indexer(_i, _idx): return axes[_i].get_loc(_idx["key"]) if isinstance(_idx, dict) else _idx return tuple(get_indexer(_i, _idx) for _i, _idx in enumerate(indexer))
Create a filtered indexer that doesn't have any missing indexers.
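A sketch with a hypothetical pair of axes: the dict-wrapped entry is resolved to a positional location via `get_loc`, while the plain entry is kept as-is:

>>> axes = [pd.Index(["a", "b", "c"]), pd.Index([10, 20])]
>>> convert_from_missing_indexer_tuple(({"key": "b"}, 1), axes)
(1, 1)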
173,022
from __future__ import annotations from contextlib import suppress import sys from typing import ( TYPE_CHECKING, Hashable, Sequence, TypeVar, cast, final, ) import warnings import numpy as np from pandas._config import using_copy_on_write from pandas._libs.indexing import NDFrameIndexerBase from pandas._libs.lib import item_from_zerodim from pandas._typing import ( Axis, AxisInt, ) from pandas.compat import PYPY from pandas.errors import ( AbstractMethodError, ChainedAssignmentError, IndexingError, InvalidIndexError, LossySetitemError, _chained_assignment_msg, ) from pandas.util._decorators import doc from pandas.core.dtypes.cast import ( can_hold_element, maybe_promote, ) from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_extension_array_dtype, is_hashable, is_integer, is_iterator, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_sequence, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( infer_fill_value, is_valid_na_for_dtype, isna, na_value_for_dtype, ) from pandas.core import algorithms as algos import pandas.core.common as com from pandas.core.construction import ( array as pd_array, extract_array, ) from pandas.core.indexers import ( check_array_indexer, is_list_like_indexer, is_scalar_indexer, length_of_indexer, ) from pandas.core.indexes.api import ( Index, MultiIndex, ) ABCSeries = cast( "Type[Series]", create_pandas_abc_type("ABCSeries", "_typ", ("series",)), ) The provided code snippet includes necessary dependencies for implementing the `maybe_convert_ix` function. Write a Python function `def maybe_convert_ix(*args)` to solve the following problem: We likely want to take the cross-product. Here is the function: def maybe_convert_ix(*args): """ We likely want to take the cross-product. """ for arg in args: if not isinstance(arg, (np.ndarray, list, ABCSeries, Index)): return args return np.ix_(*args)
We likely want to take the cross-product.
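A sketch of the two outcomes: when every argument is array/list/Series/Index-like, the arguments become an open mesh via `np.ix_`; anything else (e.g. a slice) short-circuits and the arguments pass through untouched:

>>> row_sel, col_sel = maybe_convert_ix([0, 1], [1, 2])   # cross-product mesh
>>> row_sel.shape, col_sel.shape
((2, 1), (1, 2))
>>> maybe_convert_ix(slice(None), [0])                    # passed through
(slice(None, None, None), [0])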
173,023
from __future__ import annotations from contextlib import suppress import sys from typing import ( TYPE_CHECKING, Hashable, Sequence, TypeVar, cast, final, ) import warnings import numpy as np from pandas._config import using_copy_on_write from pandas._libs.indexing import NDFrameIndexerBase from pandas._libs.lib import item_from_zerodim from pandas._typing import ( Axis, AxisInt, ) from pandas.compat import PYPY from pandas.errors import ( AbstractMethodError, ChainedAssignmentError, IndexingError, InvalidIndexError, LossySetitemError, _chained_assignment_msg, ) from pandas.util._decorators import doc from pandas.core.dtypes.cast import ( can_hold_element, maybe_promote, ) from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_extension_array_dtype, is_hashable, is_integer, is_iterator, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_sequence, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( infer_fill_value, is_valid_na_for_dtype, isna, na_value_for_dtype, ) from pandas.core import algorithms as algos import pandas.core.common as com from pandas.core.construction import ( array as pd_array, extract_array, ) from pandas.core.indexers import ( check_array_indexer, is_list_like_indexer, is_scalar_indexer, length_of_indexer, ) from pandas.core.indexes.api import ( Index, MultiIndex, ) The provided code snippet includes necessary dependencies for implementing the `is_nested_tuple` function. Write a Python function `def is_nested_tuple(tup, labels) -> bool` to solve the following problem: Returns ------- bool Here is the function: def is_nested_tuple(tup, labels) -> bool: """ Returns ------- bool """ # check for a compatible nested tuple and multiindexes among the axes if not isinstance(tup, tuple): return False for k in tup: if is_list_like(k) or isinstance(k, slice): return isinstance(labels, MultiIndex) return False
Returns ------- bool
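Worked examples: the check only fires for a tuple containing a list-like (or slice) element when the axis labels are a MultiIndex:

>>> mi = pd.MultiIndex.from_product([["a", "b"], [1, 2]])
>>> is_nested_tuple((["a", "b"], 1), mi)
True
>>> is_nested_tuple(("a", 1), mi)
False
>>> is_nested_tuple((["a", "b"], 1), pd.Index(["x", "y"]))
False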
173,024
from __future__ import annotations from contextlib import suppress import sys from typing import ( TYPE_CHECKING, Hashable, Sequence, TypeVar, cast, final, ) import warnings import numpy as np from pandas._config import using_copy_on_write from pandas._libs.indexing import NDFrameIndexerBase from pandas._libs.lib import item_from_zerodim from pandas._typing import ( Axis, AxisInt, ) from pandas.compat import PYPY from pandas.errors import ( AbstractMethodError, ChainedAssignmentError, IndexingError, InvalidIndexError, LossySetitemError, _chained_assignment_msg, ) from pandas.util._decorators import doc from pandas.core.dtypes.cast import ( can_hold_element, maybe_promote, ) from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_extension_array_dtype, is_hashable, is_integer, is_iterator, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_sequence, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( infer_fill_value, is_valid_na_for_dtype, isna, na_value_for_dtype, ) from pandas.core import algorithms as algos import pandas.core.common as com from pandas.core.construction import ( array as pd_array, extract_array, ) from pandas.core.indexers import ( check_array_indexer, is_list_like_indexer, is_scalar_indexer, length_of_indexer, ) from pandas.core.indexes.api import ( Index, MultiIndex, ) The provided code snippet includes necessary dependencies for implementing the `is_label_like` function. Write a Python function `def is_label_like(key) -> bool` to solve the following problem: Returns ------- bool Here is the function: def is_label_like(key) -> bool: """ Returns ------- bool """ # select a label or row return ( not isinstance(key, slice) and not is_list_like_indexer(key) and key is not Ellipsis )
Returns ------- bool
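Worked examples — slices, list-likes, and Ellipsis are all rejected; plain scalar labels pass:

>>> is_label_like("b")
True
>>> is_label_like(slice(1, 3))
False
>>> is_label_like([1, 2])
False
>>> is_label_like(...)
False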
173,025
from __future__ import annotations from contextlib import suppress import sys from typing import ( TYPE_CHECKING, Hashable, Sequence, TypeVar, cast, final, ) import warnings import numpy as np from pandas._config import using_copy_on_write from pandas._libs.indexing import NDFrameIndexerBase from pandas._libs.lib import item_from_zerodim from pandas._typing import ( Axis, AxisInt, ) from pandas.compat import PYPY from pandas.errors import ( AbstractMethodError, ChainedAssignmentError, IndexingError, InvalidIndexError, LossySetitemError, _chained_assignment_msg, ) from pandas.util._decorators import doc from pandas.core.dtypes.cast import ( can_hold_element, maybe_promote, ) from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_extension_array_dtype, is_hashable, is_integer, is_iterator, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_sequence, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( infer_fill_value, is_valid_na_for_dtype, isna, na_value_for_dtype, ) from pandas.core import algorithms as algos import pandas.core.common as com from pandas.core.construction import ( array as pd_array, extract_array, ) from pandas.core.indexers import ( check_array_indexer, is_list_like_indexer, is_scalar_indexer, length_of_indexer, ) from pandas.core.indexes.api import ( Index, MultiIndex, ) The provided code snippet includes necessary dependencies for implementing the `need_slice` function. Write a Python function `def need_slice(obj: slice) -> bool` to solve the following problem: Returns ------- bool Here is the function: def need_slice(obj: slice) -> bool: """ Returns ------- bool """ return ( obj.start is not None or obj.stop is not None or (obj.step is not None and obj.step != 1) )
Returns ------- bool
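Worked examples: only the no-op slice (all-None bounds, step of None or 1) reports that no slicing is needed:

>>> need_slice(slice(None))
False
>>> need_slice(slice(None, None, 1))
False
>>> need_slice(slice(2, None))
True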
173,026
from __future__ import annotations from contextlib import suppress import sys from typing import ( TYPE_CHECKING, Hashable, Sequence, TypeVar, cast, final, ) import warnings import numpy as np from pandas._config import using_copy_on_write from pandas._libs.indexing import NDFrameIndexerBase from pandas._libs.lib import item_from_zerodim from pandas._typing import ( Axis, AxisInt, ) from pandas.compat import PYPY from pandas.errors import ( AbstractMethodError, ChainedAssignmentError, IndexingError, InvalidIndexError, LossySetitemError, _chained_assignment_msg, ) from pandas.util._decorators import doc from pandas.core.dtypes.cast import ( can_hold_element, maybe_promote, ) from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, is_extension_array_dtype, is_hashable, is_integer, is_iterator, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_sequence, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.missing import ( infer_fill_value, is_valid_na_for_dtype, isna, na_value_for_dtype, ) from pandas.core import algorithms as algos import pandas.core.common as com from pandas.core.construction import ( array as pd_array, extract_array, ) from pandas.core.indexers import ( check_array_indexer, is_list_like_indexer, is_scalar_indexer, length_of_indexer, ) from pandas.core.indexes.api import ( Index, MultiIndex, ) The provided code snippet includes necessary dependencies for implementing the `check_dict_or_set_indexers` function. Write a Python function `def check_dict_or_set_indexers(key) -> None` to solve the following problem: Check if the indexer is or contains a dict or set, which is no longer allowed. Here is the function: def check_dict_or_set_indexers(key) -> None: """ Check if the indexer is or contains a dict or set, which is no longer allowed. """ if ( isinstance(key, set) or isinstance(key, tuple) and any(isinstance(x, set) for x in key) ): raise TypeError( "Passing a set as an indexer is not supported. Use a list instead." ) if ( isinstance(key, dict) or isinstance(key, tuple) and any(isinstance(x, dict) for x in key) ): raise TypeError( "Passing a dict as an indexer is not supported. Use a list instead." )
Check if the indexer is or contains a dict or set, which is no longer allowed.
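A sketch of the failure mode it guards against (lists remain the supported spelling):

>>> check_dict_or_set_indexers({"a", "b"})
Traceback (most recent call last):
    ...
TypeError: Passing a set as an indexer is not supported. Use a list instead.
>>> check_dict_or_set_indexers(["a", "b"])   # fine; returns None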
173,027
from __future__ import annotations import collections from collections import abc import datetime import functools from io import StringIO import itertools import sys from textwrap import dedent from typing import ( TYPE_CHECKING, Any, Callable, Hashable, Iterable, Iterator, Literal, Mapping, Sequence, cast, overload, ) import warnings import numpy as np from numpy import ma from pandas._config import ( get_option, using_copy_on_write, ) from pandas._libs import ( algos as libalgos, lib, properties, ) from pandas._libs.hashtable import duplicated from pandas._libs.lib import ( NoDefault, is_range_indexer, no_default, ) from pandas._typing import ( AggFuncType, AlignJoin, AnyAll, AnyArrayLike, ArrayLike, Axes, Axis, AxisInt, ColspaceArgType, CompressionOptions, CorrelationMethod, DropKeep, Dtype, DtypeObj, FilePath, FillnaOptions, FloatFormatType, FormattersType, Frequency, IgnoreRaise, IndexKeyFunc, IndexLabel, Level, MergeHow, NaPosition, PythonFuncType, QuantileInterpolation, ReadBuffer, Renamer, Scalar, SortKind, StorageOptions, Suffixes, TimedeltaConvertibleTypes, TimestampConvertibleTypes, ValueKeyFunc, WriteBuffer, npt, ) from pandas.compat import PYPY from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import ( function as nv, np_percentile_argname, ) from pandas.errors import ( ChainedAssignmentError, InvalidIndexError, _chained_assignment_msg, ) from pandas.util._decorators import ( Appender, Substitution, doc, ) from pandas.util._exceptions import find_stack_level from pandas.util._validators import ( validate_ascending, validate_bool_kwarg, validate_percentile, ) from pandas.core.dtypes.cast import ( LossySetitemError, can_hold_element, construct_1d_arraylike_from_scalar, construct_2d_arraylike_from_scalar, find_common_type, infer_dtype_from_scalar, invalidate_string_dtypes, maybe_box_native, maybe_downcast_to_dtype, ) from pandas.core.dtypes.common import ( infer_dtype_from_object, is_1d_only_ea_dtype, is_bool_dtype, is_dataclass, is_dict_like, is_dtype_equal, is_extension_array_dtype, is_float, is_float_dtype, is_hashable, is_integer, is_integer_dtype, is_iterator, is_list_like, is_scalar, is_sequence, needs_i8_conversion, pandas_dtype, ) from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.missing import ( isna, notna, ) from pandas.core import ( algorithms, common as com, nanops, ops, ) from pandas.core.accessor import CachedAccessor from pandas.core.apply import ( reconstruct_func, relabel_result, ) from pandas.core.array_algos.take import take_2d_multi from pandas.core.arraylike import OpsMixin from pandas.core.arrays import ( DatetimeArray, ExtensionArray, PeriodArray, TimedeltaArray, ) from pandas.core.arrays.sparse import SparseFrameAccessor from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, sanitize_array, sanitize_masked_array, ) from pandas.core.generic import NDFrame from pandas.core.indexers import check_key_length from pandas.core.indexes.api import ( DatetimeIndex, Index, PeriodIndex, default_index, ensure_index, ensure_index_from_sequences, ) from pandas.core.indexes.multi import ( MultiIndex, maybe_droplevels, ) from pandas.core.indexing import ( check_bool_indexer, check_dict_or_set_indexers, ) from pandas.core.internals import ( ArrayManager, BlockManager, ) from pandas.core.internals.construction import ( arrays_to_mgr, dataclasses_to_dicts, dict_to_mgr, mgr_to_mgr, ndarray_to_mgr, nested_data_to_arrays, rec_array_to_mgr, reorder_arrays, to_arrays, treat_as_nested, 
# Module-level helper used by ``DataFrame.from_dict(orient="index")`` to
# invert a nested {index -> {column -> value}} mapping into
# {column -> {index -> value}}.
def _from_nested_dict(data) -> collections.defaultdict:
    new_data: collections.defaultdict = collections.defaultdict(dict)
    for index, s in data.items():
        for col, v in s.items():
            new_data[col][index] = v
    return new_data
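# A tiny usage sketch with illustrative values:
#   _from_nested_dict({"row1": {"col1": 1}, "row2": {"col1": 2}})
#   -> defaultdict(<class 'dict'>, {"col1": {"row1": 1, "row2": 2}})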
from __future__ import annotations import collections from collections import abc import datetime import functools from io import StringIO import itertools import sys from textwrap import dedent from typing import ( TYPE_CHECKING, Any, Callable, Hashable, Iterable, Iterator, Literal, Mapping, Sequence, cast, overload, ) import warnings import numpy as np from numpy import ma from pandas._config import ( get_option, using_copy_on_write, ) from pandas._libs import ( algos as libalgos, lib, properties, ) from pandas._libs.hashtable import duplicated from pandas._libs.lib import ( NoDefault, is_range_indexer, no_default, ) from pandas._typing import ( AggFuncType, AlignJoin, AnyAll, AnyArrayLike, ArrayLike, Axes, Axis, AxisInt, ColspaceArgType, CompressionOptions, CorrelationMethod, DropKeep, Dtype, DtypeObj, FilePath, FillnaOptions, FloatFormatType, FormattersType, Frequency, IgnoreRaise, IndexKeyFunc, IndexLabel, Level, MergeHow, NaPosition, PythonFuncType, QuantileInterpolation, ReadBuffer, Renamer, Scalar, SortKind, StorageOptions, Suffixes, TimedeltaConvertibleTypes, TimestampConvertibleTypes, ValueKeyFunc, WriteBuffer, npt, ) from pandas.compat import PYPY from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import ( function as nv, np_percentile_argname, ) from pandas.errors import ( ChainedAssignmentError, InvalidIndexError, _chained_assignment_msg, ) from pandas.util._decorators import ( Appender, Substitution, doc, ) from pandas.util._exceptions import find_stack_level from pandas.util._validators import ( validate_ascending, validate_bool_kwarg, validate_percentile, ) from pandas.core.dtypes.cast import ( LossySetitemError, can_hold_element, construct_1d_arraylike_from_scalar, construct_2d_arraylike_from_scalar, find_common_type, infer_dtype_from_scalar, invalidate_string_dtypes, maybe_box_native, maybe_downcast_to_dtype, ) from pandas.core.dtypes.common import ( infer_dtype_from_object, is_1d_only_ea_dtype, is_bool_dtype, is_dataclass, is_dict_like, is_dtype_equal, is_extension_array_dtype, is_float, is_float_dtype, is_hashable, is_integer, is_integer_dtype, is_iterator, is_list_like, is_scalar, is_sequence, needs_i8_conversion, pandas_dtype, ) from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.missing import ( isna, notna, ) from pandas.core import ( algorithms, common as com, nanops, ops, ) from pandas.core.accessor import CachedAccessor from pandas.core.apply import ( reconstruct_func, relabel_result, ) from pandas.core.array_algos.take import take_2d_multi from pandas.core.arraylike import OpsMixin from pandas.core.arrays import ( DatetimeArray, ExtensionArray, PeriodArray, TimedeltaArray, ) from pandas.core.arrays.sparse import SparseFrameAccessor from pandas.core.construction import ( ensure_wrapped_if_datetimelike, extract_array, sanitize_array, sanitize_masked_array, ) from pandas.core.generic import NDFrame from pandas.core.indexers import check_key_length from pandas.core.indexes.api import ( DatetimeIndex, Index, PeriodIndex, default_index, ensure_index, ensure_index_from_sequences, ) from pandas.core.indexes.multi import ( MultiIndex, maybe_droplevels, ) from pandas.core.indexing import ( check_bool_indexer, check_dict_or_set_indexers, ) from pandas.core.internals import ( ArrayManager, BlockManager, ) from pandas.core.internals.construction import ( arrays_to_mgr, dataclasses_to_dicts, dict_to_mgr, mgr_to_mgr, ndarray_to_mgr, nested_data_to_arrays, rec_array_to_mgr, reorder_arrays, to_arrays, treat_as_nested, 
) from pandas.core.methods import selectn from pandas.core.reshape.melt import melt from pandas.core.series import Series from pandas.core.shared_docs import _shared_docs from pandas.core.sorting import ( get_group_index, lexsort_indexer, nargsort, ) from pandas.io.common import get_handle from pandas.io.formats import ( console, format as fmt, ) from pandas.io.formats.info import ( INFO_DOCSTRING, DataFrameInfo, frame_sub_kwargs, ) import pandas.plotting class DataFrame(NDFrame, OpsMixin): """ Two-dimensional, size-mutable, potentially heterogeneous tabular data. Data structure also contains labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure. Parameters ---------- data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame Dict can contain Series, arrays, constants, dataclass or list-like objects. If data is a dict, column order follows insertion-order. If a dict contains Series which have an index defined, it is aligned by its index. This alignment also occurs if data is a Series or a DataFrame itself. Alignment is done on Series/DataFrame inputs. If data is a list of dicts, column order follows insertion-order. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided. columns : Index or array-like Column labels to use for resulting frame when data does not have them, defaulting to RangeIndex(0, 1, 2, ..., n). If data contains column labels, will perform column selection instead. dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer. copy : bool or None, default None Copy data from inputs. For dict data, the default of None behaves like ``copy=True``. For DataFrame or 2d ndarray input, the default of None behaves like ``copy=False``. If data is a dict containing one or more Series (possibly of different dtypes), ``copy=False`` will ensure that these inputs are not copied. .. versionchanged:: 1.3.0 See Also -------- DataFrame.from_records : Constructor from tuples, also record arrays. DataFrame.from_dict : From dicts of Series, arrays, or dicts. read_csv : Read a comma-separated values (csv) file into DataFrame. read_table : Read general delimited file into DataFrame. read_clipboard : Read text from clipboard into DataFrame. Notes ----- Please reference the :ref:`User Guide <basics.dataframe>` for more information. Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. >>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = pd.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from a dictionary including Series: >>> d = {'col1': [0, 1, 2, 3], 'col2': pd.Series([2, 3], index=[2, 3])} >>> pd.DataFrame(data=d, index=[0, 1, 2, 3]) col1 col2 0 0 NaN 1 1 NaN 2 2 2.0 3 3 3.0 Constructing DataFrame from numpy ndarray: >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ... columns=['a', 'b', 'c']) >>> df2 a b c 0 1 2 3 1 4 5 6 2 7 8 9 Constructing DataFrame from a numpy ndarray that has labeled columns: >>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], ... dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")]) >>> df3 = pd.DataFrame(data, columns=['c', 'a']) ... 
>>> df3 c a 0 3 1 1 6 4 2 9 7 Constructing DataFrame from dataclass: >>> from dataclasses import make_dataclass >>> Point = make_dataclass("Point", [("x", int), ("y", int)]) >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)]) x y 0 0 0 1 0 3 2 2 3 Constructing DataFrame from Series/DataFrame: >>> ser = pd.Series([1, 2, 3], index=["a", "b", "c"]) >>> df = pd.DataFrame(data=ser, index=["a", "c"]) >>> df 0 a 1 c 3 >>> df1 = pd.DataFrame([1, 2, 3], index=["a", "b", "c"], columns=["x"]) >>> df2 = pd.DataFrame(data=df1, index=["a", "c"]) >>> df2 x a 1 c 3 """ _internal_names_set = {"columns", "index"} | NDFrame._internal_names_set _typ = "dataframe" _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray) _accessors: set[str] = {"sparse"} _hidden_attrs: frozenset[str] = NDFrame._hidden_attrs | frozenset([]) _mgr: BlockManager | ArrayManager def _constructor(self) -> Callable[..., DataFrame]: return DataFrame _constructor_sliced: Callable[..., Series] = Series # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index: Axes | None = None, columns: Axes | None = None, dtype: Dtype | None = None, copy: bool | None = None, ) -> None: if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._mgr if not copy: # if not copying data, ensure to still return a shallow copy # to avoid the result sharing the same Manager data = data.copy(deep=False) if isinstance(data, (BlockManager, ArrayManager)): if using_copy_on_write(): data = data.copy(deep=False) # first check if a Manager is passed without any other arguments # -> use fastpath (without checking Manager type) if index is None and columns is None and dtype is None and not copy: # GH#33357 fastpath NDFrame.__init__(self, data) return manager = get_option("mode.data_manager") # GH47215 if index is not None and isinstance(index, set): raise ValueError("index cannot be a set") if columns is not None and isinstance(columns, set): raise ValueError("columns cannot be a set") if copy is None: if isinstance(data, dict): # retain pre-GH#38939 default behavior copy = True elif ( manager == "array" and isinstance(data, (np.ndarray, ExtensionArray)) and data.ndim == 2 ): # INFO(ArrayManager) by default copy the 2D input array to get # contiguous 1D arrays copy = True elif using_copy_on_write() and not isinstance( data, (Index, DataFrame, Series) ): copy = True else: copy = False if data is None: index = index if index is not None else default_index(0) columns = columns if columns is not None else default_index(0) dtype = dtype if dtype is not None else pandas_dtype(object) data = [] if isinstance(data, (BlockManager, ArrayManager)): mgr = self._init_mgr( data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy ) elif isinstance(data, dict): # GH#38939 de facto copy defaults to False only in non-dict cases mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager) elif isinstance(data, ma.MaskedArray): from numpy.ma import mrecords # masked recarray if isinstance(data, mrecords.MaskedRecords): raise TypeError( "MaskedRecords are not supported. Pass " "{name: data[name] for name in data.dtype.names} " "instead" ) # a masked array data = sanitize_masked_array(data) mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) elif isinstance(data, (np.ndarray, Series, Index, ExtensionArray)): if data.dtype.names: # i.e. 
numpy structured array data = cast(np.ndarray, data) mgr = rec_array_to_mgr( data, index, columns, dtype, copy, typ=manager, ) elif getattr(data, "name", None) is not None: # i.e. Series/Index with non-None name _copy = copy if using_copy_on_write() else True mgr = dict_to_mgr( # error: Item "ndarray" of "Union[ndarray, Series, Index]" has no # attribute "name" {data.name: data}, # type: ignore[union-attr] index, columns, dtype=dtype, typ=manager, copy=_copy, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) # For data is list-like, or Iterable (will consume into list) elif is_list_like(data): if not isinstance(data, abc.Sequence): if hasattr(data, "__array__"): # GH#44616 big perf improvement for e.g. pytorch tensor data = np.asarray(data) else: data = list(data) if len(data) > 0: if is_dataclass(data[0]): data = dataclasses_to_dicts(data) if not isinstance(data, np.ndarray) and treat_as_nested(data): # exclude ndarray as we may have cast it a few lines above if columns is not None: columns = ensure_index(columns) arrays, columns, index = nested_data_to_arrays( # error: Argument 3 to "nested_data_to_arrays" has incompatible # type "Optional[Collection[Any]]"; expected "Optional[Index]" data, columns, index, # type: ignore[arg-type] dtype, ) mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, typ=manager, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) else: mgr = dict_to_mgr( {}, index, columns if columns is not None else default_index(0), dtype=dtype, typ=manager, ) # For data is scalar else: if index is None or columns is None: raise ValueError("DataFrame constructor not properly called!") index = ensure_index(index) columns = ensure_index(columns) if not dtype: dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True) # For data is a scalar extension dtype if isinstance(dtype, ExtensionDtype): # TODO(EA2D): special case not needed with 2D EAs values = [ construct_1d_arraylike_from_scalar(data, len(index), dtype) for _ in range(len(columns)) ] mgr = arrays_to_mgr(values, columns, index, dtype=None, typ=manager) else: arr2d = construct_2d_arraylike_from_scalar( data, len(index), len(columns), dtype, copy, ) mgr = ndarray_to_mgr( arr2d, index, columns, dtype=arr2d.dtype, copy=False, typ=manager, ) # ensure correct Manager type according to settings mgr = mgr_to_mgr(mgr, typ=manager) NDFrame.__init__(self, mgr) # ---------------------------------------------------------------------- def __dataframe__( self, nan_as_null: bool = False, allow_copy: bool = True ) -> DataFrameXchg: """ Return the dataframe interchange object implementing the interchange protocol. Parameters ---------- nan_as_null : bool, default False Whether to tell the DataFrame to overwrite null values in the data with ``NaN`` (or ``NaT``). allow_copy : bool, default True Whether to allow memory copying when exporting. If set to False it would cause non-zero-copy exports to fail. Returns ------- DataFrame interchange object The object which consuming library can use to ingress the dataframe. Notes ----- Details on the interchange protocol: https://data-apis.org/dataframe-protocol/latest/index.html `nan_as_null` currently has no effect; once support for nullable extension dtypes is added, this value should be propagated to columns. 
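Examples
--------
A minimal round-trip sketch; the consumer here is
:func:`pandas.api.interchange.from_dataframe`, though any
protocol-aware library could stand in for it.

>>> df_not_necessarily_pandas = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
>>> interchange_object = df_not_necessarily_pandas.__dataframe__()
>>> pd.api.interchange.from_dataframe(interchange_object)
   A  B
0  1  3
1  2  4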
""" from pandas.core.interchange.dataframe import PandasDataFrameXchg return PandasDataFrameXchg(self, nan_as_null, allow_copy) # ---------------------------------------------------------------------- def axes(self) -> list[Index]: """ Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns] def shape(self) -> tuple[int, int]: """ Return a tuple representing the dimensionality of the DataFrame. See Also -------- ndarray.shape : Tuple of array dimensions. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self.index), len(self.columns) def _is_homogeneous_type(self) -> bool: """ Whether all the columns in a DataFrame have the same type. Returns ------- bool See Also -------- Index._is_homogeneous_type : Whether the object has a single dtype. MultiIndex._is_homogeneous_type : Whether all the levels of a MultiIndex have the same dtype. Examples -------- >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type True >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type False Items with the same type but different sizes are considered different types. >>> DataFrame({ ... "A": np.array([1, 2], dtype=np.int32), ... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type False """ if isinstance(self._mgr, ArrayManager): return len({arr.dtype for arr in self._mgr.arrays}) == 1 if self._mgr.any_extension_types: return len({block.dtype for block in self._mgr.blocks}) == 1 else: return not self._is_mixed_type def _can_fast_transpose(self) -> bool: """ Can we transpose this DataFrame without creating any new array objects. """ if isinstance(self._mgr, ArrayManager): return False blocks = self._mgr.blocks if len(blocks) != 1: return False dtype = blocks[0].dtype # TODO(EA2D) special case would be unnecessary with 2D EAs return not is_1d_only_ea_dtype(dtype) def _values(self) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray: """ Analogue to ._values that may return a 2D ExtensionArray. """ mgr = self._mgr if isinstance(mgr, ArrayManager): if len(mgr.arrays) == 1 and not is_1d_only_ea_dtype(mgr.arrays[0].dtype): # error: Item "ExtensionArray" of "Union[ndarray, ExtensionArray]" # has no attribute "reshape" return mgr.arrays[0].reshape(-1, 1) # type: ignore[union-attr] return ensure_wrapped_if_datetimelike(self.values) blocks = mgr.blocks if len(blocks) != 1: return ensure_wrapped_if_datetimelike(self.values) arr = blocks[0].values if arr.ndim == 1: # non-2D ExtensionArray return self.values # more generally, whatever we allow in NDArrayBackedExtensionBlock arr = cast("np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray", arr) return arr.T # ---------------------------------------------------------------------- # Rendering Methods def _repr_fits_vertical_(self) -> bool: """ Check length against max_rows. """ max_rows = get_option("display.max_rows") return len(self) <= max_rows def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool: """ Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns. In case of non-interactive session, no boundaries apply. 
`ignore_width` is here so ipynb+HTML output can behave the way users expect. display.max_columns remains in effect. GH3541, GH3573 """ width, height = console.get_console_size() max_columns = get_option("display.max_columns") nb_columns = len(self.columns) # exceed max columns if (max_columns and nb_columns > max_columns) or ( (not ignore_width) and width and nb_columns > (width // 2) ): return False # used by repr_html under IPython notebook or scripts ignore terminal # dims if ignore_width or width is None or not console.in_interactive_session(): return True if get_option("display.width") is not None or console.in_ipython_frontend(): # check at least the column row for excessive width max_rows = 1 else: max_rows = get_option("display.max_rows") # when auto-detecting, so width=None and not in ipython front end # check whether repr fits horizontal by actually checking # the width of the rendered repr buf = StringIO() # only care about the stuff we'll actually print out # and to_string on entire frame may be expensive d = self if max_rows is not None: # unlimited rows # min of two, where one may be None d = d.iloc[: min(max_rows, len(d))] else: return True d.to_string(buf=buf) value = buf.getvalue() repr_width = max(len(line) for line in value.split("\n")) return repr_width < width def _info_repr(self) -> bool: """ True if the repr should show the info view. """ info_repr_option = get_option("display.large_repr") == "info" return info_repr_option and not ( self._repr_fits_horizontal_() and self._repr_fits_vertical_() ) def __repr__(self) -> str: """ Return a string representation for a particular DataFrame. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) return buf.getvalue() repr_params = fmt.get_dataframe_repr_params() return self.to_string(**repr_params) def _repr_html_(self) -> str | None: """ Return a html representation for a particular DataFrame. Mainly for IPython notebook. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) # need to escape the <class>, should be the first line. val = buf.getvalue().replace("<", r"&lt;", 1) val = val.replace(">", r"&gt;", 1) return f"<pre>{val}</pre>" if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") min_rows = get_option("display.min_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") formatter = fmt.DataFrameFormatter( self, columns=None, col_space=None, na_rep="NaN", formatters=None, float_format=None, sparsify=None, justify=None, index_names=True, header=True, index=True, bold_rows=True, escape=True, max_rows=max_rows, min_rows=min_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=".", ) return fmt.DataFrameRenderer(formatter).to_html(notebook=True) else: return None def to_string( self, buf: None = ..., columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> str: ... 
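# Typing stub: with a concrete ``buf`` the rendered text is written to the
# buffer and ``None`` is returned (an ``@overload`` decorator is assumed
# for these typing stubs).
@overload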
def to_string( self, buf: FilePath | WriteBuffer[str], columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> None: ... header_type="bool or sequence of str", header="Write out the column names. If a list of strings " "is given, it is assumed to be aliases for the " "column names", col_space_type="int, list or dict of int", col_space="The minimum width of each column. If a list of ints is given " "every integers corresponds with one column. If a dict is given, the key " "references the column, while the value defines the space to use.", ) def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[str] | None = None, col_space: int | list[int] | dict[Hashable, int] | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: fmt.FormattersType | None = None, float_format: fmt.FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool = False, decimal: str = ".", line_width: int | None = None, min_rows: int | None = None, max_colwidth: int | None = None, encoding: str | None = None, ) -> str | None: """ Render a DataFrame to a console-friendly tabular output. %(shared_params)s line_width : int, optional Width to wrap a line in characters. min_rows : int, optional The number of rows to display in the console in a truncated repr (when number of rows is above `max_rows`). max_colwidth : int, optional Max width to truncate each column in characters. By default, no limit. encoding : str, default "utf-8" Set character encoding. %(returns)s See Also -------- to_html : Convert DataFrame to HTML. Examples -------- >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]} >>> df = pd.DataFrame(d) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 """ from pandas import option_context with option_context("display.max_colwidth", max_colwidth): formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, min_rows=min_rows, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal, ) return fmt.DataFrameRenderer(formatter).to_string( buf=buf, encoding=encoding, line_width=line_width, ) # ---------------------------------------------------------------------- def style(self) -> Styler: """ Returns a Styler object. Contains methods for building a styled HTML representation of the DataFrame. See Also -------- io.formats.style.Styler : Helps style a DataFrame or Series according to the data with HTML and CSS. """ from pandas.io.formats.style import Styler return Styler(self) _shared_docs[ "items" ] = r""" Iterate over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. 
Yields ------ label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. Examples -------- >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.items(): ... print(f'label: {label}') ... print(f'content: {content}', sep='\n') ... label: species content: panda bear polar bear koala marsupial Name: species, dtype: object label: population content: panda 1864 polar 22000 koala 80000 Name: population, dtype: int64 """ def items(self) -> Iterable[tuple[Hashable, Series]]: if self.columns.is_unique and hasattr(self, "_item_cache"): for k in self.columns: yield k, self._get_item_cache(k) else: for i, k in enumerate(self.columns): yield k, self._ixs(i, axis=1) def iterrows(self) -> Iterable[tuple[Hashable, Series]]: """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : Series The data of the row as a Series. See Also -------- DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. """ columns = self.columns klass = self._constructor_sliced using_cow = using_copy_on_write() for k, v in zip(self.index, self.values): s = klass(v, index=columns, name=k).__finalize__(self) if using_cow and self._mgr.is_single_block: s._mgr.add_references(self._mgr) # type: ignore[arg-type] yield k, s def itertuples( self, index: bool = True, name: str | None = "Pandas" ) -> Iterable[tuple[Any, ...]]: """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "Pandas" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. 
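A quick sketch of that renaming, using a duplicated (hypothetical)
column label:

>>> df_dup = pd.DataFrame([[1, 2]], columns=['a', 'a'])
>>> next(df_dup.itertuples(index=False))
Pandas(a=1, _1=2)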
Examples -------- >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... Pandas(Index='dog', num_legs=4, num_wings=0) Pandas(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... Pandas(num_legs=4, num_wings=0) Pandas(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2) """ arrays = [] fields = list(self.columns) if index: arrays.append(self.index) fields.insert(0, "Index") # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) if name is not None: # https://github.com/python/mypy/issues/9046 # error: namedtuple() expects a string literal as the first argument itertuple = collections.namedtuple( # type: ignore[misc] name, fields, rename=True ) return map(itertuple._make, zip(*arrays)) # fallback to regular tuples return zip(*arrays) def __len__(self) -> int: """ Returns length of info axis, but here we use the index. """ return len(self.index) def dot(self, other: Series) -> Series: ... def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame: ... def dot(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of an other Series, DataFrame or a numpy array. It can also be called using ``self @ other`` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the matrix product with. Returns ------- Series or DataFrame If other is a Series, return the matrix product between self and other as a Series. If other is a DataFrame or a numpy.array, return the matrix product of self and other in a DataFrame of a np.array. See Also -------- Series.dot: Similar method for Series. Notes ----- The dimensions of DataFrame and other must be compatible in order to compute the matrix multiplication. In addition, the column names of DataFrame and the index of other must contain the same values, as they will be aligned prior to the multiplication. The dot method for Series computes the inner product, instead of the matrix product here. Examples -------- Here we multiply a DataFrame with a Series. >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) >>> s = pd.Series([1, 1, 2, 1]) >>> df.dot(s) 0 -4 1 5 dtype: int64 Here we multiply a DataFrame with another DataFrame. >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(other) 0 1 0 1 4 1 2 2 Note that the dot method give the same result as @ >>> df @ other 0 1 0 1 4 1 2 2 The dot method works also if other is an np.array. >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(arr) 0 1 0 1 4 1 2 2 Note how shuffling of the objects does not change the result. 
>>> s2 = s.reindex([1, 0, 2, 3]) >>> df.dot(s2) 0 -4 1 5 dtype: int64 """ if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) if len(common) > len(self.columns) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(columns=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right._values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: raise ValueError( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, DataFrame): return self._constructor( np.dot(lvals, rvals), index=left.index, columns=other.columns, copy=False, ) elif isinstance(other, Series): return self._constructor_sliced( np.dot(lvals, rvals), index=left.index, copy=False ) elif isinstance(rvals, (np.ndarray, Index)): result = np.dot(lvals, rvals) if result.ndim == 2: return self._constructor(result, index=left.index, copy=False) else: return self._constructor_sliced(result, index=left.index, copy=False) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other: Series) -> Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(other) def __rmatmul__(self, other) -> DataFrame: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ try: return self.T.dot(np.transpose(other)).T except ValueError as err: if "shape mismatch" not in str(err): raise # GH#21581 give exception message for original shapes msg = f"shapes {np.shape(other)} and {self.shape} not aligned" raise ValueError(msg) from err # ---------------------------------------------------------------------- # IO methods (to / from other formats) def from_dict( cls, data: dict, orient: str = "columns", dtype: Dtype | None = None, columns: Axes | None = None, ) -> DataFrame: """ Construct DataFrame from dict of array-like or dicts. Creates DataFrame object from dictionary by columns or by index allowing dtype specification. Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. orient : {'columns', 'index', 'tight'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. If 'tight', assume a dict with keys ['index', 'columns', 'data', 'index_names', 'column_names']. .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument dtype : dtype, default None Data type to force after DataFrame construction, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'`` or ``orient='tight'``. Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from structured ndarray, sequence of tuples or dicts, or DataFrame. DataFrame : DataFrame object creation using constructor. DataFrame.to_dict : Convert the DataFrame to a dictionary. 
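Notes
-----
A ``'tight'`` dictionary produced by :meth:`DataFrame.to_dict` round-trips
through this constructor; a small sketch:

>>> df = pd.DataFrame({'x': [1, 2]})
>>> pd.DataFrame.from_dict(df.to_dict('tight'), orient='tight')
   x
0  1
1  2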
Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data, orient='index') 0 1 2 3 row_1 3 2 1 0 row_2 a b c d When using the 'index' orientation, the column names can be specified manually: >>> pd.DataFrame.from_dict(data, orient='index', ... columns=['A', 'B', 'C', 'D']) A B C D row_1 3 2 1 0 row_2 a b c d Specify ``orient='tight'`` to create the DataFrame using a 'tight' format: >>> data = {'index': [('a', 'b'), ('a', 'c')], ... 'columns': [('x', 1), ('y', 2)], ... 'data': [[1, 3], [2, 4]], ... 'index_names': ['n1', 'n2'], ... 'column_names': ['z1', 'z2']} >>> pd.DataFrame.from_dict(data, orient='tight') z1 x y z2 1 2 n1 n2 a b 1 3 c 2 4 """ index = None orient = orient.lower() if orient == "index": if len(data) > 0: # TODO speed up Series case if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: index = list(data.keys()) # error: Incompatible types in assignment (expression has type # "List[Any]", variable has type "Dict[Any, Any]") data = list(data.values()) # type: ignore[assignment] elif orient in ("columns", "tight"): if columns is not None: raise ValueError(f"cannot use columns parameter with orient='{orient}'") else: # pragma: no cover raise ValueError( f"Expected 'index', 'columns' or 'tight' for orient parameter. " f"Got '{orient}' instead" ) if orient != "tight": return cls(data, index=index, columns=columns, dtype=dtype) else: realdata = data["data"] def create_index(indexlist, namelist): index: Index if len(namelist) > 1: index = MultiIndex.from_tuples(indexlist, names=namelist) else: index = Index(indexlist, name=namelist[0]) return index index = create_index(data["index"], data["index_names"]) columns = create_index(data["columns"], data["column_names"]) return cls(realdata, index=index, columns=columns, dtype=dtype) def to_numpy( self, dtype: npt.DTypeLike | None = None, copy: bool = False, na_value: object = lib.no_default, ) -> np.ndarray: """ Convert the DataFrame to a NumPy array. By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the results dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray`. copy : bool, default False Whether to ensure that the returned value is not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. na_value : Any, optional The value to use for missing values. The default value depends on `dtype` and the dtypes of the DataFrame columns. .. versionadded:: 1.1.0 Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogeneous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. 
, 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) """ if dtype is not None: dtype = np.dtype(dtype) result = self._mgr.as_array(dtype=dtype, copy=copy, na_value=na_value) if result.dtype is not dtype: result = np.array(result, dtype=dtype, copy=False) return result def _create_data_for_split_and_tight_to_dict( self, are_all_object_dtype_cols: bool, object_dtype_indices: list[int] ) -> list: """ Simple helper method to create data for to ``to_dict(orient="split")`` and ``to_dict(orient="tight")`` to create the main output data """ if are_all_object_dtype_cols: data = [ list(map(maybe_box_native, t)) for t in self.itertuples(index=False, name=None) ] else: data = [list(t) for t in self.itertuples(index=False, name=None)] if object_dtype_indices: # If we have object_dtype_cols, apply maybe_box_naive after list # comprehension for perf for row in data: for i in object_dtype_indices: row[i] = maybe_box_native(row[i]) return data def to_dict( self, orient: Literal["dict", "list", "series", "split", "tight", "index"] = ..., into: type[dict] = ..., ) -> dict: ... def to_dict(self, orient: Literal["records"], into: type[dict] = ...) -> list[dict]: ... def to_dict( self, orient: Literal[ "dict", "list", "series", "split", "tight", "records", "index" ] = "dict", into: type[dict] = dict, index: bool = True, ) -> dict | list[dict]: """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'tight' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values], 'index_names' -> [index.names], 'column_names' -> [column.names]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. index : bool, default True Whether to include the index item (and index_names item if `orient` is 'tight') in the returned dictionary. Can only be ``False`` when `orient` is 'split' or 'tight'. .. versionadded:: 2.0.0 Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. See Also -------- DataFrame.from_dict: Create a DataFrame from a dictionary. DataFrame.to_json: Convert a DataFrame to JSON format. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df.to_dict() {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}} You can specify the return orientation. 
>>> df.to_dict('series') {'col1': row1 1 row2 2 Name: col1, dtype: int64, 'col2': row1 0.50 row2 0.75 Name: col2, dtype: float64} >>> df.to_dict('split') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]} >>> df.to_dict('records') [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}] >>> df.to_dict('index') {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} >>> df.to_dict('tight') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]], 'index_names': [None], 'column_names': [None]} You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}), defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})] """ from pandas.core.methods.to_dict import to_dict return to_dict(self, orient, into, index) def to_gbq( self, destination_table: str, project_id: str | None = None, chunksize: int | None = None, reauth: bool = False, if_exists: str = "fail", auth_local_webserver: bool = True, table_schema: list[dict[str, str]] | None = None, location: str | None = None, progress_bar: bool = True, credentials=None, ) -> None: """ Write a DataFrame to a Google BigQuery table. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. Parameters ---------- destination_table : str Name of table to be written, in the form ``dataset.tablename``. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. chunksize : int, optional Number of rows to be inserted in each chunk from the dataframe. Set to ``None`` to load the whole dataframe at once. reauth : bool, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. if_exists : str, default 'fail' Behavior when the destination table exists. Value can be one of: ``'fail'`` If table exists raise pandas_gbq.gbq.TableCreationError. ``'replace'`` If table exists, drop it, recreate it, and insert data. ``'append'`` If table exists, insert data. Create if does not exist. auth_local_webserver : bool, default True Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. _console flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. .. versionchanged:: 1.5.0 Default value is changed to ``True``. Google has deprecated the ``auth_local_webserver = False`` `"out of band" (copy-paste) flow <https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html?m=1#disallowed-oob>`_. table_schema : list of dicts, optional List of BigQuery table fields to which according DataFrame columns conform to, e.g. ``[{'name': 'col1', 'type': 'STRING'},...]``. 
If schema is not provided, it will be generated according to dtypes of DataFrame columns. See BigQuery API documentation on available names of a field. *New in version 0.3.1 of pandas-gbq*. location : str, optional Location where the load job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of the target dataset. *New in version 0.5.0 of pandas-gbq*. progress_bar : bool, default True Use the library `tqdm` to show the progress bar for the upload, chunk by chunk. *New in version 0.5.0 of pandas-gbq*. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. *New in version 0.8.0 of pandas-gbq*. See Also -------- pandas_gbq.to_gbq : This function in the pandas-gbq library. read_gbq : Read a DataFrame from Google BigQuery. """ from pandas.io import gbq gbq.to_gbq( self, destination_table, project_id=project_id, chunksize=chunksize, reauth=reauth, if_exists=if_exists, auth_local_webserver=auth_local_webserver, table_schema=table_schema, location=location, progress_bar=progress_bar, credentials=credentials, ) def from_records( cls, data, index=None, exclude=None, columns=None, coerce_float: bool = False, nrows: int | None = None, ) -> DataFrame: """ Convert structured or record ndarray to DataFrame. Creates a DataFrame object from a structured ndarray, sequence of tuples or dicts, or DataFrame. Parameters ---------- data : structured ndarray, sequence of tuples or dicts, or DataFrame Structured input data. index : str, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use. exclude : sequence, default None Columns or fields to exclude. columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns). coerce_float : bool, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. nrows : int, default None Number of rows to read if data is an iterator. Returns ------- DataFrame See Also -------- DataFrame.from_dict : DataFrame from dict of array-like or dicts. DataFrame : DataFrame object creation using constructor. Examples -------- Data can be provided as a structured ndarray: >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')], ... dtype=[('col_1', 'i4'), ('col_2', 'U1')]) >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of dicts: >>> data = [{'col_1': 3, 'col_2': 'a'}, ... {'col_1': 2, 'col_2': 'b'}, ... {'col_1': 1, 'col_2': 'c'}, ... 
{'col_1': 0, 'col_2': 'd'}] >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of tuples with corresponding columns: >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')] >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2']) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d """ if isinstance(data, DataFrame): if columns is not None: if is_scalar(columns): columns = [columns] data = data[columns] if index is not None: data = data.set_index(index) if exclude is not None: data = data.drop(columns=exclude) return data.copy(deep=False) result_index = None # Make a copy of the input columns so we can modify it if columns is not None: columns = ensure_index(columns) def maybe_reorder( arrays: list[ArrayLike], arr_columns: Index, columns: Index, index ) -> tuple[list[ArrayLike], Index, Index | None]: """ If our desired 'columns' do not match the data's pre-existing 'arr_columns', we re-order our arrays. This is like a pre-emptive (cheap) reindex. """ if len(arrays): length = len(arrays[0]) else: length = 0 result_index = None if len(arrays) == 0 and index is None and length == 0: result_index = default_index(0) arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, length) return arrays, arr_columns, result_index if is_iterator(data): if nrows == 0: return cls() try: first_row = next(data) except StopIteration: return cls(index=index, columns=columns) dtype = None if hasattr(first_row, "dtype") and first_row.dtype.names: dtype = first_row.dtype values = [first_row] if nrows is None: values += data else: values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) else: data = values if isinstance(data, dict): if columns is None: columns = arr_columns = ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] arr_columns_list = [] for k, v in data.items(): if k in columns: arr_columns_list.append(k) arrays.append(v) arr_columns = Index(arr_columns_list) arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = to_arrays(data, columns) arr_columns = columns else: arrays, arr_columns = to_arrays(data, columns) if coerce_float: for i, arr in enumerate(arrays): if arr.dtype == object: # error: Argument 1 to "maybe_convert_objects" has # incompatible type "Union[ExtensionArray, ndarray]"; # expected "ndarray" arrays[i] = lib.maybe_convert_objects( arr, # type: ignore[arg-type] try_float=True, ) arr_columns = ensure_index(arr_columns) if columns is None: columns = arr_columns else: arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) if exclude is None: exclude = set() else: exclude = set(exclude) if index is not None: if isinstance(index, str) or not hasattr(index, "__iter__"): i = columns.get_loc(index) exclude.add(index) if len(arrays) > 0: result_index = Index(arrays[i], name=index) else: result_index = Index([], name=index) else: try: index_data = [arrays[arr_columns.get_loc(field)] for field in index] except (KeyError, TypeError): # raised by get_loc, see GH#29258 result_index = index else: result_index = ensure_index_from_sequences(index_data, names=index) exclude.update(index) if any(exclude): arr_exclude = [x for x in exclude if x in arr_columns] to_remove = [arr_columns.get_loc(col) for col in arr_exclude] arrays = [v for i, v in enumerate(arrays) if i not in to_remove] columns = columns.drop(exclude) manager = 
get_option("mode.data_manager") mgr = arrays_to_mgr(arrays, columns, result_index, typ=manager) return cls(mgr) def to_records( self, index: bool = True, column_dtypes=None, index_dtypes=None ) -> np.recarray: """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. column_dtypes : str, type, dict, default None If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) If the DataFrame index has no label then the recarray field name is set to 'index'. If the index has a label then this is used as the field name: >>> df.index = df.index.rename("I") >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')]) As well as for the index: >>> df.to_records(index_dtypes="<S2") rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')]) >>> index_dtypes = f"<S{df.index.str.len().max()}" >>> df.to_records(index_dtypes=index_dtypes) rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) """ if index: ix_vals = [ np.asarray(self.index.get_level_values(i)) for i in range(self.index.nlevels) ] arrays = ix_vals + [ np.asarray(self.iloc[:, i]) for i in range(len(self.columns)) ] index_names = list(self.index.names) if isinstance(self.index, MultiIndex): index_names = com.fill_missing_names(index_names) elif index_names[0] is None: index_names = ["index"] names = [str(name) for name in itertools.chain(index_names, self.columns)] else: arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))] names = [str(c) for c in self.columns] index_names = [] index_len = len(index_names) formats = [] for i, v in enumerate(arrays): index_int = i # When the names and arrays are collected, we # first collect those in the DataFrame's index, # followed by those in its columns. # # Thus, the total length of the array is: # len(index_names) + len(DataFrame.columns). # # This check allows us to see whether we are # handling a name / array in the index or column. 
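# (Positions below ``index_len`` refer to index levels; anything at or
#  beyond it is shifted by ``index_len`` and looked up in ``self.columns``.)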
if index_int < index_len: dtype_mapping = index_dtypes name = index_names[index_int] else: index_int -= index_len dtype_mapping = column_dtypes name = self.columns[index_int] # We have a dictionary, so we get the data type # associated with the index or column (which can # be denoted by its name in the DataFrame or its # position in DataFrame's array of indices or # columns, whichever is applicable. if is_dict_like(dtype_mapping): if name in dtype_mapping: dtype_mapping = dtype_mapping[name] elif index_int in dtype_mapping: dtype_mapping = dtype_mapping[index_int] else: dtype_mapping = None # If no mapping can be found, use the array's # dtype attribute for formatting. # # A valid dtype must either be a type or # string naming a type. if dtype_mapping is None: formats.append(v.dtype) elif isinstance(dtype_mapping, (type, np.dtype, str)): # error: Argument 1 to "append" of "list" has incompatible # type "Union[type, dtype[Any], str]"; expected "dtype[Any]" formats.append(dtype_mapping) # type: ignore[arg-type] else: element = "row" if i < index_len else "column" msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}" raise ValueError(msg) return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats}) def _from_arrays( cls, arrays, columns, index, dtype: Dtype | None = None, verify_integrity: bool = True, ) -> DataFrame: """ Create DataFrame from a list of arrays corresponding to the columns. Parameters ---------- arrays : list-like of arrays Each array in the list corresponds to one column, in order. columns : list-like, Index The column names for the resulting DataFrame. index : list-like, Index The rows labels for the resulting DataFrame. dtype : dtype, optional Optional dtype to enforce for all arrays. verify_integrity : bool, default True Validate and homogenize all input. If set to False, it is assumed that all elements of `arrays` are actual arrays how they will be stored in a block (numpy ndarray or ExtensionArray), have the same length as and are aligned with the index, and that `columns` and `index` are ensured to be an Index object. Returns ------- DataFrame """ if dtype is not None: dtype = pandas_dtype(dtype) manager = get_option("mode.data_manager") columns = ensure_index(columns) if len(columns) != len(arrays): raise ValueError("len(columns) must match len(arrays)") mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, verify_integrity=verify_integrity, typ=manager, ) return cls(mgr) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path", ) def to_stata( self, path: FilePath | WriteBuffer[bytes], *, convert_dates: dict[Hashable, str] | None = None, write_index: bool = True, byteorder: str | None = None, time_stamp: datetime.datetime | None = None, data_label: str | None = None, variable_labels: dict[Hashable, str] | None = None, version: int | None = 114, convert_strl: Sequence[Hashable] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, value_labels: dict[Hashable, dict[float, str]] | None = None, ) -> None: """ Export DataFrame object to Stata dta format. Writes the DataFrame to a Stata dataset file. "dta" files contain a Stata dataset. Parameters ---------- path : str, path object, or buffer String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. 
Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information. write_index : bool Write the index to Stata dataset. byteorder : str Can be ">", "<", "little", or "big". default is `sys.byteorder`. time_stamp : datetime A datetime to use as file creation date. Default is the current time. data_label : str, optional A label for the data set. Must be 80 characters or smaller. variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. version : {{114, 117, 118, 119, None}}, default 114 Version to use in the output dta file. Set to None to let pandas decide between 118 or 119 formats depending on the number of columns in the frame. Version 114 can be read by Stata 10 and later. Version 117 can be read by Stata 13 or later. Version 118 is supported in Stata 14 and later. Version 119 is supported in Stata 15 and later. Version 114 limits string variables to 244 characters or fewer while versions 117 and later allow strings with lengths up to 2,000,000 characters. Versions 118 and 119 support Unicode characters, and version 119 supports more than 32,767 variables. Version 119 should usually only be used when the number of variables exceeds the capacity of dta format 118. Exporting smaller datasets in format 119 may have unintended consequences, and, as of November 2020, Stata SE cannot read version 119 files. convert_strl : list, optional List of column names to convert to string columns to Stata StrL format. Only available if version is 117. Storing strings in the StrL format can produce smaller dta files if strings have more than 8 characters and values are repeated. {compression_options} .. versionadded:: 1.1.0 .. versionchanged:: 1.4.0 Zstandard support. {storage_options} .. versionadded:: 1.2.0 value_labels : dict of dicts Dictionary containing columns as keys and dictionaries of column value to labels as values. Labels for a single variable must be 32,000 characters or smaller. .. versionadded:: 1.4.0 Raises ------ NotImplementedError * If datetimes contain timezone information * Column dtype is not representable in Stata ValueError * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters See Also -------- read_stata : Import Stata data files. io.stata.StataWriter : Low-level writer for Stata data files. io.stata.StataWriter117 : Low-level writer for version 117 files. Examples -------- >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], ... 
'speed': [350, 18, 361, 15]}}) >>> df.to_stata('animals.dta') # doctest: +SKIP """ if version not in (114, 117, 118, 119, None): raise ValueError("Only formats 114, 117, 118 and 119 are supported.") if version == 114: if convert_strl is not None: raise ValueError("strl is not supported in format 114") from pandas.io.stata import StataWriter as statawriter elif version == 117: # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriter117 as statawriter, ) else: # versions 118 and 119 # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriterUTF8 as statawriter, ) kwargs: dict[str, Any] = {} if version is None or version >= 117: # strl conversion is only supported >= 117 kwargs["convert_strl"] = convert_strl if version is None or version >= 118: # Specifying the version is only supported for UTF8 (118 or 119) kwargs["version"] = version writer = statawriter( path, self, convert_dates=convert_dates, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, write_index=write_index, variable_labels=variable_labels, compression=compression, storage_options=storage_options, value_labels=value_labels, **kwargs, ) writer.write_file() def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None: """ Write a DataFrame to the binary Feather format. Parameters ---------- path : str, path object, file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. If a string or a path, it will be used as Root Directory path when writing a partitioned dataset. **kwargs : Additional keywords passed to :func:`pyarrow.feather.write_feather`. Starting with pyarrow 0.17, this includes the `compression`, `compression_level`, `chunksize` and `version` keywords. .. versionadded:: 1.1.0 Notes ----- This function writes the dataframe as a `feather file <https://arrow.apache.org/docs/python/feather.html>`_. Requires a default index. For saving the DataFrame with your custom index use a method that supports custom indices e.g. `to_parquet`. """ from pandas.io.feather_format import to_feather to_feather(self, path, **kwargs) Series.to_markdown, klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples="""Examples -------- >>> df = pd.DataFrame( ... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]} ... ) >>> print(df.to_markdown()) | | animal_1 | animal_2 | |---:|:-----------|:-----------| | 0 | elk | dog | | 1 | pig | quetzal | Output markdown with a tabulate option. 
>> print(df.to_markdown(tablefmt="grid"))
    +----+------------+------------+
    |    | animal_1   | animal_2   |
    +====+============+============+
    |  0 | elk        | dog        |
    +----+------------+------------+
    |  1 | pig        | quetzal    |
    +----+------------+------------+""",
    )
    def to_markdown(
        self,
        buf: FilePath | WriteBuffer[str] | None = None,
        mode: str = "wt",
        index: bool = True,
        storage_options: StorageOptions = None,
        **kwargs,
    ) -> str | None:
        if "showindex" in kwargs:
            raise ValueError("Pass 'index' instead of 'showindex'")

        kwargs.setdefault("headers", "keys")
        kwargs.setdefault("tablefmt", "pipe")
        kwargs.setdefault("showindex", index)
        tabulate = import_optional_dependency("tabulate")
        result = tabulate.tabulate(self, **kwargs)
        if buf is None:
            return result

        with get_handle(buf, mode, storage_options=storage_options) as handles:
            handles.handle.write(result)
        return None

    @overload
    def to_parquet(
        self,
        path: None = ...,
        engine: str = ...,
        compression: str | None = ...,
        index: bool | None = ...,
        partition_cols: list[str] | None = ...,
        storage_options: StorageOptions = ...,
        **kwargs,
    ) -> bytes:
        ...

    @overload
    def to_parquet(
        self,
        path: FilePath | WriteBuffer[bytes],
        engine: str = ...,
        compression: str | None = ...,
        index: bool | None = ...,
        partition_cols: list[str] | None = ...,
        storage_options: StorageOptions = ...,
        **kwargs,
    ) -> None:
        ...

    @doc(storage_options=_shared_docs["storage_options"])
    def to_parquet(
        self,
        path: FilePath | WriteBuffer[bytes] | None = None,
        engine: str = "auto",
        compression: str | None = "snappy",
        index: bool | None = None,
        partition_cols: list[str] | None = None,
        storage_options: StorageOptions = None,
        **kwargs,
    ) -> bytes | None:
        """
        Write a DataFrame to the binary parquet format.

        This function writes the dataframe as a `parquet file
        <https://parquet.apache.org/>`_. You can choose different parquet
        backends, and have the option of compression. See
        :ref:`the user guide <io.parquet>` for more details.

        Parameters
        ----------
        path : str, path object, file-like object, or None, default None
            String, path object (implementing ``os.PathLike[str]``), or file-like
            object implementing a binary ``write()`` function. If None, the result is
            returned as bytes. If a string or path, it will be used as Root Directory
            path when writing a partitioned dataset.

            .. versionchanged:: 1.2.0

               Previously this was "fname"

        engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
            Parquet library to use. If 'auto', then the option
            ``io.parquet.engine`` is used. The default ``io.parquet.engine``
            behavior is to try 'pyarrow', falling back to 'fastparquet' if
            'pyarrow' is unavailable.
        compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy'
            Name of the compression to use. Use ``None`` for no compression.
        index : bool, default None
            If ``True``, include the dataframe's index(es) in the file output.
            If ``False``, they will not be written to the file.
            If ``None``, similar to ``True`` the dataframe's index(es)
            will be saved. However, instead of being saved as values,
            the RangeIndex will be stored as a range in the metadata so it
            doesn't require much space and is faster. Other indexes will
            be included as columns in the file output.
        partition_cols : list, optional, default None
            Column names by which to partition the dataset.
            Columns are partitioned in the order they are given.
            Must be None if path is not a string.
        {storage_options}

            .. versionadded:: 1.2.0

        **kwargs
            Additional arguments passed to the parquet library. See
            :ref:`pandas io <io.parquet>` for more details.

        Returns
        -------
        bytes if no path argument is provided else None

        See Also
        --------
        read_parquet : Read a parquet file.
DataFrame.to_orc : Write an orc file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- This function requires either the `fastparquet <https://pypi.org/project/fastparquet>`_ or `pyarrow <https://arrow.apache.org/docs/python/>`_ library. Examples -------- >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}}) >>> df.to_parquet('df.parquet.gzip', ... compression='gzip') # doctest: +SKIP >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP col1 col2 0 1 3 1 2 4 If you want to get a buffer to the parquet content you can use a io.BytesIO object, as long as you don't use partition_cols, which creates multiple files. >>> import io >>> f = io.BytesIO() >>> df.to_parquet(f) >>> f.seek(0) 0 >>> content = f.read() """ from pandas.io.parquet import to_parquet return to_parquet( self, path, engine, compression=compression, index=index, partition_cols=partition_cols, storage_options=storage_options, **kwargs, ) def to_orc( self, path: FilePath | WriteBuffer[bytes] | None = None, *, engine: Literal["pyarrow"] = "pyarrow", index: bool | None = None, engine_kwargs: dict[str, Any] | None = None, ) -> bytes | None: """ Write a DataFrame to the ORC format. .. versionadded:: 1.5.0 Parameters ---------- path : str, file-like object or None, default None If a string, it will be used as Root Directory path when writing a partitioned dataset. By file-like object, we refer to objects with a write() method, such as a file handle (e.g. via builtin open function). If path is None, a bytes object is returned. engine : str, default 'pyarrow' ORC library to use. Pyarrow must be >= 7.0.0. index : bool, optional If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``infer`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. engine_kwargs : dict[str, Any] or None, default None Additional keyword arguments passed to :func:`pyarrow.orc.write_table`. Returns ------- bytes if no path argument is provided else None Raises ------ NotImplementedError Dtype of one or more columns is category, unsigned integers, interval, period or sparse. ValueError engine is not pyarrow. See Also -------- read_orc : Read a ORC file. DataFrame.to_parquet : Write a parquet file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- * Before using this function you should read the :ref:`user guide about ORC <io.orc>` and :ref:`install optional dependencies <install.warn_orc>`. * This function requires `pyarrow <https://arrow.apache.org/docs/python/>`_ library. * For supported dtypes please refer to `supported ORC features in Arrow <https://arrow.apache.org/docs/cpp/orc.html#data-types>`__. * Currently timezones in datetime columns are not preserved when a dataframe is converted into ORC files. 
Examples -------- >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [4, 3]}) >>> df.to_orc('df.orc') # doctest: +SKIP >>> pd.read_orc('df.orc') # doctest: +SKIP col1 col2 0 1 4 1 2 3 If you want to get a buffer to the orc content you can write it to io.BytesIO >>> import io >>> b = io.BytesIO(df.to_orc()) # doctest: +SKIP >>> b.seek(0) # doctest: +SKIP 0 >>> content = b.read() # doctest: +SKIP """ from pandas.io.orc import to_orc return to_orc( self, path, engine=engine, index=index, engine_kwargs=engine_kwargs ) def to_html( self, buf: FilePath | WriteBuffer[str], columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> None: ... def to_html( self, buf: None = ..., columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> str: ... header_type="bool", header="Whether to print column labels, default True", col_space_type="str or int, list or dict of int or str", col_space="The minimum width of each column in CSS length " "units. An int is assumed to be px units.", ) def to_html( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[Level] | None = None, col_space: ColspaceArgType | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: FormattersType | None = None, float_format: FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool | str = False, decimal: str = ".", bold_rows: bool = True, classes: str | list | tuple | None = None, escape: bool = True, notebook: bool = False, border: int | bool | None = None, table_id: str | None = None, render_links: bool = False, encoding: str | None = None, ) -> str | None: """ Render a DataFrame as an HTML table. %(shared_params)s bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.display.html.border``. 
table_id : str, optional
            A css id is included in the opening `<table>` tag if specified.
        render_links : bool, default False
            Convert URLs to HTML links.
        encoding : str, default "utf-8"
            Set character encoding.

            .. versionadded:: 1.0
        %(returns)s
        See Also
        --------
        to_string : Convert DataFrame to a string.
        """
        if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS:
            raise ValueError("Invalid value for justify parameter")

        formatter = fmt.DataFrameFormatter(
            self,
            columns=columns,
            col_space=col_space,
            na_rep=na_rep,
            header=header,
            index=index,
            formatters=formatters,
            float_format=float_format,
            bold_rows=bold_rows,
            sparsify=sparsify,
            justify=justify,
            index_names=index_names,
            escape=escape,
            decimal=decimal,
            max_rows=max_rows,
            max_cols=max_cols,
            show_dimensions=show_dimensions,
        )
        # TODO: a generic formatter would be in DataFrameFormatter
        return fmt.DataFrameRenderer(formatter).to_html(
            buf=buf,
            classes=classes,
            notebook=notebook,
            border=border,
            encoding=encoding,
            table_id=table_id,
            render_links=render_links,
        )

    @doc(
        storage_options=_shared_docs["storage_options"],
        compression_options=_shared_docs["compression_options"] % "path_or_buffer",
    )
    def to_xml(
        self,
        path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
        index: bool = True,
        root_name: str | None = "data",
        row_name: str | None = "row",
        na_rep: str | None = None,
        attr_cols: list[str] | None = None,
        elem_cols: list[str] | None = None,
        namespaces: dict[str | None, str] | None = None,
        prefix: str | None = None,
        encoding: str = "utf-8",
        xml_declaration: bool | None = True,
        pretty_print: bool | None = True,
        parser: str | None = "lxml",
        stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = None,
        compression: CompressionOptions = "infer",
        storage_options: StorageOptions = None,
    ) -> str | None:
        """
        Render a DataFrame to an XML document.

        .. versionadded:: 1.3.0

        Parameters
        ----------
        path_or_buffer : str, path object, file-like object, or None, default None
            String, path object (implementing ``os.PathLike[str]``), or file-like
            object implementing a ``write()`` function. If None, the result is
            returned as a string.
        index : bool, default True
            Whether to include index in XML document.
        root_name : str, default 'data'
            The name of root element in XML document.
        row_name : str, default 'row'
            The name of row element in XML document.
        na_rep : str, optional
            Missing data representation.
        attr_cols : list-like, optional
            List of columns to write as attributes in row element.
            Hierarchical columns will be flattened with underscore
            delimiting the different levels.
        elem_cols : list-like, optional
            List of columns to write as children in row element. By default,
            all columns output as children of row element. Hierarchical
            columns will be flattened with underscore delimiting the
            different levels.
        namespaces : dict, optional
            All namespaces to be defined in root element. Keys of dict
            should be prefix names and values of dict corresponding URIs.
            Default namespaces should be given empty string key. For
            example, ::

                namespaces = {{"": "https://example.com"}}

        prefix : str, optional
            Namespace prefix to be used for every element and/or attribute
            in document. This should be one of the keys in ``namespaces``
            dict.
        encoding : str, default 'utf-8'
            Encoding of the resulting document.
        xml_declaration : bool, default True
            Whether to include the XML declaration at start of document.
        pretty_print : bool, default True
            Whether output should be pretty printed with indentation and
            line breaks.
parser : {{'lxml','etree'}}, default 'lxml' Parser module to use for building of tree. Only 'lxml' and 'etree' are supported. With 'lxml', the ability to use XSLT stylesheet is supported. stylesheet : str, path object or file-like object, optional A URL, file-like object, or a raw string containing an XSLT script used to transform the raw XML output. Script should use layout of elements and attributes from original output. This argument requires ``lxml`` to be installed. Only XSLT 1.0 scripts and not later versions is currently supported. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. {storage_options} Returns ------- None or str If ``io`` is None, returns the resulting XML format as a string. Otherwise returns None. See Also -------- to_json : Convert the pandas object to a JSON string. to_html : Convert DataFrame to a html. Examples -------- >>> df = pd.DataFrame({{'shape': ['square', 'circle', 'triangle'], ... 'degrees': [360, 360, 180], ... 'sides': [4, np.nan, 3]}}) >>> df.to_xml() # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row> <index>0</index> <shape>square</shape> <degrees>360</degrees> <sides>4.0</sides> </row> <row> <index>1</index> <shape>circle</shape> <degrees>360</degrees> <sides/> </row> <row> <index>2</index> <shape>triangle</shape> <degrees>180</degrees> <sides>3.0</sides> </row> </data> >>> df.to_xml(attr_cols=[ ... 'index', 'shape', 'degrees', 'sides' ... ]) # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row index="0" shape="square" degrees="360" sides="4.0"/> <row index="1" shape="circle" degrees="360"/> <row index="2" shape="triangle" degrees="180" sides="3.0"/> </data> >>> df.to_xml(namespaces={{"doc": "https://example.com"}}, ... prefix="doc") # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <doc:data xmlns:doc="https://example.com"> <doc:row> <doc:index>0</doc:index> <doc:shape>square</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides>4.0</doc:sides> </doc:row> <doc:row> <doc:index>1</doc:index> <doc:shape>circle</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides/> </doc:row> <doc:row> <doc:index>2</doc:index> <doc:shape>triangle</doc:shape> <doc:degrees>180</doc:degrees> <doc:sides>3.0</doc:sides> </doc:row> </doc:data> """ from pandas.io.formats.xml import ( EtreeXMLFormatter, LxmlXMLFormatter, ) lxml = import_optional_dependency("lxml.etree", errors="ignore") TreeBuilder: type[EtreeXMLFormatter] | type[LxmlXMLFormatter] if parser == "lxml": if lxml is not None: TreeBuilder = LxmlXMLFormatter else: raise ImportError( "lxml not found, please install or use the etree parser." 
) elif parser == "etree": TreeBuilder = EtreeXMLFormatter else: raise ValueError("Values for parser can only be lxml or etree.") xml_formatter = TreeBuilder( self, path_or_buffer=path_or_buffer, index=index, root_name=root_name, row_name=row_name, na_rep=na_rep, attr_cols=attr_cols, elem_cols=elem_cols, namespaces=namespaces, prefix=prefix, encoding=encoding, xml_declaration=xml_declaration, pretty_print=pretty_print, stylesheet=stylesheet, compression=compression, storage_options=storage_options, ) return xml_formatter.write_output() # ---------------------------------------------------------------------- def info( self, verbose: bool | None = None, buf: WriteBuffer[str] | None = None, max_cols: int | None = None, memory_usage: bool | str | None = None, show_counts: bool | None = None, ) -> None: info = DataFrameInfo( data=self, memory_usage=memory_usage, ) info.render( buf=buf, max_cols=max_cols, verbose=verbose, show_counts=show_counts, ) def memory_usage(self, index: bool = True, deep: bool = False) -> Series: """ Return the memory usage of each column in bytes. The memory usage can optionally include the contribution of the index and elements of `object` dtype. This value is displayed in `DataFrame.info` by default. This can be suppressed by setting ``pandas.options.display.memory_usage`` to False. Parameters ---------- index : bool, default True Specifies whether to include the memory usage of the DataFrame's index in returned Series. If ``index=True``, the memory usage of the index is the first item in the output. deep : bool, default False If True, introspect the data deeply by interrogating `object` dtypes for system-level memory consumption, and include it in the returned values. Returns ------- Series A Series whose index is the original column names and whose values is the memory usage of each column in bytes. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of an ndarray. Series.memory_usage : Bytes consumed by a Series. Categorical : Memory-efficient array for string values with many repeated values. DataFrame.info : Concise summary of a DataFrame. Notes ----- See the :ref:`Frequently Asked Questions <df-memory-usage>` for more details. Examples -------- >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t)) ... for t in dtypes]) >>> df = pd.DataFrame(data) >>> df.head() int64 float64 complex128 object bool 0 1 1.0 1.0+0.0j 1 True 1 1 1.0 1.0+0.0j 1 True 2 1 1.0 1.0+0.0j 1 True 3 1 1.0 1.0+0.0j 1 True 4 1 1.0 1.0+0.0j 1 True >>> df.memory_usage() Index 128 int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=False) int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 The memory footprint of `object` dtype columns is ignored by default: >>> df.memory_usage(deep=True) Index 128 int64 40000 float64 40000 complex128 80000 object 180000 bool 5000 dtype: int64 Use a Categorical for efficient storage of an object-dtype column with many repeated values. 
>>> df['object'].astype('category').memory_usage(deep=True) 5244 """ result = self._constructor_sliced( [c.memory_usage(index=False, deep=deep) for col, c in self.items()], index=self.columns, dtype=np.intp, ) if index: index_memory_usage = self._constructor_sliced( self.index.memory_usage(deep=deep), index=["Index"] ) result = index_memory_usage._append(result) return result def transpose(self, *args, copy: bool = False) -> DataFrame: """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. Parameters ---------- *args : tuple, optional Accepted for compatibility with NumPy. copy : bool, default False Whether to copy the data after transposing, even for DataFrames with a single dtype. Note that a copy is always required for mixed dtype DataFrames, or for DataFrames with any extension types. Returns ------- DataFrame The transposed DataFrame. See Also -------- numpy.transpose : Permute the dimensions of a given array. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the `object` dtype. In such a case, a copy of the data is always made. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = pd.DataFrame(data=d1) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T # or df1.transpose() >>> df1_transposed 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'name': ['Alice', 'Bob'], ... 'score': [9.5, 8], ... 'employed': [False, True], ... 'kids': [0, 0]} >>> df2 = pd.DataFrame(data=d2) >>> df2 name score employed kids 0 Alice 9.5 False 0 1 Bob 8.0 True 0 >>> df2_transposed = df2.T # or df2.transpose() >>> df2_transposed 0 1 name Alice Bob score 9.5 8.0 employed False True kids 0 0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the `object` dtype: >>> df2.dtypes name object score float64 employed bool kids int64 dtype: object >>> df2_transposed.dtypes 0 object 1 object dtype: object """ nv.validate_transpose(args, {}) # construct the args dtypes = list(self.dtypes) if self._can_fast_transpose: # Note: tests pass without this, but this improves perf quite a bit. new_vals = self._values.T if copy and not using_copy_on_write(): new_vals = new_vals.copy() result = self._constructor( new_vals, index=self.columns, columns=self.index, copy=False ) if using_copy_on_write() and len(self) > 0: result._mgr.add_references(self._mgr) # type: ignore[arg-type] elif ( self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]) ): # We have EAs with the same dtype. We can preserve that dtype in transpose. dtype = dtypes[0] arr_type = dtype.construct_array_type() values = self.values new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values] result = type(self)._from_arrays( new_values, index=self.columns, columns=self.index ) else: new_arr = self.values.T if copy and not using_copy_on_write(): new_arr = new_arr.copy() result = self._constructor( new_arr, index=self.columns, columns=self.index, # We already made a copy (more than one block) copy=False, ) return result.__finalize__(self, method="transpose") def T(self) -> DataFrame: """ The transpose of the DataFrame. 
Returns ------- DataFrame The transposed DataFrame. See Also -------- DataFrame.transpose : Transpose index and columns. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.T 0 1 col1 1 2 col2 3 4 """ return self.transpose() # ---------------------------------------------------------------------- # Indexing Methods def _ixs(self, i: int, axis: AxisInt = 0) -> Series: """ Parameters ---------- i : int axis : int Returns ------- Series """ # irow if axis == 0: new_mgr = self._mgr.fast_xs(i) # if we are a copy, mark as such copy = isinstance(new_mgr.array, np.ndarray) and new_mgr.array.base is None result = self._constructor_sliced(new_mgr, name=self.index[i]).__finalize__( self ) result._set_is_copy(self, copy=copy) return result # icol else: label = self.columns[i] col_mgr = self._mgr.iget(i) result = self._box_col_values(col_mgr, i) # this is a cached value, mark it so result._set_as_cached(label, self) return result def _get_column_array(self, i: int) -> ArrayLike: """ Get the values of the i'th column (ndarray or ExtensionArray, as stored in the Block) Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ return self._mgr.iget_values(i) def _iter_column_arrays(self) -> Iterator[ArrayLike]: """ Iterate over the arrays of all columns in order. This returns the values as stored in the Block (ndarray or ExtensionArray). Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ for i in range(len(self.columns)): yield self._get_column_array(i) def _getitem_nocopy(self, key: list): """ Behaves like __getitem__, but returns a view in cases where __getitem__ would make a copy. """ # TODO(CoW): can be removed if/when we are always Copy-on-Write indexer = self.columns._get_indexer_strict(key, "columns")[1] new_axis = self.columns[indexer] new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=0, allow_dups=True, copy=False, only_slice=True, ) return self._constructor(new_mgr) def __getitem__(self, key): check_dict_or_set_indexers(key) key = lib.item_from_zerodim(key) key = com.apply_if_callable(key, self) if is_hashable(key) and not is_iterator(key): # is_iterator to exclude generator e.g. test_getitem_listlike # shortcut if the key is in columns is_mi = isinstance(self.columns, MultiIndex) # GH#45316 Return view if key is not duplicated # Only use drop_duplicates with duplicates for performance if not is_mi and ( self.columns.is_unique and key in self.columns or key in self.columns.drop_duplicates(keep=False) ): return self._get_item_cache(key) elif is_mi and self.columns.is_unique and key in self.columns: return self._getitem_multilevel(key) # Do we have a slicer (on rows)? if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") if isinstance(indexer, np.ndarray): # reachable with DatetimeIndex indexer = lib.maybe_indices_to_slice( indexer.astype(np.intp, copy=False), len(self) ) if isinstance(indexer, np.ndarray): # GH#43223 If we can not convert, use take return self.take(indexer, axis=0) return self._slice(indexer, axis=0) # Do we have a (boolean) DataFrame? if isinstance(key, DataFrame): return self.where(key) # Do we have a (boolean) 1d indexer? 
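        # e.g. df[df["col"] > 0] or df[mask] with a 1-D boolean ndarray/Series:
        # rows where the indexer is True are kept (handled by
        # _getitem_bool_array below)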
if com.is_bool_indexer(key): return self._getitem_bool_array(key) # We are left with two options: a single key, and a collection of keys, # We interpret tuples as collections only for non-MultiIndex is_single_key = isinstance(key, tuple) or not is_list_like(key) if is_single_key: if self.columns.nlevels > 1: return self._getitem_multilevel(key) indexer = self.columns.get_loc(key) if is_integer(indexer): indexer = [indexer] else: if is_iterator(key): key = list(key) indexer = self.columns._get_indexer_strict(key, "columns")[1] # take() does not accept boolean indexers if getattr(indexer, "dtype", None) == bool: indexer = np.where(indexer)[0] data = self._take_with_is_copy(indexer, axis=1) if is_single_key: # What does looking for a single key in a non-unique index return? # The behavior is inconsistent. It returns a Series, except when # - the key itself is repeated (test on data.shape, #9519), or # - we have a MultiIndex on columns (test on self.columns, #21309) if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex): # GH#26490 using data[key] can cause RecursionError return data._get_item_cache(key) return data def _getitem_bool_array(self, key): # also raises Exception if object array with NA values # warning here just in case -- previously __setitem__ was # reindexing but __getitem__ was not; it seems more reasonable to # go with the __setitem__ behavior since that is more consistent # with all other indexing behavior if isinstance(key, Series) and not key.index.equals(self.index): warnings.warn( "Boolean Series key will be reindexed to match DataFrame index.", UserWarning, stacklevel=find_stack_level(), ) elif len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}." ) # check_bool_indexer will throw exception if Series key cannot # be reindexed to match DataFrame rows key = check_bool_indexer(self.index, key) if key.all(): return self.copy(deep=None) indexer = key.nonzero()[0] return self._take_with_is_copy(indexer, axis=0) def _getitem_multilevel(self, key): # self.columns is a MultiIndex loc = self.columns.get_loc(key) if isinstance(loc, (slice, np.ndarray)): new_columns = self.columns[loc] result_columns = maybe_droplevels(new_columns, key) if self._is_mixed_type: result = self.reindex(columns=new_columns) result.columns = result_columns else: new_values = self._values[:, loc] result = self._constructor( new_values, index=self.index, columns=result_columns, copy=False ) if using_copy_on_write() and isinstance(loc, slice): result._mgr.add_references(self._mgr) # type: ignore[arg-type] result = result.__finalize__(self) # If there is only one column being returned, and its name is # either an empty string, or a tuple with an empty string as its # first element, then treat the empty string as a placeholder # and return the column as if the user had provided that empty # string in the key. If the result is a Series, exclude the # implied empty string from its name. if len(result.columns) == 1: # e.g. 
test_frame_getitem_multicolumn_empty_level, # test_frame_mixed_depth_get, test_loc_setitem_single_column_slice top = result.columns[0] if isinstance(top, tuple): top = top[0] if top == "": result = result[""] if isinstance(result, Series): result = self._constructor_sliced( result, index=self.index, name=key ) result._set_is_copy(self) return result else: # loc is neither a slice nor ndarray, so must be an int return self._ixs(loc, axis=1) def _get_value(self, index, col, takeable: bool = False) -> Scalar: """ Quickly retrieve single value at passed column and index. Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- scalar Notes ----- Assumes that both `self.index._index_as_unique` and `self.columns._index_as_unique`; Caller is responsible for checking. """ if takeable: series = self._ixs(col, axis=1) return series._values[index] series = self._get_item_cache(col) engine = self.index._engine if not isinstance(self.index, MultiIndex): # CategoricalIndex: Trying to use the engine fastpath may give incorrect # results if our categories are integers that dont match our codes # IntervalIndex: IntervalTree has no get_loc row = self.index.get_loc(index) return series._values[row] # For MultiIndex going through engine effectively restricts us to # same-length tuples; see test_get_set_value_no_partial_indexing loc = engine.get_loc(index) return series._values[loc] def isetitem(self, loc, value) -> None: """ Set the given value in the column with position `loc`. This is a positional analogue to ``__setitem__``. Parameters ---------- loc : int or sequence of ints Index position for the column. value : scalar or arraylike Value(s) for the column. Notes ----- ``frame.isetitem(loc, value)`` is an in-place method as it will modify the DataFrame in place (not returning a new object). In contrast to ``frame.iloc[:, i] = value`` which will try to update the existing values in place, ``frame.isetitem(loc, value)`` will not update the values of the column itself in place, it will instead insert a new array. In cases where ``frame.columns`` is unique, this is equivalent to ``frame[frame.columns[i]] = value``. 
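        Examples
        --------
        A minimal sketch of positional assignment; ``df`` here is a
        hypothetical two-column frame, not an object defined elsewhere:

        >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
        >>> df.isetitem(1, [10, 20])  # replace the column at position 1
        >>> df
           a   b
        0  1  10
        1  2  20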
""" if isinstance(value, DataFrame): if is_scalar(loc): loc = [loc] for i, idx in enumerate(loc): arraylike = self._sanitize_column(value.iloc[:, i]) self._iset_item_mgr(idx, arraylike, inplace=False) return arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=False) def __setitem__(self, key, value): if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) key = com.apply_if_callable(key, self) # see if we can slice the rows if isinstance(key, slice): slc = self.index._convert_slice_indexer(key, kind="getitem") return self._setitem_slice(slc, value) if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2: self._setitem_frame(key, value) elif isinstance(key, (Series, np.ndarray, list, Index)): self._setitem_array(key, value) elif isinstance(value, DataFrame): self._set_item_frame_value(key, value) elif ( is_list_like(value) and not self.columns.is_unique and 1 < len(self.columns.get_indexer_for([key])) == len(value) ): # Column to set is duplicated self._setitem_array([key], value) else: # set column self._set_item(key, value) def _setitem_slice(self, key: slice, value) -> None: # NB: we can't just use self.loc[key] = value because that # operates on labels and we need to operate positional for # backwards-compat, xref GH#31469 self._check_setitem_copy() self.iloc[key] = value def _setitem_array(self, key, value): # also raises Exception if object array with NA values if com.is_bool_indexer(key): # bool indexer is indexing along rows if len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}!" ) key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] self._check_setitem_copy() if isinstance(value, DataFrame): # GH#39931 reindex since iloc does not align value = value.reindex(self.index.take(indexer)) self.iloc[indexer] = value else: # Note: unlike self.iloc[:, indexer] = value, this will # never try to overwrite values inplace if isinstance(value, DataFrame): check_key_length(self.columns, key, value) for k1, k2 in zip(key, value.columns): self[k1] = value[k2] elif not is_list_like(value): for col in key: self[col] = value elif isinstance(value, np.ndarray) and value.ndim == 2: self._iset_not_inplace(key, value) elif np.ndim(value) > 1: # list of lists value = DataFrame(value).values return self._setitem_array(key, value) else: self._iset_not_inplace(key, value) def _iset_not_inplace(self, key, value): # GH#39510 when setting with df[key] = obj with a list-like key and # list-like value, we iterate over those listlikes and set columns # one at a time. This is different from dispatching to # `self.loc[:, key]= value` because loc.__setitem__ may overwrite # data inplace, whereas this will insert new arrays. def igetitem(obj, i: int): # Note: we catch DataFrame obj before getting here, but # hypothetically would return obj.iloc[:, i] if isinstance(obj, np.ndarray): return obj[..., i] else: return obj[i] if self.columns.is_unique: if np.shape(value)[-1] != len(key): raise ValueError("Columns must be same length as key") for i, col in enumerate(key): self[col] = igetitem(value, i) else: ilocs = self.columns.get_indexer_non_unique(key)[0] if (ilocs < 0).any(): # key entries not in self.columns raise NotImplementedError if np.shape(value)[-1] != len(ilocs): raise ValueError("Columns must be same length as key") assert np.ndim(value) <= 2 orig_columns = self.columns # Using self.iloc[:, i] = ... 
may set values inplace, which # by convention we do not do in __setitem__ try: self.columns = Index(range(len(self.columns))) for i, iloc in enumerate(ilocs): self[iloc] = igetitem(value, i) finally: self.columns = orig_columns def _setitem_frame(self, key, value): # support boolean setting with DataFrame input, e.g. # df[df > df2] = 0 if isinstance(key, np.ndarray): if key.shape != self.shape: raise ValueError("Array conditional must be same shape as self") key = self._constructor(key, **self._construct_axes_dict(), copy=False) if key.size and not all(is_bool_dtype(dtype) for dtype in key.dtypes): raise TypeError( "Must pass DataFrame or 2-d ndarray with boolean values only" ) self._check_inplace_setting(value) self._check_setitem_copy() self._where(-key, value, inplace=True) def _set_item_frame_value(self, key, value: DataFrame) -> None: self._ensure_valid_index(value) # align columns if key in self.columns: loc = self.columns.get_loc(key) cols = self.columns[loc] len_cols = 1 if is_scalar(cols) or isinstance(cols, tuple) else len(cols) if len_cols != len(value.columns): raise ValueError("Columns must be same length as key") # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame if isinstance(self.columns, MultiIndex) and isinstance( loc, (slice, Series, np.ndarray, Index) ): cols_droplevel = maybe_droplevels(cols, key) if len(cols_droplevel) and not cols_droplevel.equals(value.columns): value = value.reindex(cols_droplevel, axis=1) for col, col_droplevel in zip(cols, cols_droplevel): self[col] = value[col_droplevel] return if is_scalar(cols): self[cols] = value[value.columns[0]] return # now align rows arraylike = _reindex_for_setitem(value, self.index) self._set_item_mgr(key, arraylike) return if len(value.columns) != 1: raise ValueError( "Cannot set a DataFrame with multiple columns to the single " f"column {key}" ) self[key] = value[value.columns[0]] def _iset_item_mgr( self, loc: int | slice | np.ndarray, value, inplace: bool = False ) -> None: # when called from _set_item_mgr loc can be anything returned from get_loc self._mgr.iset(loc, value, inplace=inplace) self._clear_item_cache() def _set_item_mgr(self, key, value: ArrayLike) -> None: try: loc = self._info_axis.get_loc(key) except KeyError: # This item wasn't present, just insert at end self._mgr.insert(len(self._info_axis), key, value) else: self._iset_item_mgr(loc, value) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _iset_item(self, loc: int, value) -> None: arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=True) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _set_item(self, key, value) -> None: """ Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. 
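        A hedged sketch of the duplicate-column broadcast implemented below;
        ``df`` is a hypothetical frame with a repeated label, not an object
        defined elsewhere:

        >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], columns=["a", "a"])
        >>> df["a"] = [10, 20, 30]  # the 1-D value is tiled across both "a" columns
        >>> df
            a   a
        0  10  10
        1  20  20
        2  30  30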
""" value = self._sanitize_column(value) if ( key in self.columns and value.ndim == 1 and not is_extension_array_dtype(value) ): # broadcast across multiple columns if necessary if not self.columns.is_unique or isinstance(self.columns, MultiIndex): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)).T self._set_item_mgr(key, value) def _set_value( self, index: IndexLabel, col, value: Scalar, takeable: bool = False ) -> None: """ Put single value at passed column and index. Parameters ---------- index : Label row label col : Label column label value : scalar takeable : bool, default False Sets whether or not index/col interpreted as indexers """ try: if takeable: icol = col iindex = cast(int, index) else: icol = self.columns.get_loc(col) iindex = self.index.get_loc(index) self._mgr.column_setitem(icol, iindex, value, inplace_only=True) self._clear_item_cache() except (KeyError, TypeError, ValueError, LossySetitemError): # get_loc might raise a KeyError for missing labels (falling back # to (i)loc will do expansion of the index) # column_setitem will do validation that may raise TypeError, # ValueError, or LossySetitemError # set using a non-recursive method & reset the cache if takeable: self.iloc[index, col] = value else: self.loc[index, col] = value self._item_cache.pop(col, None) except InvalidIndexError as ii_err: # GH48729: Seems like you are trying to assign a value to a # row when only scalar options are permitted raise InvalidIndexError( f"You can only assign a scalar value not a {type(value)}" ) from ii_err def _ensure_valid_index(self, value) -> None: """ Ensure that if we don't have an index, that we can create one from the passed value. """ # GH5632, make sure that we are a Series convertible if not len(self.index) and is_list_like(value) and len(value): if not isinstance(value, DataFrame): try: value = Series(value) except (ValueError, NotImplementedError, TypeError) as err: raise ValueError( "Cannot set a frame with no defined index " "and a value that cannot be converted to a Series" ) from err # GH31368 preserve name of index index_copy = value.index.copy() if self.index.name is not None: index_copy.name = self.index.name self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan) def _box_col_values(self, values: SingleDataManager, loc: int) -> Series: """ Provide boxed values for a column. """ # Lookup in columns so that if e.g. a str datetime was passed # we attach the Timestamp object as the name. 
name = self.columns[loc] klass = self._constructor_sliced # We get index=self.index bc values is a SingleDataManager return klass(values, name=name, fastpath=True).__finalize__(self) # ---------------------------------------------------------------------- # Lookup Caching def _clear_item_cache(self) -> None: self._item_cache.clear() def _get_item_cache(self, item: Hashable) -> Series: """Return the cached item, item represents a label indexer.""" if using_copy_on_write(): loc = self.columns.get_loc(item) return self._ixs(loc, axis=1) cache = self._item_cache res = cache.get(item) if res is None: # All places that call _get_item_cache have unique columns, # pending resolution of GH#33047 loc = self.columns.get_loc(item) res = self._ixs(loc, axis=1) cache[item] = res # for a chain res._is_copy = self._is_copy return res def _reset_cacher(self) -> None: # no-op for DataFrame pass def _maybe_cache_changed(self, item, value: Series, inplace: bool) -> None: """ The object has called back to us saying maybe it has changed. """ loc = self._info_axis.get_loc(item) arraylike = value._values old = self._ixs(loc, axis=1) if old._values is value._values and inplace: # GH#46149 avoid making unnecessary copies/block-splitting return self._mgr.iset(loc, arraylike, inplace=inplace) # ---------------------------------------------------------------------- # Unsorted def query(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> DataFrame: ... def query(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def query(self, expr: str, *, inplace: bool = ..., **kwargs) -> DataFrame | None: ... def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | None: """ Query the columns of a DataFrame with a boolean expression. Parameters ---------- expr : str The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. You can refer to column names that are not valid Python variable names by surrounding them in backticks. Thus, column names containing spaces or punctuations (besides underscores) or starting with digits must be surrounded by backticks. (For example, a column named "Area (cm^2)" would be referenced as ```Area (cm^2)```). Column names which are Python keywords (like "list", "for", "import", etc) cannot be used. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether to modify the DataFrame rather than creating a new one. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. Returns ------- DataFrame or None DataFrame resulting from the provided query expression or None if ``inplace=True``. See Also -------- eval : Evaluate a string describing operations on DataFrame columns. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. 
This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. Please note that Python keywords may not be used as identifiers. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. *Backtick quoted variables* Backtick quoted variables are parsed as literal Python code and are converted internally to a Python valid identifier. This can lead to the following problems. During parsing a number of disallowed characters inside the backtick quoted string are replaced by strings that are allowed as a Python identifier. These characters include all operators in Python, the space character, the question mark, the exclamation mark, the dollar sign, and the euro sign. For other characters that fall outside the ASCII range (U+0001..U+007F) and those that are not further specified in PEP 3131, the query parser will raise an error. This excludes whitespace different than the space character, but also the hashtag (as it is used for comments) and the backtick itself (backtick can also not be escaped). In a special case, quotes that make a pair around a backtick can confuse the parser. For example, ```it's` > `that's``` will raise an error, as it forms a quoted string (``'s > `that'``) with a backtick inside. See also the Python documentation about lexical analysis (https://docs.python.org/3/reference/lexical_analysis.html) in combination with the source code in :mod:`pandas.core.computation.parsing`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ inplace = validate_bool_kwarg(inplace, "inplace") if not isinstance(expr, str): msg = f"expr must be a string to be evaluated, {type(expr)} given" raise ValueError(msg) kwargs["level"] = kwargs.pop("level", 0) + 1 kwargs["target"] = None res = self.eval(expr, **kwargs) try: result = self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query result = self[res] if inplace: self._update_inplace(result) return None else: return result def eval(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> Any: ... def eval(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def eval(self, expr: str, *, inplace: bool = False, **kwargs) -> Any | None: """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. 
This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, pandas object, or None The result of the evaluation or None if ``inplace=True``. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Multiple columns can be assigned to using multi-line expressions: >>> df.eval( ... ''' ... C = A + B ... D = A - B ... ''' ... ) A B C D 0 1 10 11 -9 1 2 8 10 -6 2 3 6 9 -3 3 4 4 8 0 4 5 2 7 3 """ from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, "inplace") kwargs["level"] = kwargs.pop("level", 0) + 1 index_resolvers = self._get_index_resolvers() column_resolvers = self._get_cleaned_column_resolvers() resolvers = column_resolvers, index_resolvers if "target" not in kwargs: kwargs["target"] = self kwargs["resolvers"] = tuple(kwargs.get("resolvers", ())) + resolvers return _eval(expr, inplace=inplace, **kwargs) def select_dtypes(self, include=None, exclude=None) -> DataFrame: """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. See Also -------- DataFrame.dtypes: Return Series with the data type of each column. Notes ----- * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <https://numpy.org/doc/stable/reference/arrays.scalars.html>`__ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ``'timedelta64'`` * To select Pandas categorical dtypes, use ``'category'`` * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 
'c': [1.0, 2.0] * 3}) >>> df a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int64']) b c 0 True 1.0 1 False 2.0 2 True 1.0 3 False 2.0 4 True 1.0 5 False 2.0 """ if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () selection = (frozenset(include), frozenset(exclude)) if not any(selection): raise ValueError("at least one of include or exclude must be nonempty") # convert the myriad valid dtypes object to a single representation def check_int_infer_dtype(dtypes): converted_dtypes: list[type] = [] for dtype in dtypes: # Numpy maps int to different types (int32, in64) on Windows and Linux # see https://github.com/numpy/numpy/issues/9464 if (isinstance(dtype, str) and dtype == "int") or (dtype is int): converted_dtypes.append(np.int32) converted_dtypes.append(np.int64) elif dtype == "float" or dtype is float: # GH#42452 : np.dtype("float") coerces to np.float64 from Numpy 1.20 converted_dtypes.extend([np.float64, np.float32]) else: converted_dtypes.append(infer_dtype_from_object(dtype)) return frozenset(converted_dtypes) include = check_int_infer_dtype(include) exclude = check_int_infer_dtype(exclude) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) # can't both include AND exclude! if not include.isdisjoint(exclude): raise ValueError(f"include and exclude overlap on {(include & exclude)}") def dtype_predicate(dtype: DtypeObj, dtypes_set) -> bool: # GH 46870: BooleanDtype._is_numeric == True but should be excluded return issubclass(dtype.type, tuple(dtypes_set)) or ( np.number in dtypes_set and getattr(dtype, "_is_numeric", False) and not is_bool_dtype(dtype) ) def predicate(arr: ArrayLike) -> bool: dtype = arr.dtype if include: if not dtype_predicate(dtype, include): return False if exclude: if dtype_predicate(dtype, exclude): return False return True mgr = self._mgr._get_data_subset(predicate).copy(deep=None) return type(self)(mgr).__finalize__(self) def insert( self, loc: int, column: Hashable, value: Scalar | AnyArrayLike, allow_duplicates: bool | lib.NoDefault = lib.no_default, ) -> None: """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns). column : str, number, or hashable object Label of the inserted column. value : Scalar, Series, or array-like allow_duplicates : bool, optional, default lib.no_default See Also -------- Index.insert : Insert new item by index. 
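
        Notes
        -----
        ``loc`` is a purely positional insertion point: ``df.insert(0, ...)``
        prepends a column and ``df.insert(len(df.columns), ...)`` appends one.
        A minimal sketch of the append case:

        >>> df = pd.DataFrame({'col1': [1, 2]})
        >>> df.insert(len(df.columns), 'col2', [3, 4])
        >>> df
           col1  col2
        0     1     3
        1     2     4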
Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.insert(1, "newcol", [99, 99]) >>> df col1 newcol col2 0 1 99 3 1 2 99 4 >>> df.insert(0, "col1", [100, 100], allow_duplicates=True) >>> df col1 col1 newcol col2 0 100 1 99 3 1 100 2 99 4 Notice that pandas uses index alignment in case of `value` from type `Series`: >>> df.insert(0, "col0", pd.Series([5, 6], index=[1, 2])) >>> df col0 col1 col1 newcol col2 0 NaN 100 1 99 3 1 5.0 100 2 99 4 """ if allow_duplicates is lib.no_default: allow_duplicates = False if allow_duplicates and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'allow_duplicates=True' when " "'self.flags.allows_duplicate_labels' is False." ) if not allow_duplicates and column in self.columns: # Should this be a different kind of error?? raise ValueError(f"cannot insert {column}, already exists") if not isinstance(loc, int): raise TypeError("loc must be int") value = self._sanitize_column(value) self._mgr.insert(loc, column, value) def assign(self, **kwargs) -> DataFrame: r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. Later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 You can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy(deep=None) for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) return data def _sanitize_column(self, value) -> ArrayLike: """ Ensures new columns (which go into the BlockManager as new blocks) are always copied and converted into an array. 
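
        A DataFrame or Series input is reindexed to ``self.index`` (see
        ``_reindex_for_setitem`` below), while other list-likes must already
        match its length before being passed through ``sanitize_array``.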
Parameters ---------- value : scalar, Series, or array-like Returns ------- numpy.ndarray or ExtensionArray """ self._ensure_valid_index(value) # We can get there through isetitem with a DataFrame # or through loc single_block_path if isinstance(value, DataFrame): return _reindex_for_setitem(value, self.index) elif is_dict_like(value): return _reindex_for_setitem(Series(value), self.index) if is_list_like(value): com.require_length_match(value, self.index) return sanitize_array(value, self.index, copy=True, allow_2d=True) def _series(self): return { item: Series( self._mgr.iget(idx), index=self.index, name=item, fastpath=True ) for idx, item in enumerate(self.columns) } # ---------------------------------------------------------------------- # Reindexing and alignment def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): frame = self columns = axes["columns"] if columns is not None: frame = frame._reindex_columns( columns, method, copy, level, fill_value, limit, tolerance ) index = axes["index"] if index is not None: frame = frame._reindex_index( index, method, copy, level, fill_value, limit, tolerance ) return frame def _reindex_index( self, new_index, method, copy: bool, level: Level, fill_value=np.nan, limit=None, tolerance=None, ): new_index, indexer = self.index.reindex( new_index, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {0: [new_index, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_columns( self, new_columns, method, copy: bool, level: Level, fill_value=None, limit=None, tolerance=None, ): new_columns, indexer = self.columns.reindex( new_columns, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {1: [new_columns, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_multi( self, axes: dict[str, Index], copy: bool, fill_value ) -> DataFrame: """ We are guaranteed non-Nones in the axes. """ new_index, row_indexer = self.index.reindex(axes["index"]) new_columns, col_indexer = self.columns.reindex(axes["columns"]) if row_indexer is not None and col_indexer is not None: # Fastpath. By doing two 'take's at once we avoid making an # unnecessary copy. # We only get here with `not self._is_mixed_type`, which (almost) # ensures that self.values is cheap. It may be worth making this # condition more specific. indexer = row_indexer, col_indexer new_values = take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor( new_values, index=new_index, columns=new_columns, copy=False ) else: return self._reindex_with_indexers( {0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value, ) def align( self, other: DataFrame, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value=None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> DataFrame: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) """ Examples -------- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) Change the row labels. >>> df.set_axis(['a', 'b', 'c'], axis='index') A B a 1 4 b 2 5 c 3 6 Change the column labels. 
        >>> df.set_axis(['I', 'II'], axis='columns')
           I  II
        0  1   4
        1  2   5
        2  3   6
        """
    )
    @Substitution(
        **_shared_doc_kwargs,
        extended_summary_sub=" column or",
        axis_description_sub=", and 1 identifies the columns",
        see_also_sub=" or columns",
    )
    @Appender(NDFrame.set_axis.__doc__)
    def set_axis(
        self,
        labels,
        *,
        axis: Axis = 0,
        copy: bool | None = None,
    ) -> DataFrame:
        return super().set_axis(labels, axis=axis, copy=copy)

    def reindex(  # type: ignore[override]
        self,
        labels=None,
        *,
        index=None,
        columns=None,
        axis: Axis | None = None,
        method: str | None = None,
        copy: bool | None = None,
        level: Level | None = None,
        fill_value: Scalar | None = np.nan,
        limit: int | None = None,
        tolerance=None,
    ) -> DataFrame:
        return super().reindex(
            labels=labels,
            index=index,
            columns=columns,
            axis=axis,
            method=method,
            copy=copy,
            level=level,
            fill_value=fill_value,
            limit=limit,
            tolerance=tolerance,
        )


ArrayLike = Union["ExtensionArray", np.ndarray]


class Series(base.IndexOpsMixin, NDFrame):  # type: ignore[misc]
    """
    One-dimensional ndarray with axis labels (including time series).

    Labels need not be unique but must be a hashable type. The object
    supports both integer- and label-based indexing and provides a host of
    methods for performing operations involving the index. Statistical
    methods from ndarray have been overridden to automatically exclude
    missing data (currently represented as NaN).

    Operations between Series (+, -, /, \\*, \\*\\*) align values based on their
    associated index values -- they need not be the same length. The result
    index will be the sorted union of the two indexes.

    Parameters
    ----------
    data : array-like, Iterable, dict, or scalar value
        Contains data stored in Series. If data is a dict, argument order is
        maintained.
    index : array-like or Index (1d)
        Values must be hashable and have the same length as `data`.
        Non-unique index values are allowed. Will default to
        RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like
        and index is None, then the keys in the data are used as the index.
        If the index is not None, the resulting Series is reindexed with the
        index values.
    dtype : str, numpy.dtype, or ExtensionDtype, optional
        Data type for the output Series. If not specified, this will be
        inferred from `data`.
        See the :ref:`user guide <basics.dtypes>` for more usages.
    name : Hashable, default None
        The name to give to the Series.
    copy : bool, default False
        Copy input data. Only affects Series or 1d ndarray input. See examples.

    Notes
    -----
    Please reference the :ref:`User Guide <basics.series>` for more
    information.

    Examples
    --------
    Constructing Series from a dictionary with an Index specified

    >>> d = {'a': 1, 'b': 2, 'c': 3}
    >>> ser = pd.Series(data=d, index=['a', 'b', 'c'])
    >>> ser
    a   1
    b   2
    c   3
    dtype: int64

    The keys of the dictionary match with the Index values, hence the Index
    values have no effect.

    >>> d = {'a': 1, 'b': 2, 'c': 3}
    >>> ser = pd.Series(data=d, index=['x', 'y', 'z'])
    >>> ser
    x   NaN
    y   NaN
    z   NaN
    dtype: float64

    Note that the Index is first built with the keys from the dictionary.
    After this the Series is reindexed with the given Index values, hence we
    get all NaN as a result.

    Constructing Series from a list with `copy=False`.

    >>> r = [1, 2]
    >>> ser = pd.Series(r, copy=False)
    >>> ser.iloc[0] = 999
    >>> r
    [1, 2]
    >>> ser
    0    999
    1      2
    dtype: int64

    Due to input data type the Series has a `copy` of the original data even
    though `copy=False`, so the data is unchanged.

    Constructing Series from a 1d ndarray with `copy=False`.

    >>> r = np.array([1, 2])
    >>> ser = pd.Series(r, copy=False)
    >>> ser.iloc[0] = 999
    >>> r
    array([999,   2])
    >>> ser
    0    999
    1      2
    dtype: int64

    Due to input data type the Series has a `view` on the original data, so
    the data is changed as well.
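
    For contrast, constructing from a 1d ndarray with `copy=True` gives the
    Series its own copy of the data (an illustrative addition showing the
    standard copy semantics):

    >>> r = np.array([1, 2])
    >>> ser = pd.Series(r, copy=True)
    >>> ser.iloc[0] = 999
    >>> r
    array([1, 2])
    >>> ser
    0    999
    1      2
    dtype: int64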
""" _typ = "series" _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) _name: Hashable _metadata: list[str] = ["name"] _internal_names_set = {"index"} | NDFrame._internal_names_set _accessors = {"dt", "cat", "str", "sparse"} _hidden_attrs = ( base.IndexOpsMixin._hidden_attrs | NDFrame._hidden_attrs | frozenset([]) ) # Override cache_readonly bc Series is mutable # error: Incompatible types in assignment (expression has type "property", # base class "IndexOpsMixin" defined the type as "Callable[[IndexOpsMixin], bool]") hasnans = property( # type: ignore[assignment] # error: "Callable[[IndexOpsMixin], bool]" has no attribute "fget" base.IndexOpsMixin.hasnans.fget, # type: ignore[attr-defined] doc=base.IndexOpsMixin.hasnans.__doc__, ) _mgr: SingleManager div: Callable[[Series, Any], Series] rdiv: Callable[[Series, Any], Series] # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index=None, dtype: Dtype | None = None, name=None, copy: bool | None = None, fastpath: bool = False, ) -> None: if ( isinstance(data, (SingleBlockManager, SingleArrayManager)) and index is None and dtype is None and (copy is False or copy is None) ): if using_copy_on_write(): data = data.copy(deep=False) # GH#33357 called with just the SingleBlockManager NDFrame.__init__(self, data) if fastpath: # e.g. from _box_col_values, skip validation of name object.__setattr__(self, "_name", name) else: self.name = name return if isinstance(data, (ExtensionArray, np.ndarray)): if copy is not False and using_copy_on_write(): if dtype is None or astype_is_view(data.dtype, pandas_dtype(dtype)): data = data.copy() if copy is None: copy = False # we are called internally, so short-circuit if fastpath: # data is a ndarray, index is defined if not isinstance(data, (SingleBlockManager, SingleArrayManager)): manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index) elif manager == "array": data = SingleArrayManager.from_array(data, index) elif using_copy_on_write() and not copy: data = data.copy(deep=False) if copy: data = data.copy() # skips validation of the name object.__setattr__(self, "_name", name) NDFrame.__init__(self, data) return if isinstance(data, SingleBlockManager) and using_copy_on_write() and not copy: data = data.copy(deep=False) name = ibase.maybe_extract_name(name, data, type(self)) if index is not None: index = ensure_index(index) if dtype is not None: dtype = self._validate_dtype(dtype) if data is None: index = index if index is not None else default_index(0) if len(index) or dtype is not None: data = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: data = [] if isinstance(data, MultiIndex): raise NotImplementedError( "initializing a Series from a MultiIndex is not supported" ) refs = None if isinstance(data, Index): if dtype is not None: data = data.astype(dtype, copy=False) if using_copy_on_write(): refs = data._references data = data._values else: # GH#24096 we need to ensure the index remains immutable data = data._values.copy() copy = False elif isinstance(data, np.ndarray): if len(data.dtype): # GH#13296 we are dealing with a compound dtype, which # should be treated as 2D raise ValueError( "Cannot construct a Series from an ndarray with " "compound dtype. Use DataFrame instead." 
) elif isinstance(data, Series): if index is None: index = data.index data = data._mgr.copy(deep=False) else: data = data.reindex(index, copy=copy) copy = False data = data._mgr elif is_dict_like(data): data, index = self._init_dict(data, index, dtype) dtype = None copy = False elif isinstance(data, (SingleBlockManager, SingleArrayManager)): if index is None: index = data.index elif not data.index.equals(index) or copy: # GH#19275 SingleBlockManager input should only be called # internally raise AssertionError( "Cannot pass both SingleBlockManager " "`data` argument and a different " "`index` argument. `copy` must be False." ) elif isinstance(data, ExtensionArray): pass else: data = com.maybe_iterable_to_list(data) if is_list_like(data) and not len(data) and dtype is None: # GH 29405: Pre-2.0, this defaulted to float. dtype = np.dtype(object) if index is None: if not is_list_like(data): data = [data] index = default_index(len(data)) elif is_list_like(data): com.require_length_match(data, index) # create/copy the manager if isinstance(data, (SingleBlockManager, SingleArrayManager)): if dtype is not None: data = data.astype(dtype=dtype, errors="ignore", copy=copy) elif copy: data = data.copy() else: data = sanitize_array(data, index, dtype, copy) manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index, refs=refs) elif manager == "array": data = SingleArrayManager.from_array(data, index) NDFrame.__init__(self, data) self.name = name self._set_axis(0, index) def _init_dict( self, data, index: Index | None = None, dtype: DtypeObj | None = None ): """ Derive the "_mgr" and "index" attributes of a new Series from a dictionary input. Parameters ---------- data : dict or dict-like Data used to populate the new Series. index : Index or None, default None Index for the new Series: if None, use dict keys. dtype : np.dtype, ExtensionDtype, or None, default None The dtype for the new Series: if None, infer from data. Returns ------- _data : BlockManager for the new Series index : index for the new Series """ keys: Index | tuple # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')] # raises KeyError), so we iterate the entire dict, and align if data: # GH:34717, issue was using zip to extract key and values from data. # using generators in effects the performance. # Below is the new way of extracting the keys and values keys = tuple(data.keys()) values = list(data.values()) # Generating list of values- faster way elif index is not None: # fastpath for Series(data=None). Just use broadcasting a scalar # instead of reindexing. 
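            # Illustration (comment only): pd.Series({}, index=["a", "b"])
            # lands here; a single NaN (float64 is inferred when dtype is
            # None) is broadcast across both labels by the constructor call
            # further below, avoiding an empty-Series-then-reindex round trip.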
if len(index) or dtype is not None: values = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: values = [] keys = index else: keys, values = (), [] # Input is now list-like, so rely on "standard" construction: s = self._constructor( values, index=keys, dtype=dtype, ) # Now we just make sure the order is respected, if any if data and index is not None: s = s.reindex(index, copy=False) return s._mgr, s.index # ---------------------------------------------------------------------- def _constructor(self) -> Callable[..., Series]: return Series def _constructor_expanddim(self) -> Callable[..., DataFrame]: """ Used when a manipulation result has one higher dimension as the original, such as Series.to_frame() """ from pandas.core.frame import DataFrame return DataFrame # types def _can_hold_na(self) -> bool: return self._mgr._can_hold_na # ndarray compatibility def dtype(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtype dtype('int64') """ return self._mgr.dtype def dtypes(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtypes dtype('int64') """ # DataFrame compatibility return self.dtype def name(self) -> Hashable: """ Return the name of the Series. The name of a Series becomes its index or column name if it is used to form a DataFrame. It is also used whenever displaying the Series using the interpreter. Returns ------- label (hashable object) The name of the Series, also the column name if part of a DataFrame. See Also -------- Series.rename : Sets the Series name when given a scalar input. Index.name : Corresponding Index property. Examples -------- The Series name can be set initially when calling the constructor. >>> s = pd.Series([1, 2, 3], dtype=np.int64, name='Numbers') >>> s 0 1 1 2 2 3 Name: Numbers, dtype: int64 >>> s.name = "Integers" >>> s 0 1 1 2 2 3 Name: Integers, dtype: int64 The name of a Series within a DataFrame is its column name. >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], ... columns=["Odd Numbers", "Even Numbers"]) >>> df Odd Numbers Even Numbers 0 1 2 1 3 4 2 5 6 >>> df["Even Numbers"].name 'Even Numbers' """ return self._name def name(self, value: Hashable) -> None: validate_all_hashable(value, error_name=f"{type(self).__name__}.name") object.__setattr__(self, "_name", value) def values(self): """ Return Series as ndarray or ndarray-like depending on the dtype. .. warning:: We recommend using :attr:`Series.array` or :meth:`Series.to_numpy`, depending on whether you need a reference to the underlying data or a NumPy array. Returns ------- numpy.ndarray or ndarray-like See Also -------- Series.array : Reference to the underlying data. Series.to_numpy : A NumPy array representing the underlying data. Examples -------- >>> pd.Series([1, 2, 3]).values array([1, 2, 3]) >>> pd.Series(list('aabc')).values array(['a', 'a', 'b', 'c'], dtype=object) >>> pd.Series(list('aabc')).astype('category').values ['a', 'a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] Timezone aware datetime data is converted to UTC: >>> pd.Series(pd.date_range('20130101', periods=3, ... tz='US/Eastern')).values array(['2013-01-01T05:00:00.000000000', '2013-01-02T05:00:00.000000000', '2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]') """ return self._mgr.external_values() def _values(self): """ Return the internal repr of this data (defined by Block.interval_values). 
        These are the values as stored in the Block (ndarray or
        ExtensionArray depending on the Block class), with datetime64[ns]
        and timedelta64[ns] wrapped in ExtensionArrays to match
        Index._values behavior.

        Differs from the public ``.values`` for certain data types, because
        of historical backwards compatibility of the public attribute (e.g.
        period returns object ndarray and datetimetz a datetime64[ns]
        ndarray for ``.values`` while it returns an ExtensionArray for
        ``._values`` in those cases).

        Differs from ``.array`` in that this still returns the numpy array
        if the Block is backed by a numpy array (except for datetime64 and
        timedelta64 dtypes), while ``.array`` ensures to always return an
        ExtensionArray.

        Overview:

        dtype       | values        | _values       | array         |
        ----------- | ------------- | ------------- | ------------- |
        Numeric     | ndarray       | ndarray       | PandasArray   |
        Category    | Categorical   | Categorical   | Categorical   |
        dt64[ns]    | ndarray[M8ns] | DatetimeArray | DatetimeArray |
        dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray |
        td64[ns]    | ndarray[m8ns] | TimedeltaArray| ndarray[m8ns] |
        Period      | ndarray[obj]  | PeriodArray   | PeriodArray   |
        Nullable    | EA            | EA            | EA            |
        """
        return self._mgr.internal_values()

    @property
    def _references(self) -> BlockValuesRefs | None:
        if isinstance(self._mgr, SingleArrayManager):
            return None
        return self._mgr._block.refs

    # error: Decorated property not supported
    @Appender(base.IndexOpsMixin.array.__doc__)  # type: ignore[misc]
    @property
    def array(self) -> ExtensionArray:
        return self._mgr.array_values()

    # ops
    def ravel(self, order: str = "C") -> ArrayLike:
        """
        Return the flattened underlying data as an ndarray or ExtensionArray.

        Returns
        -------
        numpy.ndarray or ExtensionArray
            Flattened data of the Series.

        See Also
        --------
        numpy.ndarray.ravel : Return a flattened array.
        """
        arr = self._values.ravel(order=order)
        if isinstance(arr, np.ndarray) and using_copy_on_write():
            arr.flags.writeable = False
        return arr

    def __len__(self) -> int:
        """
        Return the length of the Series.
        """
        return len(self._mgr)

    def view(self, dtype: Dtype | None = None) -> Series:
        """
        Create a new view of the Series.

        This function will return a new Series with a view of the same
        underlying values in memory, optionally reinterpreted with a new data
        type. The new data type must preserve the same size in bytes as to
        not cause index misalignment.

        Parameters
        ----------
        dtype : data type
            Data type object or one of their string representations.

        Returns
        -------
        Series
            A new Series object as a view of the same data in memory.

        See Also
        --------
        numpy.ndarray.view : Equivalent numpy function to create a new view
            of the same data in memory.

        Notes
        -----
        Series are instantiated with ``dtype=float64`` by default. While
        ``numpy.ndarray.view()`` will return a view with the same data type
        as the original array, ``Series.view()`` (without specified dtype)
        will try using ``float64`` and may fail if the original data type
        size in bytes is not the same.
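
        Reinterpreting to a dtype with a different itemsize (say ``int8`` to
        ``int16``) will therefore generally fail, while same-size
        reinterpretations such as ``int8`` to ``uint8`` behave as shown below.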
Examples -------- >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8') >>> s 0 -2 1 -1 2 0 3 1 4 2 dtype: int8 The 8 bit signed integer representation of `-1` is `0b11111111`, but the same bytes represent 255 if read as an 8 bit unsigned integer: >>> us = s.view('uint8') >>> us 0 254 1 255 2 0 3 1 4 2 dtype: uint8 The views share the same underlying values: >>> us[0] = 128 >>> s 0 -128 1 -1 2 0 3 1 4 2 dtype: int8 """ # self.array instead of self._values so we piggyback on PandasArray # implementation res_values = self.array.view(dtype) res_ser = self._constructor(res_values, index=self.index, copy=False) if isinstance(res_ser._mgr, SingleBlockManager) and using_copy_on_write(): blk = res_ser._mgr._block blk.refs = cast("BlockValuesRefs", self._references) blk.refs.add_reference(blk) # type: ignore[arg-type] return res_ser.__finalize__(self, method="view") # ---------------------------------------------------------------------- # NDArray Compat _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: """ Return the values as a NumPy array. Users should not call this directly. Rather, it is invoked by :func:`numpy.array` and :func:`numpy.asarray`. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to use for the resulting NumPy array. By default, the dtype is inferred from the data. Returns ------- numpy.ndarray The values in the series converted to a :class:`numpy.ndarray` with the specified `dtype`. See Also -------- array : Create a new array from data. Series.array : Zero-copy view to the array backing the Series. Series.to_numpy : Series method for similar behavior. Examples -------- >>> ser = pd.Series([1, 2, 3]) >>> np.asarray(ser) array([1, 2, 3]) For timezone-aware data, the timezones may be retained with ``dtype='object'`` >>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) >>> np.asarray(tzser, dtype="object") array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'), Timestamp('2000-01-02 00:00:00+0100', tz='CET')], dtype=object) Or the values may be localized to UTC and the tzinfo discarded with ``dtype='datetime64[ns]'`` >>> np.asarray(tzser, dtype="datetime64[ns]") # doctest: +ELLIPSIS array(['1999-12-31T23:00:00.000000000', ...], dtype='datetime64[ns]') """ values = self._values arr = np.asarray(values, dtype=dtype) if using_copy_on_write() and astype_is_view(values.dtype, arr.dtype): arr = arr.view() arr.flags.writeable = False return arr # ---------------------------------------------------------------------- # Unary Methods # coercion __float__ = _coerce_method(float) __int__ = _coerce_method(int) # ---------------------------------------------------------------------- # indexers def axes(self) -> list[Index]: """ Return a list of the row axis labels. 
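
        Examples
        --------
        A short illustration:

        >>> s = pd.Series([1, 2, 3], index=["a", "b", "c"])
        >>> s.axes
        [Index(['a', 'b', 'c'], dtype='object')]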
""" return [self.index] # ---------------------------------------------------------------------- # Indexing Methods def take(self, indices, axis: Axis = 0, **kwargs) -> Series: nv.validate_take((), kwargs) indices = ensure_platform_int(indices) if ( indices.ndim == 1 and using_copy_on_write() and is_range_indexer(indices, len(self)) ): return self.copy(deep=None) new_index = self.index.take(indices) new_values = self._values.take(indices) result = self._constructor(new_values, index=new_index, fastpath=True) return result.__finalize__(self, method="take") def _take_with_is_copy(self, indices, axis: Axis = 0) -> Series: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing for the SettingWithCopyWarning). For Series this does the same as the public take (it never sets `_is_copy`). See the docstring of `take` for full explanation of the parameters. """ return self.take(indices=indices, axis=axis) def _ixs(self, i: int, axis: AxisInt = 0) -> Any: """ Return the i-th value or values in the Series by location. Parameters ---------- i : int Returns ------- scalar (int) or Series (slice, sequence) """ return self._values[i] def _slice(self, slobj: slice | np.ndarray, axis: Axis = 0) -> Series: # axis kwarg is retained for compat with NDFrame method # _slice is *always* positional return self._get_values(slobj) def __getitem__(self, key): check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) if key is Ellipsis: return self key_is_scalar = is_scalar(key) if isinstance(key, (list, tuple)): key = unpack_1tuple(key) if is_integer(key) and self.index._should_fallback_to_positional: return self._values[key] elif key_is_scalar: return self._get_value(key) if is_hashable(key): # Otherwise index.get_value will raise InvalidIndexError try: # For labels that don't resolve as scalars like tuples and frozensets result = self._get_value(key) return result except (KeyError, TypeError, InvalidIndexError): # InvalidIndexError for e.g. generator # see test_series_getitem_corner_generator if isinstance(key, tuple) and isinstance(self.index, MultiIndex): # We still have the corner case where a tuple is a key # in the first level of our MultiIndex return self._get_values_tuple(key) if is_iterator(key): key = list(key) if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) return self._get_values(key) return self._get_with(key) def _get_with(self, key): # other: fancy integer or otherwise if isinstance(key, slice): # _convert_slice_indexer to determine if this slice is positional # or label based, and if the latter, convert to positional slobj = self.index._convert_slice_indexer(key, kind="getitem") return self._slice(slobj) elif isinstance(key, ABCDataFrame): raise TypeError( "Indexing a Series with DataFrame is not " "supported, use the appropriate DataFrame column" ) elif isinstance(key, tuple): return self._get_values_tuple(key) elif not is_list_like(key): # e.g. scalars that aren't recognized by lib.is_scalar, GH#32684 return self.loc[key] if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)): key = list(key) if isinstance(key, Index): key_type = key.inferred_type else: key_type = lib.infer_dtype(key, skipna=False) # Note: The key_type == "boolean" case should be caught by the # com.is_bool_indexer check in __getitem__ if key_type == "integer": # We need to decide whether to treat this as a positional indexer # (i.e. self.iloc) or label-based (i.e. 
self.loc) if not self.index._should_fallback_to_positional: return self.loc[key] else: return self.iloc[key] # handle the dup indexing case GH#4246 return self.loc[key] def _get_values_tuple(self, key: tuple): # mpl hackaround if com.any_none(*key): # mpl compat if we look up e.g. ser[:, np.newaxis]; # see tests.series.timeseries.test_mpl_compat_hack # the asarray is needed to avoid returning a 2D DatetimeArray result = np.asarray(self._values[key]) disallow_ndim_indexing(result) return result if not isinstance(self.index, MultiIndex): raise KeyError("key of type tuple not found and not a MultiIndex") # If key is contained, would have returned by now indexer, new_index = self.index.get_loc_level(key) new_ser = self._constructor(self._values[indexer], index=new_index, copy=False) if using_copy_on_write() and isinstance(indexer, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) def _get_values(self, indexer: slice | npt.NDArray[np.bool_]) -> Series: new_mgr = self._mgr.getitem_mgr(indexer) return self._constructor(new_mgr).__finalize__(self) def _get_value(self, label, takeable: bool = False): """ Quickly retrieve single value at passed index label. Parameters ---------- label : object takeable : interpret the index as indexers, default False Returns ------- scalar value """ if takeable: return self._values[label] # Similar to Index.get_value, but we do not fall back to positional loc = self.index.get_loc(label) if is_integer(loc): return self._values[loc] if isinstance(self.index, MultiIndex): mi = self.index new_values = self._values[loc] if len(new_values) == 1 and mi.nlevels == 1: # If more than one level left, we can not return a scalar return new_values[0] new_index = mi[loc] new_index = maybe_droplevels(new_index, label) new_ser = self._constructor( new_values, index=new_index, name=self.name, copy=False ) if using_copy_on_write() and isinstance(loc, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) else: return self.iloc[loc] def __setitem__(self, key, value) -> None: if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) cacher_needs_updating = self._check_is_chained_assignment_possible() if key is Ellipsis: key = slice(None) if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") return self._set_values(indexer, value) try: self._set_with_engine(key, value) except KeyError: # We have a scalar (or for MultiIndex or object-dtype, scalar-like) # key that is not present in self.index. if is_integer(key): if not self.index._should_fallback_to_positional: # GH#33469 self.loc[key] = value else: # positional setter # can't use _mgr.setitem_inplace yet bc could have *both* # KeyError and then ValueError, xref GH#45070 self._set_values(key, value) else: # GH#12862 adding a new key to the Series self.loc[key] = value except (TypeError, ValueError, LossySetitemError): # The key was OK, but we cannot set the value losslessly indexer = self.index.get_loc(key) self._set_values(indexer, value) except InvalidIndexError as err: if isinstance(key, tuple) and not isinstance(self.index, MultiIndex): # cases with MultiIndex don't get here bc they raise KeyError # e.g. 
test_basic_getitem_setitem_corner raise KeyError( "key of type tuple not found and not a MultiIndex" ) from err if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) if ( is_list_like(value) and len(value) != len(self) and not isinstance(value, Series) and not is_object_dtype(self.dtype) ): # Series will be reindexed to have matching length inside # _where call below # GH#44265 indexer = key.nonzero()[0] self._set_values(indexer, value) return # otherwise with listlike other we interpret series[mask] = other # as series[mask] = other[mask] try: self._where(~key, value, inplace=True) except InvalidIndexError: # test_where_dups self.iloc[key] = value return else: self._set_with(key, value) if cacher_needs_updating: self._maybe_update_cacher(inplace=True) def _set_with_engine(self, key, value) -> None: loc = self.index.get_loc(key) # this is equivalent to self._values[key] = value self._mgr.setitem_inplace(loc, value) def _set_with(self, key, value) -> None: # We got here via exception-handling off of InvalidIndexError, so # key should always be listlike at this point. assert not isinstance(key, tuple) if is_iterator(key): # Without this, the call to infer_dtype will consume the generator key = list(key) if not self.index._should_fallback_to_positional: # Regardless of the key type, we're treating it as labels self._set_labels(key, value) else: # Note: key_type == "boolean" should not occur because that # should be caught by the is_bool_indexer check in __setitem__ key_type = lib.infer_dtype(key, skipna=False) if key_type == "integer": self._set_values(key, value) else: self._set_labels(key, value) def _set_labels(self, key, value) -> None: key = com.asarray_tuplesafe(key) indexer: np.ndarray = self.index.get_indexer(key) mask = indexer == -1 if mask.any(): raise KeyError(f"{key[mask]} not in index") self._set_values(indexer, value) def _set_values(self, key, value) -> None: if isinstance(key, (Index, Series)): key = key._values self._mgr = self._mgr.setitem(indexer=key, value=value) self._maybe_update_cacher() def _set_value(self, label, value, takeable: bool = False) -> None: """ Quickly set single value at passed label. If label is not contained, a new object is created with the label placed at the end of the result index. Parameters ---------- label : object Partial indexing with MultiIndex not allowed. value : object Scalar value. takeable : interpret the index as indexers, default False """ if not takeable: try: loc = self.index.get_loc(label) except KeyError: # set using a non-recursive method self.loc[label] = value return else: loc = label self._set_values(loc, value) # ---------------------------------------------------------------------- # Lookup Caching def _is_cached(self) -> bool: """Return boolean indicating if self is cached or not.""" return getattr(self, "_cacher", None) is not None def _get_cacher(self): """return my cacher or None""" cacher = getattr(self, "_cacher", None) if cacher is not None: cacher = cacher[1]() return cacher def _reset_cacher(self) -> None: """ Reset the cacher. """ if hasattr(self, "_cacher"): del self._cacher def _set_as_cached(self, item, cacher) -> None: """ Set the _cacher attribute on the calling object with a weakref to cacher. 
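
        Under copy-on-write no cacher is tracked at all, so this method
        returns early (see the guard below).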
""" if using_copy_on_write(): return self._cacher = (item, weakref.ref(cacher)) def _clear_item_cache(self) -> None: # no-op for Series pass def _check_is_chained_assignment_possible(self) -> bool: """ See NDFrame._check_is_chained_assignment_possible.__doc__ """ if self._is_view and self._is_cached: ref = self._get_cacher() if ref is not None and ref._is_mixed_type: self._check_setitem_copy(t="referent", force=True) return True return super()._check_is_chained_assignment_possible() def _maybe_update_cacher( self, clear: bool = False, verify_is_copy: bool = True, inplace: bool = False ) -> None: """ See NDFrame._maybe_update_cacher.__doc__ """ # for CoW, we never want to update the parent DataFrame cache # if the Series changed, but don't keep track of any cacher if using_copy_on_write(): return cacher = getattr(self, "_cacher", None) if cacher is not None: assert self.ndim == 1 ref: DataFrame = cacher[1]() # we are trying to reference a dead referent, hence # a copy if ref is None: del self._cacher elif len(self) == len(ref) and self.name in ref.columns: # GH#42530 self.name must be in ref.columns # to ensure column still in dataframe # otherwise, either self or ref has swapped in new arrays ref._maybe_cache_changed(cacher[0], self, inplace=inplace) else: # GH#33675 we have swapped in a new array, so parent # reference to self is now invalid ref._item_cache.pop(cacher[0], None) super()._maybe_update_cacher( clear=clear, verify_is_copy=verify_is_copy, inplace=inplace ) # ---------------------------------------------------------------------- # Unsorted def _is_mixed_type(self) -> bool: return False def repeat(self, repeats: int | Sequence[int], axis: None = None) -> Series: """ Repeat elements of a Series. Returns a new Series where each element of the current Series is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty Series. axis : None Unused. Parameter needed for compatibility with DataFrame. Returns ------- Series Newly created Series with repeated elements. See Also -------- Index.repeat : Equivalent function for Index. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> s = pd.Series(['a', 'b', 'c']) >>> s 0 a 1 b 2 c dtype: object >>> s.repeat(2) 0 a 0 a 1 b 1 b 2 c 2 c dtype: object >>> s.repeat([1, 2, 3]) 0 a 1 b 1 b 2 c 2 c 2 c dtype: object """ nv.validate_repeat((), {"axis": axis}) new_index = self.index.repeat(repeats) new_values = self._values.repeat(repeats) return self._constructor(new_values, index=new_index, copy=False).__finalize__( self, method="repeat" ) def reset_index( self, level: IndexLabel = ..., *, drop: Literal[False] = ..., name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> DataFrame: ... def reset_index( self, level: IndexLabel = ..., *, drop: Literal[True], name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> Series: ... def reset_index( self, level: IndexLabel = ..., *, drop: bool = ..., name: Level = ..., inplace: Literal[True], allow_duplicates: bool = ..., ) -> None: ... def reset_index( self, level: IndexLabel = None, *, drop: bool = False, name: Level = lib.no_default, inplace: bool = False, allow_duplicates: bool = False, ) -> DataFrame | Series | None: """ Generate a new DataFrame or Series with the index reset. 
This is useful when the index needs to be treated as a column, or when the index is meaningless and needs to be reset to the default before another operation. Parameters ---------- level : int, str, tuple, or list, default optional For a Series with a MultiIndex, only remove the specified levels from the index. Removes all levels by default. drop : bool, default False Just reset the index, without inserting it as a column in the new DataFrame. name : object, optional The name to use for the column containing the original Series values. Uses ``self.name`` by default. This argument is ignored when `drop` is True. inplace : bool, default False Modify the Series in place (do not create a new object). allow_duplicates : bool, default False Allow duplicate column labels to be created. .. versionadded:: 1.5.0 Returns ------- Series or DataFrame or None When `drop` is False (the default), a DataFrame is returned. The newly created columns will come first in the DataFrame, followed by the original Series values. When `drop` is True, a `Series` is returned. In either case, if ``inplace=True``, no value is returned. See Also -------- DataFrame.reset_index: Analogous function for DataFrame. Examples -------- >>> s = pd.Series([1, 2, 3, 4], name='foo', ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx')) Generate a DataFrame with default index. >>> s.reset_index() idx foo 0 a 1 1 b 2 2 c 3 3 d 4 To specify the name of the new column use `name`. >>> s.reset_index(name='values') idx values 0 a 1 1 b 2 2 c 3 3 d 4 To generate a new Series with the default set `drop` to True. >>> s.reset_index(drop=True) 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 The `level` parameter is interesting for Series with a multi-level index. >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']), ... np.array(['one', 'two', 'one', 'two'])] >>> s2 = pd.Series( ... range(4), name='foo', ... index=pd.MultiIndex.from_arrays(arrays, ... names=['a', 'b'])) To remove a specific level from the Index, use `level`. >>> s2.reset_index(level='a') a foo b one bar 0 two bar 1 one baz 2 two baz 3 If `level` is not set, all levels are removed from the Index. >>> s2.reset_index() a b foo 0 bar one 0 1 bar two 1 2 baz one 2 3 baz two 3 """ inplace = validate_bool_kwarg(inplace, "inplace") if drop: new_index = default_index(len(self)) if level is not None: level_list: Sequence[Hashable] if not isinstance(level, (tuple, list)): level_list = [level] else: level_list = level level_list = [self.index._get_level_number(lev) for lev in level_list] if len(level_list) < self.index.nlevels: new_index = self.index.droplevel(level_list) if inplace: self.index = new_index elif using_copy_on_write(): new_ser = self.copy(deep=False) new_ser.index = new_index return new_ser.__finalize__(self, method="reset_index") else: return self._constructor( self._values.copy(), index=new_index, copy=False ).__finalize__(self, method="reset_index") elif inplace: raise TypeError( "Cannot reset_index inplace on a Series to create a DataFrame" ) else: if name is lib.no_default: # For backwards compatibility, keep columns as [0] instead of # [None] when self.name is None if self.name is None: name = 0 else: name = self.name df = self.to_frame(name) return df.reset_index( level=level, drop=drop, allow_duplicates=allow_duplicates ) return None # ---------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str: """ Return a string representation for a particular Series. 
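
        Formatting is delegated to :meth:`Series.to_string` with the
        parameters returned by ``fmt.get_series_repr_params``, which reflect
        the active ``display.*`` options (e.g. ``display.max_rows``).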
""" # pylint: disable=invalid-repr-returned repr_params = fmt.get_series_repr_params() return self.to_string(**repr_params) def to_string( self, buf: None = ..., na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> str: ... def to_string( self, buf: FilePath | WriteBuffer[str], na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> None: ... def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, na_rep: str = "NaN", float_format: str | None = None, header: bool = True, index: bool = True, length: bool = False, dtype: bool = False, name: bool = False, max_rows: int | None = None, min_rows: int | None = None, ) -> str | None: """ Render a string representation of the Series. Parameters ---------- buf : StringIO-like, optional Buffer to write to. na_rep : str, optional String representation of NaN to use, default 'NaN'. float_format : one-parameter function, optional Formatter function to apply to columns' elements if they are floats, default None. header : bool, default True Add the Series header (index name). index : bool, optional Add index (row) labels, default True. length : bool, default False Add the Series length. dtype : bool, default False Add the Series dtype. name : bool, default False Add the Series name if not None. max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. min_rows : int, optional The number of rows to display in a truncated repr (when number of rows is above `max_rows`). Returns ------- str or None String representation of Series if ``buf=None``, otherwise None. """ formatter = fmt.SeriesFormatter( self, name=name, length=length, header=header, index=index, dtype=dtype, na_rep=na_rep, float_format=float_format, min_rows=min_rows, max_rows=max_rows, ) result = formatter.to_string() # catch contract violations if not isinstance(result, str): raise AssertionError( "result must be of type str, type " f"of result is {repr(type(result).__name__)}" ) if buf is None: return result else: if hasattr(buf, "write"): buf.write(result) else: with open(buf, "w") as f: f.write(result) return None klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples=dedent( """Examples -------- >>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal") >>> print(s.to_markdown()) | | animal | |---:|:---------| | 0 | elk | | 1 | pig | | 2 | dog | | 3 | quetzal | Output markdown with a tabulate option. >>> print(s.to_markdown(tablefmt="grid")) +----+----------+ | | animal | +====+==========+ | 0 | elk | +----+----------+ | 1 | pig | +----+----------+ | 2 | dog | +----+----------+ | 3 | quetzal | +----+----------+""" ), ) def to_markdown( self, buf: IO[str] | None = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions = None, **kwargs, ) -> str | None: """ Print {klass} in Markdown-friendly format. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. mode : str, optional Mode in which file is opened, "wt" by default. index : bool, optional, default True Add index (row) labels. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 **kwargs These parameters will be passed to `tabulate \ <https://pypi.org/project/tabulate>`_. 
Returns ------- str {klass} in Markdown-friendly format. Notes ----- Requires the `tabulate <https://pypi.org/project/tabulate>`_ package. {examples} """ return self.to_frame().to_markdown( buf, mode, index, storage_options=storage_options, **kwargs ) # ---------------------------------------------------------------------- def items(self) -> Iterable[tuple[Hashable, Any]]: """ Lazily iterate over (index, value) tuples. This method returns an iterable tuple (index, value). This is convenient if you want to create a lazy iterator. Returns ------- iterable Iterable of tuples containing the (index, value) pairs from a Series. See Also -------- DataFrame.items : Iterate over (column name, Series) pairs. DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. Examples -------- >>> s = pd.Series(['A', 'B', 'C']) >>> for index, value in s.items(): ... print(f"Index : {index}, Value : {value}") Index : 0, Value : A Index : 1, Value : B Index : 2, Value : C """ return zip(iter(self.index), iter(self)) # ---------------------------------------------------------------------- # Misc public methods def keys(self) -> Index: """ Return alias for index. Returns ------- Index Index of the Series. """ return self.index def to_dict(self, into: type[dict] = dict) -> dict: """ Convert Series to {label -> value} dict or dict-like object. Parameters ---------- into : class, default dict The collections.abc.Mapping subclass to use as the return object. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- collections.abc.Mapping Key-value representation of Series. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.to_dict() {0: 1, 1: 2, 2: 3, 3: 4} >>> from collections import OrderedDict, defaultdict >>> s.to_dict(OrderedDict) OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> dd = defaultdict(list) >>> s.to_dict(dd) defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4}) """ # GH16122 into_c = com.standardize_mapping(into) if is_object_dtype(self) or is_extension_array_dtype(self): return into_c((k, maybe_box_native(v)) for k, v in self.items()) else: # Not an object dtype => all types will be the same so let the default # indexer return native python type return into_c(self.items()) def to_frame(self, name: Hashable = lib.no_default) -> DataFrame: """ Convert Series to DataFrame. Parameters ---------- name : object, optional The passed name should substitute for the series name (if it has one). Returns ------- DataFrame DataFrame representation of Series. Examples -------- >>> s = pd.Series(["a", "b", "c"], ... name="vals") >>> s.to_frame() vals 0 a 1 b 2 c """ columns: Index if name is lib.no_default: name = self.name if name is None: # default to [0], same as we would get with DataFrame(self) columns = default_index(1) else: columns = Index([name]) else: columns = Index([name]) mgr = self._mgr.to_2d_mgr(columns) df = self._constructor_expanddim(mgr) return df.__finalize__(self, method="to_frame") def _set_name(self, name, inplace: bool = False) -> Series: """ Set the Series name. Parameters ---------- name : str inplace : bool Whether to modify `self` directly or return a copy. """ inplace = validate_bool_kwarg(inplace, "inplace") ser = self if inplace else self.copy() ser.name = name return ser """ Examples -------- >>> ser = pd.Series([390., 350., 30., 20.], ... 
index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], name="Max Speed") >>> ser Falcon 390.0 Falcon 350.0 Parrot 30.0 Parrot 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", "b"]).mean() a 210.0 b 185.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(ser > 100).mean() Max Speed False 25.0 True 370.0 Name: Max Speed, dtype: float64 **Grouping by Indexes** We can groupby different levels of a hierarchical index using the `level` parameter: >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], ... ['Captive', 'Wild', 'Captive', 'Wild']] >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) >>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed") >>> ser Animal Type Falcon Captive 390.0 Wild 350.0 Parrot Captive 30.0 Wild 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Animal Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level="Type").mean() Type Captive 210.0 Wild 185.0 Name: Max Speed, dtype: float64 We can also choose to include `NA` in group keys or not by defining `dropna` parameter, the default setting is `True`. >>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan]) >>> ser.groupby(level=0).sum() a 3 b 3 dtype: int64 >>> ser.groupby(level=0, dropna=False).sum() a 3 b 3 NaN 3 dtype: int64 >>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot'] >>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed") >>> ser.groupby(["a", "b", "a", np.nan]).mean() a 210.0 b 350.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean() a 210.0 b 350.0 NaN 20.0 Name: Max Speed, dtype: float64 """ ) def groupby( self, by=None, axis: Axis = 0, level: IndexLabel = None, as_index: bool = True, sort: bool = True, group_keys: bool = True, observed: bool = False, dropna: bool = True, ) -> SeriesGroupBy: from pandas.core.groupby.generic import SeriesGroupBy if level is None and by is None: raise TypeError("You have to supply one of 'by' and 'level'") if not as_index: raise TypeError("as_index=False only valid with DataFrame") axis = self._get_axis_number(axis) return SeriesGroupBy( obj=self, keys=by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, observed=observed, dropna=dropna, ) # ---------------------------------------------------------------------- # Statistics, overridden ndarray methods # TODO: integrate bottleneck def count(self): """ Return number of non-NA/null observations in the Series. Returns ------- int or Series (if level specified) Number of non-null values in the Series. See Also -------- DataFrame.count : Count non-NA cells for each column or row. Examples -------- >>> s = pd.Series([0.0, 1.0, np.nan]) >>> s.count() 2 """ return notna(self._values).sum().astype("int64") def mode(self, dropna: bool = True) -> Series: """ Return the mode(s) of the Series. The mode is the value that appears most often. There can be multiple modes. Always returns Series even if only one value is returned. Parameters ---------- dropna : bool, default True Don't consider counts of NaN/NaT. Returns ------- Series Modes of the Series in sorted order. 
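
        Examples
        --------
        An added illustration (ties are all returned, in sorted order):

        >>> s = pd.Series([2, 4, 2, 2, 4, None])
        >>> s.mode()
        0    2.0
        dtype: float64
        >>> s = pd.Series([2, 4, None, 2, 4, None])
        >>> s.mode(dropna=False)
        0    2.0
        1    4.0
        2    NaN
        dtype: float64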
""" # TODO: Add option for bins like value_counts() values = self._values if isinstance(values, np.ndarray): res_values = algorithms.mode(values, dropna=dropna) else: res_values = values._mode(dropna=dropna) # Ensure index is type stable (should always use int index) return self._constructor( res_values, index=range(len(res_values)), name=self.name, copy=False ) def unique(self) -> ArrayLike: # pylint: disable=useless-parent-delegation """ Return unique values of Series object. Uniques are returned in order of appearance. Hash table-based unique, therefore does NOT sort. Returns ------- ndarray or ExtensionArray The unique values returned as a NumPy array. See Notes. See Also -------- Series.drop_duplicates : Return Series with duplicate values removed. unique : Top-level unique method for any 1-d array-like object. Index.unique : Return Index with unique values from an Index object. Notes ----- Returns the unique values as a NumPy array. In case of an extension-array backed Series, a new :class:`~api.extensions.ExtensionArray` of that type with just the unique values is returned. This includes * Categorical * Period * Datetime with Timezone * Datetime without Timezone * Timedelta * Interval * Sparse * IntegerNA See Examples section. Examples -------- >>> pd.Series([2, 1, 3, 3], name='A').unique() array([2, 1, 3]) >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00'] Length: 1, dtype: datetime64[ns] >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern') ... for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00-05:00'] Length: 1, dtype: datetime64[ns, US/Eastern] An Categorical will return categories in the order of appearance and with the same dtype. >>> pd.Series(pd.Categorical(list('baabc'))).unique() ['b', 'a', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'), ... ordered=True)).unique() ['b', 'a', 'c'] Categories (3, object): ['a' < 'b' < 'c'] """ return super().unique() def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[False] = ..., ignore_index: bool = ..., ) -> Series: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[True], ignore_index: bool = ... ) -> None: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: bool = ..., ignore_index: bool = ... ) -> Series | None: ... def drop_duplicates( self, *, keep: DropKeep = "first", inplace: bool = False, ignore_index: bool = False, ) -> Series | None: """ Return Series with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' Method to handle dropping duplicates: - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. inplace : bool, default ``False`` If ``True``, performs operation inplace and returns None. ignore_index : bool, default ``False`` If ``True``, the resulting axis will be labeled 0, 1, …, n - 1. .. versionadded:: 2.0.0 Returns ------- Series or None Series with duplicates dropped or None if ``inplace=True``. See Also -------- Index.drop_duplicates : Equivalent method on Index. DataFrame.drop_duplicates : Equivalent method on DataFrame. Series.duplicated : Related method on Series, indicating duplicate Series values. Series.unique : Return unique values as an array. Examples -------- Generate a Series with duplicated entries. 
>>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], ... name='animal') >>> s 0 lama 1 cow 2 lama 3 beetle 4 lama 5 hippo Name: animal, dtype: object With the 'keep' parameter, the selection behaviour of duplicated values can be changed. The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> s.drop_duplicates() 0 lama 1 cow 3 beetle 5 hippo Name: animal, dtype: object The value 'last' for parameter 'keep' keeps the last occurrence for each set of duplicated entries. >>> s.drop_duplicates(keep='last') 1 cow 3 beetle 4 lama 5 hippo Name: animal, dtype: object The value ``False`` for parameter 'keep' discards all sets of duplicated entries. >>> s.drop_duplicates(keep=False) 1 cow 3 beetle 5 hippo Name: animal, dtype: object """ inplace = validate_bool_kwarg(inplace, "inplace") result = super().drop_duplicates(keep=keep) if ignore_index: result.index = default_index(len(result)) if inplace: self._update_inplace(result) return None else: return result def duplicated(self, keep: DropKeep = "first") -> Series: """ Indicate duplicate Series values. Duplicated values are indicated as ``True`` values in the resulting Series. Either all duplicates, all except the first or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' Method to handle dropping duplicates: - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- Series[bool] Series indicating whether each value has occurred in the preceding values. See Also -------- Index.duplicated : Equivalent method on pandas.Index. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Series.drop_duplicates : Remove duplicate values from Series. Examples -------- By default, for each set of duplicated values, the first occurrence is set on False and all others on True: >>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> animals.duplicated() 0 False 1 False 2 True 3 False 4 True dtype: bool which is equivalent to >>> animals.duplicated(keep='first') 0 False 1 False 2 True 3 False 4 True dtype: bool By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True: >>> animals.duplicated(keep='last') 0 True 1 False 2 True 3 False 4 False dtype: bool By setting keep on ``False``, all duplicates are True: >>> animals.duplicated(keep=False) 0 True 1 False 2 True 3 False 4 True dtype: bool """ res = self._duplicated(keep=keep) result = self._constructor(res, index=self.index, copy=False) return result.__finalize__(self, method="duplicated") def idxmin(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the minimum value. If multiple values equal the minimum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the minimum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmin : Return indices of the minimum values along the given axis. 
DataFrame.idxmin : Return index of first occurrence of minimum over requested axis. Series.idxmax : Return index *label* of the first occurrence of maximum of values. Notes ----- This method is the Series version of ``ndarray.argmin``. This method returns the label of the minimum, while ``ndarray.argmin`` returns the position. To get the position, use ``series.values.argmin()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 1], ... index=['A', 'B', 'C', 'D']) >>> s A 1.0 B NaN C 4.0 D 1.0 dtype: float64 >>> s.idxmin() 'A' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmin(skipna=False) nan """ # error: Argument 1 to "argmin" of "IndexOpsMixin" has incompatible type "Union # [int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmin(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def idxmax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the maximum value. If multiple values equal the maximum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the maximum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmax : Return indices of the maximum values along the given axis. DataFrame.idxmax : Return index of first occurrence of maximum over requested axis. Series.idxmin : Return index *label* of the first occurrence of minimum of values. Notes ----- This method is the Series version of ``ndarray.argmax``. This method returns the label of the maximum, while ``ndarray.argmax`` returns the position. To get the position, use ``series.values.argmax()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 3, 4], ... index=['A', 'B', 'C', 'D', 'E']) >>> s A 1.0 B NaN C 4.0 D 3.0 E 4.0 dtype: float64 >>> s.idxmax() 'C' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmax(skipna=False) nan """ # error: Argument 1 to "argmax" of "IndexOpsMixin" has incompatible type # "Union[int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmax(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def round(self, decimals: int = 0, *args, **kwargs) -> Series: """ Round each value in a Series to the given number of decimals. Parameters ---------- decimals : int, default 0 Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Series Rounded values of the Series. See Also -------- numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. Examples -------- >>> s = pd.Series([0.1, 1.3, 2.7]) >>> s.round() 0 0.0 1 1.0 2 3.0 dtype: float64 """ nv.validate_round(args, kwargs) result = self._values.round(decimals) result = self._constructor(result, index=self.index, copy=False).__finalize__( self, method="round" ) return result def quantile( self, q: float = ..., interpolation: QuantileInterpolation = ... 
) -> float: ... def quantile( self, q: Sequence[float] | AnyArrayLike, interpolation: QuantileInterpolation = ..., ) -> Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = ..., interpolation: QuantileInterpolation = ..., ) -> float | Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = 0.5, interpolation: QuantileInterpolation = "linear", ) -> float | Series: """ Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) The quantile(s) to compute, which can lie in range: 0 <= q <= 1. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- float or Series If ``q`` is an array, a Series will be returned where the index is ``q`` and the values are the quantiles, otherwise a float will be returned. See Also -------- core.window.Rolling.quantile : Calculate the rolling quantile. numpy.percentile : Returns the q-th percentile(s) of the array elements. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.quantile(.5) 2.5 >>> s.quantile([.25, .5, .75]) 0.25 1.75 0.50 2.50 0.75 3.25 dtype: float64 """ validate_percentile(q) # We dispatch to DataFrame so that core.internals only has to worry # about 2D cases. df = self.to_frame() result = df.quantile(q=q, interpolation=interpolation, numeric_only=False) if result.ndim == 2: result = result.iloc[:, 0] if is_list_like(q): result.name = self.name idx = Index(q, dtype=np.float64) return self._constructor(result, index=idx, name=self.name) else: # scalar return result.iloc[0] def corr( self, other: Series, method: CorrelationMethod = "pearson", min_periods: int | None = None, ) -> float: """ Compute correlation with `other` Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the correlation function is applied. Parameters ---------- other : Series Series with which to compute the correlation. method : {'pearson', 'kendall', 'spearman'} or callable Method used to compute correlation: - pearson : Standard correlation coefficient - kendall : Kendall Tau correlation coefficient - spearman : Spearman rank correlation - callable: Callable with input two 1d ndarrays and returning a float. .. warning:: Note that the returned matrix from corr will have 1 along the diagonals and will be symmetric regardless of the callable's behavior. min_periods : int, optional Minimum number of observations needed to have a valid result. Returns ------- float Correlation with other. See Also -------- DataFrame.corr : Compute pairwise correlation between columns. DataFrame.corrwith : Compute pairwise correlation with another DataFrame or Series. Notes ----- Pearson, Kendall and Spearman correlation are currently computed using pairwise complete observations. 
* `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_ * `Kendall rank correlation coefficient <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_ * `Spearman's rank correlation coefficient <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_ Examples -------- >>> def histogram_intersection(a, b): ... v = np.minimum(a, b).sum().round(decimals=1) ... return v >>> s1 = pd.Series([.2, .0, .6, .2]) >>> s2 = pd.Series([.3, .6, .0, .1]) >>> s1.corr(s2, method=histogram_intersection) 0.3 """ # noqa:E501 this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan if method in ["pearson", "spearman", "kendall"] or callable(method): return nanops.nancorr( this.values, other.values, method=method, min_periods=min_periods ) raise ValueError( "method must be either 'pearson', " "'spearman', 'kendall', or a callable, " f"'{method}' was supplied" ) def cov( self, other: Series, min_periods: int | None = None, ddof: int | None = 1, ) -> float: """ Compute covariance with Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the covariance is calculated. Parameters ---------- other : Series Series with which to compute the covariance. min_periods : int, optional Minimum number of observations needed to have a valid result. ddof : int, default 1 Delta degrees of freedom. The divisor used in calculations is ``N - ddof``, where ``N`` represents the number of elements. .. versionadded:: 1.1.0 Returns ------- float Covariance between Series and other normalized by N-1 (unbiased estimator). See Also -------- DataFrame.cov : Compute pairwise covariance of columns. Examples -------- >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035]) >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198]) >>> s1.cov(s2) -0.01685762652715874 """ this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan return nanops.nancov( this.values, other.values, min_periods=min_periods, ddof=ddof ) klass="Series", extra_params="", other_klass="DataFrame", examples=dedent( """ Difference with previous row >>> s = pd.Series([1, 1, 2, 3, 5, 8]) >>> s.diff() 0 NaN 1 0.0 2 1.0 3 1.0 4 2.0 5 3.0 dtype: float64 Difference with 3rd previous row >>> s.diff(periods=3) 0 NaN 1 NaN 2 NaN 3 2.0 4 4.0 5 6.0 dtype: float64 Difference with following row >>> s.diff(periods=-1) 0 0.0 1 -1.0 2 -1.0 3 -2.0 4 -3.0 5 NaN dtype: float64 Overflow in input dtype >>> s = pd.Series([1, 0], dtype=np.uint8) >>> s.diff() 0 NaN 1 255.0 dtype: float64""" ), ) def diff(self, periods: int = 1) -> Series: """ First discrete difference of element. Calculates the difference of a {klass} element compared with another element in the {klass} (default is element in previous row). Parameters ---------- periods : int, default 1 Periods to shift for calculating difference, accepts negative values. {extra_params} Returns ------- {klass} First differences of the Series. See Also -------- {klass}.pct_change: Percent change over given number of periods. {klass}.shift: Shift index by desired number of periods with an optional time freq. {other_klass}.diff: First discrete difference of object. Notes ----- For boolean dtypes, this uses :meth:`operator.xor` rather than :meth:`operator.sub`. The result is calculated according to current dtype in {klass}, however dtype of the result is always float64. 
Examples -------- {examples} """ result = algorithms.diff(self._values, periods) return self._constructor(result, index=self.index, copy=False).__finalize__( self, method="diff" ) def autocorr(self, lag: int = 1) -> float: """ Compute the lag-N autocorrelation. This method computes the Pearson correlation between the Series and its shifted self. Parameters ---------- lag : int, default 1 Number of lags to apply before performing autocorrelation. Returns ------- float The Pearson correlation between self and self.shift(lag). See Also -------- Series.corr : Compute the correlation between two Series. Series.shift : Shift index by desired number of periods. DataFrame.corr : Compute pairwise correlation of columns. DataFrame.corrwith : Compute pairwise correlation between rows or columns of two DataFrame objects. Notes ----- If the Pearson correlation is not well defined return 'NaN'. Examples -------- >>> s = pd.Series([0.25, 0.5, 0.2, -0.05]) >>> s.autocorr() # doctest: +ELLIPSIS 0.10355... >>> s.autocorr(lag=2) # doctest: +ELLIPSIS -0.99999... If the Pearson correlation is not well defined, then 'NaN' is returned. >>> s = pd.Series([1, 0, 0, 0]) >>> s.autocorr() nan """ return self.corr(self.shift(lag)) def dot(self, other: AnyArrayLike) -> Series | np.ndarray: """ Compute the dot product between the Series and the columns of other. This method computes the dot product between the Series and another one, or the Series and each columns of a DataFrame, or the Series and each columns of an array. It can also be called using `self @ other` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the dot product with its columns. Returns ------- scalar, Series or numpy.ndarray Return the dot product of the Series and other if other is a Series, the Series of the dot product of Series and each rows of other if other is a DataFrame or a numpy.ndarray between the Series and each columns of the numpy array. See Also -------- DataFrame.dot: Compute the matrix product with the DataFrame. Series.mul: Multiplication of series and other, element-wise. Notes ----- The Series and other has to share the same index if other is a Series or a DataFrame. Examples -------- >>> s = pd.Series([0, 1, 2, 3]) >>> other = pd.Series([-1, 2, -3, 4]) >>> s.dot(other) 8 >>> s @ other 8 >>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(df) 0 24 1 14 dtype: int64 >>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(arr) array([24, 14]) """ if isinstance(other, (Series, ABCDataFrame)): common = self.index.union(other.index) if len(common) > len(self.index) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(index=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right.values else: lvals = self.values rvals = np.asarray(other) if lvals.shape[0] != rvals.shape[0]: raise Exception( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, ABCDataFrame): return self._constructor( np.dot(lvals, rvals), index=other.columns, copy=False ).__finalize__(self, method="dot") elif isinstance(other, Series): return np.dot(lvals, rvals) elif isinstance(rvals, np.ndarray): return np.dot(lvals, rvals) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. 
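
        A quick illustrative doctest (this operator simply delegates to
        :meth:`Series.dot`):

        >>> s = pd.Series([0, 1, 2, 3])
        >>> s @ pd.Series([-1, 2, -3, 4])
        8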
""" return self.dot(other) def __rmatmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(np.transpose(other)) # Signature of "searchsorted" incompatible with supertype "IndexOpsMixin" def searchsorted( # type: ignore[override] self, value: NumpyValueArrayLike | ExtensionArray, side: Literal["left", "right"] = "left", sorter: NumpySorter = None, ) -> npt.NDArray[np.intp] | np.intp: return base.IndexOpsMixin.searchsorted(self, value, side=side, sorter=sorter) # ------------------------------------------------------------------- # Combination def _append( self, to_append, ignore_index: bool = False, verify_integrity: bool = False ): from pandas.core.reshape.concat import concat if isinstance(to_append, (list, tuple)): to_concat = [self] to_concat.extend(to_append) else: to_concat = [self, to_append] if any(isinstance(x, (ABCDataFrame,)) for x in to_concat[1:]): msg = "to_append should be a Series or list/tuple of Series, got DataFrame" raise TypeError(msg) return concat( to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity ) def _binop(self, other: Series, func, level=None, fill_value=None): """ Perform generic binary operation with optional fill value. Parameters ---------- other : Series func : binary operator fill_value : float or object Value to substitute for NA/null values. If both Series are NA in a location, the result will be NA regardless of the passed fill value. level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. Returns ------- Series """ if not isinstance(other, Series): raise AssertionError("Other operand must be Series") this = self if not self.index.equals(other.index): this, other = self.align(other, level=level, join="outer", copy=False) this_vals, other_vals = ops.fill_binop(this._values, other._values, fill_value) with np.errstate(all="ignore"): result = func(this_vals, other_vals) name = ops.get_op_result_name(self, other) return this._construct_result(result, name) def _construct_result( self, result: ArrayLike | tuple[ArrayLike, ArrayLike], name: Hashable ) -> Series | tuple[Series, Series]: """ Construct an appropriately-labelled Series from the result of an op. Parameters ---------- result : ndarray or ExtensionArray name : Label Returns ------- Series In the case of __divmod__ or __rdivmod__, a 2-tuple of Series. """ if isinstance(result, tuple): # produced by divmod or rdivmod res1 = self._construct_result(result[0], name=name) res2 = self._construct_result(result[1], name=name) # GH#33427 assertions to keep mypy happy assert isinstance(res1, Series) assert isinstance(res2, Series) return (res1, res2) # TODO: result should always be ArrayLike, but this fails for some # JSONArray tests dtype = getattr(result, "dtype", None) out = self._constructor(result, index=self.index, dtype=dtype) out = out.__finalize__(self) # Set the result's name after __finalize__ is called because __finalize__ # would set it back to self.name out.name = name return out _shared_docs["compare"], """ Returns ------- Series or DataFrame If axis is 0 or 'index' the result will be a Series. The resulting index will be a MultiIndex with 'self' and 'other' stacked alternately at the inner level. If axis is 1 or 'columns' the result will be a DataFrame. It will have two columns namely 'self' and 'other'. See Also -------- DataFrame.compare : Compare with another DataFrame and show differences. Notes ----- Matching NaNs will not appear as a difference. 
Examples -------- >>> s1 = pd.Series(["a", "b", "c", "d", "e"]) >>> s2 = pd.Series(["a", "a", "c", "b", "e"]) Align the differences on columns >>> s1.compare(s2) self other 1 b a 3 d b Stack the differences on indices >>> s1.compare(s2, align_axis=0) 1 self b other a 3 self d other b dtype: object Keep all original rows >>> s1.compare(s2, keep_shape=True) self other 0 NaN NaN 1 b a 2 NaN NaN 3 d b 4 NaN NaN Keep all original rows and also all original values >>> s1.compare(s2, keep_shape=True, keep_equal=True) self other 0 a a 1 b a 2 c c 3 d b 4 e e """, klass=_shared_doc_kwargs["klass"], ) def compare( self, other: Series, align_axis: Axis = 1, keep_shape: bool = False, keep_equal: bool = False, result_names: Suffixes = ("self", "other"), ) -> DataFrame | Series: return super().compare( other=other, align_axis=align_axis, keep_shape=keep_shape, keep_equal=keep_equal, result_names=result_names, ) def combine( self, other: Series | Hashable, func: Callable[[Hashable, Hashable], Hashable], fill_value: Hashable = None, ) -> Series: """ Combine the Series with a Series or scalar according to `func`. Combine the Series and `other` using `func` to perform elementwise selection for combined Series. `fill_value` is assumed when value is missing at some index from one of the two objects being combined. Parameters ---------- other : Series or scalar The value(s) to be combined with the `Series`. func : function Function that takes two scalars as inputs and returns an element. fill_value : scalar, optional The value to assume when an index is missing from one Series or the other. The default specifies to use the appropriate NaN value for the underlying dtype of the Series. Returns ------- Series The result of combining the Series with the other object. See Also -------- Series.combine_first : Combine Series values, choosing the calling Series' values first. Examples -------- Consider 2 Datasets ``s1`` and ``s2`` containing highest clocked speeds of different birds. >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0}) >>> s1 falcon 330.0 eagle 160.0 dtype: float64 >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0}) >>> s2 falcon 345.0 eagle 200.0 duck 30.0 dtype: float64 Now, to combine the two datasets and view the highest speeds of the birds across the two datasets >>> s1.combine(s2, max) duck NaN eagle 200.0 falcon 345.0 dtype: float64 In the previous example, the resulting value for duck is missing, because the maximum of a NaN and a float is a NaN. So, in the example, we set ``fill_value=0``, so the maximum value returned will be the value from some dataset. 
>>> s1.combine(s2, max, fill_value=0) duck 30.0 eagle 200.0 falcon 345.0 dtype: float64 """ if fill_value is None: fill_value = na_value_for_dtype(self.dtype, compat=False) if isinstance(other, Series): # If other is a Series, result is based on union of Series, # so do this element by element new_index = self.index.union(other.index) new_name = ops.get_op_result_name(self, other) new_values = np.empty(len(new_index), dtype=object) for i, idx in enumerate(new_index): lv = self.get(idx, fill_value) rv = other.get(idx, fill_value) with np.errstate(all="ignore"): new_values[i] = func(lv, rv) else: # Assume that other is a scalar, so apply the function for # each element in the Series new_index = self.index new_values = np.empty(len(new_index), dtype=object) with np.errstate(all="ignore"): new_values[:] = [func(lv, other) for lv in self._values] new_name = self.name # try_float=False is to match agg_series npvalues = lib.maybe_convert_objects(new_values, try_float=False) res_values = maybe_cast_pointwise_result(npvalues, self.dtype, same_dtype=False) return self._constructor(res_values, index=new_index, name=new_name, copy=False) def combine_first(self, other) -> Series: """ Update null elements with value in the same location in 'other'. Combine two Series objects by filling null values in one Series with non-null values from the other Series. Result index will be the union of the two indexes. Parameters ---------- other : Series The value(s) to be used for filling null values. Returns ------- Series The result of combining the provided Series with the other object. See Also -------- Series.combine : Perform element-wise operation on two Series using a given function. Examples -------- >>> s1 = pd.Series([1, np.nan]) >>> s2 = pd.Series([3, 4, 5]) >>> s1.combine_first(s2) 0 1.0 1 4.0 2 5.0 dtype: float64 Null values still persist if the location of that null value does not exist in `other` >>> s1 = pd.Series({'falcon': np.nan, 'eagle': 160.0}) >>> s2 = pd.Series({'eagle': 200.0, 'duck': 30.0}) >>> s1.combine_first(s2) duck 30.0 eagle 160.0 falcon NaN dtype: float64 """ new_index = self.index.union(other.index) this = self.reindex(new_index, copy=False) other = other.reindex(new_index, copy=False) if this.dtype.kind == "M" and other.dtype.kind != "M": other = to_datetime(other) return this.where(notna(this), other) def update(self, other: Series | Sequence | Mapping) -> None: """ Modify Series in place using values from passed Series. Uses non-NA values from passed Series to make updates. Aligns on index. Parameters ---------- other : Series, or object coercible into Series Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6])) >>> s 0 4 1 5 2 6 dtype: int64 >>> s = pd.Series(['a', 'b', 'c']) >>> s.update(pd.Series(['d', 'e'], index=[0, 2])) >>> s 0 d 1 b 2 e dtype: object >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6, 7, 8])) >>> s 0 4 1 5 2 6 dtype: int64 If ``other`` contains NaNs the corresponding values are not updated in the original Series. 
>>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, np.nan, 6])) >>> s 0 4 1 2 2 6 dtype: int64 ``other`` can also be a non-Series object type that is coercible into a Series >>> s = pd.Series([1, 2, 3]) >>> s.update([4, np.nan, 6]) >>> s 0 4 1 2 2 6 dtype: int64 >>> s = pd.Series([1, 2, 3]) >>> s.update({1: 9}) >>> s 0 1 1 9 2 3 dtype: int64 """ if not isinstance(other, Series): other = Series(other) other = other.reindex_like(self) mask = notna(other) self._mgr = self._mgr.putmask(mask=mask, new=other) self._maybe_update_cacher() # ---------------------------------------------------------------------- # Reindexing, sorting def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[False] = ..., kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> Series: ... def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[True], kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> None: ... def sort_values( self, *, axis: Axis = 0, ascending: bool | int | Sequence[bool] | Sequence[int] = True, inplace: bool = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool = False, key: ValueKeyFunc = None, ) -> Series | None: """ Sort by the values. Sort a Series in ascending or descending order by some criterion. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. ascending : bool or list of bools, default True If True, sort values in ascending order, otherwise descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the series values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return an array-like. .. versionadded:: 1.1.0 Returns ------- Series or None Series ordered by values or None if ``inplace=True``. See Also -------- Series.sort_index : Sort by the Series indices. DataFrame.sort_values : Sort DataFrame by the values along either axis. DataFrame.sort_index : Sort DataFrame by indices. Examples -------- >>> s = pd.Series([np.nan, 1, 3, 10, 5]) >>> s 0 NaN 1 1.0 2 3.0 3 10.0 4 5.0 dtype: float64 Sort values ascending order (default behaviour) >>> s.sort_values(ascending=True) 1 1.0 2 3.0 4 5.0 3 10.0 0 NaN dtype: float64 Sort values descending order >>> s.sort_values(ascending=False) 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN dtype: float64 Sort values putting NAs first >>> s.sort_values(na_position='first') 0 NaN 1 1.0 2 3.0 4 5.0 3 10.0 dtype: float64 Sort a series of strings >>> s = pd.Series(['z', 'b', 'd', 'a', 'c']) >>> s 0 z 1 b 2 d 3 a 4 c dtype: object >>> s.sort_values() 3 a 1 b 4 c 2 d 0 z dtype: object Sort using a key function. Your `key` function will be given the ``Series`` of values and should return an array-like. 
>>> s = pd.Series(['a', 'B', 'c', 'D', 'e']) >>> s.sort_values() 1 B 3 D 0 a 2 c 4 e dtype: object >>> s.sort_values(key=lambda x: x.str.lower()) 0 a 1 B 2 c 3 D 4 e dtype: object NumPy ufuncs work well here. For example, we can sort by the ``sin`` of the value >>> s = pd.Series([-4, -2, 0, 2, 4]) >>> s.sort_values(key=np.sin) 1 -2 4 4 2 0 0 -4 3 2 dtype: int64 More complicated user-defined functions can be used, as long as they expect a Series and return an array-like >>> s.sort_values(key=lambda x: (np.tan(x.cumsum()))) 0 -4 3 2 4 4 1 -2 2 0 dtype: int64 """ inplace = validate_bool_kwarg(inplace, "inplace") # Validate the axis parameter self._get_axis_number(axis) # GH 5856/5853 if inplace and self._is_cached: raise ValueError( "This Series is a view of some other array, to " "sort in-place you must create a copy" ) if is_list_like(ascending): ascending = cast(Sequence[Union[bool, int]], ascending) if len(ascending) != 1: raise ValueError( f"Length of ascending ({len(ascending)}) must be 1 for Series" ) ascending = ascending[0] ascending = validate_ascending(ascending) if na_position not in ["first", "last"]: raise ValueError(f"invalid na_position: {na_position}") # GH 35922. Make sorting stable by leveraging nargsort values_to_sort = ensure_key_mapped(self, key)._values if key else self._values sorted_index = nargsort(values_to_sort, kind, bool(ascending), na_position) if is_range_indexer(sorted_index, len(sorted_index)): if inplace: return self._update_inplace(self) return self.copy(deep=None) result = self._constructor( self._values[sorted_index], index=self.index[sorted_index], copy=False ) if ignore_index: result.index = default_index(len(sorted_index)) if not inplace: return result.__finalize__(self, method="sort_values") self._update_inplace(result) return None def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[True], kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> None: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[False] = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: bool = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series | None: ... def sort_index( self, *, axis: Axis = 0, level: IndexLabel = None, ascending: bool | Sequence[bool] = True, inplace: bool = False, kind: SortKind = "quicksort", na_position: NaPosition = "last", sort_remaining: bool = True, ignore_index: bool = False, key: IndexKeyFunc = None, ) -> Series | None: """ Sort Series by index labels. Returns a new Series sorted by label if `inplace` argument is ``False``, otherwise updates the original series and returns None. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. level : int, optional If not None, sort on values in specified index level(s). ascending : bool or list-like of bools, default True Sort ascending vs. descending. When the index is a MultiIndex the sort direction can be controlled for each level individually. inplace : bool, default False If True, perform operation in-place. 
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- Series or None The original Series sorted by the labels or None if ``inplace=True``. See Also -------- DataFrame.sort_index: Sort DataFrame by the index. DataFrame.sort_values: Sort DataFrame by the value. Series.sort_values : Sort Series by the value. Examples -------- >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4]) >>> s.sort_index() 1 c 2 b 3 a 4 d dtype: object Sort Descending >>> s.sort_index(ascending=False) 4 d 3 a 2 b 1 c dtype: object By default NaNs are put at the end, but use `na_position` to place them at the beginning >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan]) >>> s.sort_index(na_position='first') NaN d 1.0 c 2.0 b 3.0 a dtype: object Specify index level to sort >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo', ... 'baz', 'baz', 'bar', 'bar']), ... np.array(['two', 'one', 'two', 'one', ... 'two', 'one', 'two', 'one'])] >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays) >>> s.sort_index(level=1) bar one 8 baz one 6 foo one 4 qux one 2 bar two 7 baz two 5 foo two 3 qux two 1 dtype: int64 Does not sort by remaining levels when sorting by levels >>> s.sort_index(level=1, sort_remaining=False) qux one 2 foo one 4 baz one 6 bar one 8 qux two 1 foo two 3 baz two 5 bar two 7 dtype: int64 Apply a key function before sorting >>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd']) >>> s.sort_index(key=lambda x : x.str.lower()) A 1 b 2 C 3 d 4 dtype: int64 """ return super().sort_index( axis=axis, level=level, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position, sort_remaining=sort_remaining, ignore_index=ignore_index, key=key, ) def argsort( self, axis: Axis = 0, kind: SortKind = "quicksort", order: None = None, ) -> Series: """ Return the integer indices that would sort the Series values. Override ndarray.argsort. Argsorts the value, omitting NA/null values, and places the result in the same locations as the non-NA values. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. order : None Has no effect but is accepted for compatibility with numpy. Returns ------- Series[np.intp] Positions of values within the sort order with -1 indicating nan values. 
See Also -------- numpy.ndarray.argsort : Returns the indices that would sort this array. """ values = self._values mask = isna(values) if mask.any(): result = np.full(len(self), -1, dtype=np.intp) notmask = ~mask result[notmask] = np.argsort(values[notmask], kind=kind) else: result = np.argsort(values, kind=kind) res = self._constructor( result, index=self.index, name=self.name, dtype=np.intp, copy=False ) return res.__finalize__(self, method="argsort") def nlargest( self, n: int = 5, keep: Literal["first", "last", "all"] = "first" ) -> Series: """ Return the largest `n` elements. Parameters ---------- n : int, default 5 Return this many descending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` largest values in the Series, sorted in decreasing order. See Also -------- Series.nsmallest: Get the `n` smallest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. Notes ----- Faster than ``.sort_values(ascending=False).head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Malta": 434000, "Maldives": 434000, ... "Brunei": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Malta 434000 Maldives 434000 Brunei 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` largest elements where ``n=5`` by default. >>> s.nlargest() France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3``. Default `keep` value is 'first' so Malta will be kept. >>> s.nlargest(3) France 65000000 Italy 59000000 Malta 434000 dtype: int64 The `n` largest elements where ``n=3`` and keeping the last duplicates. Brunei will be kept since it is the last with value 434000 based on the index order. >>> s.nlargest(3, keep='last') France 65000000 Italy 59000000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3`` with all duplicates kept. Note that the returned Series has five elements due to the three duplicates. >>> s.nlargest(3, keep='all') France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nlargest() def nsmallest(self, n: int = 5, keep: str = "first") -> Series: """ Return the smallest `n` elements. Parameters ---------- n : int, default 5 Return this many ascending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` smallest values in the Series, sorted in increasing order. See Also -------- Series.nlargest: Get the `n` largest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. 
Notes ----- Faster than ``.sort_values().head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Brunei": 434000, "Malta": 434000, ... "Maldives": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Brunei 434000 Malta 434000 Maldives 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` smallest elements where ``n=5`` by default. >>> s.nsmallest() Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 Iceland 337000 dtype: int64 The `n` smallest elements where ``n=3``. Default `keep` value is 'first' so Nauru and Tuvalu will be kept. >>> s.nsmallest(3) Montserrat 5200 Nauru 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` and keeping the last duplicates. Anguilla and Tuvalu will be kept since they are the last with value 11300 based on the index order. >>> s.nsmallest(3, keep='last') Montserrat 5200 Anguilla 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` with all duplicates kept. Note that the returned Series has four elements due to the three duplicates. >>> s.nsmallest(3, keep='all') Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nsmallest() klass=_shared_doc_kwargs["klass"], extra_params=dedent( """copy : bool, default True Whether to copy underlying data.""" ), examples=dedent( """\ Examples -------- >>> s = pd.Series( ... ["A", "B", "A", "C"], ... index=[ ... ["Final exam", "Final exam", "Coursework", "Coursework"], ... ["History", "Geography", "History", "Geography"], ... ["January", "February", "March", "April"], ... ], ... ) >>> s Final exam History January A Geography February B Coursework History March A Geography April C dtype: object In the following example, we will swap the levels of the indices. Here, we will swap the levels column-wise, but levels can be swapped row-wise in a similar manner. Note that column-wise is the default behaviour. By not supplying any arguments for i and j, we swap the last and second to last indices. >>> s.swaplevel() Final exam January History A February Geography B Coursework March History A April Geography C dtype: object By supplying one argument, we can choose which index to swap the last index with. We can for example swap the first index with the last one as follows. >>> s.swaplevel(0) January History Final exam A February Geography Final exam B March History Coursework A April Geography Coursework C dtype: object We can also define explicitly which indices we want to swap by supplying values for both i and j. Here, we for example swap the first and second indices. >>> s.swaplevel(0, 1) History Final exam January A Geography Final exam February B History Coursework March A Geography Coursework April C dtype: object""" ), ) def swaplevel( self, i: Level = -2, j: Level = -1, copy: bool | None = None ) -> Series: """ Swap levels i and j in a :class:`MultiIndex`. Default is to swap the two innermost levels of the index. Parameters ---------- i, j : int or str Levels of the indices to be swapped. Can pass level name as string. {extra_params} Returns ------- {klass} {klass} with levels swapped in MultiIndex. 
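
        See Also
        --------
        DataFrame.swaplevel : Swap levels i and j in a ``MultiIndex`` on a
            particular axis.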
{examples} """ assert isinstance(self.index, MultiIndex) result = self.copy(deep=copy and not using_copy_on_write()) result.index = self.index.swaplevel(i, j) return result def reorder_levels(self, order: Sequence[Level]) -> Series: """ Rearrange index levels using input order. May not drop or duplicate levels. Parameters ---------- order : list of int representing new level order Reference level by number or key. Returns ------- type of caller (new object) """ if not isinstance(self.index, MultiIndex): # pragma: no cover raise Exception("Can only reorder levels on a hierarchical axis.") result = self.copy(deep=None) assert isinstance(result.index, MultiIndex) result.index = result.index.reorder_levels(order) return result def explode(self, ignore_index: bool = False) -> Series: """ Transform each element of a list-like to a row. Parameters ---------- ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.1.0 Returns ------- Series Exploded lists to rows; index will be duplicated for these rows. See Also -------- Series.str.split : Split string values on specified separator. Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame. DataFrame.melt : Unpivot a DataFrame from wide format to long format. DataFrame.explode : Explode a DataFrame from list-like columns to long format. Notes ----- This routine will explode list-likes including lists, tuples, sets, Series, and np.ndarray. The result dtype of the subset rows will be object. Scalars will be returned unchanged, and empty list-likes will result in a np.nan for that row. In addition, the ordering of elements in the output will be non-deterministic when exploding sets. Reference :ref:`the user guide <reshaping.explode>` for more examples. Examples -------- >>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]]) >>> s 0 [1, 2, 3] 1 foo 2 [] 3 [3, 4] dtype: object >>> s.explode() 0 1 0 2 0 3 1 foo 2 NaN 3 3 3 4 dtype: object """ if not len(self) or not is_object_dtype(self): result = self.copy() return result.reset_index(drop=True) if ignore_index else result values, counts = reshape.explode(np.asarray(self._values)) if ignore_index: index = default_index(len(values)) else: index = self.index.repeat(counts) return self._constructor(values, index=index, name=self.name, copy=False) def unstack(self, level: IndexLabel = -1, fill_value: Hashable = None) -> DataFrame: """ Unstack, also known as pivot, Series with MultiIndex to produce DataFrame. Parameters ---------- level : int, str, or list of these, default last level Level(s) to unstack, can pass level name. fill_value : scalar value, default None Value to use when replacing NaN values. Returns ------- DataFrame Unstacked Series. Notes ----- Reference :ref:`the user guide <reshaping.stacking>` for more examples. Examples -------- >>> s = pd.Series([1, 2, 3, 4], ... index=pd.MultiIndex.from_product([['one', 'two'], ... ['a', 'b']])) >>> s one a 1 b 2 two a 3 b 4 dtype: int64 >>> s.unstack(level=-1) a b one 1 2 two 3 4 >>> s.unstack(level=0) one two a 1 3 b 2 4 """ from pandas.core.reshape.reshape import unstack return unstack(self, level, fill_value) # ---------------------------------------------------------------------- # function application def map( self, arg: Callable | Mapping | Series, na_action: Literal["ignore"] | None = None, ) -> Series: """ Map values of Series according to an input mapping or function. 
Used for substituting each value in a Series with another value, that may be derived from a function, a ``dict`` or a :class:`Series`. Parameters ---------- arg : function, collections.abc.Mapping subclass or Series Mapping correspondence. na_action : {None, 'ignore'}, default None If 'ignore', propagate NaN values, without passing them to the mapping correspondence. Returns ------- Series Same index as caller. See Also -------- Series.apply : For applying more complex functions on a Series. DataFrame.apply : Apply a function row-/column-wise. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. Notes ----- When ``arg`` is a dictionary, values in Series that are not in the dictionary (as keys) are converted to ``NaN``. However, if the dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e. provides a method for default values), then this default is used rather than ``NaN``. Examples -------- >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit']) >>> s 0 cat 1 dog 2 NaN 3 rabbit dtype: object ``map`` accepts a ``dict`` or a ``Series``. Values that are not found in the ``dict`` are converted to ``NaN``, unless the dict has a default value (e.g. ``defaultdict``): >>> s.map({'cat': 'kitten', 'dog': 'puppy'}) 0 kitten 1 puppy 2 NaN 3 NaN dtype: object It also accepts a function: >>> s.map('I am a {}'.format) 0 I am a cat 1 I am a dog 2 I am a nan 3 I am a rabbit dtype: object To avoid applying the function to missing values (and keep them as ``NaN``) ``na_action='ignore'`` can be used: >>> s.map('I am a {}'.format, na_action='ignore') 0 I am a cat 1 I am a dog 2 NaN 3 I am a rabbit dtype: object """ new_values = self._map_values(arg, na_action=na_action) return self._constructor(new_values, index=self.index, copy=False).__finalize__( self, method="map" ) def _gotitem(self, key, ndim, subset=None) -> Series: """ Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : {1, 2} Requested ndim of result. subset : object, default None Subset to act on. """ return self _agg_see_also_doc = dedent( """ See Also -------- Series.apply : Invoke function on a Series. Series.transform : Transform function producing a Series with like indexes. """ ) _agg_examples_doc = dedent( """ Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.agg('min') 1 >>> s.agg(['min', 'max']) min 1 max 4 dtype: int64 """ ) _shared_docs["aggregate"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, ) def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs): # Validate the axis parameter self._get_axis_number(axis) # if func is None, will switch to user-provided "named aggregation" kwargs if func is None: func = dict(kwargs.items()) op = SeriesApply(self, func, convert_dtype=False, args=args, kwargs=kwargs) result = op.agg() return result agg = aggregate # error: Signature of "any" incompatible with supertype "NDFrame" [override] def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: None = ..., **kwargs, ) -> bool: ... def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: Level, **kwargs, ) -> Series | bool: ... # error: Missing return statement def any( # type: ignore[empty-body] self, axis: Axis = 0, bool_only: bool | None = None, skipna: bool = True, level: Level | None = None, **kwargs, ) -> Series | bool: ... 
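
    # NOTE: the stub above deliberately has an empty body (hence the
    # "type: ignore[empty-body]"); the concrete reduction is attached to the
    # class at import time (in pandas, via ``Series._add_numeric_operations()``),
    # which is why mypy reports the missing return statement.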
_shared_docs["transform"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], ) def transform( self, func: AggFuncType, axis: Axis = 0, *args, **kwargs ) -> DataFrame | Series: # Validate axis argument self._get_axis_number(axis) result = SeriesApply( self, func=func, convert_dtype=True, args=args, kwargs=kwargs ).transform() return result def apply( self, func: AggFuncType, convert_dtype: bool = True, args: tuple[Any, ...] = (), **kwargs, ) -> DataFrame | Series: """ Invoke function on values of Series. Can be ufunc (a NumPy function that applies to the entire Series) or a Python function that only works on single values. Parameters ---------- func : function Python function or NumPy ufunc to apply. convert_dtype : bool, default True Try to find better dtype for elementwise function results. If False, leave as dtype=object. Note that the dtype is always preserved for some extension array dtypes, such as Categorical. args : tuple Positional arguments passed to func after the series value. **kwargs Additional keyword arguments passed to func. Returns ------- Series or DataFrame If func returns a Series object the result will be a DataFrame. See Also -------- Series.map: For element-wise operations. Series.agg: Only perform aggregating type operations. Series.transform: Only perform transforming type operations. Notes ----- Functions that mutate the passed object can produce unexpected behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` for more details. Examples -------- Create a series with typical summer temperatures for each city. >>> s = pd.Series([20, 21, 12], ... index=['London', 'New York', 'Helsinki']) >>> s London 20 New York 21 Helsinki 12 dtype: int64 Square the values by defining a function and passing it as an argument to ``apply()``. >>> def square(x): ... return x ** 2 >>> s.apply(square) London 400 New York 441 Helsinki 144 dtype: int64 Square the values by passing an anonymous function as an argument to ``apply()``. >>> s.apply(lambda x: x ** 2) London 400 New York 441 Helsinki 144 dtype: int64 Define a custom function that needs additional positional arguments and pass these additional arguments using the ``args`` keyword. >>> def subtract_custom_value(x, custom_value): ... return x - custom_value >>> s.apply(subtract_custom_value, args=(5,)) London 15 New York 16 Helsinki 7 dtype: int64 Define a custom function that takes keyword arguments and pass these arguments to ``apply``. >>> def add_custom_values(x, **kwargs): ... for month in kwargs: ... x += kwargs[month] ... return x >>> s.apply(add_custom_values, june=30, july=20, august=25) London 95 New York 96 Helsinki 87 dtype: int64 Use a function from the Numpy library. >>> s.apply(np.log) London 2.995732 New York 3.044522 Helsinki 2.484907 dtype: float64 """ return SeriesApply(self, func, convert_dtype, args, kwargs).apply() def _reduce( self, op, name: str, *, axis: Axis = 0, skipna: bool = True, numeric_only: bool = False, filter_type=None, **kwds, ): """ Perform a reduction operation. If we have an ndarray as a value, then simply perform the operation, otherwise delegate to the object. 
""" delegate = self._values if axis is not None: self._get_axis_number(axis) if isinstance(delegate, ExtensionArray): # dispatch to ExtensionArray interface return delegate._reduce(name, skipna=skipna, **kwds) else: # dispatch to numpy arrays if numeric_only and not is_numeric_dtype(self.dtype): kwd_name = "numeric_only" if name in ["any", "all"]: kwd_name = "bool_only" # GH#47500 - change to TypeError to match other methods raise TypeError( f"Series.{name} does not allow {kwd_name}={numeric_only} " "with non-numeric dtypes." ) with np.errstate(all="ignore"): return op(delegate, skipna=skipna, **kwds) def _reindex_indexer( self, new_index: Index | None, indexer: npt.NDArray[np.intp] | None, copy: bool | None, ) -> Series: # Note: new_index is None iff indexer is None # if not None, indexer is np.intp if indexer is None and ( new_index is None or new_index.names == self.index.names ): if using_copy_on_write(): return self.copy(deep=copy) if copy or copy is None: return self.copy(deep=copy) return self new_values = algorithms.take_nd( self._values, indexer, allow_fill=True, fill_value=None ) return self._constructor(new_values, index=new_index, copy=False) def _needs_reindex_multi(self, axes, method, level) -> bool: """ Check if we do need a multi reindex; this is for compat with higher dims. """ return False # error: Cannot determine type of 'align' NDFrame.align, # type: ignore[has-type] klass=_shared_doc_kwargs["klass"], axes_single_arg=_shared_doc_kwargs["axes_single_arg"], ) def align( self, other: Series, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value: Hashable = None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> Series: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[True], level: Level | None = ..., errors: IgnoreRaise = ..., ) -> None: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[False] = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: bool = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series | None: ... def rename( self, index: Renamer | Hashable | None = None, *, axis: Axis | None = None, copy: bool = True, inplace: bool = False, level: Level | None = None, errors: IgnoreRaise = "ignore", ) -> Series | None: """ Alter Series index labels or name. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. Alternatively, change ``Series.name`` with a scalar value. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- index : scalar, hashable sequence, dict-like or function optional Functions or dict-like are transformations to apply to the index. Scalar or hashable sequence-like will alter the ``Series.name`` attribute. axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new Series. 
            If True, the value of ``copy`` is ignored.
        level : int or level name, default None
            In case of MultiIndex, only rename labels in the specified level.
        errors : {'ignore', 'raise'}, default 'ignore'
            If 'raise', raise ``KeyError`` when a dict-like ``index`` contains
            labels that are not present in the index being transformed.
            If 'ignore', existing keys will be renamed and extra keys will be
            ignored.

        Returns
        -------
        Series or None
            Series with index labels or name altered, or None if
            ``inplace=True``.

        See Also
        --------
        DataFrame.rename : Corresponding DataFrame method.
        Series.rename_axis : Set the name of the axis.

        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s
        0    1
        1    2
        2    3
        dtype: int64
        >>> s.rename("my_name")  # scalar, changes Series.name
        0    1
        1    2
        2    3
        Name: my_name, dtype: int64
        >>> s.rename(lambda x: x ** 2)  # function, changes labels
        0    1
        1    2
        4    3
        dtype: int64
        >>> s.rename({1: 3, 2: 5})  # mapping, changes labels
        0    1
        3    2
        5    3
        dtype: int64
        """
        if axis is not None:
            # Make sure we raise if an invalid 'axis' is passed.
            axis = self._get_axis_number(axis)

        if callable(index) or is_dict_like(index):
            # error: Argument 1 to "_rename" of "NDFrame" has incompatible
            # type "Union[Union[Mapping[Any, Hashable], Callable[[Any],
            # Hashable]], Hashable, None]"; expected "Union[Mapping[Any,
            # Hashable], Callable[[Any], Hashable], None]"
            return super()._rename(
                index,  # type: ignore[arg-type]
                copy=copy,
                inplace=inplace,
                level=level,
                errors=errors,
            )
        else:
            return self._set_name(index, inplace=inplace)

    # ----------------------------------------------------------------------
    # set_axis, shift, and the methods under the banners below were elided
    # from this excerpt; the only fragment that survived is this set_axis
    # docstring example:
    #
    #     >>> s = pd.Series([1, 2, 3])
    #     >>> s.set_axis(['a', 'b', 'c'], axis=0)
    #     a    1
    #     b    2
    #     c    3
    #     dtype: int64

    # ----------------------------------------------------------------------
    # Convert to types that support pd.NA

    # ----------------------------------------------------------------------
    # Time series-oriented methods

    # ----------------------------------------------------------------------
    # Add index

    # ----------------------------------------------------------------------
    # Accessor Methods

    # ----------------------------------------------------------------------
    # Add plotting methods to Series

    # ----------------------------------------------------------------------
    # Template-Based Arithmetic/Comparison Methods


class DataFrame(ABC):
    """
    A data frame class, with only the methods required by the interchange
    protocol defined.

    A "data frame" represents an ordered collection of named columns.
    A column's "name" must be a unique string.
    Columns may be accessed by name or by position.

    This could be a public data frame class, or an object with the methods and
    attributes defined on this DataFrame class could be returned from the
    ``__dataframe__`` method of a public data frame class in a library adhering
    to the dataframe interchange protocol specification.
""" version = 0 # version of the protocol def __dataframe__(self, nan_as_null: bool = False, allow_copy: bool = True): """Construct a new interchange object, potentially changing the parameters.""" def metadata(self) -> dict[str, Any]: """ The metadata for the data frame, as a dictionary with string keys. The contents of `metadata` may be anything, they are meant for a library to store information that it needs to, e.g., roundtrip losslessly or for two implementations to share data that is not (yet) part of the interchange protocol specification. For avoiding collisions with other entries, please add name the keys with the name of the library followed by a period and the desired name, e.g, ``pandas.indexcol``. """ def num_columns(self) -> int: """ Return the number of columns in the DataFrame. """ def num_rows(self) -> int | None: # TODO: not happy with Optional, but need to flag it may be expensive # why include it if it may be None - what do we expect consumers # to do here? """ Return the number of rows in the DataFrame, if available. """ def num_chunks(self) -> int: """ Return the number of chunks the DataFrame consists of. """ def column_names(self) -> Iterable[str]: """ Return an iterator yielding the column names. """ def get_column(self, i: int) -> Column: """ Return the column at the indicated position. """ def get_column_by_name(self, name: str) -> Column: """ Return the column whose name is the indicated name. """ def get_columns(self) -> Iterable[Column]: """ Return an iterator yielding the columns. """ def select_columns(self, indices: Sequence[int]) -> DataFrame: """ Create a new DataFrame by selecting a subset of columns by index. """ def select_columns_by_name(self, names: Sequence[str]) -> DataFrame: """ Create a new DataFrame by selecting a subset of columns by name. """ def get_chunks(self, n_chunks: int | None = None) -> Iterable[DataFrame]: """ Return an iterator yielding the chunks. By default (None), yields the chunks that the data is stored as by the producer. If given, ``n_chunks`` must be a multiple of ``self.num_chunks()``, meaning the producer must subdivide each chunk before yielding it. """ def _reindex_for_setitem(value: DataFrame | Series, index: Index) -> ArrayLike: # reindex if necessary if value.index.equals(index) or not len(index): return value._values.copy() # GH#4107 try: reindexed_value = value.reindex(index)._values except ValueError as err: # raised in MultiIndex.from_tuples, see test_insert_error_msmgs if not value.index.is_unique: # duplicate axis raise err raise TypeError( "incompatible index of inserted column with frame index" ) from err return reindexed_value
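
# Consumer-side sketch of the interchange DataFrame protocol above
# (hypothetical, not part of the source). pandas implements both the
# producer and consumer halves, so a round trip exercises the abstract API.
def _demo_interchange_roundtrip() -> None:
    import pandas as pd

    df = pd.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})

    xchg = df.__dataframe__()  # the protocol object, not a pandas DataFrame
    assert xchg.num_columns() == 2
    assert xchg.num_rows() == 3
    assert list(xchg.column_names()) == ["a", "b"]

    # Any conforming object, from any library, can be rebuilt this way.
    roundtripped = pd.api.interchange.from_dataframe(df)
    assert roundtripped.equals(df)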
from __future__ import annotations import sys from textwrap import dedent from typing import ( IO, TYPE_CHECKING, Any, Callable, Hashable, Iterable, Literal, Mapping, Sequence, Union, cast, overload, ) import warnings import weakref import numpy as np from pandas._config import ( get_option, using_copy_on_write, ) from pandas._libs import ( lib, properties, reshape, ) from pandas._libs.internals import BlockValuesRefs from pandas._libs.lib import is_range_indexer from pandas._typing import ( AggFuncType, AlignJoin, AnyAll, AnyArrayLike, ArrayLike, Axis, AxisInt, CorrelationMethod, DropKeep, Dtype, DtypeBackend, DtypeObj, FilePath, FillnaOptions, Frequency, IgnoreRaise, IndexKeyFunc, IndexLabel, Level, NaPosition, QuantileInterpolation, Renamer, Scalar, SingleManager, SortKind, StorageOptions, TimedeltaConvertibleTypes, TimestampConvertibleTypes, ValueKeyFunc, WriteBuffer, npt, ) from pandas.compat import PYPY from pandas.compat.numpy import function as nv from pandas.errors import ( ChainedAssignmentError, InvalidIndexError, _chained_assignment_msg, ) from pandas.util._decorators import ( Appender, Substitution, doc, ) from pandas.util._exceptions import find_stack_level from pandas.util._validators import ( validate_ascending, validate_bool_kwarg, validate_percentile, ) from pandas.core.dtypes.astype import astype_is_view from pandas.core.dtypes.cast import ( LossySetitemError, convert_dtypes, maybe_box_native, maybe_cast_pointwise_result, ) from pandas.core.dtypes.common import ( ensure_platform_int, is_dict_like, is_extension_array_dtype, is_integer, is_iterator, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, pandas_dtype, validate_all_hashable, ) from pandas.core.dtypes.generic import ABCDataFrame from pandas.core.dtypes.inference import is_hashable from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, remove_na_arraylike, ) from pandas.core import ( algorithms, base, common as com, missing, nanops, ops, ) from pandas.core.accessor import CachedAccessor from pandas.core.apply import SeriesApply from pandas.core.arrays import ExtensionArray from pandas.core.arrays.categorical import CategoricalAccessor from pandas.core.arrays.sparse import SparseAccessor from pandas.core.construction import ( extract_array, sanitize_array, ) from pandas.core.generic import NDFrame from pandas.core.indexers import ( disallow_ndim_indexing, unpack_1tuple, ) from pandas.core.indexes.accessors import CombinedDatetimelikeProperties from pandas.core.indexes.api import ( DatetimeIndex, Index, MultiIndex, PeriodIndex, default_index, ensure_index, ) import pandas.core.indexes.base as ibase from pandas.core.indexes.multi import maybe_droplevels from pandas.core.indexing import ( check_bool_indexer, check_dict_or_set_indexers, ) from pandas.core.internals import ( SingleArrayManager, SingleBlockManager, ) from pandas.core.methods import selectn from pandas.core.shared_docs import _shared_docs from pandas.core.sorting import ( ensure_key_mapped, nargsort, ) from pandas.core.strings.accessor import StringMethods from pandas.core.tools.datetimes import to_datetime import pandas.io.formats.format as fmt from pandas.io.formats.info import ( INFO_DOCSTRING, SeriesInfo, series_sub_kwargs, ) import pandas.plotting def find_stack_level() -> int: """ Find the first place in the stack that is not inside pandas (tests notwithstanding). 
""" import pandas as pd pkg_dir = os.path.dirname(pd.__file__) test_dir = os.path.join(pkg_dir, "tests") # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow frame = inspect.currentframe() n = 0 while frame: fname = inspect.getfile(frame) if fname.startswith(pkg_dir) and not fname.startswith(test_dir): frame = frame.f_back n += 1 else: break return n The provided code snippet includes necessary dependencies for implementing the `_coerce_method` function. Write a Python function `def _coerce_method(converter)` to solve the following problem: Install the scalar coercion methods. Here is the function: def _coerce_method(converter): """ Install the scalar coercion methods. """ def wrapper(self): if len(self) == 1: warnings.warn( f"Calling {converter.__name__} on a single element Series is " "deprecated and will raise a TypeError in the future. " f"Use {converter.__name__}(ser.iloc[0]) instead", FutureWarning, stacklevel=find_stack_level(), ) return converter(self.iloc[0]) raise TypeError(f"cannot convert the series to {converter}") wrapper.__name__ = f"__{converter.__name__}__" return wrapper
from __future__ import annotations from collections import defaultdict from typing import ( TYPE_CHECKING, Callable, DefaultDict, Hashable, Iterable, Sequence, cast, ) import numpy as np from pandas._libs import ( algos, hashtable, lib, ) from pandas._libs.hashtable import unique_label_indices from pandas._typing import ( AxisInt, IndexKeyFunc, Level, NaPosition, Shape, SortKind, npt, ) from pandas.core.dtypes.common import ( ensure_int64, ensure_platform_int, is_extension_array_dtype, ) from pandas.core.dtypes.generic import ( ABCMultiIndex, ABCRangeIndex, ) from pandas.core.dtypes.missing import isna from pandas.core.construction import extract_array def lexsort_indexer( keys, orders=None, na_position: str = "last", key: Callable | None = None ) -> npt.NDArray[np.intp]: """ Performs lexical sorting on a set of keys Parameters ---------- keys : sequence of arrays Sequence of ndarrays to be sorted by the indexer orders : bool or list of booleans, optional Determines the sorting order for each element in keys. If a list, it must be the same length as keys. This determines whether the corresponding element in keys should be sorted in ascending (True) or descending (False) order. if bool, applied to all elements as above. if None, defaults to True. na_position : {'first', 'last'}, default 'last' Determines placement of NA elements in the sorted list ("last" or "first") key : Callable, optional Callable key function applied to every element in keys before sorting Returns ------- np.ndarray[np.intp] """ from pandas.core.arrays import Categorical labels = [] shape = [] if isinstance(orders, bool): orders = [orders] * len(keys) elif orders is None: orders = [True] * len(keys) keys = [ensure_key_mapped(k, key) for k in keys] for k, order in zip(keys, orders): cat = Categorical(k, ordered=True) if na_position not in ["last", "first"]: raise ValueError(f"invalid na_position: {na_position}") n = len(cat.categories) codes = cat.codes.copy() mask = cat.codes == -1 if order: # ascending if na_position == "last": codes = np.where(mask, n, codes) elif na_position == "first": codes += 1 else: # not order means descending if na_position == "last": codes = np.where(mask, n, n - codes - 1) elif na_position == "first": codes = np.where(mask, 0, n - codes) if mask.any(): n += 1 shape.append(n) labels.append(codes) return indexer_from_factorized(labels, tuple(shape)) def nargsort( items, kind: str = "quicksort", ascending: bool = True, na_position: str = "last", key: Callable | None = None, mask: npt.NDArray[np.bool_] | None = None, ) -> npt.NDArray[np.intp]: """ Intended to be a drop-in replacement for np.argsort which handles NaNs. Adds ascending, na_position, and key parameters. (GH #6399, #5231, #27237) Parameters ---------- kind : str, default 'quicksort' ascending : bool, default True na_position : {'first', 'last'}, default 'last' key : Optional[Callable], default None mask : Optional[np.ndarray[bool]], default None Passed when called by ExtensionArray.argsort. Returns ------- np.ndarray[np.intp] """ if key is not None: items = ensure_key_mapped(items, key) return nargsort( items, kind=kind, ascending=ascending, na_position=na_position, key=None, mask=mask, ) if isinstance(items, ABCRangeIndex): return items.argsort(ascending=ascending) # TODO: test coverage with key? elif not isinstance(items, ABCMultiIndex): items = extract_array(items) if mask is None: mask = np.asarray(isna(items)) # TODO: does this exclude MultiIndex too? 
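
    # ExtensionArray dtypes provide their own argsort, which already honors
    # ascending/kind/na_position; the plain-ndarray path below instead sorts
    # the non-NA values and splices the NA positions back in afterwards.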
if is_extension_array_dtype(items): return items.argsort(ascending=ascending, kind=kind, na_position=na_position) else: items = np.asanyarray(items) idx = np.arange(len(items)) non_nans = items[~mask] non_nan_idx = idx[~mask] nan_idx = np.nonzero(mask)[0] if not ascending: non_nans = non_nans[::-1] non_nan_idx = non_nan_idx[::-1] indexer = non_nan_idx[non_nans.argsort(kind=kind)] if not ascending: indexer = indexer[::-1] # Finally, place the NaNs at the end or the beginning according to # na_position if na_position == "last": indexer = np.concatenate([indexer, nan_idx]) elif na_position == "first": indexer = np.concatenate([nan_idx, indexer]) else: raise ValueError(f"invalid na_position: {na_position}") return ensure_platform_int(indexer) def ensure_key_mapped(values, key: Callable | None, levels=None): """ Applies a callable key function to the values function and checks that the resulting value has the same shape. Can be called on Index subclasses, Series, DataFrames, or ndarrays. Parameters ---------- values : Series, DataFrame, Index subclass, or ndarray key : Optional[Callable], key to be called on the values array levels : Optional[List], if values is a MultiIndex, list of levels to apply the key to. """ from pandas.core.indexes.api import Index if not key: return values if isinstance(values, ABCMultiIndex): return _ensure_key_mapped_multiindex(values, key, level=levels) result = key(values.copy()) if len(result) != len(values): raise ValueError( "User-provided `key` function must not change the shape of the array." ) try: if isinstance( values, Index ): # convert to a new Index subclass, not necessarily the same result = Index(result) else: type_of_values = type(values) result = type_of_values(result) # try to revert to original type otherwise except TypeError: raise TypeError( f"User-provided `key` function returned an invalid type {type(result)} \ which could not be converted to {type(values)}." ) return result def cast(typ: Type[_T], val: Any) -> _T: ... def cast(typ: str, val: Any) -> Any: ... def cast(typ: object, val: Any) -> Any: ... Level = Hashable IndexKeyFunc = Optional[Callable[["Index"], Union["Index", AnyArrayLike]]] SortKind = Literal["quicksort", "mergesort", "heapsort", "stable"] NaPosition = Literal["first", "last"] ABCMultiIndex = cast( "Type[MultiIndex]", create_pandas_abc_type("ABCMultiIndex", "_typ", ("multiindex",)), ) class Index(IndexOpsMixin, PandasObject): """ Immutable sequence used for indexing and alignment. The basic object storing axis labels for all pandas objects. .. versionchanged:: 2.0.0 Index can hold all numpy numeric dtypes (except float16). Previously only int64/uint64/float64 dtypes were accepted. Parameters ---------- data : array-like (1-dimensional) dtype : NumPy dtype (default: object) If dtype is None, we find the dtype that best fits the data. If an actual dtype is provided, we coerce to that dtype if it's safe. Otherwise, an error will be raised. copy : bool Make a copy of input ndarray. name : object Name to be stored in the index. tupleize_cols : bool (default: True) When True, attempt to create a MultiIndex if possible. See Also -------- RangeIndex : Index implementing a monotonic integer range. CategoricalIndex : Index of :class:`Categorical` s. MultiIndex : A multi-level, or hierarchical Index. IntervalIndex : An Index of :class:`Interval` s. DatetimeIndex : Index of datetime64 data. TimedeltaIndex : Index of timedelta64 data. PeriodIndex : Index of Period data. Notes ----- An Index instance can **only** contain hashable objects. 
An Index instance *can not* hold numpy float16 dtype. Examples -------- >>> pd.Index([1, 2, 3]) Index([1, 2, 3], dtype='int64') >>> pd.Index(list('abc')) Index(['a', 'b', 'c'], dtype='object') >>> pd.Index([1, 2, 3], dtype="uint8") Index([1, 2, 3], dtype='uint8') """ # To hand over control to subclasses _join_precedence = 1 # Cython methods; see github.com/cython/cython/issues/2647 # for why we need to wrap these instead of making them class attributes # Moreover, cython will choose the appropriate-dtyped sub-function # given the dtypes of the passed arguments def _left_indexer_unique(self: _IndexT, other: _IndexT) -> npt.NDArray[np.intp]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) # similar but not identical to ov.searchsorted(sv) return libjoin.left_join_indexer_unique(sv, ov) def _left_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.left_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx def _inner_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.inner_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx def _outer_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.outer_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx _typ: str = "index" _data: ExtensionArray | np.ndarray _data_cls: type[ExtensionArray] | tuple[type[np.ndarray], type[ExtensionArray]] = ( np.ndarray, ExtensionArray, ) _id: object | None = None _name: Hashable = None # MultiIndex.levels previously allowed setting the index name. We # don't allow this anymore, and raise if it happens rather than # failing silently. 
_no_setting_name: bool = False _comparables: list[str] = ["name"] _attributes: list[str] = ["name"] def _can_hold_strings(self) -> bool: return not is_numeric_dtype(self) _engine_types: dict[np.dtype | ExtensionDtype, type[libindex.IndexEngine]] = { np.dtype(np.int8): libindex.Int8Engine, np.dtype(np.int16): libindex.Int16Engine, np.dtype(np.int32): libindex.Int32Engine, np.dtype(np.int64): libindex.Int64Engine, np.dtype(np.uint8): libindex.UInt8Engine, np.dtype(np.uint16): libindex.UInt16Engine, np.dtype(np.uint32): libindex.UInt32Engine, np.dtype(np.uint64): libindex.UInt64Engine, np.dtype(np.float32): libindex.Float32Engine, np.dtype(np.float64): libindex.Float64Engine, np.dtype(np.complex64): libindex.Complex64Engine, np.dtype(np.complex128): libindex.Complex128Engine, } def _engine_type( self, ) -> type[libindex.IndexEngine] | type[libindex.ExtensionEngine]: return self._engine_types.get(self.dtype, libindex.ObjectEngine) # whether we support partial string indexing. Overridden # in DatetimeIndex and PeriodIndex _supports_partial_string_indexing = False _accessors = {"str"} str = CachedAccessor("str", StringMethods) _references = None # -------------------------------------------------------------------- # Constructors def __new__( cls, data=None, dtype=None, copy: bool = False, name=None, tupleize_cols: bool = True, ) -> Index: from pandas.core.indexes.range import RangeIndex name = maybe_extract_name(name, data, cls) if dtype is not None: dtype = pandas_dtype(dtype) data_dtype = getattr(data, "dtype", None) refs = None if not copy and isinstance(data, (ABCSeries, Index)): refs = data._references # range if isinstance(data, (range, RangeIndex)): result = RangeIndex(start=data, copy=copy, name=name) if dtype is not None: return result.astype(dtype, copy=False) return result elif is_ea_or_datetimelike_dtype(dtype): # non-EA dtype indexes have special casting logic, so we punt here pass elif is_ea_or_datetimelike_dtype(data_dtype): pass elif isinstance(data, (np.ndarray, Index, ABCSeries)): if isinstance(data, ABCMultiIndex): data = data._values if data.dtype.kind not in ["i", "u", "f", "b", "c", "m", "M"]: # GH#11836 we need to avoid having numpy coerce # things that look like ints/floats to ints unless # they are actually ints, e.g. '0' and 0.0 # should not be coerced data = com.asarray_tuplesafe(data, dtype=_dtype_obj) elif is_scalar(data): raise cls._raise_scalar_data_error(data) elif hasattr(data, "__array__"): return Index(np.asarray(data), dtype=dtype, copy=copy, name=name) elif not is_list_like(data) and not isinstance(data, memoryview): # 2022-11-16 the memoryview check is only necessary on some CI # builds, not clear why raise cls._raise_scalar_data_error(data) else: if tupleize_cols: # GH21470: convert iterable to list before determining if empty if is_iterator(data): data = list(data) if data and all(isinstance(e, tuple) for e in data): # we must be all tuples, otherwise don't construct # 10697 from pandas.core.indexes.multi import MultiIndex return MultiIndex.from_tuples(data, names=name) # other iterable of some kind if not isinstance(data, (list, tuple)): # we allow set/frozenset, which Series/sanitize_array does not, so # cast to list here data = list(data) if len(data) == 0: # unlike Series, we default to object dtype: data = np.array(data, dtype=object) if len(data) and isinstance(data[0], tuple): # Ensure we get 1-D array of tuples instead of 2D array. 
data = com.asarray_tuplesafe(data, dtype=_dtype_obj) try: arr = sanitize_array(data, None, dtype=dtype, copy=copy) except ValueError as err: if "index must be specified when data is not list-like" in str(err): raise cls._raise_scalar_data_error(data) from err if "Data must be 1-dimensional" in str(err): raise ValueError("Index data must be 1-dimensional") from err raise arr = ensure_wrapped_if_datetimelike(arr) klass = cls._dtype_to_subclass(arr.dtype) arr = klass._ensure_array(arr, arr.dtype, copy=False) return klass._simple_new(arr, name, refs=refs) def _ensure_array(cls, data, dtype, copy: bool): """ Ensure we have a valid array to pass to _simple_new. """ if data.ndim > 1: # GH#13601, GH#20285, GH#27125 raise ValueError("Index data must be 1-dimensional") elif dtype == np.float16: # float16 not supported (no indexing engine) raise NotImplementedError("float16 indexes are not supported") if copy: # asarray_tuplesafe does not always copy underlying data, # so need to make sure that this happens data = data.copy() return data def _dtype_to_subclass(cls, dtype: DtypeObj): # Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423 if isinstance(dtype, ExtensionDtype): if isinstance(dtype, DatetimeTZDtype): from pandas import DatetimeIndex return DatetimeIndex elif isinstance(dtype, CategoricalDtype): from pandas import CategoricalIndex return CategoricalIndex elif isinstance(dtype, IntervalDtype): from pandas import IntervalIndex return IntervalIndex elif isinstance(dtype, PeriodDtype): from pandas import PeriodIndex return PeriodIndex return Index if dtype.kind == "M": from pandas import DatetimeIndex return DatetimeIndex elif dtype.kind == "m": from pandas import TimedeltaIndex return TimedeltaIndex elif dtype.kind == "O": # NB: assuming away MultiIndex return Index elif issubclass(dtype.type, str) or is_numeric_dtype(dtype): return Index raise NotImplementedError(dtype) # NOTE for new Index creation: # - _simple_new: It returns new Index with the same type as the caller. # All metadata (such as name) must be provided by caller's responsibility. # Using _shallow_copy is recommended because it fills these metadata # otherwise specified. # - _shallow_copy: It returns new Index with the same type (using # _simple_new), but fills caller's metadata otherwise specified. Passed # kwargs will overwrite corresponding metadata. # See each method's docstring. def _simple_new( cls: type[_IndexT], values: ArrayLike, name: Hashable = None, refs=None ) -> _IndexT: """ We require that we have a dtype compat for the values. If we are passed a non-dtype compat, then coerce using the constructor. Must be careful not to recurse. """ assert isinstance(values, cls._data_cls), type(values) result = object.__new__(cls) result._data = values result._name = name result._cache = {} result._reset_identity() if refs is not None: result._references = refs else: result._references = BlockValuesRefs() result._references.add_index_reference(result) return result def _with_infer(cls, *args, **kwargs): """ Constructor that uses the 1.0.x behavior inferring numeric dtypes for ndarray[object] inputs. 
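
        Examples
        --------
        >>> arr = np.array([1, 2], dtype=object)
        >>> Index(arr)
        Index([1, 2], dtype='object')
        >>> Index._with_infer(arr)
        Index([1, 2], dtype='int64')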
""" result = cls(*args, **kwargs) if result.dtype == _dtype_obj and not result._is_multi: # error: Argument 1 to "maybe_convert_objects" has incompatible type # "Union[ExtensionArray, ndarray[Any, Any]]"; expected # "ndarray[Any, Any]" values = lib.maybe_convert_objects(result._values) # type: ignore[arg-type] if values.dtype.kind in ["i", "u", "f", "b"]: return Index(values, name=result.name) return result def _constructor(self: _IndexT) -> type[_IndexT]: return type(self) def _maybe_check_unique(self) -> None: """ Check that an Index has no duplicates. This is typically only called via `NDFrame.flags.allows_duplicate_labels.setter` when it's set to True (duplicates aren't allowed). Raises ------ DuplicateLabelError When the index is not unique. """ if not self.is_unique: msg = """Index has duplicates.""" duplicates = self._format_duplicate_message() msg += f"\n{duplicates}" raise DuplicateLabelError(msg) def _format_duplicate_message(self) -> DataFrame: """ Construct the DataFrame for a DuplicateLabelError. This returns a DataFrame indicating the labels and positions of duplicates in an index. This should only be called when it's already known that duplicates are present. Examples -------- >>> idx = pd.Index(['a', 'b', 'a']) >>> idx._format_duplicate_message() positions label a [0, 2] """ from pandas import Series duplicates = self[self.duplicated(keep="first")].unique() assert len(duplicates) out = Series(np.arange(len(self))).groupby(self).agg(list)[duplicates] if self._is_multi: # test_format_duplicate_labels_message_multi # error: "Type[Index]" has no attribute "from_tuples" [attr-defined] out.index = type(self).from_tuples(out.index) # type: ignore[attr-defined] if self.nlevels == 1: out = out.rename_axis("label") return out.to_frame(name="positions") # -------------------------------------------------------------------- # Index Internals Methods def _shallow_copy(self: _IndexT, values, name: Hashable = no_default) -> _IndexT: """ Create a new Index with the same class as the caller, don't copy the data, use the same object attributes with passed in attributes taking precedence. *this is an internal non-public method* Parameters ---------- values : the values to create the new Index, optional name : Label, defaults to self.name """ name = self._name if name is no_default else name return self._simple_new(values, name=name, refs=self._references) def _view(self: _IndexT) -> _IndexT: """ fastpath to make a shallow copy, i.e. new object with same data. """ result = self._simple_new(self._values, name=self._name, refs=self._references) result._cache = self._cache return result def _rename(self: _IndexT, name: Hashable) -> _IndexT: """ fastpath for rename if new name is already validated. """ result = self._view() result._name = name return result def is_(self, other) -> bool: """ More flexible, faster check like ``is`` but that works through views. Note: this is *not* the same as ``Index.identical()``, which checks that metadata is also the same. Parameters ---------- other : object Other object to compare against. Returns ------- bool True if both have same underlying data, False otherwise. See Also -------- Index.identical : Works like ``Index.is_`` but also checks metadata. """ if self is other: return True elif not hasattr(other, "_id"): return False elif self._id is None or other._id is None: return False else: return self._id is other._id def _reset_identity(self) -> None: """ Initializes or resets ``_id`` attribute with new object. 
""" self._id = object() def _cleanup(self) -> None: self._engine.clear_mapping() def _engine( self, ) -> libindex.IndexEngine | libindex.ExtensionEngine | libindex.MaskedIndexEngine: # For base class (object dtype) we get ObjectEngine target_values = self._get_engine_target() if isinstance(target_values, ExtensionArray): if isinstance(target_values, (BaseMaskedArray, ArrowExtensionArray)): try: return _masked_engines[target_values.dtype.name](target_values) except KeyError: # Not supported yet e.g. decimal pass elif self._engine_type is libindex.ObjectEngine: return libindex.ExtensionEngine(target_values) target_values = cast(np.ndarray, target_values) # to avoid a reference cycle, bind `target_values` to a local variable, so # `self` is not passed into the lambda. if target_values.dtype == bool: return libindex.BoolEngine(target_values) elif target_values.dtype == np.complex64: return libindex.Complex64Engine(target_values) elif target_values.dtype == np.complex128: return libindex.Complex128Engine(target_values) elif needs_i8_conversion(self.dtype): # We need to keep M8/m8 dtype when initializing the Engine, # but don't want to change _get_engine_target bc it is used # elsewhere # error: Item "ExtensionArray" of "Union[ExtensionArray, # ndarray[Any, Any]]" has no attribute "_ndarray" [union-attr] target_values = self._data._ndarray # type: ignore[union-attr] # error: Argument 1 to "ExtensionEngine" has incompatible type # "ndarray[Any, Any]"; expected "ExtensionArray" return self._engine_type(target_values) # type: ignore[arg-type] def _dir_additions_for_owner(self) -> set[str_t]: """ Add the string-like labels to the owner dataframe/series dir output. If this is a MultiIndex, it's first level values are used. """ return { c for c in self.unique(level=0)[: get_option("display.max_dir_items")] if isinstance(c, str) and c.isidentifier() } # -------------------------------------------------------------------- # Array-Like Methods # ndarray compat def __len__(self) -> int: """ Return the length of the Index. """ return len(self._data) def __array__(self, dtype=None) -> np.ndarray: """ The array interface, return my values. """ return np.asarray(self._data, dtype=dtype) def __array_ufunc__(self, ufunc: np.ufunc, method: str_t, *inputs, **kwargs): if any(isinstance(other, (ABCSeries, ABCDataFrame)) for other in inputs): return NotImplemented result = arraylike.maybe_dispatch_ufunc_to_dunder_op( self, ufunc, method, *inputs, **kwargs ) if result is not NotImplemented: return result if "out" in kwargs: # e.g. test_dti_isub_tdi return arraylike.dispatch_ufunc_with_out( self, ufunc, method, *inputs, **kwargs ) if method == "reduce": result = arraylike.dispatch_reduction_ufunc( self, ufunc, method, *inputs, **kwargs ) if result is not NotImplemented: return result new_inputs = [x if x is not self else x._values for x in inputs] result = getattr(ufunc, method)(*new_inputs, **kwargs) if ufunc.nout == 2: # i.e. np.divmod, np.modf, np.frexp return tuple(self.__array_wrap__(x) for x in result) if result.dtype == np.float16: result = result.astype(np.float32) return self.__array_wrap__(result) def __array_wrap__(self, result, context=None): """ Gets called after a ufunc and other functions e.g. np.split. """ result = lib.item_from_zerodim(result) if is_bool_dtype(result) or lib.is_scalar(result) or np.ndim(result) > 1: return result return Index(result, name=self.name) def dtype(self) -> DtypeObj: """ Return the dtype object of the underlying data. 
""" return self._data.dtype def ravel(self, order: str_t = "C") -> Index: """ Return a view on self. Returns ------- Index See Also -------- numpy.ndarray.ravel : Return a flattened array. """ return self[:] def view(self, cls=None): # we need to see if we are subclassing an # index type here if cls is not None and not hasattr(cls, "_typ"): dtype = cls if isinstance(cls, str): dtype = pandas_dtype(cls) if isinstance(dtype, (np.dtype, ExtensionDtype)) and needs_i8_conversion( dtype ): if dtype.kind == "m" and dtype != "m8[ns]": # e.g. m8[s] return self._data.view(cls) idx_cls = self._dtype_to_subclass(dtype) # NB: we only get here for subclasses that override # _data_cls such that it is a type and not a tuple # of types. arr_cls = idx_cls._data_cls arr = arr_cls(self._data.view("i8"), dtype=dtype) return idx_cls._simple_new(arr, name=self.name, refs=self._references) result = self._data.view(cls) else: result = self._view() if isinstance(result, Index): result._id = self._id return result def astype(self, dtype, copy: bool = True): """ Create an Index with values cast to dtypes. The class of a new Index is determined by dtype. When conversion is impossible, a TypeError exception is raised. Parameters ---------- dtype : numpy dtype or pandas type Note that any signed integer `dtype` is treated as ``'int64'``, and any unsigned integer `dtype` is treated as ``'uint64'``, regardless of the size. copy : bool, default True By default, astype always returns a newly allocated object. If copy is set to False and internal requirements on dtype are satisfied, the original data is used to create a new Index or the original Index is returned. Returns ------- Index Index with values cast to specified dtype. """ if dtype is not None: dtype = pandas_dtype(dtype) if is_dtype_equal(self.dtype, dtype): # Ensure that self.astype(self.dtype) is self return self.copy() if copy else self values = self._data if isinstance(values, ExtensionArray): with rewrite_exception(type(values).__name__, type(self).__name__): new_values = values.astype(dtype, copy=copy) elif isinstance(dtype, ExtensionDtype): cls = dtype.construct_array_type() # Note: for RangeIndex and CategoricalDtype self vs self._values # behaves differently here. new_values = cls._from_sequence(self, dtype=dtype, copy=copy) else: # GH#13149 specifically use astype_array instead of astype new_values = astype_array(values, dtype=dtype, copy=copy) # pass copy=False because any copying will be done in the astype above result = Index(new_values, name=self.name, dtype=new_values.dtype, copy=False) if ( not copy and self._references is not None and astype_is_view(self.dtype, dtype) ): result._references = self._references result._references.add_index_reference(result) return result _index_shared_docs[ "take" ] = """ Return a new %(klass)s of the values selected by the indices. For internal compatibility with numpy arrays. Parameters ---------- indices : array-like Indices to be taken. axis : int, optional The axis over which to select values, always 0. allow_fill : bool, default True fill_value : scalar, default None If allow_fill=True and fill_value is not None, indices specified by -1 are regarded as NA. If Index doesn't hold NA, raise ValueError. Returns ------- Index An index formed of elements at the given indices. Will be the same type as self, except for RangeIndex. See Also -------- numpy.ndarray.take: Return an array formed from the elements of a at the given indices. 
""" def take( self, indices, axis: Axis = 0, allow_fill: bool = True, fill_value=None, **kwargs, ): if kwargs: nv.validate_take((), kwargs) if is_scalar(indices): raise TypeError("Expected indices to be array-like") indices = ensure_platform_int(indices) allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices) # Note: we discard fill_value and use self._na_value, only relevant # in the case where allow_fill is True and fill_value is not None values = self._values if isinstance(values, np.ndarray): taken = algos.take( values, indices, allow_fill=allow_fill, fill_value=self._na_value ) else: # algos.take passes 'axis' keyword which not all EAs accept taken = values.take( indices, allow_fill=allow_fill, fill_value=self._na_value ) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(taken, name=self.name) def _maybe_disallow_fill(self, allow_fill: bool, fill_value, indices) -> bool: """ We only use pandas-style take when allow_fill is True _and_ fill_value is not None. """ if allow_fill and fill_value is not None: # only fill if we are passing a non-None fill_value if self._can_hold_na: if (indices < -1).any(): raise ValueError( "When allow_fill=True and fill_value is not None, " "all indices must be >= -1" ) else: cls_name = type(self).__name__ raise ValueError( f"Unable to fill values because {cls_name} cannot contain NA" ) else: allow_fill = False return allow_fill _index_shared_docs[ "repeat" ] = """ Repeat elements of a %(klass)s. Returns a new %(klass)s where each element of the current %(klass)s is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty %(klass)s. axis : None Must be ``None``. Has no effect but is accepted for compatibility with numpy. Returns ------- %(klass)s Newly created %(klass)s with repeated elements. See Also -------- Series.repeat : Equivalent function for Series. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> idx = pd.Index(['a', 'b', 'c']) >>> idx Index(['a', 'b', 'c'], dtype='object') >>> idx.repeat(2) Index(['a', 'a', 'b', 'b', 'c', 'c'], dtype='object') >>> idx.repeat([1, 2, 3]) Index(['a', 'b', 'b', 'c', 'c', 'c'], dtype='object') """ def repeat(self, repeats, axis=None): repeats = ensure_platform_int(repeats) nv.validate_repeat((), {"axis": axis}) res_values = self._values.repeat(repeats) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(res_values, name=self.name) # -------------------------------------------------------------------- # Copying Methods def copy( self: _IndexT, name: Hashable | None = None, deep: bool = False, ) -> _IndexT: """ Make a copy of this object. Name is set on the new object. Parameters ---------- name : Label, optional Set name for new object. deep : bool, default False Returns ------- Index Index refer to new object which is a copy of this object. Notes ----- In most cases, there should be no functional difference from using ``deep``, but if ``deep`` is passed it will attempt to deepcopy. 
""" name = self._validate_names(name=name, deep=deep)[0] if deep: new_data = self._data.copy() new_index = type(self)._simple_new(new_data, name=name) else: new_index = self._rename(name=name) return new_index def __copy__(self: _IndexT, **kwargs) -> _IndexT: return self.copy(**kwargs) def __deepcopy__(self: _IndexT, memo=None) -> _IndexT: """ Parameters ---------- memo, default None Standard signature. Unused """ return self.copy(deep=True) # -------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str_t: """ Return a string representation for this object. """ klass_name = type(self).__name__ data = self._format_data() attrs = self._format_attrs() space = self._format_space() attrs_str = [f"{k}={v}" for k, v in attrs] prepr = f",{space}".join(attrs_str) # no data provided, just attributes if data is None: data = "" return f"{klass_name}({data}{prepr})" def _format_space(self) -> str_t: # using space here controls if the attributes # are line separated or not (the default) # max_seq_items = get_option('display.max_seq_items') # if len(self) > max_seq_items: # space = "\n%s" % (' ' * (len(klass) + 1)) return " " def _formatter_func(self): """ Return the formatter function. """ return default_pprint def _format_data(self, name=None) -> str_t: """ Return the formatted data as a unicode string. """ # do we want to justify (only do so for non-objects) is_justify = True if self.inferred_type == "string": is_justify = False elif self.inferred_type == "categorical": self = cast("CategoricalIndex", self) if is_object_dtype(self.categories): is_justify = False return format_object_summary( self, self._formatter_func, is_justify=is_justify, name=name, line_break_each_value=self._is_multi, ) def _format_attrs(self) -> list[tuple[str_t, str_t | int | bool | None]]: """ Return a list of tuples of the (attr,formatted_value). """ attrs: list[tuple[str_t, str_t | int | bool | None]] = [] if not self._is_multi: attrs.append(("dtype", f"'{self.dtype}'")) if self.name is not None: attrs.append(("name", default_pprint(self.name))) elif self._is_multi and any(x is not None for x in self.names): attrs.append(("names", default_pprint(self.names))) max_seq_items = get_option("display.max_seq_items") or len(self) if len(self) > max_seq_items: attrs.append(("length", len(self))) return attrs def _get_level_names(self) -> Hashable | Sequence[Hashable]: """ Return a name or list of names with None replaced by the level number. """ if self._is_multi: return [ level if name is None else name for level, name in enumerate(self.names) ] else: return 0 if self.name is None else self.name def _mpl_repr(self) -> np.ndarray: # how to represent ourselves to matplotlib if isinstance(self.dtype, np.dtype) and self.dtype.kind != "M": return cast(np.ndarray, self.values) return self.astype(object, copy=False)._values def format( self, name: bool = False, formatter: Callable | None = None, na_rep: str_t = "NaN", ) -> list[str_t]: """ Render a string representation of the Index. 
""" header = [] if name: header.append( pprint_thing(self.name, escape_chars=("\t", "\r", "\n")) if self.name is not None else "" ) if formatter is not None: return header + list(self.map(formatter)) return self._format_with_header(header, na_rep=na_rep) def _format_with_header(self, header: list[str_t], na_rep: str_t) -> list[str_t]: from pandas.io.formats.format import format_array values = self._values if is_object_dtype(values.dtype): values = cast(np.ndarray, values) values = lib.maybe_convert_objects(values, safe=True) result = [pprint_thing(x, escape_chars=("\t", "\r", "\n")) for x in values] # could have nans mask = is_float_nan(values) if mask.any(): result_arr = np.array(result) result_arr[mask] = na_rep result = result_arr.tolist() else: result = trim_front(format_array(values, None, justify="left")) return header + result def _format_native_types( self, *, na_rep: str_t = "", decimal: str_t = ".", float_format=None, date_format=None, quoting=None, ) -> npt.NDArray[np.object_]: """ Actually format specific types of the index. """ from pandas.io.formats.format import FloatArrayFormatter if is_float_dtype(self.dtype) and not is_extension_array_dtype(self.dtype): formatter = FloatArrayFormatter( self._values, na_rep=na_rep, float_format=float_format, decimal=decimal, quoting=quoting, fixed_width=False, ) return formatter.get_result_as_array() mask = isna(self) if not is_object_dtype(self) and not quoting: values = np.asarray(self).astype(str) else: values = np.array(self, dtype=object, copy=True) values[mask] = na_rep return values def _summary(self, name=None) -> str_t: """ Return a summarized representation. Parameters ---------- name : str name to use in the summary representation Returns ------- String with a summarized representation of the index """ if len(self) > 0: head = self[0] if hasattr(head, "format") and not isinstance(head, str): head = head.format() elif needs_i8_conversion(self.dtype): # e.g. Timedelta, display as values, not quoted head = self._formatter_func(head).replace("'", "") tail = self[-1] if hasattr(tail, "format") and not isinstance(tail, str): tail = tail.format() elif needs_i8_conversion(self.dtype): # e.g. Timedelta, display as values, not quoted tail = self._formatter_func(tail).replace("'", "") index_summary = f", {head} to {tail}" else: index_summary = "" if name is None: name = type(self).__name__ return f"{name}: {len(self)} entries{index_summary}" # -------------------------------------------------------------------- # Conversion Methods def to_flat_index(self: _IndexT) -> _IndexT: """ Identity method. This is implemented for compatibility with subclass implementations when chaining. Returns ------- pd.Index Caller. See Also -------- MultiIndex.to_flat_index : Subclass implementation. """ return self def to_series(self, index=None, name: Hashable = None) -> Series: """ Create a Series with both index and values equal to the index keys. Useful with map for returning an indexer based on an index. Parameters ---------- index : Index, optional Index of resulting Series. If None, defaults to original index. name : str, optional Name of resulting Series. If None, defaults to name of original index. Returns ------- Series The dtype will be based on the type of the Index values. See Also -------- Index.to_frame : Convert an Index to a DataFrame. Series.to_frame : Convert Series to DataFrame. Examples -------- >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal') By default, the original Index and original name is reused. 
>>> idx.to_series() animal Ant Ant Bear Bear Cow Cow Name: animal, dtype: object To enforce a new Index, specify new labels to ``index``: >>> idx.to_series(index=[0, 1, 2]) 0 Ant 1 Bear 2 Cow Name: animal, dtype: object To override the name of the resulting column, specify `name`: >>> idx.to_series(name='zoo') animal Ant Ant Bear Bear Cow Cow Name: zoo, dtype: object """ from pandas import Series if index is None: index = self._view() if name is None: name = self.name return Series(self._values.copy(), index=index, name=name) def to_frame( self, index: bool = True, name: Hashable = lib.no_default ) -> DataFrame: """ Create a DataFrame with a column containing the Index. Parameters ---------- index : bool, default True Set the index of the returned DataFrame as the original Index. name : object, defaults to index.name The passed name should substitute for the index name (if it has one). Returns ------- DataFrame DataFrame containing the original Index data. See Also -------- Index.to_series : Convert an Index to a Series. Series.to_frame : Convert Series to DataFrame. Examples -------- >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal') >>> idx.to_frame() animal animal Ant Ant Bear Bear Cow Cow By default, the original Index is reused. To enforce a new Index: >>> idx.to_frame(index=False) animal 0 Ant 1 Bear 2 Cow To override the name of the resulting column, specify `name`: >>> idx.to_frame(index=False, name='zoo') zoo 0 Ant 1 Bear 2 Cow """ from pandas import DataFrame if name is lib.no_default: name = self._get_level_names() result = DataFrame({name: self._values.copy()}) if index: result.index = self return result # -------------------------------------------------------------------- # Name-Centric Methods def name(self) -> Hashable: """ Return Index or MultiIndex name. """ return self._name def name(self, value: Hashable) -> None: if self._no_setting_name: # Used in MultiIndex.levels to avoid silently ignoring name updates. raise RuntimeError( "Cannot set name on a level of a MultiIndex. Use " "'MultiIndex.set_names' instead." ) maybe_extract_name(value, None, type(self)) self._name = value def _validate_names( self, name=None, names=None, deep: bool = False ) -> list[Hashable]: """ Handles the quirks of having a singular 'name' parameter for general Index and plural 'names' parameter for MultiIndex. """ from copy import deepcopy if names is not None and name is not None: raise TypeError("Can only provide one of `names` and `name`") if names is None and name is None: new_names = deepcopy(self.names) if deep else self.names elif names is not None: if not is_list_like(names): raise TypeError("Must pass list-like as `names`.") new_names = names elif not is_list_like(name): new_names = [name] else: new_names = name if len(new_names) != len(self.names): raise ValueError( f"Length of new names must be {len(self.names)}, got {len(new_names)}" ) # All items in 'new_names' need to be hashable validate_all_hashable(*new_names, error_name=f"{type(self).__name__}.name") return new_names def _get_default_index_names( self, names: Hashable | Sequence[Hashable] | None = None, default=None ) -> list[Hashable]: """ Get names of index. Parameters ---------- names : int, str or 1-dimensional list, default None Index names to set. default : str Default name of index. 
Raises ------ TypeError if names not str or list-like """ from pandas.core.indexes.multi import MultiIndex if names is not None: if isinstance(names, (int, str)): names = [names] if not isinstance(names, list) and names is not None: raise ValueError("Index names must be str or 1-dimensional list") if not names: if isinstance(self, MultiIndex): names = com.fill_missing_names(self.names) else: names = [default] if self.name is None else [self.name] return names def _get_names(self) -> FrozenList: return FrozenList((self.name,)) def _set_names(self, values, *, level=None) -> None: """ Set new names on index. Each name has to be a hashable type. Parameters ---------- values : str or sequence name(s) to set level : int, level name, or sequence of int/level names (default None) If the index is a MultiIndex (hierarchical), level(s) to set (None for all levels). Otherwise level must be None Raises ------ TypeError if each name is not hashable. """ if not is_list_like(values): raise ValueError("Names must be a list-like") if len(values) != 1: raise ValueError(f"Length of new names must be 1, got {len(values)}") # GH 20527 # All items in 'name' need to be hashable: validate_all_hashable(*values, error_name=f"{type(self).__name__}.name") self._name = values[0] names = property(fset=_set_names, fget=_get_names) def set_names( self: _IndexT, names, *, level=..., inplace: Literal[False] = ... ) -> _IndexT: ... def set_names(self, names, *, level=..., inplace: Literal[True]) -> None: ... def set_names( self: _IndexT, names, *, level=..., inplace: bool = ... ) -> _IndexT | None: ... def set_names( self: _IndexT, names, *, level=None, inplace: bool = False ) -> _IndexT | None: """ Set Index or MultiIndex name. Able to set new names partially and by level. Parameters ---------- names : label or list of label or dict-like for MultiIndex Name(s) to set. .. versionchanged:: 1.3.0 level : int, label or list of int or label, optional If the index is a MultiIndex and names is not dict-like, level(s) to set (None for all levels). Otherwise level must be None. .. versionchanged:: 1.3.0 inplace : bool, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index or None The same type as the caller or None if ``inplace=True``. See Also -------- Index.rename : Able to set new names without level. Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx Index([1, 2, 3, 4], dtype='int64') >>> idx.set_names('quarter') Index([1, 2, 3, 4], dtype='int64', name='quarter') >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]]) >>> idx MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], ) >>> idx = idx.set_names(['kind', 'year']) >>> idx.set_names('species', level=0) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['species', 'year']) When renaming levels with a dict, levels can not be passed. 
>>> idx.set_names({'kind': 'snake'}) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['snake', 'year']) """ if level is not None and not isinstance(self, ABCMultiIndex): raise ValueError("Level must be None for non-MultiIndex") if level is not None and not is_list_like(level) and is_list_like(names): raise TypeError("Names must be a string when a single level is provided.") if not is_list_like(names) and level is None and self.nlevels > 1: raise TypeError("Must pass list-like as `names`.") if is_dict_like(names) and not isinstance(self, ABCMultiIndex): raise TypeError("Can only pass dict-like as `names` for MultiIndex.") if is_dict_like(names) and level is not None: raise TypeError("Can not pass level for dictlike `names`.") if isinstance(self, ABCMultiIndex) and is_dict_like(names) and level is None: # Transform dict to list of new names and corresponding levels level, names_adjusted = [], [] for i, name in enumerate(self.names): if name in names.keys(): level.append(i) names_adjusted.append(names[name]) names = names_adjusted if not is_list_like(names): names = [names] if level is not None and not is_list_like(level): level = [level] if inplace: idx = self else: idx = self._view() idx._set_names(names, level=level) if not inplace: return idx return None def rename(self, name, inplace: bool = False): """ Alter Index or MultiIndex name. Able to set new names without level. Defaults to returning new index. Length of names must match number of levels in MultiIndex. Parameters ---------- name : label or list of labels Name(s) to set. inplace : bool, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index or None The same type as the caller or None if ``inplace=True``. See Also -------- Index.set_names : Able to set new names partially and by level. Examples -------- >>> idx = pd.Index(['A', 'C', 'A', 'B'], name='score') >>> idx.rename('grade') Index(['A', 'C', 'A', 'B'], dtype='object', name='grade') >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]], ... names=['kind', 'year']) >>> idx MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['kind', 'year']) >>> idx.rename(['species', 'year']) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['species', 'year']) >>> idx.rename('species') Traceback (most recent call last): TypeError: Must pass list-like as `names`. """ return self.set_names([name], inplace=inplace) # -------------------------------------------------------------------- # Level-Centric Methods def nlevels(self) -> int: """ Number of levels. """ return 1 def _sort_levels_monotonic(self: _IndexT) -> _IndexT: """ Compat with MultiIndex. """ return self def _validate_index_level(self, level) -> None: """ Validate index level. For single-level Index getting level number is a no-op, but some verification must be done like in MultiIndex. 
""" if isinstance(level, int): if level < 0 and level != -1: raise IndexError( "Too many levels: Index has only 1 level, " f"{level} is not a valid level number" ) if level > 0: raise IndexError( f"Too many levels: Index has only 1 level, not {level + 1}" ) elif level != self.name: raise KeyError( f"Requested level ({level}) does not match index name ({self.name})" ) def _get_level_number(self, level) -> int: self._validate_index_level(level) return 0 def sortlevel( self, level=None, ascending: bool | list[bool] = True, sort_remaining=None ): """ For internal compatibility with the Index API. Sort the Index. This is for compat with MultiIndex Parameters ---------- ascending : bool, default True False to sort in descending order level, sort_remaining are compat parameters Returns ------- Index """ if not isinstance(ascending, (list, bool)): raise TypeError( "ascending must be a single bool value or" "a list of bool values of length 1" ) if isinstance(ascending, list): if len(ascending) != 1: raise TypeError("ascending must be a list of bool values of length 1") ascending = ascending[0] if not isinstance(ascending, bool): raise TypeError("ascending must be a bool value") return self.sort_values(return_indexer=True, ascending=ascending) def _get_level_values(self, level) -> Index: """ Return an Index of values for requested level. This is primarily useful to get an individual level of values from a MultiIndex, but is provided on Index as well for compatibility. Parameters ---------- level : int or str It is either the integer position or the name of the level. Returns ------- Index Calling object, as there is only one level in the Index. See Also -------- MultiIndex.get_level_values : Get values for a level of a MultiIndex. Notes ----- For Index, level should be 0, since there are no multiple levels. Examples -------- >>> idx = pd.Index(list('abc')) >>> idx Index(['a', 'b', 'c'], dtype='object') Get level values by supplying `level` as integer: >>> idx.get_level_values(0) Index(['a', 'b', 'c'], dtype='object') """ self._validate_index_level(level) return self get_level_values = _get_level_values def droplevel(self, level: IndexLabel = 0): """ Return index with requested level(s) removed. If resulting index has only 1 level left, the result will be of Index type, not MultiIndex. The original index is not modified inplace. Parameters ---------- level : int, str, or list-like, default 0 If a string is given, must be the name of a level If list-like, elements must be names or indexes of levels. Returns ------- Index or MultiIndex Examples -------- >>> mi = pd.MultiIndex.from_arrays( ... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z']) >>> mi MultiIndex([(1, 3, 5), (2, 4, 6)], names=['x', 'y', 'z']) >>> mi.droplevel() MultiIndex([(3, 5), (4, 6)], names=['y', 'z']) >>> mi.droplevel(2) MultiIndex([(1, 3), (2, 4)], names=['x', 'y']) >>> mi.droplevel('z') MultiIndex([(1, 3), (2, 4)], names=['x', 'y']) >>> mi.droplevel(['x', 'y']) Index([5, 6], dtype='int64', name='z') """ if not isinstance(level, (tuple, list)): level = [level] levnums = sorted(self._get_level_number(lev) for lev in level)[::-1] return self._drop_level_numbers(levnums) def _drop_level_numbers(self, levnums: list[int]): """ Drop MultiIndex levels by level _number_, not name. """ if not levnums and not isinstance(self, ABCMultiIndex): return self if len(levnums) >= self.nlevels: raise ValueError( f"Cannot remove {len(levnums)} levels from an index with " f"{self.nlevels} levels: at least one level must be left." 
) # The two checks above guarantee that here self is a MultiIndex self = cast("MultiIndex", self) new_levels = list(self.levels) new_codes = list(self.codes) new_names = list(self.names) for i in levnums: new_levels.pop(i) new_codes.pop(i) new_names.pop(i) if len(new_levels) == 1: lev = new_levels[0] if len(lev) == 0: # If lev is empty, lev.take will fail GH#42055 if len(new_codes[0]) == 0: # GH#45230 preserve RangeIndex here # see test_reset_index_empty_rangeindex result = lev[:0] else: res_values = algos.take(lev._values, new_codes[0], allow_fill=True) # _constructor instead of type(lev) for RangeIndex compat GH#35230 result = lev._constructor._simple_new(res_values, name=new_names[0]) else: # set nan if needed mask = new_codes[0] == -1 result = new_levels[0].take(new_codes[0]) if mask.any(): result = result.putmask(mask, np.nan) result._name = new_names[0] return result else: from pandas.core.indexes.multi import MultiIndex return MultiIndex( levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False, ) # -------------------------------------------------------------------- # Introspection Methods def _can_hold_na(self) -> bool: if isinstance(self.dtype, ExtensionDtype): if isinstance(self.dtype, IntervalDtype): # FIXME(GH#45720): this is inaccurate for integer-backed # IntervalArray, but without it other.categories.take raises # in IntervalArray._cmp_method return True return self.dtype._can_hold_na if self.dtype.kind in ["i", "u", "b"]: return False return True def is_monotonic_increasing(self) -> bool: """ Return a boolean if the values are equal or increasing. Returns ------- bool See Also -------- Index.is_monotonic_decreasing : Check if the values are equal or decreasing. Examples -------- >>> pd.Index([1, 2, 3]).is_monotonic_increasing True >>> pd.Index([1, 2, 2]).is_monotonic_increasing True >>> pd.Index([1, 3, 2]).is_monotonic_increasing False """ return self._engine.is_monotonic_increasing def is_monotonic_decreasing(self) -> bool: """ Return a boolean if the values are equal or decreasing. Returns ------- bool See Also -------- Index.is_monotonic_increasing : Check if the values are equal or increasing. Examples -------- >>> pd.Index([3, 2, 1]).is_monotonic_decreasing True >>> pd.Index([3, 2, 2]).is_monotonic_decreasing True >>> pd.Index([3, 1, 2]).is_monotonic_decreasing False """ return self._engine.is_monotonic_decreasing def _is_strictly_monotonic_increasing(self) -> bool: """ Return if the index is strictly monotonic increasing (only increasing) values. Examples -------- >>> Index([1, 2, 3])._is_strictly_monotonic_increasing True >>> Index([1, 2, 2])._is_strictly_monotonic_increasing False >>> Index([1, 3, 2])._is_strictly_monotonic_increasing False """ return self.is_unique and self.is_monotonic_increasing def _is_strictly_monotonic_decreasing(self) -> bool: """ Return if the index is strictly monotonic decreasing (only decreasing) values. Examples -------- >>> Index([3, 2, 1])._is_strictly_monotonic_decreasing True >>> Index([3, 2, 2])._is_strictly_monotonic_decreasing False >>> Index([3, 1, 2])._is_strictly_monotonic_decreasing False """ return self.is_unique and self.is_monotonic_decreasing def is_unique(self) -> bool: """ Return if the index has unique values. Returns ------- bool See Also -------- Index.has_duplicates : Inverse method that checks if it has duplicate values. 
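
        Notes
        -----
        A rough mental model (not the actual engine implementation): for
        hashable values the result matches ``len(idx) == len(set(idx))``.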
        Examples
        --------
        >>> idx = pd.Index([1, 5, 7, 7])
        >>> idx.is_unique
        False

        >>> idx = pd.Index([1, 5, 7])
        >>> idx.is_unique
        True

        >>> idx = pd.Index(["Watermelon", "Orange", "Apple",
        ...                 "Watermelon"]).astype("category")
        >>> idx.is_unique
        False

        >>> idx = pd.Index(["Orange", "Apple",
        ...                 "Watermelon"]).astype("category")
        >>> idx.is_unique
        True
        """
        return self._engine.is_unique

    def has_duplicates(self) -> bool:
        """
        Check if the Index has duplicate values.

        Returns
        -------
        bool
            Whether or not the Index has duplicate values.

        See Also
        --------
        Index.is_unique : Inverse method that checks if it has unique values.

        Examples
        --------
        >>> idx = pd.Index([1, 5, 7, 7])
        >>> idx.has_duplicates
        True

        >>> idx = pd.Index([1, 5, 7])
        >>> idx.has_duplicates
        False

        >>> idx = pd.Index(["Watermelon", "Orange", "Apple",
        ...                 "Watermelon"]).astype("category")
        >>> idx.has_duplicates
        True

        >>> idx = pd.Index(["Orange", "Apple",
        ...                 "Watermelon"]).astype("category")
        >>> idx.has_duplicates
        False
        """
        return not self.is_unique

    def is_boolean(self) -> bool:
        """
        Check if the Index only consists of booleans.

        .. deprecated:: 2.0.0
            Use `pandas.api.types.is_bool_dtype` instead.

        Returns
        -------
        bool
            Whether or not the Index only consists of booleans.

        See Also
        --------
        is_integer : Check if the Index only consists of integers (deprecated).
        is_floating : Check if the Index is a floating type (deprecated).
        is_numeric : Check if the Index only consists of numeric data (deprecated).
        is_object : Check if the Index is of the object dtype (deprecated).
        is_categorical : Check if the Index holds categorical data (deprecated).
        is_interval : Check if the Index holds Interval objects (deprecated).

        Examples
        --------
        >>> idx = pd.Index([True, False, True])
        >>> idx.is_boolean()  # doctest: +SKIP
        True

        >>> idx = pd.Index(["True", "False", "True"])
        >>> idx.is_boolean()  # doctest: +SKIP
        False

        >>> idx = pd.Index([True, False, "True"])
        >>> idx.is_boolean()  # doctest: +SKIP
        False
        """
        warnings.warn(
            f"{type(self).__name__}.is_boolean is deprecated. "
            "Use pandas.api.types.is_bool_dtype instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self.inferred_type in ["boolean"]

    def is_integer(self) -> bool:
        """
        Check if the Index only consists of integers.

        .. deprecated:: 2.0.0
            Use `pandas.api.types.is_integer_dtype` instead.

        Returns
        -------
        bool
            Whether or not the Index only consists of integers.

        See Also
        --------
        is_boolean : Check if the Index only consists of booleans (deprecated).
        is_floating : Check if the Index is a floating type (deprecated).
        is_numeric : Check if the Index only consists of numeric data (deprecated).
        is_object : Check if the Index is of the object dtype (deprecated).
        is_categorical : Check if the Index holds categorical data (deprecated).
        is_interval : Check if the Index holds Interval objects (deprecated).

        Examples
        --------
        >>> idx = pd.Index([1, 2, 3, 4])
        >>> idx.is_integer()  # doctest: +SKIP
        True

        >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
        >>> idx.is_integer()  # doctest: +SKIP
        False

        >>> idx = pd.Index(["Apple", "Mango", "Watermelon"])
        >>> idx.is_integer()  # doctest: +SKIP
        False
        """
        warnings.warn(
            f"{type(self).__name__}.is_integer is deprecated. "
            "Use pandas.api.types.is_integer_dtype instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self.inferred_type in ["integer"]

    def is_floating(self) -> bool:
        """
        Check if the Index is a floating type.

        .. deprecated:: 2.0.0
            Use `pandas.api.types.is_float_dtype` instead.

        The Index may consist of only floats, NaNs, or
        a mix of floats, integers, or NaNs.
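
        As a rough migration sketch, ``pandas.api.types.is_float_dtype(idx)``
        covers the float-dtype case; it does not replicate the
        mixed-integer-float inference this method performs on object dtype.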
        Returns
        -------
        bool
            Whether or not the Index only consists of floats, NaNs, or
            a mix of floats, integers, or NaNs.

        See Also
        --------
        is_boolean : Check if the Index only consists of booleans (deprecated).
        is_integer : Check if the Index only consists of integers (deprecated).
        is_numeric : Check if the Index only consists of numeric data (deprecated).
        is_object : Check if the Index is of the object dtype (deprecated).
        is_categorical : Check if the Index holds categorical data (deprecated).
        is_interval : Check if the Index holds Interval objects (deprecated).

        Examples
        --------
        >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
        >>> idx.is_floating()  # doctest: +SKIP
        True

        >>> idx = pd.Index([1.0, 2.0, np.nan, 4.0])
        >>> idx.is_floating()  # doctest: +SKIP
        True

        >>> idx = pd.Index([1, 2, 3, 4, np.nan])
        >>> idx.is_floating()  # doctest: +SKIP
        True

        >>> idx = pd.Index([1, 2, 3, 4])
        >>> idx.is_floating()  # doctest: +SKIP
        False
        """
        warnings.warn(
            f"{type(self).__name__}.is_floating is deprecated. "
            "Use pandas.api.types.is_float_dtype instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self.inferred_type in ["floating", "mixed-integer-float", "integer-na"]

    def is_numeric(self) -> bool:
        """
        Check if the Index only consists of numeric data.

        .. deprecated:: 2.0.0
            Use `pandas.api.types.is_numeric_dtype` instead.

        Returns
        -------
        bool
            Whether or not the Index only consists of numeric data.

        See Also
        --------
        is_boolean : Check if the Index only consists of booleans (deprecated).
        is_integer : Check if the Index only consists of integers (deprecated).
        is_floating : Check if the Index is a floating type (deprecated).
        is_object : Check if the Index is of the object dtype (deprecated).
        is_categorical : Check if the Index holds categorical data (deprecated).
        is_interval : Check if the Index holds Interval objects (deprecated).

        Examples
        --------
        >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
        >>> idx.is_numeric()  # doctest: +SKIP
        True

        >>> idx = pd.Index([1, 2, 3, 4.0])
        >>> idx.is_numeric()  # doctest: +SKIP
        True

        >>> idx = pd.Index([1, 2, 3, 4])
        >>> idx.is_numeric()  # doctest: +SKIP
        True

        >>> idx = pd.Index([1, 2, 3, 4.0, np.nan])
        >>> idx.is_numeric()  # doctest: +SKIP
        True

        >>> idx = pd.Index([1, 2, 3, 4.0, np.nan, "Apple"])
        >>> idx.is_numeric()  # doctest: +SKIP
        False
        """
        warnings.warn(
            f"{type(self).__name__}.is_numeric is deprecated. "
            "Use pandas.api.types.is_any_real_numeric_dtype instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self.inferred_type in ["integer", "floating"]

    def is_object(self) -> bool:
        """
        Check if the Index is of the object dtype.

        .. deprecated:: 2.0.0
            Use `pandas.api.types.is_object_dtype` instead.

        Returns
        -------
        bool
            Whether or not the Index is of the object dtype.

        See Also
        --------
        is_boolean : Check if the Index only consists of booleans (deprecated).
        is_integer : Check if the Index only consists of integers (deprecated).
        is_floating : Check if the Index is a floating type (deprecated).
        is_numeric : Check if the Index only consists of numeric data (deprecated).
        is_categorical : Check if the Index holds categorical data (deprecated).
        is_interval : Check if the Index holds Interval objects (deprecated).

        Examples
        --------
        >>> idx = pd.Index(["Apple", "Mango", "Watermelon"])
        >>> idx.is_object()  # doctest: +SKIP
        True

        >>> idx = pd.Index(["Apple", "Mango", 2.0])
        >>> idx.is_object()  # doctest: +SKIP
        True

        >>> idx = pd.Index(["Watermelon", "Orange", "Apple",
"Watermelon"]).astype("category") >>> idx.is_object() # doctest: +SKIP False >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_object() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_object is deprecated." "Use pandas.api.types.is_object_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return is_object_dtype(self.dtype) def is_categorical(self) -> bool: """ Check if the Index holds categorical data. .. deprecated:: 2.0.0 Use :meth:`pandas.api.types.is_categorical_dtype` instead. Returns ------- bool True if the Index is categorical. See Also -------- CategoricalIndex : Index for categorical data. is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index(["Watermelon", "Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.is_categorical() # doctest: +SKIP True >>> idx = pd.Index([1, 3, 5, 7]) >>> idx.is_categorical() # doctest: +SKIP False >>> s = pd.Series(["Peter", "Victor", "Elisabeth", "Mar"]) >>> s 0 Peter 1 Victor 2 Elisabeth 3 Mar dtype: object >>> s.index.is_categorical() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_categorical is deprecated." "Use pandas.api.types.is_categorical_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["categorical"] def is_interval(self) -> bool: """ Check if the Index holds Interval objects. .. deprecated:: 2.0.0 Use `pandas.api.types.is_interval_dtype` instead. Returns ------- bool Whether or not the Index holds Interval objects. See Also -------- IntervalIndex : Index for Interval objects. is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). Examples -------- >>> idx = pd.Index([pd.Interval(left=0, right=5), ... pd.Interval(left=5, right=10)]) >>> idx.is_interval() # doctest: +SKIP True >>> idx = pd.Index([1, 3, 5, 7]) >>> idx.is_interval() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_interval is deprecated." "Use pandas.api.types.is_interval_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["interval"] def _holds_integer(self) -> bool: """ Whether the type is an integer type. """ return self.inferred_type in ["integer", "mixed-integer"] def holds_integer(self) -> bool: """ Whether the type is an integer type. .. deprecated:: 2.0.0 Use `pandas.api.types.infer_dtype` instead """ warnings.warn( f"{type(self).__name__}.holds_integer is deprecated. " "Use pandas.api.types.infer_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self._holds_integer() def inferred_type(self) -> str_t: """ Return a string of the type inferred from the values. """ return lib.infer_dtype(self._values, skipna=False) def _is_all_dates(self) -> bool: """ Whether or not the index values only consist of dates. 
""" if needs_i8_conversion(self.dtype): return True elif self.dtype != _dtype_obj: # TODO(ExtensionIndex): 3rd party EA might override? # Note: this includes IntervalIndex, even when the left/right # contain datetime-like objects. return False elif self._is_multi: return False return is_datetime_array(ensure_object(self._values)) def _is_multi(self) -> bool: """ Cached check equivalent to isinstance(self, MultiIndex) """ return isinstance(self, ABCMultiIndex) # -------------------------------------------------------------------- # Pickle Methods def __reduce__(self): d = {"data": self._data, "name": self.name} return _new_Index, (type(self), d), None # -------------------------------------------------------------------- # Null Handling Methods def _na_value(self): """The expected NA value to use with this index.""" dtype = self.dtype if isinstance(dtype, np.dtype): if dtype.kind in ["m", "M"]: return NaT return np.nan return dtype.na_value def _isnan(self) -> npt.NDArray[np.bool_]: """ Return if each value is NaN. """ if self._can_hold_na: return isna(self) else: # shouldn't reach to this condition by checking hasnans beforehand values = np.empty(len(self), dtype=np.bool_) values.fill(False) return values def hasnans(self) -> bool: """ Return True if there are any NaNs. Enables various performance speedups. Returns ------- bool """ if self._can_hold_na: return bool(self._isnan.any()) else: return False def isna(self) -> npt.NDArray[np.bool_]: """ Detect missing values. Return a boolean same-sized object indicating if the values are NA. NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`pd.NaT`, get mapped to ``True`` values. Everything else get mapped to ``False`` values. Characters such as empty strings `''` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). Returns ------- numpy.ndarray[bool] A boolean array of whether my values are NA. See Also -------- Index.notna : Boolean inverse of isna. Index.dropna : Omit entries with missing values. isna : Top-level isna. Series.isna : Detect missing values in Series object. Examples -------- Show which entries in a pandas.Index are NA. The result is an array. >>> idx = pd.Index([5.2, 6.0, np.NaN]) >>> idx Index([5.2, 6.0, nan], dtype='float64') >>> idx.isna() array([False, False, True]) Empty strings are not considered NA values. None is considered an NA value. >>> idx = pd.Index(['black', '', 'red', None]) >>> idx Index(['black', '', 'red', None], dtype='object') >>> idx.isna() array([False, False, False, True]) For datetimes, `NaT` (Not a Time) is considered as an NA value. >>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'), ... pd.Timestamp(''), None, pd.NaT]) >>> idx DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'], dtype='datetime64[ns]', freq=None) >>> idx.isna() array([False, True, True, True]) """ return self._isnan isnull = isna def notna(self) -> npt.NDArray[np.bool_]: """ Detect existing (non-missing) values. Return a boolean same-sized object indicating if the values are not NA. Non-missing values get mapped to ``True``. Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). NA values, such as None or :attr:`numpy.NaN`, get mapped to ``False`` values. Returns ------- numpy.ndarray[bool] Boolean array to indicate which entries are not NA. See Also -------- Index.notnull : Alias of notna. Index.isna: Inverse of notna. notna : Top-level notna. 
        Examples
        --------
        Show which entries in an Index are not NA. The result is an
        array.

        >>> idx = pd.Index([5.2, 6.0, np.NaN])
        >>> idx
        Index([5.2, 6.0, nan], dtype='float64')
        >>> idx.notna()
        array([ True,  True, False])

        Empty strings are not considered NA values. None is considered an NA
        value.

        >>> idx = pd.Index(['black', '', 'red', None])
        >>> idx
        Index(['black', '', 'red', None], dtype='object')
        >>> idx.notna()
        array([ True,  True,  True, False])
        """
        return ~self.isna()

    notnull = notna

    def fillna(self, value=None, downcast=None):
        """
        Fill NA/NaN values with the specified value.

        Parameters
        ----------
        value : scalar
            Scalar value to use to fill holes (e.g. 0).
            This value cannot be a list-like.
        downcast : dict, default is None
            A dict of item->dtype of what to downcast if possible,
            or the string 'infer' which will try to downcast to an appropriate
            equal type (e.g. float64 to int64 if possible).

        Returns
        -------
        Index

        See Also
        --------
        DataFrame.fillna : Fill NaN values of a DataFrame.
        Series.fillna : Fill NaN values of a Series.
        """

        value = self._require_scalar(value)
        if self.hasnans:
            result = self.putmask(self._isnan, value)
            if downcast is None:
                # no need to care metadata other than name
                # because it can't have freq if it has NaTs
                # _with_infer needed for test_fillna_categorical
                return Index._with_infer(result, name=self.name)
            raise NotImplementedError(
                f"{type(self).__name__}.fillna does not support 'downcast' "
                "argument values other than 'None'."
            )
        return self._view()

    def dropna(self: _IndexT, how: AnyAll = "any") -> _IndexT:
        """
        Return Index without NA/NaN values.

        Parameters
        ----------
        how : {'any', 'all'}, default 'any'
            If the Index is a MultiIndex, drop the value when any or all levels
            are NaN.

        Returns
        -------
        Index
        """
        if how not in ("any", "all"):
            raise ValueError(f"invalid how option: {how}")

        if self.hasnans:
            res_values = self._values[~self._isnan]
            return type(self)._simple_new(res_values, name=self.name)
        return self._view()

    # --------------------------------------------------------------------
    # Uniqueness Methods

    def unique(self: _IndexT, level: Hashable | None = None) -> _IndexT:
        """
        Return unique values in the index.

        Unique values are returned in order of appearance, this does NOT sort.

        Parameters
        ----------
        level : int or hashable, optional
            Only return values from specified level (for MultiIndex).
            If int, gets the level by integer position, else by level name.

        Returns
        -------
        Index

        See Also
        --------
        unique : Numpy array of unique values in that column.
        Series.unique : Return unique values of Series object.
        """
        if level is not None:
            self._validate_index_level(level)

        if self.is_unique:
            return self._view()

        result = super().unique()
        return self._shallow_copy(result)

    def drop_duplicates(self: _IndexT, *, keep: DropKeep = "first") -> _IndexT:
        """
        Return Index with duplicate values removed.

        Parameters
        ----------
        keep : {'first', 'last', ``False``}, default 'first'
            - 'first' : Drop duplicates except for the first occurrence.
            - 'last' : Drop duplicates except for the last occurrence.
            - ``False`` : Drop all duplicates.

        Returns
        -------
        Index

        See Also
        --------
        Series.drop_duplicates : Equivalent method on Series.
        DataFrame.drop_duplicates : Equivalent method on DataFrame.
        Index.duplicated : Related method on Index, indicating duplicate
            Index values.

        Examples
        --------
        Generate a pandas.Index with duplicate values.

        >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'])

        The `keep` parameter controls which duplicate values are removed.
The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> idx.drop_duplicates(keep='first') Index(['lama', 'cow', 'beetle', 'hippo'], dtype='object') The value 'last' keeps the last occurrence for each set of duplicated entries. >>> idx.drop_duplicates(keep='last') Index(['cow', 'beetle', 'lama', 'hippo'], dtype='object') The value ``False`` discards all sets of duplicated entries. >>> idx.drop_duplicates(keep=False) Index(['cow', 'beetle', 'hippo'], dtype='object') """ if self.is_unique: return self._view() return super().drop_duplicates(keep=keep) def duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]: """ Indicate duplicate index values. Duplicated values are indicated as ``True`` values in the resulting array. Either all duplicates, all except the first, or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' The value or values in a set of duplicates to mark as missing. - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- np.ndarray[bool] See Also -------- Series.duplicated : Equivalent method on pandas.Series. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Index.drop_duplicates : Remove duplicate values from Index. Examples -------- By default, for each set of duplicated values, the first occurrence is set to False and all others to True: >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> idx.duplicated() array([False, False, True, False, True]) which is equivalent to >>> idx.duplicated(keep='first') array([False, False, True, False, True]) By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True: >>> idx.duplicated(keep='last') array([ True, False, True, False, False]) By setting keep on ``False``, all duplicates are True: >>> idx.duplicated(keep=False) array([ True, False, True, False, True]) """ if self.is_unique: # fastpath available bc we are immutable return np.zeros(len(self), dtype=bool) return self._duplicated(keep=keep) # -------------------------------------------------------------------- # Arithmetic & Logical Methods def __iadd__(self, other): # alias for __add__ return self + other def __nonzero__(self) -> NoReturn: raise ValueError( f"The truth value of a {type(self).__name__} is ambiguous. " "Use a.empty, a.bool(), a.item(), a.any() or a.all()." ) __bool__ = __nonzero__ # -------------------------------------------------------------------- # Set Operation Methods def _get_reconciled_name_object(self, other): """ If the result of a set operation will be self, return self, unless the name changes, in which case make a shallow copy of self. """ name = get_op_result_name(self, other) if self.name is not name: return self.rename(name) return self def _validate_sort_keyword(self, sort): if sort not in [None, False, True]: raise ValueError( "The 'sort' keyword only takes the values of " f"None, True, or False; {sort} was passed." ) def _dti_setop_align_tzs(self, other: Index, setop: str_t) -> tuple[Index, Index]: """ With mismatched timezones, cast both to UTC. 
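
        Illustrative sketch (variable names assumed; not a CI-run doctest):

        >>> left = pd.date_range("2020-01-01", periods=2, tz="US/Eastern")
        >>> right = pd.date_range("2020-01-01", periods=2, tz="Europe/Berlin")
        >>> l, r = left._dti_setop_align_tzs(right, "union")  # doctest: +SKIP
        >>> str(l.tz), str(r.tz)  # doctest: +SKIP
        ('UTC', 'UTC')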
""" # Caller is responsibelf or checking # `not is_dtype_equal(self.dtype, other.dtype)` if ( isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex) and self.tz is not None and other.tz is not None ): # GH#39328, GH#45357 left = self.tz_convert("UTC") right = other.tz_convert("UTC") return left, right return self, other def union(self, other, sort=None): """ Form the union of two Index objects. If the Index objects are incompatible, both Index objects will be cast to dtype('object') first. Parameters ---------- other : Index or array-like sort : bool or None, default None Whether to sort the resulting Index. * None : Sort the result, except when 1. `self` and `other` are equal. 2. `self` or `other` has length 0. 3. Some values in `self` or `other` cannot be compared. A RuntimeWarning is issued in this case. * False : do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Examples -------- Union matching dtypes >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.union(idx2) Index([1, 2, 3, 4, 5, 6], dtype='int64') Union mismatched dtypes >>> idx1 = pd.Index(['a', 'b', 'c', 'd']) >>> idx2 = pd.Index([1, 2, 3, 4]) >>> idx1.union(idx2) Index(['a', 'b', 'c', 'd', 1, 2, 3, 4], dtype='object') MultiIndex case >>> idx1 = pd.MultiIndex.from_arrays( ... [[1, 1, 2, 2], ["Red", "Blue", "Red", "Blue"]] ... ) >>> idx1 MultiIndex([(1, 'Red'), (1, 'Blue'), (2, 'Red'), (2, 'Blue')], ) >>> idx2 = pd.MultiIndex.from_arrays( ... [[3, 3, 2, 2], ["Red", "Green", "Red", "Green"]] ... ) >>> idx2 MultiIndex([(3, 'Red'), (3, 'Green'), (2, 'Red'), (2, 'Green')], ) >>> idx1.union(idx2) MultiIndex([(1, 'Blue'), (1, 'Red'), (2, 'Blue'), (2, 'Green'), (2, 'Red'), (3, 'Green'), (3, 'Red')], ) >>> idx1.union(idx2, sort=False) MultiIndex([(1, 'Red'), (1, 'Blue'), (2, 'Red'), (2, 'Blue'), (3, 'Red'), (3, 'Green'), (2, 'Green')], ) """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) if not is_dtype_equal(self.dtype, other.dtype): if ( isinstance(self, ABCMultiIndex) and not is_object_dtype(_unpack_nested_dtype(other)) and len(other) > 0 ): raise NotImplementedError( "Can only union MultiIndex with MultiIndex or Index of tuples, " "try mi.to_flat_index().union(other) instead." ) self, other = self._dti_setop_align_tzs(other, "union") dtype = self._find_common_type_compat(other) left = self.astype(dtype, copy=False) right = other.astype(dtype, copy=False) return left.union(right, sort=sort) elif not len(other) or self.equals(other): # NB: whether this (and the `if not len(self)` check below) come before # or after the is_dtype_equal check above affects the returned dtype result = self._get_reconciled_name_object(other) if sort is True: return result.sort_values() return result elif not len(self): result = other._get_reconciled_name_object(self) if sort is True: return result.sort_values() return result result = self._union(other, sort=sort) return self._wrap_setop_result(other, result) def _union(self, other: Index, sort): """ Specific union logic should go here. In subclasses, union behavior should be overwritten here rather than in `self.union`. Parameters ---------- other : Index or array-like sort : False or None, default False Whether to sort the resulting index. * False : do not sort the result. * None : sort the result, except when `self` and `other` are equal or when the values cannot be compared. 
Returns ------- Index """ lvals = self._values rvals = other._values if ( sort is None and self.is_monotonic_increasing and other.is_monotonic_increasing and not (self.has_duplicates and other.has_duplicates) and self._can_use_libjoin ): # Both are monotonic and at least one is unique, so can use outer join # (actually don't need either unique, but without this restriction # test_union_same_value_duplicated_in_both fails) try: return self._outer_indexer(other)[0] except (TypeError, IncompatibleFrequency): # incomparable objects; should only be for object dtype value_list = list(lvals) # worth making this faster? a very unusual case value_set = set(lvals) value_list.extend([x for x in rvals if x not in value_set]) # If objects are unorderable, we must have object dtype. return np.array(value_list, dtype=object) elif not other.is_unique: # other has duplicates result_dups = algos.union_with_duplicates(self, other) return _maybe_try_sort(result_dups, sort) # The rest of this method is analogous to Index._intersection_via_get_indexer # Self may have duplicates; other already checked as unique # find indexes of things in "other" that are not in "self" if self._index_as_unique: indexer = self.get_indexer(other) missing = (indexer == -1).nonzero()[0] else: missing = algos.unique1d(self.get_indexer_non_unique(other)[1]) result: Index | MultiIndex | ArrayLike if self._is_multi: # Preserve MultiIndex to avoid losing dtypes result = self.append(other.take(missing)) else: if len(missing) > 0: other_diff = rvals.take(missing) result = concat_compat((lvals, other_diff)) else: result = lvals if not self.is_monotonic_increasing or not other.is_monotonic_increasing: # if both are monotonic then result should already be sorted result = _maybe_try_sort(result, sort) return result def _wrap_setop_result(self, other: Index, result) -> Index: name = get_op_result_name(self, other) if isinstance(result, Index): if result.name != name: result = result.rename(name) else: result = self._shallow_copy(result, name=name) return result def intersection(self, other, sort: bool = False): """ Form the intersection of two Index objects. This returns a new Index with elements common to the index and `other`. Parameters ---------- other : Index or array-like sort : True, False or None, default False Whether to sort the resulting index. * None : sort the result, except when `self` and `other` are equal or when the values cannot be compared. * False : do not sort the result. * True : Sort the result (which may raise TypeError). 
Returns ------- Index Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.intersection(idx2) Index([3, 4], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) if not is_dtype_equal(self.dtype, other.dtype): self, other = self._dti_setop_align_tzs(other, "intersection") if self.equals(other): if self.has_duplicates: result = self.unique()._get_reconciled_name_object(other) else: result = self._get_reconciled_name_object(other) if sort is True: result = result.sort_values() return result if len(self) == 0 or len(other) == 0: # fastpath; we need to be careful about having commutativity if self._is_multi or other._is_multi: # _convert_can_do_setop ensures that we have both or neither # We retain self.levels return self[:0].rename(result_name) dtype = self._find_common_type_compat(other) if is_dtype_equal(self.dtype, dtype): # Slicing allows us to retain DTI/TDI.freq, RangeIndex # Note: self[:0] vs other[:0] affects # 1) which index's `freq` we get in DTI/TDI cases # This may be a historical artifact, i.e. no documented # reason for this choice. # 2) The `step` we get in RangeIndex cases if len(self) == 0: return self[:0].rename(result_name) else: return other[:0].rename(result_name) return Index([], dtype=dtype, name=result_name) elif not self._should_compare(other): # We can infer that the intersection is empty. if isinstance(self, ABCMultiIndex): return self[:0].rename(result_name) return Index([], name=result_name) elif not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) other = other.astype(dtype, copy=False) return this.intersection(other, sort=sort) result = self._intersection(other, sort=sort) return self._wrap_intersection_result(other, result) def _intersection(self, other: Index, sort: bool = False): """ intersection specialized to the case with matching dtypes. """ if ( self.is_monotonic_increasing and other.is_monotonic_increasing and self._can_use_libjoin and not isinstance(self, ABCMultiIndex) ): try: res_indexer, indexer, _ = self._inner_indexer(other) except TypeError: # non-comparable; should only be for object dtype pass else: # TODO: algos.unique1d should preserve DTA/TDA if is_numeric_dtype(self): # This is faster, because Index.unique() checks for uniqueness # before calculating the unique values. res = algos.unique1d(res_indexer) else: result = self.take(indexer) res = result.drop_duplicates() return ensure_wrapped_if_datetimelike(res) res_values = self._intersection_via_get_indexer(other, sort=sort) res_values = _maybe_try_sort(res_values, sort) return res_values def _wrap_intersection_result(self, other, result): # We will override for MultiIndex to handle empty results return self._wrap_setop_result(other, result) def _intersection_via_get_indexer( self, other: Index | MultiIndex, sort ) -> ArrayLike | MultiIndex: """ Find the intersection of two Indexes using get_indexer. Returns ------- np.ndarray or ExtensionArray The returned array will be unique. 
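
        Notes
        -----
        Both operands are deduplicated before matching, so each common value
        appears once. With ``sort=False`` the matched positions are re-sorted
        so the result follows ``self``'s original order; with ``sort=None``
        any sorting is deferred to the caller.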
""" left_unique = self.unique() right_unique = other.unique() # even though we are unique, we need get_indexer_for for IntervalIndex indexer = left_unique.get_indexer_for(right_unique) mask = indexer != -1 taker = indexer.take(mask.nonzero()[0]) if sort is False: # sort bc we want the elements in the same order they are in self # unnecessary in the case with sort=None bc we will sort later taker = np.sort(taker) if isinstance(left_unique, ABCMultiIndex): result = left_unique.take(taker) else: result = left_unique.take(taker)._values return result def difference(self, other, sort=None): """ Return a new Index with elements of index not in `other`. This is the set difference of two Index objects. Parameters ---------- other : Index or array-like sort : bool or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Examples -------- >>> idx1 = pd.Index([2, 1, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.difference(idx2) Index([1, 2], dtype='int64') >>> idx1.difference(idx2, sort=False) Index([2, 1], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) # Note: we do NOT call _dti_setop_align_tzs here, as there # is no requirement that .difference be commutative, so it does # not cast to object. if self.equals(other): # Note: we do not (yet) sort even if sort=None GH#24959 return self[:0].rename(result_name) if len(other) == 0: # Note: we do not (yet) sort even if sort=None GH#24959 result = self.rename(result_name) if sort is True: return result.sort_values() return result if not self._should_compare(other): # Nothing matches -> difference is everything result = self.rename(result_name) if sort is True: return result.sort_values() return result result = self._difference(other, sort=sort) return self._wrap_difference_result(other, result) def _difference(self, other, sort): # overridden by RangeIndex this = self.unique() indexer = this.get_indexer_for(other) indexer = indexer.take((indexer != -1).nonzero()[0]) label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True) the_diff: MultiIndex | ArrayLike if isinstance(this, ABCMultiIndex): the_diff = this.take(label_diff) else: the_diff = this._values.take(label_diff) the_diff = _maybe_try_sort(the_diff, sort) return the_diff def _wrap_difference_result(self, other, result): # We will override for MultiIndex to handle empty results return self._wrap_setop_result(other, result) def symmetric_difference(self, other, result_name=None, sort=None): """ Compute the symmetric difference of two Index objects. Parameters ---------- other : Index or array-like result_name : str sort : bool or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Notes ----- ``symmetric_difference`` contains elements that appear in either ``idx1`` or ``idx2`` but not both. 
Equivalent to the Index created by ``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates dropped. Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([2, 3, 4, 5]) >>> idx1.symmetric_difference(idx2) Index([1, 5], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name_update = self._convert_can_do_setop(other) if result_name is None: result_name = result_name_update if not is_dtype_equal(self.dtype, other.dtype): self, other = self._dti_setop_align_tzs(other, "symmetric_difference") if not self._should_compare(other): return self.union(other, sort=sort).rename(result_name) elif not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) that = other.astype(dtype, copy=False) return this.symmetric_difference(that, sort=sort).rename(result_name) this = self.unique() other = other.unique() indexer = this.get_indexer_for(other) # {this} minus {other} common_indexer = indexer.take((indexer != -1).nonzero()[0]) left_indexer = np.setdiff1d( np.arange(this.size), common_indexer, assume_unique=True ) left_diff = this.take(left_indexer) # {other} minus {this} right_indexer = (indexer == -1).nonzero()[0] right_diff = other.take(right_indexer) res_values = left_diff.append(right_diff) result = _maybe_try_sort(res_values, sort) if not self._is_multi: return Index(result, name=result_name, dtype=res_values.dtype) else: left_diff = cast("MultiIndex", left_diff) if len(result) == 0: # result might be an Index, if other was an Index return left_diff.remove_unused_levels().set_names(result_name) return result.set_names(result_name) def _assert_can_do_setop(self, other) -> bool: if not is_list_like(other): raise TypeError("Input must be Index or array-like") return True def _convert_can_do_setop(self, other) -> tuple[Index, Hashable]: if not isinstance(other, Index): other = Index(other, name=self.name) result_name = self.name else: result_name = get_op_result_name(self, other) return other, result_name # -------------------------------------------------------------------- # Indexing Methods def get_loc(self, key): """ Get integer location, slice or boolean mask for requested label. Parameters ---------- key : label Returns ------- int if unique index, slice if monotonic index, else mask Examples -------- >>> unique_index = pd.Index(list('abc')) >>> unique_index.get_loc('b') 1 >>> monotonic_index = pd.Index(list('abbc')) >>> monotonic_index.get_loc('b') slice(1, 3, None) >>> non_monotonic_index = pd.Index(list('abcb')) >>> non_monotonic_index.get_loc('b') array([False, True, False, True]) """ casted_key = self._maybe_cast_indexer(key) try: return self._engine.get_loc(casted_key) except KeyError as err: raise KeyError(key) from err except TypeError: # If we have a listlike key, _check_indexing_error will raise # InvalidIndexError. Otherwise we fall through and re-raise # the TypeError. self._check_indexing_error(key) raise _index_shared_docs[ "get_indexer" ] = """ Compute indexer and mask for new index given the current index. The indexer should be then used as an input to ndarray.take to align the current data to the new index. Parameters ---------- target : %(target_klass)s method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional * default: exact matches only. * pad / ffill: find the PREVIOUS index value if no exact match. * backfill / bfill: use NEXT index value if no exact match * nearest: use the NEAREST index value if no exact match. 
Tied distances are broken by preferring the larger index value. limit : int, optional Maximum number of consecutive labels in ``target`` to match for inexact matches. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- np.ndarray[np.intp] Integers from 0 to n - 1 indicating that the index at these positions matches the corresponding target values. Missing values in the target are marked by -1. %(raises_section)s Notes ----- Returns -1 for unmatched values, for further explanation see the example below. Examples -------- >>> index = pd.Index(['c', 'a', 'b']) >>> index.get_indexer(['a', 'b', 'x']) array([ 1, 2, -1]) Notice that the return value is an array of locations in ``index`` and ``x`` is marked by -1, as it is not in ``index``. """ def get_indexer( self, target, method: str_t | None = None, limit: int | None = None, tolerance=None, ) -> npt.NDArray[np.intp]: method = clean_reindex_fill_method(method) orig_target = target target = self._maybe_cast_listlike_indexer(target) self._check_indexing_method(method, limit, tolerance) if not self._index_as_unique: raise InvalidIndexError(self._requires_unique_msg) if len(target) == 0: return np.array([], dtype=np.intp) if not self._should_compare(target) and not self._should_partial_index(target): # IntervalIndex get special treatment bc numeric scalars can be # matched to Interval scalars return self._get_indexer_non_comparable(target, method=method, unique=True) if is_categorical_dtype(self.dtype): # _maybe_cast_listlike_indexer ensures target has our dtype # (could improve perf by doing _should_compare check earlier?) assert is_dtype_equal(self.dtype, target.dtype) indexer = self._engine.get_indexer(target.codes) if self.hasnans and target.hasnans: # After _maybe_cast_listlike_indexer, target elements which do not # belong to some category are changed to NaNs # Mask to track actual NaN values compared to inserted NaN values # GH#45361 target_nans = isna(orig_target) loc = self.get_loc(np.nan) mask = target.isna() indexer[target_nans] = loc indexer[mask & ~target_nans] = -1 return indexer if is_categorical_dtype(target.dtype): # potential fastpath # get an indexer for unique categories then propagate to codes via take_nd # get_indexer instead of _get_indexer needed for MultiIndex cases # e.g. 
            #  test_append_different_columns_types
            categories_indexer = self.get_indexer(target.categories)

            indexer = algos.take_nd(categories_indexer, target.codes, fill_value=-1)

            if (not self._is_multi and self.hasnans) and target.hasnans:
                # Exclude MultiIndex because hasnans raises NotImplementedError
                # we should only get here if we are unique, so loc is an integer
                # GH#41934
                loc = self.get_loc(np.nan)
                mask = target.isna()
                indexer[mask] = loc

            return ensure_platform_int(indexer)

        pself, ptarget = self._maybe_promote(target)
        if pself is not self or ptarget is not target:
            return pself.get_indexer(
                ptarget, method=method, limit=limit, tolerance=tolerance
            )

        if is_dtype_equal(self.dtype, target.dtype) and self.equals(target):
            # Only call equals if we have same dtype to avoid inference/casting
            return np.arange(len(target), dtype=np.intp)

        if not is_dtype_equal(
            self.dtype, target.dtype
        ) and not self._should_partial_index(target):
            # _should_partial_index e.g. IntervalIndex with numeric scalars
            #  that can be matched to Interval scalars.
            dtype = self._find_common_type_compat(target)
            this = self.astype(dtype, copy=False)
            target = target.astype(dtype, copy=False)
            return this._get_indexer(
                target, method=method, limit=limit, tolerance=tolerance
            )

        return self._get_indexer(target, method, limit, tolerance)

    def _get_indexer(
        self,
        target: Index,
        method: str_t | None = None,
        limit: int | None = None,
        tolerance=None,
    ) -> npt.NDArray[np.intp]:
        if tolerance is not None:
            tolerance = self._convert_tolerance(tolerance, target)

        if method in ["pad", "backfill"]:
            indexer = self._get_fill_indexer(target, method, limit, tolerance)
        elif method == "nearest":
            indexer = self._get_nearest_indexer(target, limit, tolerance)
        else:
            if target._is_multi and self._is_multi:
                engine = self._engine
                # error: Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]"
                #  has no attribute "_extract_level_codes"
                tgt_values = engine._extract_level_codes(  # type: ignore[union-attr]
                    target
                )
            else:
                tgt_values = target._get_engine_target()

            indexer = self._engine.get_indexer(tgt_values)

        return ensure_platform_int(indexer)

    def _should_partial_index(self, target: Index) -> bool:
        """
        Should we attempt partial-matching indexing?
        """
        if is_interval_dtype(self.dtype):
            if is_interval_dtype(target.dtype):
                return False
            # See https://github.com/pandas-dev/pandas/issues/47772 the commented
            #  out code can be restored (instead of hardcoding `return True`)
            #  once that issue is fixed
            # "Index" has no attribute "left"
            # return self.left._should_compare(target)  # type: ignore[attr-defined]
            return True
        return False

    def _check_indexing_method(
        self,
        method: str_t | None,
        limit: int | None = None,
        tolerance=None,
    ) -> None:
        """
        Raise if we have a get_indexer `method` that is not supported or valid.
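
        For example (per the checks below), ``method="nearest"`` raises
        ``NotImplementedError`` on a MultiIndex, and any non-None ``method``
        raises for interval or categorical dtypes.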
""" if method not in [None, "bfill", "backfill", "pad", "ffill", "nearest"]: # in practice the clean_reindex_fill_method call would raise # before we get here raise ValueError("Invalid fill method") # pragma: no cover if self._is_multi: if method == "nearest": raise NotImplementedError( "method='nearest' not implemented yet " "for MultiIndex; see GitHub issue 9365" ) if method in ("pad", "backfill"): if tolerance is not None: raise NotImplementedError( "tolerance not implemented yet for MultiIndex" ) if is_interval_dtype(self.dtype) or is_categorical_dtype(self.dtype): # GH#37871 for now this is only for IntervalIndex and CategoricalIndex if method is not None: raise NotImplementedError( f"method {method} not yet implemented for {type(self).__name__}" ) if method is None: if tolerance is not None: raise ValueError( "tolerance argument only valid if doing pad, " "backfill or nearest reindexing" ) if limit is not None: raise ValueError( "limit argument only valid if doing pad, " "backfill or nearest reindexing" ) def _convert_tolerance(self, tolerance, target: np.ndarray | Index) -> np.ndarray: # override this method on subclasses tolerance = np.asarray(tolerance) if target.size != tolerance.size and tolerance.size > 1: raise ValueError("list-like tolerance size must match target index size") elif is_numeric_dtype(self) and not np.issubdtype(tolerance.dtype, np.number): if tolerance.ndim > 0: raise ValueError( f"tolerance argument for {type(self).__name__} with dtype " f"{self.dtype} must contain numeric elements if it is list type" ) raise ValueError( f"tolerance argument for {type(self).__name__} with dtype {self.dtype} " f"must be numeric if it is a scalar: {repr(tolerance)}" ) return tolerance def _get_fill_indexer( self, target: Index, method: str_t, limit: int | None = None, tolerance=None ) -> npt.NDArray[np.intp]: if self._is_multi: # TODO: get_indexer_with_fill docstring says values must be _sorted_ # but that doesn't appear to be enforced # error: "IndexEngine" has no attribute "get_indexer_with_fill" engine = self._engine with warnings.catch_warnings(): # TODO: We need to fix this. Casting to int64 in cython warnings.filterwarnings("ignore", category=RuntimeWarning) return engine.get_indexer_with_fill( # type: ignore[union-attr] target=target._values, values=self._values, method=method, limit=limit, ) if self.is_monotonic_increasing and target.is_monotonic_increasing: target_values = target._get_engine_target() own_values = self._get_engine_target() if not isinstance(target_values, np.ndarray) or not isinstance( own_values, np.ndarray ): raise NotImplementedError if method == "pad": indexer = libalgos.pad(own_values, target_values, limit=limit) else: # i.e. "backfill" indexer = libalgos.backfill(own_values, target_values, limit=limit) else: indexer = self._get_fill_indexer_searchsorted(target, method, limit) if tolerance is not None and len(self): indexer = self._filter_indexer_tolerance(target, indexer, tolerance) return indexer def _get_fill_indexer_searchsorted( self, target: Index, method: str_t, limit: int | None = None ) -> npt.NDArray[np.intp]: """ Fallback pad/backfill get_indexer that works for monotonic decreasing indexes and non-monotonic targets. 
""" if limit is not None: raise ValueError( f"limit argument for {repr(method)} method only well-defined " "if index and target are monotonic" ) side: Literal["left", "right"] = "left" if method == "pad" else "right" # find exact matches first (this simplifies the algorithm) indexer = self.get_indexer(target) nonexact = indexer == -1 indexer[nonexact] = self._searchsorted_monotonic(target[nonexact], side) if side == "left": # searchsorted returns "indices into a sorted array such that, # if the corresponding elements in v were inserted before the # indices, the order of a would be preserved". # Thus, we need to subtract 1 to find values to the left. indexer[nonexact] -= 1 # This also mapped not found values (values of 0 from # np.searchsorted) to -1, which conveniently is also our # sentinel for missing values else: # Mark indices to the right of the largest value as not found indexer[indexer == len(self)] = -1 return indexer def _get_nearest_indexer( self, target: Index, limit: int | None, tolerance ) -> npt.NDArray[np.intp]: """ Get the indexer for the nearest index labels; requires an index with values that can be subtracted from each other (e.g., not strings or tuples). """ if not len(self): return self._get_fill_indexer(target, "pad") left_indexer = self.get_indexer(target, "pad", limit=limit) right_indexer = self.get_indexer(target, "backfill", limit=limit) left_distances = self._difference_compat(target, left_indexer) right_distances = self._difference_compat(target, right_indexer) op = operator.lt if self.is_monotonic_increasing else operator.le indexer = np.where( # error: Argument 1&2 has incompatible type "Union[ExtensionArray, # ndarray[Any, Any]]"; expected "Union[SupportsDunderLE, # SupportsDunderGE, SupportsDunderGT, SupportsDunderLT]" op(left_distances, right_distances) # type: ignore[arg-type] | (right_indexer == -1), left_indexer, right_indexer, ) if tolerance is not None: indexer = self._filter_indexer_tolerance(target, indexer, tolerance) return indexer def _filter_indexer_tolerance( self, target: Index, indexer: npt.NDArray[np.intp], tolerance, ) -> npt.NDArray[np.intp]: distance = self._difference_compat(target, indexer) return np.where(distance <= tolerance, indexer, -1) def _difference_compat( self, target: Index, indexer: npt.NDArray[np.intp] ) -> ArrayLike: # Compatibility for PeriodArray, for which __sub__ returns an ndarray[object] # of DateOffset objects, which do not support __abs__ (and would be slow # if they did) if isinstance(self.dtype, PeriodDtype): # Note: we only get here with matching dtypes own_values = cast("PeriodArray", self._data)._ndarray target_values = cast("PeriodArray", target._data)._ndarray diff = own_values[indexer] - target_values else: # error: Unsupported left operand type for - ("ExtensionArray") diff = self._values[indexer] - target._values # type: ignore[operator] return abs(diff) # -------------------------------------------------------------------- # Indexer Conversion Methods def _validate_positional_slice(self, key: slice) -> None: """ For positional indexing, a slice must have either int or None for each of start, stop, and step. """ self._validate_indexer("positional", key.start, "iloc") self._validate_indexer("positional", key.stop, "iloc") self._validate_indexer("positional", key.step, "iloc") def _convert_slice_indexer(self, key: slice, kind: str_t): """ Convert a slice indexer. By definition, these are labels unless 'iloc' is passed in. Floats are not allowed as the start, step, or stop of the slice. 
Parameters ---------- key : label of the slice bound kind : {'loc', 'getitem'} """ assert kind in ["loc", "getitem"], kind # potentially cast the bounds to integers start, stop, step = key.start, key.stop, key.step # TODO(GH#50617): once Series.__[gs]etitem__ is removed we should be able # to simplify this. if isinstance(self.dtype, np.dtype) and is_float_dtype(self.dtype): # We always treat __getitem__ slicing as label-based # translate to locations return self.slice_indexer(start, stop, step) # figure out if this is a positional indexer def is_int(v): return v is None or is_integer(v) is_index_slice = is_int(start) and is_int(stop) and is_int(step) # special case for interval_dtype bc we do not do partial-indexing # on integer Intervals when slicing # TODO: write this in terms of e.g. should_partial_index? ints_are_positional = self._should_fallback_to_positional or is_interval_dtype( self.dtype ) is_positional = is_index_slice and ints_are_positional if kind == "getitem": # called from the getitem slicers, validate that we are in fact integers if is_integer_dtype(self.dtype) or is_index_slice: # Note: these checks are redundant if we know is_index_slice self._validate_indexer("slice", key.start, "getitem") self._validate_indexer("slice", key.stop, "getitem") self._validate_indexer("slice", key.step, "getitem") return key # convert the slice to an indexer here # if we are mixed and have integers if is_positional: try: # Validate start & stop if start is not None: self.get_loc(start) if stop is not None: self.get_loc(stop) is_positional = False except KeyError: pass if com.is_null_slice(key): # It doesn't matter if we are positional or label based indexer = key elif is_positional: if kind == "loc": # GH#16121, GH#24612, GH#31810 raise TypeError( "Slicing a positional slice with .loc is not allowed, " "Use .loc with labels or .iloc with positions instead.", ) indexer = key else: indexer = self.slice_indexer(start, stop, step) return indexer def _raise_invalid_indexer( self, form: str_t, key, reraise: lib.NoDefault | None | Exception = lib.no_default, ) -> None: """ Raise consistent invalid indexer message. """ msg = ( f"cannot do {form} indexing on {type(self).__name__} with these " f"indexers [{key}] of type {type(key).__name__}" ) if reraise is not lib.no_default: raise TypeError(msg) from reraise raise TypeError(msg) # -------------------------------------------------------------------- # Reindex Methods def _validate_can_reindex(self, indexer: np.ndarray) -> None: """ Check if we are allowing reindexing with this particular indexer. Parameters ---------- indexer : an integer ndarray Raises ------ ValueError if its a duplicate axis """ # trying to reindex on an axis with duplicates if not self._index_as_unique and len(indexer): raise ValueError("cannot reindex on an axis with duplicate labels") def reindex( self, target, method=None, level=None, limit=None, tolerance=None ) -> tuple[Index, npt.NDArray[np.intp] | None]: """ Create index with target's values. Parameters ---------- target : an iterable method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional * default: exact matches only. * pad / ffill: find the PREVIOUS index value if no exact match. * backfill / bfill: use NEXT index value if no exact match * nearest: use the NEAREST index value if no exact match. Tied distances are broken by preferring the larger index value. level : int, optional Level of multiindex. limit : int, optional Maximum number of consecutive labels in ``target`` to match for inexact matches. 
tolerance : int or float, optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- new_index : pd.Index Resulting index. indexer : np.ndarray[np.intp] or None Indices of output values in original index. Raises ------ TypeError If ``method`` passed along with ``level``. ValueError If non-unique multi-index ValueError If non-unique index and ``method`` or ``limit`` passed. See Also -------- Series.reindex : Conform Series to new index with optional filling logic. DataFrame.reindex : Conform DataFrame to new index with optional filling logic. Examples -------- >>> idx = pd.Index(['car', 'bike', 'train', 'tractor']) >>> idx Index(['car', 'bike', 'train', 'tractor'], dtype='object') >>> idx.reindex(['car', 'bike']) (Index(['car', 'bike'], dtype='object'), array([0, 1])) """ # GH6552: preserve names when reindexing to non-named target # (i.e. neither Index nor Series). preserve_names = not hasattr(target, "name") # GH7774: preserve dtype/tz if target is empty and not an Index. target = ensure_has_len(target) # target may be an iterator if not isinstance(target, Index) and len(target) == 0: if level is not None and self._is_multi: # "Index" has no attribute "levels"; maybe "nlevels"? idx = self.levels[level] # type: ignore[attr-defined] else: idx = self target = idx[:0] else: target = ensure_index(target) if level is not None and ( isinstance(self, ABCMultiIndex) or isinstance(target, ABCMultiIndex) ): if method is not None: raise TypeError("Fill method not supported if level passed") # TODO: tests where passing `keep_order=not self._is_multi` # makes a difference for non-MultiIndex case target, indexer, _ = self._join_level( target, level, how="right", keep_order=not self._is_multi ) else: if self.equals(target): indexer = None else: if self._index_as_unique: indexer = self.get_indexer( target, method=method, limit=limit, tolerance=tolerance ) elif self._is_multi: raise ValueError("cannot handle a non-unique multi-index!") elif not self.is_unique: # GH#42568 raise ValueError("cannot reindex on an axis with duplicate labels") else: indexer, _ = self.get_indexer_non_unique(target) target = self._wrap_reindex_result(target, indexer, preserve_names) return target, indexer def _wrap_reindex_result(self, target, indexer, preserve_names: bool): target = self._maybe_preserve_names(target, preserve_names) return target def _maybe_preserve_names(self, target: Index, preserve_names: bool): if preserve_names and target.nlevels == 1 and target.name != self.name: target = target.copy(deep=False) target.name = self.name return target def _reindex_non_unique( self, target: Index ) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp] | None]: """ Create a new index with target's values (move/add/delete values as necessary) use with non-unique Index and a possibly non-unique target. Parameters ---------- target : an iterable Returns ------- new_index : pd.Index Resulting index. indexer : np.ndarray[np.intp] Indices of output values in original index. 
new_indexer : np.ndarray[np.intp] or None """ target = ensure_index(target) if len(target) == 0: # GH#13691 return self[:0], np.array([], dtype=np.intp), None indexer, missing = self.get_indexer_non_unique(target) check = indexer != -1 new_labels = self.take(indexer[check]) new_indexer = None if len(missing): length = np.arange(len(indexer), dtype=np.intp) missing = ensure_platform_int(missing) missing_labels = target.take(missing) missing_indexer = length[~check] cur_labels = self.take(indexer[check]).values cur_indexer = length[check] # Index constructor below will do inference new_labels = np.empty((len(indexer),), dtype=object) new_labels[cur_indexer] = cur_labels new_labels[missing_indexer] = missing_labels # GH#38906 if not len(self): new_indexer = np.arange(0, dtype=np.intp) # a unique indexer elif target.is_unique: # see GH5553, make sure we use the right indexer new_indexer = np.arange(len(indexer), dtype=np.intp) new_indexer[cur_indexer] = np.arange(len(cur_labels)) new_indexer[missing_indexer] = -1 # we have a non_unique selector, need to use the original # indexer here else: # need to retake to have the same size as the indexer indexer[~check] = -1 # reset the new indexer to account for the new size new_indexer = np.arange(len(self.take(indexer)), dtype=np.intp) new_indexer[~check] = -1 if not isinstance(self, ABCMultiIndex): new_index = Index(new_labels, name=self.name) else: new_index = type(self).from_tuples(new_labels, names=self.names) return new_index, indexer, new_indexer # -------------------------------------------------------------------- # Join Methods def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: Literal[True], sort: bool = ..., ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: ... def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: Literal[False] = ..., sort: bool = ..., ) -> Index: ... def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: bool = ..., sort: bool = ..., ) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: ... def join( self, other: Index, *, how: JoinHow = "left", level: Level = None, return_indexers: bool = False, sort: bool = False, ) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: """ Compute join_index and indexers to conform data structures to the new index. Parameters ---------- other : Index how : {'left', 'right', 'inner', 'outer'} level : int or level name, default None return_indexers : bool, default False sort : bool, default False Sort the join keys lexicographically in the result Index. If False, the order of the join keys depends on the join type (how keyword). Returns ------- join_index, (left_indexer, right_indexer) """ other = ensure_index(other) if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex): if (self.tz is None) ^ (other.tz is None): # Raise instead of casting to object below. 
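                # Illustrative doctest-style sketch (assumed session, not part
                # of the implementation) of the rule enforced below: mixing
                # tz-naive and tz-aware DatetimeIndexes raises rather than
                # silently falling back to object dtype.
                # >>> naive = pd.date_range("2020-01-01", periods=2)
                # >>> aware = pd.date_range("2020-01-01", periods=2, tz="UTC")
                # >>> naive.join(aware)  # doctest: +SKIP
                # TypeError: Cannot join tz-naive with tz-aware DatetimeIndex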
raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex") if not self._is_multi and not other._is_multi: # We have specific handling for MultiIndex below pself, pother = self._maybe_promote(other) if pself is not self or pother is not other: return pself.join( pother, how=how, level=level, return_indexers=True, sort=sort ) lindexer: np.ndarray | None rindexer: np.ndarray | None # try to figure out the join level # GH3662 if level is None and (self._is_multi or other._is_multi): # have the same levels/names so a simple join if self.names == other.names: pass else: return self._join_multi(other, how=how) # join on the level if level is not None and (self._is_multi or other._is_multi): return self._join_level(other, level, how=how) if len(other) == 0: if how in ("left", "outer"): join_index = self._view() rindexer = np.broadcast_to(np.intp(-1), len(join_index)) return join_index, None, rindexer elif how in ("right", "inner", "cross"): join_index = other._view() lindexer = np.array([]) return join_index, lindexer, None if len(self) == 0: if how in ("right", "outer"): join_index = other._view() lindexer = np.broadcast_to(np.intp(-1), len(join_index)) return join_index, lindexer, None elif how in ("left", "inner", "cross"): join_index = self._view() rindexer = np.array([]) return join_index, None, rindexer if self._join_precedence < other._join_precedence: flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) join_index, lidx, ridx = other.join( self, how=how, level=level, return_indexers=True ) lidx, ridx = ridx, lidx return join_index, lidx, ridx if not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) other = other.astype(dtype, copy=False) return this.join(other, how=how, return_indexers=True) _validate_join_method(how) if not self.is_unique and not other.is_unique: return self._join_non_unique(other, how=how) elif not self.is_unique or not other.is_unique: if self.is_monotonic_increasing and other.is_monotonic_increasing: if not is_interval_dtype(self.dtype): # otherwise we will fall through to _join_via_get_indexer # GH#39133 # go through object dtype for ea till engine is supported properly return self._join_monotonic(other, how=how) else: return self._join_non_unique(other, how=how) elif ( # GH48504: exclude MultiIndex to avoid going through MultiIndex._values self.is_monotonic_increasing and other.is_monotonic_increasing and self._can_use_libjoin and not isinstance(self, ABCMultiIndex) and not is_categorical_dtype(self.dtype) ): # Categorical is monotonic if data are ordered as categories, but join can # not handle this in case of not lexicographically monotonic GH#38502 try: return self._join_monotonic(other, how=how) except TypeError: # object dtype; non-comparable objects pass return self._join_via_get_indexer(other, how, sort) def _join_via_get_indexer( self, other: Index, how: JoinHow, sort: bool ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: # Fallback if we do not have any fastpaths available based on # uniqueness/monotonicity # Note: at this point we have checked matching dtypes if how == "left": join_index = self elif how == "right": join_index = other elif how == "inner": # TODO: sort=False here for backwards compat. It may # be better to use the sort parameter passed into join join_index = self.intersection(other, sort=False) elif how == "outer": # TODO: sort=True here for backwards compat. 
It may # be better to use the sort parameter passed into join join_index = self.union(other) if sort: join_index = join_index.sort_values() if join_index is self: lindexer = None else: lindexer = self.get_indexer_for(join_index) if join_index is other: rindexer = None else: rindexer = other.get_indexer_for(join_index) return join_index, lindexer, rindexer def _join_multi(self, other: Index, how: JoinHow): from pandas.core.indexes.multi import MultiIndex from pandas.core.reshape.merge import restore_dropped_levels_multijoin # figure out join names self_names_list = list(com.not_none(*self.names)) other_names_list = list(com.not_none(*other.names)) self_names_order = self_names_list.index other_names_order = other_names_list.index self_names = set(self_names_list) other_names = set(other_names_list) overlap = self_names & other_names # need at least 1 in common if not overlap: raise ValueError("cannot join with no overlapping index names") if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): # Drop the non-matching levels from left and right respectively ldrop_names = sorted(self_names - overlap, key=self_names_order) rdrop_names = sorted(other_names - overlap, key=other_names_order) # if only the order differs if not len(ldrop_names + rdrop_names): self_jnlevels = self other_jnlevels = other.reorder_levels(self.names) else: self_jnlevels = self.droplevel(ldrop_names) other_jnlevels = other.droplevel(rdrop_names) # Join left and right # Join on same leveled multi-index frames is supported join_idx, lidx, ridx = self_jnlevels.join( other_jnlevels, how=how, return_indexers=True ) # Restore the dropped levels # Returned index level order is # common levels, ldrop_names, rdrop_names dropped_names = ldrop_names + rdrop_names # error: Argument 5/6 to "restore_dropped_levels_multijoin" has # incompatible type "Optional[ndarray[Any, dtype[signedinteger[Any # ]]]]"; expected "ndarray[Any, dtype[signedinteger[Any]]]" levels, codes, names = restore_dropped_levels_multijoin( self, other, dropped_names, join_idx, lidx, # type: ignore[arg-type] ridx, # type: ignore[arg-type] ) # Re-create the multi-index multi_join_idx = MultiIndex( levels=levels, codes=codes, names=names, verify_integrity=False ) multi_join_idx = multi_join_idx.remove_unused_levels() return multi_join_idx, lidx, ridx jl = list(overlap)[0] # Case where only one index is multi # make the indices into mi's that match flip_order = False if isinstance(self, MultiIndex): self, other = other, self flip_order = True # flip if join method is right or left flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) level = other.names.index(jl) result = self._join_level(other, level, how=how) if flip_order: return result[0], result[2], result[1] return result def _join_non_unique( self, other: Index, how: JoinHow = "left" ) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp]]: from pandas.core.reshape.merge import get_join_indexers # We only get here if dtypes match assert self.dtype == other.dtype left_idx, right_idx = get_join_indexers( [self._values], [other._values], how=how, sort=True ) mask = left_idx == -1 join_idx = self.take(left_idx) right = other.take(right_idx) join_index = join_idx.putmask(mask, right) return join_index, left_idx, right_idx def _join_level( self, other: Index, level, how: JoinHow = "left", keep_order: bool = True ) -> tuple[MultiIndex, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: """ The join method *only* affects the level of the resulting MultiIndex. 
Otherwise it just exactly aligns the Index data to the labels of the level in the MultiIndex. If ```keep_order == True```, the order of the data indexed by the MultiIndex will not be changed; otherwise, it will tie out with `other`. """ from pandas.core.indexes.multi import MultiIndex def _get_leaf_sorter(labels: list[np.ndarray]) -> npt.NDArray[np.intp]: """ Returns sorter for the inner most level while preserving the order of higher levels. Parameters ---------- labels : list[np.ndarray] Each ndarray has signed integer dtype, not necessarily identical. Returns ------- np.ndarray[np.intp] """ if labels[0].size == 0: return np.empty(0, dtype=np.intp) if len(labels) == 1: return get_group_index_sorter(ensure_platform_int(labels[0])) # find indexers of beginning of each set of # same-key labels w.r.t all but last level tic = labels[0][:-1] != labels[0][1:] for lab in labels[1:-1]: tic |= lab[:-1] != lab[1:] starts = np.hstack(([True], tic, [True])).nonzero()[0] lab = ensure_int64(labels[-1]) return lib.get_level_sorter(lab, ensure_platform_int(starts)) if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): raise TypeError("Join on level between two MultiIndex objects is ambiguous") left, right = self, other flip_order = not isinstance(self, MultiIndex) if flip_order: left, right = right, left flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) assert isinstance(left, MultiIndex) level = left._get_level_number(level) old_level = left.levels[level] if not right.is_unique: raise NotImplementedError( "Index._join_level on non-unique index is not implemented" ) new_level, left_lev_indexer, right_lev_indexer = old_level.join( right, how=how, return_indexers=True ) if left_lev_indexer is None: if keep_order or len(left) == 0: left_indexer = None join_index = left else: # sort the leaves left_indexer = _get_leaf_sorter(left.codes[: level + 1]) join_index = left[left_indexer] else: left_lev_indexer = ensure_platform_int(left_lev_indexer) rev_indexer = lib.get_reverse_indexer(left_lev_indexer, len(old_level)) old_codes = left.codes[level] taker = old_codes[old_codes != -1] new_lev_codes = rev_indexer.take(taker) new_codes = list(left.codes) new_codes[level] = new_lev_codes new_levels = list(left.levels) new_levels[level] = new_level if keep_order: # just drop missing values. o.w. keep order left_indexer = np.arange(len(left), dtype=np.intp) left_indexer = cast(np.ndarray, left_indexer) mask = new_lev_codes != -1 if not mask.all(): new_codes = [lab[mask] for lab in new_codes] left_indexer = left_indexer[mask] else: # tie out the order with other if level == 0: # outer most level, take the fast route max_new_lev = 0 if len(new_lev_codes) == 0 else new_lev_codes.max() ngroups = 1 + max_new_lev left_indexer, counts = libalgos.groupsort_indexer( new_lev_codes, ngroups ) # missing values are placed first; drop them! left_indexer = left_indexer[counts[0] :] new_codes = [lab[left_indexer] for lab in new_codes] else: # sort the leaves mask = new_lev_codes != -1 mask_all = mask.all() if not mask_all: new_codes = [lab[mask] for lab in new_codes] left_indexer = _get_leaf_sorter(new_codes[: level + 1]) new_codes = [lab[left_indexer] for lab in new_codes] # left_indexers are w.r.t masked frame. # reverse to original frame! 
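                    # Illustrative numbers (assumed, not computed here): if
                    # mask == [False, True, True, False], the leaf sorter runs
                    # on the 2-row masked frame; a masked-frame indexer [1, 0]
                    # maps back via mask.nonzero()[0] == [1, 2] to [2, 1] in
                    # the original frame, which is what the branch below does.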
                    if not mask_all:
                        left_indexer = mask.nonzero()[0][left_indexer]

            join_index = MultiIndex(
                levels=new_levels,
                codes=new_codes,
                names=left.names,
                verify_integrity=False,
            )

        if right_lev_indexer is not None:
            right_indexer = right_lev_indexer.take(join_index.codes[level])
        else:
            right_indexer = join_index.codes[level]

        if flip_order:
            left_indexer, right_indexer = right_indexer, left_indexer

        left_indexer = (
            None if left_indexer is None else ensure_platform_int(left_indexer)
        )
        right_indexer = (
            None if right_indexer is None else ensure_platform_int(right_indexer)
        )
        return join_index, left_indexer, right_indexer

    def _join_monotonic(
        self, other: Index, how: JoinHow = "left"
    ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]:
        # We only get here with matching dtypes and both monotonic increasing
        assert other.dtype == self.dtype

        if self.equals(other):
            # This is a convenient place for this check, but its correctness
            # does not depend on monotonicity, so it could go earlier
            # in the calling method.
            ret_index = other if how == "right" else self
            return ret_index, None, None

        ridx: npt.NDArray[np.intp] | None
        lidx: npt.NDArray[np.intp] | None

        if self.is_unique and other.is_unique:
            # We can perform much better than the general case
            if how == "left":
                join_index = self
                lidx = None
                ridx = self._left_indexer_unique(other)
            elif how == "right":
                join_index = other
                lidx = other._left_indexer_unique(self)
                ridx = None
            elif how == "inner":
                join_array, lidx, ridx = self._inner_indexer(other)
                join_index = self._wrap_joined_index(join_array, other, lidx, ridx)
            elif how == "outer":
                join_array, lidx, ridx = self._outer_indexer(other)
                join_index = self._wrap_joined_index(join_array, other, lidx, ridx)
        else:
            if how == "left":
                join_array, lidx, ridx = self._left_indexer(other)
            elif how == "right":
                join_array, ridx, lidx = other._left_indexer(self)
            elif how == "inner":
                join_array, lidx, ridx = self._inner_indexer(other)
            elif how == "outer":
                join_array, lidx, ridx = self._outer_indexer(other)

            assert lidx is not None
            assert ridx is not None

            join_index = self._wrap_joined_index(join_array, other, lidx, ridx)

        lidx = None if lidx is None else ensure_platform_int(lidx)
        ridx = None if ridx is None else ensure_platform_int(ridx)
        return join_index, lidx, ridx

    def _wrap_joined_index(
        self: _IndexT,
        joined: ArrayLike,
        other: _IndexT,
        lidx: npt.NDArray[np.intp],
        ridx: npt.NDArray[np.intp],
    ) -> _IndexT:
        assert other.dtype == self.dtype

        if isinstance(self, ABCMultiIndex):
            name = self.names if self.names == other.names else None
            # error: Incompatible return value type (got "MultiIndex",
            # expected "_IndexT")
            mask = lidx == -1
            join_idx = self.take(lidx)
            right = other.take(ridx)
            join_index = join_idx.putmask(mask, right)
            return join_index.set_names(name)  # type: ignore[return-value]
        else:
            name = get_op_result_name(self, other)
            return self._constructor._with_infer(joined, name=name, dtype=self.dtype)

    @cache_readonly
    def _can_use_libjoin(self) -> bool:
        """
        Whether we can use the fastpaths implemented in _libs.join.
        """
        if type(self) is Index:
            # excludes EAs, but includes masks; we get here with monotonic
            # values only, meaning no NA
            return (
                isinstance(self.dtype, np.dtype)
                or isinstance(self.values, BaseMaskedArray)
                or isinstance(self._values, ArrowExtensionArray)
            )
        return not is_interval_dtype(self.dtype)

    # --------------------------------------------------------------------
    # Uncategorized Methods

    @property
    def values(self) -> ArrayLike:
        """
        Return an array representing the data in the Index.

        .. warning::

           We recommend using :attr:`Index.array` or
           :meth:`Index.to_numpy`, depending on whether you need
           a reference to the underlying data or a NumPy array.

        Returns
        -------
        array : numpy.ndarray or ExtensionArray

        See Also
        --------
        Index.array : Reference to the underlying data.
        Index.to_numpy : A NumPy array representing the underlying data.
        """
        return self._data

    @cache_readonly
    def array(self) -> ExtensionArray:
        array = self._data
        if isinstance(array, np.ndarray):
            from pandas.core.arrays.numpy_ import PandasArray

            array = PandasArray(array)
        return array

    @cache_readonly
    def _values(self) -> ExtensionArray | np.ndarray:
        """
        The best array representation.

        This is an ndarray or ExtensionArray.

        ``_values`` are consistent between ``Series`` and ``Index``.

        It may differ from the public '.values' method.

        index             | values          | _values       |
        ----------------- | --------------- | ------------- |
        Index             | ndarray         | ndarray       |
        CategoricalIndex  | Categorical     | Categorical   |
        DatetimeIndex     | ndarray[M8ns]   | DatetimeArray |
        DatetimeIndex[tz] | ndarray[M8ns]   | DatetimeArray |
        PeriodIndex       | ndarray[object] | PeriodArray   |
        IntervalIndex     | IntervalArray   | IntervalArray |

        See Also
        --------
        values : Values
        """
        return self._data

    def _get_engine_target(self) -> ArrayLike:
        """
        Get the ndarray or ExtensionArray that we can pass to the IndexEngine
        constructor.
        """
        vals = self._values
        if isinstance(vals, StringArray):
            # GH#45652 much more performant than ExtensionEngine
            return vals._ndarray
        if (
            type(self) is Index
            and isinstance(self._values, ExtensionArray)
            and not isinstance(self._values, BaseMaskedArray)
            and not (
                isinstance(self._values, ArrowExtensionArray)
                and is_numeric_dtype(self.dtype)
                # Exclude decimal
                and self.dtype.kind != "O"
            )
        ):
            # TODO(ExtensionIndex): remove special-case, just use self._values
            return self._values.astype(object)
        return vals

    def _get_join_target(self) -> ArrayLike:
        """
        Get the ndarray or ExtensionArray that we can pass to the join
        functions.
        """
        if isinstance(self._values, BaseMaskedArray):
            # This is only used if our array is monotonic, so no NAs present
            return self._values._data
        elif isinstance(self._values, ArrowExtensionArray):
            # This is only used if our array is monotonic, so no missing values
            # present
            return self._values.to_numpy()
        return self._get_engine_target()

    def _from_join_target(self, result: np.ndarray) -> ArrayLike:
        """
        Cast the ndarray returned from one of the libjoin.foo_indexer functions
        back to type(self)._data.
        """
        if isinstance(self.values, BaseMaskedArray):
            return type(self.values)(result, np.zeros(result.shape, dtype=np.bool_))
        elif isinstance(self.values, ArrowExtensionArray):
            return type(self.values)._from_sequence(result)
        return result

    def memory_usage(self, deep: bool = False) -> int:
        result = self._memory_usage(deep=deep)

        # include our engine hashtable
        result += self._engine.sizeof(deep=deep)
        return result

    def where(self, cond, other=None) -> Index:
        """
        Replace values where the condition is False.

        The replacement is taken from other.

        Parameters
        ----------
        cond : bool array-like with the same length as self
            Condition to select the values on.
        other : scalar, or array-like, default None
            Replacement if the condition is False.

        Returns
        -------
        pandas.Index
            A copy of self with values replaced from other
            where the condition is False.

        See Also
        --------
        Series.where : Same method for Series.
        DataFrame.where : Same method for DataFrame.
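
        Notes
        -----
        As the implementation below shows, ``idx.where(cond, other)`` is
        equivalent to ``idx.putmask(~cond, other)``: the condition marks the
        values to *keep*.
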
Examples -------- >>> idx = pd.Index(['car', 'bike', 'train', 'tractor']) >>> idx Index(['car', 'bike', 'train', 'tractor'], dtype='object') >>> idx.where(idx.isin(['car', 'train']), 'other') Index(['car', 'other', 'train', 'other'], dtype='object') """ if isinstance(self, ABCMultiIndex): raise NotImplementedError( ".where is not supported for MultiIndex operations" ) cond = np.asarray(cond, dtype=bool) return self.putmask(~cond, other) # construction helpers def _raise_scalar_data_error(cls, data): # We return the TypeError so that we can raise it from the constructor # in order to keep mypy happy raise TypeError( f"{cls.__name__}(...) must be called with a collection of some " f"kind, {repr(data)} was passed" ) def _validate_fill_value(self, value): """ Check if the value can be inserted into our array without casting, and convert it to an appropriate native type if necessary. Raises ------ TypeError If the value cannot be inserted into an array of this dtype. """ dtype = self.dtype if isinstance(dtype, np.dtype) and dtype.kind not in ["m", "M"]: # return np_can_hold_element(dtype, value) try: return np_can_hold_element(dtype, value) except LossySetitemError as err: # re-raise as TypeError for consistency raise TypeError from err elif not can_hold_element(self._values, value): raise TypeError return value def _require_scalar(self, value): """ Check that this is a scalar value that we can use for setitem-like operations without changing dtype. """ if not is_scalar(value): raise TypeError(f"'value' must be a scalar, passed: {type(value).__name__}") return value def _is_memory_usage_qualified(self) -> bool: """ Return a boolean if we need a qualified .info display. """ return is_object_dtype(self.dtype) def __contains__(self, key: Any) -> bool: """ Return a boolean indicating whether the provided key is in the index. Parameters ---------- key : label The key to check if it is present in the index. Returns ------- bool Whether the key search is in the index. Raises ------ TypeError If the key is not hashable. See Also -------- Index.isin : Returns an ndarray of boolean dtype indicating whether the list-like key is in the index. Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx Index([1, 2, 3, 4], dtype='int64') >>> 2 in idx True >>> 6 in idx False """ hash(key) try: return key in self._engine except (OverflowError, TypeError, ValueError): return False # https://github.com/python/typeshed/issues/2148#issuecomment-520783318 # Incompatible types in assignment (expression has type "None", base class # "object" defined the type as "Callable[[object], int]") __hash__: ClassVar[None] # type: ignore[assignment] def __setitem__(self, key, value): raise TypeError("Index does not support mutable operations") def __getitem__(self, key): """ Override numpy.ndarray's __getitem__ method to work as desired. This function adds lists and Series as valid boolean indexers (ndarrays only supports ndarray with dtype=bool). If resulting ndim != 1, plain ndarray is returned instead of corresponding `Index` subclass. """ getitem = self._data.__getitem__ if is_integer(key) or is_float(key): # GH#44051 exclude bool, which would return a 2d ndarray key = com.cast_scalar_indexer(key) return getitem(key) if isinstance(key, slice): # This case is separated from the conditional above to avoid # pessimization com.is_bool_indexer and ndim checks. result = getitem(key) # Going through simple_new for performance. 
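            # Doctest-style sketch (assumed session) of the slice fastpath
            # below, which skips the boolean-indexer and ndim checks:
            # >>> idx = pd.Index(list("abcde"))
            # >>> idx[1:4]
            # Index(['b', 'c', 'd'], dtype='object')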
return type(self)._simple_new( result, name=self._name, refs=self._references ) if com.is_bool_indexer(key): # if we have list[bools, length=1e5] then doing this check+convert # takes 166 µs + 2.1 ms and cuts the ndarray.__getitem__ # time below from 3.8 ms to 496 µs # if we already have ndarray[bool], the overhead is 1.4 µs or .25% if is_extension_array_dtype(getattr(key, "dtype", None)): key = key.to_numpy(dtype=bool, na_value=False) else: key = np.asarray(key, dtype=bool) result = getitem(key) # Because we ruled out integer above, we always get an arraylike here if result.ndim > 1: disallow_ndim_indexing(result) # NB: Using _constructor._simple_new would break if MultiIndex # didn't override __getitem__ return self._constructor._simple_new(result, name=self._name) def _getitem_slice(self: _IndexT, slobj: slice) -> _IndexT: """ Fastpath for __getitem__ when we know we have a slice. """ res = self._data[slobj] return type(self)._simple_new(res, name=self._name, refs=self._references) def _can_hold_identifiers_and_holds_name(self, name) -> bool: """ Faster check for ``name in self`` when we know `name` is a Python identifier (e.g. in NDFrame.__getattr__, which hits this to support . key lookup). For indexes that can't hold identifiers (everything but object & categorical) we just return False. https://github.com/pandas-dev/pandas/issues/19764 """ if ( is_object_dtype(self.dtype) or is_string_dtype(self.dtype) or is_categorical_dtype(self.dtype) ): return name in self return False def append(self, other: Index | Sequence[Index]) -> Index: """ Append a collection of Index options together. Parameters ---------- other : Index or list/tuple of indices Returns ------- Index """ to_concat = [self] if isinstance(other, (list, tuple)): to_concat += list(other) else: # error: Argument 1 to "append" of "list" has incompatible type # "Union[Index, Sequence[Index]]"; expected "Index" to_concat.append(other) # type: ignore[arg-type] for obj in to_concat: if not isinstance(obj, Index): raise TypeError("all inputs must be Index") names = {obj.name for obj in to_concat} name = None if len(names) > 1 else self.name return self._concat(to_concat, name) def _concat(self, to_concat: list[Index], name: Hashable) -> Index: """ Concatenate multiple Index objects. """ to_concat_vals = [x._values for x in to_concat] result = concat_compat(to_concat_vals) return Index._with_infer(result, name=name) def putmask(self, mask, value) -> Index: """ Return a new Index of the values set with the mask. Returns ------- Index See Also -------- numpy.ndarray.putmask : Changes elements of an array based on conditional and input values. """ mask, noop = validate_putmask(self._values, mask) if noop: return self.copy() if self.dtype != object and is_valid_na_for_dtype(value, self.dtype): # e.g. 
None -> np.nan, see also Block._standardize_fill_value value = self._na_value try: converted = self._validate_fill_value(value) except (LossySetitemError, ValueError, TypeError) as err: if is_object_dtype(self): # pragma: no cover raise err # See also: Block.coerce_to_target_dtype dtype = self._find_common_type_compat(value) return self.astype(dtype).putmask(mask, value) values = self._values.copy() if isinstance(values, np.ndarray): converted = setitem_datetimelike_compat(values, mask.sum(), converted) np.putmask(values, mask, converted) else: # Note: we use the original value here, not converted, as # _validate_fill_value is not idempotent values._putmask(mask, value) return self._shallow_copy(values) def equals(self, other: Any) -> bool: """ Determine if two Index object are equal. The things that are being compared are: * The elements inside the Index object. * The order of the elements inside the Index object. Parameters ---------- other : Any The other object to compare against. Returns ------- bool True if "other" is an Index and it has the same elements and order as the calling index; False otherwise. Examples -------- >>> idx1 = pd.Index([1, 2, 3]) >>> idx1 Index([1, 2, 3], dtype='int64') >>> idx1.equals(pd.Index([1, 2, 3])) True The elements inside are compared >>> idx2 = pd.Index(["1", "2", "3"]) >>> idx2 Index(['1', '2', '3'], dtype='object') >>> idx1.equals(idx2) False The order is compared >>> ascending_idx = pd.Index([1, 2, 3]) >>> ascending_idx Index([1, 2, 3], dtype='int64') >>> descending_idx = pd.Index([3, 2, 1]) >>> descending_idx Index([3, 2, 1], dtype='int64') >>> ascending_idx.equals(descending_idx) False The dtype is *not* compared >>> int64_idx = pd.Index([1, 2, 3], dtype='int64') >>> int64_idx Index([1, 2, 3], dtype='int64') >>> uint64_idx = pd.Index([1, 2, 3], dtype='uint64') >>> uint64_idx Index([1, 2, 3], dtype='uint64') >>> int64_idx.equals(uint64_idx) True """ if self.is_(other): return True if not isinstance(other, Index): return False if is_object_dtype(self.dtype) and not is_object_dtype(other.dtype): # if other is not object, use other's logic for coercion return other.equals(self) if isinstance(other, ABCMultiIndex): # d-level MultiIndex can equal d-tuple Index return other.equals(self) if isinstance(self._values, ExtensionArray): # Dispatch to the ExtensionArray's .equals method. if not isinstance(other, type(self)): return False earr = cast(ExtensionArray, self._data) return earr.equals(other._data) if is_extension_array_dtype(other.dtype): # All EA-backed Index subclasses override equals return other.equals(self) return array_equivalent(self._values, other._values) def identical(self, other) -> bool: """ Similar to equals, but checks that object attributes and types are also equal. Returns ------- bool If two Index objects have equal elements and same type True, otherwise False. """ return ( self.equals(other) and all( getattr(self, c, None) == getattr(other, c, None) for c in self._comparables ) and type(self) == type(other) and self.dtype == other.dtype ) def asof(self, label): """ Return the label from the index, or, if not present, the previous one. Assuming that the index is sorted, return the passed index label if it is in the index, or return the previous index label if the passed one is not in the index. Parameters ---------- label : object The label up to which the method returns the latest index label. Returns ------- object The passed label if it is in the index. 
The previous label if the passed label is not in the sorted index or `NaN` if there is no such label. See Also -------- Series.asof : Return the latest value in a Series up to the passed index. merge_asof : Perform an asof merge (similar to left join but it matches on nearest key rather than equal key). Index.get_loc : An `asof` is a thin wrapper around `get_loc` with method='pad'. Examples -------- `Index.asof` returns the latest index label up to the passed label. >>> idx = pd.Index(['2013-12-31', '2014-01-02', '2014-01-03']) >>> idx.asof('2014-01-01') '2013-12-31' If the label is in the index, the method returns the passed label. >>> idx.asof('2014-01-02') '2014-01-02' If all of the labels in the index are later than the passed label, NaN is returned. >>> idx.asof('1999-01-02') nan If the index is not sorted, an error is raised. >>> idx_not_sorted = pd.Index(['2013-12-31', '2015-01-02', ... '2014-01-03']) >>> idx_not_sorted.asof('2013-12-31') Traceback (most recent call last): ValueError: index must be monotonic increasing or decreasing """ self._searchsorted_monotonic(label) # validate sortedness try: loc = self.get_loc(label) except (KeyError, TypeError): # KeyError -> No exact match, try for padded # TypeError -> passed e.g. non-hashable, fall through to get # the tested exception message indexer = self.get_indexer([label], method="pad") if indexer.ndim > 1 or indexer.size > 1: raise TypeError("asof requires scalar valued input") loc = indexer.item() if loc == -1: return self._na_value else: if isinstance(loc, slice): loc = loc.indices(len(self))[-1] return self[loc] def asof_locs( self, where: Index, mask: npt.NDArray[np.bool_] ) -> npt.NDArray[np.intp]: """ Return the locations (indices) of labels in the index. As in the `asof` function, if the label (a particular entry in `where`) is not in the index, the latest index label up to the passed label is chosen and its index returned. If all of the labels in the index are later than a label in `where`, -1 is returned. `mask` is used to ignore NA values in the index during calculation. Parameters ---------- where : Index An Index consisting of an array of timestamps. mask : np.ndarray[bool] Array of booleans denoting where values in the original data are not NA. Returns ------- np.ndarray[np.intp] An array of locations (indices) of the labels from the Index which correspond to the return values of the `asof` function for every element in `where`. """ # error: No overload variant of "searchsorted" of "ndarray" matches argument # types "Union[ExtensionArray, ndarray[Any, Any]]", "str" # TODO: will be fixed when ExtensionArray.searchsorted() is fixed locs = self._values[mask].searchsorted( where._values, side="right" # type: ignore[call-overload] ) locs = np.where(locs > 0, locs - 1, 0) result = np.arange(len(self), dtype=np.intp)[mask].take(locs) first_value = self._values[mask.argmax()] result[(locs == 0) & (where._values < first_value)] = -1 return result def sort_values( self, return_indexer: bool = False, ascending: bool = True, na_position: str_t = "last", key: Callable | None = None, ): """ Return a sorted copy of the index. Return a sorted copy of the index, and optionally return the indices that sorted the index itself. Parameters ---------- return_indexer : bool, default False Should the indices that would sort the index be returned. ascending : bool, default True Should the index values be sorted in an ascending order. 
na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. .. versionadded:: 1.2.0 key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- sorted_index : pandas.Index Sorted copy of the index. indexer : numpy.ndarray, optional The indices that the index itself was sorted by. See Also -------- Series.sort_values : Sort values of a Series. DataFrame.sort_values : Sort values in a DataFrame. Examples -------- >>> idx = pd.Index([10, 100, 1, 1000]) >>> idx Index([10, 100, 1, 1000], dtype='int64') Sort values in ascending order (default behavior). >>> idx.sort_values() Index([1, 10, 100, 1000], dtype='int64') Sort values in descending order, and also get the indices `idx` was sorted by. >>> idx.sort_values(ascending=False, return_indexer=True) (Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2])) """ idx = ensure_key_mapped(self, key) # GH 35584. Sort missing values according to na_position kwarg # ignore na_position for MultiIndex if not isinstance(self, ABCMultiIndex): _as = nargsort( items=idx, ascending=ascending, na_position=na_position, key=key ) else: _as = idx.argsort() if not ascending: _as = _as[::-1] sorted_index = self.take(_as) if return_indexer: return sorted_index, _as else: return sorted_index def sort(self, *args, **kwargs): """ Use sort_values instead. """ raise TypeError("cannot sort an Index object in-place, use sort_values instead") def shift(self, periods: int = 1, freq=None): """ Shift index by desired number of time frequency increments. This method is for shifting the values of datetime-like indexes by a specified time increment a given number of times. Parameters ---------- periods : int, default 1 Number of periods (or increments) to shift by, can be positive or negative. freq : pandas.DateOffset, pandas.Timedelta or str, optional Frequency increment to shift by. If None, the index is shifted by its own `freq` attribute. Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc. Returns ------- pandas.Index Shifted index. See Also -------- Series.shift : Shift values of Series. Notes ----- This method is only implemented for datetime-like index classes, i.e., DatetimeIndex, PeriodIndex and TimedeltaIndex. Examples -------- Put the first 5 month starts of 2011 into an index. >>> month_starts = pd.date_range('1/1/2011', periods=5, freq='MS') >>> month_starts DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01', '2011-05-01'], dtype='datetime64[ns]', freq='MS') Shift the index by 10 days. >>> month_starts.shift(10, freq='D') DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11', '2011-05-11'], dtype='datetime64[ns]', freq=None) The default value of `freq` is the `freq` attribute of the index, which is 'MS' (month start) in this example. >>> month_starts.shift(10) DatetimeIndex(['2011-11-01', '2011-12-01', '2012-01-01', '2012-02-01', '2012-03-01'], dtype='datetime64[ns]', freq='MS') """ raise NotImplementedError( f"This method is only implemented for DatetimeIndex, PeriodIndex and " f"TimedeltaIndex; Got type {type(self).__name__}" ) def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]: """ Return the integer indices that would sort the index. 
        Parameters
        ----------
        *args
            Passed to `numpy.ndarray.argsort`.
        **kwargs
            Passed to `numpy.ndarray.argsort`.

        Returns
        -------
        np.ndarray[np.intp]
            Integer indices that would sort the index if used as
            an indexer.

        See Also
        --------
        numpy.argsort : Similar method for NumPy arrays.
        Index.sort_values : Return sorted copy of Index.

        Examples
        --------
        >>> idx = pd.Index(['b', 'a', 'd', 'c'])
        >>> idx
        Index(['b', 'a', 'd', 'c'], dtype='object')

        >>> order = idx.argsort()
        >>> order
        array([1, 0, 3, 2])

        >>> idx[order]
        Index(['a', 'b', 'c', 'd'], dtype='object')
        """
        # This works for either ndarray or EA, is overridden
        # by RangeIndex, MultiIndex
        return self._data.argsort(*args, **kwargs)

    def _check_indexing_error(self, key):
        if not is_scalar(key):
            # if key is not a scalar, directly raise an error (the code below
            # would convert to numpy arrays and raise later anyway) - GH29926
            raise InvalidIndexError(key)

    @cache_readonly
    def _should_fallback_to_positional(self) -> bool:
        """
        Should an integer key be treated as positional?
        """
        return self.inferred_type not in {
            "integer",
            "mixed-integer",
            "floating",
            "complex",
        }

    _index_shared_docs[
        "get_indexer_non_unique"
    ] = """
        Compute indexer and mask for new index given the current index.

        The indexer should be then used as an input to ndarray.take to align
        the current data to the new index.

        Parameters
        ----------
        target : %(target_klass)s

        Returns
        -------
        indexer : np.ndarray[np.intp]
            Integers from 0 to n - 1 indicating that the index at these
            positions matches the corresponding target values. Missing values
            in the target are marked by -1.
        missing : np.ndarray[np.intp]
            An indexer into the target of the values not found.
            These correspond to the -1 in the indexer array.

        Examples
        --------
        >>> index = pd.Index(['c', 'b', 'a', 'b', 'b'])
        >>> index.get_indexer_non_unique(['b', 'b'])
        (array([1, 3, 4, 1, 3, 4]), array([], dtype=int64))

        In the example below there are no matched values.

        >>> index = pd.Index(['c', 'b', 'a', 'b', 'b'])
        >>> index.get_indexer_non_unique(['q', 'r', 't'])
        (array([-1, -1, -1]), array([0, 1, 2]))

        For this reason, the returned ``indexer`` contains only integers equal
        to -1. It demonstrates that there's no match between the index and the
        ``target`` values at these positions. The mask [0, 1, 2] in the return
        value shows that the first, second, and third elements are missing.

        Notice that the return value is a tuple containing two items. In the
        example below the first item is an array of locations in ``index``.
        The second item is a mask showing that the first and third elements
        are missing.

        >>> index = pd.Index(['c', 'b', 'a', 'b', 'b'])
        >>> index.get_indexer_non_unique(['f', 'b', 's'])
        (array([-1, 1, 3, 4, -1]), array([0, 2]))
    """

    def get_indexer_non_unique(
        self, target
    ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
        target = ensure_index(target)
        target = self._maybe_cast_listlike_indexer(target)

        if not self._should_compare(target) and not self._should_partial_index(target):
            # _should_partial_index e.g. IntervalIndex with numeric scalars
            # that can be matched to Interval scalars.
            return self._get_indexer_non_comparable(target, method=None, unique=False)

        pself, ptarget = self._maybe_promote(target)
        if pself is not self or ptarget is not target:
            return pself.get_indexer_non_unique(ptarget)

        if not is_dtype_equal(self.dtype, target.dtype):
            # TODO: if object, could use infer_dtype to preempt costly
            # conversion if still non-comparable?
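        # Doctest-style sketch (assumed session) of the promotion below:
        # matching int64 labels against float targets casts both sides to a
        # common dtype first.
        # >>> pd.Index([1, 1, 2]).get_indexer_non_unique([1.0, 3.0])
        # (array([ 0,  1, -1]), array([1]))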
            dtype = self._find_common_type_compat(target)

            this = self.astype(dtype, copy=False)
            that = target.astype(dtype, copy=False)
            return this.get_indexer_non_unique(that)

        # TODO: get_indexer has fastpaths for both Categorical-self and
        # Categorical-target. Can we do something similar here?

        # Note: _maybe_promote ensures we never get here with MultiIndex
        # self and non-Multi target
        tgt_values = target._get_engine_target()
        if self._is_multi and target._is_multi:
            engine = self._engine
            # Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]" has
            # no attribute "_extract_level_codes"
            tgt_values = engine._extract_level_codes(target)  # type: ignore[union-attr]

        indexer, missing = self._engine.get_indexer_non_unique(tgt_values)
        return ensure_platform_int(indexer), ensure_platform_int(missing)

    def get_indexer_for(self, target) -> npt.NDArray[np.intp]:
        """
        Guaranteed return of an indexer even when non-unique.

        This dispatches to get_indexer or get_indexer_non_unique
        as appropriate.

        Returns
        -------
        np.ndarray[np.intp]
            List of indices.

        Examples
        --------
        >>> idx = pd.Index([np.nan, 'var1', np.nan])
        >>> idx.get_indexer_for([np.nan])
        array([0, 2])
        """
        if self._index_as_unique:
            return self.get_indexer(target)
        indexer, _ = self.get_indexer_non_unique(target)
        return indexer

    def _get_indexer_strict(self, key, axis_name: str_t) -> tuple[Index, np.ndarray]:
        """
        Analogue to get_indexer that raises if any elements are missing.
        """
        keyarr = key
        if not isinstance(keyarr, Index):
            keyarr = com.asarray_tuplesafe(keyarr)

        if self._index_as_unique:
            indexer = self.get_indexer_for(keyarr)
            keyarr = self.reindex(keyarr)[0]
        else:
            keyarr, indexer, new_indexer = self._reindex_non_unique(keyarr)

        self._raise_if_missing(keyarr, indexer, axis_name)

        keyarr = self.take(indexer)
        if isinstance(key, Index):
            # GH 42790 - Preserve name from an Index
            keyarr.name = key.name
        if keyarr.dtype.kind in ["m", "M"]:
            # DTI/TDI.take can infer a freq in some cases when we don't want one
            if isinstance(key, list) or (
                isinstance(key, type(self))
                # "Index" has no attribute "freq"
                and key.freq is None  # type: ignore[attr-defined]
            ):
                keyarr = keyarr._with_freq(None)

        return keyarr, indexer

    def _raise_if_missing(self, key, indexer, axis_name: str_t) -> None:
        """
        Check that indexer can be used to return a result.

        e.g. at least one element was found,
        unless the list of keys was actually empty.

        Parameters
        ----------
        key : list-like
            Targeted labels (only used to show correct error message).
        indexer : array-like of ints
            Indices corresponding to the key,
            (with -1 indicating not found).
        axis_name : str

        Raises
        ------
        KeyError
            If at least one key was requested but none was found.
        """
        if len(key) == 0:
            return

        # Count missing values
        missing_mask = indexer < 0
        nmissing = missing_mask.sum()

        if nmissing:
            # TODO: remove special-case; this is just to keep exception
            # message tests from raising while debugging
            use_interval_msg = is_interval_dtype(self.dtype) or (
                is_categorical_dtype(self.dtype)
                # "Index" has no attribute "categories"  [attr-defined]
                and is_interval_dtype(
                    self.categories.dtype  # type: ignore[attr-defined]
                )
            )

            if nmissing == len(indexer):
                if use_interval_msg:
                    key = list(key)
                raise KeyError(f"None of [{key}] are in the [{axis_name}]")

            not_found = list(ensure_index(key)[missing_mask.nonzero()[0]].unique())
            raise KeyError(f"{not_found} not in index")

    @overload
    def _get_indexer_non_comparable(
        self, target: Index, method, unique: Literal[True] = ...
    ) -> npt.NDArray[np.intp]:
        ...

    @overload
    def _get_indexer_non_comparable(
        self, target: Index, method, unique: Literal[False]
    ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
        ...

    @overload
    def _get_indexer_non_comparable(
        self, target: Index, method, unique: bool = True
    ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
        ...

    def _get_indexer_non_comparable(
        self, target: Index, method, unique: bool = True
    ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
        """
        Called from get_indexer or get_indexer_non_unique when the target
        is of a non-comparable dtype.

        For get_indexer lookups with method=None, get_indexer is an _equality_
        check, so non-comparable dtypes mean we will always have no matches.

        For get_indexer lookups with a method, get_indexer is an _inequality_
        check, so non-comparable dtypes mean we will always raise TypeError.

        Parameters
        ----------
        target : Index
        method : str or None
        unique : bool, default True
            * True if called from get_indexer.
            * False if called from get_indexer_non_unique.

        Raises
        ------
        TypeError
            If doing an inequality check, i.e. method is not None.
        """
        if method is not None:
            other = _unpack_nested_dtype(target)
            raise TypeError(f"Cannot compare dtypes {self.dtype} and {other.dtype}")

        no_matches = -1 * np.ones(target.shape, dtype=np.intp)
        if unique:
            # This is for get_indexer
            return no_matches
        else:
            # This is for get_indexer_non_unique
            missing = np.arange(len(target), dtype=np.intp)
            return no_matches, missing

    @cache_readonly
    def _index_as_unique(self) -> bool:
        """
        Whether we should treat this as unique for the sake of
        get_indexer vs get_indexer_non_unique.

        For IntervalIndex compat.
        """
        return self.is_unique

    _requires_unique_msg = "Reindexing only valid with uniquely valued Index objects"

    def _maybe_promote(self, other: Index) -> tuple[Index, Index]:
        """
        When dealing with an object-dtype Index and a non-object Index, see
        if we can upcast the object-dtype one to improve performance.
        """
        if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex):
            if (
                self.tz is not None
                and other.tz is not None
                and not tz_compare(self.tz, other.tz)
            ):
                # standardize on UTC
                return self.tz_convert("UTC"), other.tz_convert("UTC")

        elif self.inferred_type == "date" and isinstance(other, ABCDatetimeIndex):
            try:
                return type(other)(self), other
            except OutOfBoundsDatetime:
                return self, other
        elif self.inferred_type == "timedelta" and isinstance(other, ABCTimedeltaIndex):
            # TODO: we don't have tests that get here
            return type(other)(self), other
        elif self.dtype.kind == "u" and other.dtype.kind == "i":
            # GH#41873
            if other.min() >= 0:
                # lookup min as it may be cached
                # TODO: may need itemsize check if we have non-64-bit Indexes
                return self, other.astype(self.dtype)
        elif self._is_multi and not other._is_multi:
            try:
                # "Type[Index]" has no attribute "from_tuples"
                other = type(self).from_tuples(other)  # type: ignore[attr-defined]
            except (TypeError, ValueError):
                # let's instead try with a straight Index
                self = Index(self._values)

        if not is_object_dtype(self.dtype) and is_object_dtype(other.dtype):
            # Reverse op so we don't need to re-implement on the subclasses
            other, self = other._maybe_promote(self)

        return self, other

    def _find_common_type_compat(self, target) -> DtypeObj:
        """
        Implementation of find_common_type that adjusts for Index-specific
        special cases.
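
        For example (illustrative): ``uint64`` paired with a signed integer
        dtype resolves to ``object``, while float paired with integer resolves
        to float, per the rules encoded below.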
""" target_dtype, _ = infer_dtype_from(target, pandas_dtype=True) # special case: if one dtype is uint64 and the other a signed int, return object # See https://github.com/pandas-dev/pandas/issues/26778 for discussion # Now it's: # * float | [u]int -> float # * uint64 | signed int -> object # We may change union(float | [u]int) to go to object. if self.dtype == "uint64" or target_dtype == "uint64": if is_signed_integer_dtype(self.dtype) or is_signed_integer_dtype( target_dtype ): return _dtype_obj dtype = find_result_type(self._values, target) dtype = common_dtype_categorical_compat([self, target], dtype) return dtype def _should_compare(self, other: Index) -> bool: """ Check if `self == other` can ever have non-False entries. """ if (is_bool_dtype(other) and is_any_real_numeric_dtype(self)) or ( is_bool_dtype(self) and is_any_real_numeric_dtype(other) ): # GH#16877 Treat boolean labels passed to a numeric index as not # found. Without this fix False and True would be treated as 0 and 1 # respectively. return False other = _unpack_nested_dtype(other) dtype = other.dtype return self._is_comparable_dtype(dtype) or is_object_dtype(dtype) def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: """ Can we compare values of the given dtype to our own? """ if self.dtype.kind == "b": return dtype.kind == "b" elif is_numeric_dtype(self.dtype): return is_numeric_dtype(dtype) # TODO: this was written assuming we only get here with object-dtype, # which is nom longer correct. Can we specialize for EA? return True def groupby(self, values) -> PrettyDict[Hashable, np.ndarray]: """ Group the index labels by a given array of values. Parameters ---------- values : array Values used to determine the groups. Returns ------- dict {group name -> group labels} """ # TODO: if we are a MultiIndex, we can do better # that converting to tuples if isinstance(values, ABCMultiIndex): values = values._values values = Categorical(values) result = values._reverse_indexer() # map to the label result = {k: self.take(v) for k, v in result.items()} return PrettyDict(result) def map(self, mapper, na_action=None): """ Map values using an input mapping or function. Parameters ---------- mapper : function, dict, or Series Mapping correspondence. na_action : {None, 'ignore'} If 'ignore', propagate NA values, without passing them to the mapping correspondence. Returns ------- Union[Index, MultiIndex] The output of the mapping function applied to the index. If the function returns a tuple with more than one element a MultiIndex will be returned. """ from pandas.core.indexes.multi import MultiIndex new_values = self._map_values(mapper, na_action=na_action) # we can return a MultiIndex if new_values.size and isinstance(new_values[0], tuple): if isinstance(self, MultiIndex): names = self.names elif self.name: names = [self.name] * len(new_values[0]) else: names = None return MultiIndex.from_tuples(new_values, names=names) dtype = None if not new_values.size: # empty dtype = self.dtype # e.g. if we are floating and new_values is all ints, then we # don't want to cast back to floating. But if we are UInt64 # and new_values is all ints, we want to try. 
same_dtype = lib.infer_dtype(new_values, skipna=False) == self.inferred_type if same_dtype: new_values = maybe_cast_pointwise_result( new_values, self.dtype, same_dtype=same_dtype ) return Index._with_infer(new_values, dtype=dtype, copy=False, name=self.name) # TODO: De-duplicate with map, xref GH#32349 def _transform_index(self, func, *, level=None) -> Index: """ Apply function to all values found in index. This includes transforming multiindex entries separately. Only apply function to one level of the MultiIndex if level is specified. """ if isinstance(self, ABCMultiIndex): values = [ self.get_level_values(i).map(func) if i == level or level is None else self.get_level_values(i) for i in range(self.nlevels) ] return type(self).from_arrays(values) else: items = [func(x) for x in self] return Index(items, name=self.name, tupleize_cols=False) def isin(self, values, level=None) -> npt.NDArray[np.bool_]: """ Return a boolean array where the index values are in `values`. Compute boolean array of whether each index value is found in the passed set of values. The length of the returned boolean array matches the length of the index. Parameters ---------- values : set or list-like Sought values. level : str or int, optional Name or position of the index level to use (if the index is a `MultiIndex`). Returns ------- np.ndarray[bool] NumPy array of boolean values. See Also -------- Series.isin : Same for Series. DataFrame.isin : Same method for DataFrames. Notes ----- In the case of `MultiIndex` you must either specify `values` as a list-like object containing tuples that are the same length as the number of levels, or specify `level`. Otherwise it will raise a ``ValueError``. If `level` is specified: - if it is the name of one *and only one* index level, use that level; - otherwise it should be a number indicating level position. Examples -------- >>> idx = pd.Index([1,2,3]) >>> idx Index([1, 2, 3], dtype='int64') Check whether each index value in a list of values. >>> idx.isin([1, 4]) array([ True, False, False]) >>> midx = pd.MultiIndex.from_arrays([[1,2,3], ... ['red', 'blue', 'green']], ... names=('number', 'color')) >>> midx MultiIndex([(1, 'red'), (2, 'blue'), (3, 'green')], names=['number', 'color']) Check whether the strings in the 'color' level of the MultiIndex are in a list of colors. >>> midx.isin(['red', 'orange', 'yellow'], level='color') array([ True, False, False]) To check across the levels of a MultiIndex, pass a list of tuples: >>> midx.isin([(1, 'red'), (3, 'red')]) array([ True, False, False]) For a DatetimeIndex, string values in `values` are converted to Timestamps. >>> dates = ['2000-03-11', '2000-03-12', '2000-03-13'] >>> dti = pd.to_datetime(dates) >>> dti DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'], dtype='datetime64[ns]', freq=None) >>> dti.isin(['2000-03-11']) array([ True, False, False]) """ if level is not None: self._validate_index_level(level) return algos.isin(self._values, values) def _get_string_slice(self, key: str_t): # this is for partial string indexing, # overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex raise NotImplementedError def slice_indexer( self, start: Hashable | None = None, end: Hashable | None = None, step: int | None = None, ) -> slice: """ Compute the slice indexer for input labels and step. Index needs to be ordered and unique. Parameters ---------- start : label, default None If None, defaults to the beginning. end : label, default None If None, defaults to the end. 
        step : int, default None

        Returns
        -------
        slice

        Raises
        ------
        KeyError : If key does not exist, or key is not unique and index is
            not ordered.

        Notes
        -----
        This function assumes that the data is sorted, so use at your own peril.

        Examples
        --------
        This is a method on all index types. For example you can do:

        >>> idx = pd.Index(list('abcd'))
        >>> idx.slice_indexer(start='b', end='c')
        slice(1, 3, None)

        >>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')])
        >>> idx.slice_indexer(start='b', end=('c', 'g'))
        slice(1, 3, None)
        """
        start_slice, end_slice = self.slice_locs(start, end, step=step)

        # return a slice
        if not is_scalar(start_slice):
            raise AssertionError("Start slice bound is non-scalar")
        if not is_scalar(end_slice):
            raise AssertionError("End slice bound is non-scalar")

        return slice(start_slice, end_slice, step)

    def _maybe_cast_indexer(self, key):
        """
        If we have a float key and are not a floating index, then try to cast
        to an int if equivalent.
        """
        return key

    def _maybe_cast_listlike_indexer(self, target) -> Index:
        """
        Analogue to maybe_cast_indexer for get_indexer instead of get_loc.
        """
        return ensure_index(target)

    def _validate_indexer(self, form: str_t, key, kind: str_t) -> None:
        """
        If we are a positional indexer, validate that we have appropriately
        typed bounds: each bound must be an integer (or None).
        """
        assert kind in ["getitem", "iloc"]

        if key is not None and not is_integer(key):
            self._raise_invalid_indexer(form, key)

    def _maybe_cast_slice_bound(self, label, side: str_t):
        """
        This function should be overloaded in subclasses that allow non-trivial
        casting on label-slice bounds, e.g. datetime-like indices allowing
        strings containing formatted datetimes.

        Parameters
        ----------
        label : object
        side : {'left', 'right'}

        Returns
        -------
        label : object

        Notes
        -----
        Value of `side` parameter should be validated in caller.
        """

        # We are a plain index here (sub-classes override this method if they
        # wish to have special treatment for floats/ints, e.g. datetimelike Indexes

        if is_numeric_dtype(self.dtype):
            return self._maybe_cast_indexer(label)

        # reject the label if the index does not contain it
        if (is_float(label) or is_integer(label)) and label not in self:
            self._raise_invalid_indexer("slice", label)

        return label

    def _searchsorted_monotonic(self, label, side: Literal["left", "right"] = "left"):
        if self.is_monotonic_increasing:
            return self.searchsorted(label, side=side)
        elif self.is_monotonic_decreasing:
            # np.searchsorted expects ascending sort order, have to reverse
            # everything for it to work (element ordering, search side and
            # resulting value).
            pos = self[::-1].searchsorted(
                label, side="right" if side == "left" else "left"
            )
            return len(self) - pos

        raise ValueError("index must be monotonic increasing or decreasing")

    def get_slice_bound(self, label, side: Literal["left", "right"]) -> int:
        """
        Calculate slice bound that corresponds to given label.

        Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
        of given label.

        Parameters
        ----------
        label : object
        side : {'left', 'right'}

        Returns
        -------
        int
            Index of label.
        """
        if side not in ("left", "right"):
            raise ValueError(
                "Invalid value for side kwarg, must be either "
                f"'left' or 'right': {side}"
            )

        original_label = label

        # For datetime indices label may be a string that has to be converted
        # to datetime boundary according to its resolution.
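        # Doctest-style sketch (assumed session) of the casting below on a
        # DatetimeIndex, where string bounds are parsed to datetimes first:
        # >>> dti = pd.date_range("2020-01-01", periods=3)
        # >>> dti.get_slice_bound("2020-01-02", side="left")
        # 1
        # >>> dti.get_slice_bound("2020-01-02", side="right")
        # 2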
label = self._maybe_cast_slice_bound(label, side) # we need to look up the label try: slc = self.get_loc(label) except KeyError as err: try: return self._searchsorted_monotonic(label, side) except ValueError: # raise the original KeyError raise err if isinstance(slc, np.ndarray): # get_loc may return a boolean array, which # is OK as long as they are representable by a slice. assert is_bool_dtype(slc.dtype) slc = lib.maybe_booleans_to_slice(slc.view("u1")) if isinstance(slc, np.ndarray): raise KeyError( f"Cannot get {side} slice bound for non-unique " f"label: {repr(original_label)}" ) if isinstance(slc, slice): if side == "left": return slc.start else: return slc.stop else: if side == "right": return slc + 1 else: return slc def slice_locs(self, start=None, end=None, step=None) -> tuple[int, int]: """ Compute slice locations for input labels. Parameters ---------- start : label, default None If None, defaults to the beginning. end : label, default None If None, defaults to the end. step : int, defaults None If None, defaults to 1. Returns ------- tuple[int, int] See Also -------- Index.get_loc : Get location for a single label. Notes ----- This method only works if the index is monotonic or unique. Examples -------- >>> idx = pd.Index(list('abcd')) >>> idx.slice_locs(start='b', end='c') (1, 3) """ inc = step is None or step >= 0 if not inc: # If it's a reverse slice, temporarily swap bounds. start, end = end, start # GH 16785: If start and end happen to be date strings with UTC offsets # attempt to parse and check that the offsets are the same if isinstance(start, (str, datetime)) and isinstance(end, (str, datetime)): try: ts_start = Timestamp(start) ts_end = Timestamp(end) except (ValueError, TypeError): pass else: if not tz_compare(ts_start.tzinfo, ts_end.tzinfo): raise ValueError("Both dates must have the same UTC offset") start_slice = None if start is not None: start_slice = self.get_slice_bound(start, "left") if start_slice is None: start_slice = 0 end_slice = None if end is not None: end_slice = self.get_slice_bound(end, "right") if end_slice is None: end_slice = len(self) if not inc: # Bounds at this moment are swapped, swap them back and shift by 1. # # slice_locs('B', 'A', step=-1): s='B', e='A' # # s='A' e='B' # AFTER SWAP: | | # v ------------------> V # ----------------------------------- # | | |A|A|A|A| | | | | |B|B| | | | | # ----------------------------------- # ^ <------------------ ^ # SHOULD BE: | | # end=s-1 start=e-1 # end_slice, start_slice = start_slice - 1, end_slice - 1 # i == -1 triggers ``len(self) + i`` selection that points to the # last element, not before-the-first one, subtracting len(self) # compensates that. if end_slice == -1: end_slice -= len(self) if start_slice == -1: start_slice -= len(self) return start_slice, end_slice def delete(self: _IndexT, loc) -> _IndexT: """ Make new Index with passed location(-s) deleted. Parameters ---------- loc : int or list of int Location of item(-s) which will be deleted. Use a list of locations to delete more than one value at the same time. Returns ------- Index Will be same type as self, except for RangeIndex. See Also -------- numpy.delete : Delete any rows and column from NumPy array (ndarray). 
Examples -------- >>> idx = pd.Index(['a', 'b', 'c']) >>> idx.delete(1) Index(['a', 'c'], dtype='object') >>> idx = pd.Index(['a', 'b', 'c']) >>> idx.delete([0, 2]) Index(['b'], dtype='object') """ values = self._values res_values: ArrayLike if isinstance(values, np.ndarray): # TODO(__array_function__): special casing will be unnecessary res_values = np.delete(values, loc) else: res_values = values.delete(loc) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(res_values, name=self.name) def insert(self, loc: int, item) -> Index: """ Make new Index inserting new item at location. Follows Python numpy.insert semantics for negative values. Parameters ---------- loc : int item : object Returns ------- Index """ item = lib.item_from_zerodim(item) if is_valid_na_for_dtype(item, self.dtype) and self.dtype != object: item = self._na_value arr = self._values try: if isinstance(arr, ExtensionArray): res_values = arr.insert(loc, item) return type(self)._simple_new(res_values, name=self.name) else: item = self._validate_fill_value(item) except (TypeError, ValueError, LossySetitemError): # e.g. trying to insert an integer into a DatetimeIndex # We cannot keep the same dtype, so cast to the (often object) # minimal shared dtype before doing the insert. dtype = self._find_common_type_compat(item) return self.astype(dtype).insert(loc, item) if arr.dtype != object or not isinstance( item, (tuple, np.datetime64, np.timedelta64) ): # with object-dtype we need to worry about numpy incorrectly casting # dt64/td64 to integer, also about treating tuples as sequences # special-casing dt64/td64 https://github.com/numpy/numpy/issues/12550 casted = arr.dtype.type(item) new_values = np.insert(arr, loc, casted) else: # error: No overload variant of "insert" matches argument types # "ndarray[Any, Any]", "int", "None" new_values = np.insert(arr, loc, None) # type: ignore[call-overload] loc = loc if loc >= 0 else loc - 1 new_values[loc] = item return Index._with_infer(new_values, name=self.name) def drop( self, labels: Index | np.ndarray | Iterable[Hashable], errors: IgnoreRaise = "raise", ) -> Index: """ Make new Index with passed list of labels deleted. Parameters ---------- labels : array-like or scalar errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and existing labels are dropped. Returns ------- Index Will be same type as self, except for RangeIndex. Raises ------ KeyError If not all of the labels are found in the selected axis """ if not isinstance(labels, Index): # avoid materializing e.g. RangeIndex arr_dtype = "object" if self.dtype == "object" else None labels = com.index_labels_to_array(labels, dtype=arr_dtype) indexer = self.get_indexer_for(labels) mask = indexer == -1 if mask.any(): if errors != "ignore": raise KeyError(f"{list(labels[mask])} not found in axis") indexer = indexer[~mask] return self.delete(indexer) def infer_objects(self, copy: bool = True) -> Index: """ If we have an object dtype, try to infer a non-object dtype. Parameters ---------- copy : bool, default True Whether to make a copy in cases where no inference occurs. """ if self._is_multi: raise NotImplementedError( "infer_objects is not implemented for MultiIndex. " "Use index.to_frame().infer_objects() instead." 
) if self.dtype != object: return self.copy() if copy else self values = self._values values = cast("npt.NDArray[np.object_]", values) res_values = lib.maybe_convert_objects( values, convert_datetime=True, convert_timedelta=True, convert_period=True, convert_interval=True, ) if copy and res_values is values: return self.copy() result = Index(res_values, name=self.name) if not copy and res_values is values and self._references is not None: result._references = self._references result._references.add_index_reference(result) return result # -------------------------------------------------------------------- # Generated Arithmetic, Comparison, and Unary Methods def _cmp_method(self, other, op): """ Wrapper used to dispatch comparison operations. """ if self.is_(other): # fastpath if op in {operator.eq, operator.le, operator.ge}: arr = np.ones(len(self), dtype=bool) if self._can_hold_na and not isinstance(self, ABCMultiIndex): # TODO: should set MultiIndex._can_hold_na = False? arr[self.isna()] = False return arr elif op is operator.ne: arr = np.zeros(len(self), dtype=bool) if self._can_hold_na and not isinstance(self, ABCMultiIndex): arr[self.isna()] = True return arr if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)) and len( self ) != len(other): raise ValueError("Lengths must match to compare") if not isinstance(other, ABCMultiIndex): other = extract_array(other, extract_numpy=True) else: other = np.asarray(other) if is_object_dtype(self.dtype) and isinstance(other, ExtensionArray): # e.g. PeriodArray, Categorical with np.errstate(all="ignore"): result = op(self._values, other) elif isinstance(self._values, ExtensionArray): result = op(self._values, other) elif is_object_dtype(self.dtype) and not isinstance(self, ABCMultiIndex): # don't pass MultiIndex with np.errstate(all="ignore"): result = ops.comp_method_OBJECT_ARRAY(op, self._values, other) else: with np.errstate(all="ignore"): result = ops.comparison_op(self._values, other, op) return result def _logical_method(self, other, op): res_name = ops.get_op_result_name(self, other) lvalues = self._values rvalues = extract_array(other, extract_numpy=True, extract_range=True) res_values = ops.logical_op(lvalues, rvalues, op) return self._construct_result(res_values, name=res_name) def _construct_result(self, result, name): if isinstance(result, tuple): return ( Index(result[0], name=name, dtype=result[0].dtype), Index(result[1], name=name, dtype=result[1].dtype), ) return Index(result, name=name, dtype=result.dtype) def _arith_method(self, other, op): if ( isinstance(other, Index) and is_object_dtype(other.dtype) and type(other) is not Index ): # We return NotImplemented for object-dtype index *subclasses* so they have # a chance to implement ops before we unwrap them. # See https://github.com/pandas-dev/pandas/issues/31109 return NotImplemented return super()._arith_method(other, op) def _unary_method(self, op): result = op(self._values) return Index(result, name=self.name) def __abs__(self) -> Index: return self._unary_method(operator.abs) def __neg__(self) -> Index: return self._unary_method(operator.neg) def __pos__(self) -> Index: return self._unary_method(operator.pos) def __invert__(self) -> Index: # GH#8875 return self._unary_method(operator.inv) # -------------------------------------------------------------------- # Reductions def any(self, *args, **kwargs): """ Return whether any element is Truthy. Parameters ---------- *args Required for compatibility with numpy. **kwargs Required for compatibility with numpy. 
Returns ------- bool or array-like (if axis is specified) A single element array-like may be converted to bool. See Also -------- Index.all : Return whether all elements are True. Series.all : Return whether all elements are True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to True because these are not equal to zero. Examples -------- >>> index = pd.Index([0, 1, 2]) >>> index.any() True >>> index = pd.Index([0, 0, 0]) >>> index.any() False """ nv.validate_any(args, kwargs) self._maybe_disable_logical_methods("any") # error: Argument 1 to "any" has incompatible type "ArrayLike"; expected # "Union[Union[int, float, complex, str, bytes, generic], Sequence[Union[int, # float, complex, str, bytes, generic]], Sequence[Sequence[Any]], # _SupportsArray]" return np.any(self.values) # type: ignore[arg-type] def all(self, *args, **kwargs): """ Return whether all elements are Truthy. Parameters ---------- *args Required for compatibility with numpy. **kwargs Required for compatibility with numpy. Returns ------- bool or array-like (if axis is specified) A single element array-like may be converted to bool. See Also -------- Index.any : Return whether any element in an Index is True. Series.any : Return whether any element in a Series is True. Series.all : Return whether all elements in a Series are True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to True because these are not equal to zero. Examples -------- True, because nonzero integers are considered True. >>> pd.Index([1, 2, 3]).all() True False, because ``0`` is considered False. >>> pd.Index([0, 1, 2]).all() False """ nv.validate_all(args, kwargs) self._maybe_disable_logical_methods("all") # error: Argument 1 to "all" has incompatible type "ArrayLike"; expected # "Union[Union[int, float, complex, str, bytes, generic], Sequence[Union[int, # float, complex, str, bytes, generic]], Sequence[Sequence[Any]], # _SupportsArray]" return np.all(self.values) # type: ignore[arg-type] def _maybe_disable_logical_methods(self, opname: str_t) -> None: """ raise if this Index subclass does not support any or all. 
""" if ( isinstance(self, ABCMultiIndex) or needs_i8_conversion(self.dtype) or is_interval_dtype(self.dtype) or is_categorical_dtype(self.dtype) or is_float_dtype(self.dtype) ): # This call will raise make_invalid_op(opname)(self) def argmin(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: nv.validate_argmin(args, kwargs) nv.validate_minmax_axis(axis) if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return -1 return super().argmin(skipna=skipna) def argmax(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: nv.validate_argmax(args, kwargs) nv.validate_minmax_axis(axis) if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return -1 return super().argmax(skipna=skipna) def min(self, axis=None, skipna: bool = True, *args, **kwargs): nv.validate_min(args, kwargs) nv.validate_minmax_axis(axis) if not len(self): return self._na_value if len(self) and self.is_monotonic_increasing: # quick check first = self[0] if not isna(first): return first if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return self._na_value if not self._is_multi and not isinstance(self._values, np.ndarray): return self._values._reduce(name="min", skipna=skipna) return super().min(skipna=skipna) def max(self, axis=None, skipna: bool = True, *args, **kwargs): nv.validate_max(args, kwargs) nv.validate_minmax_axis(axis) if not len(self): return self._na_value if len(self) and self.is_monotonic_increasing: # quick check last = self[-1] if not isna(last): return last if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return self._na_value if not self._is_multi and not isinstance(self._values, np.ndarray): return self._values._reduce(name="max", skipna=skipna) return super().max(skipna=skipna) # -------------------------------------------------------------------- def shape(self) -> Shape: """ Return a tuple of the shape of the underlying data. """ # See GH#27775, GH#27384 for history/reasoning in how this is defined. return (len(self),) The provided code snippet includes necessary dependencies for implementing the `get_indexer_indexer` function. Write a Python function `def get_indexer_indexer( target: Index, level: Level | list[Level] | None, ascending: list[bool] | bool, kind: SortKind, na_position: NaPosition, sort_remaining: bool, key: IndexKeyFunc, ) -> npt.NDArray[np.intp] | None` to solve the following problem: Helper method that return the indexer according to input parameters for the sort_index method of DataFrame and Series. Parameters ---------- target : Index level : int or level name or list of ints or list of level names ascending : bool or list of bools, default True kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' na_position : {'first', 'last'}, default 'last' sort_remaining : bool, default True key : callable, optional Returns ------- Optional[ndarray[intp]] The indexer for the new index. Here is the function: def get_indexer_indexer( target: Index, level: Level | list[Level] | None, ascending: list[bool] | bool, kind: SortKind, na_position: NaPosition, sort_remaining: bool, key: IndexKeyFunc, ) -> npt.NDArray[np.intp] | None: """ Helper method that return the indexer according to input parameters for the sort_index method of DataFrame and Series. 
Parameters ---------- target : Index level : int or level name or list of ints or list of level names ascending : bool or list of bools, default True kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' na_position : {'first', 'last'}, default 'last' sort_remaining : bool, default True key : callable, optional Returns ------- Optional[ndarray[intp]] The indexer for the new index. """ target = ensure_key_mapped(target, key, levels=level) target = target._sort_levels_monotonic() if level is not None: _, indexer = target.sortlevel( level, ascending=ascending, sort_remaining=sort_remaining ) elif isinstance(target, ABCMultiIndex): indexer = lexsort_indexer( target._get_codes_for_sorting(), orders=ascending, na_position=na_position ) else: # Check monotonic-ness before sort an index (GH 11080) if (ascending and target.is_monotonic_increasing) or ( not ascending and target.is_monotonic_decreasing ): return None # ascending can only be a Sequence for MultiIndex indexer = nargsort( target, kind=kind, ascending=cast(bool, ascending), na_position=na_position, ) return indexer
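A minimal usage sketch of the branch logic above, assuming the helper is importable from pandas.core.sorting (it is internal API, so the import path may vary across versions); the index values are illustrative assumptions, not taken from pandas tests:

import pandas as pd
from pandas.core.sorting import get_indexer_indexer  # internal API; path assumed

# An unsorted, non-hierarchical Index takes the nargsort branch.
idx = pd.Index([30, 10, 20])
indexer = get_indexer_indexer(
    idx,
    level=None,
    ascending=True,
    kind="quicksort",
    na_position="last",
    sort_remaining=True,
    key=None,
)
print(indexer)            # [1 2 0] -- positions that sort the index
print(idx.take(indexer))  # Index([10, 20, 30], dtype='int64')

# An already-monotonic index short-circuits and returns None (GH 11080).
monotonic = pd.Index([1, 2, 3])
assert get_indexer_indexer(
    monotonic, None, True, "quicksort", "last", True, None
) is None

The None return on the already-monotonic index is the fast path flagged by the GH 11080 comment: sort_index can then return the object unchanged instead of materializing a take.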
Helper method that returns the indexer according to input parameters for
the sort_index method of DataFrame and Series.

Parameters
----------
target : Index
level : int or level name or list of ints or list of level names
ascending : bool or list of bools, default True
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'
na_position : {'first', 'last'}, default 'last'
sort_remaining : bool, default True
key : callable, optional

Returns
-------
Optional[ndarray[intp]]
    The indexer for the new index.
173,031
from __future__ import annotations from collections import defaultdict from typing import ( TYPE_CHECKING, Callable, DefaultDict, Hashable, Iterable, Sequence, cast, ) import numpy as np from pandas._libs import ( algos, hashtable, lib, ) from pandas._libs.hashtable import unique_label_indices from pandas._typing import ( AxisInt, IndexKeyFunc, Level, NaPosition, Shape, SortKind, npt, ) from pandas.core.dtypes.common import ( ensure_int64, ensure_platform_int, is_extension_array_dtype, ) from pandas.core.dtypes.generic import ( ABCMultiIndex, ABCRangeIndex, ) from pandas.core.dtypes.missing import isna from pandas.core.construction import extract_array def get_group_index( labels, shape: Shape, sort: bool, xnull: bool ) -> npt.NDArray[np.int64]: """ For the particular label_list, gets the offsets into the hypothetical list representing the totally ordered cartesian product of all possible label combinations, *as long as* this space fits within int64 bounds; otherwise, though group indices identify unique combinations of labels, they cannot be deconstructed. - If `sort`, rank of returned ids preserve lexical ranks of labels. i.e. returned id's can be used to do lexical sort on labels; - If `xnull` nulls (-1 labels) are passed through. Parameters ---------- labels : sequence of arrays Integers identifying levels at each location shape : tuple[int, ...] Number of unique levels at each location sort : bool If the ranks of returned ids should match lexical ranks of labels xnull : bool If true nulls are excluded. i.e. -1 values in the labels are passed through. Returns ------- An array of type int64 where two elements are equal if their corresponding labels are equal at all location. Notes ----- The length of `labels` and `shape` must be identical. """ def _int64_cut_off(shape) -> int: acc = 1 for i, mul in enumerate(shape): acc *= int(mul) if not acc < lib.i8max: return i return len(shape) def maybe_lift(lab, size) -> tuple[np.ndarray, int]: # promote nan values (assigned -1 label in lab array) # so that all output values are non-negative return (lab + 1, size + 1) if (lab == -1).any() else (lab, size) labels = [ensure_int64(x) for x in labels] lshape = list(shape) if not xnull: for i, (lab, size) in enumerate(zip(labels, shape)): lab, size = maybe_lift(lab, size) labels[i] = lab lshape[i] = size labels = list(labels) # Iteratively process all the labels in chunks sized so less # than lib.i8max unique int ids will be required for each chunk while True: # how many levels can be done without overflow: nlev = _int64_cut_off(lshape) # compute flat ids for the first `nlev` levels stride = np.prod(lshape[1:nlev], dtype="i8") out = stride * labels[0].astype("i8", subok=False, copy=False) for i in range(1, nlev): if lshape[i] == 0: stride = np.int64(0) else: stride //= lshape[i] out += labels[i] * stride if xnull: # exclude nulls mask = labels[0] == -1 for lab in labels[1:nlev]: mask |= lab == -1 out[mask] = -1 if nlev == len(lshape): # all levels done! break # compress what has been done so far in order to avoid overflow # to retain lexical ranks, obs_ids should be sorted comp_ids, obs_ids = compress_group_index(out, sort=sort) labels = [comp_ids] + labels[nlev:] lshape = [len(obs_ids)] + lshape[nlev:] return out def compress_group_index( group_index: npt.NDArray[np.int64], sort: bool = True ) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]: """ Group_index is offsets into cartesian product of all possible labels. 
This space can be huge, so this function compresses it, by computing offsets (comp_ids) into the list of unique labels (obs_group_ids). """ size_hint = len(group_index) table = hashtable.Int64HashTable(size_hint) group_index = ensure_int64(group_index) # note, group labels come out ascending (ie, 1,2,3 etc) comp_ids, obs_group_ids = table.get_labels_groupby(group_index) if sort and len(obs_group_ids) > 0: obs_group_ids, comp_ids = _reorder_by_uniques(obs_group_ids, comp_ids) return ensure_int64(comp_ids), ensure_int64(obs_group_ids) Shape = Tuple[int, ...] The provided code snippet includes necessary dependencies for implementing the `get_compressed_ids` function. Write a Python function `def get_compressed_ids( labels, sizes: Shape ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.int64]]` to solve the following problem: Group_index is offsets into cartesian product of all possible labels. This space can be huge, so this function compresses it, by computing offsets (comp_ids) into the list of unique labels (obs_group_ids). Parameters ---------- labels : list of label arrays sizes : tuple[int] of size of the levels Returns ------- np.ndarray[np.intp] comp_ids np.ndarray[np.int64] obs_group_ids Here is the function: def get_compressed_ids( labels, sizes: Shape ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.int64]]: """ Group_index is offsets into cartesian product of all possible labels. This space can be huge, so this function compresses it, by computing offsets (comp_ids) into the list of unique labels (obs_group_ids). Parameters ---------- labels : list of label arrays sizes : tuple[int] of size of the levels Returns ------- np.ndarray[np.intp] comp_ids np.ndarray[np.int64] obs_group_ids """ ids = get_group_index(labels, sizes, sort=True, xnull=False) return compress_group_index(ids, sort=True)
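A minimal worked example of the compression, assuming the same internal import path (pandas.core.sorting) and made-up level codes. With level sizes (2, 3), the flat group id of row i is labels[0][i] * 3 + labels[1][i], and compress_group_index then re-labels the observed ids densely:

import numpy as np
from pandas.core.sorting import get_compressed_ids  # internal API; path assumed

# Level codes for 4 rows over two levels of sizes 2 and 3.
labels = [np.array([0, 0, 1, 1]), np.array([0, 2, 0, 2])]
comp_ids, obs_group_ids = get_compressed_ids(labels, (2, 3))

print(obs_group_ids)  # [0 2 3 5] -- flat ids actually observed (0*3+0, 0*3+2, ...)
print(comp_ids)       # [0 1 2 3] -- per-row offsets into obs_group_ids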
Group_index is offsets into the cartesian product of all possible labels. This
space can be huge, so this function compresses it by computing offsets
(comp_ids) into the list of unique labels (obs_group_ids).

Parameters
----------
labels : list of label arrays
sizes : tuple[int] of size of the levels

Returns
-------
np.ndarray[np.intp]
    comp_ids
np.ndarray[np.int64]
    obs_group_ids
173,032
from __future__ import annotations from collections import defaultdict from typing import ( TYPE_CHECKING, Callable, DefaultDict, Hashable, Iterable, Sequence, cast, ) import numpy as np from pandas._libs import ( algos, hashtable, lib, ) from pandas._libs.hashtable import unique_label_indices from pandas._typing import ( AxisInt, IndexKeyFunc, Level, NaPosition, Shape, SortKind, npt, ) from pandas.core.dtypes.common import ( ensure_int64, ensure_platform_int, is_extension_array_dtype, ) from pandas.core.dtypes.generic import ( ABCMultiIndex, ABCRangeIndex, ) from pandas.core.dtypes.missing import isna from pandas.core.construction import extract_array def _nanargminmax(values: np.ndarray, mask: npt.NDArray[np.bool_], func) -> int: """ See nanargminmax.__doc__. """ idx = np.arange(values.shape[0]) non_nans = values[~mask] non_nan_idx = idx[~mask] return non_nan_idx[func(non_nans)] AxisInt = int def isna(obj: Scalar) -> bool: ... def isna( obj: ArrayLike | Index | list, ) -> npt.NDArray[np.bool_]: ... def isna(obj: NDFrameT) -> NDFrameT: ... def isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]: ... def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: ... def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: """ Detect missing values for an array-like object. This function takes a scalar or array-like object and indicates whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). Parameters ---------- obj : scalar or array-like Object to check for null or missing values. Returns ------- bool or array-like of bool For scalar input, returns a scalar boolean. For array input, returns an array of boolean indicating whether each corresponding element is missing. See Also -------- notna : Boolean inverse of pandas.isna. Series.isna : Detect missing values in a Series. DataFrame.isna : Detect missing values in a DataFrame. Index.isna : Detect missing values in an Index. Examples -------- Scalar arguments (including strings) result in a scalar boolean. >>> pd.isna('dog') False >>> pd.isna(pd.NA) True >>> pd.isna(np.nan) True ndarrays result in an ndarray of booleans. >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) >>> array array([[ 1., nan, 3.], [ 4., 5., nan]]) >>> pd.isna(array) array([[False, True, False], [False, False, True]]) For indexes, an ndarray of booleans is returned. >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, ... "2017-07-08"]) >>> index DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], dtype='datetime64[ns]', freq=None) >>> pd.isna(index) array([False, False, True, False]) For Series and DataFrame, the same type is returned, containing booleans. >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df 0 1 2 0 ant bee cat 1 dog None fly >>> pd.isna(df) 0 1 2 0 False False False 1 False True False >>> pd.isna(df[1]) 0 False 1 True Name: 1, dtype: bool """ return _isna(obj) The provided code snippet includes necessary dependencies for implementing the `nargminmax` function. Write a Python function `def nargminmax(values: ExtensionArray, method: str, axis: AxisInt = 0)` to solve the following problem: Implementation of np.argmin/argmax but for ExtensionArray and which handles missing values. 
Parameters ---------- values : ExtensionArray method : {"argmax", "argmin"} axis : int, default 0 Returns ------- int Here is the function: def nargminmax(values: ExtensionArray, method: str, axis: AxisInt = 0): """ Implementation of np.argmin/argmax but for ExtensionArray and which handles missing values. Parameters ---------- values : ExtensionArray method : {"argmax", "argmin"} axis : int, default 0 Returns ------- int """ assert method in {"argmax", "argmin"} func = np.argmax if method == "argmax" else np.argmin mask = np.asarray(isna(values)) arr_values = values._values_for_argsort() if arr_values.ndim > 1: if mask.any(): if axis == 1: zipped = zip(arr_values, mask) else: zipped = zip(arr_values.T, mask.T) return np.array([_nanargminmax(v, m, func) for v, m in zipped]) return func(arr_values, axis=axis) return _nanargminmax(arr_values, mask, func)
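A short sketch of why the mask matters, assuming the internal import path pandas.core.sorting and an illustrative nullable-integer array; a plain np.argmin over the backing data could land on the masked slot, whereas _nanargminmax restricts the search to the non-missing positions:

import pandas as pd
from pandas.core.sorting import nargminmax  # internal API; path assumed

arr = pd.array([3, pd.NA, 1, 2], dtype="Int64")
print(nargminmax(arr, "argmin"))  # 2 -- position of 1; the <NA> at position 1 is skipped
print(nargminmax(arr, "argmax"))  # 0 -- position of 3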
Implementation of np.argmin/argmax that works for ExtensionArray and
handles missing values.

Parameters
----------
values : ExtensionArray
method : {"argmax", "argmin"}
axis : int, default 0

Returns
-------
int
173,033
from __future__ import annotations from collections import defaultdict from typing import ( TYPE_CHECKING, Callable, DefaultDict, Hashable, Iterable, Sequence, cast, ) import numpy as np from pandas._libs import ( algos, hashtable, lib, ) from pandas._libs.hashtable import unique_label_indices from pandas._typing import ( AxisInt, IndexKeyFunc, Level, NaPosition, Shape, SortKind, npt, ) from pandas.core.dtypes.common import ( ensure_int64, ensure_platform_int, is_extension_array_dtype, ) from pandas.core.dtypes.generic import ( ABCMultiIndex, ABCRangeIndex, ) from pandas.core.dtypes.missing import isna from pandas.core.construction import extract_array class defaultdict(Dict[_KT, _VT], Generic[_KT, _VT]): default_factory: Callable[[], _VT] def __init__(self, **kwargs: _VT) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]]) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], **kwargs: _VT) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], map: Mapping[_KT, _VT]) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], iterable: Iterable[Tuple[_KT, _VT]]) -> None: ... def __init__( self, default_factory: Optional[Callable[[], _VT]], iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT ) -> None: ... def __missing__(self, key: _KT) -> _VT: ... def copy(self: _S) -> _S: ... DefaultDict = _Alias() class Iterable(Protocol[_T_co]): def __iter__(self) -> Iterator[_T_co]: ... class Index(IndexOpsMixin, PandasObject): """ Immutable sequence used for indexing and alignment. The basic object storing axis labels for all pandas objects. .. versionchanged:: 2.0.0 Index can hold all numpy numeric dtypes (except float16). Previously only int64/uint64/float64 dtypes were accepted. Parameters ---------- data : array-like (1-dimensional) dtype : NumPy dtype (default: object) If dtype is None, we find the dtype that best fits the data. If an actual dtype is provided, we coerce to that dtype if it's safe. Otherwise, an error will be raised. copy : bool Make a copy of input ndarray. name : object Name to be stored in the index. tupleize_cols : bool (default: True) When True, attempt to create a MultiIndex if possible. See Also -------- RangeIndex : Index implementing a monotonic integer range. CategoricalIndex : Index of :class:`Categorical` s. MultiIndex : A multi-level, or hierarchical Index. IntervalIndex : An Index of :class:`Interval` s. DatetimeIndex : Index of datetime64 data. TimedeltaIndex : Index of timedelta64 data. PeriodIndex : Index of Period data. Notes ----- An Index instance can **only** contain hashable objects. An Index instance *can not* hold numpy float16 dtype. 
Examples -------- >>> pd.Index([1, 2, 3]) Index([1, 2, 3], dtype='int64') >>> pd.Index(list('abc')) Index(['a', 'b', 'c'], dtype='object') >>> pd.Index([1, 2, 3], dtype="uint8") Index([1, 2, 3], dtype='uint8') """ # To hand over control to subclasses _join_precedence = 1 # Cython methods; see github.com/cython/cython/issues/2647 # for why we need to wrap these instead of making them class attributes # Moreover, cython will choose the appropriate-dtyped sub-function # given the dtypes of the passed arguments def _left_indexer_unique(self: _IndexT, other: _IndexT) -> npt.NDArray[np.intp]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) # similar but not identical to ov.searchsorted(sv) return libjoin.left_join_indexer_unique(sv, ov) def _left_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.left_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx def _inner_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.inner_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx def _outer_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.outer_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx _typ: str = "index" _data: ExtensionArray | np.ndarray _data_cls: type[ExtensionArray] | tuple[type[np.ndarray], type[ExtensionArray]] = ( np.ndarray, ExtensionArray, ) _id: object | None = None _name: Hashable = None # MultiIndex.levels previously allowed setting the index name. We # don't allow this anymore, and raise if it happens rather than # failing silently. 
_no_setting_name: bool = False _comparables: list[str] = ["name"] _attributes: list[str] = ["name"] def _can_hold_strings(self) -> bool: return not is_numeric_dtype(self) _engine_types: dict[np.dtype | ExtensionDtype, type[libindex.IndexEngine]] = { np.dtype(np.int8): libindex.Int8Engine, np.dtype(np.int16): libindex.Int16Engine, np.dtype(np.int32): libindex.Int32Engine, np.dtype(np.int64): libindex.Int64Engine, np.dtype(np.uint8): libindex.UInt8Engine, np.dtype(np.uint16): libindex.UInt16Engine, np.dtype(np.uint32): libindex.UInt32Engine, np.dtype(np.uint64): libindex.UInt64Engine, np.dtype(np.float32): libindex.Float32Engine, np.dtype(np.float64): libindex.Float64Engine, np.dtype(np.complex64): libindex.Complex64Engine, np.dtype(np.complex128): libindex.Complex128Engine, } def _engine_type( self, ) -> type[libindex.IndexEngine] | type[libindex.ExtensionEngine]: return self._engine_types.get(self.dtype, libindex.ObjectEngine) # whether we support partial string indexing. Overridden # in DatetimeIndex and PeriodIndex _supports_partial_string_indexing = False _accessors = {"str"} str = CachedAccessor("str", StringMethods) _references = None # -------------------------------------------------------------------- # Constructors def __new__( cls, data=None, dtype=None, copy: bool = False, name=None, tupleize_cols: bool = True, ) -> Index: from pandas.core.indexes.range import RangeIndex name = maybe_extract_name(name, data, cls) if dtype is not None: dtype = pandas_dtype(dtype) data_dtype = getattr(data, "dtype", None) refs = None if not copy and isinstance(data, (ABCSeries, Index)): refs = data._references # range if isinstance(data, (range, RangeIndex)): result = RangeIndex(start=data, copy=copy, name=name) if dtype is not None: return result.astype(dtype, copy=False) return result elif is_ea_or_datetimelike_dtype(dtype): # non-EA dtype indexes have special casting logic, so we punt here pass elif is_ea_or_datetimelike_dtype(data_dtype): pass elif isinstance(data, (np.ndarray, Index, ABCSeries)): if isinstance(data, ABCMultiIndex): data = data._values if data.dtype.kind not in ["i", "u", "f", "b", "c", "m", "M"]: # GH#11836 we need to avoid having numpy coerce # things that look like ints/floats to ints unless # they are actually ints, e.g. '0' and 0.0 # should not be coerced data = com.asarray_tuplesafe(data, dtype=_dtype_obj) elif is_scalar(data): raise cls._raise_scalar_data_error(data) elif hasattr(data, "__array__"): return Index(np.asarray(data), dtype=dtype, copy=copy, name=name) elif not is_list_like(data) and not isinstance(data, memoryview): # 2022-11-16 the memoryview check is only necessary on some CI # builds, not clear why raise cls._raise_scalar_data_error(data) else: if tupleize_cols: # GH21470: convert iterable to list before determining if empty if is_iterator(data): data = list(data) if data and all(isinstance(e, tuple) for e in data): # we must be all tuples, otherwise don't construct # 10697 from pandas.core.indexes.multi import MultiIndex return MultiIndex.from_tuples(data, names=name) # other iterable of some kind if not isinstance(data, (list, tuple)): # we allow set/frozenset, which Series/sanitize_array does not, so # cast to list here data = list(data) if len(data) == 0: # unlike Series, we default to object dtype: data = np.array(data, dtype=object) if len(data) and isinstance(data[0], tuple): # Ensure we get 1-D array of tuples instead of 2D array. 
data = com.asarray_tuplesafe(data, dtype=_dtype_obj) try: arr = sanitize_array(data, None, dtype=dtype, copy=copy) except ValueError as err: if "index must be specified when data is not list-like" in str(err): raise cls._raise_scalar_data_error(data) from err if "Data must be 1-dimensional" in str(err): raise ValueError("Index data must be 1-dimensional") from err raise arr = ensure_wrapped_if_datetimelike(arr) klass = cls._dtype_to_subclass(arr.dtype) arr = klass._ensure_array(arr, arr.dtype, copy=False) return klass._simple_new(arr, name, refs=refs) def _ensure_array(cls, data, dtype, copy: bool): """ Ensure we have a valid array to pass to _simple_new. """ if data.ndim > 1: # GH#13601, GH#20285, GH#27125 raise ValueError("Index data must be 1-dimensional") elif dtype == np.float16: # float16 not supported (no indexing engine) raise NotImplementedError("float16 indexes are not supported") if copy: # asarray_tuplesafe does not always copy underlying data, # so need to make sure that this happens data = data.copy() return data def _dtype_to_subclass(cls, dtype: DtypeObj): # Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423 if isinstance(dtype, ExtensionDtype): if isinstance(dtype, DatetimeTZDtype): from pandas import DatetimeIndex return DatetimeIndex elif isinstance(dtype, CategoricalDtype): from pandas import CategoricalIndex return CategoricalIndex elif isinstance(dtype, IntervalDtype): from pandas import IntervalIndex return IntervalIndex elif isinstance(dtype, PeriodDtype): from pandas import PeriodIndex return PeriodIndex return Index if dtype.kind == "M": from pandas import DatetimeIndex return DatetimeIndex elif dtype.kind == "m": from pandas import TimedeltaIndex return TimedeltaIndex elif dtype.kind == "O": # NB: assuming away MultiIndex return Index elif issubclass(dtype.type, str) or is_numeric_dtype(dtype): return Index raise NotImplementedError(dtype) # NOTE for new Index creation: # - _simple_new: It returns new Index with the same type as the caller. # All metadata (such as name) must be provided by caller's responsibility. # Using _shallow_copy is recommended because it fills these metadata # otherwise specified. # - _shallow_copy: It returns new Index with the same type (using # _simple_new), but fills caller's metadata otherwise specified. Passed # kwargs will overwrite corresponding metadata. # See each method's docstring. def _simple_new( cls: type[_IndexT], values: ArrayLike, name: Hashable = None, refs=None ) -> _IndexT: """ We require that we have a dtype compat for the values. If we are passed a non-dtype compat, then coerce using the constructor. Must be careful not to recurse. """ assert isinstance(values, cls._data_cls), type(values) result = object.__new__(cls) result._data = values result._name = name result._cache = {} result._reset_identity() if refs is not None: result._references = refs else: result._references = BlockValuesRefs() result._references.add_index_reference(result) return result def _with_infer(cls, *args, **kwargs): """ Constructor that uses the 1.0.x behavior inferring numeric dtypes for ndarray[object] inputs. 
""" result = cls(*args, **kwargs) if result.dtype == _dtype_obj and not result._is_multi: # error: Argument 1 to "maybe_convert_objects" has incompatible type # "Union[ExtensionArray, ndarray[Any, Any]]"; expected # "ndarray[Any, Any]" values = lib.maybe_convert_objects(result._values) # type: ignore[arg-type] if values.dtype.kind in ["i", "u", "f", "b"]: return Index(values, name=result.name) return result def _constructor(self: _IndexT) -> type[_IndexT]: return type(self) def _maybe_check_unique(self) -> None: """ Check that an Index has no duplicates. This is typically only called via `NDFrame.flags.allows_duplicate_labels.setter` when it's set to True (duplicates aren't allowed). Raises ------ DuplicateLabelError When the index is not unique. """ if not self.is_unique: msg = """Index has duplicates.""" duplicates = self._format_duplicate_message() msg += f"\n{duplicates}" raise DuplicateLabelError(msg) def _format_duplicate_message(self) -> DataFrame: """ Construct the DataFrame for a DuplicateLabelError. This returns a DataFrame indicating the labels and positions of duplicates in an index. This should only be called when it's already known that duplicates are present. Examples -------- >>> idx = pd.Index(['a', 'b', 'a']) >>> idx._format_duplicate_message() positions label a [0, 2] """ from pandas import Series duplicates = self[self.duplicated(keep="first")].unique() assert len(duplicates) out = Series(np.arange(len(self))).groupby(self).agg(list)[duplicates] if self._is_multi: # test_format_duplicate_labels_message_multi # error: "Type[Index]" has no attribute "from_tuples" [attr-defined] out.index = type(self).from_tuples(out.index) # type: ignore[attr-defined] if self.nlevels == 1: out = out.rename_axis("label") return out.to_frame(name="positions") # -------------------------------------------------------------------- # Index Internals Methods def _shallow_copy(self: _IndexT, values, name: Hashable = no_default) -> _IndexT: """ Create a new Index with the same class as the caller, don't copy the data, use the same object attributes with passed in attributes taking precedence. *this is an internal non-public method* Parameters ---------- values : the values to create the new Index, optional name : Label, defaults to self.name """ name = self._name if name is no_default else name return self._simple_new(values, name=name, refs=self._references) def _view(self: _IndexT) -> _IndexT: """ fastpath to make a shallow copy, i.e. new object with same data. """ result = self._simple_new(self._values, name=self._name, refs=self._references) result._cache = self._cache return result def _rename(self: _IndexT, name: Hashable) -> _IndexT: """ fastpath for rename if new name is already validated. """ result = self._view() result._name = name return result def is_(self, other) -> bool: """ More flexible, faster check like ``is`` but that works through views. Note: this is *not* the same as ``Index.identical()``, which checks that metadata is also the same. Parameters ---------- other : object Other object to compare against. Returns ------- bool True if both have same underlying data, False otherwise. See Also -------- Index.identical : Works like ``Index.is_`` but also checks metadata. """ if self is other: return True elif not hasattr(other, "_id"): return False elif self._id is None or other._id is None: return False else: return self._id is other._id def _reset_identity(self) -> None: """ Initializes or resets ``_id`` attribute with new object. 
""" self._id = object() def _cleanup(self) -> None: self._engine.clear_mapping() def _engine( self, ) -> libindex.IndexEngine | libindex.ExtensionEngine | libindex.MaskedIndexEngine: # For base class (object dtype) we get ObjectEngine target_values = self._get_engine_target() if isinstance(target_values, ExtensionArray): if isinstance(target_values, (BaseMaskedArray, ArrowExtensionArray)): try: return _masked_engines[target_values.dtype.name](target_values) except KeyError: # Not supported yet e.g. decimal pass elif self._engine_type is libindex.ObjectEngine: return libindex.ExtensionEngine(target_values) target_values = cast(np.ndarray, target_values) # to avoid a reference cycle, bind `target_values` to a local variable, so # `self` is not passed into the lambda. if target_values.dtype == bool: return libindex.BoolEngine(target_values) elif target_values.dtype == np.complex64: return libindex.Complex64Engine(target_values) elif target_values.dtype == np.complex128: return libindex.Complex128Engine(target_values) elif needs_i8_conversion(self.dtype): # We need to keep M8/m8 dtype when initializing the Engine, # but don't want to change _get_engine_target bc it is used # elsewhere # error: Item "ExtensionArray" of "Union[ExtensionArray, # ndarray[Any, Any]]" has no attribute "_ndarray" [union-attr] target_values = self._data._ndarray # type: ignore[union-attr] # error: Argument 1 to "ExtensionEngine" has incompatible type # "ndarray[Any, Any]"; expected "ExtensionArray" return self._engine_type(target_values) # type: ignore[arg-type] def _dir_additions_for_owner(self) -> set[str_t]: """ Add the string-like labels to the owner dataframe/series dir output. If this is a MultiIndex, it's first level values are used. """ return { c for c in self.unique(level=0)[: get_option("display.max_dir_items")] if isinstance(c, str) and c.isidentifier() } # -------------------------------------------------------------------- # Array-Like Methods # ndarray compat def __len__(self) -> int: """ Return the length of the Index. """ return len(self._data) def __array__(self, dtype=None) -> np.ndarray: """ The array interface, return my values. """ return np.asarray(self._data, dtype=dtype) def __array_ufunc__(self, ufunc: np.ufunc, method: str_t, *inputs, **kwargs): if any(isinstance(other, (ABCSeries, ABCDataFrame)) for other in inputs): return NotImplemented result = arraylike.maybe_dispatch_ufunc_to_dunder_op( self, ufunc, method, *inputs, **kwargs ) if result is not NotImplemented: return result if "out" in kwargs: # e.g. test_dti_isub_tdi return arraylike.dispatch_ufunc_with_out( self, ufunc, method, *inputs, **kwargs ) if method == "reduce": result = arraylike.dispatch_reduction_ufunc( self, ufunc, method, *inputs, **kwargs ) if result is not NotImplemented: return result new_inputs = [x if x is not self else x._values for x in inputs] result = getattr(ufunc, method)(*new_inputs, **kwargs) if ufunc.nout == 2: # i.e. np.divmod, np.modf, np.frexp return tuple(self.__array_wrap__(x) for x in result) if result.dtype == np.float16: result = result.astype(np.float32) return self.__array_wrap__(result) def __array_wrap__(self, result, context=None): """ Gets called after a ufunc and other functions e.g. np.split. """ result = lib.item_from_zerodim(result) if is_bool_dtype(result) or lib.is_scalar(result) or np.ndim(result) > 1: return result return Index(result, name=self.name) def dtype(self) -> DtypeObj: """ Return the dtype object of the underlying data. 
""" return self._data.dtype def ravel(self, order: str_t = "C") -> Index: """ Return a view on self. Returns ------- Index See Also -------- numpy.ndarray.ravel : Return a flattened array. """ return self[:] def view(self, cls=None): # we need to see if we are subclassing an # index type here if cls is not None and not hasattr(cls, "_typ"): dtype = cls if isinstance(cls, str): dtype = pandas_dtype(cls) if isinstance(dtype, (np.dtype, ExtensionDtype)) and needs_i8_conversion( dtype ): if dtype.kind == "m" and dtype != "m8[ns]": # e.g. m8[s] return self._data.view(cls) idx_cls = self._dtype_to_subclass(dtype) # NB: we only get here for subclasses that override # _data_cls such that it is a type and not a tuple # of types. arr_cls = idx_cls._data_cls arr = arr_cls(self._data.view("i8"), dtype=dtype) return idx_cls._simple_new(arr, name=self.name, refs=self._references) result = self._data.view(cls) else: result = self._view() if isinstance(result, Index): result._id = self._id return result def astype(self, dtype, copy: bool = True): """ Create an Index with values cast to dtypes. The class of a new Index is determined by dtype. When conversion is impossible, a TypeError exception is raised. Parameters ---------- dtype : numpy dtype or pandas type Note that any signed integer `dtype` is treated as ``'int64'``, and any unsigned integer `dtype` is treated as ``'uint64'``, regardless of the size. copy : bool, default True By default, astype always returns a newly allocated object. If copy is set to False and internal requirements on dtype are satisfied, the original data is used to create a new Index or the original Index is returned. Returns ------- Index Index with values cast to specified dtype. """ if dtype is not None: dtype = pandas_dtype(dtype) if is_dtype_equal(self.dtype, dtype): # Ensure that self.astype(self.dtype) is self return self.copy() if copy else self values = self._data if isinstance(values, ExtensionArray): with rewrite_exception(type(values).__name__, type(self).__name__): new_values = values.astype(dtype, copy=copy) elif isinstance(dtype, ExtensionDtype): cls = dtype.construct_array_type() # Note: for RangeIndex and CategoricalDtype self vs self._values # behaves differently here. new_values = cls._from_sequence(self, dtype=dtype, copy=copy) else: # GH#13149 specifically use astype_array instead of astype new_values = astype_array(values, dtype=dtype, copy=copy) # pass copy=False because any copying will be done in the astype above result = Index(new_values, name=self.name, dtype=new_values.dtype, copy=False) if ( not copy and self._references is not None and astype_is_view(self.dtype, dtype) ): result._references = self._references result._references.add_index_reference(result) return result _index_shared_docs[ "take" ] = """ Return a new %(klass)s of the values selected by the indices. For internal compatibility with numpy arrays. Parameters ---------- indices : array-like Indices to be taken. axis : int, optional The axis over which to select values, always 0. allow_fill : bool, default True fill_value : scalar, default None If allow_fill=True and fill_value is not None, indices specified by -1 are regarded as NA. If Index doesn't hold NA, raise ValueError. Returns ------- Index An index formed of elements at the given indices. Will be the same type as self, except for RangeIndex. See Also -------- numpy.ndarray.take: Return an array formed from the elements of a at the given indices. 
""" def take( self, indices, axis: Axis = 0, allow_fill: bool = True, fill_value=None, **kwargs, ): if kwargs: nv.validate_take((), kwargs) if is_scalar(indices): raise TypeError("Expected indices to be array-like") indices = ensure_platform_int(indices) allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices) # Note: we discard fill_value and use self._na_value, only relevant # in the case where allow_fill is True and fill_value is not None values = self._values if isinstance(values, np.ndarray): taken = algos.take( values, indices, allow_fill=allow_fill, fill_value=self._na_value ) else: # algos.take passes 'axis' keyword which not all EAs accept taken = values.take( indices, allow_fill=allow_fill, fill_value=self._na_value ) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(taken, name=self.name) def _maybe_disallow_fill(self, allow_fill: bool, fill_value, indices) -> bool: """ We only use pandas-style take when allow_fill is True _and_ fill_value is not None. """ if allow_fill and fill_value is not None: # only fill if we are passing a non-None fill_value if self._can_hold_na: if (indices < -1).any(): raise ValueError( "When allow_fill=True and fill_value is not None, " "all indices must be >= -1" ) else: cls_name = type(self).__name__ raise ValueError( f"Unable to fill values because {cls_name} cannot contain NA" ) else: allow_fill = False return allow_fill _index_shared_docs[ "repeat" ] = """ Repeat elements of a %(klass)s. Returns a new %(klass)s where each element of the current %(klass)s is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty %(klass)s. axis : None Must be ``None``. Has no effect but is accepted for compatibility with numpy. Returns ------- %(klass)s Newly created %(klass)s with repeated elements. See Also -------- Series.repeat : Equivalent function for Series. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> idx = pd.Index(['a', 'b', 'c']) >>> idx Index(['a', 'b', 'c'], dtype='object') >>> idx.repeat(2) Index(['a', 'a', 'b', 'b', 'c', 'c'], dtype='object') >>> idx.repeat([1, 2, 3]) Index(['a', 'b', 'b', 'c', 'c', 'c'], dtype='object') """ def repeat(self, repeats, axis=None): repeats = ensure_platform_int(repeats) nv.validate_repeat((), {"axis": axis}) res_values = self._values.repeat(repeats) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(res_values, name=self.name) # -------------------------------------------------------------------- # Copying Methods def copy( self: _IndexT, name: Hashable | None = None, deep: bool = False, ) -> _IndexT: """ Make a copy of this object. Name is set on the new object. Parameters ---------- name : Label, optional Set name for new object. deep : bool, default False Returns ------- Index Index refer to new object which is a copy of this object. Notes ----- In most cases, there should be no functional difference from using ``deep``, but if ``deep`` is passed it will attempt to deepcopy. 
""" name = self._validate_names(name=name, deep=deep)[0] if deep: new_data = self._data.copy() new_index = type(self)._simple_new(new_data, name=name) else: new_index = self._rename(name=name) return new_index def __copy__(self: _IndexT, **kwargs) -> _IndexT: return self.copy(**kwargs) def __deepcopy__(self: _IndexT, memo=None) -> _IndexT: """ Parameters ---------- memo, default None Standard signature. Unused """ return self.copy(deep=True) # -------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str_t: """ Return a string representation for this object. """ klass_name = type(self).__name__ data = self._format_data() attrs = self._format_attrs() space = self._format_space() attrs_str = [f"{k}={v}" for k, v in attrs] prepr = f",{space}".join(attrs_str) # no data provided, just attributes if data is None: data = "" return f"{klass_name}({data}{prepr})" def _format_space(self) -> str_t: # using space here controls if the attributes # are line separated or not (the default) # max_seq_items = get_option('display.max_seq_items') # if len(self) > max_seq_items: # space = "\n%s" % (' ' * (len(klass) + 1)) return " " def _formatter_func(self): """ Return the formatter function. """ return default_pprint def _format_data(self, name=None) -> str_t: """ Return the formatted data as a unicode string. """ # do we want to justify (only do so for non-objects) is_justify = True if self.inferred_type == "string": is_justify = False elif self.inferred_type == "categorical": self = cast("CategoricalIndex", self) if is_object_dtype(self.categories): is_justify = False return format_object_summary( self, self._formatter_func, is_justify=is_justify, name=name, line_break_each_value=self._is_multi, ) def _format_attrs(self) -> list[tuple[str_t, str_t | int | bool | None]]: """ Return a list of tuples of the (attr,formatted_value). """ attrs: list[tuple[str_t, str_t | int | bool | None]] = [] if not self._is_multi: attrs.append(("dtype", f"'{self.dtype}'")) if self.name is not None: attrs.append(("name", default_pprint(self.name))) elif self._is_multi and any(x is not None for x in self.names): attrs.append(("names", default_pprint(self.names))) max_seq_items = get_option("display.max_seq_items") or len(self) if len(self) > max_seq_items: attrs.append(("length", len(self))) return attrs def _get_level_names(self) -> Hashable | Sequence[Hashable]: """ Return a name or list of names with None replaced by the level number. """ if self._is_multi: return [ level if name is None else name for level, name in enumerate(self.names) ] else: return 0 if self.name is None else self.name def _mpl_repr(self) -> np.ndarray: # how to represent ourselves to matplotlib if isinstance(self.dtype, np.dtype) and self.dtype.kind != "M": return cast(np.ndarray, self.values) return self.astype(object, copy=False)._values def format( self, name: bool = False, formatter: Callable | None = None, na_rep: str_t = "NaN", ) -> list[str_t]: """ Render a string representation of the Index. 
""" header = [] if name: header.append( pprint_thing(self.name, escape_chars=("\t", "\r", "\n")) if self.name is not None else "" ) if formatter is not None: return header + list(self.map(formatter)) return self._format_with_header(header, na_rep=na_rep) def _format_with_header(self, header: list[str_t], na_rep: str_t) -> list[str_t]: from pandas.io.formats.format import format_array values = self._values if is_object_dtype(values.dtype): values = cast(np.ndarray, values) values = lib.maybe_convert_objects(values, safe=True) result = [pprint_thing(x, escape_chars=("\t", "\r", "\n")) for x in values] # could have nans mask = is_float_nan(values) if mask.any(): result_arr = np.array(result) result_arr[mask] = na_rep result = result_arr.tolist() else: result = trim_front(format_array(values, None, justify="left")) return header + result def _format_native_types( self, *, na_rep: str_t = "", decimal: str_t = ".", float_format=None, date_format=None, quoting=None, ) -> npt.NDArray[np.object_]: """ Actually format specific types of the index. """ from pandas.io.formats.format import FloatArrayFormatter if is_float_dtype(self.dtype) and not is_extension_array_dtype(self.dtype): formatter = FloatArrayFormatter( self._values, na_rep=na_rep, float_format=float_format, decimal=decimal, quoting=quoting, fixed_width=False, ) return formatter.get_result_as_array() mask = isna(self) if not is_object_dtype(self) and not quoting: values = np.asarray(self).astype(str) else: values = np.array(self, dtype=object, copy=True) values[mask] = na_rep return values def _summary(self, name=None) -> str_t: """ Return a summarized representation. Parameters ---------- name : str name to use in the summary representation Returns ------- String with a summarized representation of the index """ if len(self) > 0: head = self[0] if hasattr(head, "format") and not isinstance(head, str): head = head.format() elif needs_i8_conversion(self.dtype): # e.g. Timedelta, display as values, not quoted head = self._formatter_func(head).replace("'", "") tail = self[-1] if hasattr(tail, "format") and not isinstance(tail, str): tail = tail.format() elif needs_i8_conversion(self.dtype): # e.g. Timedelta, display as values, not quoted tail = self._formatter_func(tail).replace("'", "") index_summary = f", {head} to {tail}" else: index_summary = "" if name is None: name = type(self).__name__ return f"{name}: {len(self)} entries{index_summary}" # -------------------------------------------------------------------- # Conversion Methods def to_flat_index(self: _IndexT) -> _IndexT: """ Identity method. This is implemented for compatibility with subclass implementations when chaining. Returns ------- pd.Index Caller. See Also -------- MultiIndex.to_flat_index : Subclass implementation. """ return self def to_series(self, index=None, name: Hashable = None) -> Series: """ Create a Series with both index and values equal to the index keys. Useful with map for returning an indexer based on an index. Parameters ---------- index : Index, optional Index of resulting Series. If None, defaults to original index. name : str, optional Name of resulting Series. If None, defaults to name of original index. Returns ------- Series The dtype will be based on the type of the Index values. See Also -------- Index.to_frame : Convert an Index to a DataFrame. Series.to_frame : Convert Series to DataFrame. Examples -------- >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal') By default, the original Index and original name is reused. 
>>> idx.to_series() animal Ant Ant Bear Bear Cow Cow Name: animal, dtype: object To enforce a new Index, specify new labels to ``index``: >>> idx.to_series(index=[0, 1, 2]) 0 Ant 1 Bear 2 Cow Name: animal, dtype: object To override the name of the resulting column, specify `name`: >>> idx.to_series(name='zoo') animal Ant Ant Bear Bear Cow Cow Name: zoo, dtype: object """ from pandas import Series if index is None: index = self._view() if name is None: name = self.name return Series(self._values.copy(), index=index, name=name) def to_frame( self, index: bool = True, name: Hashable = lib.no_default ) -> DataFrame: """ Create a DataFrame with a column containing the Index. Parameters ---------- index : bool, default True Set the index of the returned DataFrame as the original Index. name : object, defaults to index.name The passed name should substitute for the index name (if it has one). Returns ------- DataFrame DataFrame containing the original Index data. See Also -------- Index.to_series : Convert an Index to a Series. Series.to_frame : Convert Series to DataFrame. Examples -------- >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal') >>> idx.to_frame() animal animal Ant Ant Bear Bear Cow Cow By default, the original Index is reused. To enforce a new Index: >>> idx.to_frame(index=False) animal 0 Ant 1 Bear 2 Cow To override the name of the resulting column, specify `name`: >>> idx.to_frame(index=False, name='zoo') zoo 0 Ant 1 Bear 2 Cow """ from pandas import DataFrame if name is lib.no_default: name = self._get_level_names() result = DataFrame({name: self._values.copy()}) if index: result.index = self return result # -------------------------------------------------------------------- # Name-Centric Methods def name(self) -> Hashable: """ Return Index or MultiIndex name. """ return self._name def name(self, value: Hashable) -> None: if self._no_setting_name: # Used in MultiIndex.levels to avoid silently ignoring name updates. raise RuntimeError( "Cannot set name on a level of a MultiIndex. Use " "'MultiIndex.set_names' instead." ) maybe_extract_name(value, None, type(self)) self._name = value def _validate_names( self, name=None, names=None, deep: bool = False ) -> list[Hashable]: """ Handles the quirks of having a singular 'name' parameter for general Index and plural 'names' parameter for MultiIndex. """ from copy import deepcopy if names is not None and name is not None: raise TypeError("Can only provide one of `names` and `name`") if names is None and name is None: new_names = deepcopy(self.names) if deep else self.names elif names is not None: if not is_list_like(names): raise TypeError("Must pass list-like as `names`.") new_names = names elif not is_list_like(name): new_names = [name] else: new_names = name if len(new_names) != len(self.names): raise ValueError( f"Length of new names must be {len(self.names)}, got {len(new_names)}" ) # All items in 'new_names' need to be hashable validate_all_hashable(*new_names, error_name=f"{type(self).__name__}.name") return new_names def _get_default_index_names( self, names: Hashable | Sequence[Hashable] | None = None, default=None ) -> list[Hashable]: """ Get names of index. Parameters ---------- names : int, str or 1-dimensional list, default None Index names to set. default : str Default name of index. 
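Returns
-------
list of Hashable
    The resolved names: ``names`` normalized to a list when provided, otherwise the existing name(s), with ``default`` filled in for an unnamed index.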
Raises ------ TypeError if names not str or list-like """ from pandas.core.indexes.multi import MultiIndex if names is not None: if isinstance(names, (int, str)): names = [names] if not isinstance(names, list) and names is not None: raise ValueError("Index names must be str or 1-dimensional list") if not names: if isinstance(self, MultiIndex): names = com.fill_missing_names(self.names) else: names = [default] if self.name is None else [self.name] return names def _get_names(self) -> FrozenList: return FrozenList((self.name,)) def _set_names(self, values, *, level=None) -> None: """ Set new names on index. Each name has to be a hashable type. Parameters ---------- values : str or sequence name(s) to set level : int, level name, or sequence of int/level names (default None) If the index is a MultiIndex (hierarchical), level(s) to set (None for all levels). Otherwise level must be None Raises ------ TypeError if each name is not hashable. """ if not is_list_like(values): raise ValueError("Names must be a list-like") if len(values) != 1: raise ValueError(f"Length of new names must be 1, got {len(values)}") # GH 20527 # All items in 'name' need to be hashable: validate_all_hashable(*values, error_name=f"{type(self).__name__}.name") self._name = values[0] names = property(fset=_set_names, fget=_get_names) def set_names( self: _IndexT, names, *, level=..., inplace: Literal[False] = ... ) -> _IndexT: ... def set_names(self, names, *, level=..., inplace: Literal[True]) -> None: ... def set_names( self: _IndexT, names, *, level=..., inplace: bool = ... ) -> _IndexT | None: ... def set_names( self: _IndexT, names, *, level=None, inplace: bool = False ) -> _IndexT | None: """ Set Index or MultiIndex name. Able to set new names partially and by level. Parameters ---------- names : label or list of label or dict-like for MultiIndex Name(s) to set. .. versionchanged:: 1.3.0 level : int, label or list of int or label, optional If the index is a MultiIndex and names is not dict-like, level(s) to set (None for all levels). Otherwise level must be None. .. versionchanged:: 1.3.0 inplace : bool, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index or None The same type as the caller or None if ``inplace=True``. See Also -------- Index.rename : Able to set new names without level. Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx Index([1, 2, 3, 4], dtype='int64') >>> idx.set_names('quarter') Index([1, 2, 3, 4], dtype='int64', name='quarter') >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]]) >>> idx MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], ) >>> idx = idx.set_names(['kind', 'year']) >>> idx.set_names('species', level=0) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['species', 'year']) When renaming levels with a dict, levels can not be passed. 
>>> idx.set_names({'kind': 'snake'}) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['snake', 'year']) """ if level is not None and not isinstance(self, ABCMultiIndex): raise ValueError("Level must be None for non-MultiIndex") if level is not None and not is_list_like(level) and is_list_like(names): raise TypeError("Names must be a string when a single level is provided.") if not is_list_like(names) and level is None and self.nlevels > 1: raise TypeError("Must pass list-like as `names`.") if is_dict_like(names) and not isinstance(self, ABCMultiIndex): raise TypeError("Can only pass dict-like as `names` for MultiIndex.") if is_dict_like(names) and level is not None: raise TypeError("Can not pass level for dictlike `names`.") if isinstance(self, ABCMultiIndex) and is_dict_like(names) and level is None: # Transform dict to list of new names and corresponding levels level, names_adjusted = [], [] for i, name in enumerate(self.names): if name in names.keys(): level.append(i) names_adjusted.append(names[name]) names = names_adjusted if not is_list_like(names): names = [names] if level is not None and not is_list_like(level): level = [level] if inplace: idx = self else: idx = self._view() idx._set_names(names, level=level) if not inplace: return idx return None def rename(self, name, inplace: bool = False): """ Alter Index or MultiIndex name. Able to set new names without level. Defaults to returning new index. Length of names must match number of levels in MultiIndex. Parameters ---------- name : label or list of labels Name(s) to set. inplace : bool, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index or None The same type as the caller or None if ``inplace=True``. See Also -------- Index.set_names : Able to set new names partially and by level. Examples -------- >>> idx = pd.Index(['A', 'C', 'A', 'B'], name='score') >>> idx.rename('grade') Index(['A', 'C', 'A', 'B'], dtype='object', name='grade') >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]], ... names=['kind', 'year']) >>> idx MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['kind', 'year']) >>> idx.rename(['species', 'year']) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['species', 'year']) >>> idx.rename('species') Traceback (most recent call last): TypeError: Must pass list-like as `names`. """ return self.set_names([name], inplace=inplace) # -------------------------------------------------------------------- # Level-Centric Methods def nlevels(self) -> int: """ Number of levels. """ return 1 def _sort_levels_monotonic(self: _IndexT) -> _IndexT: """ Compat with MultiIndex. """ return self def _validate_index_level(self, level) -> None: """ Validate index level. For single-level Index getting level number is a no-op, but some verification must be done like in MultiIndex. 
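For illustration (hypothetical values; the method simply returns None when the level is valid):

>>> idx = pd.Index(["a", "b"], name="letters")
>>> idx._validate_index_level(0)          # valid: the only level
>>> idx._validate_index_level("letters")  # valid: matches the name
>>> idx._validate_index_level(1)  # doctest: +SKIP
Traceback (most recent call last):
IndexError: Too many levels: Index has only 1 level, not 2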
""" if isinstance(level, int): if level < 0 and level != -1: raise IndexError( "Too many levels: Index has only 1 level, " f"{level} is not a valid level number" ) if level > 0: raise IndexError( f"Too many levels: Index has only 1 level, not {level + 1}" ) elif level != self.name: raise KeyError( f"Requested level ({level}) does not match index name ({self.name})" ) def _get_level_number(self, level) -> int: self._validate_index_level(level) return 0 def sortlevel( self, level=None, ascending: bool | list[bool] = True, sort_remaining=None ): """ For internal compatibility with the Index API. Sort the Index. This is for compat with MultiIndex Parameters ---------- ascending : bool, default True False to sort in descending order level, sort_remaining are compat parameters Returns ------- Index """ if not isinstance(ascending, (list, bool)): raise TypeError( "ascending must be a single bool value or" "a list of bool values of length 1" ) if isinstance(ascending, list): if len(ascending) != 1: raise TypeError("ascending must be a list of bool values of length 1") ascending = ascending[0] if not isinstance(ascending, bool): raise TypeError("ascending must be a bool value") return self.sort_values(return_indexer=True, ascending=ascending) def _get_level_values(self, level) -> Index: """ Return an Index of values for requested level. This is primarily useful to get an individual level of values from a MultiIndex, but is provided on Index as well for compatibility. Parameters ---------- level : int or str It is either the integer position or the name of the level. Returns ------- Index Calling object, as there is only one level in the Index. See Also -------- MultiIndex.get_level_values : Get values for a level of a MultiIndex. Notes ----- For Index, level should be 0, since there are no multiple levels. Examples -------- >>> idx = pd.Index(list('abc')) >>> idx Index(['a', 'b', 'c'], dtype='object') Get level values by supplying `level` as integer: >>> idx.get_level_values(0) Index(['a', 'b', 'c'], dtype='object') """ self._validate_index_level(level) return self get_level_values = _get_level_values def droplevel(self, level: IndexLabel = 0): """ Return index with requested level(s) removed. If resulting index has only 1 level left, the result will be of Index type, not MultiIndex. The original index is not modified inplace. Parameters ---------- level : int, str, or list-like, default 0 If a string is given, must be the name of a level If list-like, elements must be names or indexes of levels. Returns ------- Index or MultiIndex Examples -------- >>> mi = pd.MultiIndex.from_arrays( ... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z']) >>> mi MultiIndex([(1, 3, 5), (2, 4, 6)], names=['x', 'y', 'z']) >>> mi.droplevel() MultiIndex([(3, 5), (4, 6)], names=['y', 'z']) >>> mi.droplevel(2) MultiIndex([(1, 3), (2, 4)], names=['x', 'y']) >>> mi.droplevel('z') MultiIndex([(1, 3), (2, 4)], names=['x', 'y']) >>> mi.droplevel(['x', 'y']) Index([5, 6], dtype='int64', name='z') """ if not isinstance(level, (tuple, list)): level = [level] levnums = sorted(self._get_level_number(lev) for lev in level)[::-1] return self._drop_level_numbers(levnums) def _drop_level_numbers(self, levnums: list[int]): """ Drop MultiIndex levels by level _number_, not name. """ if not levnums and not isinstance(self, ABCMultiIndex): return self if len(levnums) >= self.nlevels: raise ValueError( f"Cannot remove {len(levnums)} levels from an index with " f"{self.nlevels} levels: at least one level must be left." 
) # The two checks above guarantee that here self is a MultiIndex self = cast("MultiIndex", self) new_levels = list(self.levels) new_codes = list(self.codes) new_names = list(self.names) for i in levnums: new_levels.pop(i) new_codes.pop(i) new_names.pop(i) if len(new_levels) == 1: lev = new_levels[0] if len(lev) == 0: # If lev is empty, lev.take will fail GH#42055 if len(new_codes[0]) == 0: # GH#45230 preserve RangeIndex here # see test_reset_index_empty_rangeindex result = lev[:0] else: res_values = algos.take(lev._values, new_codes[0], allow_fill=True) # _constructor instead of type(lev) for RangeIndex compat GH#35230 result = lev._constructor._simple_new(res_values, name=new_names[0]) else: # set nan if needed mask = new_codes[0] == -1 result = new_levels[0].take(new_codes[0]) if mask.any(): result = result.putmask(mask, np.nan) result._name = new_names[0] return result else: from pandas.core.indexes.multi import MultiIndex return MultiIndex( levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False, ) # -------------------------------------------------------------------- # Introspection Methods def _can_hold_na(self) -> bool: if isinstance(self.dtype, ExtensionDtype): if isinstance(self.dtype, IntervalDtype): # FIXME(GH#45720): this is inaccurate for integer-backed # IntervalArray, but without it other.categories.take raises # in IntervalArray._cmp_method return True return self.dtype._can_hold_na if self.dtype.kind in ["i", "u", "b"]: return False return True def is_monotonic_increasing(self) -> bool: """ Return a boolean if the values are equal or increasing. Returns ------- bool See Also -------- Index.is_monotonic_decreasing : Check if the values are equal or decreasing. Examples -------- >>> pd.Index([1, 2, 3]).is_monotonic_increasing True >>> pd.Index([1, 2, 2]).is_monotonic_increasing True >>> pd.Index([1, 3, 2]).is_monotonic_increasing False """ return self._engine.is_monotonic_increasing def is_monotonic_decreasing(self) -> bool: """ Return a boolean if the values are equal or decreasing. Returns ------- bool See Also -------- Index.is_monotonic_increasing : Check if the values are equal or increasing. Examples -------- >>> pd.Index([3, 2, 1]).is_monotonic_decreasing True >>> pd.Index([3, 2, 2]).is_monotonic_decreasing True >>> pd.Index([3, 1, 2]).is_monotonic_decreasing False """ return self._engine.is_monotonic_decreasing def _is_strictly_monotonic_increasing(self) -> bool: """ Return if the index is strictly monotonic increasing (only increasing) values. Examples -------- >>> Index([1, 2, 3])._is_strictly_monotonic_increasing True >>> Index([1, 2, 2])._is_strictly_monotonic_increasing False >>> Index([1, 3, 2])._is_strictly_monotonic_increasing False """ return self.is_unique and self.is_monotonic_increasing def _is_strictly_monotonic_decreasing(self) -> bool: """ Return if the index is strictly monotonic decreasing (only decreasing) values. Examples -------- >>> Index([3, 2, 1])._is_strictly_monotonic_decreasing True >>> Index([3, 2, 2])._is_strictly_monotonic_decreasing False >>> Index([3, 1, 2])._is_strictly_monotonic_decreasing False """ return self.is_unique and self.is_monotonic_decreasing def is_unique(self) -> bool: """ Return if the index has unique values. Returns ------- bool See Also -------- Index.has_duplicates : Inverse method that checks if it has duplicate values. 
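Notes
-----
The check is delegated to the underlying index engine which, as an implementation detail rather than a documented guarantee, caches the result, so repeated lookups do not re-scan the values.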
Examples -------- >>> idx = pd.Index([1, 5, 7, 7]) >>> idx.is_unique False >>> idx = pd.Index([1, 5, 7]) >>> idx.is_unique True >>> idx = pd.Index(["Watermelon", "Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.is_unique False >>> idx = pd.Index(["Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.is_unique True """ return self._engine.is_unique def has_duplicates(self) -> bool: """ Check if the Index has duplicate values. Returns ------- bool Whether or not the Index has duplicate values. See Also -------- Index.is_unique : Inverse method that checks if it has unique values. Examples -------- >>> idx = pd.Index([1, 5, 7, 7]) >>> idx.has_duplicates True >>> idx = pd.Index([1, 5, 7]) >>> idx.has_duplicates False >>> idx = pd.Index(["Watermelon", "Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.has_duplicates True >>> idx = pd.Index(["Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.has_duplicates False """ return not self.is_unique def is_boolean(self) -> bool: """ Check if the Index only consists of booleans. .. deprecated:: 2.0.0 Use `pandas.api.types.is_bool_dtype` instead. Returns ------- bool Whether or not the Index only consists of booleans. See Also -------- is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index([True, False, True]) >>> idx.is_boolean() # doctest: +SKIP True >>> idx = pd.Index(["True", "False", "True"]) >>> idx.is_boolean() # doctest: +SKIP False >>> idx = pd.Index([True, False, "True"]) >>> idx.is_boolean() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_boolean is deprecated. " "Use pandas.api.types.is_bool_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["boolean"] def is_integer(self) -> bool: """ Check if the Index only consists of integers. .. deprecated:: 2.0.0 Use `pandas.api.types.is_integer_dtype` instead. Returns ------- bool Whether or not the Index only consists of integers. See Also -------- is_boolean : Check if the Index only consists of booleans (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx.is_integer() # doctest: +SKIP True >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_integer() # doctest: +SKIP False >>> idx = pd.Index(["Apple", "Mango", "Watermelon"]) >>> idx.is_integer() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_integer is deprecated. " "Use pandas.api.types.is_integer_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["integer"] def is_floating(self) -> bool: """ Check if the Index is a floating type. .. deprecated:: 2.0.0 Use `pandas.api.types.is_float_dtype` instead. The Index may consist of only floats, NaNs, or a mix of floats, integers, or NaNs.
Returns ------- bool Whether or not the Index only consists of floats, NaNs, or a mix of floats, integers, or NaNs. See Also -------- is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_floating() # doctest: +SKIP True >>> idx = pd.Index([1.0, 2.0, np.nan, 4.0]) >>> idx.is_floating() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4, np.nan]) >>> idx.is_floating() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4]) >>> idx.is_floating() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_floating is deprecated. " "Use pandas.api.types.is_float_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["floating", "mixed-integer-float", "integer-na"] def is_numeric(self) -> bool: """ Check if the Index only consists of numeric data. .. deprecated:: 2.0.0 Use `pandas.api.types.is_numeric_dtype` instead. Returns ------- bool Whether or not the Index only consists of numeric data. See Also -------- is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_object : Check if the Index is of the object dtype (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4.0]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4.0, np.nan]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4.0, np.nan, "Apple"]) >>> idx.is_numeric() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_numeric is deprecated. " "Use pandas.api.types.is_any_real_numeric_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["integer", "floating"] def is_object(self) -> bool: """ Check if the Index is of the object dtype. .. deprecated:: 2.0.0 Use `pandas.api.types.is_object_dtype` instead. Returns ------- bool Whether or not the Index is of the object dtype. See Also -------- is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index(["Apple", "Mango", "Watermelon"]) >>> idx.is_object() # doctest: +SKIP True >>> idx = pd.Index(["Apple", "Mango", 2.0]) >>> idx.is_object() # doctest: +SKIP True >>> idx = pd.Index(["Watermelon", "Orange", "Apple", ...
"Watermelon"]).astype("category") >>> idx.is_object() # doctest: +SKIP False >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_object() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_object is deprecated." "Use pandas.api.types.is_object_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return is_object_dtype(self.dtype) def is_categorical(self) -> bool: """ Check if the Index holds categorical data. .. deprecated:: 2.0.0 Use :meth:`pandas.api.types.is_categorical_dtype` instead. Returns ------- bool True if the Index is categorical. See Also -------- CategoricalIndex : Index for categorical data. is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index(["Watermelon", "Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.is_categorical() # doctest: +SKIP True >>> idx = pd.Index([1, 3, 5, 7]) >>> idx.is_categorical() # doctest: +SKIP False >>> s = pd.Series(["Peter", "Victor", "Elisabeth", "Mar"]) >>> s 0 Peter 1 Victor 2 Elisabeth 3 Mar dtype: object >>> s.index.is_categorical() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_categorical is deprecated." "Use pandas.api.types.is_categorical_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["categorical"] def is_interval(self) -> bool: """ Check if the Index holds Interval objects. .. deprecated:: 2.0.0 Use `pandas.api.types.is_interval_dtype` instead. Returns ------- bool Whether or not the Index holds Interval objects. See Also -------- IntervalIndex : Index for Interval objects. is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). Examples -------- >>> idx = pd.Index([pd.Interval(left=0, right=5), ... pd.Interval(left=5, right=10)]) >>> idx.is_interval() # doctest: +SKIP True >>> idx = pd.Index([1, 3, 5, 7]) >>> idx.is_interval() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_interval is deprecated." "Use pandas.api.types.is_interval_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["interval"] def _holds_integer(self) -> bool: """ Whether the type is an integer type. """ return self.inferred_type in ["integer", "mixed-integer"] def holds_integer(self) -> bool: """ Whether the type is an integer type. .. deprecated:: 2.0.0 Use `pandas.api.types.infer_dtype` instead """ warnings.warn( f"{type(self).__name__}.holds_integer is deprecated. " "Use pandas.api.types.infer_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self._holds_integer() def inferred_type(self) -> str_t: """ Return a string of the type inferred from the values. """ return lib.infer_dtype(self._values, skipna=False) def _is_all_dates(self) -> bool: """ Whether or not the index values only consist of dates. 
""" if needs_i8_conversion(self.dtype): return True elif self.dtype != _dtype_obj: # TODO(ExtensionIndex): 3rd party EA might override? # Note: this includes IntervalIndex, even when the left/right # contain datetime-like objects. return False elif self._is_multi: return False return is_datetime_array(ensure_object(self._values)) def _is_multi(self) -> bool: """ Cached check equivalent to isinstance(self, MultiIndex) """ return isinstance(self, ABCMultiIndex) # -------------------------------------------------------------------- # Pickle Methods def __reduce__(self): d = {"data": self._data, "name": self.name} return _new_Index, (type(self), d), None # -------------------------------------------------------------------- # Null Handling Methods def _na_value(self): """The expected NA value to use with this index.""" dtype = self.dtype if isinstance(dtype, np.dtype): if dtype.kind in ["m", "M"]: return NaT return np.nan return dtype.na_value def _isnan(self) -> npt.NDArray[np.bool_]: """ Return if each value is NaN. """ if self._can_hold_na: return isna(self) else: # shouldn't reach to this condition by checking hasnans beforehand values = np.empty(len(self), dtype=np.bool_) values.fill(False) return values def hasnans(self) -> bool: """ Return True if there are any NaNs. Enables various performance speedups. Returns ------- bool """ if self._can_hold_na: return bool(self._isnan.any()) else: return False def isna(self) -> npt.NDArray[np.bool_]: """ Detect missing values. Return a boolean same-sized object indicating if the values are NA. NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`pd.NaT`, get mapped to ``True`` values. Everything else get mapped to ``False`` values. Characters such as empty strings `''` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). Returns ------- numpy.ndarray[bool] A boolean array of whether my values are NA. See Also -------- Index.notna : Boolean inverse of isna. Index.dropna : Omit entries with missing values. isna : Top-level isna. Series.isna : Detect missing values in Series object. Examples -------- Show which entries in a pandas.Index are NA. The result is an array. >>> idx = pd.Index([5.2, 6.0, np.NaN]) >>> idx Index([5.2, 6.0, nan], dtype='float64') >>> idx.isna() array([False, False, True]) Empty strings are not considered NA values. None is considered an NA value. >>> idx = pd.Index(['black', '', 'red', None]) >>> idx Index(['black', '', 'red', None], dtype='object') >>> idx.isna() array([False, False, False, True]) For datetimes, `NaT` (Not a Time) is considered as an NA value. >>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'), ... pd.Timestamp(''), None, pd.NaT]) >>> idx DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'], dtype='datetime64[ns]', freq=None) >>> idx.isna() array([False, True, True, True]) """ return self._isnan isnull = isna def notna(self) -> npt.NDArray[np.bool_]: """ Detect existing (non-missing) values. Return a boolean same-sized object indicating if the values are not NA. Non-missing values get mapped to ``True``. Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). NA values, such as None or :attr:`numpy.NaN`, get mapped to ``False`` values. Returns ------- numpy.ndarray[bool] Boolean array to indicate which entries are not NA. See Also -------- Index.notnull : Alias of notna. Index.isna: Inverse of notna. notna : Top-level notna. 
Examples -------- Show which entries in an Index are not NA. The result is an array. >>> idx = pd.Index([5.2, 6.0, np.NaN]) >>> idx Index([5.2, 6.0, nan], dtype='float64') >>> idx.notna() array([ True, True, False]) Empty strings are not considered NA values. None is considered an NA value. >>> idx = pd.Index(['black', '', 'red', None]) >>> idx Index(['black', '', 'red', None], dtype='object') >>> idx.notna() array([ True, True, True, False]) """ return ~self.isna() notnull = notna def fillna(self, value=None, downcast=None): """ Fill NA/NaN values with the specified value. Parameters ---------- value : scalar Scalar value to use to fill holes (e.g. 0). This value cannot be a list-like. downcast : dict, default None A dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate equal type (e.g. float64 to int64 if possible). Returns ------- Index See Also -------- DataFrame.fillna : Fill NaN values of a DataFrame. Series.fillna : Fill NaN values of a Series. """ value = self._require_scalar(value) if self.hasnans: result = self.putmask(self._isnan, value) if downcast is None: # no need to care metadata other than name # because it can't have freq if it has NaTs # _with_infer needed for test_fillna_categorical return Index._with_infer(result, name=self.name) raise NotImplementedError( f"{type(self).__name__}.fillna does not support 'downcast' " "argument values other than 'None'." ) return self._view() def dropna(self: _IndexT, how: AnyAll = "any") -> _IndexT: """ Return Index without NA/NaN values. Parameters ---------- how : {'any', 'all'}, default 'any' If the Index is a MultiIndex, drop the value when any or all levels are NaN. Returns ------- Index """ if how not in ("any", "all"): raise ValueError(f"invalid how option: {how}") if self.hasnans: res_values = self._values[~self._isnan] return type(self)._simple_new(res_values, name=self.name) return self._view() # -------------------------------------------------------------------- # Uniqueness Methods def unique(self: _IndexT, level: Hashable | None = None) -> _IndexT: """ Return unique values in the index. Unique values are returned in order of appearance; this does NOT sort. Parameters ---------- level : int or hashable, optional Only return values from specified level (for MultiIndex). If int, gets the level by integer position, else by level name. Returns ------- Index See Also -------- unique : Numpy array of unique values in that column. Series.unique : Return unique values of Series object. """ if level is not None: self._validate_index_level(level) if self.is_unique: return self._view() result = super().unique() return self._shallow_copy(result) def drop_duplicates(self: _IndexT, *, keep: DropKeep = "first") -> _IndexT: """ Return Index with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. Returns ------- Index See Also -------- Series.drop_duplicates : Equivalent method on Series. DataFrame.drop_duplicates : Equivalent method on DataFrame. Index.duplicated : Related method on Index, indicating duplicate Index values. Examples -------- Generate a pandas.Index with duplicate values. >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo']) The `keep` parameter controls which duplicate values are removed.
The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> idx.drop_duplicates(keep='first') Index(['lama', 'cow', 'beetle', 'hippo'], dtype='object') The value 'last' keeps the last occurrence for each set of duplicated entries. >>> idx.drop_duplicates(keep='last') Index(['cow', 'beetle', 'lama', 'hippo'], dtype='object') The value ``False`` discards all sets of duplicated entries. >>> idx.drop_duplicates(keep=False) Index(['cow', 'beetle', 'hippo'], dtype='object') """ if self.is_unique: return self._view() return super().drop_duplicates(keep=keep) def duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]: """ Indicate duplicate index values. Duplicated values are indicated as ``True`` values in the resulting array. Either all duplicates, all except the first, or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' Determines which occurrence within each set of duplicates, if any, is left unmarked (``False``). - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- np.ndarray[bool] See Also -------- Series.duplicated : Equivalent method on pandas.Series. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Index.drop_duplicates : Remove duplicate values from Index. Examples -------- By default, for each set of duplicated values, the first occurrence is set to False and all others to True: >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> idx.duplicated() array([False, False, True, False, True]) which is equivalent to >>> idx.duplicated(keep='first') array([False, False, True, False, True]) By using 'last', the last occurrence of each set of duplicated values is set to False and all others to True: >>> idx.duplicated(keep='last') array([ True, False, True, False, False]) By setting keep to ``False``, all duplicates are True: >>> idx.duplicated(keep=False) array([ True, False, True, False, True]) """ if self.is_unique: # fastpath available bc we are immutable return np.zeros(len(self), dtype=bool) return self._duplicated(keep=keep) # -------------------------------------------------------------------- # Arithmetic & Logical Methods def __iadd__(self, other): # alias for __add__ return self + other def __nonzero__(self) -> NoReturn: raise ValueError( f"The truth value of a {type(self).__name__} is ambiguous. " "Use a.empty, a.bool(), a.item(), a.any() or a.all()." ) __bool__ = __nonzero__ # -------------------------------------------------------------------- # Set Operation Methods def _get_reconciled_name_object(self, other): """ If the result of a set operation will be self, return self, unless the name changes, in which case make a shallow copy of self. """ name = get_op_result_name(self, other) if self.name is not name: return self.rename(name) return self def _validate_sort_keyword(self, sort): if sort not in [None, False, True]: raise ValueError( "The 'sort' keyword only takes the values of " f"None, True, or False; {sort} was passed." ) def _dti_setop_align_tzs(self, other: Index, setop: str_t) -> tuple[Index, Index]: """ With mismatched timezones, cast both to UTC.
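Returns
-------
tuple of (Index, Index)
    Both operands converted to UTC when each side is a tz-aware DatetimeIndex; otherwise the inputs are returned unchanged.

A hedged sketch of the effect (hypothetical values):

>>> left = pd.date_range("2020-01-01", periods=2, tz="US/Eastern")
>>> right = pd.date_range("2020-01-01", periods=2, tz="Asia/Tokyo")
>>> left, right = left._dti_setop_align_tzs(right, "union")  # doctest: +SKIP
>>> str(left.tz), str(right.tz)  # doctest: +SKIP
('UTC', 'UTC')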
""" # Caller is responsibelf or checking # `not is_dtype_equal(self.dtype, other.dtype)` if ( isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex) and self.tz is not None and other.tz is not None ): # GH#39328, GH#45357 left = self.tz_convert("UTC") right = other.tz_convert("UTC") return left, right return self, other def union(self, other, sort=None): """ Form the union of two Index objects. If the Index objects are incompatible, both Index objects will be cast to dtype('object') first. Parameters ---------- other : Index or array-like sort : bool or None, default None Whether to sort the resulting Index. * None : Sort the result, except when 1. `self` and `other` are equal. 2. `self` or `other` has length 0. 3. Some values in `self` or `other` cannot be compared. A RuntimeWarning is issued in this case. * False : do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Examples -------- Union matching dtypes >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.union(idx2) Index([1, 2, 3, 4, 5, 6], dtype='int64') Union mismatched dtypes >>> idx1 = pd.Index(['a', 'b', 'c', 'd']) >>> idx2 = pd.Index([1, 2, 3, 4]) >>> idx1.union(idx2) Index(['a', 'b', 'c', 'd', 1, 2, 3, 4], dtype='object') MultiIndex case >>> idx1 = pd.MultiIndex.from_arrays( ... [[1, 1, 2, 2], ["Red", "Blue", "Red", "Blue"]] ... ) >>> idx1 MultiIndex([(1, 'Red'), (1, 'Blue'), (2, 'Red'), (2, 'Blue')], ) >>> idx2 = pd.MultiIndex.from_arrays( ... [[3, 3, 2, 2], ["Red", "Green", "Red", "Green"]] ... ) >>> idx2 MultiIndex([(3, 'Red'), (3, 'Green'), (2, 'Red'), (2, 'Green')], ) >>> idx1.union(idx2) MultiIndex([(1, 'Blue'), (1, 'Red'), (2, 'Blue'), (2, 'Green'), (2, 'Red'), (3, 'Green'), (3, 'Red')], ) >>> idx1.union(idx2, sort=False) MultiIndex([(1, 'Red'), (1, 'Blue'), (2, 'Red'), (2, 'Blue'), (3, 'Red'), (3, 'Green'), (2, 'Green')], ) """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) if not is_dtype_equal(self.dtype, other.dtype): if ( isinstance(self, ABCMultiIndex) and not is_object_dtype(_unpack_nested_dtype(other)) and len(other) > 0 ): raise NotImplementedError( "Can only union MultiIndex with MultiIndex or Index of tuples, " "try mi.to_flat_index().union(other) instead." ) self, other = self._dti_setop_align_tzs(other, "union") dtype = self._find_common_type_compat(other) left = self.astype(dtype, copy=False) right = other.astype(dtype, copy=False) return left.union(right, sort=sort) elif not len(other) or self.equals(other): # NB: whether this (and the `if not len(self)` check below) come before # or after the is_dtype_equal check above affects the returned dtype result = self._get_reconciled_name_object(other) if sort is True: return result.sort_values() return result elif not len(self): result = other._get_reconciled_name_object(self) if sort is True: return result.sort_values() return result result = self._union(other, sort=sort) return self._wrap_setop_result(other, result) def _union(self, other: Index, sort): """ Specific union logic should go here. In subclasses, union behavior should be overwritten here rather than in `self.union`. Parameters ---------- other : Index or array-like sort : False or None, default False Whether to sort the resulting index. * False : do not sort the result. * None : sort the result, except when `self` and `other` are equal or when the values cannot be compared. 
Returns ------- Index """ lvals = self._values rvals = other._values if ( sort is None and self.is_monotonic_increasing and other.is_monotonic_increasing and not (self.has_duplicates and other.has_duplicates) and self._can_use_libjoin ): # Both are monotonic and at least one is unique, so can use outer join # (actually don't need either unique, but without this restriction # test_union_same_value_duplicated_in_both fails) try: return self._outer_indexer(other)[0] except (TypeError, IncompatibleFrequency): # incomparable objects; should only be for object dtype value_list = list(lvals) # worth making this faster? a very unusual case value_set = set(lvals) value_list.extend([x for x in rvals if x not in value_set]) # If objects are unorderable, we must have object dtype. return np.array(value_list, dtype=object) elif not other.is_unique: # other has duplicates result_dups = algos.union_with_duplicates(self, other) return _maybe_try_sort(result_dups, sort) # The rest of this method is analogous to Index._intersection_via_get_indexer # Self may have duplicates; other already checked as unique # find indexes of things in "other" that are not in "self" if self._index_as_unique: indexer = self.get_indexer(other) missing = (indexer == -1).nonzero()[0] else: missing = algos.unique1d(self.get_indexer_non_unique(other)[1]) result: Index | MultiIndex | ArrayLike if self._is_multi: # Preserve MultiIndex to avoid losing dtypes result = self.append(other.take(missing)) else: if len(missing) > 0: other_diff = rvals.take(missing) result = concat_compat((lvals, other_diff)) else: result = lvals if not self.is_monotonic_increasing or not other.is_monotonic_increasing: # if both are monotonic then result should already be sorted result = _maybe_try_sort(result, sort) return result def _wrap_setop_result(self, other: Index, result) -> Index: name = get_op_result_name(self, other) if isinstance(result, Index): if result.name != name: result = result.rename(name) else: result = self._shallow_copy(result, name=name) return result def intersection(self, other, sort: bool = False): """ Form the intersection of two Index objects. This returns a new Index with elements common to the index and `other`. Parameters ---------- other : Index or array-like sort : True, False or None, default False Whether to sort the resulting index. * None : sort the result, except when `self` and `other` are equal or when the values cannot be compared. * False : do not sort the result. * True : Sort the result (which may raise TypeError). 
Returns ------- Index Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.intersection(idx2) Index([3, 4], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) if not is_dtype_equal(self.dtype, other.dtype): self, other = self._dti_setop_align_tzs(other, "intersection") if self.equals(other): if self.has_duplicates: result = self.unique()._get_reconciled_name_object(other) else: result = self._get_reconciled_name_object(other) if sort is True: result = result.sort_values() return result if len(self) == 0 or len(other) == 0: # fastpath; we need to be careful about having commutativity if self._is_multi or other._is_multi: # _convert_can_do_setop ensures that we have both or neither # We retain self.levels return self[:0].rename(result_name) dtype = self._find_common_type_compat(other) if is_dtype_equal(self.dtype, dtype): # Slicing allows us to retain DTI/TDI.freq, RangeIndex # Note: self[:0] vs other[:0] affects # 1) which index's `freq` we get in DTI/TDI cases # This may be a historical artifact, i.e. no documented # reason for this choice. # 2) The `step` we get in RangeIndex cases if len(self) == 0: return self[:0].rename(result_name) else: return other[:0].rename(result_name) return Index([], dtype=dtype, name=result_name) elif not self._should_compare(other): # We can infer that the intersection is empty. if isinstance(self, ABCMultiIndex): return self[:0].rename(result_name) return Index([], name=result_name) elif not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) other = other.astype(dtype, copy=False) return this.intersection(other, sort=sort) result = self._intersection(other, sort=sort) return self._wrap_intersection_result(other, result) def _intersection(self, other: Index, sort: bool = False): """ intersection specialized to the case with matching dtypes. """ if ( self.is_monotonic_increasing and other.is_monotonic_increasing and self._can_use_libjoin and not isinstance(self, ABCMultiIndex) ): try: res_indexer, indexer, _ = self._inner_indexer(other) except TypeError: # non-comparable; should only be for object dtype pass else: # TODO: algos.unique1d should preserve DTA/TDA if is_numeric_dtype(self): # This is faster, because Index.unique() checks for uniqueness # before calculating the unique values. res = algos.unique1d(res_indexer) else: result = self.take(indexer) res = result.drop_duplicates() return ensure_wrapped_if_datetimelike(res) res_values = self._intersection_via_get_indexer(other, sort=sort) res_values = _maybe_try_sort(res_values, sort) return res_values def _wrap_intersection_result(self, other, result): # We will override for MultiIndex to handle empty results return self._wrap_setop_result(other, result) def _intersection_via_get_indexer( self, other: Index | MultiIndex, sort ) -> ArrayLike | MultiIndex: """ Find the intersection of two Indexes using get_indexer. Returns ------- np.ndarray or ExtensionArray The returned array will be unique. 
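Notes
-----
With ``sort=False`` the matched positions are re-sorted so that the result keeps the order of appearance in ``self``; with ``sort=None`` no ordering is done here, since the caller sorts afterwards.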
""" left_unique = self.unique() right_unique = other.unique() # even though we are unique, we need get_indexer_for for IntervalIndex indexer = left_unique.get_indexer_for(right_unique) mask = indexer != -1 taker = indexer.take(mask.nonzero()[0]) if sort is False: # sort bc we want the elements in the same order they are in self # unnecessary in the case with sort=None bc we will sort later taker = np.sort(taker) if isinstance(left_unique, ABCMultiIndex): result = left_unique.take(taker) else: result = left_unique.take(taker)._values return result def difference(self, other, sort=None): """ Return a new Index with elements of index not in `other`. This is the set difference of two Index objects. Parameters ---------- other : Index or array-like sort : bool or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Examples -------- >>> idx1 = pd.Index([2, 1, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.difference(idx2) Index([1, 2], dtype='int64') >>> idx1.difference(idx2, sort=False) Index([2, 1], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) # Note: we do NOT call _dti_setop_align_tzs here, as there # is no requirement that .difference be commutative, so it does # not cast to object. if self.equals(other): # Note: we do not (yet) sort even if sort=None GH#24959 return self[:0].rename(result_name) if len(other) == 0: # Note: we do not (yet) sort even if sort=None GH#24959 result = self.rename(result_name) if sort is True: return result.sort_values() return result if not self._should_compare(other): # Nothing matches -> difference is everything result = self.rename(result_name) if sort is True: return result.sort_values() return result result = self._difference(other, sort=sort) return self._wrap_difference_result(other, result) def _difference(self, other, sort): # overridden by RangeIndex this = self.unique() indexer = this.get_indexer_for(other) indexer = indexer.take((indexer != -1).nonzero()[0]) label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True) the_diff: MultiIndex | ArrayLike if isinstance(this, ABCMultiIndex): the_diff = this.take(label_diff) else: the_diff = this._values.take(label_diff) the_diff = _maybe_try_sort(the_diff, sort) return the_diff def _wrap_difference_result(self, other, result): # We will override for MultiIndex to handle empty results return self._wrap_setop_result(other, result) def symmetric_difference(self, other, result_name=None, sort=None): """ Compute the symmetric difference of two Index objects. Parameters ---------- other : Index or array-like result_name : str sort : bool or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Notes ----- ``symmetric_difference`` contains elements that appear in either ``idx1`` or ``idx2`` but not both. 
Equivalent to the Index created by ``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates dropped. Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([2, 3, 4, 5]) >>> idx1.symmetric_difference(idx2) Index([1, 5], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name_update = self._convert_can_do_setop(other) if result_name is None: result_name = result_name_update if not is_dtype_equal(self.dtype, other.dtype): self, other = self._dti_setop_align_tzs(other, "symmetric_difference") if not self._should_compare(other): return self.union(other, sort=sort).rename(result_name) elif not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) that = other.astype(dtype, copy=False) return this.symmetric_difference(that, sort=sort).rename(result_name) this = self.unique() other = other.unique() indexer = this.get_indexer_for(other) # {this} minus {other} common_indexer = indexer.take((indexer != -1).nonzero()[0]) left_indexer = np.setdiff1d( np.arange(this.size), common_indexer, assume_unique=True ) left_diff = this.take(left_indexer) # {other} minus {this} right_indexer = (indexer == -1).nonzero()[0] right_diff = other.take(right_indexer) res_values = left_diff.append(right_diff) result = _maybe_try_sort(res_values, sort) if not self._is_multi: return Index(result, name=result_name, dtype=res_values.dtype) else: left_diff = cast("MultiIndex", left_diff) if len(result) == 0: # result might be an Index, if other was an Index return left_diff.remove_unused_levels().set_names(result_name) return result.set_names(result_name) def _assert_can_do_setop(self, other) -> bool: if not is_list_like(other): raise TypeError("Input must be Index or array-like") return True def _convert_can_do_setop(self, other) -> tuple[Index, Hashable]: if not isinstance(other, Index): other = Index(other, name=self.name) result_name = self.name else: result_name = get_op_result_name(self, other) return other, result_name # -------------------------------------------------------------------- # Indexing Methods def get_loc(self, key): """ Get integer location, slice or boolean mask for requested label. Parameters ---------- key : label Returns ------- int if unique index, slice if monotonic index, else mask Examples -------- >>> unique_index = pd.Index(list('abc')) >>> unique_index.get_loc('b') 1 >>> monotonic_index = pd.Index(list('abbc')) >>> monotonic_index.get_loc('b') slice(1, 3, None) >>> non_monotonic_index = pd.Index(list('abcb')) >>> non_monotonic_index.get_loc('b') array([False, True, False, True]) """ casted_key = self._maybe_cast_indexer(key) try: return self._engine.get_loc(casted_key) except KeyError as err: raise KeyError(key) from err except TypeError: # If we have a listlike key, _check_indexing_error will raise # InvalidIndexError. Otherwise we fall through and re-raise # the TypeError. self._check_indexing_error(key) raise _index_shared_docs[ "get_indexer" ] = """ Compute indexer and mask for new index given the current index. The indexer should be then used as an input to ndarray.take to align the current data to the new index. Parameters ---------- target : %(target_klass)s method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional * default: exact matches only. * pad / ffill: find the PREVIOUS index value if no exact match. * backfill / bfill: use NEXT index value if no exact match * nearest: use the NEAREST index value if no exact match. 
Tied distances are broken by preferring the larger index value. limit : int, optional Maximum number of consecutive labels in ``target`` to match for inexact matches. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- np.ndarray[np.intp] Integers from 0 to n - 1 indicating that the index at these positions matches the corresponding target values. Missing values in the target are marked by -1. %(raises_section)s Notes ----- Returns -1 for unmatched values, for further explanation see the example below. Examples -------- >>> index = pd.Index(['c', 'a', 'b']) >>> index.get_indexer(['a', 'b', 'x']) array([ 1, 2, -1]) Notice that the return value is an array of locations in ``index`` and ``x`` is marked by -1, as it is not in ``index``. """ def get_indexer( self, target, method: str_t | None = None, limit: int | None = None, tolerance=None, ) -> npt.NDArray[np.intp]: method = clean_reindex_fill_method(method) orig_target = target target = self._maybe_cast_listlike_indexer(target) self._check_indexing_method(method, limit, tolerance) if not self._index_as_unique: raise InvalidIndexError(self._requires_unique_msg) if len(target) == 0: return np.array([], dtype=np.intp) if not self._should_compare(target) and not self._should_partial_index(target): # IntervalIndex get special treatment bc numeric scalars can be # matched to Interval scalars return self._get_indexer_non_comparable(target, method=method, unique=True) if is_categorical_dtype(self.dtype): # _maybe_cast_listlike_indexer ensures target has our dtype # (could improve perf by doing _should_compare check earlier?) assert is_dtype_equal(self.dtype, target.dtype) indexer = self._engine.get_indexer(target.codes) if self.hasnans and target.hasnans: # After _maybe_cast_listlike_indexer, target elements which do not # belong to some category are changed to NaNs # Mask to track actual NaN values compared to inserted NaN values # GH#45361 target_nans = isna(orig_target) loc = self.get_loc(np.nan) mask = target.isna() indexer[target_nans] = loc indexer[mask & ~target_nans] = -1 return indexer if is_categorical_dtype(target.dtype): # potential fastpath # get an indexer for unique categories then propagate to codes via take_nd # get_indexer instead of _get_indexer needed for MultiIndex cases # e.g. 
test_append_different_columns_types categories_indexer = self.get_indexer(target.categories) indexer = algos.take_nd(categories_indexer, target.codes, fill_value=-1) if (not self._is_multi and self.hasnans) and target.hasnans: # Exclude MultiIndex because hasnans raises NotImplementedError # we should only get here if we are unique, so loc is an integer # GH#41934 loc = self.get_loc(np.nan) mask = target.isna() indexer[mask] = loc return ensure_platform_int(indexer) pself, ptarget = self._maybe_promote(target) if pself is not self or ptarget is not target: return pself.get_indexer( ptarget, method=method, limit=limit, tolerance=tolerance ) if is_dtype_equal(self.dtype, target.dtype) and self.equals(target): # Only call equals if we have same dtype to avoid inference/casting return np.arange(len(target), dtype=np.intp) if not is_dtype_equal( self.dtype, target.dtype ) and not self._should_partial_index(target): # _should_partial_index e.g. IntervalIndex with numeric scalars # that can be matched to Interval scalars. dtype = self._find_common_type_compat(target) this = self.astype(dtype, copy=False) target = target.astype(dtype, copy=False) return this._get_indexer( target, method=method, limit=limit, tolerance=tolerance ) return self._get_indexer(target, method, limit, tolerance) def _get_indexer( self, target: Index, method: str_t | None = None, limit: int | None = None, tolerance=None, ) -> npt.NDArray[np.intp]: if tolerance is not None: tolerance = self._convert_tolerance(tolerance, target) if method in ["pad", "backfill"]: indexer = self._get_fill_indexer(target, method, limit, tolerance) elif method == "nearest": indexer = self._get_nearest_indexer(target, limit, tolerance) else: if target._is_multi and self._is_multi: engine = self._engine # error: Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]" # has no attribute "_extract_level_codes" tgt_values = engine._extract_level_codes( # type: ignore[union-attr] target ) else: tgt_values = target._get_engine_target() indexer = self._engine.get_indexer(tgt_values) return ensure_platform_int(indexer) def _should_partial_index(self, target: Index) -> bool: """ Should we attempt partial-matching indexing? """ if is_interval_dtype(self.dtype): if is_interval_dtype(target.dtype): return False # See https://github.com/pandas-dev/pandas/issues/47772 the commented # out code can be restored (instead of hardcoding `return True`) # once that issue is fixed # "Index" has no attribute "left" # return self.left._should_compare(target) # type: ignore[attr-defined] return True return False def _check_indexing_method( self, method: str_t | None, limit: int | None = None, tolerance=None, ) -> None: """ Raise if we have a get_indexer `method` that is not supported or valid. 
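For instance (illustrative), ``method='nearest'`` is rejected for a MultiIndex:

>>> mi = pd.MultiIndex.from_arrays([[1, 2], ["a", "b"]])
>>> mi._check_indexing_method("nearest")  # doctest: +SKIP
Traceback (most recent call last):
NotImplementedError: method='nearest' not implemented yet for MultiIndex; see GitHub issue 9365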
""" if method not in [None, "bfill", "backfill", "pad", "ffill", "nearest"]: # in practice the clean_reindex_fill_method call would raise # before we get here raise ValueError("Invalid fill method") # pragma: no cover if self._is_multi: if method == "nearest": raise NotImplementedError( "method='nearest' not implemented yet " "for MultiIndex; see GitHub issue 9365" ) if method in ("pad", "backfill"): if tolerance is not None: raise NotImplementedError( "tolerance not implemented yet for MultiIndex" ) if is_interval_dtype(self.dtype) or is_categorical_dtype(self.dtype): # GH#37871 for now this is only for IntervalIndex and CategoricalIndex if method is not None: raise NotImplementedError( f"method {method} not yet implemented for {type(self).__name__}" ) if method is None: if tolerance is not None: raise ValueError( "tolerance argument only valid if doing pad, " "backfill or nearest reindexing" ) if limit is not None: raise ValueError( "limit argument only valid if doing pad, " "backfill or nearest reindexing" ) def _convert_tolerance(self, tolerance, target: np.ndarray | Index) -> np.ndarray: # override this method on subclasses tolerance = np.asarray(tolerance) if target.size != tolerance.size and tolerance.size > 1: raise ValueError("list-like tolerance size must match target index size") elif is_numeric_dtype(self) and not np.issubdtype(tolerance.dtype, np.number): if tolerance.ndim > 0: raise ValueError( f"tolerance argument for {type(self).__name__} with dtype " f"{self.dtype} must contain numeric elements if it is list type" ) raise ValueError( f"tolerance argument for {type(self).__name__} with dtype {self.dtype} " f"must be numeric if it is a scalar: {repr(tolerance)}" ) return tolerance def _get_fill_indexer( self, target: Index, method: str_t, limit: int | None = None, tolerance=None ) -> npt.NDArray[np.intp]: if self._is_multi: # TODO: get_indexer_with_fill docstring says values must be _sorted_ # but that doesn't appear to be enforced # error: "IndexEngine" has no attribute "get_indexer_with_fill" engine = self._engine with warnings.catch_warnings(): # TODO: We need to fix this. Casting to int64 in cython warnings.filterwarnings("ignore", category=RuntimeWarning) return engine.get_indexer_with_fill( # type: ignore[union-attr] target=target._values, values=self._values, method=method, limit=limit, ) if self.is_monotonic_increasing and target.is_monotonic_increasing: target_values = target._get_engine_target() own_values = self._get_engine_target() if not isinstance(target_values, np.ndarray) or not isinstance( own_values, np.ndarray ): raise NotImplementedError if method == "pad": indexer = libalgos.pad(own_values, target_values, limit=limit) else: # i.e. "backfill" indexer = libalgos.backfill(own_values, target_values, limit=limit) else: indexer = self._get_fill_indexer_searchsorted(target, method, limit) if tolerance is not None and len(self): indexer = self._filter_indexer_tolerance(target, indexer, tolerance) return indexer def _get_fill_indexer_searchsorted( self, target: Index, method: str_t, limit: int | None = None ) -> npt.NDArray[np.intp]: """ Fallback pad/backfill get_indexer that works for monotonic decreasing indexes and non-monotonic targets. 
""" if limit is not None: raise ValueError( f"limit argument for {repr(method)} method only well-defined " "if index and target are monotonic" ) side: Literal["left", "right"] = "left" if method == "pad" else "right" # find exact matches first (this simplifies the algorithm) indexer = self.get_indexer(target) nonexact = indexer == -1 indexer[nonexact] = self._searchsorted_monotonic(target[nonexact], side) if side == "left": # searchsorted returns "indices into a sorted array such that, # if the corresponding elements in v were inserted before the # indices, the order of a would be preserved". # Thus, we need to subtract 1 to find values to the left. indexer[nonexact] -= 1 # This also mapped not found values (values of 0 from # np.searchsorted) to -1, which conveniently is also our # sentinel for missing values else: # Mark indices to the right of the largest value as not found indexer[indexer == len(self)] = -1 return indexer def _get_nearest_indexer( self, target: Index, limit: int | None, tolerance ) -> npt.NDArray[np.intp]: """ Get the indexer for the nearest index labels; requires an index with values that can be subtracted from each other (e.g., not strings or tuples). """ if not len(self): return self._get_fill_indexer(target, "pad") left_indexer = self.get_indexer(target, "pad", limit=limit) right_indexer = self.get_indexer(target, "backfill", limit=limit) left_distances = self._difference_compat(target, left_indexer) right_distances = self._difference_compat(target, right_indexer) op = operator.lt if self.is_monotonic_increasing else operator.le indexer = np.where( # error: Argument 1&2 has incompatible type "Union[ExtensionArray, # ndarray[Any, Any]]"; expected "Union[SupportsDunderLE, # SupportsDunderGE, SupportsDunderGT, SupportsDunderLT]" op(left_distances, right_distances) # type: ignore[arg-type] | (right_indexer == -1), left_indexer, right_indexer, ) if tolerance is not None: indexer = self._filter_indexer_tolerance(target, indexer, tolerance) return indexer def _filter_indexer_tolerance( self, target: Index, indexer: npt.NDArray[np.intp], tolerance, ) -> npt.NDArray[np.intp]: distance = self._difference_compat(target, indexer) return np.where(distance <= tolerance, indexer, -1) def _difference_compat( self, target: Index, indexer: npt.NDArray[np.intp] ) -> ArrayLike: # Compatibility for PeriodArray, for which __sub__ returns an ndarray[object] # of DateOffset objects, which do not support __abs__ (and would be slow # if they did) if isinstance(self.dtype, PeriodDtype): # Note: we only get here with matching dtypes own_values = cast("PeriodArray", self._data)._ndarray target_values = cast("PeriodArray", target._data)._ndarray diff = own_values[indexer] - target_values else: # error: Unsupported left operand type for - ("ExtensionArray") diff = self._values[indexer] - target._values # type: ignore[operator] return abs(diff) # -------------------------------------------------------------------- # Indexer Conversion Methods def _validate_positional_slice(self, key: slice) -> None: """ For positional indexing, a slice must have either int or None for each of start, stop, and step. """ self._validate_indexer("positional", key.start, "iloc") self._validate_indexer("positional", key.stop, "iloc") self._validate_indexer("positional", key.step, "iloc") def _convert_slice_indexer(self, key: slice, kind: str_t): """ Convert a slice indexer. By definition, these are labels unless 'iloc' is passed in. Floats are not allowed as the start, step, or stop of the slice. 
Parameters ---------- key : label of the slice bound kind : {'loc', 'getitem'} """ assert kind in ["loc", "getitem"], kind # potentially cast the bounds to integers start, stop, step = key.start, key.stop, key.step # TODO(GH#50617): once Series.__[gs]etitem__ is removed we should be able # to simplify this. if isinstance(self.dtype, np.dtype) and is_float_dtype(self.dtype): # We always treat __getitem__ slicing as label-based # translate to locations return self.slice_indexer(start, stop, step) # figure out if this is a positional indexer def is_int(v): return v is None or is_integer(v) is_index_slice = is_int(start) and is_int(stop) and is_int(step) # special case for interval_dtype bc we do not do partial-indexing # on integer Intervals when slicing # TODO: write this in terms of e.g. should_partial_index? ints_are_positional = self._should_fallback_to_positional or is_interval_dtype( self.dtype ) is_positional = is_index_slice and ints_are_positional if kind == "getitem": # called from the getitem slicers, validate that we are in fact integers if is_integer_dtype(self.dtype) or is_index_slice: # Note: these checks are redundant if we know is_index_slice self._validate_indexer("slice", key.start, "getitem") self._validate_indexer("slice", key.stop, "getitem") self._validate_indexer("slice", key.step, "getitem") return key # convert the slice to an indexer here # if we are mixed and have integers if is_positional: try: # Validate start & stop if start is not None: self.get_loc(start) if stop is not None: self.get_loc(stop) is_positional = False except KeyError: pass if com.is_null_slice(key): # It doesn't matter if we are positional or label based indexer = key elif is_positional: if kind == "loc": # GH#16121, GH#24612, GH#31810 raise TypeError( "Slicing a positional slice with .loc is not allowed, " "Use .loc with labels or .iloc with positions instead.", ) indexer = key else: indexer = self.slice_indexer(start, stop, step) return indexer def _raise_invalid_indexer( self, form: str_t, key, reraise: lib.NoDefault | None | Exception = lib.no_default, ) -> None: """ Raise consistent invalid indexer message. """ msg = ( f"cannot do {form} indexing on {type(self).__name__} with these " f"indexers [{key}] of type {type(key).__name__}" ) if reraise is not lib.no_default: raise TypeError(msg) from reraise raise TypeError(msg) # -------------------------------------------------------------------- # Reindex Methods def _validate_can_reindex(self, indexer: np.ndarray) -> None: """ Check if we are allowing reindexing with this particular indexer. Parameters ---------- indexer : an integer ndarray Raises ------ ValueError if its a duplicate axis """ # trying to reindex on an axis with duplicates if not self._index_as_unique and len(indexer): raise ValueError("cannot reindex on an axis with duplicate labels") def reindex( self, target, method=None, level=None, limit=None, tolerance=None ) -> tuple[Index, npt.NDArray[np.intp] | None]: """ Create index with target's values. Parameters ---------- target : an iterable method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional * default: exact matches only. * pad / ffill: find the PREVIOUS index value if no exact match. * backfill / bfill: use NEXT index value if no exact match * nearest: use the NEAREST index value if no exact match. Tied distances are broken by preferring the larger index value. level : int, optional Level of multiindex. limit : int, optional Maximum number of consecutive labels in ``target`` to match for inexact matches. 
tolerance : int or float, optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- new_index : pd.Index Resulting index. indexer : np.ndarray[np.intp] or None Indices of output values in original index. Raises ------ TypeError If ``method`` passed along with ``level``. ValueError If non-unique multi-index ValueError If non-unique index and ``method`` or ``limit`` passed. See Also -------- Series.reindex : Conform Series to new index with optional filling logic. DataFrame.reindex : Conform DataFrame to new index with optional filling logic. Examples -------- >>> idx = pd.Index(['car', 'bike', 'train', 'tractor']) >>> idx Index(['car', 'bike', 'train', 'tractor'], dtype='object') >>> idx.reindex(['car', 'bike']) (Index(['car', 'bike'], dtype='object'), array([0, 1])) """ # GH6552: preserve names when reindexing to non-named target # (i.e. neither Index nor Series). preserve_names = not hasattr(target, "name") # GH7774: preserve dtype/tz if target is empty and not an Index. target = ensure_has_len(target) # target may be an iterator if not isinstance(target, Index) and len(target) == 0: if level is not None and self._is_multi: # "Index" has no attribute "levels"; maybe "nlevels"? idx = self.levels[level] # type: ignore[attr-defined] else: idx = self target = idx[:0] else: target = ensure_index(target) if level is not None and ( isinstance(self, ABCMultiIndex) or isinstance(target, ABCMultiIndex) ): if method is not None: raise TypeError("Fill method not supported if level passed") # TODO: tests where passing `keep_order=not self._is_multi` # makes a difference for non-MultiIndex case target, indexer, _ = self._join_level( target, level, how="right", keep_order=not self._is_multi ) else: if self.equals(target): indexer = None else: if self._index_as_unique: indexer = self.get_indexer( target, method=method, limit=limit, tolerance=tolerance ) elif self._is_multi: raise ValueError("cannot handle a non-unique multi-index!") elif not self.is_unique: # GH#42568 raise ValueError("cannot reindex on an axis with duplicate labels") else: indexer, _ = self.get_indexer_non_unique(target) target = self._wrap_reindex_result(target, indexer, preserve_names) return target, indexer def _wrap_reindex_result(self, target, indexer, preserve_names: bool): target = self._maybe_preserve_names(target, preserve_names) return target def _maybe_preserve_names(self, target: Index, preserve_names: bool): if preserve_names and target.nlevels == 1 and target.name != self.name: target = target.copy(deep=False) target.name = self.name return target def _reindex_non_unique( self, target: Index ) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp] | None]: """ Create a new index with target's values (move/add/delete values as necessary) use with non-unique Index and a possibly non-unique target. Parameters ---------- target : an iterable Returns ------- new_index : pd.Index Resulting index. indexer : np.ndarray[np.intp] Indices of output values in original index. 
new_indexer : np.ndarray[np.intp] or None """ target = ensure_index(target) if len(target) == 0: # GH#13691 return self[:0], np.array([], dtype=np.intp), None indexer, missing = self.get_indexer_non_unique(target) check = indexer != -1 new_labels = self.take(indexer[check]) new_indexer = None if len(missing): length = np.arange(len(indexer), dtype=np.intp) missing = ensure_platform_int(missing) missing_labels = target.take(missing) missing_indexer = length[~check] cur_labels = self.take(indexer[check]).values cur_indexer = length[check] # Index constructor below will do inference new_labels = np.empty((len(indexer),), dtype=object) new_labels[cur_indexer] = cur_labels new_labels[missing_indexer] = missing_labels # GH#38906 if not len(self): new_indexer = np.arange(0, dtype=np.intp) # a unique indexer elif target.is_unique: # see GH5553, make sure we use the right indexer new_indexer = np.arange(len(indexer), dtype=np.intp) new_indexer[cur_indexer] = np.arange(len(cur_labels)) new_indexer[missing_indexer] = -1 # we have a non_unique selector, need to use the original # indexer here else: # need to retake to have the same size as the indexer indexer[~check] = -1 # reset the new indexer to account for the new size new_indexer = np.arange(len(self.take(indexer)), dtype=np.intp) new_indexer[~check] = -1 if not isinstance(self, ABCMultiIndex): new_index = Index(new_labels, name=self.name) else: new_index = type(self).from_tuples(new_labels, names=self.names) return new_index, indexer, new_indexer # -------------------------------------------------------------------- # Join Methods def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: Literal[True], sort: bool = ..., ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: ... def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: Literal[False] = ..., sort: bool = ..., ) -> Index: ... def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: bool = ..., sort: bool = ..., ) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: ... def join( self, other: Index, *, how: JoinHow = "left", level: Level = None, return_indexers: bool = False, sort: bool = False, ) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: """ Compute join_index and indexers to conform data structures to the new index. Parameters ---------- other : Index how : {'left', 'right', 'inner', 'outer'} level : int or level name, default None return_indexers : bool, default False sort : bool, default False Sort the join keys lexicographically in the result Index. If False, the order of the join keys depends on the join type (how keyword). Returns ------- join_index, (left_indexer, right_indexer) """ other = ensure_index(other) if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex): if (self.tz is None) ^ (other.tz is None): # Raise instead of casting to object below. 
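                # Illustrative sketch (not part of the original code; values
                # are assumptions): joining tz-naive with tz-aware raises
                # rather than silently casting to object, e.g.
                #   pd.date_range("2020", periods=2).join(
                #       pd.date_range("2020", periods=2, tz="UTC"))
                # -> TypeError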
raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex") if not self._is_multi and not other._is_multi: # We have specific handling for MultiIndex below pself, pother = self._maybe_promote(other) if pself is not self or pother is not other: return pself.join( pother, how=how, level=level, return_indexers=True, sort=sort ) lindexer: np.ndarray | None rindexer: np.ndarray | None # try to figure out the join level # GH3662 if level is None and (self._is_multi or other._is_multi): # have the same levels/names so a simple join if self.names == other.names: pass else: return self._join_multi(other, how=how) # join on the level if level is not None and (self._is_multi or other._is_multi): return self._join_level(other, level, how=how) if len(other) == 0: if how in ("left", "outer"): join_index = self._view() rindexer = np.broadcast_to(np.intp(-1), len(join_index)) return join_index, None, rindexer elif how in ("right", "inner", "cross"): join_index = other._view() lindexer = np.array([]) return join_index, lindexer, None if len(self) == 0: if how in ("right", "outer"): join_index = other._view() lindexer = np.broadcast_to(np.intp(-1), len(join_index)) return join_index, lindexer, None elif how in ("left", "inner", "cross"): join_index = self._view() rindexer = np.array([]) return join_index, None, rindexer if self._join_precedence < other._join_precedence: flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) join_index, lidx, ridx = other.join( self, how=how, level=level, return_indexers=True ) lidx, ridx = ridx, lidx return join_index, lidx, ridx if not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) other = other.astype(dtype, copy=False) return this.join(other, how=how, return_indexers=True) _validate_join_method(how) if not self.is_unique and not other.is_unique: return self._join_non_unique(other, how=how) elif not self.is_unique or not other.is_unique: if self.is_monotonic_increasing and other.is_monotonic_increasing: if not is_interval_dtype(self.dtype): # otherwise we will fall through to _join_via_get_indexer # GH#39133 # go through object dtype for ea till engine is supported properly return self._join_monotonic(other, how=how) else: return self._join_non_unique(other, how=how) elif ( # GH48504: exclude MultiIndex to avoid going through MultiIndex._values self.is_monotonic_increasing and other.is_monotonic_increasing and self._can_use_libjoin and not isinstance(self, ABCMultiIndex) and not is_categorical_dtype(self.dtype) ): # Categorical is monotonic if data are ordered as categories, but join can # not handle this in case of not lexicographically monotonic GH#38502 try: return self._join_monotonic(other, how=how) except TypeError: # object dtype; non-comparable objects pass return self._join_via_get_indexer(other, how, sort) def _join_via_get_indexer( self, other: Index, how: JoinHow, sort: bool ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: # Fallback if we do not have any fastpaths available based on # uniqueness/monotonicity # Note: at this point we have checked matching dtypes if how == "left": join_index = self elif how == "right": join_index = other elif how == "inner": # TODO: sort=False here for backwards compat. It may # be better to use the sort parameter passed into join join_index = self.intersection(other, sort=False) elif how == "outer": # TODO: sort=True here for backwards compat. 
It may # be better to use the sort parameter passed into join join_index = self.union(other) if sort: join_index = join_index.sort_values() if join_index is self: lindexer = None else: lindexer = self.get_indexer_for(join_index) if join_index is other: rindexer = None else: rindexer = other.get_indexer_for(join_index) return join_index, lindexer, rindexer def _join_multi(self, other: Index, how: JoinHow): from pandas.core.indexes.multi import MultiIndex from pandas.core.reshape.merge import restore_dropped_levels_multijoin # figure out join names self_names_list = list(com.not_none(*self.names)) other_names_list = list(com.not_none(*other.names)) self_names_order = self_names_list.index other_names_order = other_names_list.index self_names = set(self_names_list) other_names = set(other_names_list) overlap = self_names & other_names # need at least 1 in common if not overlap: raise ValueError("cannot join with no overlapping index names") if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): # Drop the non-matching levels from left and right respectively ldrop_names = sorted(self_names - overlap, key=self_names_order) rdrop_names = sorted(other_names - overlap, key=other_names_order) # if only the order differs if not len(ldrop_names + rdrop_names): self_jnlevels = self other_jnlevels = other.reorder_levels(self.names) else: self_jnlevels = self.droplevel(ldrop_names) other_jnlevels = other.droplevel(rdrop_names) # Join left and right # Join on same leveled multi-index frames is supported join_idx, lidx, ridx = self_jnlevels.join( other_jnlevels, how=how, return_indexers=True ) # Restore the dropped levels # Returned index level order is # common levels, ldrop_names, rdrop_names dropped_names = ldrop_names + rdrop_names # error: Argument 5/6 to "restore_dropped_levels_multijoin" has # incompatible type "Optional[ndarray[Any, dtype[signedinteger[Any # ]]]]"; expected "ndarray[Any, dtype[signedinteger[Any]]]" levels, codes, names = restore_dropped_levels_multijoin( self, other, dropped_names, join_idx, lidx, # type: ignore[arg-type] ridx, # type: ignore[arg-type] ) # Re-create the multi-index multi_join_idx = MultiIndex( levels=levels, codes=codes, names=names, verify_integrity=False ) multi_join_idx = multi_join_idx.remove_unused_levels() return multi_join_idx, lidx, ridx jl = list(overlap)[0] # Case where only one index is multi # make the indices into mi's that match flip_order = False if isinstance(self, MultiIndex): self, other = other, self flip_order = True # flip if join method is right or left flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) level = other.names.index(jl) result = self._join_level(other, level, how=how) if flip_order: return result[0], result[2], result[1] return result def _join_non_unique( self, other: Index, how: JoinHow = "left" ) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp]]: from pandas.core.reshape.merge import get_join_indexers # We only get here if dtypes match assert self.dtype == other.dtype left_idx, right_idx = get_join_indexers( [self._values], [other._values], how=how, sort=True ) mask = left_idx == -1 join_idx = self.take(left_idx) right = other.take(right_idx) join_index = join_idx.putmask(mask, right) return join_index, left_idx, right_idx def _join_level( self, other: Index, level, how: JoinHow = "left", keep_order: bool = True ) -> tuple[MultiIndex, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: """ The join method *only* affects the level of the resulting MultiIndex. 
Otherwise it just exactly aligns the Index data to the labels of the level in the MultiIndex. If ```keep_order == True```, the order of the data indexed by the MultiIndex will not be changed; otherwise, it will tie out with `other`. """ from pandas.core.indexes.multi import MultiIndex def _get_leaf_sorter(labels: list[np.ndarray]) -> npt.NDArray[np.intp]: """ Returns sorter for the inner most level while preserving the order of higher levels. Parameters ---------- labels : list[np.ndarray] Each ndarray has signed integer dtype, not necessarily identical. Returns ------- np.ndarray[np.intp] """ if labels[0].size == 0: return np.empty(0, dtype=np.intp) if len(labels) == 1: return get_group_index_sorter(ensure_platform_int(labels[0])) # find indexers of beginning of each set of # same-key labels w.r.t all but last level tic = labels[0][:-1] != labels[0][1:] for lab in labels[1:-1]: tic |= lab[:-1] != lab[1:] starts = np.hstack(([True], tic, [True])).nonzero()[0] lab = ensure_int64(labels[-1]) return lib.get_level_sorter(lab, ensure_platform_int(starts)) if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): raise TypeError("Join on level between two MultiIndex objects is ambiguous") left, right = self, other flip_order = not isinstance(self, MultiIndex) if flip_order: left, right = right, left flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) assert isinstance(left, MultiIndex) level = left._get_level_number(level) old_level = left.levels[level] if not right.is_unique: raise NotImplementedError( "Index._join_level on non-unique index is not implemented" ) new_level, left_lev_indexer, right_lev_indexer = old_level.join( right, how=how, return_indexers=True ) if left_lev_indexer is None: if keep_order or len(left) == 0: left_indexer = None join_index = left else: # sort the leaves left_indexer = _get_leaf_sorter(left.codes[: level + 1]) join_index = left[left_indexer] else: left_lev_indexer = ensure_platform_int(left_lev_indexer) rev_indexer = lib.get_reverse_indexer(left_lev_indexer, len(old_level)) old_codes = left.codes[level] taker = old_codes[old_codes != -1] new_lev_codes = rev_indexer.take(taker) new_codes = list(left.codes) new_codes[level] = new_lev_codes new_levels = list(left.levels) new_levels[level] = new_level if keep_order: # just drop missing values. o.w. keep order left_indexer = np.arange(len(left), dtype=np.intp) left_indexer = cast(np.ndarray, left_indexer) mask = new_lev_codes != -1 if not mask.all(): new_codes = [lab[mask] for lab in new_codes] left_indexer = left_indexer[mask] else: # tie out the order with other if level == 0: # outer most level, take the fast route max_new_lev = 0 if len(new_lev_codes) == 0 else new_lev_codes.max() ngroups = 1 + max_new_lev left_indexer, counts = libalgos.groupsort_indexer( new_lev_codes, ngroups ) # missing values are placed first; drop them! left_indexer = left_indexer[counts[0] :] new_codes = [lab[left_indexer] for lab in new_codes] else: # sort the leaves mask = new_lev_codes != -1 mask_all = mask.all() if not mask_all: new_codes = [lab[mask] for lab in new_codes] left_indexer = _get_leaf_sorter(new_codes[: level + 1]) new_codes = [lab[left_indexer] for lab in new_codes] # left_indexers are w.r.t masked frame. # reverse to original frame! 
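                    # Worked sketch of the mapping below (values are
                    # illustrative): with mask = [True, False, True] and
                    # left_indexer = [1, 0] w.r.t. the masked frame,
                    # mask.nonzero()[0] is [0, 2], so
                    # mask.nonzero()[0][left_indexer] is [2, 0] -- positions
                    # in the original, unmasked frame.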
if not mask_all: left_indexer = mask.nonzero()[0][left_indexer] join_index = MultiIndex( levels=new_levels, codes=new_codes, names=left.names, verify_integrity=False, ) if right_lev_indexer is not None: right_indexer = right_lev_indexer.take(join_index.codes[level]) else: right_indexer = join_index.codes[level] if flip_order: left_indexer, right_indexer = right_indexer, left_indexer left_indexer = ( None if left_indexer is None else ensure_platform_int(left_indexer) ) right_indexer = ( None if right_indexer is None else ensure_platform_int(right_indexer) ) return join_index, left_indexer, right_indexer def _join_monotonic( self, other: Index, how: JoinHow = "left" ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: # We only get here with matching dtypes and both monotonic increasing assert other.dtype == self.dtype if self.equals(other): # This is a convenient place for this check, but its correctness # does not depend on monotonicity, so it could go earlier # in the calling method. ret_index = other if how == "right" else self return ret_index, None, None ridx: npt.NDArray[np.intp] | None lidx: npt.NDArray[np.intp] | None if self.is_unique and other.is_unique: # We can perform much better than the general case if how == "left": join_index = self lidx = None ridx = self._left_indexer_unique(other) elif how == "right": join_index = other lidx = other._left_indexer_unique(self) ridx = None elif how == "inner": join_array, lidx, ridx = self._inner_indexer(other) join_index = self._wrap_joined_index(join_array, other, lidx, ridx) elif how == "outer": join_array, lidx, ridx = self._outer_indexer(other) join_index = self._wrap_joined_index(join_array, other, lidx, ridx) else: if how == "left": join_array, lidx, ridx = self._left_indexer(other) elif how == "right": join_array, ridx, lidx = other._left_indexer(self) elif how == "inner": join_array, lidx, ridx = self._inner_indexer(other) elif how == "outer": join_array, lidx, ridx = self._outer_indexer(other) assert lidx is not None assert ridx is not None join_index = self._wrap_joined_index(join_array, other, lidx, ridx) lidx = None if lidx is None else ensure_platform_int(lidx) ridx = None if ridx is None else ensure_platform_int(ridx) return join_index, lidx, ridx def _wrap_joined_index( self: _IndexT, joined: ArrayLike, other: _IndexT, lidx: npt.NDArray[np.intp], ridx: npt.NDArray[np.intp], ) -> _IndexT: assert other.dtype == self.dtype if isinstance(self, ABCMultiIndex): name = self.names if self.names == other.names else None # error: Incompatible return value type (got "MultiIndex", # expected "_IndexT") mask = lidx == -1 join_idx = self.take(lidx) right = other.take(ridx) join_index = join_idx.putmask(mask, right) return join_index.set_names(name) # type: ignore[return-value] else: name = get_op_result_name(self, other) return self._constructor._with_infer(joined, name=name, dtype=self.dtype) def _can_use_libjoin(self) -> bool: """ Whether we can use the fastpaths implement in _libs.join """ if type(self) is Index: # excludes EAs, but include masks, we get here with monotonic # values only, meaning no NA return ( isinstance(self.dtype, np.dtype) or isinstance(self.values, BaseMaskedArray) or isinstance(self._values, ArrowExtensionArray) ) return not is_interval_dtype(self.dtype) # -------------------------------------------------------------------- # Uncategorized Methods def values(self) -> ArrayLike: """ Return an array representing the data in the Index. .. 
warning:: We recommend using :attr:`Index.array` or :meth:`Index.to_numpy`, depending on whether you need a reference to the underlying data or a NumPy array. Returns ------- array: numpy.ndarray or ExtensionArray See Also -------- Index.array : Reference to the underlying data. Index.to_numpy : A NumPy array representing the underlying data. """ return self._data def array(self) -> ExtensionArray: array = self._data if isinstance(array, np.ndarray): from pandas.core.arrays.numpy_ import PandasArray array = PandasArray(array) return array def _values(self) -> ExtensionArray | np.ndarray: """ The best array representation. This is an ndarray or ExtensionArray. ``_values`` are consistent between ``Series`` and ``Index``. It may differ from the public '.values' method. index | values | _values | ----------------- | --------------- | ------------- | Index | ndarray | ndarray | CategoricalIndex | Categorical | Categorical | DatetimeIndex | ndarray[M8ns] | DatetimeArray | DatetimeIndex[tz] | ndarray[M8ns] | DatetimeArray | PeriodIndex | ndarray[object] | PeriodArray | IntervalIndex | IntervalArray | IntervalArray | See Also -------- values : Values """ return self._data def _get_engine_target(self) -> ArrayLike: """ Get the ndarray or ExtensionArray that we can pass to the IndexEngine constructor. """ vals = self._values if isinstance(vals, StringArray): # GH#45652 much more performant than ExtensionEngine return vals._ndarray if ( type(self) is Index and isinstance(self._values, ExtensionArray) and not isinstance(self._values, BaseMaskedArray) and not ( isinstance(self._values, ArrowExtensionArray) and is_numeric_dtype(self.dtype) # Exclude decimal and self.dtype.kind != "O" ) ): # TODO(ExtensionIndex): remove special-case, just use self._values return self._values.astype(object) return vals def _get_join_target(self) -> ArrayLike: """ Get the ndarray or ExtensionArray that we can pass to the join functions. """ if isinstance(self._values, BaseMaskedArray): # This is only used if our array is monotonic, so no NAs present return self._values._data elif isinstance(self._values, ArrowExtensionArray): # This is only used if our array is monotonic, so no missing values # present return self._values.to_numpy() return self._get_engine_target() def _from_join_target(self, result: np.ndarray) -> ArrayLike: """ Cast the ndarray returned from one of the libjoin.foo_indexer functions back to type(self)._data. """ if isinstance(self.values, BaseMaskedArray): return type(self.values)(result, np.zeros(result.shape, dtype=np.bool_)) elif isinstance(self.values, ArrowExtensionArray): return type(self.values)._from_sequence(result) return result def memory_usage(self, deep: bool = False) -> int: result = self._memory_usage(deep=deep) # include our engine hashtable result += self._engine.sizeof(deep=deep) return result def where(self, cond, other=None) -> Index: """ Replace values where the condition is False. The replacement is taken from other. Parameters ---------- cond : bool array-like with the same length as self Condition to select the values on. other : scalar, or array-like, default None Replacement if the condition is False. Returns ------- pandas.Index A copy of self with values replaced from other where the condition is False. See Also -------- Series.where : Same method for Series. DataFrame.where : Same method for DataFrame. 
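        Notes
        -----
        ``cond`` selects the values to keep: wherever ``cond`` is False, the
        entry is replaced by ``other`` (or by the index's NA value when
        ``other`` is None). Internally this delegates to
        ``putmask(~cond, other)``.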
Examples -------- >>> idx = pd.Index(['car', 'bike', 'train', 'tractor']) >>> idx Index(['car', 'bike', 'train', 'tractor'], dtype='object') >>> idx.where(idx.isin(['car', 'train']), 'other') Index(['car', 'other', 'train', 'other'], dtype='object') """ if isinstance(self, ABCMultiIndex): raise NotImplementedError( ".where is not supported for MultiIndex operations" ) cond = np.asarray(cond, dtype=bool) return self.putmask(~cond, other) # construction helpers def _raise_scalar_data_error(cls, data): # We return the TypeError so that we can raise it from the constructor # in order to keep mypy happy raise TypeError( f"{cls.__name__}(...) must be called with a collection of some " f"kind, {repr(data)} was passed" ) def _validate_fill_value(self, value): """ Check if the value can be inserted into our array without casting, and convert it to an appropriate native type if necessary. Raises ------ TypeError If the value cannot be inserted into an array of this dtype. """ dtype = self.dtype if isinstance(dtype, np.dtype) and dtype.kind not in ["m", "M"]: # return np_can_hold_element(dtype, value) try: return np_can_hold_element(dtype, value) except LossySetitemError as err: # re-raise as TypeError for consistency raise TypeError from err elif not can_hold_element(self._values, value): raise TypeError return value def _require_scalar(self, value): """ Check that this is a scalar value that we can use for setitem-like operations without changing dtype. """ if not is_scalar(value): raise TypeError(f"'value' must be a scalar, passed: {type(value).__name__}") return value def _is_memory_usage_qualified(self) -> bool: """ Return a boolean if we need a qualified .info display. """ return is_object_dtype(self.dtype) def __contains__(self, key: Any) -> bool: """ Return a boolean indicating whether the provided key is in the index. Parameters ---------- key : label The key to check if it is present in the index. Returns ------- bool Whether the key search is in the index. Raises ------ TypeError If the key is not hashable. See Also -------- Index.isin : Returns an ndarray of boolean dtype indicating whether the list-like key is in the index. Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx Index([1, 2, 3, 4], dtype='int64') >>> 2 in idx True >>> 6 in idx False """ hash(key) try: return key in self._engine except (OverflowError, TypeError, ValueError): return False # https://github.com/python/typeshed/issues/2148#issuecomment-520783318 # Incompatible types in assignment (expression has type "None", base class # "object" defined the type as "Callable[[object], int]") __hash__: ClassVar[None] # type: ignore[assignment] def __setitem__(self, key, value): raise TypeError("Index does not support mutable operations") def __getitem__(self, key): """ Override numpy.ndarray's __getitem__ method to work as desired. This function adds lists and Series as valid boolean indexers (ndarrays only supports ndarray with dtype=bool). If resulting ndim != 1, plain ndarray is returned instead of corresponding `Index` subclass. """ getitem = self._data.__getitem__ if is_integer(key) or is_float(key): # GH#44051 exclude bool, which would return a 2d ndarray key = com.cast_scalar_indexer(key) return getitem(key) if isinstance(key, slice): # This case is separated from the conditional above to avoid # pessimization com.is_bool_indexer and ndim checks. result = getitem(key) # Going through simple_new for performance. 
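            # Minimal illustration (assumed values): with idx = pd.Index([10, 20, 30]),
            # idx[1:] yields Index([20, 30]) backed by a view on the same
            # underlying data, which is why re-validation in the full
            # constructor can be skipped here.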
return type(self)._simple_new( result, name=self._name, refs=self._references ) if com.is_bool_indexer(key): # if we have list[bools, length=1e5] then doing this check+convert # takes 166 µs + 2.1 ms and cuts the ndarray.__getitem__ # time below from 3.8 ms to 496 µs # if we already have ndarray[bool], the overhead is 1.4 µs or .25% if is_extension_array_dtype(getattr(key, "dtype", None)): key = key.to_numpy(dtype=bool, na_value=False) else: key = np.asarray(key, dtype=bool) result = getitem(key) # Because we ruled out integer above, we always get an arraylike here if result.ndim > 1: disallow_ndim_indexing(result) # NB: Using _constructor._simple_new would break if MultiIndex # didn't override __getitem__ return self._constructor._simple_new(result, name=self._name) def _getitem_slice(self: _IndexT, slobj: slice) -> _IndexT: """ Fastpath for __getitem__ when we know we have a slice. """ res = self._data[slobj] return type(self)._simple_new(res, name=self._name, refs=self._references) def _can_hold_identifiers_and_holds_name(self, name) -> bool: """ Faster check for ``name in self`` when we know `name` is a Python identifier (e.g. in NDFrame.__getattr__, which hits this to support . key lookup). For indexes that can't hold identifiers (everything but object & categorical) we just return False. https://github.com/pandas-dev/pandas/issues/19764 """ if ( is_object_dtype(self.dtype) or is_string_dtype(self.dtype) or is_categorical_dtype(self.dtype) ): return name in self return False def append(self, other: Index | Sequence[Index]) -> Index: """ Append a collection of Index options together. Parameters ---------- other : Index or list/tuple of indices Returns ------- Index """ to_concat = [self] if isinstance(other, (list, tuple)): to_concat += list(other) else: # error: Argument 1 to "append" of "list" has incompatible type # "Union[Index, Sequence[Index]]"; expected "Index" to_concat.append(other) # type: ignore[arg-type] for obj in to_concat: if not isinstance(obj, Index): raise TypeError("all inputs must be Index") names = {obj.name for obj in to_concat} name = None if len(names) > 1 else self.name return self._concat(to_concat, name) def _concat(self, to_concat: list[Index], name: Hashable) -> Index: """ Concatenate multiple Index objects. """ to_concat_vals = [x._values for x in to_concat] result = concat_compat(to_concat_vals) return Index._with_infer(result, name=name) def putmask(self, mask, value) -> Index: """ Return a new Index of the values set with the mask. Returns ------- Index See Also -------- numpy.ndarray.putmask : Changes elements of an array based on conditional and input values. """ mask, noop = validate_putmask(self._values, mask) if noop: return self.copy() if self.dtype != object and is_valid_na_for_dtype(value, self.dtype): # e.g. 
            #  None -> np.nan, see also Block._standardize_fill_value
            value = self._na_value

        try:
            converted = self._validate_fill_value(value)
        except (LossySetitemError, ValueError, TypeError) as err:
            if is_object_dtype(self):  # pragma: no cover
                raise err

            # See also: Block.coerce_to_target_dtype
            dtype = self._find_common_type_compat(value)
            return self.astype(dtype).putmask(mask, value)

        values = self._values.copy()

        if isinstance(values, np.ndarray):
            converted = setitem_datetimelike_compat(values, mask.sum(), converted)
            np.putmask(values, mask, converted)

        else:
            # Note: we use the original value here, not converted, as
            #  _validate_fill_value is not idempotent
            values._putmask(mask, value)

        return self._shallow_copy(values)

    def equals(self, other: Any) -> bool:
        """
        Determine if two Index objects are equal.

        The things that are being compared are:

        * The elements inside the Index object.
        * The order of the elements inside the Index object.

        Parameters
        ----------
        other : Any
            The other object to compare against.

        Returns
        -------
        bool
            True if "other" is an Index and it has the same elements and order
            as the calling index; False otherwise.

        Examples
        --------
        >>> idx1 = pd.Index([1, 2, 3])
        >>> idx1
        Index([1, 2, 3], dtype='int64')
        >>> idx1.equals(pd.Index([1, 2, 3]))
        True

        The elements inside are compared

        >>> idx2 = pd.Index(["1", "2", "3"])
        >>> idx2
        Index(['1', '2', '3'], dtype='object')

        >>> idx1.equals(idx2)
        False

        The order is compared

        >>> ascending_idx = pd.Index([1, 2, 3])
        >>> ascending_idx
        Index([1, 2, 3], dtype='int64')
        >>> descending_idx = pd.Index([3, 2, 1])
        >>> descending_idx
        Index([3, 2, 1], dtype='int64')
        >>> ascending_idx.equals(descending_idx)
        False

        The dtype is *not* compared

        >>> int64_idx = pd.Index([1, 2, 3], dtype='int64')
        >>> int64_idx
        Index([1, 2, 3], dtype='int64')
        >>> uint64_idx = pd.Index([1, 2, 3], dtype='uint64')
        >>> uint64_idx
        Index([1, 2, 3], dtype='uint64')
        >>> int64_idx.equals(uint64_idx)
        True
        """
        if self.is_(other):
            return True

        if not isinstance(other, Index):
            return False

        if is_object_dtype(self.dtype) and not is_object_dtype(other.dtype):
            # if other is not object, use other's logic for coercion
            return other.equals(self)

        if isinstance(other, ABCMultiIndex):
            # d-level MultiIndex can equal d-tuple Index
            return other.equals(self)

        if isinstance(self._values, ExtensionArray):
            # Dispatch to the ExtensionArray's .equals method.
            if not isinstance(other, type(self)):
                return False

            earr = cast(ExtensionArray, self._data)
            return earr.equals(other._data)

        if is_extension_array_dtype(other.dtype):
            # All EA-backed Index subclasses override equals
            return other.equals(self)

        return array_equivalent(self._values, other._values)

    def identical(self, other) -> bool:
        """
        Similar to equals, but checks that object attributes and types are
        also equal.

        Returns
        -------
        bool
            True if the two Index objects have equal elements and the same
            type, False otherwise.
        """
        return (
            self.equals(other)
            and all(
                getattr(self, c, None) == getattr(other, c, None)
                for c in self._comparables
            )
            and type(self) == type(other)
            and self.dtype == other.dtype
        )

    def asof(self, label):
        """
        Return the label from the index, or, if not present, the previous one.

        Assuming that the index is sorted, return the passed index label if it
        is in the index, or return the previous index label if the passed one
        is not in the index.

        Parameters
        ----------
        label : object
            The label up to which the method returns the latest index label.

        Returns
        -------
        object
            The passed label if it is in the index.
The previous label if the passed label is not in the sorted index or `NaN` if there is no such label. See Also -------- Series.asof : Return the latest value in a Series up to the passed index. merge_asof : Perform an asof merge (similar to left join but it matches on nearest key rather than equal key). Index.get_loc : An `asof` is a thin wrapper around `get_loc` with method='pad'. Examples -------- `Index.asof` returns the latest index label up to the passed label. >>> idx = pd.Index(['2013-12-31', '2014-01-02', '2014-01-03']) >>> idx.asof('2014-01-01') '2013-12-31' If the label is in the index, the method returns the passed label. >>> idx.asof('2014-01-02') '2014-01-02' If all of the labels in the index are later than the passed label, NaN is returned. >>> idx.asof('1999-01-02') nan If the index is not sorted, an error is raised. >>> idx_not_sorted = pd.Index(['2013-12-31', '2015-01-02', ... '2014-01-03']) >>> idx_not_sorted.asof('2013-12-31') Traceback (most recent call last): ValueError: index must be monotonic increasing or decreasing """ self._searchsorted_monotonic(label) # validate sortedness try: loc = self.get_loc(label) except (KeyError, TypeError): # KeyError -> No exact match, try for padded # TypeError -> passed e.g. non-hashable, fall through to get # the tested exception message indexer = self.get_indexer([label], method="pad") if indexer.ndim > 1 or indexer.size > 1: raise TypeError("asof requires scalar valued input") loc = indexer.item() if loc == -1: return self._na_value else: if isinstance(loc, slice): loc = loc.indices(len(self))[-1] return self[loc] def asof_locs( self, where: Index, mask: npt.NDArray[np.bool_] ) -> npt.NDArray[np.intp]: """ Return the locations (indices) of labels in the index. As in the `asof` function, if the label (a particular entry in `where`) is not in the index, the latest index label up to the passed label is chosen and its index returned. If all of the labels in the index are later than a label in `where`, -1 is returned. `mask` is used to ignore NA values in the index during calculation. Parameters ---------- where : Index An Index consisting of an array of timestamps. mask : np.ndarray[bool] Array of booleans denoting where values in the original data are not NA. Returns ------- np.ndarray[np.intp] An array of locations (indices) of the labels from the Index which correspond to the return values of the `asof` function for every element in `where`. """ # error: No overload variant of "searchsorted" of "ndarray" matches argument # types "Union[ExtensionArray, ndarray[Any, Any]]", "str" # TODO: will be fixed when ExtensionArray.searchsorted() is fixed locs = self._values[mask].searchsorted( where._values, side="right" # type: ignore[call-overload] ) locs = np.where(locs > 0, locs - 1, 0) result = np.arange(len(self), dtype=np.intp)[mask].take(locs) first_value = self._values[mask.argmax()] result[(locs == 0) & (where._values < first_value)] = -1 return result def sort_values( self, return_indexer: bool = False, ascending: bool = True, na_position: str_t = "last", key: Callable | None = None, ): """ Return a sorted copy of the index. Return a sorted copy of the index, and optionally return the indices that sorted the index itself. Parameters ---------- return_indexer : bool, default False Should the indices that would sort the index be returned. ascending : bool, default True Should the index values be sorted in an ascending order. 
na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. .. versionadded:: 1.2.0 key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- sorted_index : pandas.Index Sorted copy of the index. indexer : numpy.ndarray, optional The indices that the index itself was sorted by. See Also -------- Series.sort_values : Sort values of a Series. DataFrame.sort_values : Sort values in a DataFrame. Examples -------- >>> idx = pd.Index([10, 100, 1, 1000]) >>> idx Index([10, 100, 1, 1000], dtype='int64') Sort values in ascending order (default behavior). >>> idx.sort_values() Index([1, 10, 100, 1000], dtype='int64') Sort values in descending order, and also get the indices `idx` was sorted by. >>> idx.sort_values(ascending=False, return_indexer=True) (Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2])) """ idx = ensure_key_mapped(self, key) # GH 35584. Sort missing values according to na_position kwarg # ignore na_position for MultiIndex if not isinstance(self, ABCMultiIndex): _as = nargsort( items=idx, ascending=ascending, na_position=na_position, key=key ) else: _as = idx.argsort() if not ascending: _as = _as[::-1] sorted_index = self.take(_as) if return_indexer: return sorted_index, _as else: return sorted_index def sort(self, *args, **kwargs): """ Use sort_values instead. """ raise TypeError("cannot sort an Index object in-place, use sort_values instead") def shift(self, periods: int = 1, freq=None): """ Shift index by desired number of time frequency increments. This method is for shifting the values of datetime-like indexes by a specified time increment a given number of times. Parameters ---------- periods : int, default 1 Number of periods (or increments) to shift by, can be positive or negative. freq : pandas.DateOffset, pandas.Timedelta or str, optional Frequency increment to shift by. If None, the index is shifted by its own `freq` attribute. Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc. Returns ------- pandas.Index Shifted index. See Also -------- Series.shift : Shift values of Series. Notes ----- This method is only implemented for datetime-like index classes, i.e., DatetimeIndex, PeriodIndex and TimedeltaIndex. Examples -------- Put the first 5 month starts of 2011 into an index. >>> month_starts = pd.date_range('1/1/2011', periods=5, freq='MS') >>> month_starts DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01', '2011-05-01'], dtype='datetime64[ns]', freq='MS') Shift the index by 10 days. >>> month_starts.shift(10, freq='D') DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11', '2011-05-11'], dtype='datetime64[ns]', freq=None) The default value of `freq` is the `freq` attribute of the index, which is 'MS' (month start) in this example. >>> month_starts.shift(10) DatetimeIndex(['2011-11-01', '2011-12-01', '2012-01-01', '2012-02-01', '2012-03-01'], dtype='datetime64[ns]', freq='MS') """ raise NotImplementedError( f"This method is only implemented for DatetimeIndex, PeriodIndex and " f"TimedeltaIndex; Got type {type(self).__name__}" ) def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]: """ Return the integer indices that would sort the index. 
        Parameters
        ----------
        *args
            Passed to `numpy.ndarray.argsort`.
        **kwargs
            Passed to `numpy.ndarray.argsort`.

        Returns
        -------
        np.ndarray[np.intp]
            Integer indices that would sort the index if used as
            an indexer.

        See Also
        --------
        numpy.argsort : Similar method for NumPy arrays.
        Index.sort_values : Return sorted copy of Index.

        Examples
        --------
        >>> idx = pd.Index(['b', 'a', 'd', 'c'])
        >>> idx
        Index(['b', 'a', 'd', 'c'], dtype='object')

        >>> order = idx.argsort()
        >>> order
        array([1, 0, 3, 2])

        >>> idx[order]
        Index(['a', 'b', 'c', 'd'], dtype='object')
        """
        # This works for either ndarray or EA, is overridden
        #  by RangeIndex, MultiIndex
        return self._data.argsort(*args, **kwargs)

    def _check_indexing_error(self, key):
        if not is_scalar(key):
            # if key is not a scalar, directly raise an error (the code below
            # would convert to numpy arrays and raise later anyway) - GH29926
            raise InvalidIndexError(key)

    @cache_readonly
    def _should_fallback_to_positional(self) -> bool:
        """
        Should an integer key be treated as positional?
        """
        return self.inferred_type not in {
            "integer",
            "mixed-integer",
            "floating",
            "complex",
        }

    _index_shared_docs[
        "get_indexer_non_unique"
    ] = """
        Compute indexer and mask for new index given the current index.

        The indexer should be then used as an input to ndarray.take to align the
        current data to the new index.

        Parameters
        ----------
        target : %(target_klass)s

        Returns
        -------
        indexer : np.ndarray[np.intp]
            Integers from 0 to n - 1 indicating that the index at these
            positions matches the corresponding target values. Missing values
            in the target are marked by -1.
        missing : np.ndarray[np.intp]
            An indexer into the target of the values not found.
            These correspond to the -1 in the indexer array.

        Examples
        --------
        >>> index = pd.Index(['c', 'b', 'a', 'b', 'b'])
        >>> index.get_indexer_non_unique(['b', 'b'])
        (array([1, 3, 4, 1, 3, 4]), array([], dtype=int64))

        In the example below there are no matched values.

        >>> index = pd.Index(['c', 'b', 'a', 'b', 'b'])
        >>> index.get_indexer_non_unique(['q', 'r', 't'])
        (array([-1, -1, -1]), array([0, 1, 2]))

        For this reason, the returned ``indexer`` contains only integers equal
        to -1. It demonstrates that there's no match between the index and the
        ``target`` values at these positions. The mask [0, 1, 2] in the return
        value shows that the first, second, and third elements are missing.

        Notice that the return value is a tuple containing two items. In the
        example below the first item is an array of locations in ``index``.
        The second item is a mask that shows that the first and third elements
        are missing.

        >>> index = pd.Index(['c', 'b', 'a', 'b', 'b'])
        >>> index.get_indexer_non_unique(['f', 'b', 's'])
        (array([-1, 1, 3, 4, -1]), array([0, 2]))
        """

    def get_indexer_non_unique(
        self, target
    ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
        target = ensure_index(target)
        target = self._maybe_cast_listlike_indexer(target)

        if not self._should_compare(target) and not self._should_partial_index(target):
            # _should_partial_index e.g. IntervalIndex with numeric scalars
            #  that can be matched to Interval scalars.
            return self._get_indexer_non_comparable(target, method=None, unique=False)

        pself, ptarget = self._maybe_promote(target)
        if pself is not self or ptarget is not target:
            return pself.get_indexer_non_unique(ptarget)

        if not is_dtype_equal(self.dtype, target.dtype):
            # TODO: if object, could use infer_dtype to preempt costly
            #  conversion if still non-comparable?
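            # Sketch of this cast-and-retry path (values are assumptions):
            # self = Index([1, 2, 2]), target = Index([2.0]) -> common dtype
            # float64; both sides are cast and the lookup is retried via
            # this.get_indexer_non_unique(that).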
dtype = self._find_common_type_compat(target) this = self.astype(dtype, copy=False) that = target.astype(dtype, copy=False) return this.get_indexer_non_unique(that) # TODO: get_indexer has fastpaths for both Categorical-self and # Categorical-target. Can we do something similar here? # Note: _maybe_promote ensures we never get here with MultiIndex # self and non-Multi target tgt_values = target._get_engine_target() if self._is_multi and target._is_multi: engine = self._engine # Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]" has # no attribute "_extract_level_codes" tgt_values = engine._extract_level_codes(target) # type: ignore[union-attr] indexer, missing = self._engine.get_indexer_non_unique(tgt_values) return ensure_platform_int(indexer), ensure_platform_int(missing) def get_indexer_for(self, target) -> npt.NDArray[np.intp]: """ Guaranteed return of an indexer even when non-unique. This dispatches to get_indexer or get_indexer_non_unique as appropriate. Returns ------- np.ndarray[np.intp] List of indices. Examples -------- >>> idx = pd.Index([np.nan, 'var1', np.nan]) >>> idx.get_indexer_for([np.nan]) array([0, 2]) """ if self._index_as_unique: return self.get_indexer(target) indexer, _ = self.get_indexer_non_unique(target) return indexer def _get_indexer_strict(self, key, axis_name: str_t) -> tuple[Index, np.ndarray]: """ Analogue to get_indexer that raises if any elements are missing. """ keyarr = key if not isinstance(keyarr, Index): keyarr = com.asarray_tuplesafe(keyarr) if self._index_as_unique: indexer = self.get_indexer_for(keyarr) keyarr = self.reindex(keyarr)[0] else: keyarr, indexer, new_indexer = self._reindex_non_unique(keyarr) self._raise_if_missing(keyarr, indexer, axis_name) keyarr = self.take(indexer) if isinstance(key, Index): # GH 42790 - Preserve name from an Index keyarr.name = key.name if keyarr.dtype.kind in ["m", "M"]: # DTI/TDI.take can infer a freq in some cases when we dont want one if isinstance(key, list) or ( isinstance(key, type(self)) # "Index" has no attribute "freq" and key.freq is None # type: ignore[attr-defined] ): keyarr = keyarr._with_freq(None) return keyarr, indexer def _raise_if_missing(self, key, indexer, axis_name: str_t) -> None: """ Check that indexer can be used to return a result. e.g. at least one element was found, unless the list of keys was actually empty. Parameters ---------- key : list-like Targeted labels (only used to show correct error message). indexer: array-like of booleans Indices corresponding to the key, (with -1 indicating not found). axis_name : str Raises ------ KeyError If at least one key was requested but none was found. """ if len(key) == 0: return # Count missing values missing_mask = indexer < 0 nmissing = missing_mask.sum() if nmissing: # TODO: remove special-case; this is just to keep exception # message tests from raising while debugging use_interval_msg = is_interval_dtype(self.dtype) or ( is_categorical_dtype(self.dtype) # "Index" has no attribute "categories" [attr-defined] and is_interval_dtype( self.categories.dtype # type: ignore[attr-defined] ) ) if nmissing == len(indexer): if use_interval_msg: key = list(key) raise KeyError(f"None of [{key}] are in the [{axis_name}]") not_found = list(ensure_index(key)[missing_mask.nonzero()[0]].unique()) raise KeyError(f"{not_found} not in index") def _get_indexer_non_comparable( self, target: Index, method, unique: Literal[True] = ... ) -> npt.NDArray[np.intp]: ... 
@overload def _get_indexer_non_comparable( self, target: Index, method, unique: Literal[False] ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... @overload def _get_indexer_non_comparable( self, target: Index, method, unique: bool = True ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... def _get_indexer_non_comparable( self, target: Index, method, unique: bool = True ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: """ Called from get_indexer or get_indexer_non_unique when the target is of a non-comparable dtype. For get_indexer lookups with method=None, get_indexer is an _equality_ check, so non-comparable dtypes mean we will always have no matches. For get_indexer lookups with a method, get_indexer is an _inequality_ check, so non-comparable dtypes mean we will always raise TypeError. Parameters ---------- target : Index method : str or None unique : bool, default True * True if called from get_indexer. * False if called from get_indexer_non_unique. Raises ------ TypeError If doing an inequality check, i.e. method is not None. """ if method is not None: other = _unpack_nested_dtype(target) raise TypeError(f"Cannot compare dtypes {self.dtype} and {other.dtype}") no_matches = -1 * np.ones(target.shape, dtype=np.intp) if unique: # This is for get_indexer return no_matches else: # This is for get_indexer_non_unique missing = np.arange(len(target), dtype=np.intp) return no_matches, missing @property def _index_as_unique(self) -> bool: """ Whether we should treat this as unique for the sake of get_indexer vs get_indexer_non_unique. For IntervalIndex compat. """ return self.is_unique _requires_unique_msg = "Reindexing only valid with uniquely valued Index objects" def _maybe_promote(self, other: Index) -> tuple[Index, Index]: """ When dealing with an object-dtype Index and a non-object Index, see if we can upcast the object-dtype one to improve performance. """ if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex): if ( self.tz is not None and other.tz is not None and not tz_compare(self.tz, other.tz) ): # standardize on UTC return self.tz_convert("UTC"), other.tz_convert("UTC") elif self.inferred_type == "date" and isinstance(other, ABCDatetimeIndex): try: return type(other)(self), other except OutOfBoundsDatetime: return self, other elif self.inferred_type == "timedelta" and isinstance(other, ABCTimedeltaIndex): # TODO: we don't have tests that get here return type(other)(self), other elif self.dtype.kind == "u" and other.dtype.kind == "i": # GH#41873 if other.min() >= 0: # lookup min as it may be cached # TODO: may need itemsize check if we have non-64-bit Indexes return self, other.astype(self.dtype) elif self._is_multi and not other._is_multi: try: # "Type[Index]" has no attribute "from_tuples" other = type(self).from_tuples(other) # type: ignore[attr-defined] except (TypeError, ValueError): # let's instead try with a straight Index self = Index(self._values) if not is_object_dtype(self.dtype) and is_object_dtype(other.dtype): # Reverse op so we don't need to re-implement on the subclasses other, self = other._maybe_promote(self) return self, other def _find_common_type_compat(self, target) -> DtypeObj: """ Implementation of find_common_type that adjusts for Index-specific special cases.
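For example, a uint64 dtype paired with a signed integer dtype falls back to object rather than risking a lossy cast (GH#26778, handled below).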
""" target_dtype, _ = infer_dtype_from(target, pandas_dtype=True) # special case: if one dtype is uint64 and the other a signed int, return object # See https://github.com/pandas-dev/pandas/issues/26778 for discussion # Now it's: # * float | [u]int -> float # * uint64 | signed int -> object # We may change union(float | [u]int) to go to object. if self.dtype == "uint64" or target_dtype == "uint64": if is_signed_integer_dtype(self.dtype) or is_signed_integer_dtype( target_dtype ): return _dtype_obj dtype = find_result_type(self._values, target) dtype = common_dtype_categorical_compat([self, target], dtype) return dtype def _should_compare(self, other: Index) -> bool: """ Check if `self == other` can ever have non-False entries. """ if (is_bool_dtype(other) and is_any_real_numeric_dtype(self)) or ( is_bool_dtype(self) and is_any_real_numeric_dtype(other) ): # GH#16877 Treat boolean labels passed to a numeric index as not # found. Without this fix False and True would be treated as 0 and 1 # respectively. return False other = _unpack_nested_dtype(other) dtype = other.dtype return self._is_comparable_dtype(dtype) or is_object_dtype(dtype) def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: """ Can we compare values of the given dtype to our own? """ if self.dtype.kind == "b": return dtype.kind == "b" elif is_numeric_dtype(self.dtype): return is_numeric_dtype(dtype) # TODO: this was written assuming we only get here with object-dtype, # which is nom longer correct. Can we specialize for EA? return True def groupby(self, values) -> PrettyDict[Hashable, np.ndarray]: """ Group the index labels by a given array of values. Parameters ---------- values : array Values used to determine the groups. Returns ------- dict {group name -> group labels} """ # TODO: if we are a MultiIndex, we can do better # that converting to tuples if isinstance(values, ABCMultiIndex): values = values._values values = Categorical(values) result = values._reverse_indexer() # map to the label result = {k: self.take(v) for k, v in result.items()} return PrettyDict(result) def map(self, mapper, na_action=None): """ Map values using an input mapping or function. Parameters ---------- mapper : function, dict, or Series Mapping correspondence. na_action : {None, 'ignore'} If 'ignore', propagate NA values, without passing them to the mapping correspondence. Returns ------- Union[Index, MultiIndex] The output of the mapping function applied to the index. If the function returns a tuple with more than one element a MultiIndex will be returned. """ from pandas.core.indexes.multi import MultiIndex new_values = self._map_values(mapper, na_action=na_action) # we can return a MultiIndex if new_values.size and isinstance(new_values[0], tuple): if isinstance(self, MultiIndex): names = self.names elif self.name: names = [self.name] * len(new_values[0]) else: names = None return MultiIndex.from_tuples(new_values, names=names) dtype = None if not new_values.size: # empty dtype = self.dtype # e.g. if we are floating and new_values is all ints, then we # don't want to cast back to floating. But if we are UInt64 # and new_values is all ints, we want to try. 
same_dtype = lib.infer_dtype(new_values, skipna=False) == self.inferred_type if same_dtype: new_values = maybe_cast_pointwise_result( new_values, self.dtype, same_dtype=same_dtype ) return Index._with_infer(new_values, dtype=dtype, copy=False, name=self.name) # TODO: De-duplicate with map, xref GH#32349 def _transform_index(self, func, *, level=None) -> Index: """ Apply function to all values found in index. This includes transforming MultiIndex entries separately. Only apply function to one level of the MultiIndex if level is specified. """ if isinstance(self, ABCMultiIndex): values = [ self.get_level_values(i).map(func) if i == level or level is None else self.get_level_values(i) for i in range(self.nlevels) ] return type(self).from_arrays(values) else: items = [func(x) for x in self] return Index(items, name=self.name, tupleize_cols=False) def isin(self, values, level=None) -> npt.NDArray[np.bool_]: """ Return a boolean array where the index values are in `values`. Compute boolean array of whether each index value is found in the passed set of values. The length of the returned boolean array matches the length of the index. Parameters ---------- values : set or list-like Sought values. level : str or int, optional Name or position of the index level to use (if the index is a `MultiIndex`). Returns ------- np.ndarray[bool] NumPy array of boolean values. See Also -------- Series.isin : Same for Series. DataFrame.isin : Same method for DataFrames. Notes ----- In the case of `MultiIndex` you must either specify `values` as a list-like object containing tuples that are the same length as the number of levels, or specify `level`. Otherwise it will raise a ``ValueError``. If `level` is specified: - if it is the name of one *and only one* index level, use that level; - otherwise it should be a number indicating level position. Examples -------- >>> idx = pd.Index([1,2,3]) >>> idx Index([1, 2, 3], dtype='int64') Check whether each index value is in a list of values. >>> idx.isin([1, 4]) array([ True, False, False]) >>> midx = pd.MultiIndex.from_arrays([[1,2,3], ... ['red', 'blue', 'green']], ... names=('number', 'color')) >>> midx MultiIndex([(1, 'red'), (2, 'blue'), (3, 'green')], names=['number', 'color']) Check whether the strings in the 'color' level of the MultiIndex are in a list of colors. >>> midx.isin(['red', 'orange', 'yellow'], level='color') array([ True, False, False]) To check across the levels of a MultiIndex, pass a list of tuples: >>> midx.isin([(1, 'red'), (3, 'red')]) array([ True, False, False]) For a DatetimeIndex, string values in `values` are converted to Timestamps. >>> dates = ['2000-03-11', '2000-03-12', '2000-03-13'] >>> dti = pd.to_datetime(dates) >>> dti DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'], dtype='datetime64[ns]', freq=None) >>> dti.isin(['2000-03-11']) array([ True, False, False]) """ if level is not None: self._validate_index_level(level) return algos.isin(self._values, values) def _get_string_slice(self, key: str_t): # this is for partial string indexing, # overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex raise NotImplementedError def slice_indexer( self, start: Hashable | None = None, end: Hashable | None = None, step: int | None = None, ) -> slice: """ Compute the slice indexer for input labels and step. Index needs to be ordered and unique. Parameters ---------- start : label, default None If None, defaults to the beginning. end : label, default None If None, defaults to the end.
step : int, default None Returns ------- slice Raises ------ KeyError : If key does not exist, or key is not unique and index is not ordered. Notes ----- This function assumes that the data is sorted, so use at your own peril. Examples -------- This is a method on all index types. For example you can do: >>> idx = pd.Index(list('abcd')) >>> idx.slice_indexer(start='b', end='c') slice(1, 3, None) >>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')]) >>> idx.slice_indexer(start='b', end=('c', 'g')) slice(1, 3, None) """ start_slice, end_slice = self.slice_locs(start, end, step=step) # return a slice if not is_scalar(start_slice): raise AssertionError("Start slice bound is non-scalar") if not is_scalar(end_slice): raise AssertionError("End slice bound is non-scalar") return slice(start_slice, end_slice, step) def _maybe_cast_indexer(self, key): """ If we have a float key and are not a floating index, then try to cast to an int if equivalent. """ return key def _maybe_cast_listlike_indexer(self, target) -> Index: """ Analogue to maybe_cast_indexer for get_indexer instead of get_loc. """ return ensure_index(target) def _validate_indexer(self, form: str_t, key, kind: str_t) -> None: """ If we are a positional indexer, validate that the key is of the appropriate type, i.e. an integer. """ assert kind in ["getitem", "iloc"] if key is not None and not is_integer(key): self._raise_invalid_indexer(form, key) def _maybe_cast_slice_bound(self, label, side: str_t): """ This function should be overloaded in subclasses that allow non-trivial casting on label-slice bounds, e.g. datetime-like indices allowing strings containing formatted datetimes. Parameters ---------- label : object side : {'left', 'right'} Returns ------- label : object Notes ----- Value of `side` parameter should be validated in caller. """ # We are a plain index here (subclasses override this method if they # wish to have special treatment for floats/ints, e.g. datetimelike Indexes) if is_numeric_dtype(self.dtype): return self._maybe_cast_indexer(label) # reject them if the index does not contain the label if (is_float(label) or is_integer(label)) and label not in self: self._raise_invalid_indexer("slice", label) return label def _searchsorted_monotonic(self, label, side: Literal["left", "right"] = "left"): if self.is_monotonic_increasing: return self.searchsorted(label, side=side) elif self.is_monotonic_decreasing: # np.searchsorted expects ascending sort order, have to reverse # everything for it to work (element ordering, search side and # resulting value). pos = self[::-1].searchsorted( label, side="right" if side == "left" else "left" ) return len(self) - pos raise ValueError("index must be monotonic increasing or decreasing") def get_slice_bound(self, label, side: Literal["left", "right"]) -> int: """ Calculate slice bound that corresponds to given label. Returns leftmost (one-past-the-rightmost if ``side=='right'``) position of given label. Parameters ---------- label : object side : {'left', 'right'} Returns ------- int Index of label. """ if side not in ("left", "right"): raise ValueError( "Invalid value for side kwarg, must be either " f"'left' or 'right': {side}" ) original_label = label # For datetime indices label may be a string that has to be converted # to datetime boundary according to its resolution.
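# e.g. on a DatetimeIndex, a 'left' bound resolves '2015' to the start of 2015 and a 'right' bound to its end.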
label = self._maybe_cast_slice_bound(label, side) # we need to look up the label try: slc = self.get_loc(label) except KeyError as err: try: return self._searchsorted_monotonic(label, side) except ValueError: # raise the original KeyError raise err if isinstance(slc, np.ndarray): # get_loc may return a boolean array, which # is OK as long as they are representable by a slice. assert is_bool_dtype(slc.dtype) slc = lib.maybe_booleans_to_slice(slc.view("u1")) if isinstance(slc, np.ndarray): raise KeyError( f"Cannot get {side} slice bound for non-unique " f"label: {repr(original_label)}" ) if isinstance(slc, slice): if side == "left": return slc.start else: return slc.stop else: if side == "right": return slc + 1 else: return slc def slice_locs(self, start=None, end=None, step=None) -> tuple[int, int]: """ Compute slice locations for input labels. Parameters ---------- start : label, default None If None, defaults to the beginning. end : label, default None If None, defaults to the end. step : int, default None If None, defaults to 1. Returns ------- tuple[int, int] See Also -------- Index.get_loc : Get location for a single label. Notes ----- This method only works if the index is monotonic or unique. Examples -------- >>> idx = pd.Index(list('abcd')) >>> idx.slice_locs(start='b', end='c') (1, 3) """ inc = step is None or step >= 0 if not inc: # If it's a reverse slice, temporarily swap bounds. start, end = end, start # GH 16785: If start and end happen to be date strings with UTC offsets # attempt to parse and check that the offsets are the same if isinstance(start, (str, datetime)) and isinstance(end, (str, datetime)): try: ts_start = Timestamp(start) ts_end = Timestamp(end) except (ValueError, TypeError): pass else: if not tz_compare(ts_start.tzinfo, ts_end.tzinfo): raise ValueError("Both dates must have the same UTC offset") start_slice = None if start is not None: start_slice = self.get_slice_bound(start, "left") if start_slice is None: start_slice = 0 end_slice = None if end is not None: end_slice = self.get_slice_bound(end, "right") if end_slice is None: end_slice = len(self) if not inc: # Bounds at this moment are swapped, swap them back and shift by 1. # # slice_locs('B', 'A', step=-1): s='B', e='A' # # s='A' e='B' # AFTER SWAP: | | # v ------------------> V # ----------------------------------- # | | |A|A|A|A| | | | | |B|B| | | | | # ----------------------------------- # ^ <------------------ ^ # SHOULD BE: | | # end=s-1 start=e-1 # end_slice, start_slice = start_slice - 1, end_slice - 1 # i == -1 triggers ``len(self) + i`` selection that points to the # last element, not before-the-first one; subtracting len(self) # compensates for that. if end_slice == -1: end_slice -= len(self) if start_slice == -1: start_slice -= len(self) return start_slice, end_slice def delete(self: _IndexT, loc) -> _IndexT: """ Make new Index with passed location(-s) deleted. Parameters ---------- loc : int or list of int Location of item(-s) which will be deleted. Use a list of locations to delete more than one value at the same time. Returns ------- Index Will be same type as self, except for RangeIndex. See Also -------- numpy.delete : Delete rows and columns from a NumPy array (ndarray).
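Index.drop : Make new Index with passed list of labels deleted.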
Examples -------- >>> idx = pd.Index(['a', 'b', 'c']) >>> idx.delete(1) Index(['a', 'c'], dtype='object') >>> idx = pd.Index(['a', 'b', 'c']) >>> idx.delete([0, 2]) Index(['b'], dtype='object') """ values = self._values res_values: ArrayLike if isinstance(values, np.ndarray): # TODO(__array_function__): special casing will be unnecessary res_values = np.delete(values, loc) else: res_values = values.delete(loc) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(res_values, name=self.name) def insert(self, loc: int, item) -> Index: """ Make new Index inserting new item at location. Follows Python numpy.insert semantics for negative values. Parameters ---------- loc : int item : object Returns ------- Index """ item = lib.item_from_zerodim(item) if is_valid_na_for_dtype(item, self.dtype) and self.dtype != object: item = self._na_value arr = self._values try: if isinstance(arr, ExtensionArray): res_values = arr.insert(loc, item) return type(self)._simple_new(res_values, name=self.name) else: item = self._validate_fill_value(item) except (TypeError, ValueError, LossySetitemError): # e.g. trying to insert an integer into a DatetimeIndex # We cannot keep the same dtype, so cast to the (often object) # minimal shared dtype before doing the insert. dtype = self._find_common_type_compat(item) return self.astype(dtype).insert(loc, item) if arr.dtype != object or not isinstance( item, (tuple, np.datetime64, np.timedelta64) ): # with object-dtype we need to worry about numpy incorrectly casting # dt64/td64 to integer, also about treating tuples as sequences # special-casing dt64/td64 https://github.com/numpy/numpy/issues/12550 casted = arr.dtype.type(item) new_values = np.insert(arr, loc, casted) else: # error: No overload variant of "insert" matches argument types # "ndarray[Any, Any]", "int", "None" new_values = np.insert(arr, loc, None) # type: ignore[call-overload] loc = loc if loc >= 0 else loc - 1 new_values[loc] = item return Index._with_infer(new_values, name=self.name) def drop( self, labels: Index | np.ndarray | Iterable[Hashable], errors: IgnoreRaise = "raise", ) -> Index: """ Make new Index with passed list of labels deleted. Parameters ---------- labels : array-like or scalar errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and existing labels are dropped. Returns ------- Index Will be same type as self, except for RangeIndex. Raises ------ KeyError If not all of the labels are found in the selected axis """ if not isinstance(labels, Index): # avoid materializing e.g. RangeIndex arr_dtype = "object" if self.dtype == "object" else None labels = com.index_labels_to_array(labels, dtype=arr_dtype) indexer = self.get_indexer_for(labels) mask = indexer == -1 if mask.any(): if errors != "ignore": raise KeyError(f"{list(labels[mask])} not found in axis") indexer = indexer[~mask] return self.delete(indexer) def infer_objects(self, copy: bool = True) -> Index: """ If we have an object dtype, try to infer a non-object dtype. Parameters ---------- copy : bool, default True Whether to make a copy in cases where no inference occurs. """ if self._is_multi: raise NotImplementedError( "infer_objects is not implemented for MultiIndex. " "Use index.to_frame().infer_objects() instead." 
) if self.dtype != object: return self.copy() if copy else self values = self._values values = cast("npt.NDArray[np.object_]", values) res_values = lib.maybe_convert_objects( values, convert_datetime=True, convert_timedelta=True, convert_period=True, convert_interval=True, ) if copy and res_values is values: return self.copy() result = Index(res_values, name=self.name) if not copy and res_values is values and self._references is not None: result._references = self._references result._references.add_index_reference(result) return result # -------------------------------------------------------------------- # Generated Arithmetic, Comparison, and Unary Methods def _cmp_method(self, other, op): """ Wrapper used to dispatch comparison operations. """ if self.is_(other): # fastpath if op in {operator.eq, operator.le, operator.ge}: arr = np.ones(len(self), dtype=bool) if self._can_hold_na and not isinstance(self, ABCMultiIndex): # TODO: should set MultiIndex._can_hold_na = False? arr[self.isna()] = False return arr elif op is operator.ne: arr = np.zeros(len(self), dtype=bool) if self._can_hold_na and not isinstance(self, ABCMultiIndex): arr[self.isna()] = True return arr if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)) and len( self ) != len(other): raise ValueError("Lengths must match to compare") if not isinstance(other, ABCMultiIndex): other = extract_array(other, extract_numpy=True) else: other = np.asarray(other) if is_object_dtype(self.dtype) and isinstance(other, ExtensionArray): # e.g. PeriodArray, Categorical with np.errstate(all="ignore"): result = op(self._values, other) elif isinstance(self._values, ExtensionArray): result = op(self._values, other) elif is_object_dtype(self.dtype) and not isinstance(self, ABCMultiIndex): # don't pass MultiIndex with np.errstate(all="ignore"): result = ops.comp_method_OBJECT_ARRAY(op, self._values, other) else: with np.errstate(all="ignore"): result = ops.comparison_op(self._values, other, op) return result def _logical_method(self, other, op): res_name = ops.get_op_result_name(self, other) lvalues = self._values rvalues = extract_array(other, extract_numpy=True, extract_range=True) res_values = ops.logical_op(lvalues, rvalues, op) return self._construct_result(res_values, name=res_name) def _construct_result(self, result, name): if isinstance(result, tuple): return ( Index(result[0], name=name, dtype=result[0].dtype), Index(result[1], name=name, dtype=result[1].dtype), ) return Index(result, name=name, dtype=result.dtype) def _arith_method(self, other, op): if ( isinstance(other, Index) and is_object_dtype(other.dtype) and type(other) is not Index ): # We return NotImplemented for object-dtype index *subclasses* so they have # a chance to implement ops before we unwrap them. # See https://github.com/pandas-dev/pandas/issues/31109 return NotImplemented return super()._arith_method(other, op) def _unary_method(self, op): result = op(self._values) return Index(result, name=self.name) def __abs__(self) -> Index: return self._unary_method(operator.abs) def __neg__(self) -> Index: return self._unary_method(operator.neg) def __pos__(self) -> Index: return self._unary_method(operator.pos) def __invert__(self) -> Index: # GH#8875 return self._unary_method(operator.inv) # -------------------------------------------------------------------- # Reductions def any(self, *args, **kwargs): """ Return whether any element is Truthy. Parameters ---------- *args Required for compatibility with numpy. **kwargs Required for compatibility with numpy. 
Returns ------- bool or array-like (if axis is specified) A single element array-like may be converted to bool. See Also -------- Index.all : Return whether all elements are True. Series.all : Return whether all elements are True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to True because these are not equal to zero. Examples -------- >>> index = pd.Index([0, 1, 2]) >>> index.any() True >>> index = pd.Index([0, 0, 0]) >>> index.any() False """ nv.validate_any(args, kwargs) self._maybe_disable_logical_methods("any") # error: Argument 1 to "any" has incompatible type "ArrayLike"; expected # "Union[Union[int, float, complex, str, bytes, generic], Sequence[Union[int, # float, complex, str, bytes, generic]], Sequence[Sequence[Any]], # _SupportsArray]" return np.any(self.values) # type: ignore[arg-type] def all(self, *args, **kwargs): """ Return whether all elements are Truthy. Parameters ---------- *args Required for compatibility with numpy. **kwargs Required for compatibility with numpy. Returns ------- bool or array-like (if axis is specified) A single element array-like may be converted to bool. See Also -------- Index.any : Return whether any element in an Index is True. Series.any : Return whether any element in a Series is True. Series.all : Return whether all elements in a Series are True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to True because these are not equal to zero. Examples -------- True, because nonzero integers are considered True. >>> pd.Index([1, 2, 3]).all() True False, because ``0`` is considered False. >>> pd.Index([0, 1, 2]).all() False """ nv.validate_all(args, kwargs) self._maybe_disable_logical_methods("all") # error: Argument 1 to "all" has incompatible type "ArrayLike"; expected # "Union[Union[int, float, complex, str, bytes, generic], Sequence[Union[int, # float, complex, str, bytes, generic]], Sequence[Sequence[Any]], # _SupportsArray]" return np.all(self.values) # type: ignore[arg-type] def _maybe_disable_logical_methods(self, opname: str_t) -> None: """ raise if this Index subclass does not support any or all. 
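For example MultiIndex, datetime-like, interval, categorical and float dtypes all raise via ``make_invalid_op`` (see the checks below).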
""" if ( isinstance(self, ABCMultiIndex) or needs_i8_conversion(self.dtype) or is_interval_dtype(self.dtype) or is_categorical_dtype(self.dtype) or is_float_dtype(self.dtype) ): # This call will raise make_invalid_op(opname)(self) def argmin(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: nv.validate_argmin(args, kwargs) nv.validate_minmax_axis(axis) if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return -1 return super().argmin(skipna=skipna) def argmax(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: nv.validate_argmax(args, kwargs) nv.validate_minmax_axis(axis) if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return -1 return super().argmax(skipna=skipna) def min(self, axis=None, skipna: bool = True, *args, **kwargs): nv.validate_min(args, kwargs) nv.validate_minmax_axis(axis) if not len(self): return self._na_value if len(self) and self.is_monotonic_increasing: # quick check first = self[0] if not isna(first): return first if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return self._na_value if not self._is_multi and not isinstance(self._values, np.ndarray): return self._values._reduce(name="min", skipna=skipna) return super().min(skipna=skipna) def max(self, axis=None, skipna: bool = True, *args, **kwargs): nv.validate_max(args, kwargs) nv.validate_minmax_axis(axis) if not len(self): return self._na_value if len(self) and self.is_monotonic_increasing: # quick check last = self[-1] if not isna(last): return last if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return self._na_value if not self._is_multi and not isinstance(self._values, np.ndarray): return self._values._reduce(name="max", skipna=skipna) return super().max(skipna=skipna) # -------------------------------------------------------------------- def shape(self) -> Shape: """ Return a tuple of the shape of the underlying data. """ # See GH#27775, GH#27384 for history/reasoning in how this is defined. return (len(self),) The provided code snippet includes necessary dependencies for implementing the `get_flattened_list` function. Write a Python function `def get_flattened_list( comp_ids: npt.NDArray[np.intp], ngroups: int, levels: Iterable[Index], labels: Iterable[np.ndarray], ) -> list[tuple]` to solve the following problem: Map compressed group id -> key tuple. Here is the function: def get_flattened_list( comp_ids: npt.NDArray[np.intp], ngroups: int, levels: Iterable[Index], labels: Iterable[np.ndarray], ) -> list[tuple]: """Map compressed group id -> key tuple.""" comp_ids = comp_ids.astype(np.int64, copy=False) arrays: DefaultDict[int, list[int]] = defaultdict(list) for labs, level in zip(labels, levels): table = hashtable.Int64HashTable(ngroups) table.map_keys_to_values(comp_ids, labs.astype(np.int64, copy=False)) for i in range(ngroups): arrays[i].append(level[table.get_item(i)]) return [tuple(array) for array in arrays.values()]
Map compressed group id -> key tuple.
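A minimal usage sketch of `get_flattened_list` (illustrative only, not part of the record above; the toy inputs and the `pandas.core.sorting` import location are assumptions):

import numpy as np
import pandas as pd
from pandas.core.sorting import get_flattened_list

# Two grouping levels; labels hold the per-level integer codes for each
# row, and comp_ids assigns each row its compressed group id (here every
# row is its own group, so ngroups=3).
levels = [pd.Index(["a", "b"]), pd.Index([10, 20])]
labels = [np.array([0, 0, 1]), np.array([0, 1, 0])]
comp_ids = np.array([0, 1, 2], dtype=np.intp)
get_flattened_list(comp_ids, 3, levels, labels)
# -> [('a', 10), ('a', 20), ('b', 10)]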
173,034
from __future__ import annotations from collections import defaultdict from typing import ( TYPE_CHECKING, Callable, DefaultDict, Hashable, Iterable, Sequence, cast, ) import numpy as np from pandas._libs import ( algos, hashtable, lib, ) from pandas._libs.hashtable import unique_label_indices from pandas._typing import ( AxisInt, IndexKeyFunc, Level, NaPosition, Shape, SortKind, npt, ) from pandas.core.dtypes.common import ( ensure_int64, ensure_platform_int, is_extension_array_dtype, ) from pandas.core.dtypes.generic import ( ABCMultiIndex, ABCRangeIndex, ) from pandas.core.dtypes.missing import isna from pandas.core.construction import extract_array def get_group_index( labels, shape: Shape, sort: bool, xnull: bool ) -> npt.NDArray[np.int64]: """ For the particular label_list, gets the offsets into the hypothetical list representing the totally ordered cartesian product of all possible label combinations, *as long as* this space fits within int64 bounds; otherwise, though group indices identify unique combinations of labels, they cannot be deconstructed. - If `sort`, the ranks of the returned ids preserve the lexical ranks of the labels, i.e. the returned ids can be used to do a lexical sort on the labels; - If `xnull`, nulls (-1 labels) are passed through. Parameters ---------- labels : sequence of arrays Integers identifying levels at each location shape : tuple[int, ...] Number of unique levels at each location sort : bool If the ranks of returned ids should match lexical ranks of labels xnull : bool If true, nulls are excluded, i.e. -1 values in the labels are passed through. Returns ------- An array of type int64 where two elements are equal if their corresponding labels are equal at all locations. Notes ----- The length of `labels` and `shape` must be identical. """ def _int64_cut_off(shape) -> int: acc = 1 for i, mul in enumerate(shape): acc *= int(mul) if not acc < lib.i8max: return i return len(shape) def maybe_lift(lab, size) -> tuple[np.ndarray, int]: # promote nan values (assigned -1 label in lab array) # so that all output values are non-negative return (lab + 1, size + 1) if (lab == -1).any() else (lab, size) labels = [ensure_int64(x) for x in labels] lshape = list(shape) if not xnull: for i, (lab, size) in enumerate(zip(labels, shape)): lab, size = maybe_lift(lab, size) labels[i] = lab lshape[i] = size labels = list(labels) # Iteratively process all the labels in chunks sized so that fewer # than lib.i8max unique int ids will be required for each chunk while True: # how many levels can be done without overflow: nlev = _int64_cut_off(lshape) # compute flat ids for the first `nlev` levels stride = np.prod(lshape[1:nlev], dtype="i8") out = stride * labels[0].astype("i8", subok=False, copy=False) for i in range(1, nlev): if lshape[i] == 0: stride = np.int64(0) else: stride //= lshape[i] out += labels[i] * stride if xnull: # exclude nulls mask = labels[0] == -1 for lab in labels[1:nlev]: mask |= lab == -1 out[mask] = -1 if nlev == len(lshape): # all levels done!
break # compress what has been done so far in order to avoid overflow # to retain lexical ranks, obs_ids should be sorted comp_ids, obs_ids = compress_group_index(out, sort=sort) labels = [comp_ids] + labels[nlev:] lshape = [len(obs_ids)] + lshape[nlev:] return out def is_int64_overflow_possible(shape: Shape) -> bool: the_prod = 1 for x in shape: the_prod *= int(x) return the_prod >= lib.i8max def get_group_index_sorter( group_index: npt.NDArray[np.intp], ngroups: int | None = None ) -> npt.NDArray[np.intp]: """ algos.groupsort_indexer implements `counting sort` and it is at least O(ngroups), where ngroups = prod(shape) shape = map(len, keys) that is, linear in the number of combinations (cartesian product) of unique values of groupby keys. This can be huge when doing multi-key groupby. np.argsort(kind='mergesort') is O(count x log(count)) where count is the length of the data-frame; Both algorithms are `stable` sort and that is necessary for correctness of groupby operations. e.g. consider: df.groupby(key)[col].transform('first') Parameters ---------- group_index : np.ndarray[np.intp] signed integer dtype ngroups : int or None, default None Returns ------- np.ndarray[np.intp] """ if ngroups is None: ngroups = 1 + group_index.max() count = len(group_index) alpha = 0.0 # taking complexities literally; there may be beta = 1.0 # some room for fine-tuning these parameters do_groupsort = count > 0 and ((alpha + beta * ngroups) < (count * np.log(count))) if do_groupsort: sorter, _ = algos.groupsort_indexer( ensure_platform_int(group_index), ngroups, ) # sorter _should_ already be intp, but mypy is not yet able to verify else: sorter = group_index.argsort(kind="mergesort") return ensure_platform_int(sorter) class Hashable(Protocol, metaclass=ABCMeta): # TODO: This is special, in that a subclass of a hashable class may not be hashable # (for example, list vs. object). It's not obvious how to represent this. This class # is currently mostly useless for static checking. def __hash__(self) -> int: ... class Index(IndexOpsMixin, PandasObject): """ Immutable sequence used for indexing and alignment. The basic object storing axis labels for all pandas objects. .. versionchanged:: 2.0.0 Index can hold all numpy numeric dtypes (except float16). Previously only int64/uint64/float64 dtypes were accepted. Parameters ---------- data : array-like (1-dimensional) dtype : NumPy dtype (default: object) If dtype is None, we find the dtype that best fits the data. If an actual dtype is provided, we coerce to that dtype if it's safe. Otherwise, an error will be raised. copy : bool Make a copy of input ndarray. name : object Name to be stored in the index. tupleize_cols : bool (default: True) When True, attempt to create a MultiIndex if possible. See Also -------- RangeIndex : Index implementing a monotonic integer range. CategoricalIndex : Index of :class:`Categorical` s. MultiIndex : A multi-level, or hierarchical Index. IntervalIndex : An Index of :class:`Interval` s. DatetimeIndex : Index of datetime64 data. TimedeltaIndex : Index of timedelta64 data. PeriodIndex : Index of Period data. Notes ----- An Index instance can **only** contain hashable objects. An Index instance *can not* hold numpy float16 dtype. 
Examples -------- >>> pd.Index([1, 2, 3]) Index([1, 2, 3], dtype='int64') >>> pd.Index(list('abc')) Index(['a', 'b', 'c'], dtype='object') >>> pd.Index([1, 2, 3], dtype="uint8") Index([1, 2, 3], dtype='uint8') """ # To hand over control to subclasses _join_precedence = 1 # Cython methods; see github.com/cython/cython/issues/2647 # for why we need to wrap these instead of making them class attributes # Moreover, cython will choose the appropriate-dtyped sub-function # given the dtypes of the passed arguments def _left_indexer_unique(self: _IndexT, other: _IndexT) -> npt.NDArray[np.intp]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) # similar but not identical to ov.searchsorted(sv) return libjoin.left_join_indexer_unique(sv, ov) def _left_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.left_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx def _inner_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.inner_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx def _outer_indexer( self: _IndexT, other: _IndexT ) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: # Caller is responsible for ensuring other.dtype == self.dtype sv = self._get_join_target() ov = other._get_join_target() # can_use_libjoin assures sv and ov are ndarrays sv = cast(np.ndarray, sv) ov = cast(np.ndarray, ov) joined_ndarray, lidx, ridx = libjoin.outer_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return joined, lidx, ridx _typ: str = "index" _data: ExtensionArray | np.ndarray _data_cls: type[ExtensionArray] | tuple[type[np.ndarray], type[ExtensionArray]] = ( np.ndarray, ExtensionArray, ) _id: object | None = None _name: Hashable = None # MultiIndex.levels previously allowed setting the index name. We # don't allow this anymore, and raise if it happens rather than # failing silently. 
_no_setting_name: bool = False _comparables: list[str] = ["name"] _attributes: list[str] = ["name"] def _can_hold_strings(self) -> bool: return not is_numeric_dtype(self) _engine_types: dict[np.dtype | ExtensionDtype, type[libindex.IndexEngine]] = { np.dtype(np.int8): libindex.Int8Engine, np.dtype(np.int16): libindex.Int16Engine, np.dtype(np.int32): libindex.Int32Engine, np.dtype(np.int64): libindex.Int64Engine, np.dtype(np.uint8): libindex.UInt8Engine, np.dtype(np.uint16): libindex.UInt16Engine, np.dtype(np.uint32): libindex.UInt32Engine, np.dtype(np.uint64): libindex.UInt64Engine, np.dtype(np.float32): libindex.Float32Engine, np.dtype(np.float64): libindex.Float64Engine, np.dtype(np.complex64): libindex.Complex64Engine, np.dtype(np.complex128): libindex.Complex128Engine, } def _engine_type( self, ) -> type[libindex.IndexEngine] | type[libindex.ExtensionEngine]: return self._engine_types.get(self.dtype, libindex.ObjectEngine) # whether we support partial string indexing. Overridden # in DatetimeIndex and PeriodIndex _supports_partial_string_indexing = False _accessors = {"str"} str = CachedAccessor("str", StringMethods) _references = None # -------------------------------------------------------------------- # Constructors def __new__( cls, data=None, dtype=None, copy: bool = False, name=None, tupleize_cols: bool = True, ) -> Index: from pandas.core.indexes.range import RangeIndex name = maybe_extract_name(name, data, cls) if dtype is not None: dtype = pandas_dtype(dtype) data_dtype = getattr(data, "dtype", None) refs = None if not copy and isinstance(data, (ABCSeries, Index)): refs = data._references # range if isinstance(data, (range, RangeIndex)): result = RangeIndex(start=data, copy=copy, name=name) if dtype is not None: return result.astype(dtype, copy=False) return result elif is_ea_or_datetimelike_dtype(dtype): # non-EA dtype indexes have special casting logic, so we punt here pass elif is_ea_or_datetimelike_dtype(data_dtype): pass elif isinstance(data, (np.ndarray, Index, ABCSeries)): if isinstance(data, ABCMultiIndex): data = data._values if data.dtype.kind not in ["i", "u", "f", "b", "c", "m", "M"]: # GH#11836 we need to avoid having numpy coerce # things that look like ints/floats to ints unless # they are actually ints, e.g. '0' and 0.0 # should not be coerced data = com.asarray_tuplesafe(data, dtype=_dtype_obj) elif is_scalar(data): raise cls._raise_scalar_data_error(data) elif hasattr(data, "__array__"): return Index(np.asarray(data), dtype=dtype, copy=copy, name=name) elif not is_list_like(data) and not isinstance(data, memoryview): # 2022-11-16 the memoryview check is only necessary on some CI # builds, not clear why raise cls._raise_scalar_data_error(data) else: if tupleize_cols: # GH21470: convert iterable to list before determining if empty if is_iterator(data): data = list(data) if data and all(isinstance(e, tuple) for e in data): # we must be all tuples, otherwise don't construct # 10697 from pandas.core.indexes.multi import MultiIndex return MultiIndex.from_tuples(data, names=name) # other iterable of some kind if not isinstance(data, (list, tuple)): # we allow set/frozenset, which Series/sanitize_array does not, so # cast to list here data = list(data) if len(data) == 0: # unlike Series, we default to object dtype: data = np.array(data, dtype=object) if len(data) and isinstance(data[0], tuple): # Ensure we get 1-D array of tuples instead of 2D array. 
data = com.asarray_tuplesafe(data, dtype=_dtype_obj) try: arr = sanitize_array(data, None, dtype=dtype, copy=copy) except ValueError as err: if "index must be specified when data is not list-like" in str(err): raise cls._raise_scalar_data_error(data) from err if "Data must be 1-dimensional" in str(err): raise ValueError("Index data must be 1-dimensional") from err raise arr = ensure_wrapped_if_datetimelike(arr) klass = cls._dtype_to_subclass(arr.dtype) arr = klass._ensure_array(arr, arr.dtype, copy=False) return klass._simple_new(arr, name, refs=refs) def _ensure_array(cls, data, dtype, copy: bool): """ Ensure we have a valid array to pass to _simple_new. """ if data.ndim > 1: # GH#13601, GH#20285, GH#27125 raise ValueError("Index data must be 1-dimensional") elif dtype == np.float16: # float16 not supported (no indexing engine) raise NotImplementedError("float16 indexes are not supported") if copy: # asarray_tuplesafe does not always copy underlying data, # so need to make sure that this happens data = data.copy() return data def _dtype_to_subclass(cls, dtype: DtypeObj): # Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423 if isinstance(dtype, ExtensionDtype): if isinstance(dtype, DatetimeTZDtype): from pandas import DatetimeIndex return DatetimeIndex elif isinstance(dtype, CategoricalDtype): from pandas import CategoricalIndex return CategoricalIndex elif isinstance(dtype, IntervalDtype): from pandas import IntervalIndex return IntervalIndex elif isinstance(dtype, PeriodDtype): from pandas import PeriodIndex return PeriodIndex return Index if dtype.kind == "M": from pandas import DatetimeIndex return DatetimeIndex elif dtype.kind == "m": from pandas import TimedeltaIndex return TimedeltaIndex elif dtype.kind == "O": # NB: assuming away MultiIndex return Index elif issubclass(dtype.type, str) or is_numeric_dtype(dtype): return Index raise NotImplementedError(dtype) # NOTE for new Index creation: # - _simple_new: It returns new Index with the same type as the caller. # All metadata (such as name) must be provided by caller's responsibility. # Using _shallow_copy is recommended because it fills these metadata # otherwise specified. # - _shallow_copy: It returns new Index with the same type (using # _simple_new), but fills caller's metadata otherwise specified. Passed # kwargs will overwrite corresponding metadata. # See each method's docstring. def _simple_new( cls: type[_IndexT], values: ArrayLike, name: Hashable = None, refs=None ) -> _IndexT: """ We require that we have a dtype compat for the values. If we are passed a non-dtype compat, then coerce using the constructor. Must be careful not to recurse. """ assert isinstance(values, cls._data_cls), type(values) result = object.__new__(cls) result._data = values result._name = name result._cache = {} result._reset_identity() if refs is not None: result._references = refs else: result._references = BlockValuesRefs() result._references.add_index_reference(result) return result def _with_infer(cls, *args, **kwargs): """ Constructor that uses the 1.0.x behavior inferring numeric dtypes for ndarray[object] inputs. 
""" result = cls(*args, **kwargs) if result.dtype == _dtype_obj and not result._is_multi: # error: Argument 1 to "maybe_convert_objects" has incompatible type # "Union[ExtensionArray, ndarray[Any, Any]]"; expected # "ndarray[Any, Any]" values = lib.maybe_convert_objects(result._values) # type: ignore[arg-type] if values.dtype.kind in ["i", "u", "f", "b"]: return Index(values, name=result.name) return result def _constructor(self: _IndexT) -> type[_IndexT]: return type(self) def _maybe_check_unique(self) -> None: """ Check that an Index has no duplicates. This is typically only called via `NDFrame.flags.allows_duplicate_labels.setter` when it's set to True (duplicates aren't allowed). Raises ------ DuplicateLabelError When the index is not unique. """ if not self.is_unique: msg = """Index has duplicates.""" duplicates = self._format_duplicate_message() msg += f"\n{duplicates}" raise DuplicateLabelError(msg) def _format_duplicate_message(self) -> DataFrame: """ Construct the DataFrame for a DuplicateLabelError. This returns a DataFrame indicating the labels and positions of duplicates in an index. This should only be called when it's already known that duplicates are present. Examples -------- >>> idx = pd.Index(['a', 'b', 'a']) >>> idx._format_duplicate_message() positions label a [0, 2] """ from pandas import Series duplicates = self[self.duplicated(keep="first")].unique() assert len(duplicates) out = Series(np.arange(len(self))).groupby(self).agg(list)[duplicates] if self._is_multi: # test_format_duplicate_labels_message_multi # error: "Type[Index]" has no attribute "from_tuples" [attr-defined] out.index = type(self).from_tuples(out.index) # type: ignore[attr-defined] if self.nlevels == 1: out = out.rename_axis("label") return out.to_frame(name="positions") # -------------------------------------------------------------------- # Index Internals Methods def _shallow_copy(self: _IndexT, values, name: Hashable = no_default) -> _IndexT: """ Create a new Index with the same class as the caller, don't copy the data, use the same object attributes with passed in attributes taking precedence. *this is an internal non-public method* Parameters ---------- values : the values to create the new Index, optional name : Label, defaults to self.name """ name = self._name if name is no_default else name return self._simple_new(values, name=name, refs=self._references) def _view(self: _IndexT) -> _IndexT: """ fastpath to make a shallow copy, i.e. new object with same data. """ result = self._simple_new(self._values, name=self._name, refs=self._references) result._cache = self._cache return result def _rename(self: _IndexT, name: Hashable) -> _IndexT: """ fastpath for rename if new name is already validated. """ result = self._view() result._name = name return result def is_(self, other) -> bool: """ More flexible, faster check like ``is`` but that works through views. Note: this is *not* the same as ``Index.identical()``, which checks that metadata is also the same. Parameters ---------- other : object Other object to compare against. Returns ------- bool True if both have same underlying data, False otherwise. See Also -------- Index.identical : Works like ``Index.is_`` but also checks metadata. """ if self is other: return True elif not hasattr(other, "_id"): return False elif self._id is None or other._id is None: return False else: return self._id is other._id def _reset_identity(self) -> None: """ Initializes or resets ``_id`` attribute with new object. 
""" self._id = object() def _cleanup(self) -> None: self._engine.clear_mapping() def _engine( self, ) -> libindex.IndexEngine | libindex.ExtensionEngine | libindex.MaskedIndexEngine: # For base class (object dtype) we get ObjectEngine target_values = self._get_engine_target() if isinstance(target_values, ExtensionArray): if isinstance(target_values, (BaseMaskedArray, ArrowExtensionArray)): try: return _masked_engines[target_values.dtype.name](target_values) except KeyError: # Not supported yet e.g. decimal pass elif self._engine_type is libindex.ObjectEngine: return libindex.ExtensionEngine(target_values) target_values = cast(np.ndarray, target_values) # to avoid a reference cycle, bind `target_values` to a local variable, so # `self` is not passed into the lambda. if target_values.dtype == bool: return libindex.BoolEngine(target_values) elif target_values.dtype == np.complex64: return libindex.Complex64Engine(target_values) elif target_values.dtype == np.complex128: return libindex.Complex128Engine(target_values) elif needs_i8_conversion(self.dtype): # We need to keep M8/m8 dtype when initializing the Engine, # but don't want to change _get_engine_target bc it is used # elsewhere # error: Item "ExtensionArray" of "Union[ExtensionArray, # ndarray[Any, Any]]" has no attribute "_ndarray" [union-attr] target_values = self._data._ndarray # type: ignore[union-attr] # error: Argument 1 to "ExtensionEngine" has incompatible type # "ndarray[Any, Any]"; expected "ExtensionArray" return self._engine_type(target_values) # type: ignore[arg-type] def _dir_additions_for_owner(self) -> set[str_t]: """ Add the string-like labels to the owner dataframe/series dir output. If this is a MultiIndex, it's first level values are used. """ return { c for c in self.unique(level=0)[: get_option("display.max_dir_items")] if isinstance(c, str) and c.isidentifier() } # -------------------------------------------------------------------- # Array-Like Methods # ndarray compat def __len__(self) -> int: """ Return the length of the Index. """ return len(self._data) def __array__(self, dtype=None) -> np.ndarray: """ The array interface, return my values. """ return np.asarray(self._data, dtype=dtype) def __array_ufunc__(self, ufunc: np.ufunc, method: str_t, *inputs, **kwargs): if any(isinstance(other, (ABCSeries, ABCDataFrame)) for other in inputs): return NotImplemented result = arraylike.maybe_dispatch_ufunc_to_dunder_op( self, ufunc, method, *inputs, **kwargs ) if result is not NotImplemented: return result if "out" in kwargs: # e.g. test_dti_isub_tdi return arraylike.dispatch_ufunc_with_out( self, ufunc, method, *inputs, **kwargs ) if method == "reduce": result = arraylike.dispatch_reduction_ufunc( self, ufunc, method, *inputs, **kwargs ) if result is not NotImplemented: return result new_inputs = [x if x is not self else x._values for x in inputs] result = getattr(ufunc, method)(*new_inputs, **kwargs) if ufunc.nout == 2: # i.e. np.divmod, np.modf, np.frexp return tuple(self.__array_wrap__(x) for x in result) if result.dtype == np.float16: result = result.astype(np.float32) return self.__array_wrap__(result) def __array_wrap__(self, result, context=None): """ Gets called after a ufunc and other functions e.g. np.split. """ result = lib.item_from_zerodim(result) if is_bool_dtype(result) or lib.is_scalar(result) or np.ndim(result) > 1: return result return Index(result, name=self.name) def dtype(self) -> DtypeObj: """ Return the dtype object of the underlying data. 
""" return self._data.dtype def ravel(self, order: str_t = "C") -> Index: """ Return a view on self. Returns ------- Index See Also -------- numpy.ndarray.ravel : Return a flattened array. """ return self[:] def view(self, cls=None): # we need to see if we are subclassing an # index type here if cls is not None and not hasattr(cls, "_typ"): dtype = cls if isinstance(cls, str): dtype = pandas_dtype(cls) if isinstance(dtype, (np.dtype, ExtensionDtype)) and needs_i8_conversion( dtype ): if dtype.kind == "m" and dtype != "m8[ns]": # e.g. m8[s] return self._data.view(cls) idx_cls = self._dtype_to_subclass(dtype) # NB: we only get here for subclasses that override # _data_cls such that it is a type and not a tuple # of types. arr_cls = idx_cls._data_cls arr = arr_cls(self._data.view("i8"), dtype=dtype) return idx_cls._simple_new(arr, name=self.name, refs=self._references) result = self._data.view(cls) else: result = self._view() if isinstance(result, Index): result._id = self._id return result def astype(self, dtype, copy: bool = True): """ Create an Index with values cast to dtypes. The class of a new Index is determined by dtype. When conversion is impossible, a TypeError exception is raised. Parameters ---------- dtype : numpy dtype or pandas type Note that any signed integer `dtype` is treated as ``'int64'``, and any unsigned integer `dtype` is treated as ``'uint64'``, regardless of the size. copy : bool, default True By default, astype always returns a newly allocated object. If copy is set to False and internal requirements on dtype are satisfied, the original data is used to create a new Index or the original Index is returned. Returns ------- Index Index with values cast to specified dtype. """ if dtype is not None: dtype = pandas_dtype(dtype) if is_dtype_equal(self.dtype, dtype): # Ensure that self.astype(self.dtype) is self return self.copy() if copy else self values = self._data if isinstance(values, ExtensionArray): with rewrite_exception(type(values).__name__, type(self).__name__): new_values = values.astype(dtype, copy=copy) elif isinstance(dtype, ExtensionDtype): cls = dtype.construct_array_type() # Note: for RangeIndex and CategoricalDtype self vs self._values # behaves differently here. new_values = cls._from_sequence(self, dtype=dtype, copy=copy) else: # GH#13149 specifically use astype_array instead of astype new_values = astype_array(values, dtype=dtype, copy=copy) # pass copy=False because any copying will be done in the astype above result = Index(new_values, name=self.name, dtype=new_values.dtype, copy=False) if ( not copy and self._references is not None and astype_is_view(self.dtype, dtype) ): result._references = self._references result._references.add_index_reference(result) return result _index_shared_docs[ "take" ] = """ Return a new %(klass)s of the values selected by the indices. For internal compatibility with numpy arrays. Parameters ---------- indices : array-like Indices to be taken. axis : int, optional The axis over which to select values, always 0. allow_fill : bool, default True fill_value : scalar, default None If allow_fill=True and fill_value is not None, indices specified by -1 are regarded as NA. If Index doesn't hold NA, raise ValueError. Returns ------- Index An index formed of elements at the given indices. Will be the same type as self, except for RangeIndex. See Also -------- numpy.ndarray.take: Return an array formed from the elements of a at the given indices. 
""" def take( self, indices, axis: Axis = 0, allow_fill: bool = True, fill_value=None, **kwargs, ): if kwargs: nv.validate_take((), kwargs) if is_scalar(indices): raise TypeError("Expected indices to be array-like") indices = ensure_platform_int(indices) allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices) # Note: we discard fill_value and use self._na_value, only relevant # in the case where allow_fill is True and fill_value is not None values = self._values if isinstance(values, np.ndarray): taken = algos.take( values, indices, allow_fill=allow_fill, fill_value=self._na_value ) else: # algos.take passes 'axis' keyword which not all EAs accept taken = values.take( indices, allow_fill=allow_fill, fill_value=self._na_value ) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(taken, name=self.name) def _maybe_disallow_fill(self, allow_fill: bool, fill_value, indices) -> bool: """ We only use pandas-style take when allow_fill is True _and_ fill_value is not None. """ if allow_fill and fill_value is not None: # only fill if we are passing a non-None fill_value if self._can_hold_na: if (indices < -1).any(): raise ValueError( "When allow_fill=True and fill_value is not None, " "all indices must be >= -1" ) else: cls_name = type(self).__name__ raise ValueError( f"Unable to fill values because {cls_name} cannot contain NA" ) else: allow_fill = False return allow_fill _index_shared_docs[ "repeat" ] = """ Repeat elements of a %(klass)s. Returns a new %(klass)s where each element of the current %(klass)s is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty %(klass)s. axis : None Must be ``None``. Has no effect but is accepted for compatibility with numpy. Returns ------- %(klass)s Newly created %(klass)s with repeated elements. See Also -------- Series.repeat : Equivalent function for Series. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> idx = pd.Index(['a', 'b', 'c']) >>> idx Index(['a', 'b', 'c'], dtype='object') >>> idx.repeat(2) Index(['a', 'a', 'b', 'b', 'c', 'c'], dtype='object') >>> idx.repeat([1, 2, 3]) Index(['a', 'b', 'b', 'c', 'c', 'c'], dtype='object') """ def repeat(self, repeats, axis=None): repeats = ensure_platform_int(repeats) nv.validate_repeat((), {"axis": axis}) res_values = self._values.repeat(repeats) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(res_values, name=self.name) # -------------------------------------------------------------------- # Copying Methods def copy( self: _IndexT, name: Hashable | None = None, deep: bool = False, ) -> _IndexT: """ Make a copy of this object. Name is set on the new object. Parameters ---------- name : Label, optional Set name for new object. deep : bool, default False Returns ------- Index Index refer to new object which is a copy of this object. Notes ----- In most cases, there should be no functional difference from using ``deep``, but if ``deep`` is passed it will attempt to deepcopy. 
""" name = self._validate_names(name=name, deep=deep)[0] if deep: new_data = self._data.copy() new_index = type(self)._simple_new(new_data, name=name) else: new_index = self._rename(name=name) return new_index def __copy__(self: _IndexT, **kwargs) -> _IndexT: return self.copy(**kwargs) def __deepcopy__(self: _IndexT, memo=None) -> _IndexT: """ Parameters ---------- memo, default None Standard signature. Unused """ return self.copy(deep=True) # -------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str_t: """ Return a string representation for this object. """ klass_name = type(self).__name__ data = self._format_data() attrs = self._format_attrs() space = self._format_space() attrs_str = [f"{k}={v}" for k, v in attrs] prepr = f",{space}".join(attrs_str) # no data provided, just attributes if data is None: data = "" return f"{klass_name}({data}{prepr})" def _format_space(self) -> str_t: # using space here controls if the attributes # are line separated or not (the default) # max_seq_items = get_option('display.max_seq_items') # if len(self) > max_seq_items: # space = "\n%s" % (' ' * (len(klass) + 1)) return " " def _formatter_func(self): """ Return the formatter function. """ return default_pprint def _format_data(self, name=None) -> str_t: """ Return the formatted data as a unicode string. """ # do we want to justify (only do so for non-objects) is_justify = True if self.inferred_type == "string": is_justify = False elif self.inferred_type == "categorical": self = cast("CategoricalIndex", self) if is_object_dtype(self.categories): is_justify = False return format_object_summary( self, self._formatter_func, is_justify=is_justify, name=name, line_break_each_value=self._is_multi, ) def _format_attrs(self) -> list[tuple[str_t, str_t | int | bool | None]]: """ Return a list of tuples of the (attr,formatted_value). """ attrs: list[tuple[str_t, str_t | int | bool | None]] = [] if not self._is_multi: attrs.append(("dtype", f"'{self.dtype}'")) if self.name is not None: attrs.append(("name", default_pprint(self.name))) elif self._is_multi and any(x is not None for x in self.names): attrs.append(("names", default_pprint(self.names))) max_seq_items = get_option("display.max_seq_items") or len(self) if len(self) > max_seq_items: attrs.append(("length", len(self))) return attrs def _get_level_names(self) -> Hashable | Sequence[Hashable]: """ Return a name or list of names with None replaced by the level number. """ if self._is_multi: return [ level if name is None else name for level, name in enumerate(self.names) ] else: return 0 if self.name is None else self.name def _mpl_repr(self) -> np.ndarray: # how to represent ourselves to matplotlib if isinstance(self.dtype, np.dtype) and self.dtype.kind != "M": return cast(np.ndarray, self.values) return self.astype(object, copy=False)._values def format( self, name: bool = False, formatter: Callable | None = None, na_rep: str_t = "NaN", ) -> list[str_t]: """ Render a string representation of the Index. 
""" header = [] if name: header.append( pprint_thing(self.name, escape_chars=("\t", "\r", "\n")) if self.name is not None else "" ) if formatter is not None: return header + list(self.map(formatter)) return self._format_with_header(header, na_rep=na_rep) def _format_with_header(self, header: list[str_t], na_rep: str_t) -> list[str_t]: from pandas.io.formats.format import format_array values = self._values if is_object_dtype(values.dtype): values = cast(np.ndarray, values) values = lib.maybe_convert_objects(values, safe=True) result = [pprint_thing(x, escape_chars=("\t", "\r", "\n")) for x in values] # could have nans mask = is_float_nan(values) if mask.any(): result_arr = np.array(result) result_arr[mask] = na_rep result = result_arr.tolist() else: result = trim_front(format_array(values, None, justify="left")) return header + result def _format_native_types( self, *, na_rep: str_t = "", decimal: str_t = ".", float_format=None, date_format=None, quoting=None, ) -> npt.NDArray[np.object_]: """ Actually format specific types of the index. """ from pandas.io.formats.format import FloatArrayFormatter if is_float_dtype(self.dtype) and not is_extension_array_dtype(self.dtype): formatter = FloatArrayFormatter( self._values, na_rep=na_rep, float_format=float_format, decimal=decimal, quoting=quoting, fixed_width=False, ) return formatter.get_result_as_array() mask = isna(self) if not is_object_dtype(self) and not quoting: values = np.asarray(self).astype(str) else: values = np.array(self, dtype=object, copy=True) values[mask] = na_rep return values def _summary(self, name=None) -> str_t: """ Return a summarized representation. Parameters ---------- name : str name to use in the summary representation Returns ------- String with a summarized representation of the index """ if len(self) > 0: head = self[0] if hasattr(head, "format") and not isinstance(head, str): head = head.format() elif needs_i8_conversion(self.dtype): # e.g. Timedelta, display as values, not quoted head = self._formatter_func(head).replace("'", "") tail = self[-1] if hasattr(tail, "format") and not isinstance(tail, str): tail = tail.format() elif needs_i8_conversion(self.dtype): # e.g. Timedelta, display as values, not quoted tail = self._formatter_func(tail).replace("'", "") index_summary = f", {head} to {tail}" else: index_summary = "" if name is None: name = type(self).__name__ return f"{name}: {len(self)} entries{index_summary}" # -------------------------------------------------------------------- # Conversion Methods def to_flat_index(self: _IndexT) -> _IndexT: """ Identity method. This is implemented for compatibility with subclass implementations when chaining. Returns ------- pd.Index Caller. See Also -------- MultiIndex.to_flat_index : Subclass implementation. """ return self def to_series(self, index=None, name: Hashable = None) -> Series: """ Create a Series with both index and values equal to the index keys. Useful with map for returning an indexer based on an index. Parameters ---------- index : Index, optional Index of resulting Series. If None, defaults to original index. name : str, optional Name of resulting Series. If None, defaults to name of original index. Returns ------- Series The dtype will be based on the type of the Index values. See Also -------- Index.to_frame : Convert an Index to a DataFrame. Series.to_frame : Convert Series to DataFrame. Examples -------- >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal') By default, the original Index and original name is reused. 
>>> idx.to_series()
animal
Ant      Ant
Bear    Bear
Cow      Cow
Name: animal, dtype: object

To enforce a new Index, specify new labels to ``index``:

>>> idx.to_series(index=[0, 1, 2])
0     Ant
1    Bear
2     Cow
Name: animal, dtype: object

To override the name of the resulting column, specify `name`:

>>> idx.to_series(name='zoo')
animal
Ant      Ant
Bear    Bear
Cow      Cow
Name: zoo, dtype: object
"""
from pandas import Series

if index is None:
    index = self._view()
if name is None:
    name = self.name

return Series(self._values.copy(), index=index, name=name)

def to_frame(
    self, index: bool = True, name: Hashable = lib.no_default
) -> DataFrame:
    """
    Create a DataFrame with a column containing the Index.

    Parameters
    ----------
    index : bool, default True
        Set the index of the returned DataFrame as the original Index.

    name : object, defaults to index.name
        The passed name should substitute for the index name (if it has
        one).

    Returns
    -------
    DataFrame
        DataFrame containing the original Index data.

    See Also
    --------
    Index.to_series : Convert an Index to a Series.
    Series.to_frame : Convert Series to DataFrame.

    Examples
    --------
    >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')
    >>> idx.to_frame()
           animal
    animal
    Ant       Ant
    Bear     Bear
    Cow       Cow

    By default, the original Index is reused. To enforce a new Index:

    >>> idx.to_frame(index=False)
      animal
    0    Ant
    1   Bear
    2    Cow

    To override the name of the resulting column, specify `name`:

    >>> idx.to_frame(index=False, name='zoo')
        zoo
    0   Ant
    1  Bear
    2   Cow
    """
    from pandas import DataFrame

    if name is lib.no_default:
        name = self._get_level_names()
    result = DataFrame({name: self._values.copy()})

    if index:
        result.index = self
    return result

# --------------------------------------------------------------------
# Name-Centric Methods

@property
def name(self) -> Hashable:
    """
    Return Index or MultiIndex name.
    """
    return self._name

@name.setter
def name(self, value: Hashable) -> None:
    if self._no_setting_name:
        # Used in MultiIndex.levels to avoid silently ignoring name updates.
        raise RuntimeError(
            "Cannot set name on a level of a MultiIndex. Use "
            "'MultiIndex.set_names' instead."
        )
    maybe_extract_name(value, None, type(self))
    self._name = value

def _validate_names(
    self, name=None, names=None, deep: bool = False
) -> list[Hashable]:
    """
    Handles the quirks of having a singular 'name' parameter for general
    Index and plural 'names' parameter for MultiIndex.
    """
    from copy import deepcopy

    if names is not None and name is not None:
        raise TypeError("Can only provide one of `names` and `name`")
    if names is None and name is None:
        new_names = deepcopy(self.names) if deep else self.names
    elif names is not None:
        if not is_list_like(names):
            raise TypeError("Must pass list-like as `names`.")
        new_names = names
    elif not is_list_like(name):
        new_names = [name]
    else:
        new_names = name

    if len(new_names) != len(self.names):
        raise ValueError(
            f"Length of new names must be {len(self.names)}, got {len(new_names)}"
        )

    # All items in 'new_names' need to be hashable
    validate_all_hashable(*new_names, error_name=f"{type(self).__name__}.name")

    return new_names

def _get_default_index_names(
    self, names: Hashable | Sequence[Hashable] | None = None, default=None
) -> list[Hashable]:
    """
    Get names of index.

    Parameters
    ----------
    names : int, str or 1-dimensional list, default None
        Index names to set.
    default : str
        Default name of index.
Raises ------ TypeError if names not str or list-like """ from pandas.core.indexes.multi import MultiIndex if names is not None: if isinstance(names, (int, str)): names = [names] if not isinstance(names, list) and names is not None: raise ValueError("Index names must be str or 1-dimensional list") if not names: if isinstance(self, MultiIndex): names = com.fill_missing_names(self.names) else: names = [default] if self.name is None else [self.name] return names def _get_names(self) -> FrozenList: return FrozenList((self.name,)) def _set_names(self, values, *, level=None) -> None: """ Set new names on index. Each name has to be a hashable type. Parameters ---------- values : str or sequence name(s) to set level : int, level name, or sequence of int/level names (default None) If the index is a MultiIndex (hierarchical), level(s) to set (None for all levels). Otherwise level must be None Raises ------ TypeError if each name is not hashable. """ if not is_list_like(values): raise ValueError("Names must be a list-like") if len(values) != 1: raise ValueError(f"Length of new names must be 1, got {len(values)}") # GH 20527 # All items in 'name' need to be hashable: validate_all_hashable(*values, error_name=f"{type(self).__name__}.name") self._name = values[0] names = property(fset=_set_names, fget=_get_names) def set_names( self: _IndexT, names, *, level=..., inplace: Literal[False] = ... ) -> _IndexT: ... def set_names(self, names, *, level=..., inplace: Literal[True]) -> None: ... def set_names( self: _IndexT, names, *, level=..., inplace: bool = ... ) -> _IndexT | None: ... def set_names( self: _IndexT, names, *, level=None, inplace: bool = False ) -> _IndexT | None: """ Set Index or MultiIndex name. Able to set new names partially and by level. Parameters ---------- names : label or list of label or dict-like for MultiIndex Name(s) to set. .. versionchanged:: 1.3.0 level : int, label or list of int or label, optional If the index is a MultiIndex and names is not dict-like, level(s) to set (None for all levels). Otherwise level must be None. .. versionchanged:: 1.3.0 inplace : bool, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index or None The same type as the caller or None if ``inplace=True``. See Also -------- Index.rename : Able to set new names without level. Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx Index([1, 2, 3, 4], dtype='int64') >>> idx.set_names('quarter') Index([1, 2, 3, 4], dtype='int64', name='quarter') >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]]) >>> idx MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], ) >>> idx = idx.set_names(['kind', 'year']) >>> idx.set_names('species', level=0) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['species', 'year']) When renaming levels with a dict, levels can not be passed. 
>>> idx.set_names({'kind': 'snake'}) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['snake', 'year']) """ if level is not None and not isinstance(self, ABCMultiIndex): raise ValueError("Level must be None for non-MultiIndex") if level is not None and not is_list_like(level) and is_list_like(names): raise TypeError("Names must be a string when a single level is provided.") if not is_list_like(names) and level is None and self.nlevels > 1: raise TypeError("Must pass list-like as `names`.") if is_dict_like(names) and not isinstance(self, ABCMultiIndex): raise TypeError("Can only pass dict-like as `names` for MultiIndex.") if is_dict_like(names) and level is not None: raise TypeError("Can not pass level for dictlike `names`.") if isinstance(self, ABCMultiIndex) and is_dict_like(names) and level is None: # Transform dict to list of new names and corresponding levels level, names_adjusted = [], [] for i, name in enumerate(self.names): if name in names.keys(): level.append(i) names_adjusted.append(names[name]) names = names_adjusted if not is_list_like(names): names = [names] if level is not None and not is_list_like(level): level = [level] if inplace: idx = self else: idx = self._view() idx._set_names(names, level=level) if not inplace: return idx return None def rename(self, name, inplace: bool = False): """ Alter Index or MultiIndex name. Able to set new names without level. Defaults to returning new index. Length of names must match number of levels in MultiIndex. Parameters ---------- name : label or list of labels Name(s) to set. inplace : bool, default False Modifies the object directly, instead of creating a new Index or MultiIndex. Returns ------- Index or None The same type as the caller or None if ``inplace=True``. See Also -------- Index.set_names : Able to set new names partially and by level. Examples -------- >>> idx = pd.Index(['A', 'C', 'A', 'B'], name='score') >>> idx.rename('grade') Index(['A', 'C', 'A', 'B'], dtype='object', name='grade') >>> idx = pd.MultiIndex.from_product([['python', 'cobra'], ... [2018, 2019]], ... names=['kind', 'year']) >>> idx MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['kind', 'year']) >>> idx.rename(['species', 'year']) MultiIndex([('python', 2018), ('python', 2019), ( 'cobra', 2018), ( 'cobra', 2019)], names=['species', 'year']) >>> idx.rename('species') Traceback (most recent call last): TypeError: Must pass list-like as `names`. """ return self.set_names([name], inplace=inplace) # -------------------------------------------------------------------- # Level-Centric Methods def nlevels(self) -> int: """ Number of levels. """ return 1 def _sort_levels_monotonic(self: _IndexT) -> _IndexT: """ Compat with MultiIndex. """ return self def _validate_index_level(self, level) -> None: """ Validate index level. For single-level Index getting level number is a no-op, but some verification must be done like in MultiIndex. 
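
    Examples
    --------
    Illustrative of the errors raised on a single-level Index:

    >>> idx = pd.Index(['a', 'b'])
    >>> idx._validate_index_level(0)  # valid level number; returns None
    >>> idx._validate_index_level(3)
    Traceback (most recent call last):
    IndexError: Too many levels: Index has only 1 level, not 4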
""" if isinstance(level, int): if level < 0 and level != -1: raise IndexError( "Too many levels: Index has only 1 level, " f"{level} is not a valid level number" ) if level > 0: raise IndexError( f"Too many levels: Index has only 1 level, not {level + 1}" ) elif level != self.name: raise KeyError( f"Requested level ({level}) does not match index name ({self.name})" ) def _get_level_number(self, level) -> int: self._validate_index_level(level) return 0 def sortlevel( self, level=None, ascending: bool | list[bool] = True, sort_remaining=None ): """ For internal compatibility with the Index API. Sort the Index. This is for compat with MultiIndex Parameters ---------- ascending : bool, default True False to sort in descending order level, sort_remaining are compat parameters Returns ------- Index """ if not isinstance(ascending, (list, bool)): raise TypeError( "ascending must be a single bool value or" "a list of bool values of length 1" ) if isinstance(ascending, list): if len(ascending) != 1: raise TypeError("ascending must be a list of bool values of length 1") ascending = ascending[0] if not isinstance(ascending, bool): raise TypeError("ascending must be a bool value") return self.sort_values(return_indexer=True, ascending=ascending) def _get_level_values(self, level) -> Index: """ Return an Index of values for requested level. This is primarily useful to get an individual level of values from a MultiIndex, but is provided on Index as well for compatibility. Parameters ---------- level : int or str It is either the integer position or the name of the level. Returns ------- Index Calling object, as there is only one level in the Index. See Also -------- MultiIndex.get_level_values : Get values for a level of a MultiIndex. Notes ----- For Index, level should be 0, since there are no multiple levels. Examples -------- >>> idx = pd.Index(list('abc')) >>> idx Index(['a', 'b', 'c'], dtype='object') Get level values by supplying `level` as integer: >>> idx.get_level_values(0) Index(['a', 'b', 'c'], dtype='object') """ self._validate_index_level(level) return self get_level_values = _get_level_values def droplevel(self, level: IndexLabel = 0): """ Return index with requested level(s) removed. If resulting index has only 1 level left, the result will be of Index type, not MultiIndex. The original index is not modified inplace. Parameters ---------- level : int, str, or list-like, default 0 If a string is given, must be the name of a level If list-like, elements must be names or indexes of levels. Returns ------- Index or MultiIndex Examples -------- >>> mi = pd.MultiIndex.from_arrays( ... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z']) >>> mi MultiIndex([(1, 3, 5), (2, 4, 6)], names=['x', 'y', 'z']) >>> mi.droplevel() MultiIndex([(3, 5), (4, 6)], names=['y', 'z']) >>> mi.droplevel(2) MultiIndex([(1, 3), (2, 4)], names=['x', 'y']) >>> mi.droplevel('z') MultiIndex([(1, 3), (2, 4)], names=['x', 'y']) >>> mi.droplevel(['x', 'y']) Index([5, 6], dtype='int64', name='z') """ if not isinstance(level, (tuple, list)): level = [level] levnums = sorted(self._get_level_number(lev) for lev in level)[::-1] return self._drop_level_numbers(levnums) def _drop_level_numbers(self, levnums: list[int]): """ Drop MultiIndex levels by level _number_, not name. """ if not levnums and not isinstance(self, ABCMultiIndex): return self if len(levnums) >= self.nlevels: raise ValueError( f"Cannot remove {len(levnums)} levels from an index with " f"{self.nlevels} levels: at least one level must be left." 
) # The two checks above guarantee that here self is a MultiIndex self = cast("MultiIndex", self) new_levels = list(self.levels) new_codes = list(self.codes) new_names = list(self.names) for i in levnums: new_levels.pop(i) new_codes.pop(i) new_names.pop(i) if len(new_levels) == 1: lev = new_levels[0] if len(lev) == 0: # If lev is empty, lev.take will fail GH#42055 if len(new_codes[0]) == 0: # GH#45230 preserve RangeIndex here # see test_reset_index_empty_rangeindex result = lev[:0] else: res_values = algos.take(lev._values, new_codes[0], allow_fill=True) # _constructor instead of type(lev) for RangeIndex compat GH#35230 result = lev._constructor._simple_new(res_values, name=new_names[0]) else: # set nan if needed mask = new_codes[0] == -1 result = new_levels[0].take(new_codes[0]) if mask.any(): result = result.putmask(mask, np.nan) result._name = new_names[0] return result else: from pandas.core.indexes.multi import MultiIndex return MultiIndex( levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False, ) # -------------------------------------------------------------------- # Introspection Methods def _can_hold_na(self) -> bool: if isinstance(self.dtype, ExtensionDtype): if isinstance(self.dtype, IntervalDtype): # FIXME(GH#45720): this is inaccurate for integer-backed # IntervalArray, but without it other.categories.take raises # in IntervalArray._cmp_method return True return self.dtype._can_hold_na if self.dtype.kind in ["i", "u", "b"]: return False return True def is_monotonic_increasing(self) -> bool: """ Return a boolean if the values are equal or increasing. Returns ------- bool See Also -------- Index.is_monotonic_decreasing : Check if the values are equal or decreasing. Examples -------- >>> pd.Index([1, 2, 3]).is_monotonic_increasing True >>> pd.Index([1, 2, 2]).is_monotonic_increasing True >>> pd.Index([1, 3, 2]).is_monotonic_increasing False """ return self._engine.is_monotonic_increasing def is_monotonic_decreasing(self) -> bool: """ Return a boolean if the values are equal or decreasing. Returns ------- bool See Also -------- Index.is_monotonic_increasing : Check if the values are equal or increasing. Examples -------- >>> pd.Index([3, 2, 1]).is_monotonic_decreasing True >>> pd.Index([3, 2, 2]).is_monotonic_decreasing True >>> pd.Index([3, 1, 2]).is_monotonic_decreasing False """ return self._engine.is_monotonic_decreasing def _is_strictly_monotonic_increasing(self) -> bool: """ Return if the index is strictly monotonic increasing (only increasing) values. Examples -------- >>> Index([1, 2, 3])._is_strictly_monotonic_increasing True >>> Index([1, 2, 2])._is_strictly_monotonic_increasing False >>> Index([1, 3, 2])._is_strictly_monotonic_increasing False """ return self.is_unique and self.is_monotonic_increasing def _is_strictly_monotonic_decreasing(self) -> bool: """ Return if the index is strictly monotonic decreasing (only decreasing) values. Examples -------- >>> Index([3, 2, 1])._is_strictly_monotonic_decreasing True >>> Index([3, 2, 2])._is_strictly_monotonic_decreasing False >>> Index([3, 1, 2])._is_strictly_monotonic_decreasing False """ return self.is_unique and self.is_monotonic_decreasing def is_unique(self) -> bool: """ Return if the index has unique values. Returns ------- bool See Also -------- Index.has_duplicates : Inverse method that checks if it has duplicate values. 
Examples
--------
>>> idx = pd.Index([1, 5, 7, 7])
>>> idx.is_unique
False

>>> idx = pd.Index([1, 5, 7])
>>> idx.is_unique
True

>>> idx = pd.Index(["Watermelon", "Orange", "Apple",
...                 "Watermelon"]).astype("category")
>>> idx.is_unique
False

>>> idx = pd.Index(["Orange", "Apple",
...                 "Watermelon"]).astype("category")
>>> idx.is_unique
True
"""
return self._engine.is_unique

def has_duplicates(self) -> bool:
    """
    Check if the Index has duplicate values.

    Returns
    -------
    bool
        Whether or not the Index has duplicate values.

    See Also
    --------
    Index.is_unique : Inverse method that checks if it has unique values.

    Examples
    --------
    >>> idx = pd.Index([1, 5, 7, 7])
    >>> idx.has_duplicates
    True

    >>> idx = pd.Index([1, 5, 7])
    >>> idx.has_duplicates
    False

    >>> idx = pd.Index(["Watermelon", "Orange", "Apple",
    ...                 "Watermelon"]).astype("category")
    >>> idx.has_duplicates
    True

    >>> idx = pd.Index(["Orange", "Apple",
    ...                 "Watermelon"]).astype("category")
    >>> idx.has_duplicates
    False
    """
    return not self.is_unique

def is_boolean(self) -> bool:
    """
    Check if the Index only consists of booleans.

    .. deprecated:: 2.0.0
        Use `pandas.api.types.is_bool_dtype` instead.

    Returns
    -------
    bool
        Whether or not the Index only consists of booleans.

    See Also
    --------
    is_integer : Check if the Index only consists of integers (deprecated).
    is_floating : Check if the Index is a floating type (deprecated).
    is_numeric : Check if the Index only consists of numeric data (deprecated).
    is_object : Check if the Index is of the object dtype (deprecated).
    is_categorical : Check if the Index holds categorical data.
    is_interval : Check if the Index holds Interval objects (deprecated).

    Examples
    --------
    >>> idx = pd.Index([True, False, True])
    >>> idx.is_boolean()  # doctest: +SKIP
    True

    >>> idx = pd.Index(["True", "False", "True"])
    >>> idx.is_boolean()  # doctest: +SKIP
    False

    >>> idx = pd.Index([True, False, "True"])
    >>> idx.is_boolean()  # doctest: +SKIP
    False
    """
    warnings.warn(
        f"{type(self).__name__}.is_boolean is deprecated. "
        "Use pandas.api.types.is_bool_dtype instead.",
        FutureWarning,
        stacklevel=find_stack_level(),
    )
    return self.inferred_type in ["boolean"]

def is_integer(self) -> bool:
    """
    Check if the Index only consists of integers.

    .. deprecated:: 2.0.0
        Use `pandas.api.types.is_integer_dtype` instead.

    Returns
    -------
    bool
        Whether or not the Index only consists of integers.

    See Also
    --------
    is_boolean : Check if the Index only consists of booleans (deprecated).
    is_floating : Check if the Index is a floating type (deprecated).
    is_numeric : Check if the Index only consists of numeric data (deprecated).
    is_object : Check if the Index is of the object dtype (deprecated).
    is_categorical : Check if the Index holds categorical data (deprecated).
    is_interval : Check if the Index holds Interval objects (deprecated).

    Examples
    --------
    >>> idx = pd.Index([1, 2, 3, 4])
    >>> idx.is_integer()  # doctest: +SKIP
    True

    >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
    >>> idx.is_integer()  # doctest: +SKIP
    False

    >>> idx = pd.Index(["Apple", "Mango", "Watermelon"])
    >>> idx.is_integer()  # doctest: +SKIP
    False
    """
    warnings.warn(
        f"{type(self).__name__}.is_integer is deprecated. "
        "Use pandas.api.types.is_integer_dtype instead.",
        FutureWarning,
        stacklevel=find_stack_level(),
    )
    return self.inferred_type in ["integer"]

def is_floating(self) -> bool:
    """
    Check if the Index is a floating type.

    .. deprecated:: 2.0.0
        Use `pandas.api.types.is_float_dtype` instead

    The Index may consist of only floats, NaNs, or a mix of floats,
    integers, or NaNs.
Returns ------- bool Whether or not the Index only consists of only consists of floats, NaNs, or a mix of floats, integers, or NaNs. See Also -------- is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_floating() # doctest: +SKIP True >>> idx = pd.Index([1.0, 2.0, np.nan, 4.0]) >>> idx.is_floating() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4, np.nan]) >>> idx.is_floating() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4]) >>> idx.is_floating() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_floating is deprecated. " "Use pandas.api.types.is_float_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["floating", "mixed-integer-float", "integer-na"] def is_numeric(self) -> bool: """ Check if the Index only consists of numeric data. .. deprecated:: 2.0.0 Use `pandas.api.types.is_numeric_dtype` instead. Returns ------- bool Whether or not the Index only consists of numeric data. See Also -------- is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4.0]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4.0, np.nan]) >>> idx.is_numeric() # doctest: +SKIP True >>> idx = pd.Index([1, 2, 3, 4.0, np.nan, "Apple"]) >>> idx.is_numeric() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_numeric is deprecated. " "Use pandas.api.types.is_any_real_numeric_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["integer", "floating"] def is_object(self) -> bool: """ Check if the Index is of the object dtype. .. deprecated:: 2.0.0 Use `pandas.api.types.is_object_dtype` instead. Returns ------- bool Whether or not the Index is of the object dtype. See Also -------- is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index(["Apple", "Mango", "Watermelon"]) >>> idx.is_object() # doctest: +SKIP True >>> idx = pd.Index(["Apple", "Mango", 2.0]) >>> idx.is_object() # doctest: +SKIP True >>> idx = pd.Index(["Watermelon", "Orange", "Apple", ... 
"Watermelon"]).astype("category") >>> idx.is_object() # doctest: +SKIP False >>> idx = pd.Index([1.0, 2.0, 3.0, 4.0]) >>> idx.is_object() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_object is deprecated." "Use pandas.api.types.is_object_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return is_object_dtype(self.dtype) def is_categorical(self) -> bool: """ Check if the Index holds categorical data. .. deprecated:: 2.0.0 Use :meth:`pandas.api.types.is_categorical_dtype` instead. Returns ------- bool True if the Index is categorical. See Also -------- CategoricalIndex : Index for categorical data. is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_interval : Check if the Index holds Interval objects (deprecated). Examples -------- >>> idx = pd.Index(["Watermelon", "Orange", "Apple", ... "Watermelon"]).astype("category") >>> idx.is_categorical() # doctest: +SKIP True >>> idx = pd.Index([1, 3, 5, 7]) >>> idx.is_categorical() # doctest: +SKIP False >>> s = pd.Series(["Peter", "Victor", "Elisabeth", "Mar"]) >>> s 0 Peter 1 Victor 2 Elisabeth 3 Mar dtype: object >>> s.index.is_categorical() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_categorical is deprecated." "Use pandas.api.types.is_categorical_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["categorical"] def is_interval(self) -> bool: """ Check if the Index holds Interval objects. .. deprecated:: 2.0.0 Use `pandas.api.types.is_interval_dtype` instead. Returns ------- bool Whether or not the Index holds Interval objects. See Also -------- IntervalIndex : Index for Interval objects. is_boolean : Check if the Index only consists of booleans (deprecated). is_integer : Check if the Index only consists of integers (deprecated). is_floating : Check if the Index is a floating type (deprecated). is_numeric : Check if the Index only consists of numeric data (deprecated). is_object : Check if the Index is of the object dtype. (deprecated). is_categorical : Check if the Index holds categorical data (deprecated). Examples -------- >>> idx = pd.Index([pd.Interval(left=0, right=5), ... pd.Interval(left=5, right=10)]) >>> idx.is_interval() # doctest: +SKIP True >>> idx = pd.Index([1, 3, 5, 7]) >>> idx.is_interval() # doctest: +SKIP False """ warnings.warn( f"{type(self).__name__}.is_interval is deprecated." "Use pandas.api.types.is_interval_dtype instead", FutureWarning, stacklevel=find_stack_level(), ) return self.inferred_type in ["interval"] def _holds_integer(self) -> bool: """ Whether the type is an integer type. """ return self.inferred_type in ["integer", "mixed-integer"] def holds_integer(self) -> bool: """ Whether the type is an integer type. .. deprecated:: 2.0.0 Use `pandas.api.types.infer_dtype` instead """ warnings.warn( f"{type(self).__name__}.holds_integer is deprecated. " "Use pandas.api.types.infer_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return self._holds_integer() def inferred_type(self) -> str_t: """ Return a string of the type inferred from the values. """ return lib.infer_dtype(self._values, skipna=False) def _is_all_dates(self) -> bool: """ Whether or not the index values only consist of dates. 
""" if needs_i8_conversion(self.dtype): return True elif self.dtype != _dtype_obj: # TODO(ExtensionIndex): 3rd party EA might override? # Note: this includes IntervalIndex, even when the left/right # contain datetime-like objects. return False elif self._is_multi: return False return is_datetime_array(ensure_object(self._values)) def _is_multi(self) -> bool: """ Cached check equivalent to isinstance(self, MultiIndex) """ return isinstance(self, ABCMultiIndex) # -------------------------------------------------------------------- # Pickle Methods def __reduce__(self): d = {"data": self._data, "name": self.name} return _new_Index, (type(self), d), None # -------------------------------------------------------------------- # Null Handling Methods def _na_value(self): """The expected NA value to use with this index.""" dtype = self.dtype if isinstance(dtype, np.dtype): if dtype.kind in ["m", "M"]: return NaT return np.nan return dtype.na_value def _isnan(self) -> npt.NDArray[np.bool_]: """ Return if each value is NaN. """ if self._can_hold_na: return isna(self) else: # shouldn't reach to this condition by checking hasnans beforehand values = np.empty(len(self), dtype=np.bool_) values.fill(False) return values def hasnans(self) -> bool: """ Return True if there are any NaNs. Enables various performance speedups. Returns ------- bool """ if self._can_hold_na: return bool(self._isnan.any()) else: return False def isna(self) -> npt.NDArray[np.bool_]: """ Detect missing values. Return a boolean same-sized object indicating if the values are NA. NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`pd.NaT`, get mapped to ``True`` values. Everything else get mapped to ``False`` values. Characters such as empty strings `''` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). Returns ------- numpy.ndarray[bool] A boolean array of whether my values are NA. See Also -------- Index.notna : Boolean inverse of isna. Index.dropna : Omit entries with missing values. isna : Top-level isna. Series.isna : Detect missing values in Series object. Examples -------- Show which entries in a pandas.Index are NA. The result is an array. >>> idx = pd.Index([5.2, 6.0, np.NaN]) >>> idx Index([5.2, 6.0, nan], dtype='float64') >>> idx.isna() array([False, False, True]) Empty strings are not considered NA values. None is considered an NA value. >>> idx = pd.Index(['black', '', 'red', None]) >>> idx Index(['black', '', 'red', None], dtype='object') >>> idx.isna() array([False, False, False, True]) For datetimes, `NaT` (Not a Time) is considered as an NA value. >>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'), ... pd.Timestamp(''), None, pd.NaT]) >>> idx DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'], dtype='datetime64[ns]', freq=None) >>> idx.isna() array([False, True, True, True]) """ return self._isnan isnull = isna def notna(self) -> npt.NDArray[np.bool_]: """ Detect existing (non-missing) values. Return a boolean same-sized object indicating if the values are not NA. Non-missing values get mapped to ``True``. Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). NA values, such as None or :attr:`numpy.NaN`, get mapped to ``False`` values. Returns ------- numpy.ndarray[bool] Boolean array to indicate which entries are not NA. See Also -------- Index.notnull : Alias of notna. Index.isna: Inverse of notna. notna : Top-level notna. 
Examples -------- Show which entries in an Index are not NA. The result is an array. >>> idx = pd.Index([5.2, 6.0, np.NaN]) >>> idx Index([5.2, 6.0, nan], dtype='float64') >>> idx.notna() array([ True, True, False]) Empty strings are not considered NA values. None is considered a NA value. >>> idx = pd.Index(['black', '', 'red', None]) >>> idx Index(['black', '', 'red', None], dtype='object') >>> idx.notna() array([ True, True, True, False]) """ return ~self.isna() notnull = notna def fillna(self, value=None, downcast=None): """ Fill NA/NaN values with the specified value. Parameters ---------- value : scalar Scalar value to use to fill holes (e.g. 0). This value cannot be a list-likes. downcast : dict, default is None A dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate equal type (e.g. float64 to int64 if possible). Returns ------- Index See Also -------- DataFrame.fillna : Fill NaN values of a DataFrame. Series.fillna : Fill NaN Values of a Series. """ value = self._require_scalar(value) if self.hasnans: result = self.putmask(self._isnan, value) if downcast is None: # no need to care metadata other than name # because it can't have freq if it has NaTs # _with_infer needed for test_fillna_categorical return Index._with_infer(result, name=self.name) raise NotImplementedError( f"{type(self).__name__}.fillna does not support 'downcast' " "argument values other than 'None'." ) return self._view() def dropna(self: _IndexT, how: AnyAll = "any") -> _IndexT: """ Return Index without NA/NaN values. Parameters ---------- how : {'any', 'all'}, default 'any' If the Index is a MultiIndex, drop the value when any or all levels are NaN. Returns ------- Index """ if how not in ("any", "all"): raise ValueError(f"invalid how option: {how}") if self.hasnans: res_values = self._values[~self._isnan] return type(self)._simple_new(res_values, name=self.name) return self._view() # -------------------------------------------------------------------- # Uniqueness Methods def unique(self: _IndexT, level: Hashable | None = None) -> _IndexT: """ Return unique values in the index. Unique values are returned in order of appearance, this does NOT sort. Parameters ---------- level : int or hashable, optional Only return values from specified level (for MultiIndex). If int, gets the level by integer position, else by level name. Returns ------- Index See Also -------- unique : Numpy array of unique values in that column. Series.unique : Return unique values of Series object. """ if level is not None: self._validate_index_level(level) if self.is_unique: return self._view() result = super().unique() return self._shallow_copy(result) def drop_duplicates(self: _IndexT, *, keep: DropKeep = "first") -> _IndexT: """ Return Index with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. Returns ------- Index See Also -------- Series.drop_duplicates : Equivalent method on Series. DataFrame.drop_duplicates : Equivalent method on DataFrame. Index.duplicated : Related method on Index, indicating duplicate Index values. Examples -------- Generate an pandas.Index with duplicate values. >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo']) The `keep` parameter controls which duplicate values are removed. 
The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> idx.drop_duplicates(keep='first') Index(['lama', 'cow', 'beetle', 'hippo'], dtype='object') The value 'last' keeps the last occurrence for each set of duplicated entries. >>> idx.drop_duplicates(keep='last') Index(['cow', 'beetle', 'lama', 'hippo'], dtype='object') The value ``False`` discards all sets of duplicated entries. >>> idx.drop_duplicates(keep=False) Index(['cow', 'beetle', 'hippo'], dtype='object') """ if self.is_unique: return self._view() return super().drop_duplicates(keep=keep) def duplicated(self, keep: DropKeep = "first") -> npt.NDArray[np.bool_]: """ Indicate duplicate index values. Duplicated values are indicated as ``True`` values in the resulting array. Either all duplicates, all except the first, or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' The value or values in a set of duplicates to mark as missing. - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- np.ndarray[bool] See Also -------- Series.duplicated : Equivalent method on pandas.Series. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Index.drop_duplicates : Remove duplicate values from Index. Examples -------- By default, for each set of duplicated values, the first occurrence is set to False and all others to True: >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> idx.duplicated() array([False, False, True, False, True]) which is equivalent to >>> idx.duplicated(keep='first') array([False, False, True, False, True]) By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True: >>> idx.duplicated(keep='last') array([ True, False, True, False, False]) By setting keep on ``False``, all duplicates are True: >>> idx.duplicated(keep=False) array([ True, False, True, False, True]) """ if self.is_unique: # fastpath available bc we are immutable return np.zeros(len(self), dtype=bool) return self._duplicated(keep=keep) # -------------------------------------------------------------------- # Arithmetic & Logical Methods def __iadd__(self, other): # alias for __add__ return self + other def __nonzero__(self) -> NoReturn: raise ValueError( f"The truth value of a {type(self).__name__} is ambiguous. " "Use a.empty, a.bool(), a.item(), a.any() or a.all()." ) __bool__ = __nonzero__ # -------------------------------------------------------------------- # Set Operation Methods def _get_reconciled_name_object(self, other): """ If the result of a set operation will be self, return self, unless the name changes, in which case make a shallow copy of self. """ name = get_op_result_name(self, other) if self.name is not name: return self.rename(name) return self def _validate_sort_keyword(self, sort): if sort not in [None, False, True]: raise ValueError( "The 'sort' keyword only takes the values of " f"None, True, or False; {sort} was passed." ) def _dti_setop_align_tzs(self, other: Index, setop: str_t) -> tuple[Index, Index]: """ With mismatched timezones, cast both to UTC. 
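
    Examples
    --------
    A sketch with two tz-aware DatetimeIndexes; both sides come back
    converted to UTC:

    >>> left = pd.DatetimeIndex(['2020-01-01'], tz='US/Eastern')
    >>> right = pd.DatetimeIndex(['2020-01-01'], tz='Asia/Tokyo')
    >>> l, r = left._dti_setop_align_tzs(right, 'union')
    >>> str(l.tz), str(r.tz)
    ('UTC', 'UTC')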
""" # Caller is responsibelf or checking # `not is_dtype_equal(self.dtype, other.dtype)` if ( isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex) and self.tz is not None and other.tz is not None ): # GH#39328, GH#45357 left = self.tz_convert("UTC") right = other.tz_convert("UTC") return left, right return self, other def union(self, other, sort=None): """ Form the union of two Index objects. If the Index objects are incompatible, both Index objects will be cast to dtype('object') first. Parameters ---------- other : Index or array-like sort : bool or None, default None Whether to sort the resulting Index. * None : Sort the result, except when 1. `self` and `other` are equal. 2. `self` or `other` has length 0. 3. Some values in `self` or `other` cannot be compared. A RuntimeWarning is issued in this case. * False : do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Examples -------- Union matching dtypes >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.union(idx2) Index([1, 2, 3, 4, 5, 6], dtype='int64') Union mismatched dtypes >>> idx1 = pd.Index(['a', 'b', 'c', 'd']) >>> idx2 = pd.Index([1, 2, 3, 4]) >>> idx1.union(idx2) Index(['a', 'b', 'c', 'd', 1, 2, 3, 4], dtype='object') MultiIndex case >>> idx1 = pd.MultiIndex.from_arrays( ... [[1, 1, 2, 2], ["Red", "Blue", "Red", "Blue"]] ... ) >>> idx1 MultiIndex([(1, 'Red'), (1, 'Blue'), (2, 'Red'), (2, 'Blue')], ) >>> idx2 = pd.MultiIndex.from_arrays( ... [[3, 3, 2, 2], ["Red", "Green", "Red", "Green"]] ... ) >>> idx2 MultiIndex([(3, 'Red'), (3, 'Green'), (2, 'Red'), (2, 'Green')], ) >>> idx1.union(idx2) MultiIndex([(1, 'Blue'), (1, 'Red'), (2, 'Blue'), (2, 'Green'), (2, 'Red'), (3, 'Green'), (3, 'Red')], ) >>> idx1.union(idx2, sort=False) MultiIndex([(1, 'Red'), (1, 'Blue'), (2, 'Red'), (2, 'Blue'), (3, 'Red'), (3, 'Green'), (2, 'Green')], ) """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) if not is_dtype_equal(self.dtype, other.dtype): if ( isinstance(self, ABCMultiIndex) and not is_object_dtype(_unpack_nested_dtype(other)) and len(other) > 0 ): raise NotImplementedError( "Can only union MultiIndex with MultiIndex or Index of tuples, " "try mi.to_flat_index().union(other) instead." ) self, other = self._dti_setop_align_tzs(other, "union") dtype = self._find_common_type_compat(other) left = self.astype(dtype, copy=False) right = other.astype(dtype, copy=False) return left.union(right, sort=sort) elif not len(other) or self.equals(other): # NB: whether this (and the `if not len(self)` check below) come before # or after the is_dtype_equal check above affects the returned dtype result = self._get_reconciled_name_object(other) if sort is True: return result.sort_values() return result elif not len(self): result = other._get_reconciled_name_object(self) if sort is True: return result.sort_values() return result result = self._union(other, sort=sort) return self._wrap_setop_result(other, result) def _union(self, other: Index, sort): """ Specific union logic should go here. In subclasses, union behavior should be overwritten here rather than in `self.union`. Parameters ---------- other : Index or array-like sort : False or None, default False Whether to sort the resulting index. * False : do not sort the result. * None : sort the result, except when `self` and `other` are equal or when the values cannot be compared. 
Returns ------- Index """ lvals = self._values rvals = other._values if ( sort is None and self.is_monotonic_increasing and other.is_monotonic_increasing and not (self.has_duplicates and other.has_duplicates) and self._can_use_libjoin ): # Both are monotonic and at least one is unique, so can use outer join # (actually don't need either unique, but without this restriction # test_union_same_value_duplicated_in_both fails) try: return self._outer_indexer(other)[0] except (TypeError, IncompatibleFrequency): # incomparable objects; should only be for object dtype value_list = list(lvals) # worth making this faster? a very unusual case value_set = set(lvals) value_list.extend([x for x in rvals if x not in value_set]) # If objects are unorderable, we must have object dtype. return np.array(value_list, dtype=object) elif not other.is_unique: # other has duplicates result_dups = algos.union_with_duplicates(self, other) return _maybe_try_sort(result_dups, sort) # The rest of this method is analogous to Index._intersection_via_get_indexer # Self may have duplicates; other already checked as unique # find indexes of things in "other" that are not in "self" if self._index_as_unique: indexer = self.get_indexer(other) missing = (indexer == -1).nonzero()[0] else: missing = algos.unique1d(self.get_indexer_non_unique(other)[1]) result: Index | MultiIndex | ArrayLike if self._is_multi: # Preserve MultiIndex to avoid losing dtypes result = self.append(other.take(missing)) else: if len(missing) > 0: other_diff = rvals.take(missing) result = concat_compat((lvals, other_diff)) else: result = lvals if not self.is_monotonic_increasing or not other.is_monotonic_increasing: # if both are monotonic then result should already be sorted result = _maybe_try_sort(result, sort) return result def _wrap_setop_result(self, other: Index, result) -> Index: name = get_op_result_name(self, other) if isinstance(result, Index): if result.name != name: result = result.rename(name) else: result = self._shallow_copy(result, name=name) return result def intersection(self, other, sort: bool = False): """ Form the intersection of two Index objects. This returns a new Index with elements common to the index and `other`. Parameters ---------- other : Index or array-like sort : True, False or None, default False Whether to sort the resulting index. * None : sort the result, except when `self` and `other` are equal or when the values cannot be compared. * False : do not sort the result. * True : Sort the result (which may raise TypeError). 
Returns ------- Index Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.intersection(idx2) Index([3, 4], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) if not is_dtype_equal(self.dtype, other.dtype): self, other = self._dti_setop_align_tzs(other, "intersection") if self.equals(other): if self.has_duplicates: result = self.unique()._get_reconciled_name_object(other) else: result = self._get_reconciled_name_object(other) if sort is True: result = result.sort_values() return result if len(self) == 0 or len(other) == 0: # fastpath; we need to be careful about having commutativity if self._is_multi or other._is_multi: # _convert_can_do_setop ensures that we have both or neither # We retain self.levels return self[:0].rename(result_name) dtype = self._find_common_type_compat(other) if is_dtype_equal(self.dtype, dtype): # Slicing allows us to retain DTI/TDI.freq, RangeIndex # Note: self[:0] vs other[:0] affects # 1) which index's `freq` we get in DTI/TDI cases # This may be a historical artifact, i.e. no documented # reason for this choice. # 2) The `step` we get in RangeIndex cases if len(self) == 0: return self[:0].rename(result_name) else: return other[:0].rename(result_name) return Index([], dtype=dtype, name=result_name) elif not self._should_compare(other): # We can infer that the intersection is empty. if isinstance(self, ABCMultiIndex): return self[:0].rename(result_name) return Index([], name=result_name) elif not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) other = other.astype(dtype, copy=False) return this.intersection(other, sort=sort) result = self._intersection(other, sort=sort) return self._wrap_intersection_result(other, result) def _intersection(self, other: Index, sort: bool = False): """ intersection specialized to the case with matching dtypes. """ if ( self.is_monotonic_increasing and other.is_monotonic_increasing and self._can_use_libjoin and not isinstance(self, ABCMultiIndex) ): try: res_indexer, indexer, _ = self._inner_indexer(other) except TypeError: # non-comparable; should only be for object dtype pass else: # TODO: algos.unique1d should preserve DTA/TDA if is_numeric_dtype(self): # This is faster, because Index.unique() checks for uniqueness # before calculating the unique values. res = algos.unique1d(res_indexer) else: result = self.take(indexer) res = result.drop_duplicates() return ensure_wrapped_if_datetimelike(res) res_values = self._intersection_via_get_indexer(other, sort=sort) res_values = _maybe_try_sort(res_values, sort) return res_values def _wrap_intersection_result(self, other, result): # We will override for MultiIndex to handle empty results return self._wrap_setop_result(other, result) def _intersection_via_get_indexer( self, other: Index | MultiIndex, sort ) -> ArrayLike | MultiIndex: """ Find the intersection of two Indexes using get_indexer. Returns ------- np.ndarray or ExtensionArray The returned array will be unique. 
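
    Examples
    --------
    A sketch; with ``sort=False`` the common values keep the order they
    have in ``self``:

    >>> left = pd.Index([3, 1, 2])
    >>> left._intersection_via_get_indexer(pd.Index([2, 3, 4]), sort=False)
    array([3, 2])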
""" left_unique = self.unique() right_unique = other.unique() # even though we are unique, we need get_indexer_for for IntervalIndex indexer = left_unique.get_indexer_for(right_unique) mask = indexer != -1 taker = indexer.take(mask.nonzero()[0]) if sort is False: # sort bc we want the elements in the same order they are in self # unnecessary in the case with sort=None bc we will sort later taker = np.sort(taker) if isinstance(left_unique, ABCMultiIndex): result = left_unique.take(taker) else: result = left_unique.take(taker)._values return result def difference(self, other, sort=None): """ Return a new Index with elements of index not in `other`. This is the set difference of two Index objects. Parameters ---------- other : Index or array-like sort : bool or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Examples -------- >>> idx1 = pd.Index([2, 1, 3, 4]) >>> idx2 = pd.Index([3, 4, 5, 6]) >>> idx1.difference(idx2) Index([1, 2], dtype='int64') >>> idx1.difference(idx2, sort=False) Index([2, 1], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) # Note: we do NOT call _dti_setop_align_tzs here, as there # is no requirement that .difference be commutative, so it does # not cast to object. if self.equals(other): # Note: we do not (yet) sort even if sort=None GH#24959 return self[:0].rename(result_name) if len(other) == 0: # Note: we do not (yet) sort even if sort=None GH#24959 result = self.rename(result_name) if sort is True: return result.sort_values() return result if not self._should_compare(other): # Nothing matches -> difference is everything result = self.rename(result_name) if sort is True: return result.sort_values() return result result = self._difference(other, sort=sort) return self._wrap_difference_result(other, result) def _difference(self, other, sort): # overridden by RangeIndex this = self.unique() indexer = this.get_indexer_for(other) indexer = indexer.take((indexer != -1).nonzero()[0]) label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True) the_diff: MultiIndex | ArrayLike if isinstance(this, ABCMultiIndex): the_diff = this.take(label_diff) else: the_diff = this._values.take(label_diff) the_diff = _maybe_try_sort(the_diff, sort) return the_diff def _wrap_difference_result(self, other, result): # We will override for MultiIndex to handle empty results return self._wrap_setop_result(other, result) def symmetric_difference(self, other, result_name=None, sort=None): """ Compute the symmetric difference of two Index objects. Parameters ---------- other : Index or array-like result_name : str sort : bool or None, default None Whether to sort the resulting index. By default, the values are attempted to be sorted, but any TypeError from incomparable elements is caught by pandas. * None : Attempt to sort the result, but catch any TypeErrors from comparing incomparable elements. * False : Do not sort the result. * True : Sort the result (which may raise TypeError). Returns ------- Index Notes ----- ``symmetric_difference`` contains elements that appear in either ``idx1`` or ``idx2`` but not both. 
Equivalent to the Index created by ``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates dropped. Examples -------- >>> idx1 = pd.Index([1, 2, 3, 4]) >>> idx2 = pd.Index([2, 3, 4, 5]) >>> idx1.symmetric_difference(idx2) Index([1, 5], dtype='int64') """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name_update = self._convert_can_do_setop(other) if result_name is None: result_name = result_name_update if not is_dtype_equal(self.dtype, other.dtype): self, other = self._dti_setop_align_tzs(other, "symmetric_difference") if not self._should_compare(other): return self.union(other, sort=sort).rename(result_name) elif not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) that = other.astype(dtype, copy=False) return this.symmetric_difference(that, sort=sort).rename(result_name) this = self.unique() other = other.unique() indexer = this.get_indexer_for(other) # {this} minus {other} common_indexer = indexer.take((indexer != -1).nonzero()[0]) left_indexer = np.setdiff1d( np.arange(this.size), common_indexer, assume_unique=True ) left_diff = this.take(left_indexer) # {other} minus {this} right_indexer = (indexer == -1).nonzero()[0] right_diff = other.take(right_indexer) res_values = left_diff.append(right_diff) result = _maybe_try_sort(res_values, sort) if not self._is_multi: return Index(result, name=result_name, dtype=res_values.dtype) else: left_diff = cast("MultiIndex", left_diff) if len(result) == 0: # result might be an Index, if other was an Index return left_diff.remove_unused_levels().set_names(result_name) return result.set_names(result_name) def _assert_can_do_setop(self, other) -> bool: if not is_list_like(other): raise TypeError("Input must be Index or array-like") return True def _convert_can_do_setop(self, other) -> tuple[Index, Hashable]: if not isinstance(other, Index): other = Index(other, name=self.name) result_name = self.name else: result_name = get_op_result_name(self, other) return other, result_name # -------------------------------------------------------------------- # Indexing Methods def get_loc(self, key): """ Get integer location, slice or boolean mask for requested label. Parameters ---------- key : label Returns ------- int if unique index, slice if monotonic index, else mask Examples -------- >>> unique_index = pd.Index(list('abc')) >>> unique_index.get_loc('b') 1 >>> monotonic_index = pd.Index(list('abbc')) >>> monotonic_index.get_loc('b') slice(1, 3, None) >>> non_monotonic_index = pd.Index(list('abcb')) >>> non_monotonic_index.get_loc('b') array([False, True, False, True]) """ casted_key = self._maybe_cast_indexer(key) try: return self._engine.get_loc(casted_key) except KeyError as err: raise KeyError(key) from err except TypeError: # If we have a listlike key, _check_indexing_error will raise # InvalidIndexError. Otherwise we fall through and re-raise # the TypeError. self._check_indexing_error(key) raise _index_shared_docs[ "get_indexer" ] = """ Compute indexer and mask for new index given the current index. The indexer should be then used as an input to ndarray.take to align the current data to the new index. Parameters ---------- target : %(target_klass)s method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional * default: exact matches only. * pad / ffill: find the PREVIOUS index value if no exact match. * backfill / bfill: use NEXT index value if no exact match * nearest: use the NEAREST index value if no exact match. 
Tied distances are broken by preferring the larger index value. limit : int, optional Maximum number of consecutive labels in ``target`` to match for inexact matches. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- np.ndarray[np.intp] Integers from 0 to n - 1 indicating that the index at these positions matches the corresponding target values. Missing values in the target are marked by -1. %(raises_section)s Notes ----- Returns -1 for unmatched values, for further explanation see the example below. Examples -------- >>> index = pd.Index(['c', 'a', 'b']) >>> index.get_indexer(['a', 'b', 'x']) array([ 1, 2, -1]) Notice that the return value is an array of locations in ``index`` and ``x`` is marked by -1, as it is not in ``index``. """ def get_indexer( self, target, method: str_t | None = None, limit: int | None = None, tolerance=None, ) -> npt.NDArray[np.intp]: method = clean_reindex_fill_method(method) orig_target = target target = self._maybe_cast_listlike_indexer(target) self._check_indexing_method(method, limit, tolerance) if not self._index_as_unique: raise InvalidIndexError(self._requires_unique_msg) if len(target) == 0: return np.array([], dtype=np.intp) if not self._should_compare(target) and not self._should_partial_index(target): # IntervalIndex get special treatment bc numeric scalars can be # matched to Interval scalars return self._get_indexer_non_comparable(target, method=method, unique=True) if is_categorical_dtype(self.dtype): # _maybe_cast_listlike_indexer ensures target has our dtype # (could improve perf by doing _should_compare check earlier?) assert is_dtype_equal(self.dtype, target.dtype) indexer = self._engine.get_indexer(target.codes) if self.hasnans and target.hasnans: # After _maybe_cast_listlike_indexer, target elements which do not # belong to some category are changed to NaNs # Mask to track actual NaN values compared to inserted NaN values # GH#45361 target_nans = isna(orig_target) loc = self.get_loc(np.nan) mask = target.isna() indexer[target_nans] = loc indexer[mask & ~target_nans] = -1 return indexer if is_categorical_dtype(target.dtype): # potential fastpath # get an indexer for unique categories then propagate to codes via take_nd # get_indexer instead of _get_indexer needed for MultiIndex cases # e.g. 
test_append_different_columns_types categories_indexer = self.get_indexer(target.categories) indexer = algos.take_nd(categories_indexer, target.codes, fill_value=-1) if (not self._is_multi and self.hasnans) and target.hasnans: # Exclude MultiIndex because hasnans raises NotImplementedError # we should only get here if we are unique, so loc is an integer # GH#41934 loc = self.get_loc(np.nan) mask = target.isna() indexer[mask] = loc return ensure_platform_int(indexer) pself, ptarget = self._maybe_promote(target) if pself is not self or ptarget is not target: return pself.get_indexer( ptarget, method=method, limit=limit, tolerance=tolerance ) if is_dtype_equal(self.dtype, target.dtype) and self.equals(target): # Only call equals if we have same dtype to avoid inference/casting return np.arange(len(target), dtype=np.intp) if not is_dtype_equal( self.dtype, target.dtype ) and not self._should_partial_index(target): # _should_partial_index e.g. IntervalIndex with numeric scalars # that can be matched to Interval scalars. dtype = self._find_common_type_compat(target) this = self.astype(dtype, copy=False) target = target.astype(dtype, copy=False) return this._get_indexer( target, method=method, limit=limit, tolerance=tolerance ) return self._get_indexer(target, method, limit, tolerance) def _get_indexer( self, target: Index, method: str_t | None = None, limit: int | None = None, tolerance=None, ) -> npt.NDArray[np.intp]: if tolerance is not None: tolerance = self._convert_tolerance(tolerance, target) if method in ["pad", "backfill"]: indexer = self._get_fill_indexer(target, method, limit, tolerance) elif method == "nearest": indexer = self._get_nearest_indexer(target, limit, tolerance) else: if target._is_multi and self._is_multi: engine = self._engine # error: Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]" # has no attribute "_extract_level_codes" tgt_values = engine._extract_level_codes( # type: ignore[union-attr] target ) else: tgt_values = target._get_engine_target() indexer = self._engine.get_indexer(tgt_values) return ensure_platform_int(indexer) def _should_partial_index(self, target: Index) -> bool: """ Should we attempt partial-matching indexing? """ if is_interval_dtype(self.dtype): if is_interval_dtype(target.dtype): return False # See https://github.com/pandas-dev/pandas/issues/47772 the commented # out code can be restored (instead of hardcoding `return True`) # once that issue is fixed # "Index" has no attribute "left" # return self.left._should_compare(target) # type: ignore[attr-defined] return True return False def _check_indexing_method( self, method: str_t | None, limit: int | None = None, tolerance=None, ) -> None: """ Raise if we have a get_indexer `method` that is not supported or valid. 
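
        A quick illustration (not exhaustive of the checks below):
        ``tolerance`` is only meaningful together with an inexact ``method``.

        >>> pd.Index([1, 2, 3])._check_indexing_method(method=None, tolerance=1)
        Traceback (most recent call last):
        ValueError: tolerance argument only valid if doing pad, backfill or nearest reindexing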
""" if method not in [None, "bfill", "backfill", "pad", "ffill", "nearest"]: # in practice the clean_reindex_fill_method call would raise # before we get here raise ValueError("Invalid fill method") # pragma: no cover if self._is_multi: if method == "nearest": raise NotImplementedError( "method='nearest' not implemented yet " "for MultiIndex; see GitHub issue 9365" ) if method in ("pad", "backfill"): if tolerance is not None: raise NotImplementedError( "tolerance not implemented yet for MultiIndex" ) if is_interval_dtype(self.dtype) or is_categorical_dtype(self.dtype): # GH#37871 for now this is only for IntervalIndex and CategoricalIndex if method is not None: raise NotImplementedError( f"method {method} not yet implemented for {type(self).__name__}" ) if method is None: if tolerance is not None: raise ValueError( "tolerance argument only valid if doing pad, " "backfill or nearest reindexing" ) if limit is not None: raise ValueError( "limit argument only valid if doing pad, " "backfill or nearest reindexing" ) def _convert_tolerance(self, tolerance, target: np.ndarray | Index) -> np.ndarray: # override this method on subclasses tolerance = np.asarray(tolerance) if target.size != tolerance.size and tolerance.size > 1: raise ValueError("list-like tolerance size must match target index size") elif is_numeric_dtype(self) and not np.issubdtype(tolerance.dtype, np.number): if tolerance.ndim > 0: raise ValueError( f"tolerance argument for {type(self).__name__} with dtype " f"{self.dtype} must contain numeric elements if it is list type" ) raise ValueError( f"tolerance argument for {type(self).__name__} with dtype {self.dtype} " f"must be numeric if it is a scalar: {repr(tolerance)}" ) return tolerance def _get_fill_indexer( self, target: Index, method: str_t, limit: int | None = None, tolerance=None ) -> npt.NDArray[np.intp]: if self._is_multi: # TODO: get_indexer_with_fill docstring says values must be _sorted_ # but that doesn't appear to be enforced # error: "IndexEngine" has no attribute "get_indexer_with_fill" engine = self._engine with warnings.catch_warnings(): # TODO: We need to fix this. Casting to int64 in cython warnings.filterwarnings("ignore", category=RuntimeWarning) return engine.get_indexer_with_fill( # type: ignore[union-attr] target=target._values, values=self._values, method=method, limit=limit, ) if self.is_monotonic_increasing and target.is_monotonic_increasing: target_values = target._get_engine_target() own_values = self._get_engine_target() if not isinstance(target_values, np.ndarray) or not isinstance( own_values, np.ndarray ): raise NotImplementedError if method == "pad": indexer = libalgos.pad(own_values, target_values, limit=limit) else: # i.e. "backfill" indexer = libalgos.backfill(own_values, target_values, limit=limit) else: indexer = self._get_fill_indexer_searchsorted(target, method, limit) if tolerance is not None and len(self): indexer = self._filter_indexer_tolerance(target, indexer, tolerance) return indexer def _get_fill_indexer_searchsorted( self, target: Index, method: str_t, limit: int | None = None ) -> npt.NDArray[np.intp]: """ Fallback pad/backfill get_indexer that works for monotonic decreasing indexes and non-monotonic targets. 
""" if limit is not None: raise ValueError( f"limit argument for {repr(method)} method only well-defined " "if index and target are monotonic" ) side: Literal["left", "right"] = "left" if method == "pad" else "right" # find exact matches first (this simplifies the algorithm) indexer = self.get_indexer(target) nonexact = indexer == -1 indexer[nonexact] = self._searchsorted_monotonic(target[nonexact], side) if side == "left": # searchsorted returns "indices into a sorted array such that, # if the corresponding elements in v were inserted before the # indices, the order of a would be preserved". # Thus, we need to subtract 1 to find values to the left. indexer[nonexact] -= 1 # This also mapped not found values (values of 0 from # np.searchsorted) to -1, which conveniently is also our # sentinel for missing values else: # Mark indices to the right of the largest value as not found indexer[indexer == len(self)] = -1 return indexer def _get_nearest_indexer( self, target: Index, limit: int | None, tolerance ) -> npt.NDArray[np.intp]: """ Get the indexer for the nearest index labels; requires an index with values that can be subtracted from each other (e.g., not strings or tuples). """ if not len(self): return self._get_fill_indexer(target, "pad") left_indexer = self.get_indexer(target, "pad", limit=limit) right_indexer = self.get_indexer(target, "backfill", limit=limit) left_distances = self._difference_compat(target, left_indexer) right_distances = self._difference_compat(target, right_indexer) op = operator.lt if self.is_monotonic_increasing else operator.le indexer = np.where( # error: Argument 1&2 has incompatible type "Union[ExtensionArray, # ndarray[Any, Any]]"; expected "Union[SupportsDunderLE, # SupportsDunderGE, SupportsDunderGT, SupportsDunderLT]" op(left_distances, right_distances) # type: ignore[arg-type] | (right_indexer == -1), left_indexer, right_indexer, ) if tolerance is not None: indexer = self._filter_indexer_tolerance(target, indexer, tolerance) return indexer def _filter_indexer_tolerance( self, target: Index, indexer: npt.NDArray[np.intp], tolerance, ) -> npt.NDArray[np.intp]: distance = self._difference_compat(target, indexer) return np.where(distance <= tolerance, indexer, -1) def _difference_compat( self, target: Index, indexer: npt.NDArray[np.intp] ) -> ArrayLike: # Compatibility for PeriodArray, for which __sub__ returns an ndarray[object] # of DateOffset objects, which do not support __abs__ (and would be slow # if they did) if isinstance(self.dtype, PeriodDtype): # Note: we only get here with matching dtypes own_values = cast("PeriodArray", self._data)._ndarray target_values = cast("PeriodArray", target._data)._ndarray diff = own_values[indexer] - target_values else: # error: Unsupported left operand type for - ("ExtensionArray") diff = self._values[indexer] - target._values # type: ignore[operator] return abs(diff) # -------------------------------------------------------------------- # Indexer Conversion Methods def _validate_positional_slice(self, key: slice) -> None: """ For positional indexing, a slice must have either int or None for each of start, stop, and step. """ self._validate_indexer("positional", key.start, "iloc") self._validate_indexer("positional", key.stop, "iloc") self._validate_indexer("positional", key.step, "iloc") def _convert_slice_indexer(self, key: slice, kind: str_t): """ Convert a slice indexer. By definition, these are labels unless 'iloc' is passed in. Floats are not allowed as the start, step, or stop of the slice. 
Parameters ---------- key : label of the slice bound kind : {'loc', 'getitem'} """ assert kind in ["loc", "getitem"], kind # potentially cast the bounds to integers start, stop, step = key.start, key.stop, key.step # TODO(GH#50617): once Series.__[gs]etitem__ is removed we should be able # to simplify this. if isinstance(self.dtype, np.dtype) and is_float_dtype(self.dtype): # We always treat __getitem__ slicing as label-based # translate to locations return self.slice_indexer(start, stop, step) # figure out if this is a positional indexer def is_int(v): return v is None or is_integer(v) is_index_slice = is_int(start) and is_int(stop) and is_int(step) # special case for interval_dtype bc we do not do partial-indexing # on integer Intervals when slicing # TODO: write this in terms of e.g. should_partial_index? ints_are_positional = self._should_fallback_to_positional or is_interval_dtype( self.dtype ) is_positional = is_index_slice and ints_are_positional if kind == "getitem": # called from the getitem slicers, validate that we are in fact integers if is_integer_dtype(self.dtype) or is_index_slice: # Note: these checks are redundant if we know is_index_slice self._validate_indexer("slice", key.start, "getitem") self._validate_indexer("slice", key.stop, "getitem") self._validate_indexer("slice", key.step, "getitem") return key # convert the slice to an indexer here # if we are mixed and have integers if is_positional: try: # Validate start & stop if start is not None: self.get_loc(start) if stop is not None: self.get_loc(stop) is_positional = False except KeyError: pass if com.is_null_slice(key): # It doesn't matter if we are positional or label based indexer = key elif is_positional: if kind == "loc": # GH#16121, GH#24612, GH#31810 raise TypeError( "Slicing a positional slice with .loc is not allowed, " "Use .loc with labels or .iloc with positions instead.", ) indexer = key else: indexer = self.slice_indexer(start, stop, step) return indexer def _raise_invalid_indexer( self, form: str_t, key, reraise: lib.NoDefault | None | Exception = lib.no_default, ) -> None: """ Raise consistent invalid indexer message. """ msg = ( f"cannot do {form} indexing on {type(self).__name__} with these " f"indexers [{key}] of type {type(key).__name__}" ) if reraise is not lib.no_default: raise TypeError(msg) from reraise raise TypeError(msg) # -------------------------------------------------------------------- # Reindex Methods def _validate_can_reindex(self, indexer: np.ndarray) -> None: """ Check if we are allowing reindexing with this particular indexer. Parameters ---------- indexer : an integer ndarray Raises ------ ValueError if its a duplicate axis """ # trying to reindex on an axis with duplicates if not self._index_as_unique and len(indexer): raise ValueError("cannot reindex on an axis with duplicate labels") def reindex( self, target, method=None, level=None, limit=None, tolerance=None ) -> tuple[Index, npt.NDArray[np.intp] | None]: """ Create index with target's values. Parameters ---------- target : an iterable method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional * default: exact matches only. * pad / ffill: find the PREVIOUS index value if no exact match. * backfill / bfill: use NEXT index value if no exact match * nearest: use the NEAREST index value if no exact match. Tied distances are broken by preferring the larger index value. level : int, optional Level of multiindex. limit : int, optional Maximum number of consecutive labels in ``target`` to match for inexact matches. 
tolerance : int or float, optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- new_index : pd.Index Resulting index. indexer : np.ndarray[np.intp] or None Indices of output values in original index. Raises ------ TypeError If ``method`` passed along with ``level``. ValueError If non-unique multi-index ValueError If non-unique index and ``method`` or ``limit`` passed. See Also -------- Series.reindex : Conform Series to new index with optional filling logic. DataFrame.reindex : Conform DataFrame to new index with optional filling logic. Examples -------- >>> idx = pd.Index(['car', 'bike', 'train', 'tractor']) >>> idx Index(['car', 'bike', 'train', 'tractor'], dtype='object') >>> idx.reindex(['car', 'bike']) (Index(['car', 'bike'], dtype='object'), array([0, 1])) """ # GH6552: preserve names when reindexing to non-named target # (i.e. neither Index nor Series). preserve_names = not hasattr(target, "name") # GH7774: preserve dtype/tz if target is empty and not an Index. target = ensure_has_len(target) # target may be an iterator if not isinstance(target, Index) and len(target) == 0: if level is not None and self._is_multi: # "Index" has no attribute "levels"; maybe "nlevels"? idx = self.levels[level] # type: ignore[attr-defined] else: idx = self target = idx[:0] else: target = ensure_index(target) if level is not None and ( isinstance(self, ABCMultiIndex) or isinstance(target, ABCMultiIndex) ): if method is not None: raise TypeError("Fill method not supported if level passed") # TODO: tests where passing `keep_order=not self._is_multi` # makes a difference for non-MultiIndex case target, indexer, _ = self._join_level( target, level, how="right", keep_order=not self._is_multi ) else: if self.equals(target): indexer = None else: if self._index_as_unique: indexer = self.get_indexer( target, method=method, limit=limit, tolerance=tolerance ) elif self._is_multi: raise ValueError("cannot handle a non-unique multi-index!") elif not self.is_unique: # GH#42568 raise ValueError("cannot reindex on an axis with duplicate labels") else: indexer, _ = self.get_indexer_non_unique(target) target = self._wrap_reindex_result(target, indexer, preserve_names) return target, indexer def _wrap_reindex_result(self, target, indexer, preserve_names: bool): target = self._maybe_preserve_names(target, preserve_names) return target def _maybe_preserve_names(self, target: Index, preserve_names: bool): if preserve_names and target.nlevels == 1 and target.name != self.name: target = target.copy(deep=False) target.name = self.name return target def _reindex_non_unique( self, target: Index ) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp] | None]: """ Create a new index with target's values (move/add/delete values as necessary) use with non-unique Index and a possibly non-unique target. Parameters ---------- target : an iterable Returns ------- new_index : pd.Index Resulting index. indexer : np.ndarray[np.intp] Indices of output values in original index. 
new_indexer : np.ndarray[np.intp] or None """ target = ensure_index(target) if len(target) == 0: # GH#13691 return self[:0], np.array([], dtype=np.intp), None indexer, missing = self.get_indexer_non_unique(target) check = indexer != -1 new_labels = self.take(indexer[check]) new_indexer = None if len(missing): length = np.arange(len(indexer), dtype=np.intp) missing = ensure_platform_int(missing) missing_labels = target.take(missing) missing_indexer = length[~check] cur_labels = self.take(indexer[check]).values cur_indexer = length[check] # Index constructor below will do inference new_labels = np.empty((len(indexer),), dtype=object) new_labels[cur_indexer] = cur_labels new_labels[missing_indexer] = missing_labels # GH#38906 if not len(self): new_indexer = np.arange(0, dtype=np.intp) # a unique indexer elif target.is_unique: # see GH5553, make sure we use the right indexer new_indexer = np.arange(len(indexer), dtype=np.intp) new_indexer[cur_indexer] = np.arange(len(cur_labels)) new_indexer[missing_indexer] = -1 # we have a non_unique selector, need to use the original # indexer here else: # need to retake to have the same size as the indexer indexer[~check] = -1 # reset the new indexer to account for the new size new_indexer = np.arange(len(self.take(indexer)), dtype=np.intp) new_indexer[~check] = -1 if not isinstance(self, ABCMultiIndex): new_index = Index(new_labels, name=self.name) else: new_index = type(self).from_tuples(new_labels, names=self.names) return new_index, indexer, new_indexer # -------------------------------------------------------------------- # Join Methods def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: Literal[True], sort: bool = ..., ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: ... def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: Literal[False] = ..., sort: bool = ..., ) -> Index: ... def join( self, other: Index, *, how: JoinHow = ..., level: Level = ..., return_indexers: bool = ..., sort: bool = ..., ) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: ... def join( self, other: Index, *, how: JoinHow = "left", level: Level = None, return_indexers: bool = False, sort: bool = False, ) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: """ Compute join_index and indexers to conform data structures to the new index. Parameters ---------- other : Index how : {'left', 'right', 'inner', 'outer'} level : int or level name, default None return_indexers : bool, default False sort : bool, default False Sort the join keys lexicographically in the result Index. If False, the order of the join keys depends on the join type (how keyword). Returns ------- join_index, (left_indexer, right_indexer) """ other = ensure_index(other) if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex): if (self.tz is None) ^ (other.tz is None): # Raise instead of casting to object below. 
raise TypeError("Cannot join tz-naive with tz-aware DatetimeIndex") if not self._is_multi and not other._is_multi: # We have specific handling for MultiIndex below pself, pother = self._maybe_promote(other) if pself is not self or pother is not other: return pself.join( pother, how=how, level=level, return_indexers=True, sort=sort ) lindexer: np.ndarray | None rindexer: np.ndarray | None # try to figure out the join level # GH3662 if level is None and (self._is_multi or other._is_multi): # have the same levels/names so a simple join if self.names == other.names: pass else: return self._join_multi(other, how=how) # join on the level if level is not None and (self._is_multi or other._is_multi): return self._join_level(other, level, how=how) if len(other) == 0: if how in ("left", "outer"): join_index = self._view() rindexer = np.broadcast_to(np.intp(-1), len(join_index)) return join_index, None, rindexer elif how in ("right", "inner", "cross"): join_index = other._view() lindexer = np.array([]) return join_index, lindexer, None if len(self) == 0: if how in ("right", "outer"): join_index = other._view() lindexer = np.broadcast_to(np.intp(-1), len(join_index)) return join_index, lindexer, None elif how in ("left", "inner", "cross"): join_index = self._view() rindexer = np.array([]) return join_index, None, rindexer if self._join_precedence < other._join_precedence: flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) join_index, lidx, ridx = other.join( self, how=how, level=level, return_indexers=True ) lidx, ridx = ridx, lidx return join_index, lidx, ridx if not is_dtype_equal(self.dtype, other.dtype): dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) other = other.astype(dtype, copy=False) return this.join(other, how=how, return_indexers=True) _validate_join_method(how) if not self.is_unique and not other.is_unique: return self._join_non_unique(other, how=how) elif not self.is_unique or not other.is_unique: if self.is_monotonic_increasing and other.is_monotonic_increasing: if not is_interval_dtype(self.dtype): # otherwise we will fall through to _join_via_get_indexer # GH#39133 # go through object dtype for ea till engine is supported properly return self._join_monotonic(other, how=how) else: return self._join_non_unique(other, how=how) elif ( # GH48504: exclude MultiIndex to avoid going through MultiIndex._values self.is_monotonic_increasing and other.is_monotonic_increasing and self._can_use_libjoin and not isinstance(self, ABCMultiIndex) and not is_categorical_dtype(self.dtype) ): # Categorical is monotonic if data are ordered as categories, but join can # not handle this in case of not lexicographically monotonic GH#38502 try: return self._join_monotonic(other, how=how) except TypeError: # object dtype; non-comparable objects pass return self._join_via_get_indexer(other, how, sort) def _join_via_get_indexer( self, other: Index, how: JoinHow, sort: bool ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: # Fallback if we do not have any fastpaths available based on # uniqueness/monotonicity # Note: at this point we have checked matching dtypes if how == "left": join_index = self elif how == "right": join_index = other elif how == "inner": # TODO: sort=False here for backwards compat. It may # be better to use the sort parameter passed into join join_index = self.intersection(other, sort=False) elif how == "outer": # TODO: sort=True here for backwards compat. 
It may # be better to use the sort parameter passed into join join_index = self.union(other) if sort: join_index = join_index.sort_values() if join_index is self: lindexer = None else: lindexer = self.get_indexer_for(join_index) if join_index is other: rindexer = None else: rindexer = other.get_indexer_for(join_index) return join_index, lindexer, rindexer def _join_multi(self, other: Index, how: JoinHow): from pandas.core.indexes.multi import MultiIndex from pandas.core.reshape.merge import restore_dropped_levels_multijoin # figure out join names self_names_list = list(com.not_none(*self.names)) other_names_list = list(com.not_none(*other.names)) self_names_order = self_names_list.index other_names_order = other_names_list.index self_names = set(self_names_list) other_names = set(other_names_list) overlap = self_names & other_names # need at least 1 in common if not overlap: raise ValueError("cannot join with no overlapping index names") if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): # Drop the non-matching levels from left and right respectively ldrop_names = sorted(self_names - overlap, key=self_names_order) rdrop_names = sorted(other_names - overlap, key=other_names_order) # if only the order differs if not len(ldrop_names + rdrop_names): self_jnlevels = self other_jnlevels = other.reorder_levels(self.names) else: self_jnlevels = self.droplevel(ldrop_names) other_jnlevels = other.droplevel(rdrop_names) # Join left and right # Join on same leveled multi-index frames is supported join_idx, lidx, ridx = self_jnlevels.join( other_jnlevels, how=how, return_indexers=True ) # Restore the dropped levels # Returned index level order is # common levels, ldrop_names, rdrop_names dropped_names = ldrop_names + rdrop_names # error: Argument 5/6 to "restore_dropped_levels_multijoin" has # incompatible type "Optional[ndarray[Any, dtype[signedinteger[Any # ]]]]"; expected "ndarray[Any, dtype[signedinteger[Any]]]" levels, codes, names = restore_dropped_levels_multijoin( self, other, dropped_names, join_idx, lidx, # type: ignore[arg-type] ridx, # type: ignore[arg-type] ) # Re-create the multi-index multi_join_idx = MultiIndex( levels=levels, codes=codes, names=names, verify_integrity=False ) multi_join_idx = multi_join_idx.remove_unused_levels() return multi_join_idx, lidx, ridx jl = list(overlap)[0] # Case where only one index is multi # make the indices into mi's that match flip_order = False if isinstance(self, MultiIndex): self, other = other, self flip_order = True # flip if join method is right or left flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) level = other.names.index(jl) result = self._join_level(other, level, how=how) if flip_order: return result[0], result[2], result[1] return result def _join_non_unique( self, other: Index, how: JoinHow = "left" ) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp]]: from pandas.core.reshape.merge import get_join_indexers # We only get here if dtypes match assert self.dtype == other.dtype left_idx, right_idx = get_join_indexers( [self._values], [other._values], how=how, sort=True ) mask = left_idx == -1 join_idx = self.take(left_idx) right = other.take(right_idx) join_index = join_idx.putmask(mask, right) return join_index, left_idx, right_idx def _join_level( self, other: Index, level, how: JoinHow = "left", keep_order: bool = True ) -> tuple[MultiIndex, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: """ The join method *only* affects the level of the resulting MultiIndex. 
Otherwise it just exactly aligns the Index data to the labels of the level in the MultiIndex. If ```keep_order == True```, the order of the data indexed by the MultiIndex will not be changed; otherwise, it will tie out with `other`. """ from pandas.core.indexes.multi import MultiIndex def _get_leaf_sorter(labels: list[np.ndarray]) -> npt.NDArray[np.intp]: """ Returns sorter for the inner most level while preserving the order of higher levels. Parameters ---------- labels : list[np.ndarray] Each ndarray has signed integer dtype, not necessarily identical. Returns ------- np.ndarray[np.intp] """ if labels[0].size == 0: return np.empty(0, dtype=np.intp) if len(labels) == 1: return get_group_index_sorter(ensure_platform_int(labels[0])) # find indexers of beginning of each set of # same-key labels w.r.t all but last level tic = labels[0][:-1] != labels[0][1:] for lab in labels[1:-1]: tic |= lab[:-1] != lab[1:] starts = np.hstack(([True], tic, [True])).nonzero()[0] lab = ensure_int64(labels[-1]) return lib.get_level_sorter(lab, ensure_platform_int(starts)) if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): raise TypeError("Join on level between two MultiIndex objects is ambiguous") left, right = self, other flip_order = not isinstance(self, MultiIndex) if flip_order: left, right = right, left flip: dict[JoinHow, JoinHow] = {"right": "left", "left": "right"} how = flip.get(how, how) assert isinstance(left, MultiIndex) level = left._get_level_number(level) old_level = left.levels[level] if not right.is_unique: raise NotImplementedError( "Index._join_level on non-unique index is not implemented" ) new_level, left_lev_indexer, right_lev_indexer = old_level.join( right, how=how, return_indexers=True ) if left_lev_indexer is None: if keep_order or len(left) == 0: left_indexer = None join_index = left else: # sort the leaves left_indexer = _get_leaf_sorter(left.codes[: level + 1]) join_index = left[left_indexer] else: left_lev_indexer = ensure_platform_int(left_lev_indexer) rev_indexer = lib.get_reverse_indexer(left_lev_indexer, len(old_level)) old_codes = left.codes[level] taker = old_codes[old_codes != -1] new_lev_codes = rev_indexer.take(taker) new_codes = list(left.codes) new_codes[level] = new_lev_codes new_levels = list(left.levels) new_levels[level] = new_level if keep_order: # just drop missing values. o.w. keep order left_indexer = np.arange(len(left), dtype=np.intp) left_indexer = cast(np.ndarray, left_indexer) mask = new_lev_codes != -1 if not mask.all(): new_codes = [lab[mask] for lab in new_codes] left_indexer = left_indexer[mask] else: # tie out the order with other if level == 0: # outer most level, take the fast route max_new_lev = 0 if len(new_lev_codes) == 0 else new_lev_codes.max() ngroups = 1 + max_new_lev left_indexer, counts = libalgos.groupsort_indexer( new_lev_codes, ngroups ) # missing values are placed first; drop them! left_indexer = left_indexer[counts[0] :] new_codes = [lab[left_indexer] for lab in new_codes] else: # sort the leaves mask = new_lev_codes != -1 mask_all = mask.all() if not mask_all: new_codes = [lab[mask] for lab in new_codes] left_indexer = _get_leaf_sorter(new_codes[: level + 1]) new_codes = [lab[left_indexer] for lab in new_codes] # left_indexers are w.r.t masked frame. # reverse to original frame! 
if not mask_all: left_indexer = mask.nonzero()[0][left_indexer] join_index = MultiIndex( levels=new_levels, codes=new_codes, names=left.names, verify_integrity=False, ) if right_lev_indexer is not None: right_indexer = right_lev_indexer.take(join_index.codes[level]) else: right_indexer = join_index.codes[level] if flip_order: left_indexer, right_indexer = right_indexer, left_indexer left_indexer = ( None if left_indexer is None else ensure_platform_int(left_indexer) ) right_indexer = ( None if right_indexer is None else ensure_platform_int(right_indexer) ) return join_index, left_indexer, right_indexer def _join_monotonic( self, other: Index, how: JoinHow = "left" ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: # We only get here with matching dtypes and both monotonic increasing assert other.dtype == self.dtype if self.equals(other): # This is a convenient place for this check, but its correctness # does not depend on monotonicity, so it could go earlier # in the calling method. ret_index = other if how == "right" else self return ret_index, None, None ridx: npt.NDArray[np.intp] | None lidx: npt.NDArray[np.intp] | None if self.is_unique and other.is_unique: # We can perform much better than the general case if how == "left": join_index = self lidx = None ridx = self._left_indexer_unique(other) elif how == "right": join_index = other lidx = other._left_indexer_unique(self) ridx = None elif how == "inner": join_array, lidx, ridx = self._inner_indexer(other) join_index = self._wrap_joined_index(join_array, other, lidx, ridx) elif how == "outer": join_array, lidx, ridx = self._outer_indexer(other) join_index = self._wrap_joined_index(join_array, other, lidx, ridx) else: if how == "left": join_array, lidx, ridx = self._left_indexer(other) elif how == "right": join_array, ridx, lidx = other._left_indexer(self) elif how == "inner": join_array, lidx, ridx = self._inner_indexer(other) elif how == "outer": join_array, lidx, ridx = self._outer_indexer(other) assert lidx is not None assert ridx is not None join_index = self._wrap_joined_index(join_array, other, lidx, ridx) lidx = None if lidx is None else ensure_platform_int(lidx) ridx = None if ridx is None else ensure_platform_int(ridx) return join_index, lidx, ridx def _wrap_joined_index( self: _IndexT, joined: ArrayLike, other: _IndexT, lidx: npt.NDArray[np.intp], ridx: npt.NDArray[np.intp], ) -> _IndexT: assert other.dtype == self.dtype if isinstance(self, ABCMultiIndex): name = self.names if self.names == other.names else None # error: Incompatible return value type (got "MultiIndex", # expected "_IndexT") mask = lidx == -1 join_idx = self.take(lidx) right = other.take(ridx) join_index = join_idx.putmask(mask, right) return join_index.set_names(name) # type: ignore[return-value] else: name = get_op_result_name(self, other) return self._constructor._with_infer(joined, name=name, dtype=self.dtype) def _can_use_libjoin(self) -> bool: """ Whether we can use the fastpaths implement in _libs.join """ if type(self) is Index: # excludes EAs, but include masks, we get here with monotonic # values only, meaning no NA return ( isinstance(self.dtype, np.dtype) or isinstance(self.values, BaseMaskedArray) or isinstance(self._values, ArrowExtensionArray) ) return not is_interval_dtype(self.dtype) # -------------------------------------------------------------------- # Uncategorized Methods def values(self) -> ArrayLike: """ Return an array representing the data in the Index. .. 
warning:: We recommend using :attr:`Index.array` or :meth:`Index.to_numpy`, depending on whether you need a reference to the underlying data or a NumPy array. Returns ------- array: numpy.ndarray or ExtensionArray See Also -------- Index.array : Reference to the underlying data. Index.to_numpy : A NumPy array representing the underlying data. """ return self._data def array(self) -> ExtensionArray: array = self._data if isinstance(array, np.ndarray): from pandas.core.arrays.numpy_ import PandasArray array = PandasArray(array) return array def _values(self) -> ExtensionArray | np.ndarray: """ The best array representation. This is an ndarray or ExtensionArray. ``_values`` are consistent between ``Series`` and ``Index``. It may differ from the public '.values' method. index | values | _values | ----------------- | --------------- | ------------- | Index | ndarray | ndarray | CategoricalIndex | Categorical | Categorical | DatetimeIndex | ndarray[M8ns] | DatetimeArray | DatetimeIndex[tz] | ndarray[M8ns] | DatetimeArray | PeriodIndex | ndarray[object] | PeriodArray | IntervalIndex | IntervalArray | IntervalArray | See Also -------- values : Values """ return self._data def _get_engine_target(self) -> ArrayLike: """ Get the ndarray or ExtensionArray that we can pass to the IndexEngine constructor. """ vals = self._values if isinstance(vals, StringArray): # GH#45652 much more performant than ExtensionEngine return vals._ndarray if ( type(self) is Index and isinstance(self._values, ExtensionArray) and not isinstance(self._values, BaseMaskedArray) and not ( isinstance(self._values, ArrowExtensionArray) and is_numeric_dtype(self.dtype) # Exclude decimal and self.dtype.kind != "O" ) ): # TODO(ExtensionIndex): remove special-case, just use self._values return self._values.astype(object) return vals def _get_join_target(self) -> ArrayLike: """ Get the ndarray or ExtensionArray that we can pass to the join functions. """ if isinstance(self._values, BaseMaskedArray): # This is only used if our array is monotonic, so no NAs present return self._values._data elif isinstance(self._values, ArrowExtensionArray): # This is only used if our array is monotonic, so no missing values # present return self._values.to_numpy() return self._get_engine_target() def _from_join_target(self, result: np.ndarray) -> ArrayLike: """ Cast the ndarray returned from one of the libjoin.foo_indexer functions back to type(self)._data. """ if isinstance(self.values, BaseMaskedArray): return type(self.values)(result, np.zeros(result.shape, dtype=np.bool_)) elif isinstance(self.values, ArrowExtensionArray): return type(self.values)._from_sequence(result) return result def memory_usage(self, deep: bool = False) -> int: result = self._memory_usage(deep=deep) # include our engine hashtable result += self._engine.sizeof(deep=deep) return result def where(self, cond, other=None) -> Index: """ Replace values where the condition is False. The replacement is taken from other. Parameters ---------- cond : bool array-like with the same length as self Condition to select the values on. other : scalar, or array-like, default None Replacement if the condition is False. Returns ------- pandas.Index A copy of self with values replaced from other where the condition is False. See Also -------- Series.where : Same method for Series. DataFrame.where : Same method for DataFrame. 
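
        Notes
        -----
        This simply delegates to ``putmask``: the result is equivalent to
        ``self.putmask(~np.asarray(cond, dtype=bool), other)``, so ``other``
        is only consulted at positions where ``cond`` is False.
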
Examples -------- >>> idx = pd.Index(['car', 'bike', 'train', 'tractor']) >>> idx Index(['car', 'bike', 'train', 'tractor'], dtype='object') >>> idx.where(idx.isin(['car', 'train']), 'other') Index(['car', 'other', 'train', 'other'], dtype='object') """ if isinstance(self, ABCMultiIndex): raise NotImplementedError( ".where is not supported for MultiIndex operations" ) cond = np.asarray(cond, dtype=bool) return self.putmask(~cond, other) # construction helpers def _raise_scalar_data_error(cls, data): # We return the TypeError so that we can raise it from the constructor # in order to keep mypy happy raise TypeError( f"{cls.__name__}(...) must be called with a collection of some " f"kind, {repr(data)} was passed" ) def _validate_fill_value(self, value): """ Check if the value can be inserted into our array without casting, and convert it to an appropriate native type if necessary. Raises ------ TypeError If the value cannot be inserted into an array of this dtype. """ dtype = self.dtype if isinstance(dtype, np.dtype) and dtype.kind not in ["m", "M"]: # return np_can_hold_element(dtype, value) try: return np_can_hold_element(dtype, value) except LossySetitemError as err: # re-raise as TypeError for consistency raise TypeError from err elif not can_hold_element(self._values, value): raise TypeError return value def _require_scalar(self, value): """ Check that this is a scalar value that we can use for setitem-like operations without changing dtype. """ if not is_scalar(value): raise TypeError(f"'value' must be a scalar, passed: {type(value).__name__}") return value def _is_memory_usage_qualified(self) -> bool: """ Return a boolean if we need a qualified .info display. """ return is_object_dtype(self.dtype) def __contains__(self, key: Any) -> bool: """ Return a boolean indicating whether the provided key is in the index. Parameters ---------- key : label The key to check if it is present in the index. Returns ------- bool Whether the key search is in the index. Raises ------ TypeError If the key is not hashable. See Also -------- Index.isin : Returns an ndarray of boolean dtype indicating whether the list-like key is in the index. Examples -------- >>> idx = pd.Index([1, 2, 3, 4]) >>> idx Index([1, 2, 3, 4], dtype='int64') >>> 2 in idx True >>> 6 in idx False """ hash(key) try: return key in self._engine except (OverflowError, TypeError, ValueError): return False # https://github.com/python/typeshed/issues/2148#issuecomment-520783318 # Incompatible types in assignment (expression has type "None", base class # "object" defined the type as "Callable[[object], int]") __hash__: ClassVar[None] # type: ignore[assignment] def __setitem__(self, key, value): raise TypeError("Index does not support mutable operations") def __getitem__(self, key): """ Override numpy.ndarray's __getitem__ method to work as desired. This function adds lists and Series as valid boolean indexers (ndarrays only supports ndarray with dtype=bool). If resulting ndim != 1, plain ndarray is returned instead of corresponding `Index` subclass. """ getitem = self._data.__getitem__ if is_integer(key) or is_float(key): # GH#44051 exclude bool, which would return a 2d ndarray key = com.cast_scalar_indexer(key) return getitem(key) if isinstance(key, slice): # This case is separated from the conditional above to avoid # pessimization com.is_bool_indexer and ndim checks. result = getitem(key) # Going through simple_new for performance. 
return type(self)._simple_new( result, name=self._name, refs=self._references ) if com.is_bool_indexer(key): # if we have list[bools, length=1e5] then doing this check+convert # takes 166 µs + 2.1 ms and cuts the ndarray.__getitem__ # time below from 3.8 ms to 496 µs # if we already have ndarray[bool], the overhead is 1.4 µs or .25% if is_extension_array_dtype(getattr(key, "dtype", None)): key = key.to_numpy(dtype=bool, na_value=False) else: key = np.asarray(key, dtype=bool) result = getitem(key) # Because we ruled out integer above, we always get an arraylike here if result.ndim > 1: disallow_ndim_indexing(result) # NB: Using _constructor._simple_new would break if MultiIndex # didn't override __getitem__ return self._constructor._simple_new(result, name=self._name) def _getitem_slice(self: _IndexT, slobj: slice) -> _IndexT: """ Fastpath for __getitem__ when we know we have a slice. """ res = self._data[slobj] return type(self)._simple_new(res, name=self._name, refs=self._references) def _can_hold_identifiers_and_holds_name(self, name) -> bool: """ Faster check for ``name in self`` when we know `name` is a Python identifier (e.g. in NDFrame.__getattr__, which hits this to support . key lookup). For indexes that can't hold identifiers (everything but object & categorical) we just return False. https://github.com/pandas-dev/pandas/issues/19764 """ if ( is_object_dtype(self.dtype) or is_string_dtype(self.dtype) or is_categorical_dtype(self.dtype) ): return name in self return False def append(self, other: Index | Sequence[Index]) -> Index: """ Append a collection of Index options together. Parameters ---------- other : Index or list/tuple of indices Returns ------- Index """ to_concat = [self] if isinstance(other, (list, tuple)): to_concat += list(other) else: # error: Argument 1 to "append" of "list" has incompatible type # "Union[Index, Sequence[Index]]"; expected "Index" to_concat.append(other) # type: ignore[arg-type] for obj in to_concat: if not isinstance(obj, Index): raise TypeError("all inputs must be Index") names = {obj.name for obj in to_concat} name = None if len(names) > 1 else self.name return self._concat(to_concat, name) def _concat(self, to_concat: list[Index], name: Hashable) -> Index: """ Concatenate multiple Index objects. """ to_concat_vals = [x._values for x in to_concat] result = concat_compat(to_concat_vals) return Index._with_infer(result, name=name) def putmask(self, mask, value) -> Index: """ Return a new Index of the values set with the mask. Returns ------- Index See Also -------- numpy.ndarray.putmask : Changes elements of an array based on conditional and input values. """ mask, noop = validate_putmask(self._values, mask) if noop: return self.copy() if self.dtype != object and is_valid_na_for_dtype(value, self.dtype): # e.g. 
None -> np.nan, see also Block._standardize_fill_value value = self._na_value try: converted = self._validate_fill_value(value) except (LossySetitemError, ValueError, TypeError) as err: if is_object_dtype(self): # pragma: no cover raise err # See also: Block.coerce_to_target_dtype dtype = self._find_common_type_compat(value) return self.astype(dtype).putmask(mask, value) values = self._values.copy() if isinstance(values, np.ndarray): converted = setitem_datetimelike_compat(values, mask.sum(), converted) np.putmask(values, mask, converted) else: # Note: we use the original value here, not converted, as # _validate_fill_value is not idempotent values._putmask(mask, value) return self._shallow_copy(values) def equals(self, other: Any) -> bool: """ Determine if two Index object are equal. The things that are being compared are: * The elements inside the Index object. * The order of the elements inside the Index object. Parameters ---------- other : Any The other object to compare against. Returns ------- bool True if "other" is an Index and it has the same elements and order as the calling index; False otherwise. Examples -------- >>> idx1 = pd.Index([1, 2, 3]) >>> idx1 Index([1, 2, 3], dtype='int64') >>> idx1.equals(pd.Index([1, 2, 3])) True The elements inside are compared >>> idx2 = pd.Index(["1", "2", "3"]) >>> idx2 Index(['1', '2', '3'], dtype='object') >>> idx1.equals(idx2) False The order is compared >>> ascending_idx = pd.Index([1, 2, 3]) >>> ascending_idx Index([1, 2, 3], dtype='int64') >>> descending_idx = pd.Index([3, 2, 1]) >>> descending_idx Index([3, 2, 1], dtype='int64') >>> ascending_idx.equals(descending_idx) False The dtype is *not* compared >>> int64_idx = pd.Index([1, 2, 3], dtype='int64') >>> int64_idx Index([1, 2, 3], dtype='int64') >>> uint64_idx = pd.Index([1, 2, 3], dtype='uint64') >>> uint64_idx Index([1, 2, 3], dtype='uint64') >>> int64_idx.equals(uint64_idx) True """ if self.is_(other): return True if not isinstance(other, Index): return False if is_object_dtype(self.dtype) and not is_object_dtype(other.dtype): # if other is not object, use other's logic for coercion return other.equals(self) if isinstance(other, ABCMultiIndex): # d-level MultiIndex can equal d-tuple Index return other.equals(self) if isinstance(self._values, ExtensionArray): # Dispatch to the ExtensionArray's .equals method. if not isinstance(other, type(self)): return False earr = cast(ExtensionArray, self._data) return earr.equals(other._data) if is_extension_array_dtype(other.dtype): # All EA-backed Index subclasses override equals return other.equals(self) return array_equivalent(self._values, other._values) def identical(self, other) -> bool: """ Similar to equals, but checks that object attributes and types are also equal. Returns ------- bool If two Index objects have equal elements and same type True, otherwise False. """ return ( self.equals(other) and all( getattr(self, c, None) == getattr(other, c, None) for c in self._comparables ) and type(self) == type(other) and self.dtype == other.dtype ) def asof(self, label): """ Return the label from the index, or, if not present, the previous one. Assuming that the index is sorted, return the passed index label if it is in the index, or return the previous index label if the passed one is not in the index. Parameters ---------- label : object The label up to which the method returns the latest index label. Returns ------- object The passed label if it is in the index. 
The previous label if the passed label is not in the sorted index or `NaN` if there is no such label. See Also -------- Series.asof : Return the latest value in a Series up to the passed index. merge_asof : Perform an asof merge (similar to left join but it matches on nearest key rather than equal key). Index.get_loc : An `asof` is a thin wrapper around `get_loc` with method='pad'. Examples -------- `Index.asof` returns the latest index label up to the passed label. >>> idx = pd.Index(['2013-12-31', '2014-01-02', '2014-01-03']) >>> idx.asof('2014-01-01') '2013-12-31' If the label is in the index, the method returns the passed label. >>> idx.asof('2014-01-02') '2014-01-02' If all of the labels in the index are later than the passed label, NaN is returned. >>> idx.asof('1999-01-02') nan If the index is not sorted, an error is raised. >>> idx_not_sorted = pd.Index(['2013-12-31', '2015-01-02', ... '2014-01-03']) >>> idx_not_sorted.asof('2013-12-31') Traceback (most recent call last): ValueError: index must be monotonic increasing or decreasing """ self._searchsorted_monotonic(label) # validate sortedness try: loc = self.get_loc(label) except (KeyError, TypeError): # KeyError -> No exact match, try for padded # TypeError -> passed e.g. non-hashable, fall through to get # the tested exception message indexer = self.get_indexer([label], method="pad") if indexer.ndim > 1 or indexer.size > 1: raise TypeError("asof requires scalar valued input") loc = indexer.item() if loc == -1: return self._na_value else: if isinstance(loc, slice): loc = loc.indices(len(self))[-1] return self[loc] def asof_locs( self, where: Index, mask: npt.NDArray[np.bool_] ) -> npt.NDArray[np.intp]: """ Return the locations (indices) of labels in the index. As in the `asof` function, if the label (a particular entry in `where`) is not in the index, the latest index label up to the passed label is chosen and its index returned. If all of the labels in the index are later than a label in `where`, -1 is returned. `mask` is used to ignore NA values in the index during calculation. Parameters ---------- where : Index An Index consisting of an array of timestamps. mask : np.ndarray[bool] Array of booleans denoting where values in the original data are not NA. Returns ------- np.ndarray[np.intp] An array of locations (indices) of the labels from the Index which correspond to the return values of the `asof` function for every element in `where`. """ # error: No overload variant of "searchsorted" of "ndarray" matches argument # types "Union[ExtensionArray, ndarray[Any, Any]]", "str" # TODO: will be fixed when ExtensionArray.searchsorted() is fixed locs = self._values[mask].searchsorted( where._values, side="right" # type: ignore[call-overload] ) locs = np.where(locs > 0, locs - 1, 0) result = np.arange(len(self), dtype=np.intp)[mask].take(locs) first_value = self._values[mask.argmax()] result[(locs == 0) & (where._values < first_value)] = -1 return result def sort_values( self, return_indexer: bool = False, ascending: bool = True, na_position: str_t = "last", key: Callable | None = None, ): """ Return a sorted copy of the index. Return a sorted copy of the index, and optionally return the indices that sorted the index itself. Parameters ---------- return_indexer : bool, default False Should the indices that would sort the index be returned. ascending : bool, default True Should the index values be sorted in an ascending order. 
na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. .. versionadded:: 1.2.0 key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- sorted_index : pandas.Index Sorted copy of the index. indexer : numpy.ndarray, optional The indices that the index itself was sorted by. See Also -------- Series.sort_values : Sort values of a Series. DataFrame.sort_values : Sort values in a DataFrame. Examples -------- >>> idx = pd.Index([10, 100, 1, 1000]) >>> idx Index([10, 100, 1, 1000], dtype='int64') Sort values in ascending order (default behavior). >>> idx.sort_values() Index([1, 10, 100, 1000], dtype='int64') Sort values in descending order, and also get the indices `idx` was sorted by. >>> idx.sort_values(ascending=False, return_indexer=True) (Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2])) """ idx = ensure_key_mapped(self, key) # GH 35584. Sort missing values according to na_position kwarg # ignore na_position for MultiIndex if not isinstance(self, ABCMultiIndex): _as = nargsort( items=idx, ascending=ascending, na_position=na_position, key=key ) else: _as = idx.argsort() if not ascending: _as = _as[::-1] sorted_index = self.take(_as) if return_indexer: return sorted_index, _as else: return sorted_index def sort(self, *args, **kwargs): """ Use sort_values instead. """ raise TypeError("cannot sort an Index object in-place, use sort_values instead") def shift(self, periods: int = 1, freq=None): """ Shift index by desired number of time frequency increments. This method is for shifting the values of datetime-like indexes by a specified time increment a given number of times. Parameters ---------- periods : int, default 1 Number of periods (or increments) to shift by, can be positive or negative. freq : pandas.DateOffset, pandas.Timedelta or str, optional Frequency increment to shift by. If None, the index is shifted by its own `freq` attribute. Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc. Returns ------- pandas.Index Shifted index. See Also -------- Series.shift : Shift values of Series. Notes ----- This method is only implemented for datetime-like index classes, i.e., DatetimeIndex, PeriodIndex and TimedeltaIndex. Examples -------- Put the first 5 month starts of 2011 into an index. >>> month_starts = pd.date_range('1/1/2011', periods=5, freq='MS') >>> month_starts DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01', '2011-05-01'], dtype='datetime64[ns]', freq='MS') Shift the index by 10 days. >>> month_starts.shift(10, freq='D') DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11', '2011-05-11'], dtype='datetime64[ns]', freq=None) The default value of `freq` is the `freq` attribute of the index, which is 'MS' (month start) in this example. >>> month_starts.shift(10) DatetimeIndex(['2011-11-01', '2011-12-01', '2012-01-01', '2012-02-01', '2012-03-01'], dtype='datetime64[ns]', freq='MS') """ raise NotImplementedError( f"This method is only implemented for DatetimeIndex, PeriodIndex and " f"TimedeltaIndex; Got type {type(self).__name__}" ) def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]: """ Return the integer indices that would sort the index. 
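
        The computation is delegated to ``argsort`` on the underlying array
        (``self._data``), so any additional arguments are forwarded there
        unchanged.
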
        Parameters
        ----------
        *args
            Passed to `numpy.ndarray.argsort`.
        **kwargs
            Passed to `numpy.ndarray.argsort`.

        Returns
        -------
        np.ndarray[np.intp]
            Integer indices that would sort the index if used as
            an indexer.

        See Also
        --------
        numpy.argsort : Similar method for NumPy arrays.
        Index.sort_values : Return sorted copy of Index.

        Examples
        --------
        >>> idx = pd.Index(['b', 'a', 'd', 'c'])
        >>> idx
        Index(['b', 'a', 'd', 'c'], dtype='object')

        >>> order = idx.argsort()
        >>> order
        array([1, 0, 3, 2])

        >>> idx[order]
        Index(['a', 'b', 'c', 'd'], dtype='object')
        """
        # This works for either ndarray or EA, is overridden
        #  by RangeIndex, MultiIndex
        return self._data.argsort(*args, **kwargs)

    def _check_indexing_error(self, key):
        if not is_scalar(key):
            # if key is not a scalar, directly raise an error (the code below
            # would convert to numpy arrays and raise later any way) - GH29926
            raise InvalidIndexError(key)

    @cache_readonly
    def _should_fallback_to_positional(self) -> bool:
        """
        Should an integer key be treated as positional?
        """
        return self.inferred_type not in {
            "integer",
            "mixed-integer",
            "floating",
            "complex",
        }

    _index_shared_docs[
        "get_indexer_non_unique"
    ] = """
        Compute indexer and mask for new index given the current index.

        The indexer should be then used as an input to ndarray.take to align the
        current data to the new index.

        Parameters
        ----------
        target : %(target_klass)s

        Returns
        -------
        indexer : np.ndarray[np.intp]
            Integers from 0 to n - 1 indicating that the index at these
            positions matches the corresponding target values. Missing values
            in the target are marked by -1.
        missing : np.ndarray[np.intp]
            An indexer into the target of the values not found.
            These correspond to the -1 in the indexer array.

        Examples
        --------
        >>> index = pd.Index(['c', 'b', 'a', 'b', 'b'])
        >>> index.get_indexer_non_unique(['b', 'b'])
        (array([1, 3, 4, 1, 3, 4]), array([], dtype=int64))

        In the example below there are no matched values.

        >>> index = pd.Index(['c', 'b', 'a', 'b', 'b'])
        >>> index.get_indexer_non_unique(['q', 'r', 't'])
        (array([-1, -1, -1]), array([0, 1, 2]))

        For this reason, the returned ``indexer`` contains only integers equal
        to -1. It demonstrates that there's no match between the index and the
        ``target`` values at these positions. The mask [0, 1, 2] in the return
        value shows that the first, second, and third elements are missing.

        Notice that the return value is a tuple containing two items. In the
        example below the first item is an array of locations in ``index``.
        The second item is a mask showing that the first and third elements
        are missing.

        >>> index = pd.Index(['c', 'b', 'a', 'b', 'b'])
        >>> index.get_indexer_non_unique(['f', 'b', 's'])
        (array([-1,  1,  3,  4, -1]), array([0, 2]))
        """

    def get_indexer_non_unique(
        self, target
    ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]:
        target = ensure_index(target)
        target = self._maybe_cast_listlike_indexer(target)

        if not self._should_compare(target) and not self._should_partial_index(target):
            # _should_partial_index e.g. IntervalIndex with numeric scalars
            #  that can be matched to Interval scalars.
            return self._get_indexer_non_comparable(target, method=None, unique=False)

        pself, ptarget = self._maybe_promote(target)
        if pself is not self or ptarget is not target:
            return pself.get_indexer_non_unique(ptarget)

        if not is_dtype_equal(self.dtype, target.dtype):
            # TODO: if object, could use infer_dtype to preempt costly
            #  conversion if still non-comparable?
dtype = self._find_common_type_compat(target) this = self.astype(dtype, copy=False) that = target.astype(dtype, copy=False) return this.get_indexer_non_unique(that) # TODO: get_indexer has fastpaths for both Categorical-self and # Categorical-target. Can we do something similar here? # Note: _maybe_promote ensures we never get here with MultiIndex # self and non-Multi target tgt_values = target._get_engine_target() if self._is_multi and target._is_multi: engine = self._engine # Item "IndexEngine" of "Union[IndexEngine, ExtensionEngine]" has # no attribute "_extract_level_codes" tgt_values = engine._extract_level_codes(target) # type: ignore[union-attr] indexer, missing = self._engine.get_indexer_non_unique(tgt_values) return ensure_platform_int(indexer), ensure_platform_int(missing) def get_indexer_for(self, target) -> npt.NDArray[np.intp]: """ Guaranteed return of an indexer even when non-unique. This dispatches to get_indexer or get_indexer_non_unique as appropriate. Returns ------- np.ndarray[np.intp] List of indices. Examples -------- >>> idx = pd.Index([np.nan, 'var1', np.nan]) >>> idx.get_indexer_for([np.nan]) array([0, 2]) """ if self._index_as_unique: return self.get_indexer(target) indexer, _ = self.get_indexer_non_unique(target) return indexer def _get_indexer_strict(self, key, axis_name: str_t) -> tuple[Index, np.ndarray]: """ Analogue to get_indexer that raises if any elements are missing. """ keyarr = key if not isinstance(keyarr, Index): keyarr = com.asarray_tuplesafe(keyarr) if self._index_as_unique: indexer = self.get_indexer_for(keyarr) keyarr = self.reindex(keyarr)[0] else: keyarr, indexer, new_indexer = self._reindex_non_unique(keyarr) self._raise_if_missing(keyarr, indexer, axis_name) keyarr = self.take(indexer) if isinstance(key, Index): # GH 42790 - Preserve name from an Index keyarr.name = key.name if keyarr.dtype.kind in ["m", "M"]: # DTI/TDI.take can infer a freq in some cases when we dont want one if isinstance(key, list) or ( isinstance(key, type(self)) # "Index" has no attribute "freq" and key.freq is None # type: ignore[attr-defined] ): keyarr = keyarr._with_freq(None) return keyarr, indexer def _raise_if_missing(self, key, indexer, axis_name: str_t) -> None: """ Check that indexer can be used to return a result. e.g. at least one element was found, unless the list of keys was actually empty. Parameters ---------- key : list-like Targeted labels (only used to show correct error message). indexer: array-like of booleans Indices corresponding to the key, (with -1 indicating not found). axis_name : str Raises ------ KeyError If at least one key was requested but none was found. """ if len(key) == 0: return # Count missing values missing_mask = indexer < 0 nmissing = missing_mask.sum() if nmissing: # TODO: remove special-case; this is just to keep exception # message tests from raising while debugging use_interval_msg = is_interval_dtype(self.dtype) or ( is_categorical_dtype(self.dtype) # "Index" has no attribute "categories" [attr-defined] and is_interval_dtype( self.categories.dtype # type: ignore[attr-defined] ) ) if nmissing == len(indexer): if use_interval_msg: key = list(key) raise KeyError(f"None of [{key}] are in the [{axis_name}]") not_found = list(ensure_index(key)[missing_mask.nonzero()[0]].unique()) raise KeyError(f"{not_found} not in index") def _get_indexer_non_comparable( self, target: Index, method, unique: Literal[True] = ... ) -> npt.NDArray[np.intp]: ... 
def _get_indexer_non_comparable( self, target: Index, method, unique: Literal[False] ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... def _get_indexer_non_comparable( self, target: Index, method, unique: bool = True ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... def _get_indexer_non_comparable( self, target: Index, method, unique: bool = True ) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: """ Called from get_indexer or get_indexer_non_unique when the target is of a non-comparable dtype. For get_indexer lookups with method=None, get_indexer is an _equality_ check, so non-comparable dtypes mean we will always have no matches. For get_indexer lookups with a method, get_indexer is an _inequality_ check, so non-comparable dtypes mean we will always raise TypeError. Parameters ---------- target : Index method : str or None unique : bool, default True * True if called from get_indexer. * False if called from get_indexer_non_unique. Raises ------ TypeError If doing an inequality check, i.e. method is not None. """ if method is not None: other = _unpack_nested_dtype(target) raise TypeError(f"Cannot compare dtypes {self.dtype} and {other.dtype}") no_matches = -1 * np.ones(target.shape, dtype=np.intp) if unique: # This is for get_indexer return no_matches else: # This is for get_indexer_non_unique missing = np.arange(len(target), dtype=np.intp) return no_matches, missing def _index_as_unique(self) -> bool: """ Whether we should treat this as unique for the sake of get_indexer vs get_indexer_non_unique. For IntervalIndex compat. """ return self.is_unique _requires_unique_msg = "Reindexing only valid with uniquely valued Index objects" def _maybe_promote(self, other: Index) -> tuple[Index, Index]: """ When dealing with an object-dtype Index and a non-object Index, see if we can upcast the object-dtype one to improve performance. """ if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex): if ( self.tz is not None and other.tz is not None and not tz_compare(self.tz, other.tz) ): # standardize on UTC return self.tz_convert("UTC"), other.tz_convert("UTC") elif self.inferred_type == "date" and isinstance(other, ABCDatetimeIndex): try: return type(other)(self), other except OutOfBoundsDatetime: return self, other elif self.inferred_type == "timedelta" and isinstance(other, ABCTimedeltaIndex): # TODO: we dont have tests that get here return type(other)(self), other elif self.dtype.kind == "u" and other.dtype.kind == "i": # GH#41873 if other.min() >= 0: # lookup min as it may be cached # TODO: may need itemsize check if we have non-64-bit Indexes return self, other.astype(self.dtype) elif self._is_multi and not other._is_multi: try: # "Type[Index]" has no attribute "from_tuples" other = type(self).from_tuples(other) # type: ignore[attr-defined] except (TypeError, ValueError): # let's instead try with a straight Index self = Index(self._values) if not is_object_dtype(self.dtype) and is_object_dtype(other.dtype): # Reverse op so we dont need to re-implement on the subclasses other, self = other._maybe_promote(self) return self, other def _find_common_type_compat(self, target) -> DtypeObj: """ Implementation of find_common_type that adjusts for Index-specific special cases. 
""" target_dtype, _ = infer_dtype_from(target, pandas_dtype=True) # special case: if one dtype is uint64 and the other a signed int, return object # See https://github.com/pandas-dev/pandas/issues/26778 for discussion # Now it's: # * float | [u]int -> float # * uint64 | signed int -> object # We may change union(float | [u]int) to go to object. if self.dtype == "uint64" or target_dtype == "uint64": if is_signed_integer_dtype(self.dtype) or is_signed_integer_dtype( target_dtype ): return _dtype_obj dtype = find_result_type(self._values, target) dtype = common_dtype_categorical_compat([self, target], dtype) return dtype def _should_compare(self, other: Index) -> bool: """ Check if `self == other` can ever have non-False entries. """ if (is_bool_dtype(other) and is_any_real_numeric_dtype(self)) or ( is_bool_dtype(self) and is_any_real_numeric_dtype(other) ): # GH#16877 Treat boolean labels passed to a numeric index as not # found. Without this fix False and True would be treated as 0 and 1 # respectively. return False other = _unpack_nested_dtype(other) dtype = other.dtype return self._is_comparable_dtype(dtype) or is_object_dtype(dtype) def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: """ Can we compare values of the given dtype to our own? """ if self.dtype.kind == "b": return dtype.kind == "b" elif is_numeric_dtype(self.dtype): return is_numeric_dtype(dtype) # TODO: this was written assuming we only get here with object-dtype, # which is nom longer correct. Can we specialize for EA? return True def groupby(self, values) -> PrettyDict[Hashable, np.ndarray]: """ Group the index labels by a given array of values. Parameters ---------- values : array Values used to determine the groups. Returns ------- dict {group name -> group labels} """ # TODO: if we are a MultiIndex, we can do better # that converting to tuples if isinstance(values, ABCMultiIndex): values = values._values values = Categorical(values) result = values._reverse_indexer() # map to the label result = {k: self.take(v) for k, v in result.items()} return PrettyDict(result) def map(self, mapper, na_action=None): """ Map values using an input mapping or function. Parameters ---------- mapper : function, dict, or Series Mapping correspondence. na_action : {None, 'ignore'} If 'ignore', propagate NA values, without passing them to the mapping correspondence. Returns ------- Union[Index, MultiIndex] The output of the mapping function applied to the index. If the function returns a tuple with more than one element a MultiIndex will be returned. """ from pandas.core.indexes.multi import MultiIndex new_values = self._map_values(mapper, na_action=na_action) # we can return a MultiIndex if new_values.size and isinstance(new_values[0], tuple): if isinstance(self, MultiIndex): names = self.names elif self.name: names = [self.name] * len(new_values[0]) else: names = None return MultiIndex.from_tuples(new_values, names=names) dtype = None if not new_values.size: # empty dtype = self.dtype # e.g. if we are floating and new_values is all ints, then we # don't want to cast back to floating. But if we are UInt64 # and new_values is all ints, we want to try. 
same_dtype = lib.infer_dtype(new_values, skipna=False) == self.inferred_type if same_dtype: new_values = maybe_cast_pointwise_result( new_values, self.dtype, same_dtype=same_dtype ) return Index._with_infer(new_values, dtype=dtype, copy=False, name=self.name) # TODO: De-duplicate with map, xref GH#32349 def _transform_index(self, func, *, level=None) -> Index: """ Apply function to all values found in index. This includes transforming multiindex entries separately. Only apply function to one level of the MultiIndex if level is specified. """ if isinstance(self, ABCMultiIndex): values = [ self.get_level_values(i).map(func) if i == level or level is None else self.get_level_values(i) for i in range(self.nlevels) ] return type(self).from_arrays(values) else: items = [func(x) for x in self] return Index(items, name=self.name, tupleize_cols=False) def isin(self, values, level=None) -> npt.NDArray[np.bool_]: """ Return a boolean array where the index values are in `values`. Compute boolean array of whether each index value is found in the passed set of values. The length of the returned boolean array matches the length of the index. Parameters ---------- values : set or list-like Sought values. level : str or int, optional Name or position of the index level to use (if the index is a `MultiIndex`). Returns ------- np.ndarray[bool] NumPy array of boolean values. See Also -------- Series.isin : Same for Series. DataFrame.isin : Same method for DataFrames. Notes ----- In the case of `MultiIndex` you must either specify `values` as a list-like object containing tuples that are the same length as the number of levels, or specify `level`. Otherwise it will raise a ``ValueError``. If `level` is specified: - if it is the name of one *and only one* index level, use that level; - otherwise it should be a number indicating level position. Examples -------- >>> idx = pd.Index([1,2,3]) >>> idx Index([1, 2, 3], dtype='int64') Check whether each index value in a list of values. >>> idx.isin([1, 4]) array([ True, False, False]) >>> midx = pd.MultiIndex.from_arrays([[1,2,3], ... ['red', 'blue', 'green']], ... names=('number', 'color')) >>> midx MultiIndex([(1, 'red'), (2, 'blue'), (3, 'green')], names=['number', 'color']) Check whether the strings in the 'color' level of the MultiIndex are in a list of colors. >>> midx.isin(['red', 'orange', 'yellow'], level='color') array([ True, False, False]) To check across the levels of a MultiIndex, pass a list of tuples: >>> midx.isin([(1, 'red'), (3, 'red')]) array([ True, False, False]) For a DatetimeIndex, string values in `values` are converted to Timestamps. >>> dates = ['2000-03-11', '2000-03-12', '2000-03-13'] >>> dti = pd.to_datetime(dates) >>> dti DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'], dtype='datetime64[ns]', freq=None) >>> dti.isin(['2000-03-11']) array([ True, False, False]) """ if level is not None: self._validate_index_level(level) return algos.isin(self._values, values) def _get_string_slice(self, key: str_t): # this is for partial string indexing, # overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex raise NotImplementedError def slice_indexer( self, start: Hashable | None = None, end: Hashable | None = None, step: int | None = None, ) -> slice: """ Compute the slice indexer for input labels and step. Index needs to be ordered and unique. Parameters ---------- start : label, default None If None, defaults to the beginning. end : label, default None If None, defaults to the end. 
step : int, default None Returns ------- slice Raises ------ KeyError : If key does not exist, or key is not unique and index is not ordered. Notes ----- This function assumes that the data is sorted, so use at your own peril Examples -------- This is a method on all index types. For example you can do: >>> idx = pd.Index(list('abcd')) >>> idx.slice_indexer(start='b', end='c') slice(1, 3, None) >>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')]) >>> idx.slice_indexer(start='b', end=('c', 'g')) slice(1, 3, None) """ start_slice, end_slice = self.slice_locs(start, end, step=step) # return a slice if not is_scalar(start_slice): raise AssertionError("Start slice bound is non-scalar") if not is_scalar(end_slice): raise AssertionError("End slice bound is non-scalar") return slice(start_slice, end_slice, step) def _maybe_cast_indexer(self, key): """ If we have a float key and are not a floating index, then try to cast to an int if equivalent. """ return key def _maybe_cast_listlike_indexer(self, target) -> Index: """ Analogue to maybe_cast_indexer for get_indexer instead of get_loc. """ return ensure_index(target) def _validate_indexer(self, form: str_t, key, kind: str_t) -> None: """ If we are positional indexer, validate that we have appropriate typed bounds must be an integer. """ assert kind in ["getitem", "iloc"] if key is not None and not is_integer(key): self._raise_invalid_indexer(form, key) def _maybe_cast_slice_bound(self, label, side: str_t): """ This function should be overloaded in subclasses that allow non-trivial casting on label-slice bounds, e.g. datetime-like indices allowing strings containing formatted datetimes. Parameters ---------- label : object side : {'left', 'right'} Returns ------- label : object Notes ----- Value of `side` parameter should be validated in caller. """ # We are a plain index here (sub-class override this method if they # wish to have special treatment for floats/ints, e.g. datetimelike Indexes if is_numeric_dtype(self.dtype): return self._maybe_cast_indexer(label) # reject them, if index does not contain label if (is_float(label) or is_integer(label)) and label not in self: self._raise_invalid_indexer("slice", label) return label def _searchsorted_monotonic(self, label, side: Literal["left", "right"] = "left"): if self.is_monotonic_increasing: return self.searchsorted(label, side=side) elif self.is_monotonic_decreasing: # np.searchsorted expects ascending sort order, have to reverse # everything for it to work (element ordering, search side and # resulting value). pos = self[::-1].searchsorted( label, side="right" if side == "left" else "left" ) return len(self) - pos raise ValueError("index must be monotonic increasing or decreasing") def get_slice_bound(self, label, side: Literal["left", "right"]) -> int: """ Calculate slice bound that corresponds to given label. Returns leftmost (one-past-the-rightmost if ``side=='right'``) position of given label. Parameters ---------- label : object side : {'left', 'right'} Returns ------- int Index of label. """ if side not in ("left", "right"): raise ValueError( "Invalid value for side kwarg, must be either " f"'left' or 'right': {side}" ) original_label = label # For datetime indices label may be a string that has to be converted # to datetime boundary according to its resolution. 
label = self._maybe_cast_slice_bound(label, side) # we need to look up the label try: slc = self.get_loc(label) except KeyError as err: try: return self._searchsorted_monotonic(label, side) except ValueError: # raise the original KeyError raise err if isinstance(slc, np.ndarray): # get_loc may return a boolean array, which # is OK as long as they are representable by a slice. assert is_bool_dtype(slc.dtype) slc = lib.maybe_booleans_to_slice(slc.view("u1")) if isinstance(slc, np.ndarray): raise KeyError( f"Cannot get {side} slice bound for non-unique " f"label: {repr(original_label)}" ) if isinstance(slc, slice): if side == "left": return slc.start else: return slc.stop else: if side == "right": return slc + 1 else: return slc def slice_locs(self, start=None, end=None, step=None) -> tuple[int, int]: """ Compute slice locations for input labels. Parameters ---------- start : label, default None If None, defaults to the beginning. end : label, default None If None, defaults to the end. step : int, defaults None If None, defaults to 1. Returns ------- tuple[int, int] See Also -------- Index.get_loc : Get location for a single label. Notes ----- This method only works if the index is monotonic or unique. Examples -------- >>> idx = pd.Index(list('abcd')) >>> idx.slice_locs(start='b', end='c') (1, 3) """ inc = step is None or step >= 0 if not inc: # If it's a reverse slice, temporarily swap bounds. start, end = end, start # GH 16785: If start and end happen to be date strings with UTC offsets # attempt to parse and check that the offsets are the same if isinstance(start, (str, datetime)) and isinstance(end, (str, datetime)): try: ts_start = Timestamp(start) ts_end = Timestamp(end) except (ValueError, TypeError): pass else: if not tz_compare(ts_start.tzinfo, ts_end.tzinfo): raise ValueError("Both dates must have the same UTC offset") start_slice = None if start is not None: start_slice = self.get_slice_bound(start, "left") if start_slice is None: start_slice = 0 end_slice = None if end is not None: end_slice = self.get_slice_bound(end, "right") if end_slice is None: end_slice = len(self) if not inc: # Bounds at this moment are swapped, swap them back and shift by 1. # # slice_locs('B', 'A', step=-1): s='B', e='A' # # s='A' e='B' # AFTER SWAP: | | # v ------------------> V # ----------------------------------- # | | |A|A|A|A| | | | | |B|B| | | | | # ----------------------------------- # ^ <------------------ ^ # SHOULD BE: | | # end=s-1 start=e-1 # end_slice, start_slice = start_slice - 1, end_slice - 1 # i == -1 triggers ``len(self) + i`` selection that points to the # last element, not before-the-first one, subtracting len(self) # compensates that. if end_slice == -1: end_slice -= len(self) if start_slice == -1: start_slice -= len(self) return start_slice, end_slice def delete(self: _IndexT, loc) -> _IndexT: """ Make new Index with passed location(-s) deleted. Parameters ---------- loc : int or list of int Location of item(-s) which will be deleted. Use a list of locations to delete more than one value at the same time. Returns ------- Index Will be same type as self, except for RangeIndex. See Also -------- numpy.delete : Delete any rows and column from NumPy array (ndarray). 
Examples -------- >>> idx = pd.Index(['a', 'b', 'c']) >>> idx.delete(1) Index(['a', 'c'], dtype='object') >>> idx = pd.Index(['a', 'b', 'c']) >>> idx.delete([0, 2]) Index(['b'], dtype='object') """ values = self._values res_values: ArrayLike if isinstance(values, np.ndarray): # TODO(__array_function__): special casing will be unnecessary res_values = np.delete(values, loc) else: res_values = values.delete(loc) # _constructor so RangeIndex-> Index with an int64 dtype return self._constructor._simple_new(res_values, name=self.name) def insert(self, loc: int, item) -> Index: """ Make new Index inserting new item at location. Follows Python numpy.insert semantics for negative values. Parameters ---------- loc : int item : object Returns ------- Index """ item = lib.item_from_zerodim(item) if is_valid_na_for_dtype(item, self.dtype) and self.dtype != object: item = self._na_value arr = self._values try: if isinstance(arr, ExtensionArray): res_values = arr.insert(loc, item) return type(self)._simple_new(res_values, name=self.name) else: item = self._validate_fill_value(item) except (TypeError, ValueError, LossySetitemError): # e.g. trying to insert an integer into a DatetimeIndex # We cannot keep the same dtype, so cast to the (often object) # minimal shared dtype before doing the insert. dtype = self._find_common_type_compat(item) return self.astype(dtype).insert(loc, item) if arr.dtype != object or not isinstance( item, (tuple, np.datetime64, np.timedelta64) ): # with object-dtype we need to worry about numpy incorrectly casting # dt64/td64 to integer, also about treating tuples as sequences # special-casing dt64/td64 https://github.com/numpy/numpy/issues/12550 casted = arr.dtype.type(item) new_values = np.insert(arr, loc, casted) else: # error: No overload variant of "insert" matches argument types # "ndarray[Any, Any]", "int", "None" new_values = np.insert(arr, loc, None) # type: ignore[call-overload] loc = loc if loc >= 0 else loc - 1 new_values[loc] = item return Index._with_infer(new_values, name=self.name) def drop( self, labels: Index | np.ndarray | Iterable[Hashable], errors: IgnoreRaise = "raise", ) -> Index: """ Make new Index with passed list of labels deleted. Parameters ---------- labels : array-like or scalar errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and existing labels are dropped. Returns ------- Index Will be same type as self, except for RangeIndex. Raises ------ KeyError If not all of the labels are found in the selected axis """ if not isinstance(labels, Index): # avoid materializing e.g. RangeIndex arr_dtype = "object" if self.dtype == "object" else None labels = com.index_labels_to_array(labels, dtype=arr_dtype) indexer = self.get_indexer_for(labels) mask = indexer == -1 if mask.any(): if errors != "ignore": raise KeyError(f"{list(labels[mask])} not found in axis") indexer = indexer[~mask] return self.delete(indexer) def infer_objects(self, copy: bool = True) -> Index: """ If we have an object dtype, try to infer a non-object dtype. Parameters ---------- copy : bool, default True Whether to make a copy in cases where no inference occurs. """ if self._is_multi: raise NotImplementedError( "infer_objects is not implemented for MultiIndex. " "Use index.to_frame().infer_objects() instead." 
) if self.dtype != object: return self.copy() if copy else self values = self._values values = cast("npt.NDArray[np.object_]", values) res_values = lib.maybe_convert_objects( values, convert_datetime=True, convert_timedelta=True, convert_period=True, convert_interval=True, ) if copy and res_values is values: return self.copy() result = Index(res_values, name=self.name) if not copy and res_values is values and self._references is not None: result._references = self._references result._references.add_index_reference(result) return result # -------------------------------------------------------------------- # Generated Arithmetic, Comparison, and Unary Methods def _cmp_method(self, other, op): """ Wrapper used to dispatch comparison operations. """ if self.is_(other): # fastpath if op in {operator.eq, operator.le, operator.ge}: arr = np.ones(len(self), dtype=bool) if self._can_hold_na and not isinstance(self, ABCMultiIndex): # TODO: should set MultiIndex._can_hold_na = False? arr[self.isna()] = False return arr elif op is operator.ne: arr = np.zeros(len(self), dtype=bool) if self._can_hold_na and not isinstance(self, ABCMultiIndex): arr[self.isna()] = True return arr if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)) and len( self ) != len(other): raise ValueError("Lengths must match to compare") if not isinstance(other, ABCMultiIndex): other = extract_array(other, extract_numpy=True) else: other = np.asarray(other) if is_object_dtype(self.dtype) and isinstance(other, ExtensionArray): # e.g. PeriodArray, Categorical with np.errstate(all="ignore"): result = op(self._values, other) elif isinstance(self._values, ExtensionArray): result = op(self._values, other) elif is_object_dtype(self.dtype) and not isinstance(self, ABCMultiIndex): # don't pass MultiIndex with np.errstate(all="ignore"): result = ops.comp_method_OBJECT_ARRAY(op, self._values, other) else: with np.errstate(all="ignore"): result = ops.comparison_op(self._values, other, op) return result def _logical_method(self, other, op): res_name = ops.get_op_result_name(self, other) lvalues = self._values rvalues = extract_array(other, extract_numpy=True, extract_range=True) res_values = ops.logical_op(lvalues, rvalues, op) return self._construct_result(res_values, name=res_name) def _construct_result(self, result, name): if isinstance(result, tuple): return ( Index(result[0], name=name, dtype=result[0].dtype), Index(result[1], name=name, dtype=result[1].dtype), ) return Index(result, name=name, dtype=result.dtype) def _arith_method(self, other, op): if ( isinstance(other, Index) and is_object_dtype(other.dtype) and type(other) is not Index ): # We return NotImplemented for object-dtype index *subclasses* so they have # a chance to implement ops before we unwrap them. # See https://github.com/pandas-dev/pandas/issues/31109 return NotImplemented return super()._arith_method(other, op) def _unary_method(self, op): result = op(self._values) return Index(result, name=self.name) def __abs__(self) -> Index: return self._unary_method(operator.abs) def __neg__(self) -> Index: return self._unary_method(operator.neg) def __pos__(self) -> Index: return self._unary_method(operator.pos) def __invert__(self) -> Index: # GH#8875 return self._unary_method(operator.inv) # -------------------------------------------------------------------- # Reductions def any(self, *args, **kwargs): """ Return whether any element is Truthy. Parameters ---------- *args Required for compatibility with numpy. **kwargs Required for compatibility with numpy. 
Returns ------- bool or array-like (if axis is specified) A single element array-like may be converted to bool. See Also -------- Index.all : Return whether all elements are True. Series.all : Return whether all elements are True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to True because these are not equal to zero. Examples -------- >>> index = pd.Index([0, 1, 2]) >>> index.any() True >>> index = pd.Index([0, 0, 0]) >>> index.any() False """ nv.validate_any(args, kwargs) self._maybe_disable_logical_methods("any") # error: Argument 1 to "any" has incompatible type "ArrayLike"; expected # "Union[Union[int, float, complex, str, bytes, generic], Sequence[Union[int, # float, complex, str, bytes, generic]], Sequence[Sequence[Any]], # _SupportsArray]" return np.any(self.values) # type: ignore[arg-type] def all(self, *args, **kwargs): """ Return whether all elements are Truthy. Parameters ---------- *args Required for compatibility with numpy. **kwargs Required for compatibility with numpy. Returns ------- bool or array-like (if axis is specified) A single element array-like may be converted to bool. See Also -------- Index.any : Return whether any element in an Index is True. Series.any : Return whether any element in a Series is True. Series.all : Return whether all elements in a Series are True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to True because these are not equal to zero. Examples -------- True, because nonzero integers are considered True. >>> pd.Index([1, 2, 3]).all() True False, because ``0`` is considered False. >>> pd.Index([0, 1, 2]).all() False """ nv.validate_all(args, kwargs) self._maybe_disable_logical_methods("all") # error: Argument 1 to "all" has incompatible type "ArrayLike"; expected # "Union[Union[int, float, complex, str, bytes, generic], Sequence[Union[int, # float, complex, str, bytes, generic]], Sequence[Sequence[Any]], # _SupportsArray]" return np.all(self.values) # type: ignore[arg-type] def _maybe_disable_logical_methods(self, opname: str_t) -> None: """ raise if this Index subclass does not support any or all. 
""" if ( isinstance(self, ABCMultiIndex) or needs_i8_conversion(self.dtype) or is_interval_dtype(self.dtype) or is_categorical_dtype(self.dtype) or is_float_dtype(self.dtype) ): # This call will raise make_invalid_op(opname)(self) def argmin(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: nv.validate_argmin(args, kwargs) nv.validate_minmax_axis(axis) if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return -1 return super().argmin(skipna=skipna) def argmax(self, axis=None, skipna: bool = True, *args, **kwargs) -> int: nv.validate_argmax(args, kwargs) nv.validate_minmax_axis(axis) if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return -1 return super().argmax(skipna=skipna) def min(self, axis=None, skipna: bool = True, *args, **kwargs): nv.validate_min(args, kwargs) nv.validate_minmax_axis(axis) if not len(self): return self._na_value if len(self) and self.is_monotonic_increasing: # quick check first = self[0] if not isna(first): return first if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return self._na_value if not self._is_multi and not isinstance(self._values, np.ndarray): return self._values._reduce(name="min", skipna=skipna) return super().min(skipna=skipna) def max(self, axis=None, skipna: bool = True, *args, **kwargs): nv.validate_max(args, kwargs) nv.validate_minmax_axis(axis) if not len(self): return self._na_value if len(self) and self.is_monotonic_increasing: # quick check last = self[-1] if not isna(last): return last if not self._is_multi and self.hasnans: # Take advantage of cache mask = self._isnan if not skipna or mask.all(): return self._na_value if not self._is_multi and not isinstance(self._values, np.ndarray): return self._values._reduce(name="max", skipna=skipna) return super().max(skipna=skipna) # -------------------------------------------------------------------- def shape(self) -> Shape: """ Return a tuple of the shape of the underlying data. """ # See GH#27775, GH#27384 for history/reasoning in how this is defined. return (len(self),) The provided code snippet includes necessary dependencies for implementing the `get_indexer_dict` function. Write a Python function `def get_indexer_dict( label_list: list[np.ndarray], keys: list[Index] ) -> dict[Hashable, npt.NDArray[np.intp]]` to solve the following problem: Returns ------- dict: Labels mapped to indexers. Here is the function: def get_indexer_dict( label_list: list[np.ndarray], keys: list[Index] ) -> dict[Hashable, npt.NDArray[np.intp]]: """ Returns ------- dict: Labels mapped to indexers. """ shape = tuple(len(x) for x in keys) group_index = get_group_index(label_list, shape, sort=True, xnull=True) if np.all(group_index == -1): # Short-circuit, lib.indices_fast will return the same return {} ngroups = ( ((group_index.size and group_index.max()) + 1) if is_int64_overflow_possible(shape) else np.prod(shape, dtype="i8") ) sorter = get_group_index_sorter(group_index, ngroups) sorted_labels = [lab.take(sorter) for lab in label_list] group_index = group_index.take(sorter) return lib.indices_fast(sorter, group_index, keys, sorted_labels)
Returns
-------
dict:
    Labels mapped to indexers.
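For intuition, a minimal self-contained sketch of the mapping `get_indexer_dict` produces — a plain dict plus NumPy instead of the `lib.indices_fast` fast path; the helper name is hypothetical and not part of pandas:

import numpy as np

# Hypothetical stand-in: map each label to the integer positions
# where it occurs, in order of appearance.
def indexer_dict_sketch(labels):
    out = {}
    for i, lab in enumerate(labels):
        out.setdefault(lab, []).append(i)
    return {k: np.asarray(v, dtype=np.intp) for k, v in out.items()}

indexer_dict_sketch(["a", "b", "a", "c"])
# {'a': array([0, 2]), 'b': array([1]), 'c': array([3])}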
173,035
from __future__ import annotations import operator from textwrap import dedent from typing import ( TYPE_CHECKING, Literal, cast, ) import warnings import numpy as np from pandas._libs import ( algos, hashtable as htable, iNaT, lib, ) from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeObj, TakeIndexer, npt, ) from pandas.util._decorators import doc from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import ( construct_1d_object_array_from_listlike, infer_dtype_from_array, ) from pandas.core.dtypes.common import ( ensure_float64, ensure_object, ensure_platform_int, is_array_like, is_bool_dtype, is_categorical_dtype, is_complex_dtype, is_extension_array_dtype, is_float_dtype, is_integer, is_integer_dtype, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_signed_integer_dtype, needs_i8_conversion, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import ( BaseMaskedDtype, ExtensionDtype, PandasDtype, ) from pandas.core.dtypes.generic import ( ABCDatetimeArray, ABCExtensionArray, ABCIndex, ABCMultiIndex, ABCSeries, ABCTimedeltaArray, ) from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, ) from pandas.core.array_algos.take import take_nd from pandas.core.construction import ( array as pd_array, ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.indexers import validate_indices def _ensure_data(values: ArrayLike) -> np.ndarray: """ routine to ensure that our data is of the correct input dtype for lower-level routines This will coerce: - ints -> int64 - uint -> uint64 - bool -> uint8 - datetimelike -> i8 - datetime64tz -> i8 (in local tz) - categorical -> codes Parameters ---------- values : np.ndarray or ExtensionArray Returns ------- np.ndarray """ if not isinstance(values, ABCMultiIndex): # extract_array would raise values = extract_array(values, extract_numpy=True) if is_object_dtype(values.dtype): return ensure_object(np.asarray(values)) elif isinstance(values.dtype, BaseMaskedDtype): # i.e. BooleanArray, FloatingArray, IntegerArray values = cast("BaseMaskedArray", values) if not values._hasna: # No pd.NAs -> We can avoid an object-dtype cast (and copy) GH#41816 # recurse to avoid re-implementing logic for eg bool->uint8 return _ensure_data(values._data) return np.asarray(values) elif is_categorical_dtype(values.dtype): # NB: cases that go through here should NOT be using _reconstruct_data # on the back-end. values = cast("Categorical", values) return values.codes elif is_bool_dtype(values.dtype): if isinstance(values, np.ndarray): # i.e. actually dtype == np.dtype("bool") return np.asarray(values).view("uint8") else: # e.g. 
Sparse[bool, False] # TODO: no test cases get here return np.asarray(values).astype("uint8", copy=False) elif is_integer_dtype(values.dtype): return np.asarray(values) elif is_float_dtype(values.dtype): # Note: checking `values.dtype == "float128"` raises on Windows and 32bit # error: Item "ExtensionDtype" of "Union[Any, ExtensionDtype, dtype[Any]]" # has no attribute "itemsize" if values.dtype.itemsize in [2, 12, 16]: # type: ignore[union-attr] # we dont (yet) have float128 hashtable support return ensure_float64(values) return np.asarray(values) elif is_complex_dtype(values.dtype): return cast(np.ndarray, values) # datetimelike elif needs_i8_conversion(values.dtype): npvalues = values.view("i8") npvalues = cast(np.ndarray, npvalues) return npvalues # we have failed, return object values = np.asarray(values, dtype=object) return ensure_object(values) ArrayLike = Union["ExtensionArray", np.ndarray] The provided code snippet includes necessary dependencies for implementing the `nunique_ints` function. Write a Python function `def nunique_ints(values: ArrayLike) -> int` to solve the following problem: Return the number of unique values for integer array-likes. Significantly faster than pandas.unique for long enough sequences. No checks are done to ensure input is integral. Parameters ---------- values : 1d array-like Returns ------- int : The number of unique values in ``values`` Here is the function: def nunique_ints(values: ArrayLike) -> int: """ Return the number of unique values for integer array-likes. Significantly faster than pandas.unique for long enough sequences. No checks are done to ensure input is integral. Parameters ---------- values : 1d array-like Returns ------- int : The number of unique values in ``values`` """ if len(values) == 0: return 0 values = _ensure_data(values) # bincount requires intp result = (np.bincount(values.ravel().astype("intp")) != 0).sum() return result
Return the number of unique values for integer array-likes.

Significantly faster than pandas.unique for long enough sequences.
No checks are done to ensure input is integral.

Parameters
----------
values : 1d array-like

Returns
-------
int : The number of unique values in ``values``
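A quick standalone check of the ``np.bincount`` trick the function relies on, in plain NumPy with no pandas internals:

import numpy as np

values = np.array([3, 1, 3, 7, 1])
# Count how many bins of a bincount are non-empty; each non-empty bin
# corresponds to one distinct value. Note np.bincount requires
# non-negative integers.
n_unique = (np.bincount(values.ravel().astype("intp")) != 0).sum()
n_unique  # 3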
173,036
from __future__ import annotations import operator from textwrap import dedent from typing import ( TYPE_CHECKING, Literal, cast, ) import warnings import numpy as np from pandas._libs import ( algos, hashtable as htable, iNaT, lib, ) from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeObj, TakeIndexer, npt, ) from pandas.util._decorators import doc from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import ( construct_1d_object_array_from_listlike, infer_dtype_from_array, ) from pandas.core.dtypes.common import ( ensure_float64, ensure_object, ensure_platform_int, is_array_like, is_bool_dtype, is_categorical_dtype, is_complex_dtype, is_extension_array_dtype, is_float_dtype, is_integer, is_integer_dtype, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_signed_integer_dtype, needs_i8_conversion, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import ( BaseMaskedDtype, ExtensionDtype, PandasDtype, ) from pandas.core.dtypes.generic import ( ABCDatetimeArray, ABCExtensionArray, ABCIndex, ABCMultiIndex, ABCSeries, ABCTimedeltaArray, ) from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, ) from pandas.core.array_algos.take import take_nd from pandas.core.construction import ( array as pd_array, ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.indexers import validate_indices def _ensure_data(values: ArrayLike) -> np.ndarray: """ routine to ensure that our data is of the correct input dtype for lower-level routines This will coerce: - ints -> int64 - uint -> uint64 - bool -> uint8 - datetimelike -> i8 - datetime64tz -> i8 (in local tz) - categorical -> codes Parameters ---------- values : np.ndarray or ExtensionArray Returns ------- np.ndarray """ if not isinstance(values, ABCMultiIndex): # extract_array would raise values = extract_array(values, extract_numpy=True) if is_object_dtype(values.dtype): return ensure_object(np.asarray(values)) elif isinstance(values.dtype, BaseMaskedDtype): # i.e. BooleanArray, FloatingArray, IntegerArray values = cast("BaseMaskedArray", values) if not values._hasna: # No pd.NAs -> We can avoid an object-dtype cast (and copy) GH#41816 # recurse to avoid re-implementing logic for eg bool->uint8 return _ensure_data(values._data) return np.asarray(values) elif is_categorical_dtype(values.dtype): # NB: cases that go through here should NOT be using _reconstruct_data # on the back-end. values = cast("Categorical", values) return values.codes elif is_bool_dtype(values.dtype): if isinstance(values, np.ndarray): # i.e. actually dtype == np.dtype("bool") return np.asarray(values).view("uint8") else: # e.g. 
Sparse[bool, False] # TODO: no test cases get here return np.asarray(values).astype("uint8", copy=False) elif is_integer_dtype(values.dtype): return np.asarray(values) elif is_float_dtype(values.dtype): # Note: checking `values.dtype == "float128"` raises on Windows and 32bit # error: Item "ExtensionDtype" of "Union[Any, ExtensionDtype, dtype[Any]]" # has no attribute "itemsize" if values.dtype.itemsize in [2, 12, 16]: # type: ignore[union-attr] # we dont (yet) have float128 hashtable support return ensure_float64(values) return np.asarray(values) elif is_complex_dtype(values.dtype): return cast(np.ndarray, values) # datetimelike elif needs_i8_conversion(values.dtype): npvalues = values.view("i8") npvalues = cast(np.ndarray, npvalues) return npvalues # we have failed, return object values = np.asarray(values, dtype=object) return ensure_object(values) ArrayLike = Union["ExtensionArray", np.ndarray] AxisInt = int def needs_i8_conversion(arr_or_dtype) -> bool: """ Check whether the array or dtype should be converted to int64. An array-like or dtype "needs" such a conversion if the array-like or dtype is of a datetime-like dtype Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype should be converted to int64. Examples -------- >>> needs_i8_conversion(str) False >>> needs_i8_conversion(np.int64) False >>> needs_i8_conversion(np.datetime64) True >>> needs_i8_conversion(np.array(['a', 'b'])) False >>> needs_i8_conversion(pd.Series([1, 2])) False >>> needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]")) True >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) True """ if arr_or_dtype is None: return False if isinstance(arr_or_dtype, np.dtype): return arr_or_dtype.kind in ["m", "M"] elif isinstance(arr_or_dtype, ExtensionDtype): return isinstance(arr_or_dtype, (PeriodDtype, DatetimeTZDtype)) try: dtype = get_dtype(arr_or_dtype) except (TypeError, ValueError): return False if isinstance(dtype, np.dtype): return dtype.kind in ["m", "M"] return isinstance(dtype, (PeriodDtype, DatetimeTZDtype)) The provided code snippet includes necessary dependencies for implementing the `rank` function. Write a Python function `def rank( values: ArrayLike, axis: AxisInt = 0, method: str = "average", na_option: str = "keep", ascending: bool = True, pct: bool = False, ) -> npt.NDArray[np.float64]` to solve the following problem: Rank the values along a given axis. Parameters ---------- values : np.ndarray or ExtensionArray Array whose values will be ranked. The number of dimensions in this array must not exceed 2. axis : int, default 0 Axis over which to perform rankings. method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' The method by which tiebreaks are broken during the ranking. na_option : {'keep', 'top'}, default 'keep' The method by which NaNs are placed in the ranking. - ``keep``: rank each NaN value with a NaN ranking - ``top``: replace each NaN with either +/- inf so that they there are ranked at the top ascending : bool, default True Whether or not the elements should be ranked in ascending order. pct : bool, default False Whether or not to the display the returned rankings in integer form (e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1). 
Here is the function: def rank(
    values: ArrayLike,
    axis: AxisInt = 0,
    method: str = "average",
    na_option: str = "keep",
    ascending: bool = True,
    pct: bool = False,
) -> npt.NDArray[np.float64]:
    """
    Rank the values along a given axis.

    Parameters
    ----------
    values : np.ndarray or ExtensionArray
        Array whose values will be ranked. The number of dimensions in this
        array must not exceed 2.
    axis : int, default 0
        Axis over which to perform rankings.
    method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
        The method by which tiebreaks are broken during the ranking.
    na_option : {'keep', 'top'}, default 'keep'
        The method by which NaNs are placed in the ranking.
        - ``keep``: rank each NaN value with a NaN ranking
        - ``top``: replace each NaN with either +/- inf so that they are
          ranked at the top
    ascending : bool, default True
        Whether or not the elements should be ranked in ascending order.
    pct : bool, default False
        Whether or not to display the returned rankings in integer form
        (e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1).
    """
    is_datetimelike = needs_i8_conversion(values.dtype)
    values = _ensure_data(values)

    if values.ndim == 1:
        ranks = algos.rank_1d(
            values,
            is_datetimelike=is_datetimelike,
            ties_method=method,
            ascending=ascending,
            na_option=na_option,
            pct=pct,
        )
    elif values.ndim == 2:
        ranks = algos.rank_2d(
            values,
            axis=axis,
            is_datetimelike=is_datetimelike,
            ties_method=method,
            ascending=ascending,
            na_option=na_option,
            pct=pct,
        )
    else:
        raise TypeError("Arrays with ndim > 2 are not supported.")

    return ranks
Rank the values along a given axis.

Parameters
----------
values : np.ndarray or ExtensionArray
    Array whose values will be ranked. The number of dimensions in this
    array must not exceed 2.
axis : int, default 0
    Axis over which to perform rankings.
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
    The method by which tiebreaks are broken during the ranking.
na_option : {'keep', 'top'}, default 'keep'
    The method by which NaNs are placed in the ranking.
    - ``keep``: rank each NaN value with a NaN ranking
    - ``top``: replace each NaN with either +/- inf so that they are
      ranked at the top
ascending : bool, default True
    Whether or not the elements should be ranked in ascending order.
pct : bool, default False
    Whether or not to display the returned rankings in integer form
    (e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1).
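The tie-breaking semantics match what pandas exposes publicly through ``Series.rank``; a small demonstration of the ``method`` and ``pct`` options:

import pandas as pd

s = pd.Series([7, 1, 7, 3])
s.rank(method="average").tolist()  # [3.5, 1.0, 3.5, 2.0] - ties share the mean rank
s.rank(method="min").tolist()      # [3.0, 1.0, 3.0, 2.0] - ties take the lowest rank
s.rank(method="dense").tolist()    # [3.0, 1.0, 3.0, 2.0] - no gaps after ties
s.rank(pct=True).tolist()          # [0.875, 0.25, 0.875, 0.5] - ranks / len(s)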
173,037
from __future__ import annotations import operator from textwrap import dedent from typing import ( TYPE_CHECKING, Literal, cast, ) import warnings import numpy as np from pandas._libs import ( algos, hashtable as htable, iNaT, lib, ) from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeObj, TakeIndexer, npt, ) from pandas.util._decorators import doc from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import ( construct_1d_object_array_from_listlike, infer_dtype_from_array, ) from pandas.core.dtypes.common import ( ensure_float64, ensure_object, ensure_platform_int, is_array_like, is_bool_dtype, is_categorical_dtype, is_complex_dtype, is_extension_array_dtype, is_float_dtype, is_integer, is_integer_dtype, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_signed_integer_dtype, needs_i8_conversion, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import ( BaseMaskedDtype, ExtensionDtype, PandasDtype, ) from pandas.core.dtypes.generic import ( ABCDatetimeArray, ABCExtensionArray, ABCIndex, ABCMultiIndex, ABCSeries, ABCTimedeltaArray, ) from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, ) from pandas.core.array_algos.take import take_nd from pandas.core.construction import ( array as pd_array, ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.indexers import validate_indices The provided code snippet includes necessary dependencies for implementing the `checked_add_with_arr` function. Write a Python function `def checked_add_with_arr( arr: npt.NDArray[np.int64], b: int | npt.NDArray[np.int64], arr_mask: npt.NDArray[np.bool_] | None = None, b_mask: npt.NDArray[np.bool_] | None = None, ) -> npt.NDArray[np.int64]` to solve the following problem: Perform array addition that checks for underflow and overflow. Performs the addition of an int64 array and an int64 integer (or array) but checks that they do not result in overflow first. For elements that are indicated to be NaN, whether or not there is overflow for that element is automatically ignored. Parameters ---------- arr : np.ndarray[int64] addend. b : array or scalar addend. arr_mask : np.ndarray[bool] or None, default None array indicating which elements to exclude from checking b_mask : np.ndarray[bool] or None, default None array or scalar indicating which element(s) to exclude from checking Returns ------- sum : An array for elements x + b for each element x in arr if b is a scalar or an array for elements x + y for each element pair (x, y) in (arr, b). Raises ------ OverflowError if any x + y exceeds the maximum or minimum int64 value. Here is the function: def checked_add_with_arr( arr: npt.NDArray[np.int64], b: int | npt.NDArray[np.int64], arr_mask: npt.NDArray[np.bool_] | None = None, b_mask: npt.NDArray[np.bool_] | None = None, ) -> npt.NDArray[np.int64]: """ Perform array addition that checks for underflow and overflow. Performs the addition of an int64 array and an int64 integer (or array) but checks that they do not result in overflow first. For elements that are indicated to be NaN, whether or not there is overflow for that element is automatically ignored. Parameters ---------- arr : np.ndarray[int64] addend. b : array or scalar addend. 
arr_mask : np.ndarray[bool] or None, default None
    array indicating which elements to exclude from checking
b_mask : np.ndarray[bool] or None, default None
    array or scalar indicating which element(s) to exclude from checking

Returns
-------
sum : An array for elements x + b for each element x in arr if b is
    a scalar or an array for elements x + y for each element pair
    (x, y) in (arr, b).

Raises
------
OverflowError if any x + y exceeds the maximum or minimum int64 value.
Here is the function: def checked_add_with_arr(
    arr: npt.NDArray[np.int64],
    b: int | npt.NDArray[np.int64],
    arr_mask: npt.NDArray[np.bool_] | None = None,
    b_mask: npt.NDArray[np.bool_] | None = None,
) -> npt.NDArray[np.int64]:
    """
    Perform array addition that checks for underflow and overflow.

    Performs the addition of an int64 array and an int64 integer (or array)
    but checks that they do not result in overflow first. For elements that
    are indicated to be NaN, whether or not there is overflow for that
    element is automatically ignored.

    Parameters
    ----------
    arr : np.ndarray[int64] addend.
    b : array or scalar addend.
    arr_mask : np.ndarray[bool] or None, default None
        array indicating which elements to exclude from checking
    b_mask : np.ndarray[bool] or None, default None
        array or scalar indicating which element(s) to exclude from checking

    Returns
    -------
    sum : An array for elements x + b for each element x in arr if b is
        a scalar or an array for elements x + y for each element pair
        (x, y) in (arr, b).

    Raises
    ------
    OverflowError if any x + y exceeds the maximum or minimum int64 value.
    """
    # For performance reasons, we broadcast 'b' to the new array 'b2'
    # so that it has the same size as 'arr'.
    b2 = np.broadcast_to(b, arr.shape)
    if b_mask is not None:
        # We do the same broadcasting for b_mask as well.
        b2_mask = np.broadcast_to(b_mask, arr.shape)
    else:
        b2_mask = None

    # For elements that are NaN, regardless of their value, we should
    # ignore whether they overflow or not when doing the checked add.
    if arr_mask is not None and b2_mask is not None:
        not_nan = np.logical_not(arr_mask | b2_mask)
    elif arr_mask is not None:
        not_nan = np.logical_not(arr_mask)
    elif b_mask is not None:
        # error: Argument 1 to "__call__" of "_UFunc_Nin1_Nout1" has
        # incompatible type "Optional[ndarray[Any, dtype[bool_]]]";
        # expected "Union[_SupportsArray[dtype[Any]], _NestedSequence
        # [_SupportsArray[dtype[Any]]], bool, int, float, complex, str
        # , bytes, _NestedSequence[Union[bool, int, float, complex, str
        # , bytes]]]"
        not_nan = np.logical_not(b2_mask)  # type: ignore[arg-type]
    else:
        not_nan = np.empty(arr.shape, dtype=bool)
        not_nan.fill(True)

    # gh-14324: For each element in 'arr' and its corresponding element
    # in 'b2', we check the sign of the element in 'b2'. If it is positive,
    # we then check whether its sum with the element in 'arr' exceeds
    # np.iinfo(np.int64).max. If so, we have an overflow error. If it
    # is negative, we then check whether its sum with the element in
    # 'arr' exceeds np.iinfo(np.int64).min. If so, we have an overflow
    # error as well.
    i8max = lib.i8max
    i8min = iNaT

    mask1 = b2 > 0
    mask2 = b2 < 0

    if not mask1.any():
        to_raise = ((i8min - b2 > arr) & not_nan).any()
    elif not mask2.any():
        to_raise = ((i8max - b2 < arr) & not_nan).any()
    else:
        to_raise = ((i8max - b2[mask1] < arr[mask1]) & not_nan[mask1]).any() or (
            (i8min - b2[mask2] > arr[mask2]) & not_nan[mask2]
        ).any()

    if to_raise:
        raise OverflowError("Overflow in int64 addition")

    result = arr + b
    if arr_mask is not None or b2_mask is not None:
        np.putmask(result, ~not_nan, iNaT)

    return result
Perform array addition that checks for underflow and overflow.

Performs the addition of an int64 array and an int64 integer (or array)
but checks that they do not result in overflow first. For elements that
are indicated to be NaN, whether or not there is overflow for that
element is automatically ignored.

Parameters
----------
arr : np.ndarray[int64] addend.
b : array or scalar addend.
arr_mask : np.ndarray[bool] or None, default None
    array indicating which elements to exclude from checking
b_mask : np.ndarray[bool] or None, default None
    array or scalar indicating which element(s) to exclude from checking

Returns
-------
sum : An array for elements x + b for each element x in arr if b is
    a scalar or an array for elements x + y for each element pair
    (x, y) in (arr, b).

Raises
------
OverflowError if any x + y exceeds the maximum or minimum int64 value.
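The overflow pre-check can be reproduced with plain NumPy; for a positive addend ``b``, overflow occurs exactly where ``i8max - b < arr``:

import numpy as np

i8max = np.iinfo(np.int64).max
arr = np.array([i8max - 1, 5], dtype=np.int64)
b = np.int64(2)
# Same inequality the function tests for positive addends:
# the first element would overflow, the second would not.
i8max - b < arr  # array([ True, False])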
173,038
from __future__ import annotations import operator from textwrap import dedent from typing import ( TYPE_CHECKING, Literal, cast, ) import warnings import numpy as np from pandas._libs import ( algos, hashtable as htable, iNaT, lib, ) from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeObj, TakeIndexer, npt, ) from pandas.util._decorators import doc from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import ( construct_1d_object_array_from_listlike, infer_dtype_from_array, ) from pandas.core.dtypes.common import ( ensure_float64, ensure_object, ensure_platform_int, is_array_like, is_bool_dtype, is_categorical_dtype, is_complex_dtype, is_extension_array_dtype, is_float_dtype, is_integer, is_integer_dtype, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_signed_integer_dtype, needs_i8_conversion, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import ( BaseMaskedDtype, ExtensionDtype, PandasDtype, ) from pandas.core.dtypes.generic import ( ABCDatetimeArray, ABCExtensionArray, ABCIndex, ABCMultiIndex, ABCSeries, ABCTimedeltaArray, ) from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, ) from pandas.core.array_algos.take import take_nd from pandas.core.construction import ( array as pd_array, ensure_wrapped_if_datetimelike, extract_array, ) from pandas.core.indexers import validate_indices def unique(values): """ Return unique values based on a hash table. Uniques are returned in order of appearance. This does NOT sort. Significantly faster than numpy.unique for long enough sequences. Includes NA values. Parameters ---------- values : 1d array-like Returns ------- numpy.ndarray or ExtensionArray The return can be: * Index : when the input is an Index * Categorical : when the input is a Categorical dtype * ndarray : when the input is a Series/ndarray Return numpy.ndarray or ExtensionArray. See Also -------- Index.unique : Return unique values from an Index. Series.unique : Return unique values of Series object. Examples -------- >>> pd.unique(pd.Series([2, 1, 3, 3])) array([2, 1, 3]) >>> pd.unique(pd.Series([2] + [1] * 5)) array([2, 1]) >>> pd.unique(pd.Series([pd.Timestamp("20160101"), pd.Timestamp("20160101")])) array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]') >>> pd.unique( ... pd.Series( ... [ ... pd.Timestamp("20160101", tz="US/Eastern"), ... pd.Timestamp("20160101", tz="US/Eastern"), ... ] ... ) ... ) <DatetimeArray> ['2016-01-01 00:00:00-05:00'] Length: 1, dtype: datetime64[ns, US/Eastern] >>> pd.unique( ... pd.Index( ... [ ... pd.Timestamp("20160101", tz="US/Eastern"), ... pd.Timestamp("20160101", tz="US/Eastern"), ... ] ... ) ... ) DatetimeIndex(['2016-01-01 00:00:00-05:00'], dtype='datetime64[ns, US/Eastern]', freq=None) >>> pd.unique(list("baabc")) array(['b', 'a', 'c'], dtype=object) An unordered Categorical will return categories in the order of appearance. >>> pd.unique(pd.Series(pd.Categorical(list("baabc")))) ['b', 'a', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> pd.unique(pd.Series(pd.Categorical(list("baabc"), categories=list("abc")))) ['b', 'a', 'c'] Categories (3, object): ['a', 'b', 'c'] An ordered Categorical preserves the category ordering. >>> pd.unique( ... pd.Series( ... pd.Categorical(list("baabc"), categories=list("abc"), ordered=True) ... ) ... 
) ['b', 'a', 'c'] Categories (3, object): ['a' < 'b' < 'c'] An array of tuples >>> pd.unique([("a", "b"), ("b", "a"), ("a", "c"), ("b", "a")]) array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object) """ return unique_with_mask(values) def value_counts( values, sort: bool = True, ascending: bool = False, normalize: bool = False, bins=None, dropna: bool = True, ) -> Series: """ Compute a histogram of the counts of non-null values. Parameters ---------- values : ndarray (1-d) sort : bool, default True Sort by values ascending : bool, default False Sort in ascending order normalize: bool, default False If True then compute a relative histogram bins : integer, optional Rather than count values, group them into half-open bins, convenience for pd.cut, only works with numeric data dropna : bool, default True Don't include counts of NaN Returns ------- Series """ from pandas import ( Index, Series, ) index_name = getattr(values, "name", None) name = "proportion" if normalize else "count" if bins is not None: from pandas.core.reshape.tile import cut values = Series(values, copy=False) try: ii = cut(values, bins, include_lowest=True) except TypeError as err: raise TypeError("bins argument only works with numeric data.") from err # count, remove nulls (from the index), and but the bins result = ii.value_counts(dropna=dropna) result.name = name result = result[result.index.notna()] result.index = result.index.astype("interval") result = result.sort_index() # if we are dropna and we have NO values if dropna and (result._values == 0).all(): result = result.iloc[0:0] # normalizing is by len of all (regardless of dropna) counts = np.array([len(ii)]) else: if is_extension_array_dtype(values): # handle Categorical and sparse, result = Series(values, copy=False)._values.value_counts(dropna=dropna) result.name = name result.index.name = index_name counts = result._values if not isinstance(counts, np.ndarray): # e.g. ArrowExtensionArray counts = np.asarray(counts) elif isinstance(values, ABCMultiIndex): # GH49558 levels = list(range(values.nlevels)) result = ( Series(index=values, name=name) .groupby(level=levels, dropna=dropna) .size() ) result.index.names = values.names counts = result._values else: values = _ensure_arraylike(values) keys, counts = value_counts_arraylike(values, dropna) if keys.dtype == np.float16: keys = keys.astype(np.float32) # For backwards compatibility, we let Index do its normal type # inference, _except_ for if if infers from object to bool. idx = Index(keys) if idx.dtype == bool and keys.dtype == object: idx = idx.astype(object) idx.name = index_name result = Series(counts, index=idx, name=name, copy=False) if sort: result = result.sort_values(ascending=ascending) if normalize: result = result / counts.sum() return result ArrayLike = Union["ExtensionArray", np.ndarray] def concat_compat(to_concat, axis: AxisInt = 0, ea_compat_axis: bool = False): """ provide concatenation of an array of arrays each of which is a single 'normalized' dtypes (in that for example, if it's object, then it is a non-datetimelike and provide a combined dtype for the resulting array that preserves the overall dtype if possible) Parameters ---------- to_concat : array of arrays axis : axis to provide concatenation ea_compat_axis : bool, default False For ExtensionArray compat, behave as if axis == 1 when determining whether to drop empty arrays. 
Returns ------- a single array, preserving the combined dtypes """ # filter empty arrays # 1-d dtypes always are included here def is_nonempty(x) -> bool: if x.ndim <= axis: return True return x.shape[axis] > 0 # If all arrays are empty, there's nothing to convert, just short-cut to # the concatenation, #3121. # # Creating an empty array directly is tempting, but the winnings would be # marginal given that it would still require shape & dtype calculation and # np.concatenate which has them both implemented is compiled. non_empties = [x for x in to_concat if is_nonempty(x)] if non_empties and axis == 0 and not ea_compat_axis: # ea_compat_axis see GH#39574 to_concat = non_empties dtypes = {obj.dtype for obj in to_concat} kinds = {obj.dtype.kind for obj in to_concat} contains_datetime = any( isinstance(dtype, (np.dtype, DatetimeTZDtype)) and dtype.kind in ["m", "M"] for dtype in dtypes ) or any(isinstance(obj, ABCExtensionArray) and obj.ndim > 1 for obj in to_concat) all_empty = not len(non_empties) single_dtype = len({x.dtype for x in to_concat}) == 1 any_ea = any(isinstance(x.dtype, ExtensionDtype) for x in to_concat) if contains_datetime: return _concat_datetime(to_concat, axis=axis) if any_ea: # we ignore axis here, as internally concatting with EAs is always # for axis=0 if not single_dtype: target_dtype = find_common_type([x.dtype for x in to_concat]) target_dtype = common_dtype_categorical_compat(to_concat, target_dtype) to_concat = [ astype_array(arr, target_dtype, copy=False) for arr in to_concat ] if isinstance(to_concat[0], ABCExtensionArray): # TODO: what about EA-backed Index? cls = type(to_concat[0]) return cls._concat_same_type(to_concat) else: return np.concatenate(to_concat) elif all_empty: # we have all empties, but may need to coerce the result dtype to # object if we have non-numeric type operands (numpy would otherwise # cast this to float) if len(kinds) != 1: if not len(kinds - {"i", "u", "f"}) or not len(kinds - {"b", "i", "u"}): # let numpy coerce pass else: # coerce to object to_concat = [x.astype("object") for x in to_concat] kinds = {"o"} result = np.concatenate(to_concat, axis=axis) if "b" in kinds and result.dtype.kind in ["i", "u", "f"]: # GH#39817 cast to object instead of casting bools to numeric result = result.astype(object, copy=False) return result ABCMultiIndex = cast( "Type[MultiIndex]", create_pandas_abc_type("ABCMultiIndex", "_typ", ("multiindex",)), ) ABCIndex = cast( "Type[Index]", create_pandas_abc_type( "ABCIndex", "_typ", { "index", "rangeindex", "multiindex", "datetimeindex", "timedeltaindex", "periodindex", "categoricalindex", "intervalindex", }, ), ) The provided code snippet includes necessary dependencies for implementing the `union_with_duplicates` function. Write a Python function `def union_with_duplicates( lvals: ArrayLike | Index, rvals: ArrayLike | Index ) -> ArrayLike | Index` to solve the following problem: Extracts the union from lvals and rvals with respect to duplicates and nans in both arrays. Parameters ---------- lvals: np.ndarray or ExtensionArray left values, which are ordered in front. rvals: np.ndarray or ExtensionArray right values, ordered after lvals. Returns ------- np.ndarray or ExtensionArray Containing the unsorted union of both arrays. Notes ----- Caller is responsible for ensuring lvals.dtype == rvals.dtype.
Here is the function: def union_with_duplicates( lvals: ArrayLike | Index, rvals: ArrayLike | Index ) -> ArrayLike | Index: """ Extracts the union from lvals and rvals with respect to duplicates and nans in both arrays. Parameters ---------- lvals: np.ndarray or ExtensionArray left values, which are ordered in front. rvals: np.ndarray or ExtensionArray right values, ordered after lvals. Returns ------- np.ndarray or ExtensionArray Containing the unsorted union of both arrays. Notes ----- Caller is responsible for ensuring lvals.dtype == rvals.dtype. """ from pandas import Series l_count = value_counts(lvals, dropna=False) r_count = value_counts(rvals, dropna=False) l_count, r_count = l_count.align(r_count, fill_value=0) final_count = np.maximum(l_count.values, r_count.values) final_count = Series(final_count, index=l_count.index, dtype="int", copy=False) if isinstance(lvals, ABCMultiIndex) and isinstance(rvals, ABCMultiIndex): unique_vals = lvals.append(rvals).unique() else: if isinstance(lvals, ABCIndex): lvals = lvals._values if isinstance(rvals, ABCIndex): rvals = rvals._values unique_vals = unique(concat_compat([lvals, rvals])) unique_vals = ensure_wrapped_if_datetimelike(unique_vals) repeats = final_count.reindex(unique_vals).values return np.repeat(unique_vals, repeats)
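A brief usage sketch of the helper above (my illustration, not part of the dataset row; it assumes union_with_duplicates and its pandas-internal dependencies are importable as defined):

import numpy as np

# Hypothetical call on two plain numpy arrays.
left = np.array([1, 1, 2])
right = np.array([1, 3, 3])
result = union_with_duplicates(left, right)
# Each value is kept max(left_count, right_count) times, in order of
# first appearance: 1 -> max(2, 1), 2 -> max(1, 0), 3 -> max(0, 2).
print(result)  # [1 1 2 3 3]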
Extracts the union from lvals and rvals with respect to duplicates and nans in both arrays. Parameters ---------- lvals: np.ndarray or ExtensionArray left values, which are ordered in front. rvals: np.ndarray or ExtensionArray right values, ordered after lvals. Returns ------- np.ndarray or ExtensionArray Containing the unsorted union of both arrays. Notes ----- Caller is responsible for ensuring lvals.dtype == rvals.dtype.
173,039
from __future__ import annotations import collections import datetime as dt from functools import partial import gc from json import loads import operator import pickle import re from typing import ( TYPE_CHECKING, Any, Callable, ClassVar, Hashable, Iterator, Literal, Mapping, NoReturn, Sequence, Type, cast, final, overload, ) import warnings import weakref import numpy as np from pandas._config import ( config, using_copy_on_write, ) from pandas._libs import lib from pandas._libs.lib import is_range_indexer from pandas._libs.tslibs import ( Period, Tick, Timestamp, to_offset, ) from pandas._typing import ( AlignJoin, AnyArrayLike, ArrayLike, Axis, AxisInt, CompressionOptions, Dtype, DtypeArg, DtypeBackend, DtypeObj, FilePath, FillnaOptions, FloatFormatType, FormattersType, Frequency, IgnoreRaise, IndexKeyFunc, IndexLabel, IntervalClosedType, JSONSerializable, Level, Manager, NaPosition, NDFrameT, RandomState, Renamer, Scalar, SortKind, StorageOptions, Suffixes, T, TimeAmbiguous, TimedeltaConvertibleTypes, TimeNonexistent, TimestampConvertibleTypes, ValueKeyFunc, WriteBuffer, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import function as nv from pandas.errors import ( AbstractMethodError, InvalidIndexError, SettingWithCopyError, SettingWithCopyWarning, ) from pandas.util._decorators import doc from pandas.util._exceptions import find_stack_level from pandas.util._validators import ( check_dtype_backend, validate_ascending, validate_bool_kwarg, validate_fillna_kwargs, validate_inclusive, ) from pandas.core.dtypes.astype import astype_is_view from pandas.core.dtypes.common import ( ensure_object, ensure_platform_int, ensure_str, is_bool, is_bool_dtype, is_datetime64_any_dtype, is_datetime64tz_dtype, is_dict_like, is_dtype_equal, is_extension_array_dtype, is_float, is_list_like, is_number, is_numeric_dtype, is_re_compilable, is_scalar, is_timedelta64_dtype, pandas_dtype, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.inference import ( is_hashable, is_nested_list_like, ) from pandas.core.dtypes.missing import ( isna, notna, ) from pandas.core import ( algorithms as algos, arraylike, common, indexing, nanops, sample, ) from pandas.core.array_algos.replace import should_use_regex from pandas.core.arrays import ExtensionArray from pandas.core.base import PandasObject from pandas.core.construction import extract_array from pandas.core.flags import Flags from pandas.core.indexes.api import ( DatetimeIndex, Index, MultiIndex, PeriodIndex, RangeIndex, default_index, ensure_index, ) from pandas.core.internals import ( ArrayManager, BlockManager, SingleArrayManager, ) from pandas.core.internals.construction import ( mgr_to_mgr, ndarray_to_mgr, ) from pandas.core.methods.describe import describe_ndframe from pandas.core.missing import ( clean_fill_method, clean_reindex_fill_method, find_valid_index, ) from pandas.core.ops import align_method_FRAME from pandas.core.reshape.concat import concat from pandas.core.shared_docs import _shared_docs from pandas.core.sorting import get_indexer_indexer from pandas.core.window import ( Expanding, ExponentialMovingWindow, Rolling, Window, ) from pandas.io.formats.format import ( DataFrameFormatter, DataFrameRenderer, ) from pandas.io.formats.printing import pprint_thing The provided code snippet includes necessary dependencies for implementing the `_doc_params` function. 
Write a Python function `def _doc_params(cls)` to solve the following problem: Return a tuple of the doc params. Here is the function: def _doc_params(cls): """Return a tuple of the doc params.""" axis_descr = ( f"{{{', '.join([f'{a} ({i})' for i, a in enumerate(cls._AXIS_ORDERS)])}}}" ) name = cls._constructor_sliced.__name__ if cls._AXIS_LEN > 1 else "scalar" name2 = cls.__name__ return axis_descr, name, name2
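As an illustrative sketch (my addition; it assumes the helper above is applied to pandas' own DataFrame class, whose private _AXIS_ORDERS attribute is ["index", "columns"]):

import pandas as pd

axis_descr, name, name2 = _doc_params(pd.DataFrame)
print(axis_descr)  # {index (0), columns (1)}
print(name)        # Series -- DataFrame._constructor_sliced.__name__
print(name2)       # DataFrame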
Return a tuple of the doc params.
173,040
from __future__ import annotations import collections import datetime as dt from functools import partial import gc from json import loads import operator import pickle import re from typing import ( TYPE_CHECKING, Any, Callable, ClassVar, Hashable, Iterator, Literal, Mapping, NoReturn, Sequence, Type, cast, final, overload, ) import warnings import weakref import numpy as np from pandas._config import ( config, using_copy_on_write, ) from pandas._libs import lib from pandas._libs.lib import is_range_indexer from pandas._libs.tslibs import ( Period, Tick, Timestamp, to_offset, ) from pandas._typing import ( AlignJoin, AnyArrayLike, ArrayLike, Axis, AxisInt, CompressionOptions, Dtype, DtypeArg, DtypeBackend, DtypeObj, FilePath, FillnaOptions, FloatFormatType, FormattersType, Frequency, IgnoreRaise, IndexKeyFunc, IndexLabel, IntervalClosedType, JSONSerializable, Level, Manager, NaPosition, NDFrameT, RandomState, Renamer, Scalar, SortKind, StorageOptions, Suffixes, T, TimeAmbiguous, TimedeltaConvertibleTypes, TimeNonexistent, TimestampConvertibleTypes, ValueKeyFunc, WriteBuffer, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import function as nv from pandas.errors import ( AbstractMethodError, InvalidIndexError, SettingWithCopyError, SettingWithCopyWarning, ) from pandas.util._decorators import doc from pandas.util._exceptions import find_stack_level from pandas.util._validators import ( check_dtype_backend, validate_ascending, validate_bool_kwarg, validate_fillna_kwargs, validate_inclusive, ) from pandas.core.dtypes.astype import astype_is_view from pandas.core.dtypes.common import ( ensure_object, ensure_platform_int, ensure_str, is_bool, is_bool_dtype, is_datetime64_any_dtype, is_datetime64tz_dtype, is_dict_like, is_dtype_equal, is_extension_array_dtype, is_float, is_list_like, is_number, is_numeric_dtype, is_re_compilable, is_scalar, is_timedelta64_dtype, pandas_dtype, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) from pandas.core.dtypes.inference import ( is_hashable, is_nested_list_like, ) from pandas.core.dtypes.missing import ( isna, notna, ) from pandas.core import ( algorithms as algos, arraylike, common, indexing, nanops, sample, ) from pandas.core.array_algos.replace import should_use_regex from pandas.core.arrays import ExtensionArray from pandas.core.base import PandasObject from pandas.core.construction import extract_array from pandas.core.flags import Flags from pandas.core.indexes.api import ( DatetimeIndex, Index, MultiIndex, PeriodIndex, RangeIndex, default_index, ensure_index, ) from pandas.core.internals import ( ArrayManager, BlockManager, SingleArrayManager, ) from pandas.core.internals.construction import ( mgr_to_mgr, ndarray_to_mgr, ) from pandas.core.methods.describe import describe_ndframe from pandas.core.missing import ( clean_fill_method, clean_reindex_fill_method, find_valid_index, ) from pandas.core.ops import align_method_FRAME from pandas.core.reshape.concat import concat from pandas.core.shared_docs import _shared_docs from pandas.core.sorting import get_indexer_indexer from pandas.core.window import ( Expanding, ExponentialMovingWindow, Rolling, Window, ) from pandas.io.formats.format import ( DataFrameFormatter, DataFrameRenderer, ) from pandas.io.formats.printing import pprint_thing NDFrameT = TypeVar("NDFrameT", bound="NDFrame") def is_datetime64tz_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of a DatetimeTZDtype dtype. 
Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of a DatetimeTZDtype dtype. Examples -------- >>> is_datetime64tz_dtype(object) False >>> is_datetime64tz_dtype([1, 2, 3]) False >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3])) # tz-naive False >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) True >>> dtype = DatetimeTZDtype("ns", tz="US/Eastern") >>> s = pd.Series([], dtype=dtype) >>> is_datetime64tz_dtype(dtype) True >>> is_datetime64tz_dtype(s) True """ if isinstance(arr_or_dtype, DatetimeTZDtype): # GH#33400 fastpath for dtype object # GH 34986 return True if arr_or_dtype is None: return False return DatetimeTZDtype.is_dtype(arr_or_dtype) The provided code snippet includes necessary dependencies for implementing the `_align_as_utc` function. Write a Python function `def _align_as_utc( left: NDFrameT, right: NDFrameT, join_index: Index | None ) -> tuple[NDFrameT, NDFrameT]` to solve the following problem: If we are aligning timezone-aware DatetimeIndexes and the timezones do not match, convert both to UTC. Here is the function: def _align_as_utc( left: NDFrameT, right: NDFrameT, join_index: Index | None ) -> tuple[NDFrameT, NDFrameT]: """ If we are aligning timezone-aware DatetimeIndexes and the timezones do not match, convert both to UTC. """ if is_datetime64tz_dtype(left.index.dtype): if left.index.tz != right.index.tz: if join_index is not None: # GH#33671 ensure we don't change the index on # our original Series (NB: by default deep=False) left = left.copy() right = right.copy() left.index = join_index right.index = join_index return left, right
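A hedged usage sketch (my illustration; inside pandas this helper runs during align(), where join_index has already been computed and matches both lengths):

import pandas as pd

idx_l = pd.date_range("2023-01-01", periods=2, tz="US/Eastern")
idx_r = idx_l.tz_convert("UTC")  # same instants, different tz
left = pd.Series([1, 2], index=idx_l)
right = pd.Series([3, 4], index=idx_r)
# Joining tz-aware indexes with differing timezones yields a UTC index.
join_index = idx_l.join(idx_r, how="outer")
left2, right2 = _align_as_utc(left, right, join_index)
print(left2.index.tz, right2.index.tz)  # both UTC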
If we are aligning timezone-aware DatetimeIndexes and the timezones do not match, convert both to UTC.
173,041
from __future__ import annotations import datetime as dt import functools from typing import ( TYPE_CHECKING, Any, Literal, Sized, TypeVar, cast, overload, ) import warnings import numpy as np from pandas._libs import lib from pandas._libs.missing import ( NA, NAType, checknull, ) from pandas._libs.tslibs import ( NaT, OutOfBoundsDatetime, OutOfBoundsTimedelta, Timedelta, Timestamp, get_unit_from_dtype, is_supported_unit, ) from pandas._libs.tslibs.timedeltas import array_to_timedelta64 from pandas._typing import ( ArrayLike, Dtype, DtypeObj, NumpyIndexT, Scalar, npt, ) from pandas.errors import ( IntCastingNaNError, LossySetitemError, ) from pandas.core.dtypes.common import ( ensure_int8, ensure_int16, ensure_int32, ensure_int64, ensure_object, ensure_str, is_bool, is_bool_dtype, is_complex, is_complex_dtype, is_datetime64_dtype, is_extension_array_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_signed_integer_dtype, is_string_dtype, is_timedelta64_dtype, is_unsigned_integer_dtype, pandas_dtype as pandas_dtype_func, ) from pandas.core.dtypes.dtypes import ( BaseMaskedDtype, CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PandasExtensionDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ( ABCExtensionArray, ABCIndex, ABCSeries, ) from pandas.core.dtypes.inference import is_list_like from pandas.core.dtypes.missing import ( is_valid_na_for_dtype, isna, na_value_for_dtype, notna, ) def is_object_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of the object dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the object dtype. Examples -------- >>> from pandas.api.types import is_object_dtype >>> is_object_dtype(object) True >>> is_object_dtype(int) False >>> is_object_dtype(np.array([], dtype=object)) True >>> is_object_dtype(np.array([], dtype=int)) False >>> is_object_dtype([1, 2, 3]) False """ return _is_dtype_type(arr_or_dtype, classes(np.object_)) ABCSeries = cast( "Type[Series]", create_pandas_abc_type("ABCSeries", "_typ", ("series",)), ) The provided code snippet includes necessary dependencies for implementing the `is_nested_object` function. Write a Python function `def is_nested_object(obj) -> bool` to solve the following problem: Return a boolean if we have a nested object, e.g. a Series with 1 or more Series elements. This may not necessarily be performant. Here is the function: def is_nested_object(obj) -> bool: """ Return a boolean if we have a nested object, e.g. a Series with 1 or more Series elements. This may not necessarily be performant. """ return bool( isinstance(obj, ABCSeries) and is_object_dtype(obj.dtype) and any(isinstance(v, ABCSeries) for v in obj._values) )
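A quick usage sketch (my addition, assuming the function above):

import pandas as pd

nested = pd.Series([pd.Series([1, 2]), pd.Series([3])], dtype=object)
flat = pd.Series([1, 2, 3])
print(is_nested_object(nested))  # True: object dtype with Series elements
print(is_nested_object(flat))    # False: int64 dtype, no nesting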
Return a boolean if we have a nested object, e.g. a Series with 1 or more Series elements. This may not necessarily be performant.
173,042
from __future__ import annotations import datetime as dt import functools from typing import ( TYPE_CHECKING, Any, Literal, Sized, TypeVar, cast, overload, ) import warnings import numpy as np from pandas._libs import lib from pandas._libs.missing import ( NA, NAType, checknull, ) from pandas._libs.tslibs import ( NaT, OutOfBoundsDatetime, OutOfBoundsTimedelta, Timedelta, Timestamp, get_unit_from_dtype, is_supported_unit, ) from pandas._libs.tslibs.timedeltas import array_to_timedelta64 from pandas._typing import ( ArrayLike, Dtype, DtypeObj, NumpyIndexT, Scalar, npt, ) from pandas.errors import ( IntCastingNaNError, LossySetitemError, ) from pandas.core.dtypes.common import ( ensure_int8, ensure_int16, ensure_int32, ensure_int64, ensure_object, ensure_str, is_bool, is_bool_dtype, is_complex, is_complex_dtype, is_datetime64_dtype, is_extension_array_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_signed_integer_dtype, is_string_dtype, is_timedelta64_dtype, is_unsigned_integer_dtype, pandas_dtype as pandas_dtype_func, ) from pandas.core.dtypes.dtypes import ( BaseMaskedDtype, CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PandasExtensionDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ( ABCExtensionArray, ABCIndex, ABCSeries, ) from pandas.core.dtypes.inference import is_list_like from pandas.core.dtypes.missing import ( is_valid_na_for_dtype, isna, na_value_for_dtype, notna, ) NumpyIndexT = TypeVar("NumpyIndexT", np.ndarray, "Index") def is_signed_integer_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a signed integer dtype. Unlike in `is_any_int_dtype`, timedelta64 instances will return False. The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered as integer by this function. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a signed integer dtype and not an instance of timedelta64. Examples -------- >>> is_signed_integer_dtype(str) False >>> is_signed_integer_dtype(int) True >>> is_signed_integer_dtype(float) False >>> is_signed_integer_dtype(np.uint64) # unsigned False >>> is_signed_integer_dtype('int8') True >>> is_signed_integer_dtype('Int8') True >>> is_signed_integer_dtype(pd.Int8Dtype) True >>> is_signed_integer_dtype(np.datetime64) False >>> is_signed_integer_dtype(np.timedelta64) False >>> is_signed_integer_dtype(np.array(['a', 'b'])) False >>> is_signed_integer_dtype(pd.Series([1, 2])) True >>> is_signed_integer_dtype(np.array([], dtype=np.timedelta64)) False >>> is_signed_integer_dtype(pd.Index([1, 2.])) # float False >>> is_signed_integer_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned False """ return _is_dtype_type( arr_or_dtype, classes_and_not_datetimelike(np.signedinteger) ) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind == "i" ) def is_unsigned_integer_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of an unsigned integer dtype. The nullable Integer dtypes (e.g. pandas.UInt64Dtype) are also considered as integer by this function. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of an unsigned integer dtype. 
Examples -------- >>> is_unsigned_integer_dtype(str) False >>> is_unsigned_integer_dtype(int) # signed False >>> is_unsigned_integer_dtype(float) False >>> is_unsigned_integer_dtype(np.uint64) True >>> is_unsigned_integer_dtype('uint8') True >>> is_unsigned_integer_dtype('UInt8') True >>> is_unsigned_integer_dtype(pd.UInt8Dtype) True >>> is_unsigned_integer_dtype(np.array(['a', 'b'])) False >>> is_unsigned_integer_dtype(pd.Series([1, 2])) # signed False >>> is_unsigned_integer_dtype(pd.Index([1, 2.])) # float False >>> is_unsigned_integer_dtype(np.array([1, 2], dtype=np.uint32)) True """ return _is_dtype_type( arr_or_dtype, classes_and_not_datetimelike(np.unsignedinteger) ) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind == "u" ) def is_float_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a float dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a float dtype. Examples -------- >>> from pandas.api.types import is_float_dtype >>> is_float_dtype(str) False >>> is_float_dtype(int) False >>> is_float_dtype(float) True >>> is_float_dtype(np.array(['a', 'b'])) False >>> is_float_dtype(pd.Series([1, 2])) False >>> is_float_dtype(pd.Index([1, 2.])) True """ return _is_dtype_type(arr_or_dtype, classes(np.floating)) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "f" ) The provided code snippet includes necessary dependencies for implementing the `maybe_upcast_numeric_to_64bit` function. Write a Python function `def maybe_upcast_numeric_to_64bit(arr: NumpyIndexT) -> NumpyIndexT` to solve the following problem: If the array is an int/uint/float with a bit size lower than 64 bits, upcast it to 64 bits. Parameters ---------- arr : ndarray or ExtensionArray Returns ------- ndarray or ExtensionArray Here is the function: def maybe_upcast_numeric_to_64bit(arr: NumpyIndexT) -> NumpyIndexT: """ If the array is an int/uint/float with a bit size lower than 64 bits, upcast it to 64 bits. Parameters ---------- arr : ndarray or ExtensionArray Returns ------- ndarray or ExtensionArray """ dtype = arr.dtype if is_signed_integer_dtype(dtype) and dtype != np.int64: return arr.astype(np.int64) elif is_unsigned_integer_dtype(dtype) and dtype != np.uint64: return arr.astype(np.uint64) elif is_float_dtype(dtype) and dtype != np.float64: return arr.astype(np.float64) else: return arr
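A short sketch of the behavior (my addition, assuming the function above):

import numpy as np

print(maybe_upcast_numeric_to_64bit(np.array([1, 2], dtype=np.int32)).dtype)   # int64
print(maybe_upcast_numeric_to_64bit(np.array([1.0], dtype=np.float32)).dtype)  # float64
print(maybe_upcast_numeric_to_64bit(np.array([1, 2], dtype=np.uint64)).dtype)  # uint64, unchanged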
If the array is an int/uint/float with a bit size lower than 64 bits, upcast it to 64 bits. Parameters ---------- arr : ndarray or ExtensionArray Returns ------- ndarray or ExtensionArray
173,043
from __future__ import annotations import datetime as dt import functools from typing import ( TYPE_CHECKING, Any, Literal, Sized, TypeVar, cast, overload, ) import warnings import numpy as np from pandas._libs import lib from pandas._libs.missing import ( NA, NAType, checknull, ) from pandas._libs.tslibs import ( NaT, OutOfBoundsDatetime, OutOfBoundsTimedelta, Timedelta, Timestamp, get_unit_from_dtype, is_supported_unit, ) from pandas._libs.tslibs.timedeltas import array_to_timedelta64 from pandas._typing import ( ArrayLike, Dtype, DtypeObj, NumpyIndexT, Scalar, npt, ) from pandas.errors import ( IntCastingNaNError, LossySetitemError, ) from pandas.core.dtypes.common import ( ensure_int8, ensure_int16, ensure_int32, ensure_int64, ensure_object, ensure_str, is_bool, is_bool_dtype, is_complex, is_complex_dtype, is_datetime64_dtype, is_extension_array_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_signed_integer_dtype, is_string_dtype, is_timedelta64_dtype, is_unsigned_integer_dtype, pandas_dtype as pandas_dtype_func, ) from pandas.core.dtypes.dtypes import ( BaseMaskedDtype, CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PandasExtensionDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ( ABCExtensionArray, ABCIndex, ABCSeries, ) from pandas.core.dtypes.inference import is_list_like from pandas.core.dtypes.missing import ( is_valid_na_for_dtype, isna, na_value_for_dtype, notna, ) def maybe_downcast_to_dtype(result: np.ndarray, dtype: str | np.dtype) -> np.ndarray: ... def maybe_downcast_to_dtype(result: ExtensionArray, dtype: str | np.dtype) -> ArrayLike: ... def maybe_downcast_to_dtype(result: ArrayLike, dtype: str | np.dtype) -> ArrayLike: """ try to cast to the specified dtype (e.g. convert back to bool/int or could be an astype of float64->float32 """ do_round = False if isinstance(dtype, str): if dtype == "infer": inferred_type = lib.infer_dtype(result, skipna=False) if inferred_type == "boolean": dtype = "bool" elif inferred_type == "integer": dtype = "int64" elif inferred_type == "datetime64": dtype = "datetime64[ns]" elif inferred_type in ["timedelta", "timedelta64"]: dtype = "timedelta64[ns]" # try to upcast here elif inferred_type == "floating": dtype = "int64" if issubclass(result.dtype.type, np.number): do_round = True else: # TODO: complex? what if result is already non-object? dtype = "object" dtype = np.dtype(dtype) if not isinstance(dtype, np.dtype): # enforce our signature annotation raise TypeError(dtype) # pragma: no cover converted = maybe_downcast_numeric(result, dtype, do_round) if converted is not result: return converted # a datetimelike # GH12821, iNaT is cast to float if dtype.kind in ["M", "m"] and result.dtype.kind in ["i", "f"]: result = result.astype(dtype) elif dtype.kind == "m" and result.dtype == _dtype_obj: # test_where_downcast_to_td64 result = cast(np.ndarray, result) result = array_to_timedelta64(result) elif dtype == np.dtype("M8[ns]") and result.dtype == _dtype_obj: result = cast(np.ndarray, result) return np.asarray(maybe_cast_to_datetime(result, dtype=dtype)) return result def maybe_cast_to_extension_array( cls: type[ExtensionArray], obj: ArrayLike, dtype: ExtensionDtype | None = None ) -> ArrayLike: """ Call to `_from_sequence` that returns the object unchanged on Exception. 
Parameters ---------- cls : class, subclass of ExtensionArray obj : arraylike Values to pass to cls._from_sequence dtype : ExtensionDtype, optional Returns ------- ExtensionArray or obj """ from pandas.core.arrays.string_ import BaseStringArray assert isinstance(cls, type), f"must pass a type: {cls}" assertion_msg = f"must pass a subclass of ExtensionArray: {cls}" assert issubclass(cls, ABCExtensionArray), assertion_msg # Everything can be converted to StringArrays, but we may not want to convert if issubclass(cls, BaseStringArray) and lib.infer_dtype(obj) != "string": return obj try: result = cls._from_sequence(obj, dtype=dtype) except Exception: # We can't predict what downstream EA constructors may raise result = obj return result ArrayLike = Union["ExtensionArray", np.ndarray] DtypeObj = Union[np.dtype, "ExtensionDtype"] def is_numeric_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a numeric dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a numeric dtype. Examples -------- >>> from pandas.api.types import is_numeric_dtype >>> is_numeric_dtype(str) False >>> is_numeric_dtype(int) True >>> is_numeric_dtype(float) True >>> is_numeric_dtype(np.uint64) True >>> is_numeric_dtype(np.datetime64) False >>> is_numeric_dtype(np.timedelta64) False >>> is_numeric_dtype(np.array(['a', 'b'])) False >>> is_numeric_dtype(pd.Series([1, 2])) True >>> is_numeric_dtype(pd.Index([1, 2.])) True >>> is_numeric_dtype(np.array([], dtype=np.timedelta64)) False """ return _is_dtype_type( arr_or_dtype, classes_and_not_datetimelike(np.number, np.bool_) ) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ._is_numeric ) class CategoricalDtype(PandasExtensionDtype, ExtensionDtype): """ Type for categorical data with the categories and orderedness. Parameters ---------- categories : sequence, optional Must be unique, and must not contain any nulls. The categories are stored in an Index, and if an index is provided the dtype of that index will be used. ordered : bool or None, default False Whether or not this categorical is treated as a ordered categorical. None can be used to maintain the ordered value of existing categoricals when used in operations that combine categoricals, e.g. astype, and will resolve to False if there is no existing ordered to maintain. Attributes ---------- categories ordered Methods ------- None See Also -------- Categorical : Represent a categorical variable in classic R / S-plus fashion. Notes ----- This class is useful for specifying the type of a ``Categorical`` independent of the values. See :ref:`categorical.categoricaldtype` for more. Examples -------- >>> t = pd.CategoricalDtype(categories=['b', 'a'], ordered=True) >>> pd.Series(['a', 'b', 'a', 'c'], dtype=t) 0 a 1 b 2 a 3 NaN dtype: category Categories (2, object): ['b' < 'a'] An empty CategoricalDtype with a specific dtype can be created by providing an empty index. As follows, >>> pd.CategoricalDtype(pd.DatetimeIndex([])).categories.dtype dtype('<M8[ns]') """ # TODO: Document public vs. 
private API name = "category" type: type[CategoricalDtypeType] = CategoricalDtypeType kind: str_type = "O" str = "|O08" base = np.dtype("O") _metadata = ("categories", "ordered") _cache_dtypes: dict[str_type, PandasExtensionDtype] = {} def __init__(self, categories=None, ordered: Ordered = False) -> None: self._finalize(categories, ordered, fastpath=False) def _from_fastpath( cls, categories=None, ordered: bool | None = None ) -> CategoricalDtype: self = cls.__new__(cls) self._finalize(categories, ordered, fastpath=True) return self def _from_categorical_dtype( cls, dtype: CategoricalDtype, categories=None, ordered: Ordered = None ) -> CategoricalDtype: if categories is ordered is None: return dtype if categories is None: categories = dtype.categories if ordered is None: ordered = dtype.ordered return cls(categories, ordered) def _from_values_or_dtype( cls, values=None, categories=None, ordered: bool | None = None, dtype: Dtype | None = None, ) -> CategoricalDtype: """ Construct dtype from the input parameters used in :class:`Categorical`. This constructor method specifically does not do the factorization step, if that is needed to find the categories. This constructor may therefore return ``CategoricalDtype(categories=None, ordered=None)``, which may not be useful. Additional steps may therefore have to be taken to create the final dtype. The return dtype is specified from the inputs in this prioritized order: 1. if dtype is a CategoricalDtype, return dtype 2. if dtype is the string 'category', create a CategoricalDtype from the supplied categories and ordered parameters, and return that. 3. if values is a categorical, use value.dtype, but override it with categories and ordered if either/both of those are not None. 4. if dtype is None and values is not a categorical, construct the dtype from categories and ordered, even if either of those is None. Parameters ---------- values : list-like, optional The list-like must be 1-dimensional. categories : list-like, optional Categories for the CategoricalDtype. ordered : bool, optional Designating if the categories are ordered. dtype : CategoricalDtype or the string "category", optional If ``CategoricalDtype``, cannot be used together with `categories` or `ordered`. Returns ------- CategoricalDtype Examples -------- >>> pd.CategoricalDtype._from_values_or_dtype() CategoricalDtype(categories=None, ordered=None) >>> pd.CategoricalDtype._from_values_or_dtype( ... categories=['a', 'b'], ordered=True ... ) CategoricalDtype(categories=['a', 'b'], ordered=True) >>> dtype1 = pd.CategoricalDtype(['a', 'b'], ordered=True) >>> dtype2 = pd.CategoricalDtype(['x', 'y'], ordered=False) >>> c = pd.Categorical([0, 1], dtype=dtype1, fastpath=True) >>> pd.CategoricalDtype._from_values_or_dtype( ... c, ['x', 'y'], ordered=True, dtype=dtype2 ... ) Traceback (most recent call last): ... ValueError: Cannot specify `categories` or `ordered` together with `dtype`. 
The supplied dtype takes precedence over values' dtype: >>> pd.CategoricalDtype._from_values_or_dtype(c, dtype=dtype2) CategoricalDtype(categories=['x', 'y'], ordered=False) """ if dtype is not None: # The dtype argument takes precedence over values.dtype (if any) if isinstance(dtype, str): if dtype == "category": if ordered is None and cls.is_dtype(values): # GH#49309 preserve orderedness ordered = values.dtype.ordered dtype = CategoricalDtype(categories, ordered) else: raise ValueError(f"Unknown dtype {repr(dtype)}") elif categories is not None or ordered is not None: raise ValueError( "Cannot specify `categories` or `ordered` together with `dtype`." ) elif not isinstance(dtype, CategoricalDtype): raise ValueError(f"Cannot not construct CategoricalDtype from {dtype}") elif cls.is_dtype(values): # If no "dtype" was passed, use the one from "values", but honor # the "ordered" and "categories" arguments dtype = values.dtype._from_categorical_dtype( values.dtype, categories, ordered ) else: # If dtype=None and values is not categorical, create a new dtype. # Note: This could potentially have categories=None and # ordered=None. dtype = CategoricalDtype(categories, ordered) return cast(CategoricalDtype, dtype) def construct_from_string(cls, string: str_type) -> CategoricalDtype: """ Construct a CategoricalDtype from a string. Parameters ---------- string : str Must be the string "category" in order to be successfully constructed. Returns ------- CategoricalDtype Instance of the dtype. Raises ------ TypeError If a CategoricalDtype cannot be constructed from the input. """ if not isinstance(string, str): raise TypeError( f"'construct_from_string' expects a string, got {type(string)}" ) if string != cls.name: raise TypeError(f"Cannot construct a 'CategoricalDtype' from '{string}'") # need ordered=None to ensure that operations specifying dtype="category" don't # override the ordered value for existing categoricals return cls(ordered=None) def _finalize(self, categories, ordered: Ordered, fastpath: bool = False) -> None: if ordered is not None: self.validate_ordered(ordered) if categories is not None: categories = self.validate_categories(categories, fastpath=fastpath) self._categories = categories self._ordered = ordered def __setstate__(self, state: MutableMapping[str_type, Any]) -> None: # for pickle compat. __get_state__ is defined in the # PandasExtensionDtype superclass and uses the public properties to # pickle -> need to set the settable private ones here (see GH26067) self._categories = state.pop("categories", None) self._ordered = state.pop("ordered", False) def __hash__(self) -> int: # _hash_categories returns a uint64, so use the negative # space for when we have unknown categories to avoid a conflict if self.categories is None: if self.ordered: return -1 else: return -2 # We *do* want to include the real self.ordered here return int(self._hash_categories) def __eq__(self, other: Any) -> bool: """ Rules for CDT equality: 1) Any CDT is equal to the string 'category' 2) Any CDT is equal to itself 3) Any CDT is equal to a CDT with categories=None regardless of ordered 4) A CDT with ordered=True is only equal to another CDT with ordered=True and identical categories in the same order 5) A CDT with ordered={False, None} is only equal to another CDT with ordered={False, None} and identical categories, but same order is not required. There is no distinction between False/None. 
6) Any other comparison returns False """ if isinstance(other, str): return other == self.name elif other is self: return True elif not (hasattr(other, "ordered") and hasattr(other, "categories")): return False elif self.categories is None or other.categories is None: # For non-fully-initialized dtypes, these are only equal to # - the string "category" (handled above) # - other CategoricalDtype with categories=None return self.categories is other.categories elif self.ordered or other.ordered: # At least one has ordered=True; equal if both have ordered=True # and the same values for categories in the same order. return (self.ordered == other.ordered) and self.categories.equals( other.categories ) else: # Neither has ordered=True; equal if both have the same categories, # but same order is not necessary. There is no distinction between # ordered=False and ordered=None: CDT(., False) and CDT(., None) # will be equal if they have the same categories. left = self.categories right = other.categories # GH#36280 the ordering of checks here is for performance if not left.dtype == right.dtype: return False if len(left) != len(right): return False if self.categories.equals(other.categories): # Check and see if they happen to be identical categories return True if left.dtype != object: # Faster than calculating hash indexer = left.get_indexer(right) # Because left and right have the same length and are unique, # `indexer` not having any -1s implies that there is a # bijection between `left` and `right`. return (indexer != -1).all() # With object-dtype we need a comparison that identifies # e.g. int(2) as distinct from float(2) return hash(self) == hash(other) def __repr__(self) -> str_type: if self.categories is None: data = "None" else: data = self.categories._format_data(name=type(self).__name__) if data is None: # self.categories is RangeIndex data = str(self.categories._range) data = data.rstrip(", ") return f"CategoricalDtype(categories={data}, ordered={self.ordered})" def _hash_categories(self) -> int: from pandas.core.util.hashing import ( combine_hash_arrays, hash_array, hash_tuples, ) categories = self.categories ordered = self.ordered if len(categories) and isinstance(categories[0], tuple): # assumes if any individual category is a tuple, then all our. ATM # I don't really want to support just some of the categories being # tuples. cat_list = list(categories) # breaks if a np.array of categories cat_array = hash_tuples(cat_list) else: if categories.dtype == "O" and len({type(x) for x in categories}) != 1: # TODO: hash_array doesn't handle mixed types. It casts # everything to a str first, which means we treat # {'1', '2'} the same as {'1', 2} # find a better solution hashed = hash((tuple(categories), ordered)) return hashed if DatetimeTZDtype.is_dtype(categories.dtype): # Avoid future warning. categories = categories.view("datetime64[ns]") cat_array = hash_array(np.asarray(categories), categorize=False) if ordered: cat_array = np.vstack( [cat_array, np.arange(len(cat_array), dtype=cat_array.dtype)] ) else: cat_array = np.array([cat_array]) combined_hashed = combine_hash_arrays(iter(cat_array), num_items=len(cat_array)) return np.bitwise_xor.reduce(combined_hashed) def construct_array_type(cls) -> type_t[Categorical]: """ Return the array type associated with this dtype. Returns ------- type """ from pandas import Categorical return Categorical def validate_ordered(ordered: Ordered) -> None: """ Validates that we have a valid ordered parameter. 
If it is not a boolean, a TypeError will be raised. Parameters ---------- ordered : object The parameter to be verified. Raises ------ TypeError If 'ordered' is not a boolean. """ if not is_bool(ordered): raise TypeError("'ordered' must either be 'True' or 'False'") def validate_categories(categories, fastpath: bool = False) -> Index: """ Validates that we have good categories Parameters ---------- categories : array-like fastpath : bool Whether to skip nan and uniqueness checks Returns ------- categories : Index """ from pandas.core.indexes.base import Index if not fastpath and not is_list_like(categories): raise TypeError( f"Parameter 'categories' must be list-like, was {repr(categories)}" ) if not isinstance(categories, ABCIndex): categories = Index._with_infer(categories, tupleize_cols=False) if not fastpath: if categories.hasnans: raise ValueError("Categorical categories cannot be null") if not categories.is_unique: raise ValueError("Categorical categories must be unique") if isinstance(categories, ABCCategoricalIndex): categories = categories.categories return categories def update_dtype(self, dtype: str_type | CategoricalDtype) -> CategoricalDtype: """ Returns a CategoricalDtype with categories and ordered taken from dtype if specified, otherwise falling back to self if unspecified Parameters ---------- dtype : CategoricalDtype Returns ------- new_dtype : CategoricalDtype """ if isinstance(dtype, str) and dtype == "category": # dtype='category' should not change anything return self elif not self.is_dtype(dtype): raise ValueError( f"a CategoricalDtype must be passed to perform an update, " f"got {repr(dtype)}" ) else: # from here on, dtype is a CategoricalDtype dtype = cast(CategoricalDtype, dtype) # update categories/ordered unless they've been explicitly passed as None new_categories = ( dtype.categories if dtype.categories is not None else self.categories ) new_ordered = dtype.ordered if dtype.ordered is not None else self.ordered return CategoricalDtype(new_categories, new_ordered) def categories(self) -> Index: """ An ``Index`` containing the unique categories allowed. """ return self._categories def ordered(self) -> Ordered: """ Whether the categories have an ordered relationship. """ return self._ordered def _is_boolean(self) -> bool: from pandas.core.dtypes.common import is_bool_dtype return is_bool_dtype(self.categories) def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: from pandas.core.arrays.sparse import SparseDtype # check if we have all categorical dtype with identical categories if all(isinstance(x, CategoricalDtype) for x in dtypes): first = dtypes[0] if all(first == other for other in dtypes[1:]): return first # special case non-initialized categorical # TODO we should figure out the expected return value in general non_init_cats = [ isinstance(x, CategoricalDtype) and x.categories is None for x in dtypes ] if all(non_init_cats): return self elif any(non_init_cats): return None # categorical is aware of Sparse -> extract sparse subdtypes dtypes = [x.subtype if isinstance(x, SparseDtype) else x for x in dtypes] # extract the categories' dtype non_cat_dtypes = [ x.categories.dtype if isinstance(x, CategoricalDtype) else x for x in dtypes ] # TODO should categorical always give an answer? from pandas.core.dtypes.cast import find_common_type return find_common_type(non_cat_dtypes) class DatetimeTZDtype(PandasExtensionDtype): """ An ExtensionDtype for timezone-aware datetime data. **This is not an actual numpy dtype**, but a duck type. 
Parameters ---------- unit : str, default "ns" The precision of the datetime data. Currently limited to ``"ns"``. tz : str, int, or datetime.tzinfo The timezone. Attributes ---------- unit tz Methods ------- None Raises ------ pytz.UnknownTimeZoneError When the requested timezone cannot be found. Examples -------- >>> pd.DatetimeTZDtype(tz='UTC') datetime64[ns, UTC] >>> pd.DatetimeTZDtype(tz='dateutil/US/Central') datetime64[ns, tzfile('/usr/share/zoneinfo/US/Central')] """ type: type[Timestamp] = Timestamp kind: str_type = "M" num = 101 base = np.dtype("M8[ns]") # TODO: depend on reso? _metadata = ("unit", "tz") _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]") _cache_dtypes: dict[str_type, PandasExtensionDtype] = {} def na_value(self) -> NaTType: return NaT # error: Signature of "str" incompatible with supertype "PandasExtensionDtype" def str(self) -> str: # type: ignore[override] return f"|M8[{self.unit}]" def __init__(self, unit: str_type | DatetimeTZDtype = "ns", tz=None) -> None: if isinstance(unit, DatetimeTZDtype): # error: "str" has no attribute "tz" unit, tz = unit.unit, unit.tz # type: ignore[attr-defined] if unit != "ns": if isinstance(unit, str) and tz is None: # maybe a string like datetime64[ns, tz], which we support for # now. result = type(self).construct_from_string(unit) unit = result.unit tz = result.tz msg = ( f"Passing a dtype alias like 'datetime64[ns, {tz}]' " "to DatetimeTZDtype is no longer supported. Use " "'DatetimeTZDtype.construct_from_string()' instead." ) raise ValueError(msg) if unit not in ["s", "ms", "us", "ns"]: raise ValueError("DatetimeTZDtype only supports s, ms, us, ns units") if tz: tz = timezones.maybe_get_tz(tz) tz = timezones.tz_standardize(tz) elif tz is not None: raise pytz.UnknownTimeZoneError(tz) if tz is None: raise TypeError("A 'tz' is required.") self._unit = unit self._tz = tz def _creso(self) -> int: """ The NPY_DATETIMEUNIT corresponding to this dtype's resolution. """ return abbrev_to_npy_unit(self.unit) def unit(self) -> str_type: """ The precision of the datetime data. """ return self._unit def tz(self) -> tzinfo: """ The timezone. """ return self._tz def construct_array_type(cls) -> type_t[DatetimeArray]: """ Return the array type associated with this dtype. Returns ------- type """ from pandas.core.arrays import DatetimeArray return DatetimeArray def construct_from_string(cls, string: str_type) -> DatetimeTZDtype: """ Construct a DatetimeTZDtype from a string. Parameters ---------- string : str The string alias for this DatetimeTZDtype. Should be formatted like ``datetime64[ns, <tz>]``, where ``<tz>`` is the timezone name. Examples -------- >>> DatetimeTZDtype.construct_from_string('datetime64[ns, UTC]') datetime64[ns, UTC] """ if not isinstance(string, str): raise TypeError( f"'construct_from_string' expects a string, got {type(string)}" ) msg = f"Cannot construct a 'DatetimeTZDtype' from '{string}'" match = cls._match.match(string) if match: d = match.groupdict() try: return cls(unit=d["unit"], tz=d["tz"]) except (KeyError, TypeError, ValueError) as err: # KeyError if maybe_get_tz tries and fails to get a # pytz timezone (actually pytz.UnknownTimeZoneError). 
# TypeError if we pass a nonsense tz; # ValueError if we pass a unit other than "ns" raise TypeError(msg) from err raise TypeError(msg) def __str__(self) -> str_type: return f"datetime64[{self.unit}, {self.tz}]" def name(self) -> str_type: """A string representation of the dtype.""" return str(self) def __hash__(self) -> int: # make myself hashable # TODO: update this. return hash(str(self)) def __eq__(self, other: Any) -> bool: if isinstance(other, str): if other.startswith("M8["): other = f"datetime64[{other[3:]}" return other == self.name return ( isinstance(other, DatetimeTZDtype) and self.unit == other.unit and tz_compare(self.tz, other.tz) ) def __setstate__(self, state) -> None: # for pickle compat. __get_state__ is defined in the # PandasExtensionDtype superclass and uses the public properties to # pickle -> need to set the settable private ones here (see GH26067) self._tz = state["tz"] self._unit = state["unit"] The provided code snippet includes necessary dependencies for implementing the `maybe_cast_pointwise_result` function. Write a Python function `def maybe_cast_pointwise_result( result: ArrayLike, dtype: DtypeObj, numeric_only: bool = False, same_dtype: bool = True, ) -> ArrayLike` to solve the following problem: Try casting result of a pointwise operation back to the original dtype if appropriate. Parameters ---------- result : array-like Result to cast. dtype : np.dtype or ExtensionDtype Dtype of the input Series from which result was calculated. numeric_only : bool, default False Whether to cast only numerics, or datetimes as well. same_dtype : bool, default True Specify dtype when calling _from_sequence. Returns ------- result : array-like result, possibly cast to the dtype. Here is the function: def maybe_cast_pointwise_result( result: ArrayLike, dtype: DtypeObj, numeric_only: bool = False, same_dtype: bool = True, ) -> ArrayLike: """ Try casting result of a pointwise operation back to the original dtype if appropriate. Parameters ---------- result : array-like Result to cast. dtype : np.dtype or ExtensionDtype Dtype of the input Series from which result was calculated. numeric_only : bool, default False Whether to cast only numerics, or datetimes as well. same_dtype : bool, default True Specify dtype when calling _from_sequence. Returns ------- result : array-like result, possibly cast to the dtype. """ assert not is_scalar(result) if isinstance(dtype, ExtensionDtype): if not isinstance(dtype, (CategoricalDtype, DatetimeTZDtype)): # TODO: avoid this special-casing # We have to special case categorical so as not to upcast # things like counts back to categorical cls = dtype.construct_array_type() if same_dtype: result = maybe_cast_to_extension_array(cls, result, dtype=dtype) else: result = maybe_cast_to_extension_array(cls, result) elif (numeric_only and is_numeric_dtype(dtype)) or not numeric_only: result = maybe_downcast_to_dtype(result, dtype) return result
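A hedged usage sketch (my illustration; it assumes an Int64 masked dtype, whose construct_array_type() round-trips through IntegerArray._from_sequence):

import numpy as np
import pandas as pd

orig = pd.Series([1, 2, 3], dtype="Int64")
# A Python-level pointwise op typically yields an object ndarray...
raw = np.array([2, 4, 6], dtype=object)
# ...which this helper tries to cast back to the original extension dtype.
result = maybe_cast_pointwise_result(raw, orig.dtype)
print(result.dtype)  # Int64, if _from_sequence succeeds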
Try casting result of a pointwise operation back to the original dtype if appropriate. Parameters ---------- result : array-like Result to cast. dtype : np.dtype or ExtensionDtype Dtype of the input Series from which result was calculated. numeric_only : bool, default False Whether to cast only numerics, or datetimes as well. same_dtype : bool, default True Specify dtype when calling _from_sequence. Returns ------- result : array-like result, possibly cast to the dtype.
173,044
from __future__ import annotations import datetime as dt import functools from typing import ( TYPE_CHECKING, Any, Literal, Sized, TypeVar, cast, overload, ) import warnings import numpy as np from pandas._libs import lib from pandas._libs.missing import ( NA, NAType, checknull, ) from pandas._libs.tslibs import ( NaT, OutOfBoundsDatetime, OutOfBoundsTimedelta, Timedelta, Timestamp, get_unit_from_dtype, is_supported_unit, ) from pandas._libs.tslibs.timedeltas import array_to_timedelta64 from pandas._typing import ( ArrayLike, Dtype, DtypeObj, NumpyIndexT, Scalar, npt, ) from pandas.errors import ( IntCastingNaNError, LossySetitemError, ) from pandas.core.dtypes.common import ( ensure_int8, ensure_int16, ensure_int32, ensure_int64, ensure_object, ensure_str, is_bool, is_bool_dtype, is_complex, is_complex_dtype, is_datetime64_dtype, is_extension_array_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_signed_integer_dtype, is_string_dtype, is_timedelta64_dtype, is_unsigned_integer_dtype, pandas_dtype as pandas_dtype_func, ) from pandas.core.dtypes.dtypes import ( BaseMaskedDtype, CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PandasExtensionDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ( ABCExtensionArray, ABCIndex, ABCSeries, ) from pandas.core.dtypes.inference import is_list_like from pandas.core.dtypes.missing import ( is_valid_na_for_dtype, isna, na_value_for_dtype, notna, ) DtypeObj = Union[np.dtype, "ExtensionDtype"] The provided code snippet includes necessary dependencies for implementing the `invalidate_string_dtypes` function. Write a Python function `def invalidate_string_dtypes(dtype_set: set[DtypeObj]) -> None` to solve the following problem: Change string-like dtypes to object for ``DataFrame.select_dtypes()``. Here is the function: def invalidate_string_dtypes(dtype_set: set[DtypeObj]) -> None: """ Change string-like dtypes to object for ``DataFrame.select_dtypes()``. """ # error: Argument 1 to <set> has incompatible type "Type[generic]"; expected # "Union[dtype[Any], ExtensionDtype, None]" # error: Argument 2 to <set> has incompatible type "Type[generic]"; expected # "Union[dtype[Any], ExtensionDtype, None]" non_string_dtypes = dtype_set - { np.dtype("S").type, # type: ignore[arg-type] np.dtype("<U").type, # type: ignore[arg-type] } if non_string_dtypes != dtype_set: raise TypeError("string dtypes are not allowed, use 'object' instead")
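A small sketch of both outcomes (my addition, assuming the function above):

import numpy as np

# Non-string dtypes pass through silently.
invalidate_string_dtypes({np.dtype("int64"), np.dtype("float64")})

# A string dtype's scalar type triggers the error.
try:
    invalidate_string_dtypes({np.dtype("<U").type})
except TypeError as err:
    print(err)  # string dtypes are not allowed, use 'object' instead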
Change string-like dtypes to object for ``DataFrame.select_dtypes()``.
173,045
from __future__ import annotations import datetime as dt import functools from typing import ( TYPE_CHECKING, Any, Literal, Sized, TypeVar, cast, overload, ) import warnings import numpy as np from pandas._libs import lib from pandas._libs.missing import ( NA, NAType, checknull, ) from pandas._libs.tslibs import ( NaT, OutOfBoundsDatetime, OutOfBoundsTimedelta, Timedelta, Timestamp, get_unit_from_dtype, is_supported_unit, ) from pandas._libs.tslibs.timedeltas import array_to_timedelta64 from pandas._typing import ( ArrayLike, Dtype, DtypeObj, NumpyIndexT, Scalar, npt, ) from pandas.errors import ( IntCastingNaNError, LossySetitemError, ) from pandas.core.dtypes.common import ( ensure_int8, ensure_int16, ensure_int32, ensure_int64, ensure_object, ensure_str, is_bool, is_bool_dtype, is_complex, is_complex_dtype, is_datetime64_dtype, is_extension_array_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_signed_integer_dtype, is_string_dtype, is_timedelta64_dtype, is_unsigned_integer_dtype, pandas_dtype as pandas_dtype_func, ) from pandas.core.dtypes.dtypes import ( BaseMaskedDtype, CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PandasExtensionDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ( ABCExtensionArray, ABCIndex, ABCSeries, ) from pandas.core.dtypes.inference import is_list_like from pandas.core.dtypes.missing import ( is_valid_na_for_dtype, isna, na_value_for_dtype, notna, ) Literal: _SpecialForm = ... ArrayLike = Union["ExtensionArray", np.ndarray] DtypeObj = Union[np.dtype, "ExtensionDtype"] def is_object_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of the object dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the object dtype. Examples -------- >>> from pandas.api.types import is_object_dtype >>> is_object_dtype(object) True >>> is_object_dtype(int) False >>> is_object_dtype(np.array([], dtype=object)) True >>> is_object_dtype(np.array([], dtype=int)) False >>> is_object_dtype([1, 2, 3]) False """ return _is_dtype_type(arr_or_dtype, classes(np.object_)) def is_string_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of the string dtype. If an array is passed with an object dtype, the elements must be inferred as strings. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of the string dtype. Examples -------- >>> is_string_dtype(str) True >>> is_string_dtype(object) True >>> is_string_dtype(int) False >>> is_string_dtype(np.array(['a', 'b'])) True >>> is_string_dtype(pd.Series([1, 2])) False >>> is_string_dtype(pd.Series([1, 2], dtype=object)) False """ if hasattr(arr_or_dtype, "dtype") and get_dtype(arr_or_dtype).kind == "O": return is_all_strings(arr_or_dtype) def condition(dtype) -> bool: if is_string_or_object_np_dtype(dtype): return True try: return dtype == "string" except TypeError: return False return _is_dtype(arr_or_dtype, condition) def is_integer_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of an integer dtype. Unlike in `is_any_int_dtype`, timedelta64 instances will return False. The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered as integer by this function. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. 
Returns ------- boolean Whether or not the array or dtype is of an integer dtype and not an instance of timedelta64. Examples -------- >>> is_integer_dtype(str) False >>> is_integer_dtype(int) True >>> is_integer_dtype(float) False >>> is_integer_dtype(np.uint64) True >>> is_integer_dtype('int8') True >>> is_integer_dtype('Int8') True >>> is_integer_dtype(pd.Int8Dtype) True >>> is_integer_dtype(np.datetime64) False >>> is_integer_dtype(np.timedelta64) False >>> is_integer_dtype(np.array(['a', 'b'])) False >>> is_integer_dtype(pd.Series([1, 2])) True >>> is_integer_dtype(np.array([], dtype=np.timedelta64)) False >>> is_integer_dtype(pd.Index([1, 2.])) # float False """ return _is_dtype_type( arr_or_dtype, classes_and_not_datetimelike(np.integer) ) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "iu" ) def is_numeric_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a numeric dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a numeric dtype. Examples -------- >>> from pandas.api.types import is_numeric_dtype >>> is_numeric_dtype(str) False >>> is_numeric_dtype(int) True >>> is_numeric_dtype(float) True >>> is_numeric_dtype(np.uint64) True >>> is_numeric_dtype(np.datetime64) False >>> is_numeric_dtype(np.timedelta64) False >>> is_numeric_dtype(np.array(['a', 'b'])) False >>> is_numeric_dtype(pd.Series([1, 2])) True >>> is_numeric_dtype(pd.Index([1, 2.])) True >>> is_numeric_dtype(np.array([], dtype=np.timedelta64)) False """ return _is_dtype_type( arr_or_dtype, classes_and_not_datetimelike(np.number, np.bool_) ) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ._is_numeric ) def is_bool_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a boolean dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a boolean dtype. Notes ----- An ExtensionArray is considered boolean when the ``_is_boolean`` attribute is set to True. Examples -------- >>> from pandas.api.types import is_bool_dtype >>> is_bool_dtype(str) False >>> is_bool_dtype(int) False >>> is_bool_dtype(bool) True >>> is_bool_dtype(np.bool_) True >>> is_bool_dtype(np.array(['a', 'b'])) False >>> is_bool_dtype(pd.Series([1, 2])) False >>> is_bool_dtype(np.array([True, False])) True >>> is_bool_dtype(pd.Categorical([True, False])) True >>> is_bool_dtype(pd.arrays.SparseArray([True, False])) True """ if arr_or_dtype is None: return False try: dtype = get_dtype(arr_or_dtype) except (TypeError, ValueError): return False if isinstance(dtype, CategoricalDtype): arr_or_dtype = dtype.categories # now we use the special definition for Index if isinstance(arr_or_dtype, ABCIndex): # Allow Index[object] that is all-bools or Index["boolean"] return arr_or_dtype.inferred_type == "boolean" elif isinstance(dtype, ExtensionDtype): return getattr(dtype, "_is_boolean", False) return issubclass(dtype.type, np.bool_) class PandasExtensionDtype(ExtensionDtype): """ A np.dtype duck-typed class, suitable for holding a custom dtype. THIS IS NOT A REAL NUMPY DTYPE """ type: Any kind: Any # The Any type annotations above are here only because mypy seems to have a # problem dealing with multiple inheritance from PandasExtensionDtype # and ExtensionDtype's @properties in the subclasses below. 
The kind and # type variables in those subclasses are explicitly typed below. subdtype = None str: str_type num = 100 shape: tuple[int, ...] = () itemsize = 8 base: DtypeObj | None = None isbuiltin = 0 isnative = 0 _cache_dtypes: dict[str_type, PandasExtensionDtype] = {} def __repr__(self) -> str_type: """ Return a string representation for a particular object. """ return str(self) def __hash__(self) -> int: raise NotImplementedError("sub-classes should implement an __hash__ method") def __getstate__(self) -> dict[str_type, Any]: # pickle support; we don't want to pickle the cache return {k: getattr(self, k, None) for k in self._metadata} def reset_cache(cls) -> None: """clear the cache""" cls._cache_dtypes = {} class BaseMaskedDtype(ExtensionDtype): """ Base class for dtypes for BaseMaskedArray subclasses. """ name: str base = None type: type def na_value(self) -> libmissing.NAType: return libmissing.NA def numpy_dtype(self) -> np.dtype: """Return an instance of our numpy dtype""" return np.dtype(self.type) def kind(self) -> str: return self.numpy_dtype.kind def itemsize(self) -> int: """Return the number of bytes in this dtype""" return self.numpy_dtype.itemsize def construct_array_type(cls) -> type_t[BaseMaskedArray]: """ Return the array type associated with this dtype. Returns ------- type """ raise NotImplementedError def from_numpy_dtype(cls, dtype: np.dtype) -> BaseMaskedDtype: """ Construct the MaskedDtype corresponding to the given numpy dtype. """ if dtype.kind == "b": from pandas.core.arrays.boolean import BooleanDtype return BooleanDtype() elif dtype.kind in ["i", "u"]: from pandas.core.arrays.integer import INT_STR_TO_DTYPE return INT_STR_TO_DTYPE[dtype.name] elif dtype.kind == "f": from pandas.core.arrays.floating import FLOAT_STR_TO_DTYPE return FLOAT_STR_TO_DTYPE[dtype.name] else: raise NotImplementedError(dtype) def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: # We unwrap any masked dtypes, find the common dtype we would use # for that, then re-mask the result. from pandas.core.dtypes.cast import find_common_type new_dtype = find_common_type( [ dtype.numpy_dtype if isinstance(dtype, BaseMaskedDtype) else dtype for dtype in dtypes ] ) if not isinstance(new_dtype, np.dtype): # If we ever support e.g. Masked[DatetimeArray] then this will change return None try: return type(self).from_numpy_dtype(new_dtype) except (KeyError, NotImplementedError): return None def notna(obj: Scalar) -> bool: ... def notna( obj: ArrayLike | Index | list, ) -> npt.NDArray[np.bool_]: ... def notna(obj: NDFrameT) -> NDFrameT: ... def notna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]: ... def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: ... def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: """ Detect non-missing values for an array-like object. This function takes a scalar or array-like object and indicates whether values are valid (not missing, which is ``NaN`` in numeric arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). Parameters ---------- obj : array-like or object value Object to check for *not* null or *non*-missing values. Returns ------- bool or array-like of bool For scalar input, returns a scalar boolean. For array input, returns an array of boolean indicating whether each corresponding element is valid. See Also -------- isna : Boolean inverse of pandas.notna. Series.notna : Detect valid values in a Series. DataFrame.notna : Detect valid values in a DataFrame. 
Index.notna : Detect valid values in an Index. Examples -------- Scalar arguments (including strings) result in a scalar boolean. >>> pd.notna('dog') True >>> pd.notna(pd.NA) False >>> pd.notna(np.nan) False ndarrays result in an ndarray of booleans. >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) >>> array array([[ 1., nan, 3.], [ 4., 5., nan]]) >>> pd.notna(array) array([[ True, False, True], [ True, True, False]]) For indexes, an ndarray of booleans is returned. >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, ... "2017-07-08"]) >>> index DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], dtype='datetime64[ns]', freq=None) >>> pd.notna(index) array([ True, True, False, True]) For Series and DataFrame, the same type is returned, containing booleans. >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df 0 1 2 0 ant bee cat 1 dog None fly >>> pd.notna(df) 0 1 2 0 True True True 1 True False True >>> pd.notna(df[1]) 0 True 1 False Name: 1, dtype: bool """ res = isna(obj) if isinstance(res, bool): return not res return ~res INT_STR_TO_DTYPE: dict[str, IntegerDtype] = { "int8": Int8Dtype(), "int16": Int16Dtype(), "int32": Int32Dtype(), "int64": Int64Dtype(), "uint8": UInt8Dtype(), "uint16": UInt16Dtype(), "uint32": UInt32Dtype(), "uint64": UInt64Dtype(), } FLOAT_STR_TO_DTYPE = { "float32": Float32Dtype(), "float64": Float64Dtype(), } def to_pyarrow_type( dtype: ArrowDtype | pa.DataType | Dtype | None, ) -> pa.DataType | None: """ Convert dtype to a pyarrow type instance. """ if isinstance(dtype, ArrowDtype): return dtype.pyarrow_dtype elif isinstance(dtype, pa.DataType): return dtype elif dtype: try: # Accepts python types too # Doesn't handle all numpy types return pa.from_numpy_dtype(dtype) except pa.ArrowNotImplementedError: pass return None class ArrowDtype(StorageExtensionDtype): """ An ExtensionDtype for PyArrow data types. .. warning:: ArrowDtype is considered experimental. The implementation and parts of the API may change without warning. While most ``dtype`` arguments can accept the "string" constructor, e.g. ``"int64[pyarrow]"``, ArrowDtype is useful if the data type contains parameters like ``pyarrow.timestamp``. Parameters ---------- pyarrow_dtype : pa.DataType An instance of a `pyarrow.DataType <https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions>`__. Attributes ---------- pyarrow_dtype Methods ------- None Returns ------- ArrowDtype Examples -------- >>> import pyarrow as pa >>> pd.ArrowDtype(pa.int64()) int64[pyarrow] Types with parameters must be constructed with ArrowDtype. >>> pd.ArrowDtype(pa.timestamp("s", tz="America/New_York")) timestamp[s, tz=America/New_York][pyarrow] >>> pd.ArrowDtype(pa.list_(pa.int64())) list<item: int64>[pyarrow] """ # noqa: E501 _metadata = ("storage", "pyarrow_dtype") # type: ignore[assignment] def __init__(self, pyarrow_dtype: pa.DataType) -> None: super().__init__("pyarrow") if pa_version_under7p0: raise ImportError("pyarrow>=7.0.0 is required for ArrowDtype") if not isinstance(pyarrow_dtype, pa.DataType): raise ValueError( f"pyarrow_dtype ({pyarrow_dtype}) must be an instance " f"of a pyarrow.DataType. Got {type(pyarrow_dtype)} instead." ) self.pyarrow_dtype = pyarrow_dtype def __repr__(self) -> str: return self.name def type(self): """ Returns associated scalar type. 
""" pa_type = self.pyarrow_dtype if pa.types.is_integer(pa_type): return int elif pa.types.is_floating(pa_type): return float elif pa.types.is_string(pa_type) or pa.types.is_large_string(pa_type): return str elif ( pa.types.is_binary(pa_type) or pa.types.is_fixed_size_binary(pa_type) or pa.types.is_large_binary(pa_type) ): return bytes elif pa.types.is_boolean(pa_type): return bool elif pa.types.is_duration(pa_type): if pa_type.unit == "ns": return Timedelta else: return timedelta elif pa.types.is_timestamp(pa_type): if pa_type.unit == "ns": return Timestamp else: return datetime elif pa.types.is_date(pa_type): return date elif pa.types.is_time(pa_type): return time elif pa.types.is_decimal(pa_type): return Decimal elif pa.types.is_dictionary(pa_type): # TODO: Potentially change this & CategoricalDtype.type to # something more representative of the scalar return CategoricalDtypeType elif pa.types.is_list(pa_type) or pa.types.is_large_list(pa_type): return list elif pa.types.is_map(pa_type): return dict elif pa.types.is_null(pa_type): # TODO: None? pd.NA? pa.null? return type(pa_type) else: raise NotImplementedError(pa_type) def name(self) -> str: # type: ignore[override] """ A string identifying the data type. """ return f"{str(self.pyarrow_dtype)}[{self.storage}]" def numpy_dtype(self) -> np.dtype: """Return an instance of the related numpy dtype""" if pa.types.is_string(self.pyarrow_dtype): # pa.string().to_pandas_dtype() = object which we don't want return np.dtype(str) try: return np.dtype(self.pyarrow_dtype.to_pandas_dtype()) except (NotImplementedError, TypeError): return np.dtype(object) def kind(self) -> str: if pa.types.is_timestamp(self.pyarrow_dtype): # To mirror DatetimeTZDtype return "M" return self.numpy_dtype.kind def itemsize(self) -> int: """Return the number of bytes in this dtype""" return self.numpy_dtype.itemsize def construct_array_type(cls) -> type_t[ArrowExtensionArray]: """ Return the array type associated with this dtype. Returns ------- type """ from pandas.core.arrays.arrow import ArrowExtensionArray return ArrowExtensionArray def construct_from_string(cls, string: str) -> ArrowDtype: """ Construct this type from a string. Parameters ---------- string : str string should follow the format f"{pyarrow_type}[pyarrow]" e.g. int64[pyarrow] """ if not isinstance(string, str): raise TypeError( f"'construct_from_string' expects a string, got {type(string)}" ) if not string.endswith("[pyarrow]"): raise TypeError(f"'{string}' must end with '[pyarrow]'") if string == "string[pyarrow]": # Ensure Registry.find skips ArrowDtype to use StringDtype instead raise TypeError("string[pyarrow] should be constructed by StringDtype") base_type = string[:-9] # get rid of "[pyarrow]" try: pa_dtype = pa.type_for_alias(base_type) except ValueError as err: has_parameters = re.search(r"[\[\(].*[\]\)]", base_type) if has_parameters: # Fallback to try common temporal types try: return cls._parse_temporal_dtype_string(base_type) except (NotImplementedError, ValueError): # Fall through to raise with nice exception message below pass raise NotImplementedError( "Passing pyarrow type specific parameters " f"({has_parameters.group()}) in the string is not supported. " "Please construct an ArrowDtype object with a pyarrow_dtype " "instance with specific parameters." 
) from err raise TypeError(f"'{base_type}' is not a valid pyarrow data type.") from err return cls(pa_dtype) # TODO(arrow#33642): This can be removed once supported by pyarrow def _parse_temporal_dtype_string(cls, string: str) -> ArrowDtype: """ Construct a temporal ArrowDtype from string. """ # we assume # 1) "[pyarrow]" has already been stripped from the end of our string. # 2) we know "[" is present head, tail = string.split("[", 1) if not tail.endswith("]"): raise ValueError tail = tail[:-1] if head == "timestamp": assert "," in tail # otherwise type_for_alias should work unit, tz = tail.split(",", 1) unit = unit.strip() tz = tz.strip() if tz.startswith("tz="): tz = tz[3:] pa_type = pa.timestamp(unit, tz=tz) dtype = cls(pa_type) return dtype raise NotImplementedError(string) def _is_numeric(self) -> bool: """ Whether columns with this dtype should be considered numeric. """ # TODO: pa.types.is_boolean? return ( pa.types.is_integer(self.pyarrow_dtype) or pa.types.is_floating(self.pyarrow_dtype) or pa.types.is_decimal(self.pyarrow_dtype) ) def _is_boolean(self) -> bool: """ Whether this dtype should be considered boolean. """ return pa.types.is_boolean(self.pyarrow_dtype) def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: # We unwrap any masked dtypes, find the common dtype we would use # for that, then re-mask the result. # Mirrors BaseMaskedDtype from pandas.core.dtypes.cast import find_common_type new_dtype = find_common_type( [ dtype.numpy_dtype if isinstance(dtype, ArrowDtype) else dtype for dtype in dtypes ] ) if not isinstance(new_dtype, np.dtype): return None try: pa_dtype = pa.from_numpy_dtype(new_dtype) return type(self)(pa_dtype) except NotImplementedError: return None def __from_arrow__(self, array: pa.Array | pa.ChunkedArray): """ Construct IntegerArray/FloatingArray from pyarrow Array/ChunkedArray. """ array_class = self.construct_array_type() return array_class(array) class StringDtype(StorageExtensionDtype): """ Extension dtype for string data. .. warning:: StringDtype is considered experimental. The implementation and parts of the API may change without warning. Parameters ---------- storage : {"python", "pyarrow"}, optional If not given, the value of ``pd.options.mode.string_storage``. Attributes ---------- None Methods ------- None Examples -------- >>> pd.StringDtype() string[python] >>> pd.StringDtype(storage="pyarrow") string[pyarrow] """ name = "string" #: StringDtype().na_value uses pandas.NA def na_value(self) -> libmissing.NAType: return libmissing.NA _metadata = ("storage",) def __init__(self, storage=None) -> None: if storage is None: storage = get_option("mode.string_storage") if storage not in {"python", "pyarrow"}: raise ValueError( f"Storage must be 'python' or 'pyarrow'. Got {storage} instead." ) if storage == "pyarrow" and pa_version_under7p0: raise ImportError( "pyarrow>=7.0.0 is required for PyArrow backed StringArray." ) self.storage = storage def type(self) -> type[str]: return str def construct_from_string(cls, string): """ Construct a StringDtype from a string. Parameters ---------- string : str The type of the name. The storage type will be taking from `string`. 
Valid options and their storage types are ========================== ============================================== string result storage ========================== ============================================== ``'string'`` pd.options.mode.string_storage, default python ``'string[python]'`` python ``'string[pyarrow]'`` pyarrow ========================== ============================================== Returns ------- StringDtype Raise ----- TypeError If the string is not a valid option. """ if not isinstance(string, str): raise TypeError( f"'construct_from_string' expects a string, got {type(string)}" ) if string == "string": return cls() elif string == "string[python]": return cls(storage="python") elif string == "string[pyarrow]": return cls(storage="pyarrow") else: raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'") # https://github.com/pandas-dev/pandas/issues/36126 # error: Signature of "construct_array_type" incompatible with supertype # "ExtensionDtype" def construct_array_type( # type: ignore[override] self, ) -> type_t[BaseStringArray]: """ Return the array type associated with this dtype. Returns ------- type """ from pandas.core.arrays.string_arrow import ArrowStringArray if self.storage == "python": return StringArray else: return ArrowStringArray def __from_arrow__( self, array: pyarrow.Array | pyarrow.ChunkedArray ) -> BaseStringArray: """ Construct StringArray from pyarrow Array/ChunkedArray. """ if self.storage == "pyarrow": from pandas.core.arrays.string_arrow import ArrowStringArray return ArrowStringArray(array) else: import pyarrow if isinstance(array, pyarrow.Array): chunks = [array] else: # pyarrow.ChunkedArray chunks = array.chunks results = [] for arr in chunks: # using _from_sequence to ensure None is converted to NA str_arr = StringArray._from_sequence(np.array(arr)) results.append(str_arr) if results: return StringArray._concat_same_type(results) else: return StringArray(np.array([], dtype="object")) The provided code snippet includes necessary dependencies for implementing the `convert_dtypes` function. Write a Python function `def convert_dtypes( input_array: ArrayLike, convert_string: bool = True, convert_integer: bool = True, convert_boolean: bool = True, convert_floating: bool = True, infer_objects: bool = False, dtype_backend: Literal["numpy_nullable", "pyarrow"] = "numpy_nullable", ) -> DtypeObj` to solve the following problem: Convert objects to best possible type, and optionally, to types supporting ``pd.NA``. Parameters ---------- input_array : ExtensionArray or np.ndarray convert_string : bool, default True Whether object dtypes should be converted to ``StringDtype()``. convert_integer : bool, default True Whether, if possible, conversion can be done to integer extension types. convert_boolean : bool, defaults True Whether object dtypes should be converted to ``BooleanDtypes()``. convert_floating : bool, defaults True Whether, if possible, conversion can be done to floating extension types. If `convert_integer` is also True, preference will be give to integer dtypes if the floats can be faithfully casted to integers. infer_objects : bool, defaults False Whether to also infer objects to float/int if possible. Is only hit if the object array contains pd.NA. dtype_backend : str, default "numpy_nullable" Nullable dtype implementation to use. 
* "numpy_nullable" returns numpy-backed nullable types * "pyarrow" returns pyarrow-backed nullable types using ``ArrowDtype`` Returns ------- np.dtype, or ExtensionDtype Here is the function: def convert_dtypes( input_array: ArrayLike, convert_string: bool = True, convert_integer: bool = True, convert_boolean: bool = True, convert_floating: bool = True, infer_objects: bool = False, dtype_backend: Literal["numpy_nullable", "pyarrow"] = "numpy_nullable", ) -> DtypeObj: """ Convert objects to best possible type, and optionally, to types supporting ``pd.NA``. Parameters ---------- input_array : ExtensionArray or np.ndarray convert_string : bool, default True Whether object dtypes should be converted to ``StringDtype()``. convert_integer : bool, default True Whether, if possible, conversion can be done to integer extension types. convert_boolean : bool, defaults True Whether object dtypes should be converted to ``BooleanDtypes()``. convert_floating : bool, defaults True Whether, if possible, conversion can be done to floating extension types. If `convert_integer` is also True, preference will be give to integer dtypes if the floats can be faithfully casted to integers. infer_objects : bool, defaults False Whether to also infer objects to float/int if possible. Is only hit if the object array contains pd.NA. dtype_backend : str, default "numpy_nullable" Nullable dtype implementation to use. * "numpy_nullable" returns numpy-backed nullable types * "pyarrow" returns pyarrow-backed nullable types using ``ArrowDtype`` Returns ------- np.dtype, or ExtensionDtype """ inferred_dtype: str | DtypeObj if ( convert_string or convert_integer or convert_boolean or convert_floating ) and isinstance(input_array, np.ndarray): if is_object_dtype(input_array.dtype): inferred_dtype = lib.infer_dtype(input_array) else: inferred_dtype = input_array.dtype if is_string_dtype(inferred_dtype): if not convert_string or inferred_dtype == "bytes": inferred_dtype = input_array.dtype else: inferred_dtype = pandas_dtype_func("string") if convert_integer: target_int_dtype = pandas_dtype_func("Int64") if is_integer_dtype(input_array.dtype): from pandas.core.arrays.integer import INT_STR_TO_DTYPE inferred_dtype = INT_STR_TO_DTYPE.get( input_array.dtype.name, target_int_dtype ) elif is_numeric_dtype(input_array.dtype): # TODO: de-dup with maybe_cast_to_integer_array? arr = input_array[notna(input_array)] if (arr.astype(int) == arr).all(): inferred_dtype = target_int_dtype else: inferred_dtype = input_array.dtype elif ( infer_objects and is_object_dtype(input_array.dtype) and (isinstance(inferred_dtype, str) and inferred_dtype == "integer") ): inferred_dtype = target_int_dtype if convert_floating: if not is_integer_dtype(input_array.dtype) and is_numeric_dtype( input_array.dtype ): from pandas.core.arrays.floating import FLOAT_STR_TO_DTYPE inferred_float_dtype: DtypeObj = FLOAT_STR_TO_DTYPE.get( input_array.dtype.name, pandas_dtype_func("Float64") ) # if we could also convert to integer, check if all floats # are actually integers if convert_integer: # TODO: de-dup with maybe_cast_to_integer_array? 
arr = input_array[notna(input_array)] if (arr.astype(int) == arr).all(): inferred_dtype = pandas_dtype_func("Int64") else: inferred_dtype = inferred_float_dtype else: inferred_dtype = inferred_float_dtype elif ( infer_objects and is_object_dtype(input_array.dtype) and ( isinstance(inferred_dtype, str) and inferred_dtype == "mixed-integer-float" ) ): inferred_dtype = pandas_dtype_func("Float64") if convert_boolean: if is_bool_dtype(input_array.dtype): inferred_dtype = pandas_dtype_func("boolean") elif isinstance(inferred_dtype, str) and inferred_dtype == "boolean": inferred_dtype = pandas_dtype_func("boolean") if isinstance(inferred_dtype, str): # If we couldn't do anything else, then we retain the dtype inferred_dtype = input_array.dtype else: inferred_dtype = input_array.dtype if dtype_backend == "pyarrow": from pandas.core.arrays.arrow.array import to_pyarrow_type from pandas.core.arrays.arrow.dtype import ArrowDtype from pandas.core.arrays.string_ import StringDtype if isinstance(inferred_dtype, PandasExtensionDtype): base_dtype = inferred_dtype.base elif isinstance(inferred_dtype, (BaseMaskedDtype, ArrowDtype)): base_dtype = inferred_dtype.numpy_dtype elif isinstance(inferred_dtype, StringDtype): base_dtype = np.dtype(str) else: # error: Incompatible types in assignment (expression has type # "Union[str, Any, dtype[Any], ExtensionDtype]", # variable has type "Union[dtype[Any], ExtensionDtype, None]") base_dtype = inferred_dtype # type: ignore[assignment] pa_type = to_pyarrow_type(base_dtype) if pa_type is not None: inferred_dtype = ArrowDtype(pa_type) # error: Incompatible return value type (got "Union[str, Union[dtype[Any], # ExtensionDtype]]", expected "Union[dtype[Any], ExtensionDtype]") return inferred_dtype # type: ignore[return-value]
Convert objects to best possible type, and optionally, to types supporting ``pd.NA``.

Parameters
----------
input_array : ExtensionArray or np.ndarray
convert_string : bool, default True
    Whether object dtypes should be converted to ``StringDtype()``.
convert_integer : bool, default True
    Whether, if possible, conversion can be done to integer extension types.
convert_boolean : bool, default True
    Whether object dtypes should be converted to ``BooleanDtype()``.
convert_floating : bool, default True
    Whether, if possible, conversion can be done to floating extension types.
    If `convert_integer` is also True, preference will be given to integer
    dtypes if the floats can be faithfully cast to integers.
infer_objects : bool, default False
    Whether to also infer objects to float/int if possible. Is only hit if the
    object array contains pd.NA.
dtype_backend : str, default "numpy_nullable"
    Nullable dtype implementation to use.

    * "numpy_nullable" returns numpy-backed nullable types
    * "pyarrow" returns pyarrow-backed nullable types using ``ArrowDtype``

Returns
-------
np.dtype, or ExtensionDtype
173,046
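To make the record above concrete, here is a minimal usage sketch. It assumes `convert_dtypes` is importable from `pandas.core.dtypes.cast`, a private pandas module whose layout may change between versions; the public entry points are `Series.convert_dtypes` and `DataFrame.convert_dtypes`.

import numpy as np
import pandas as pd

from pandas.core.dtypes.cast import convert_dtypes  # private helper (assumed path)

# Floats that are all integral are offered the nullable Int64 dtype.
print(convert_dtypes(np.array([1.0, 2.0, 3.0])))           # Int64

# Object arrays inferred as strings map to the nullable string dtype.
print(convert_dtypes(np.array(["a", "b"], dtype=object)))  # string

# The same logic backs the public DataFrame.convert_dtypes().
df = pd.DataFrame({"a": [1, 2, None], "b": ["x", "y", None]})
print(df.convert_dtypes().dtypes)  # a -> Int64, b -> string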
from __future__ import annotations import datetime as dt import functools from typing import ( TYPE_CHECKING, Any, Literal, Sized, TypeVar, cast, overload, ) import warnings import numpy as np from pandas._libs import lib from pandas._libs.missing import ( NA, NAType, checknull, ) from pandas._libs.tslibs import ( NaT, OutOfBoundsDatetime, OutOfBoundsTimedelta, Timedelta, Timestamp, get_unit_from_dtype, is_supported_unit, ) from pandas._libs.tslibs.timedeltas import array_to_timedelta64 from pandas._typing import ( ArrayLike, Dtype, DtypeObj, NumpyIndexT, Scalar, npt, ) from pandas.errors import ( IntCastingNaNError, LossySetitemError, ) from pandas.core.dtypes.common import ( ensure_int8, ensure_int16, ensure_int32, ensure_int64, ensure_object, ensure_str, is_bool, is_bool_dtype, is_complex, is_complex_dtype, is_datetime64_dtype, is_extension_array_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_signed_integer_dtype, is_string_dtype, is_timedelta64_dtype, is_unsigned_integer_dtype, pandas_dtype as pandas_dtype_func, ) from pandas.core.dtypes.dtypes import ( BaseMaskedDtype, CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PandasExtensionDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ( ABCExtensionArray, ABCIndex, ABCSeries, ) from pandas.core.dtypes.inference import is_list_like from pandas.core.dtypes.missing import ( is_valid_na_for_dtype, isna, na_value_for_dtype, notna, ) def ensure_dtype_can_hold_na(dtype: np.dtype) -> np.dtype: ... def ensure_dtype_can_hold_na(dtype: ExtensionDtype) -> ExtensionDtype: ... def ensure_dtype_can_hold_na(dtype: DtypeObj) -> DtypeObj: """ If we have a dtype that cannot hold NA values, find the best match that can. """ if isinstance(dtype, ExtensionDtype): if dtype._can_hold_na: return dtype elif isinstance(dtype, IntervalDtype): # TODO(GH#45349): don't special-case IntervalDtype, allow # overriding instead of returning object below. return IntervalDtype(np.float64, closed=dtype.closed) return _dtype_obj elif dtype.kind == "b": return _dtype_obj elif dtype.kind in ["i", "u"]: return np.dtype(np.float64) return dtype def infer_dtype_from(val, pandas_dtype: bool = False) -> tuple[DtypeObj, Any]: """ Interpret the dtype from a scalar or array. Parameters ---------- val : object pandas_dtype : bool, default False whether to infer dtype including pandas extension types. If False, scalar/array belongs to pandas extension types is inferred as object """ if not is_list_like(val): return infer_dtype_from_scalar(val, pandas_dtype=pandas_dtype) return infer_dtype_from_array(val, pandas_dtype=pandas_dtype) def find_common_type(types: list[np.dtype]) -> np.dtype: ... def find_common_type(types: list[ExtensionDtype]) -> DtypeObj: ... def find_common_type(types: list[DtypeObj]) -> DtypeObj: ... def find_common_type(types): """ Find a common data type among the given dtypes. 
Parameters ---------- types : list of dtypes Returns ------- pandas extension or numpy dtype See Also -------- numpy.find_common_type """ if not types: raise ValueError("no types given") first = types[0] # workaround for find_common_type([np.dtype('datetime64[ns]')] * 2) # => object if lib.dtypes_all_equal(list(types)): return first # get unique types (dict.fromkeys is used as order-preserving set()) types = list(dict.fromkeys(types).keys()) if any(isinstance(t, ExtensionDtype) for t in types): for t in types: if isinstance(t, ExtensionDtype): res = t._get_common_dtype(types) if res is not None: return res return np.dtype("object") # take lowest unit if all(is_datetime64_dtype(t) for t in types): return np.dtype("datetime64[ns]") if all(is_timedelta64_dtype(t) for t in types): return np.dtype("timedelta64[ns]") # don't mix bool / int or float or complex # this is different from numpy, which casts bool with float/int as int has_bools = any(is_bool_dtype(t) for t in types) if has_bools: for t in types: if is_integer_dtype(t) or is_float_dtype(t) or is_complex_dtype(t): return np.dtype("object") return np.find_common_type(types, []) Any = object() ArrayLike = Union["ExtensionArray", np.ndarray] DtypeObj = Union[np.dtype, "ExtensionDtype"] def pandas_dtype(dtype) -> DtypeObj: """ Convert input into a pandas only dtype object or a numpy dtype object. Parameters ---------- dtype : object to be converted Returns ------- np.dtype or a pandas dtype Raises ------ TypeError if not a dtype """ # short-circuit if isinstance(dtype, np.ndarray): return dtype.dtype elif isinstance(dtype, (np.dtype, ExtensionDtype)): return dtype # registered extension types result = registry.find(dtype) if result is not None: return result # try a numpy dtype # raise a consistent TypeError if failed try: with warnings.catch_warnings(): # GH#51523 - Series.astype(np.integer) doesn't show # numpy deprication warning of np.integer # Hence enabling DeprecationWarning warnings.simplefilter("always", DeprecationWarning) npdtype = np.dtype(dtype) except SyntaxError as err: # np.dtype uses `eval` which can raise SyntaxError raise TypeError(f"data type '{dtype}' not understood") from err # Any invalid dtype (such as pd.Timestamp) should raise an error. # np.dtype(invalid_type).kind = 0 for such objects. However, this will # also catch some valid dtypes such as object, np.object_ and 'object' # which we safeguard against by catching them earlier and returning # np.dtype(valid_dtype) before this condition is evaluated. if is_hashable(dtype) and dtype in [object, np.object_, "object", "O"]: # check hashability to avoid errors/DeprecationWarning when we get # here and `dtype` is an array return npdtype elif npdtype.kind == "O": raise TypeError(f"dtype '{dtype}' not understood") return npdtype def is_valid_na_for_dtype(obj, dtype: DtypeObj) -> bool: """ isna check that excludes incompatible dtypes Parameters ---------- obj : object dtype : np.datetime64, np.timedelta64, DatetimeTZDtype, or PeriodDtype Returns ------- bool """ if not lib.is_scalar(obj) or not isna(obj): return False elif dtype.kind == "M": if isinstance(dtype, np.dtype): # i.e. 
not tzaware return not isinstance(obj, (np.timedelta64, Decimal)) # we have to rule out tznaive dt64("NaT") return not isinstance(obj, (np.timedelta64, np.datetime64, Decimal)) elif dtype.kind == "m": return not isinstance(obj, (np.datetime64, Decimal)) elif dtype.kind in ["i", "u", "f", "c"]: # Numeric return obj is not NaT and not isinstance(obj, (np.datetime64, np.timedelta64)) elif dtype.kind == "b": # We allow pd.NA, None, np.nan in BooleanArray (same as IntervalDtype) return lib.is_float(obj) or obj is None or obj is libmissing.NA elif dtype == _dtype_str: # numpy string dtypes to avoid float np.nan return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal, float)) elif dtype == _dtype_object: # This is needed for Categorical, but is kind of weird return True elif isinstance(dtype, PeriodDtype): return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal)) elif isinstance(dtype, IntervalDtype): return lib.is_float(obj) or obj is None or obj is libmissing.NA elif isinstance(dtype, CategoricalDtype): return is_valid_na_for_dtype(obj, dtype.categories.dtype) # fallback, default to allowing NaN, None, NA, NaT return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal)) The provided code snippet includes necessary dependencies for implementing the `find_result_type` function. Write a Python function `def find_result_type(left: ArrayLike, right: Any) -> DtypeObj` to solve the following problem: Find the type/dtype for a the result of an operation between these objects. This is similar to find_common_type, but looks at the objects instead of just their dtypes. This can be useful in particular when one of the objects does not have a `dtype`. Parameters ---------- left : np.ndarray or ExtensionArray right : Any Returns ------- np.dtype or ExtensionDtype See also -------- find_common_type numpy.result_type Here is the function: def find_result_type(left: ArrayLike, right: Any) -> DtypeObj: """ Find the type/dtype for a the result of an operation between these objects. This is similar to find_common_type, but looks at the objects instead of just their dtypes. This can be useful in particular when one of the objects does not have a `dtype`. Parameters ---------- left : np.ndarray or ExtensionArray right : Any Returns ------- np.dtype or ExtensionDtype See also -------- find_common_type numpy.result_type """ new_dtype: DtypeObj if ( isinstance(left, np.ndarray) and left.dtype.kind in ["i", "u", "c"] and (lib.is_integer(right) or lib.is_float(right)) ): # e.g. with int8 dtype and right=512, we want to end up with # np.int16, whereas infer_dtype_from(512) gives np.int64, # which will make us upcast too far. if lib.is_float(right) and right.is_integer() and left.dtype.kind != "f": right = int(right) new_dtype = np.result_type(left, right) elif is_valid_na_for_dtype(right, left.dtype): # e.g. IntervalDtype[int] and None/np.nan new_dtype = ensure_dtype_can_hold_na(left.dtype) else: dtype, _ = infer_dtype_from(right, pandas_dtype=True) new_dtype = find_common_type([left.dtype, dtype]) return new_dtype
Find the type/dtype for the result of an operation between these objects.

This is similar to find_common_type, but looks at the objects instead
of just their dtypes. This can be useful in particular when one of the
objects does not have a `dtype`.

Parameters
----------
left : np.ndarray or ExtensionArray
right : Any

Returns
-------
np.dtype or ExtensionDtype

See Also
--------
find_common_type
numpy.result_type
173,047
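A short sketch of how this behaves in practice, assuming the private import path `pandas.core.dtypes.cast.find_result_type` (internal API, subject to change) and the value-based NumPy promotion rules these pandas versions rely on:

import numpy as np

from pandas.core.dtypes.cast import find_result_type  # private helper (assumed path)

left = np.array([1, 2, 3], dtype=np.int8)

print(find_result_type(left, 3))     # int8    -- the value already fits
print(find_result_type(left, 512))   # int16   -- promote only as far as needed
print(find_result_type(left, None))  # float64 -- int8 cannot hold NA, so upcast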
from __future__ import annotations

import datetime as dt
import functools
from typing import (
    TYPE_CHECKING,
    Any,
    Literal,
    Sized,
    TypeVar,
    cast,
    overload,
)
import warnings

import numpy as np

from pandas._libs import lib
from pandas._libs.missing import (
    NA,
    NAType,
    checknull,
)
from pandas._libs.tslibs import (
    NaT,
    OutOfBoundsDatetime,
    OutOfBoundsTimedelta,
    Timedelta,
    Timestamp,
    get_unit_from_dtype,
    is_supported_unit,
)
from pandas._libs.tslibs.timedeltas import array_to_timedelta64
from pandas._typing import (
    ArrayLike,
    Dtype,
    DtypeObj,
    NumpyIndexT,
    Scalar,
    npt,
)
from pandas.errors import (
    IntCastingNaNError,
    LossySetitemError,
)

from pandas.core.dtypes.common import (
    ensure_int8,
    ensure_int16,
    ensure_int32,
    ensure_int64,
    ensure_object,
    ensure_str,
    is_bool,
    is_bool_dtype,
    is_complex,
    is_complex_dtype,
    is_datetime64_dtype,
    is_extension_array_dtype,
    is_float,
    is_float_dtype,
    is_integer,
    is_integer_dtype,
    is_numeric_dtype,
    is_object_dtype,
    is_scalar,
    is_signed_integer_dtype,
    is_string_dtype,
    is_timedelta64_dtype,
    is_unsigned_integer_dtype,
    pandas_dtype as pandas_dtype_func,
)
from pandas.core.dtypes.dtypes import (
    BaseMaskedDtype,
    CategoricalDtype,
    DatetimeTZDtype,
    ExtensionDtype,
    IntervalDtype,
    PandasExtensionDtype,
    PeriodDtype,
)
from pandas.core.dtypes.generic import (
    ABCExtensionArray,
    ABCIndex,
    ABCSeries,
)
from pandas.core.dtypes.inference import is_list_like
from pandas.core.dtypes.missing import (
    is_valid_na_for_dtype,
    isna,
    na_value_for_dtype,
    notna,
)

_dtype_obj = np.dtype(object)


def _maybe_box_and_unbox_datetimelike(value: Scalar, dtype: DtypeObj):
    # Caller is responsible for checking dtype.kind in ["m", "M"]

    if isinstance(value, dt.datetime):
        # we don't want to box dt64, in particular datetime64("NaT")
        value = maybe_box_datetimelike(value, dtype)

    return _maybe_unbox_datetimelike(value, dtype)


Scalar = Union[PythonScalar, PandasScalar, np.datetime64, np.timedelta64, datetime]


def construct_2d_arraylike_from_scalar(
    value: Scalar, length: int, width: int, dtype: np.dtype, copy: bool
) -> np.ndarray:

    shape = (length, width)

    if dtype.kind in ["m", "M"]:
        value = _maybe_box_and_unbox_datetimelike(value, dtype)
    elif dtype == _dtype_obj:
        if isinstance(value, (np.timedelta64, np.datetime64)):
            # calling np.array below would cast to pytimedelta/pydatetime
            out = np.empty(shape, dtype=object)
            out.fill(value)
            return out

    # Attempt to coerce to a numpy array
    try:
        arr = np.array(value, dtype=dtype, copy=copy)
    except (ValueError, TypeError) as err:
        raise TypeError(
            f"DataFrame constructor called with incompatible data and dtype: {err}"
        ) from err

    if arr.ndim != 0:
        raise ValueError("DataFrame constructor not properly called!")

    return np.full(shape, arr)
null
173,048
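The record above ships a null docstring, so as a hedged illustration of the helper's intent, here is a minimal sketch. It assumes the private import path `pandas.core.dtypes.cast.construct_2d_arraylike_from_scalar`; this is the routine the DataFrame constructor uses to broadcast a scalar into a 2D block.

import numpy as np

from pandas.core.dtypes.cast import (  # private helper (assumed path)
    construct_2d_arraylike_from_scalar,
)

# Broadcast the scalar 0 into a 3x2 int64 block, as happens for
# pd.DataFrame(0, index=range(3), columns=["a", "b"]).
block = construct_2d_arraylike_from_scalar(0, 3, 2, np.dtype(np.int64), copy=False)
print(block.shape, block.dtype)  # (3, 2) int64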
from __future__ import annotations import datetime as dt import functools from typing import ( TYPE_CHECKING, Any, Literal, Sized, TypeVar, cast, overload, ) import warnings import numpy as np from pandas._libs import lib from pandas._libs.missing import ( NA, NAType, checknull, ) from pandas._libs.tslibs import ( NaT, OutOfBoundsDatetime, OutOfBoundsTimedelta, Timedelta, Timestamp, get_unit_from_dtype, is_supported_unit, ) from pandas._libs.tslibs.timedeltas import array_to_timedelta64 from pandas._typing import ( ArrayLike, Dtype, DtypeObj, NumpyIndexT, Scalar, npt, ) from pandas.errors import ( IntCastingNaNError, LossySetitemError, ) from pandas.core.dtypes.common import ( ensure_int8, ensure_int16, ensure_int32, ensure_int64, ensure_object, ensure_str, is_bool, is_bool_dtype, is_complex, is_complex_dtype, is_datetime64_dtype, is_extension_array_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, is_object_dtype, is_scalar, is_signed_integer_dtype, is_string_dtype, is_timedelta64_dtype, is_unsigned_integer_dtype, pandas_dtype as pandas_dtype_func, ) from pandas.core.dtypes.dtypes import ( BaseMaskedDtype, CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PandasExtensionDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ( ABCExtensionArray, ABCIndex, ABCSeries, ) from pandas.core.dtypes.inference import is_list_like from pandas.core.dtypes.missing import ( is_valid_na_for_dtype, isna, na_value_for_dtype, notna, ) def np_can_hold_element(dtype: np.dtype, element: Any) -> Any: """ Raise if we cannot losslessly set this element into an ndarray with this dtype. Specifically about places where we disagree with numpy. i.e. there are cases where numpy will raise in doing the setitem that we do not check for here, e.g. setting str "X" into a numeric ndarray. Returns ------- Any The element, potentially cast to the dtype. Raises ------ ValueError : If we cannot losslessly store this element with this dtype. """ if dtype == _dtype_obj: return element tipo = _maybe_infer_dtype_type(element) if dtype.kind in ["i", "u"]: if isinstance(element, range): if _dtype_can_hold_range(element, dtype): return element raise LossySetitemError if is_integer(element) or (is_float(element) and element.is_integer()): # e.g. test_setitem_series_int8 if we have a python int 1 # tipo may be np.int32, despite the fact that it will fit # in smaller int dtypes. info = np.iinfo(dtype) if info.min <= element <= info.max: return dtype.type(element) raise LossySetitemError if tipo is not None: if tipo.kind not in ["i", "u"]: if isinstance(element, np.ndarray) and element.dtype.kind == "f": # If all can be losslessly cast to integers, then we can hold them with np.errstate(invalid="ignore"): # We check afterwards if cast was losslessly, so no need to show # the warning casted = element.astype(dtype) comp = casted == element if comp.all(): # Return the casted values bc they can be passed to # np.putmask, whereas the raw values cannot. # see TestSetitemFloatNDarrayIntoIntegerSeries return casted raise LossySetitemError # Anything other than integer we cannot hold raise LossySetitemError if ( dtype.kind == "u" and isinstance(element, np.ndarray) and element.dtype.kind == "i" ): # see test_where_uint64 casted = element.astype(dtype) if (casted == element).all(): # TODO: faster to check (element >=0).all()? potential # itemsize issues there? 
return casted raise LossySetitemError if dtype.itemsize < tipo.itemsize: raise LossySetitemError if not isinstance(tipo, np.dtype): # i.e. nullable IntegerDtype; we can put this into an ndarray # losslessly iff it has no NAs if element._hasna: raise LossySetitemError return element return element raise LossySetitemError if dtype.kind == "f": if lib.is_integer(element) or lib.is_float(element): casted = dtype.type(element) if np.isnan(casted) or casted == element: return casted # otherwise e.g. overflow see TestCoercionFloat32 raise LossySetitemError if tipo is not None: # TODO: itemsize check? if tipo.kind not in ["f", "i", "u"]: # Anything other than float/integer we cannot hold raise LossySetitemError if not isinstance(tipo, np.dtype): # i.e. nullable IntegerDtype or FloatingDtype; # we can put this into an ndarray losslessly iff it has no NAs if element._hasna: raise LossySetitemError return element elif tipo.itemsize > dtype.itemsize or tipo.kind != dtype.kind: if isinstance(element, np.ndarray): # e.g. TestDataFrameIndexingWhere::test_where_alignment casted = element.astype(dtype) if np.array_equal(casted, element, equal_nan=True): return casted raise LossySetitemError return element raise LossySetitemError if dtype.kind == "c": if lib.is_integer(element) or lib.is_complex(element) or lib.is_float(element): if np.isnan(element): # see test_where_complex GH#6345 return dtype.type(element) with warnings.catch_warnings(): warnings.filterwarnings("ignore") casted = dtype.type(element) if casted == element: return casted # otherwise e.g. overflow see test_32878_complex_itemsize raise LossySetitemError if tipo is not None: if tipo.kind in ["c", "f", "i", "u"]: return element raise LossySetitemError raise LossySetitemError if dtype.kind == "b": if tipo is not None: if tipo.kind == "b": if not isinstance(tipo, np.dtype): # i.e. we have a BooleanArray if element._hasna: # i.e. there are pd.NA elements raise LossySetitemError return element raise LossySetitemError if lib.is_bool(element): return element raise LossySetitemError if dtype.kind == "S": # TODO: test tests.frame.methods.test_replace tests get here, # need more targeted tests. xref phofl has a PR about this if tipo is not None: if tipo.kind == "S" and tipo.itemsize <= dtype.itemsize: return element raise LossySetitemError if isinstance(element, bytes) and len(element) <= dtype.itemsize: return element raise LossySetitemError if dtype.kind == "V": # i.e. np.void, which cannot hold _anything_ raise LossySetitemError raise NotImplementedError(dtype) Any = object() def cast(typ: Type[_T], val: Any) -> _T: ... def cast(typ: str, val: Any) -> Any: ... def cast(typ: object, val: Any) -> Any: ... ArrayLike = Union["ExtensionArray", np.ndarray] class LossySetitemError(Exception): """ Raised when trying to do a __setitem__ on an np.ndarray that is not lossless. """ class DatetimeTZDtype(PandasExtensionDtype): """ An ExtensionDtype for timezone-aware datetime data. **This is not an actual numpy dtype**, but a duck type. Parameters ---------- unit : str, default "ns" The precision of the datetime data. Currently limited to ``"ns"``. tz : str, int, or datetime.tzinfo The timezone. Attributes ---------- unit tz Methods ------- None Raises ------ pytz.UnknownTimeZoneError When the requested timezone cannot be found. 
Examples -------- >>> pd.DatetimeTZDtype(tz='UTC') datetime64[ns, UTC] >>> pd.DatetimeTZDtype(tz='dateutil/US/Central') datetime64[ns, tzfile('/usr/share/zoneinfo/US/Central')] """ type: type[Timestamp] = Timestamp kind: str_type = "M" num = 101 base = np.dtype("M8[ns]") # TODO: depend on reso? _metadata = ("unit", "tz") _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]") _cache_dtypes: dict[str_type, PandasExtensionDtype] = {} def na_value(self) -> NaTType: return NaT # error: Signature of "str" incompatible with supertype "PandasExtensionDtype" def str(self) -> str: # type: ignore[override] return f"|M8[{self.unit}]" def __init__(self, unit: str_type | DatetimeTZDtype = "ns", tz=None) -> None: if isinstance(unit, DatetimeTZDtype): # error: "str" has no attribute "tz" unit, tz = unit.unit, unit.tz # type: ignore[attr-defined] if unit != "ns": if isinstance(unit, str) and tz is None: # maybe a string like datetime64[ns, tz], which we support for # now. result = type(self).construct_from_string(unit) unit = result.unit tz = result.tz msg = ( f"Passing a dtype alias like 'datetime64[ns, {tz}]' " "to DatetimeTZDtype is no longer supported. Use " "'DatetimeTZDtype.construct_from_string()' instead." ) raise ValueError(msg) if unit not in ["s", "ms", "us", "ns"]: raise ValueError("DatetimeTZDtype only supports s, ms, us, ns units") if tz: tz = timezones.maybe_get_tz(tz) tz = timezones.tz_standardize(tz) elif tz is not None: raise pytz.UnknownTimeZoneError(tz) if tz is None: raise TypeError("A 'tz' is required.") self._unit = unit self._tz = tz def _creso(self) -> int: """ The NPY_DATETIMEUNIT corresponding to this dtype's resolution. """ return abbrev_to_npy_unit(self.unit) def unit(self) -> str_type: """ The precision of the datetime data. """ return self._unit def tz(self) -> tzinfo: """ The timezone. """ return self._tz def construct_array_type(cls) -> type_t[DatetimeArray]: """ Return the array type associated with this dtype. Returns ------- type """ from pandas.core.arrays import DatetimeArray return DatetimeArray def construct_from_string(cls, string: str_type) -> DatetimeTZDtype: """ Construct a DatetimeTZDtype from a string. Parameters ---------- string : str The string alias for this DatetimeTZDtype. Should be formatted like ``datetime64[ns, <tz>]``, where ``<tz>`` is the timezone name. Examples -------- >>> DatetimeTZDtype.construct_from_string('datetime64[ns, UTC]') datetime64[ns, UTC] """ if not isinstance(string, str): raise TypeError( f"'construct_from_string' expects a string, got {type(string)}" ) msg = f"Cannot construct a 'DatetimeTZDtype' from '{string}'" match = cls._match.match(string) if match: d = match.groupdict() try: return cls(unit=d["unit"], tz=d["tz"]) except (KeyError, TypeError, ValueError) as err: # KeyError if maybe_get_tz tries and fails to get a # pytz timezone (actually pytz.UnknownTimeZoneError). # TypeError if we pass a nonsense tz; # ValueError if we pass a unit other than "ns" raise TypeError(msg) from err raise TypeError(msg) def __str__(self) -> str_type: return f"datetime64[{self.unit}, {self.tz}]" def name(self) -> str_type: """A string representation of the dtype.""" return str(self) def __hash__(self) -> int: # make myself hashable # TODO: update this. 
return hash(str(self)) def __eq__(self, other: Any) -> bool: if isinstance(other, str): if other.startswith("M8["): other = f"datetime64[{other[3:]}" return other == self.name return ( isinstance(other, DatetimeTZDtype) and self.unit == other.unit and tz_compare(self.tz, other.tz) ) def __setstate__(self, state) -> None: # for pickle compat. __get_state__ is defined in the # PandasExtensionDtype superclass and uses the public properties to # pickle -> need to set the settable private ones here (see GH26067) self._tz = state["tz"] self._unit = state["unit"] class PeriodDtype(PeriodDtypeBase, PandasExtensionDtype): """ An ExtensionDtype for Period data. **This is not an actual numpy dtype**, but a duck type. Parameters ---------- freq : str or DateOffset The frequency of this PeriodDtype. Attributes ---------- freq Methods ------- None Examples -------- >>> pd.PeriodDtype(freq='D') period[D] >>> pd.PeriodDtype(freq=pd.offsets.MonthEnd()) period[M] """ type: type[Period] = Period kind: str_type = "O" str = "|O08" base = np.dtype("O") num = 102 _metadata = ("freq",) _match = re.compile(r"(P|p)eriod\[(?P<freq>.+)\]") _cache_dtypes: dict[str_type, PandasExtensionDtype] = {} def __new__(cls, freq=None): """ Parameters ---------- freq : frequency """ if isinstance(freq, PeriodDtype): return freq elif freq is None: # empty constructor for pickle compat # -10_000 corresponds to PeriodDtypeCode.UNDEFINED u = PeriodDtypeBase.__new__(cls, -10_000) u._freq = None return u if not isinstance(freq, BaseOffset): freq = cls._parse_dtype_strict(freq) try: return cls._cache_dtypes[freq.freqstr] except KeyError: dtype_code = freq._period_dtype_code u = PeriodDtypeBase.__new__(cls, dtype_code) u._freq = freq cls._cache_dtypes[freq.freqstr] = u return u def __reduce__(self): return type(self), (self.freq,) def freq(self): """ The frequency object of this PeriodDtype. """ return self._freq def _parse_dtype_strict(cls, freq: str_type) -> BaseOffset: if isinstance(freq, str): # note: freq is already of type str! 
if freq.startswith("period[") or freq.startswith("Period["): m = cls._match.search(freq) if m is not None: freq = m.group("freq") freq_offset = to_offset(freq) if freq_offset is not None: return freq_offset raise ValueError("could not construct PeriodDtype") def construct_from_string(cls, string: str_type) -> PeriodDtype: """ Strict construction from a string, raise a TypeError if not possible """ if ( isinstance(string, str) and (string.startswith("period[") or string.startswith("Period[")) or isinstance(string, BaseOffset) ): # do not parse string like U as period[U] # avoid tuple to be regarded as freq try: return cls(freq=string) except ValueError: pass if isinstance(string, str): msg = f"Cannot construct a 'PeriodDtype' from '{string}'" else: msg = f"'construct_from_string' expects a string, got {type(string)}" raise TypeError(msg) def __str__(self) -> str_type: return self.name def name(self) -> str_type: return f"period[{self.freq.freqstr}]" def na_value(self) -> NaTType: return NaT def __hash__(self) -> int: # make myself hashable return hash(str(self)) def __eq__(self, other: Any) -> bool: if isinstance(other, str): return other in [self.name, self.name.title()] elif isinstance(other, PeriodDtype): # For freqs that can be held by a PeriodDtype, this check is # equivalent to (and much faster than) self.freq == other.freq sfreq = self.freq ofreq = other.freq return ( sfreq.n == ofreq.n and sfreq._period_dtype_code == ofreq._period_dtype_code ) return False def __ne__(self, other: Any) -> bool: return not self.__eq__(other) def __setstate__(self, state) -> None: # for pickle compat. __getstate__ is defined in the # PandasExtensionDtype superclass and uses the public properties to # pickle -> need to set the settable private ones here (see GH26067) self._freq = state["freq"] def is_dtype(cls, dtype: object) -> bool: """ Return a boolean if we if the passed type is an actual dtype that we can match (via string or type) """ if isinstance(dtype, str): # PeriodDtype can be instantiated from freq string like "U", # but doesn't regard freq str like "U" as dtype. if dtype.startswith("period[") or dtype.startswith("Period["): try: return cls._parse_dtype_strict(dtype) is not None except ValueError: return False else: return False return super().is_dtype(dtype) def construct_array_type(cls) -> type_t[PeriodArray]: """ Return the array type associated with this dtype. Returns ------- type """ from pandas.core.arrays import PeriodArray return PeriodArray def __from_arrow__( self, array: pyarrow.Array | pyarrow.ChunkedArray ) -> PeriodArray: """ Construct PeriodArray from pyarrow Array/ChunkedArray. """ import pyarrow from pandas.core.arrays import PeriodArray from pandas.core.arrays.arrow._arrow_utils import ( pyarrow_array_to_numpy_and_mask, ) if isinstance(array, pyarrow.Array): chunks = [array] else: chunks = array.chunks results = [] for arr in chunks: data, mask = pyarrow_array_to_numpy_and_mask(arr, dtype=np.dtype(np.int64)) parr = PeriodArray(data.copy(), freq=self.freq, copy=False) # error: Invalid index type "ndarray[Any, dtype[bool_]]" for "PeriodArray"; # expected type "Union[int, Sequence[int], Sequence[bool], slice]" parr[~mask] = NaT # type: ignore[index] results.append(parr) if not results: return PeriodArray(np.array([], dtype="int64"), freq=self.freq, copy=False) return PeriodArray._concat_same_type(results) class IntervalDtype(PandasExtensionDtype): """ An ExtensionDtype for Interval data. **This is not an actual numpy dtype**, but a duck type. 
Parameters ---------- subtype : str, np.dtype The dtype of the Interval bounds. Attributes ---------- subtype Methods ------- None Examples -------- >>> pd.IntervalDtype(subtype='int64', closed='both') interval[int64, both] """ name = "interval" kind: str_type = "O" str = "|O08" base = np.dtype("O") num = 103 _metadata = ( "subtype", "closed", ) _match = re.compile( r"(I|i)nterval\[(?P<subtype>[^,]+(\[.+\])?)" r"(, (?P<closed>(right|left|both|neither)))?\]" ) _cache_dtypes: dict[str_type, PandasExtensionDtype] = {} def __new__(cls, subtype=None, closed: str_type | None = None): from pandas.core.dtypes.common import ( is_string_dtype, pandas_dtype, ) if closed is not None and closed not in {"right", "left", "both", "neither"}: raise ValueError("closed must be one of 'right', 'left', 'both', 'neither'") if isinstance(subtype, IntervalDtype): if closed is not None and closed != subtype.closed: raise ValueError( "dtype.closed and 'closed' do not match. " "Try IntervalDtype(dtype.subtype, closed) instead." ) return subtype elif subtype is None: # we are called as an empty constructor # generally for pickle compat u = object.__new__(cls) u._subtype = None u._closed = closed return u elif isinstance(subtype, str) and subtype.lower() == "interval": subtype = None else: if isinstance(subtype, str): m = cls._match.search(subtype) if m is not None: gd = m.groupdict() subtype = gd["subtype"] if gd.get("closed", None) is not None: if closed is not None: if closed != gd["closed"]: raise ValueError( "'closed' keyword does not match value " "specified in dtype string" ) closed = gd["closed"] try: subtype = pandas_dtype(subtype) except TypeError as err: raise TypeError("could not construct IntervalDtype") from err if CategoricalDtype.is_dtype(subtype) or is_string_dtype(subtype): # GH 19016 msg = ( "category, object, and string subtypes are not supported " "for IntervalDtype" ) raise TypeError(msg) key = f"{subtype}{closed}" try: return cls._cache_dtypes[key] except KeyError: u = object.__new__(cls) u._subtype = subtype u._closed = closed cls._cache_dtypes[key] = u return u def _can_hold_na(self) -> bool: subtype = self._subtype if subtype is None: # partially-initialized raise NotImplementedError( "_can_hold_na is not defined for partially-initialized IntervalDtype" ) if subtype.kind in ["i", "u"]: return False return True def closed(self): return self._closed def subtype(self): """ The dtype of the Interval bounds. """ return self._subtype def construct_array_type(cls) -> type[IntervalArray]: """ Return the array type associated with this dtype. Returns ------- type """ from pandas.core.arrays import IntervalArray return IntervalArray def construct_from_string(cls, string: str_type) -> IntervalDtype: """ attempt to construct this type from a string, raise a TypeError if its not possible """ if not isinstance(string, str): raise TypeError( f"'construct_from_string' expects a string, got {type(string)}" ) if string.lower() == "interval" or cls._match.search(string) is not None: return cls(string) msg = ( f"Cannot construct a 'IntervalDtype' from '{string}'.\n\n" "Incorrectly formatted string passed to constructor. 
" "Valid formats include Interval or Interval[dtype] " "where dtype is numeric, datetime, or timedelta" ) raise TypeError(msg) def type(self) -> type[Interval]: return Interval def __str__(self) -> str_type: if self.subtype is None: return "interval" if self.closed is None: # Only partially initialized GH#38394 return f"interval[{self.subtype}]" return f"interval[{self.subtype}, {self.closed}]" def __hash__(self) -> int: # make myself hashable return hash(str(self)) def __eq__(self, other: Any) -> bool: if isinstance(other, str): return other.lower() in (self.name.lower(), str(self).lower()) elif not isinstance(other, IntervalDtype): return False elif self.subtype is None or other.subtype is None: # None should match any subtype return True elif self.closed != other.closed: return False else: from pandas.core.dtypes.common import is_dtype_equal return is_dtype_equal(self.subtype, other.subtype) def __setstate__(self, state) -> None: # for pickle compat. __get_state__ is defined in the # PandasExtensionDtype superclass and uses the public properties to # pickle -> need to set the settable private ones here (see GH26067) self._subtype = state["subtype"] # backward-compat older pickles won't have "closed" key self._closed = state.pop("closed", None) def is_dtype(cls, dtype: object) -> bool: """ Return a boolean if we if the passed type is an actual dtype that we can match (via string or type) """ if isinstance(dtype, str): if dtype.lower().startswith("interval"): try: return cls.construct_from_string(dtype) is not None except (ValueError, TypeError): return False else: return False return super().is_dtype(dtype) def __from_arrow__( self, array: pyarrow.Array | pyarrow.ChunkedArray ) -> IntervalArray: """ Construct IntervalArray from pyarrow Array/ChunkedArray. """ import pyarrow from pandas.core.arrays import IntervalArray if isinstance(array, pyarrow.Array): chunks = [array] else: chunks = array.chunks results = [] for arr in chunks: if isinstance(arr, pyarrow.ExtensionArray): arr = arr.storage left = np.asarray(arr.field("left"), dtype=self.subtype) right = np.asarray(arr.field("right"), dtype=self.subtype) iarr = IntervalArray.from_arrays(left, right, closed=self.closed) results.append(iarr) if not results: return IntervalArray.from_arrays( np.array([], dtype=self.subtype), np.array([], dtype=self.subtype), closed=self.closed, ) return IntervalArray._concat_same_type(results) def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: if not all(isinstance(x, IntervalDtype) for x in dtypes): return None closed = cast("IntervalDtype", dtypes[0]).closed if not all(cast("IntervalDtype", x).closed == closed for x in dtypes): return np.dtype(object) from pandas.core.dtypes.cast import find_common_type common = find_common_type([cast("IntervalDtype", x).subtype for x in dtypes]) if common == object: return np.dtype(object) return IntervalDtype(common, closed=closed) The provided code snippet includes necessary dependencies for implementing the `can_hold_element` function. Write a Python function `def can_hold_element(arr: ArrayLike, element: Any) -> bool` to solve the following problem: Can we do an inplace setitem with this element in an array with this dtype? Parameters ---------- arr : np.ndarray or ExtensionArray element : Any Returns ------- bool Here is the function: def can_hold_element(arr: ArrayLike, element: Any) -> bool: """ Can we do an inplace setitem with this element in an array with this dtype? 
Parameters ---------- arr : np.ndarray or ExtensionArray element : Any Returns ------- bool """ dtype = arr.dtype if not isinstance(dtype, np.dtype) or dtype.kind in ["m", "M"]: if isinstance(dtype, (PeriodDtype, IntervalDtype, DatetimeTZDtype, np.dtype)): # np.dtype here catches datetime64ns and timedelta64ns; we assume # in this case that we have DatetimeArray/TimedeltaArray arr = cast( "PeriodArray | DatetimeArray | TimedeltaArray | IntervalArray", arr ) try: arr._validate_setitem_value(element) return True except (ValueError, TypeError): # TODO: re-use _catch_deprecated_value_error to ensure we are # strict about what exceptions we allow through here. return False # This is technically incorrect, but maintains the behavior of # ExtensionBlock._can_hold_element return True try: np_can_hold_element(dtype, element) return True except (TypeError, LossySetitemError): return False
Can we do an inplace setitem with this element in an array with this dtype? Parameters ---------- arr : np.ndarray or ExtensionArray element : Any Returns ------- bool
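For a concrete feel of the check, here is a minimal usage sketch. It assumes can_hold_element is importable from pandas.core.dtypes.cast, an internal module whose location may change between pandas releases.

import numpy as np
from pandas.core.dtypes.cast import can_hold_element  # internal, not public API

int_arr = np.array([1, 2, 3], dtype=np.int64)
print(can_hold_element(int_arr, 5))      # True: 5 fits in int64 losslessly
print(can_hold_element(int_arr, 1.5))    # False: an inplace set would drop the .5
print(can_hold_element(int_arr, "a"))    # False: not numeric at all

float_arr = np.array([1.0, 2.0])
print(can_hold_element(float_arr, 1.5))  # True: float64 holds any Python float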
173,049
from __future__ import annotations from typing import ( TYPE_CHECKING, Type, cast, ) def create_pandas_abc_type(name, attr, comp): def _check(inst) -> bool: return getattr(inst, attr, "_typ") in comp # https://github.com/python/mypy/issues/1006 # error: 'classmethod' used with a non-method @classmethod # type: ignore[misc] def _instancecheck(cls, inst) -> bool: return _check(inst) and not isinstance(inst, type) @classmethod # type: ignore[misc] def _subclasscheck(cls, inst) -> bool: # Raise instead of returning False # This is consistent with default __subclasscheck__ behavior if not isinstance(inst, type): raise TypeError("issubclass() arg 1 must be a class") return _check(inst) dct = {"__instancecheck__": _instancecheck, "__subclasscheck__": _subclasscheck} meta = type("ABCBase", (type,), dct) return meta(name, (), dct)
null
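A rough illustration of the duck-typed check this factory produces is sketched below. It mirrors how pandas builds classes such as ABCSeries in the internal module pandas.core.dtypes.generic; FakeSeries is a hypothetical stand-in.

from pandas.core.dtypes.generic import create_pandas_abc_type  # internal module

# Membership is decided solely by the object's `_typ` attribute, which lets
# pandas type-check instances without circular imports of the real classes.
ABCSeries = create_pandas_abc_type("ABCSeries", "_typ", ("series",))

class FakeSeries:          # hypothetical stand-in for pandas.Series
    _typ = "series"

print(isinstance(FakeSeries(), ABCSeries))  # True: matched via _typ
print(isinstance(object(), ABCSeries))      # False: no _typ attribute
print(issubclass(FakeSeries, ABCSeries))    # True: classes are checked too

import pandas as pd
print(isinstance(pd.Series([1]), ABCSeries))  # True: the real Series also matches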
173,050
from __future__ import annotations import inspect from typing import ( TYPE_CHECKING, overload, ) import warnings import numpy as np from pandas._libs import lib from pandas._libs.tslibs.timedeltas import array_to_timedelta64 from pandas._typing import ( ArrayLike, DtypeObj, IgnoreRaise, ) from pandas.errors import IntCastingNaNError from pandas.core.dtypes.common import ( is_datetime64_dtype, is_dtype_equal, is_integer_dtype, is_object_dtype, is_string_dtype, is_timedelta64_dtype, pandas_dtype, ) from pandas.core.dtypes.dtypes import ( ExtensionDtype, PandasDtype, ) def astype_array(values: ArrayLike, dtype: DtypeObj, copy: bool = False) -> ArrayLike: """ Cast array (ndarray or ExtensionArray) to the new dtype. Parameters ---------- values : ndarray or ExtensionArray dtype : dtype object copy : bool, default False copy if indicated Returns ------- ndarray or ExtensionArray """ if is_dtype_equal(values.dtype, dtype): if copy: return values.copy() return values if not isinstance(values, np.ndarray): # i.e. ExtensionArray values = values.astype(dtype, copy=copy) else: values = _astype_nansafe(values, dtype, copy=copy) # in pandas we don't store numpy str dtypes, so convert to object if isinstance(dtype, np.dtype) and issubclass(values.dtype.type, str): values = np.array(values, dtype=object) return values ArrayLike = Union["ExtensionArray", np.ndarray] IgnoreRaise = Literal["ignore", "raise"] def pandas_dtype(dtype) -> DtypeObj: """ Convert input into a pandas only dtype object or a numpy dtype object. Parameters ---------- dtype : object to be converted Returns ------- np.dtype or a pandas dtype Raises ------ TypeError if not a dtype """ # short-circuit if isinstance(dtype, np.ndarray): return dtype.dtype elif isinstance(dtype, (np.dtype, ExtensionDtype)): return dtype # registered extension types result = registry.find(dtype) if result is not None: return result # try a numpy dtype # raise a consistent TypeError if failed try: with warnings.catch_warnings(): # GH#51523 - Series.astype(np.integer) doesn't show # numpy deprication warning of np.integer # Hence enabling DeprecationWarning warnings.simplefilter("always", DeprecationWarning) npdtype = np.dtype(dtype) except SyntaxError as err: # np.dtype uses `eval` which can raise SyntaxError raise TypeError(f"data type '{dtype}' not understood") from err # Any invalid dtype (such as pd.Timestamp) should raise an error. # np.dtype(invalid_type).kind = 0 for such objects. However, this will # also catch some valid dtypes such as object, np.object_ and 'object' # which we safeguard against by catching them earlier and returning # np.dtype(valid_dtype) before this condition is evaluated. if is_hashable(dtype) and dtype in [object, np.object_, "object", "O"]: # check hashability to avoid errors/DeprecationWarning when we get # here and `dtype` is an array return npdtype elif npdtype.kind == "O": raise TypeError(f"dtype '{dtype}' not understood") return npdtype class PandasDtype(ExtensionDtype): """ A Pandas ExtensionDtype for NumPy dtypes. This is mostly for internal compatibility, and is not especially useful on its own. Parameters ---------- dtype : object Object to be converted to a NumPy data type object. 
See Also -------- numpy.dtype """ _metadata = ("_dtype",) def __init__(self, dtype: npt.DTypeLike | PandasDtype | None) -> None: if isinstance(dtype, PandasDtype): # make constructor univalent dtype = dtype.numpy_dtype self._dtype = np.dtype(dtype) def __repr__(self) -> str: return f"PandasDtype({repr(self.name)})" def numpy_dtype(self) -> np.dtype: """ The NumPy dtype this PandasDtype wraps. """ return self._dtype def name(self) -> str: """ A bit-width name for this data-type. """ return self._dtype.name def type(self) -> type[np.generic]: """ The type object used to instantiate a scalar of this NumPy data-type. """ return self._dtype.type def _is_numeric(self) -> bool: # exclude object, str, unicode, void. return self.kind in set("biufc") def _is_boolean(self) -> bool: return self.kind == "b" def construct_from_string(cls, string: str) -> PandasDtype: try: dtype = np.dtype(string) except TypeError as err: if not isinstance(string, str): msg = f"'construct_from_string' expects a string, got {type(string)}" else: msg = f"Cannot construct a 'PandasDtype' from '{string}'" raise TypeError(msg) from err return cls(dtype) def construct_array_type(cls) -> type_t[PandasArray]: """ Return the array type associated with this dtype. Returns ------- type """ from pandas.core.arrays import PandasArray return PandasArray def kind(self) -> str: """ A character code (one of 'biufcmMOSUV') identifying the general kind of data. """ return self._dtype.kind def itemsize(self) -> int: """ The element size of this data-type object. """ return self._dtype.itemsize The provided code snippet includes necessary dependencies for implementing the `astype_array_safe` function. Write a Python function `def astype_array_safe( values: ArrayLike, dtype, copy: bool = False, errors: IgnoreRaise = "raise" ) -> ArrayLike` to solve the following problem: Cast array (ndarray or ExtensionArray) to the new dtype. This basically is the implementation for DataFrame/Series.astype and includes all custom logic for pandas (NaN-safety, converting str to object, not allowing ) Parameters ---------- values : ndarray or ExtensionArray dtype : str, dtype convertible copy : bool, default False copy if indicated errors : str, {'raise', 'ignore'}, default 'raise' - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. On error return original object Returns ------- ndarray or ExtensionArray Here is the function: def astype_array_safe( values: ArrayLike, dtype, copy: bool = False, errors: IgnoreRaise = "raise" ) -> ArrayLike: """ Cast array (ndarray or ExtensionArray) to the new dtype. This basically is the implementation for DataFrame/Series.astype and includes all custom logic for pandas (NaN-safety, converting str to object, not allowing ) Parameters ---------- values : ndarray or ExtensionArray dtype : str, dtype convertible copy : bool, default False copy if indicated errors : str, {'raise', 'ignore'}, default 'raise' - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. On error return original object Returns ------- ndarray or ExtensionArray """ errors_legal_values = ("raise", "ignore") if errors not in errors_legal_values: invalid_arg = ( "Expected value of kwarg 'errors' to be one of " f"{list(errors_legal_values)}. Supplied value is '{errors}'" ) raise ValueError(invalid_arg) if inspect.isclass(dtype) and issubclass(dtype, ExtensionDtype): msg = ( f"Expected an instance of {dtype.__name__}, " "but got the class instead. Try instantiating 'dtype'." 
) raise TypeError(msg) dtype = pandas_dtype(dtype) if isinstance(dtype, PandasDtype): # Ensure we don't end up with a PandasArray dtype = dtype.numpy_dtype try: new_values = astype_array(values, dtype, copy=copy) except (ValueError, TypeError): # e.g. _astype_nansafe can fail on object-dtype of strings # trying to convert to float if errors == "ignore": new_values = values else: raise return new_values
Cast array (ndarray or ExtensionArray) to the new dtype. This basically is the implementation for DataFrame/Series.astype and includes all custom logic for pandas (NaN-safety, converting str to object, not allowing ) Parameters ---------- values : ndarray or ExtensionArray dtype : str, dtype convertible copy : bool, default False copy if indicated errors : str, {'raise', 'ignore'}, default 'raise' - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. On error return original object Returns ------- ndarray or ExtensionArray
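A short sketch of the errors= behaviour follows. It assumes astype_array_safe is importable from pandas.core.dtypes.astype, an internal module whose layout has shifted between pandas versions.

import numpy as np
from pandas.core.dtypes.astype import astype_array_safe  # internal, not public API

values = np.array(["1", "2", "not-a-number"], dtype=object)

# errors="ignore" swallows the failed cast and returns the original array.
out = astype_array_safe(values, "float64", errors="ignore")
print(out is values)                     # True

# The default errors="raise" propagates the underlying failure.
try:
    astype_array_safe(values, "float64")
except ValueError as exc:
    print("raised:", exc)

# Any other errors= value is rejected before casting is even attempted.
try:
    astype_array_safe(values, "float64", errors="coerce")
except ValueError as exc:
    print("raised:", exc)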
173,051
from __future__ import annotations from collections import abc from numbers import Number import re from typing import Pattern import numpy as np from pandas._libs import lib class Number(metaclass=ABCMeta): def __hash__(self) -> int: ... The provided code snippet includes necessary dependencies for implementing the `is_number` function. Write a Python function `def is_number(obj) -> bool` to solve the following problem: Check if the object is a number. Returns True when the object is a number, and False if is not. Parameters ---------- obj : any type The object to check if is a number. Returns ------- bool Whether `obj` is a number or not. See Also -------- api.types.is_integer: Checks a subgroup of numbers. Examples -------- >>> from pandas.api.types import is_number >>> is_number(1) True >>> is_number(7.15) True Booleans are valid because they are int subclass. >>> is_number(False) True >>> is_number("foo") False >>> is_number("5") False Here is the function: def is_number(obj) -> bool: """ Check if the object is a number. Returns True when the object is a number, and False if is not. Parameters ---------- obj : any type The object to check if is a number. Returns ------- bool Whether `obj` is a number or not. See Also -------- api.types.is_integer: Checks a subgroup of numbers. Examples -------- >>> from pandas.api.types import is_number >>> is_number(1) True >>> is_number(7.15) True Booleans are valid because they are int subclass. >>> is_number(False) True >>> is_number("foo") False >>> is_number("5") False """ return isinstance(obj, (Number, np.number))
Check if the object is a number. Returns True when the object is a number, and False if it is not. Parameters ---------- obj : any type The object to check. Returns ------- bool Whether `obj` is a number or not. See Also -------- api.types.is_integer: Checks a subgroup of numbers. Examples -------- >>> from pandas.api.types import is_number >>> is_number(1) True >>> is_number(7.15) True Booleans are valid because they are a subclass of int. >>> is_number(False) True >>> is_number("foo") False >>> is_number("5") False
173,052
from __future__ import annotations from collections import abc from numbers import Number import re from typing import Pattern import numpy as np from pandas._libs import lib The provided code snippet includes necessary dependencies for implementing the `is_file_like` function. Write a Python function `def is_file_like(obj) -> bool` to solve the following problem: Check if the object is a file-like object. For objects to be considered file-like, they must be an iterator AND have either a `read` and/or `write` method as an attribute. Note: file-like objects must be iterable, but iterable objects need not be file-like. Parameters ---------- obj : The object to check Returns ------- bool Whether `obj` has file-like properties. Examples -------- >>> import io >>> from pandas.api.types import is_file_like >>> buffer = io.StringIO("data") >>> is_file_like(buffer) True >>> is_file_like([1, 2, 3]) False Here is the function: def is_file_like(obj) -> bool: """ Check if the object is a file-like object. For objects to be considered file-like, they must be an iterator AND have either a `read` and/or `write` method as an attribute. Note: file-like objects must be iterable, but iterable objects need not be file-like. Parameters ---------- obj : The object to check Returns ------- bool Whether `obj` has file-like properties. Examples -------- >>> import io >>> from pandas.api.types import is_file_like >>> buffer = io.StringIO("data") >>> is_file_like(buffer) True >>> is_file_like([1, 2, 3]) False """ if not (hasattr(obj, "read") or hasattr(obj, "write")): return False return bool(hasattr(obj, "__iter__"))
Check if the object is a file-like object. For objects to be considered file-like, they must be iterable AND have a `read` and/or `write` method as an attribute. Note: file-like objects must be iterable, but iterable objects need not be file-like. Parameters ---------- obj : The object to check Returns ------- bool Whether `obj` has file-like properties. Examples -------- >>> import io >>> from pandas.api.types import is_file_like >>> buffer = io.StringIO("data") >>> is_file_like(buffer) True >>> is_file_like([1, 2, 3]) False
173,053
from __future__ import annotations from collections import abc from numbers import Number import re from typing import Pattern import numpy as np from pandas._libs import lib class Pattern(Generic[AnyStr]): flags: int groupindex: Mapping[str, int] groups: int pattern: AnyStr def search(self, string: AnyStr, pos: int = ..., endpos: int = ...) -> Optional[Match[AnyStr]]: ... def match(self, string: AnyStr, pos: int = ..., endpos: int = ...) -> Optional[Match[AnyStr]]: ... # New in Python 3.4 def fullmatch(self, string: AnyStr, pos: int = ..., endpos: int = ...) -> Optional[Match[AnyStr]]: ... def split(self, string: AnyStr, maxsplit: int = ...) -> list[AnyStr]: ... def findall(self, string: AnyStr, pos: int = ..., endpos: int = ...) -> list[Any]: ... def finditer(self, string: AnyStr, pos: int = ..., endpos: int = ...) -> Iterator[Match[AnyStr]]: ... def sub(self, repl: AnyStr, string: AnyStr, count: int = ...) -> AnyStr: ... def sub(self, repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int = ...) -> AnyStr: ... def subn(self, repl: AnyStr, string: AnyStr, count: int = ...) -> Tuple[AnyStr, int]: ... def subn(self, repl: Callable[[Match[AnyStr]], AnyStr], string: AnyStr, count: int = ...) -> Tuple[AnyStr, int]: ... if sys.version_info >= (3, 9): def __class_getitem__(cls, item: Any) -> GenericAlias: ... The provided code snippet includes necessary dependencies for implementing the `is_re` function. Write a Python function `def is_re(obj) -> bool` to solve the following problem: Check if the object is a regex pattern instance. Parameters ---------- obj : The object to check Returns ------- bool Whether `obj` is a regex pattern. Examples -------- >>> from pandas.api.types import is_re >>> import re >>> is_re(re.compile(".*")) True >>> is_re("foo") False Here is the function: def is_re(obj) -> bool: """ Check if the object is a regex pattern instance. Parameters ---------- obj : The object to check Returns ------- bool Whether `obj` is a regex pattern. Examples -------- >>> from pandas.api.types import is_re >>> import re >>> is_re(re.compile(".*")) True >>> is_re("foo") False """ return isinstance(obj, Pattern)
Check if the object is a regex pattern instance. Parameters ---------- obj : The object to check Returns ------- bool Whether `obj` is a regex pattern. Examples -------- >>> from pandas.api.types import is_re >>> import re >>> is_re(re.compile(".*")) True >>> is_re("foo") False
173,054
from __future__ import annotations from collections import abc from numbers import Number import re from typing import Pattern import numpy as np from pandas._libs import lib The provided code snippet includes necessary dependencies for implementing the `is_re_compilable` function. Write a Python function `def is_re_compilable(obj) -> bool` to solve the following problem: Check if the object can be compiled into a regex pattern instance. Parameters ---------- obj : The object to check Returns ------- bool Whether `obj` can be compiled as a regex pattern. Examples -------- >>> from pandas.api.types import is_re_compilable >>> is_re_compilable(".*") True >>> is_re_compilable(1) False Here is the function: def is_re_compilable(obj) -> bool: """ Check if the object can be compiled into a regex pattern instance. Parameters ---------- obj : The object to check Returns ------- bool Whether `obj` can be compiled as a regex pattern. Examples -------- >>> from pandas.api.types import is_re_compilable >>> is_re_compilable(".*") True >>> is_re_compilable(1) False """ try: re.compile(obj) except TypeError: return False else: return True
Check if the object can be compiled into a regex pattern instance. Parameters ---------- obj : The object to check Returns ------- bool Whether `obj` can be compiled as a regex pattern. Examples -------- >>> from pandas.api.types import is_re_compilable >>> is_re_compilable(".*") True >>> is_re_compilable(1) False
173,055
from __future__ import annotations from collections import abc from numbers import Number import re from typing import Pattern import numpy as np from pandas._libs import lib is_list_like = lib.is_list_like The provided code snippet includes necessary dependencies for implementing the `is_nested_list_like` function. Write a Python function `def is_nested_list_like(obj) -> bool` to solve the following problem: Check if the object is list-like, and that all of its elements are also list-like. Parameters ---------- obj : The object to check Returns ------- is_list_like : bool Whether `obj` has list-like properties. Examples -------- >>> is_nested_list_like([[1, 2, 3]]) True >>> is_nested_list_like([{1, 2, 3}, {1, 2, 3}]) True >>> is_nested_list_like(["foo"]) False >>> is_nested_list_like([]) False >>> is_nested_list_like([[1, 2, 3], 1]) False Notes ----- This won't reliably detect whether a consumable iterator (e. g. a generator) is a nested-list-like without consuming the iterator. To avoid consuming it, we always return False if the outer container doesn't define `__len__`. See Also -------- is_list_like Here is the function: def is_nested_list_like(obj) -> bool: """ Check if the object is list-like, and that all of its elements are also list-like. Parameters ---------- obj : The object to check Returns ------- is_list_like : bool Whether `obj` has list-like properties. Examples -------- >>> is_nested_list_like([[1, 2, 3]]) True >>> is_nested_list_like([{1, 2, 3}, {1, 2, 3}]) True >>> is_nested_list_like(["foo"]) False >>> is_nested_list_like([]) False >>> is_nested_list_like([[1, 2, 3], 1]) False Notes ----- This won't reliably detect whether a consumable iterator (e. g. a generator) is a nested-list-like without consuming the iterator. To avoid consuming it, we always return False if the outer container doesn't define `__len__`. See Also -------- is_list_like """ return ( is_list_like(obj) and hasattr(obj, "__len__") and len(obj) > 0 and all(is_list_like(item) for item in obj) )
Check if the object is list-like, and that all of its elements are also list-like. Parameters ---------- obj : The object to check Returns ------- is_list_like : bool Whether `obj` has list-like properties. Examples -------- >>> is_nested_list_like([[1, 2, 3]]) True >>> is_nested_list_like([{1, 2, 3}, {1, 2, 3}]) True >>> is_nested_list_like(["foo"]) False >>> is_nested_list_like([]) False >>> is_nested_list_like([[1, 2, 3], 1]) False Notes ----- This won't reliably detect whether a consumable iterator (e.g. a generator) is a nested-list-like without consuming the iterator. To avoid consuming it, we always return False if the outer container doesn't define `__len__`. See Also -------- is_list_like
173,056
from __future__ import annotations from collections import abc from numbers import Number import re from typing import Pattern import numpy as np from pandas._libs import lib The provided code snippet includes necessary dependencies for implementing the `is_dict_like` function. Write a Python function `def is_dict_like(obj) -> bool` to solve the following problem: Check if the object is dict-like. Parameters ---------- obj : The object to check Returns ------- bool Whether `obj` has dict-like properties. Examples -------- >>> from pandas.api.types import is_dict_like >>> is_dict_like({1: 2}) True >>> is_dict_like([1, 2, 3]) False >>> is_dict_like(dict) False >>> is_dict_like(dict()) True Here is the function: def is_dict_like(obj) -> bool: """ Check if the object is dict-like. Parameters ---------- obj : The object to check Returns ------- bool Whether `obj` has dict-like properties. Examples -------- >>> from pandas.api.types import is_dict_like >>> is_dict_like({1: 2}) True >>> is_dict_like([1, 2, 3]) False >>> is_dict_like(dict) False >>> is_dict_like(dict()) True """ dict_like_attrs = ("__getitem__", "keys", "__contains__") return ( all(hasattr(obj, attr) for attr in dict_like_attrs) # [GH 25196] exclude classes and not isinstance(obj, type) )
Check if the object is dict-like. Parameters ---------- obj : The object to check Returns ------- bool Whether `obj` has dict-like properties. Examples -------- >>> from pandas.api.types import is_dict_like >>> is_dict_like({1: 2}) True >>> is_dict_like([1, 2, 3]) False >>> is_dict_like(dict) False >>> is_dict_like(dict()) True
173,057
from __future__ import annotations from collections import abc from numbers import Number import re from typing import Pattern import numpy as np from pandas._libs import lib The provided code snippet includes necessary dependencies for implementing the `is_named_tuple` function. Write a Python function `def is_named_tuple(obj) -> bool` to solve the following problem: Check if the object is a named tuple. Parameters ---------- obj : The object to check Returns ------- bool Whether `obj` is a named tuple. Examples -------- >>> from collections import namedtuple >>> from pandas.api.types import is_named_tuple >>> Point = namedtuple("Point", ["x", "y"]) >>> p = Point(1, 2) >>> >>> is_named_tuple(p) True >>> is_named_tuple((1, 2)) False Here is the function: def is_named_tuple(obj) -> bool: """ Check if the object is a named tuple. Parameters ---------- obj : The object to check Returns ------- bool Whether `obj` is a named tuple. Examples -------- >>> from collections import namedtuple >>> from pandas.api.types import is_named_tuple >>> Point = namedtuple("Point", ["x", "y"]) >>> p = Point(1, 2) >>> >>> is_named_tuple(p) True >>> is_named_tuple((1, 2)) False """ return isinstance(obj, abc.Sequence) and hasattr(obj, "_fields")
Check if the object is a named tuple. Parameters ---------- obj : The object to check Returns ------- bool Whether `obj` is a named tuple. Examples -------- >>> from collections import namedtuple >>> from pandas.api.types import is_named_tuple >>> Point = namedtuple("Point", ["x", "y"]) >>> p = Point(1, 2) >>> >>> is_named_tuple(p) True >>> is_named_tuple((1, 2)) False
173,058
from __future__ import annotations from collections import abc from numbers import Number import re from typing import Pattern import numpy as np from pandas._libs import lib The provided code snippet includes necessary dependencies for implementing the `is_dataclass` function. Write a Python function `def is_dataclass(item)` to solve the following problem: Checks if the object is a data-class instance Parameters ---------- item : object Returns -------- is_dataclass : bool True if the item is an instance of a data-class, will return false if you pass the data class itself Examples -------- >>> from dataclasses import dataclass >>> @dataclass ... class Point: ... x: int ... y: int >>> is_dataclass(Point) False >>> is_dataclass(Point(0,2)) True Here is the function: def is_dataclass(item): """ Checks if the object is a data-class instance Parameters ---------- item : object Returns -------- is_dataclass : bool True if the item is an instance of a data-class, will return false if you pass the data class itself Examples -------- >>> from dataclasses import dataclass >>> @dataclass ... class Point: ... x: int ... y: int >>> is_dataclass(Point) False >>> is_dataclass(Point(0,2)) True """ try: import dataclasses return dataclasses.is_dataclass(item) and not isinstance(item, type) except ImportError: return False
Checks if the object is a data-class instance. Parameters ---------- item : object Returns ------- is_dataclass : bool True if the item is an instance of a data-class; it will return False if you pass the data class itself. Examples -------- >>> from dataclasses import dataclass >>> @dataclass ... class Point: ... x: int ... y: int >>> is_dataclass(Point) False >>> is_dataclass(Point(0,2)) True
173,059
from __future__ import annotations from typing import ( Any, Callable, ) import warnings import numpy as np from pandas._libs import ( Interval, Period, algos, lib, ) from pandas._libs.tslibs import conversion from pandas._typing import ( ArrayLike, DtypeObj, ) from pandas.core.dtypes.base import _registry as registry from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ABCIndex from pandas.core.dtypes.inference import ( is_array_like, is_bool, is_complex, is_dataclass, is_decimal, is_dict_like, is_file_like, is_float, is_hashable, is_integer, is_interval, is_iterator, is_list_like, is_named_tuple, is_nested_list_like, is_number, is_re, is_re_compilable, is_scalar, is_sequence, ) def is_float_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a float dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a float dtype. Examples -------- >>> from pandas.api.types import is_float_dtype >>> is_float_dtype(str) False >>> is_float_dtype(int) False >>> is_float_dtype(float) True >>> is_float_dtype(np.array(['a', 'b'])) False >>> is_float_dtype(pd.Series([1, 2])) False >>> is_float_dtype(pd.Index([1, 2.])) True """ return _is_dtype_type(arr_or_dtype, classes(np.floating)) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "f" ) def is_extension_array_dtype(arr_or_dtype) -> bool: """ Check if an object is a pandas extension array type. See the :ref:`Use Guide <extending.extension-types>` for more. Parameters ---------- arr_or_dtype : object For array-like input, the ``.dtype`` attribute will be extracted. Returns ------- bool Whether the `arr_or_dtype` is an extension array type. Notes ----- This checks whether an object implements the pandas extension array interface. In pandas, this includes: * Categorical * Sparse * Interval * Period * DatetimeArray * TimedeltaArray Third-party libraries may implement arrays or types satisfying this interface as well. Examples -------- >>> from pandas.api.types import is_extension_array_dtype >>> arr = pd.Categorical(['a', 'b']) >>> is_extension_array_dtype(arr) True >>> is_extension_array_dtype(arr.dtype) True >>> arr = np.array(['a', 'b']) >>> is_extension_array_dtype(arr.dtype) False """ dtype = getattr(arr_or_dtype, "dtype", arr_or_dtype) if isinstance(dtype, ExtensionDtype): return True elif isinstance(dtype, np.dtype): return False else: return registry.find(dtype) is not None The provided code snippet includes necessary dependencies for implementing the `ensure_float` function. Write a Python function `def ensure_float(arr)` to solve the following problem: Ensure that an array object has a float dtype if possible. Parameters ---------- arr : array-like The array whose data type we want to enforce as float. Returns ------- float_arr : The original array cast to the float dtype if possible. Otherwise, the original array is returned. Here is the function: def ensure_float(arr): """ Ensure that an array object has a float dtype if possible. Parameters ---------- arr : array-like The array whose data type we want to enforce as float. Returns ------- float_arr : The original array cast to the float dtype if possible. Otherwise, the original array is returned. 
""" if is_extension_array_dtype(arr.dtype): if is_float_dtype(arr.dtype): arr = arr.to_numpy(dtype=arr.dtype.numpy_dtype, na_value=np.nan) else: arr = arr.to_numpy(dtype="float64", na_value=np.nan) elif issubclass(arr.dtype.type, (np.integer, np.bool_)): arr = arr.astype(float) return arr
Ensure that an array object has a float dtype if possible. Parameters ---------- arr : array-like The array whose data type we want to enforce as float. Returns ------- float_arr : The original array cast to the float dtype if possible. Otherwise, the original array is returned.
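The sketch below illustrates each branch, assuming ensure_float is importable from pandas.core.dtypes.common in the pandas version this snippet was taken from (it is an internal helper, not public API).

import numpy as np
import pandas as pd
from pandas.core.dtypes.common import ensure_float  # internal helper

print(ensure_float(np.array([1, 2, 3])).dtype)      # float64: ints are cast
print(ensure_float(np.array([True, False])).dtype)  # float64: bools are cast

# Nullable extension arrays become NumPy floats, with NaN filling in NA.
print(ensure_float(pd.array([1, None], dtype="Int64")))  # [ 1. nan]

obj = np.array(["a", "b"])
print(ensure_float(obj) is obj)  # True: non-numeric arrays pass through untouched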
173,060
from __future__ import annotations from typing import ( Any, Callable, ) import warnings import numpy as np from pandas._libs import ( Interval, Period, algos, lib, ) from pandas._libs.tslibs import conversion from pandas._typing import ( ArrayLike, DtypeObj, ) from pandas.core.dtypes.base import _registry as registry from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ABCIndex from pandas.core.dtypes.inference import ( is_array_like, is_bool, is_complex, is_dataclass, is_decimal, is_dict_like, is_file_like, is_float, is_hashable, is_integer, is_interval, is_iterator, is_list_like, is_named_tuple, is_nested_list_like, is_number, is_re, is_re_compilable, is_scalar, is_sequence, ) is_integer = lib.is_integer is_float = lib.is_float is_scalar = lib.is_scalar The provided code snippet includes necessary dependencies for implementing the `ensure_python_int` function. Write a Python function `def ensure_python_int(value: int | np.integer) -> int` to solve the following problem: Ensure that a value is a python int. Parameters ---------- value: int or numpy.integer Returns ------- int Raises ------ TypeError: if the value isn't an int or can't be converted to one. Here is the function: def ensure_python_int(value: int | np.integer) -> int: """ Ensure that a value is a python int. Parameters ---------- value: int or numpy.integer Returns ------- int Raises ------ TypeError: if the value isn't an int or can't be converted to one. """ if not (is_integer(value) or is_float(value)): if not is_scalar(value): raise TypeError( f"Value needs to be a scalar value, was type {type(value).__name__}" ) raise TypeError(f"Wrong type {type(value)} for value {value}") try: new_value = int(value) assert new_value == value except (TypeError, ValueError, AssertionError) as err: raise TypeError(f"Wrong type {type(value)} for value {value}") from err return new_value
Ensure that a value is a python int. Parameters ---------- value : int or numpy.integer Returns ------- int Raises ------ TypeError If the value isn't an int or can't be converted to one.
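A quick behaviour sketch, assuming the internal import path pandas.core.dtypes.common:

import numpy as np
from pandas.core.dtypes.common import ensure_python_int  # internal helper

print(ensure_python_int(np.int32(7)))  # 7, as a plain Python int
print(ensure_python_int(5.0))          # 5: integral floats are accepted

try:
    ensure_python_int(5.5)             # converting would lose the .5
except TypeError as exc:
    print("raised:", exc)

try:
    ensure_python_int([1])             # non-scalars are rejected early
except TypeError as exc:
    print("raised:", exc)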
173,061
from __future__ import annotations from typing import ( Any, Callable, ) import warnings import numpy as np from pandas._libs import ( Interval, Period, algos, lib, ) from pandas._libs.tslibs import conversion from pandas._typing import ( ArrayLike, DtypeObj, ) from pandas.core.dtypes.base import _registry as registry from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ABCIndex from pandas.core.dtypes.inference import ( is_array_like, is_bool, is_complex, is_dataclass, is_decimal, is_dict_like, is_file_like, is_float, is_hashable, is_integer, is_interval, is_iterator, is_list_like, is_named_tuple, is_nested_list_like, is_number, is_re, is_re_compilable, is_scalar, is_sequence, ) _is_scipy_sparse = None The provided code snippet includes necessary dependencies for implementing the `is_scipy_sparse` function. Write a Python function `def is_scipy_sparse(arr) -> bool` to solve the following problem: Check whether an array-like is a scipy.sparse.spmatrix instance. Parameters ---------- arr : array-like The array-like to check. Returns ------- boolean Whether or not the array-like is a scipy.sparse.spmatrix instance. Notes ----- If scipy is not installed, this function will always return False. Examples -------- >>> from scipy.sparse import bsr_matrix >>> is_scipy_sparse(bsr_matrix([1, 2, 3])) True >>> is_scipy_sparse(pd.arrays.SparseArray([1, 2, 3])) False Here is the function: def is_scipy_sparse(arr) -> bool: """ Check whether an array-like is a scipy.sparse.spmatrix instance. Parameters ---------- arr : array-like The array-like to check. Returns ------- boolean Whether or not the array-like is a scipy.sparse.spmatrix instance. Notes ----- If scipy is not installed, this function will always return False. Examples -------- >>> from scipy.sparse import bsr_matrix >>> is_scipy_sparse(bsr_matrix([1, 2, 3])) True >>> is_scipy_sparse(pd.arrays.SparseArray([1, 2, 3])) False """ global _is_scipy_sparse if _is_scipy_sparse is None: # pylint: disable=used-before-assignment try: from scipy.sparse import issparse as _is_scipy_sparse except ImportError: _is_scipy_sparse = lambda _: False assert _is_scipy_sparse is not None return _is_scipy_sparse(arr)
Check whether an array-like is a scipy.sparse.spmatrix instance. Parameters ---------- arr : array-like The array-like to check. Returns ------- boolean Whether or not the array-like is a scipy.sparse.spmatrix instance. Notes ----- If scipy is not installed, this function will always return False. Examples -------- >>> from scipy.sparse import bsr_matrix >>> is_scipy_sparse(bsr_matrix([1, 2, 3])) True >>> is_scipy_sparse(pd.arrays.SparseArray([1, 2, 3])) False
173,062
from __future__ import annotations from typing import ( Any, Callable, ) import warnings import numpy as np from pandas._libs import ( Interval, Period, algos, lib, ) from pandas._libs.tslibs import conversion from pandas._typing import ( ArrayLike, DtypeObj, ) from pandas.core.dtypes.base import _registry as registry from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ABCIndex from pandas.core.dtypes.inference import ( is_array_like, is_bool, is_complex, is_dataclass, is_decimal, is_dict_like, is_file_like, is_float, is_hashable, is_integer, is_interval, is_iterator, is_list_like, is_named_tuple, is_nested_list_like, is_number, is_re, is_re_compilable, is_scalar, is_sequence, ) def classes(*klasses) -> Callable: """Evaluate if the tipo is a subclass of the klasses.""" return lambda tipo: issubclass(tipo, klasses) def _is_dtype_type(arr_or_dtype, condition) -> bool: """ Return true if the condition is satisfied for the arr_or_dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype object whose dtype we want to extract. condition : callable[Union[np.dtype, ExtensionDtypeType]] Returns ------- bool : if the condition is satisfied for the arr_or_dtype """ if arr_or_dtype is None: return condition(type(None)) # fastpath if isinstance(arr_or_dtype, np.dtype): return condition(arr_or_dtype.type) elif isinstance(arr_or_dtype, type): if issubclass(arr_or_dtype, ExtensionDtype): arr_or_dtype = arr_or_dtype.type return condition(np.dtype(arr_or_dtype).type) # if we have an array-like if hasattr(arr_or_dtype, "dtype"): arr_or_dtype = arr_or_dtype.dtype # we are not possibly a dtype elif is_list_like(arr_or_dtype): return condition(type(None)) try: tipo = pandas_dtype(arr_or_dtype).type except (TypeError, ValueError): if is_scalar(arr_or_dtype): return condition(type(None)) return False return condition(tipo) The provided code snippet includes necessary dependencies for implementing the `is_int64_dtype` function. Write a Python function `def is_int64_dtype(arr_or_dtype) -> bool` to solve the following problem: Check whether the provided array or dtype is of the int64 dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of the int64 dtype. Notes ----- Depending on system architecture, the return value of `is_int64_dtype( int)` will be True if the OS uses 64-bit integers and False if the OS uses 32-bit integers. Examples -------- >>> from pandas.api.types import is_int64_dtype >>> is_int64_dtype(str) False >>> is_int64_dtype(np.int32) False >>> is_int64_dtype(np.int64) True >>> is_int64_dtype('int8') False >>> is_int64_dtype('Int8') False >>> is_int64_dtype(pd.Int64Dtype) True >>> is_int64_dtype(float) False >>> is_int64_dtype(np.uint64) # unsigned False >>> is_int64_dtype(np.array(['a', 'b'])) False >>> is_int64_dtype(np.array([1, 2], dtype=np.int64)) True >>> is_int64_dtype(pd.Index([1, 2.])) # float False >>> is_int64_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned False Here is the function: def is_int64_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of the int64 dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of the int64 dtype. 
Notes ----- Depending on system architecture, the return value of `is_int64_dtype( int)` will be True if the OS uses 64-bit integers and False if the OS uses 32-bit integers. Examples -------- >>> from pandas.api.types import is_int64_dtype >>> is_int64_dtype(str) False >>> is_int64_dtype(np.int32) False >>> is_int64_dtype(np.int64) True >>> is_int64_dtype('int8') False >>> is_int64_dtype('Int8') False >>> is_int64_dtype(pd.Int64Dtype) True >>> is_int64_dtype(float) False >>> is_int64_dtype(np.uint64) # unsigned False >>> is_int64_dtype(np.array(['a', 'b'])) False >>> is_int64_dtype(np.array([1, 2], dtype=np.int64)) True >>> is_int64_dtype(pd.Index([1, 2.])) # float False >>> is_int64_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned False """ return _is_dtype_type(arr_or_dtype, classes(np.int64))
Check whether the provided array or dtype is of the int64 dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of the int64 dtype. Notes ----- Depending on system architecture, the return value of `is_int64_dtype(int)` will be True if the OS uses 64-bit integers and False if the OS uses 32-bit integers. Examples -------- >>> from pandas.api.types import is_int64_dtype >>> is_int64_dtype(str) False >>> is_int64_dtype(np.int32) False >>> is_int64_dtype(np.int64) True >>> is_int64_dtype('int8') False >>> is_int64_dtype('Int8') False >>> is_int64_dtype(pd.Int64Dtype) True >>> is_int64_dtype(float) False >>> is_int64_dtype(np.uint64) # unsigned False >>> is_int64_dtype(np.array(['a', 'b'])) False >>> is_int64_dtype(np.array([1, 2], dtype=np.int64)) True >>> is_int64_dtype(pd.Index([1, 2.])) # float False >>> is_int64_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned False
173,063
from __future__ import annotations from typing import ( Any, Callable, ) import warnings import numpy as np from pandas._libs import ( Interval, Period, algos, lib, ) from pandas._libs.tslibs import conversion from pandas._typing import ( ArrayLike, DtypeObj, ) from pandas.core.dtypes.base import _registry as registry from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ABCIndex from pandas.core.dtypes.inference import ( is_array_like, is_bool, is_complex, is_dataclass, is_decimal, is_dict_like, is_file_like, is_float, is_hashable, is_integer, is_interval, is_iterator, is_list_like, is_named_tuple, is_nested_list_like, is_number, is_re, is_re_compilable, is_scalar, is_sequence, ) def is_numeric_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a numeric dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a numeric dtype. Examples -------- >>> from pandas.api.types import is_numeric_dtype >>> is_numeric_dtype(str) False >>> is_numeric_dtype(int) True >>> is_numeric_dtype(float) True >>> is_numeric_dtype(np.uint64) True >>> is_numeric_dtype(np.datetime64) False >>> is_numeric_dtype(np.timedelta64) False >>> is_numeric_dtype(np.array(['a', 'b'])) False >>> is_numeric_dtype(pd.Series([1, 2])) True >>> is_numeric_dtype(pd.Index([1, 2.])) True >>> is_numeric_dtype(np.array([], dtype=np.timedelta64)) False """ return _is_dtype_type( arr_or_dtype, classes_and_not_datetimelike(np.number, np.bool_) ) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ._is_numeric ) def is_bool_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a boolean dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a boolean dtype. Notes ----- An ExtensionArray is considered boolean when the ``_is_boolean`` attribute is set to True. Examples -------- >>> from pandas.api.types import is_bool_dtype >>> is_bool_dtype(str) False >>> is_bool_dtype(int) False >>> is_bool_dtype(bool) True >>> is_bool_dtype(np.bool_) True >>> is_bool_dtype(np.array(['a', 'b'])) False >>> is_bool_dtype(pd.Series([1, 2])) False >>> is_bool_dtype(np.array([True, False])) True >>> is_bool_dtype(pd.Categorical([True, False])) True >>> is_bool_dtype(pd.arrays.SparseArray([True, False])) True """ if arr_or_dtype is None: return False try: dtype = get_dtype(arr_or_dtype) except (TypeError, ValueError): return False if isinstance(dtype, CategoricalDtype): arr_or_dtype = dtype.categories # now we use the special definition for Index if isinstance(arr_or_dtype, ABCIndex): # Allow Index[object] that is all-bools or Index["boolean"] return arr_or_dtype.inferred_type == "boolean" elif isinstance(dtype, ExtensionDtype): return getattr(dtype, "_is_boolean", False) return issubclass(dtype.type, np.bool_) def is_complex_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a complex dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a complex dtype. 
Examples -------- >>> from pandas.api.types import is_complex_dtype >>> is_complex_dtype(str) False >>> is_complex_dtype(int) False >>> is_complex_dtype(np.complex_) True >>> is_complex_dtype(np.array(['a', 'b'])) False >>> is_complex_dtype(pd.Series([1, 2])) False >>> is_complex_dtype(np.array([1 + 1j, 5])) True """ return _is_dtype_type(arr_or_dtype, classes(np.complexfloating)) The provided code snippet includes necessary dependencies for implementing the `is_any_real_numeric_dtype` function. Write a Python function `def is_any_real_numeric_dtype(arr_or_dtype) -> bool` to solve the following problem: Check whether the provided array or dtype is of a real number dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a real number dtype. Examples -------- >>> from pandas.api.types import is_any_real_numeric_dtype >>> is_any_real_numeric_dtype(int) True >>> is_any_real_numeric_dtype(float) True >>> is_any_real_numeric_dtype(object) False >>> is_any_real_numeric_dtype(str) False >>> is_any_real_numeric_dtype(complex(1, 2)) False >>> is_any_real_numeric_dtype(bool) False Here is the function: def is_any_real_numeric_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a real number dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a real number dtype. Examples -------- >>> from pandas.api.types import is_any_real_numeric_dtype >>> is_any_real_numeric_dtype(int) True >>> is_any_real_numeric_dtype(float) True >>> is_any_real_numeric_dtype(object) False >>> is_any_real_numeric_dtype(str) False >>> is_any_real_numeric_dtype(complex(1, 2)) False >>> is_any_real_numeric_dtype(bool) False """ return ( is_numeric_dtype(arr_or_dtype) and not is_complex_dtype(arr_or_dtype) and not is_bool_dtype(arr_or_dtype) )
Check whether the provided array or dtype is of a real number dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a real number dtype. Examples -------- >>> from pandas.api.types import is_any_real_numeric_dtype >>> is_any_real_numeric_dtype(int) True >>> is_any_real_numeric_dtype(float) True >>> is_any_real_numeric_dtype(object) False >>> is_any_real_numeric_dtype(str) False >>> is_any_real_numeric_dtype(complex(1, 2)) False >>> is_any_real_numeric_dtype(bool) False
173,064
from __future__ import annotations from typing import ( Any, Callable, ) import warnings import numpy as np from pandas._libs import ( Interval, Period, algos, lib, ) from pandas._libs.tslibs import conversion from pandas._typing import ( ArrayLike, DtypeObj, ) from pandas.core.dtypes.base import _registry as registry from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ABCIndex from pandas.core.dtypes.inference import ( is_array_like, is_bool, is_complex, is_dataclass, is_decimal, is_dict_like, is_file_like, is_float, is_hashable, is_integer, is_interval, is_iterator, is_list_like, is_named_tuple, is_nested_list_like, is_number, is_re, is_re_compilable, is_scalar, is_sequence, ) DtypeObj = Union[np.dtype, "ExtensionDtype"] The provided code snippet includes necessary dependencies for implementing the `is_ea_or_datetimelike_dtype` function. Write a Python function `def is_ea_or_datetimelike_dtype(dtype: DtypeObj | None) -> bool` to solve the following problem: Check for ExtensionDtype, datetime64 dtype, or timedelta64 dtype. Notes ----- Checks only for dtype objects, not dtype-castable strings or types. Here is the function: def is_ea_or_datetimelike_dtype(dtype: DtypeObj | None) -> bool: """ Check for ExtensionDtype, datetime64 dtype, or timedelta64 dtype. Notes ----- Checks only for dtype objects, not dtype-castable strings or types. """ return isinstance(dtype, ExtensionDtype) or ( isinstance(dtype, np.dtype) and dtype.kind in ["m", "M"] )
Check for ExtensionDtype, datetime64 dtype, or timedelta64 dtype. Notes ----- Checks only for dtype objects, not dtype-castable strings or types.
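A few illustrative calls, assuming the internal import path pandas.core.dtypes.common:

import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_ea_or_datetimelike_dtype  # internal

print(is_ea_or_datetimelike_dtype(pd.CategoricalDtype()))        # True: ExtensionDtype
print(is_ea_or_datetimelike_dtype(np.dtype("datetime64[ns]")))   # True: kind "M"
print(is_ea_or_datetimelike_dtype(np.dtype("timedelta64[ns]")))  # True: kind "m"
print(is_ea_or_datetimelike_dtype(np.dtype("int64")))            # False
print(is_ea_or_datetimelike_dtype("datetime64[ns]"))             # False: strings are not dtype objects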
173,065
from __future__ import annotations from typing import ( Any, Callable, ) import warnings import numpy as np from pandas._libs import ( Interval, Period, algos, lib, ) from pandas._libs.tslibs import conversion from pandas._typing import ( ArrayLike, DtypeObj, ) from pandas.core.dtypes.base import _registry as registry from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ABCIndex from pandas.core.dtypes.inference import ( is_array_like, is_bool, is_complex, is_dataclass, is_decimal, is_dict_like, is_file_like, is_float, is_hashable, is_integer, is_interval, is_iterator, is_list_like, is_named_tuple, is_nested_list_like, is_number, is_re, is_re_compilable, is_scalar, is_sequence, ) def is_extension_array_dtype(arr_or_dtype) -> bool: """ Check if an object is a pandas extension array type. See the :ref:`Use Guide <extending.extension-types>` for more. Parameters ---------- arr_or_dtype : object For array-like input, the ``.dtype`` attribute will be extracted. Returns ------- bool Whether the `arr_or_dtype` is an extension array type. Notes ----- This checks whether an object implements the pandas extension array interface. In pandas, this includes: * Categorical * Sparse * Interval * Period * DatetimeArray * TimedeltaArray Third-party libraries may implement arrays or types satisfying this interface as well. Examples -------- >>> from pandas.api.types import is_extension_array_dtype >>> arr = pd.Categorical(['a', 'b']) >>> is_extension_array_dtype(arr) True >>> is_extension_array_dtype(arr.dtype) True >>> arr = np.array(['a', 'b']) >>> is_extension_array_dtype(arr.dtype) False """ dtype = getattr(arr_or_dtype, "dtype", arr_or_dtype) if isinstance(dtype, ExtensionDtype): return True elif isinstance(dtype, np.dtype): return False else: return registry.find(dtype) is not None def _validate_date_like_dtype(dtype) -> None: """ Check whether the dtype is a date-like dtype. Raises an error if invalid. Parameters ---------- dtype : dtype, type The dtype to check. Raises ------ TypeError : The dtype could not be casted to a date-like dtype. ValueError : The dtype is an illegal date-like dtype (e.g. the frequency provided is too specific) """ try: typ = np.datetime_data(dtype)[0] except ValueError as e: raise TypeError(e) from e if typ not in ["generic", "ns"]: raise ValueError( f"{repr(dtype.name)} is too specific of a frequency, " f"try passing {repr(dtype.type.__name__)}" ) def pandas_dtype(dtype) -> DtypeObj: """ Convert input into a pandas only dtype object or a numpy dtype object. Parameters ---------- dtype : object to be converted Returns ------- np.dtype or a pandas dtype Raises ------ TypeError if not a dtype """ # short-circuit if isinstance(dtype, np.ndarray): return dtype.dtype elif isinstance(dtype, (np.dtype, ExtensionDtype)): return dtype # registered extension types result = registry.find(dtype) if result is not None: return result # try a numpy dtype # raise a consistent TypeError if failed try: with warnings.catch_warnings(): # GH#51523 - Series.astype(np.integer) doesn't show # numpy deprication warning of np.integer # Hence enabling DeprecationWarning warnings.simplefilter("always", DeprecationWarning) npdtype = np.dtype(dtype) except SyntaxError as err: # np.dtype uses `eval` which can raise SyntaxError raise TypeError(f"data type '{dtype}' not understood") from err # Any invalid dtype (such as pd.Timestamp) should raise an error. 
# np.dtype(invalid_type).kind = 0 for such objects. However, this will # also catch some valid dtypes such as object, np.object_ and 'object' # which we safeguard against by catching them earlier and returning # np.dtype(valid_dtype) before this condition is evaluated. if is_hashable(dtype) and dtype in [object, np.object_, "object", "O"]: # check hashability to avoid errors/DeprecationWarning when we get # here and `dtype` is an array return npdtype elif npdtype.kind == "O": raise TypeError(f"dtype '{dtype}' not understood") return npdtype class DatetimeTZDtype(PandasExtensionDtype): """ An ExtensionDtype for timezone-aware datetime data. **This is not an actual numpy dtype**, but a duck type. Parameters ---------- unit : str, default "ns" The precision of the datetime data. Currently limited to ``"ns"``. tz : str, int, or datetime.tzinfo The timezone. Attributes ---------- unit tz Methods ------- None Raises ------ pytz.UnknownTimeZoneError When the requested timezone cannot be found. Examples -------- >>> pd.DatetimeTZDtype(tz='UTC') datetime64[ns, UTC] >>> pd.DatetimeTZDtype(tz='dateutil/US/Central') datetime64[ns, tzfile('/usr/share/zoneinfo/US/Central')] """ type: type[Timestamp] = Timestamp kind: str_type = "M" num = 101 base = np.dtype("M8[ns]") # TODO: depend on reso? _metadata = ("unit", "tz") _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]") _cache_dtypes: dict[str_type, PandasExtensionDtype] = {} def na_value(self) -> NaTType: return NaT # error: Signature of "str" incompatible with supertype "PandasExtensionDtype" def str(self) -> str: # type: ignore[override] return f"|M8[{self.unit}]" def __init__(self, unit: str_type | DatetimeTZDtype = "ns", tz=None) -> None: if isinstance(unit, DatetimeTZDtype): # error: "str" has no attribute "tz" unit, tz = unit.unit, unit.tz # type: ignore[attr-defined] if unit != "ns": if isinstance(unit, str) and tz is None: # maybe a string like datetime64[ns, tz], which we support for # now. result = type(self).construct_from_string(unit) unit = result.unit tz = result.tz msg = ( f"Passing a dtype alias like 'datetime64[ns, {tz}]' " "to DatetimeTZDtype is no longer supported. Use " "'DatetimeTZDtype.construct_from_string()' instead." ) raise ValueError(msg) if unit not in ["s", "ms", "us", "ns"]: raise ValueError("DatetimeTZDtype only supports s, ms, us, ns units") if tz: tz = timezones.maybe_get_tz(tz) tz = timezones.tz_standardize(tz) elif tz is not None: raise pytz.UnknownTimeZoneError(tz) if tz is None: raise TypeError("A 'tz' is required.") self._unit = unit self._tz = tz def _creso(self) -> int: """ The NPY_DATETIMEUNIT corresponding to this dtype's resolution. """ return abbrev_to_npy_unit(self.unit) def unit(self) -> str_type: """ The precision of the datetime data. """ return self._unit def tz(self) -> tzinfo: """ The timezone. """ return self._tz def construct_array_type(cls) -> type_t[DatetimeArray]: """ Return the array type associated with this dtype. Returns ------- type """ from pandas.core.arrays import DatetimeArray return DatetimeArray def construct_from_string(cls, string: str_type) -> DatetimeTZDtype: """ Construct a DatetimeTZDtype from a string. Parameters ---------- string : str The string alias for this DatetimeTZDtype. Should be formatted like ``datetime64[ns, <tz>]``, where ``<tz>`` is the timezone name. 
Examples -------- >>> DatetimeTZDtype.construct_from_string('datetime64[ns, UTC]') datetime64[ns, UTC] """ if not isinstance(string, str): raise TypeError( f"'construct_from_string' expects a string, got {type(string)}" ) msg = f"Cannot construct a 'DatetimeTZDtype' from '{string}'" match = cls._match.match(string) if match: d = match.groupdict() try: return cls(unit=d["unit"], tz=d["tz"]) except (KeyError, TypeError, ValueError) as err: # KeyError if maybe_get_tz tries and fails to get a # pytz timezone (actually pytz.UnknownTimeZoneError). # TypeError if we pass a nonsense tz; # ValueError if we pass a unit other than "ns" raise TypeError(msg) from err raise TypeError(msg) def __str__(self) -> str_type: return f"datetime64[{self.unit}, {self.tz}]" def name(self) -> str_type: """A string representation of the dtype.""" return str(self) def __hash__(self) -> int: # make myself hashable # TODO: update this. return hash(str(self)) def __eq__(self, other: Any) -> bool: if isinstance(other, str): if other.startswith("M8["): other = f"datetime64[{other[3:]}" return other == self.name return ( isinstance(other, DatetimeTZDtype) and self.unit == other.unit and tz_compare(self.tz, other.tz) ) def __setstate__(self, state) -> None: # for pickle compat. __get_state__ is defined in the # PandasExtensionDtype superclass and uses the public properties to # pickle -> need to set the settable private ones here (see GH26067) self._tz = state["tz"] self._unit = state["unit"] The provided code snippet includes necessary dependencies for implementing the `infer_dtype_from_object` function. Write a Python function `def infer_dtype_from_object(dtype) -> type` to solve the following problem: Get a numpy dtype.type-style object for a dtype object. This methods also includes handling of the datetime64[ns] and datetime64[ns, TZ] objects. If no dtype can be found, we return ``object``. Parameters ---------- dtype : dtype, type The dtype object whose numpy dtype.type-style object we want to extract. Returns ------- type Here is the function: def infer_dtype_from_object(dtype) -> type: """ Get a numpy dtype.type-style object for a dtype object. This methods also includes handling of the datetime64[ns] and datetime64[ns, TZ] objects. If no dtype can be found, we return ``object``. Parameters ---------- dtype : dtype, type The dtype object whose numpy dtype.type-style object we want to extract. Returns ------- type """ if isinstance(dtype, type) and issubclass(dtype, np.generic): # Type object from a dtype return dtype elif isinstance(dtype, (np.dtype, ExtensionDtype)): # dtype object try: _validate_date_like_dtype(dtype) except TypeError: # Should still pass if we don't have a date-like pass return dtype.type try: dtype = pandas_dtype(dtype) except TypeError: pass if is_extension_array_dtype(dtype): return dtype.type elif isinstance(dtype, str): # TODO(jreback) # should deprecate these if dtype in ["datetimetz", "datetime64tz"]: return DatetimeTZDtype.type elif dtype in ["period"]: raise NotImplementedError if dtype in ["datetime", "timedelta"]: dtype += "64" try: return infer_dtype_from_object(getattr(np, dtype)) except (AttributeError, TypeError): # Handles cases like get_dtype(int) i.e., # Python objects that are valid dtypes # (unlike user-defined types, in general) # # TypeError handles the float16 type code of 'e' # further handle internal types pass return infer_dtype_from_object(np.dtype(dtype))
Get a numpy dtype.type-style object for a dtype object. This method also includes handling of the datetime64[ns] and datetime64[ns, TZ] objects. If no dtype can be found, we return ``object``. Parameters ---------- dtype : dtype, type The dtype object whose numpy dtype.type-style object we want to extract. Returns ------- type
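A minimal usage sketch, assuming the helper is importable from `pandas.core.dtypes.common` as in the source shown above (an internal module, so the path may shift between pandas releases):

import numpy as np
from pandas.core.dtypes.common import infer_dtype_from_object

# Numpy scalar types are returned unchanged.
infer_dtype_from_object(np.int64)    # <class 'numpy.int64'>
# String aliases resolve through pandas_dtype / np.dtype.
infer_dtype_from_object("float64")   # <class 'numpy.float64'>
# "datetime" and "timedelta" are widened to their 64-bit forms.
infer_dtype_from_object("datetime")  # <class 'numpy.datetime64'>
# Built-in Python types also resolve via np.dtype.
infer_dtype_from_object(str)         # <class 'numpy.str_'>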
173,066
from __future__ import annotations from typing import ( Any, Callable, ) import warnings import numpy as np from pandas._libs import ( Interval, Period, algos, lib, ) from pandas._libs.tslibs import conversion from pandas._typing import ( ArrayLike, DtypeObj, ) from pandas.core.dtypes.base import _registry as registry from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ABCIndex from pandas.core.dtypes.inference import ( is_array_like, is_bool, is_complex, is_dataclass, is_decimal, is_dict_like, is_file_like, is_float, is_hashable, is_integer, is_interval, is_iterator, is_list_like, is_named_tuple, is_nested_list_like, is_number, is_re, is_re_compilable, is_scalar, is_sequence, ) def is_hashable(obj) -> bool: """ Return True if hash(obj) will succeed, False otherwise. Some types will pass a test against collections.abc.Hashable but fail when they are actually hashed with hash(). Distinguish between these and other types by trying the call to hash() and seeing if they raise TypeError. Returns ------- bool Examples -------- >>> import collections >>> from pandas.api.types import is_hashable >>> a = ([],) >>> isinstance(a, collections.abc.Hashable) True >>> is_hashable(a) False """ # Unfortunately, we can't use isinstance(obj, collections.abc.Hashable), # which can be faster than calling hash. That is because numpy scalars # fail this test. # Reconsider this decision once this numpy bug is fixed: # https://github.com/numpy/numpy/issues/5562 try: hash(obj) except TypeError: return False else: return True The provided code snippet includes necessary dependencies for implementing the `validate_all_hashable` function. Write a Python function `def validate_all_hashable(*args, error_name: str | None = None) -> None` to solve the following problem: Return None if all args are hashable, else raise a TypeError. Parameters ---------- *args Arguments to validate. error_name : str, optional The name to use if error Raises ------ TypeError : If an argument is not hashable Returns ------- None Here is the function: def validate_all_hashable(*args, error_name: str | None = None) -> None: """ Return None if all args are hashable, else raise a TypeError. Parameters ---------- *args Arguments to validate. error_name : str, optional The name to use if error Raises ------ TypeError : If an argument is not hashable Returns ------- None """ if not all(is_hashable(arg) for arg in args): if error_name: raise TypeError(f"{error_name} must be a hashable type") raise TypeError("All elements must be hashable")
Return None if all args are hashable, else raise a TypeError. Parameters ---------- *args Arguments to validate. error_name : str, optional The name to use in the error message if an argument is not hashable. Raises ------ TypeError : If an argument is not hashable Returns ------- None
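A short usage sketch (assuming the same internal `pandas.core.dtypes.common` location as the source above):

from pandas.core.dtypes.common import validate_all_hashable

validate_all_hashable("name", 0, ("a", "b"))  # all hashable -> returns None
try:
    validate_all_hashable(["a", "list"], error_name="Series.name")
except TypeError as err:
    print(err)  # Series.name must be a hashable type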
173,067
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, TypeVar, cast, overload, ) import numpy as np from pandas._libs import missing as libmissing from pandas._libs.hashtable import object_hash from pandas._typing import ( DtypeObj, Shape, npt, type_t, ) from pandas.errors import AbstractMethodError from pandas.core.dtypes.generic import ( ABCDataFrame, ABCIndex, ABCSeries, ) _registry = Registry() The provided code snippet includes necessary dependencies for implementing the `register_extension_dtype` function. Write a Python function `def register_extension_dtype(cls: type_t[ExtensionDtypeT]) -> type_t[ExtensionDtypeT]` to solve the following problem: Register an ExtensionType with pandas as class decorator. This enables operations like ``.astype(name)`` for the name of the ExtensionDtype. Returns ------- callable A class decorator. Examples -------- >>> from pandas.api.extensions import register_extension_dtype, ExtensionDtype >>> @register_extension_dtype ... class MyExtensionDtype(ExtensionDtype): ... name = "myextension" Here is the function: def register_extension_dtype(cls: type_t[ExtensionDtypeT]) -> type_t[ExtensionDtypeT]: """ Register an ExtensionType with pandas as class decorator. This enables operations like ``.astype(name)`` for the name of the ExtensionDtype. Returns ------- callable A class decorator. Examples -------- >>> from pandas.api.extensions import register_extension_dtype, ExtensionDtype >>> @register_extension_dtype ... class MyExtensionDtype(ExtensionDtype): ... name = "myextension" """ _registry.register(cls) return cls
Register an ExtensionType with pandas as class decorator. This enables operations like ``.astype(name)`` for the name of the ExtensionDtype. Returns ------- callable A class decorator. Examples -------- >>> from pandas.api.extensions import register_extension_dtype, ExtensionDtype >>> @register_extension_dtype ... class MyExtensionDtype(ExtensionDtype): ... name = "myextension"
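A hedged sketch of the decorator in use; `MyExtensionDtype` is a deliberately skeletal illustration, and a real dtype would also implement `type` and `construct_array_type`:

import pandas as pd
from pandas.api.extensions import ExtensionDtype, register_extension_dtype

@register_extension_dtype
class MyExtensionDtype(ExtensionDtype):
    name = "myextension"

# Once registered, the string alias resolves through the dtype registry,
# e.g. pd.api.types.pandas_dtype("myextension") should return MyExtensionDtype().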
173,068
from __future__ import annotations from decimal import Decimal from functools import partial from typing import ( TYPE_CHECKING, overload, ) import numpy as np from pandas._config import get_option from pandas._libs import lib import pandas._libs.missing as libmissing from pandas._libs.tslibs import ( NaT, iNaT, ) from pandas.core.dtypes.common import ( DT64NS_DTYPE, TD64NS_DTYPE, ensure_object, is_bool_dtype, is_categorical_dtype, is_complex_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer_dtype, is_object_dtype, is_scalar, is_string_or_object_np_dtype, needs_i8_conversion, ) from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCExtensionArray, ABCIndex, ABCMultiIndex, ABCSeries, ) from pandas.core.dtypes.inference import is_list_like def isna(obj: Scalar) -> bool: ... def isna( obj: ArrayLike | Index | list, ) -> npt.NDArray[np.bool_]: ... def isna(obj: NDFrameT) -> NDFrameT: ... def isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]: ... def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: ... def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: """ Detect missing values for an array-like object. This function takes a scalar or array-like object and indicates whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). Parameters ---------- obj : scalar or array-like Object to check for null or missing values. Returns ------- bool or array-like of bool For scalar input, returns a scalar boolean. For array input, returns an array of boolean indicating whether each corresponding element is missing. See Also -------- notna : Boolean inverse of pandas.isna. Series.isna : Detect missing values in a Series. DataFrame.isna : Detect missing values in a DataFrame. Index.isna : Detect missing values in an Index. Examples -------- Scalar arguments (including strings) result in a scalar boolean. >>> pd.isna('dog') False >>> pd.isna(pd.NA) True >>> pd.isna(np.nan) True ndarrays result in an ndarray of booleans. >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) >>> array array([[ 1., nan, 3.], [ 4., 5., nan]]) >>> pd.isna(array) array([[False, True, False], [False, False, True]]) For indexes, an ndarray of booleans is returned. >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, ... "2017-07-08"]) >>> index DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], dtype='datetime64[ns]', freq=None) >>> pd.isna(index) array([False, False, True, False]) For Series and DataFrame, the same type is returned, containing booleans. >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df 0 1 2 0 ant bee cat 1 dog None fly >>> pd.isna(df) 0 1 2 0 False False False 1 False True False >>> pd.isna(df[1]) 0 False 1 True Name: 1, dtype: bool """ return _isna(obj) def is_integer_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of an integer dtype. Unlike in `is_any_int_dtype`, timedelta64 instances will return False. The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered as integer by this function. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of an integer dtype and not an instance of timedelta64. 
Examples -------- >>> is_integer_dtype(str) False >>> is_integer_dtype(int) True >>> is_integer_dtype(float) False >>> is_integer_dtype(np.uint64) True >>> is_integer_dtype('int8') True >>> is_integer_dtype('Int8') True >>> is_integer_dtype(pd.Int8Dtype) True >>> is_integer_dtype(np.datetime64) False >>> is_integer_dtype(np.timedelta64) False >>> is_integer_dtype(np.array(['a', 'b'])) False >>> is_integer_dtype(pd.Series([1, 2])) True >>> is_integer_dtype(np.array([], dtype=np.timedelta64)) False >>> is_integer_dtype(pd.Index([1, 2.])) # float False """ return _is_dtype_type( arr_or_dtype, classes_and_not_datetimelike(np.integer) ) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "iu" ) def is_bool_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of a boolean dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a boolean dtype. Notes ----- An ExtensionArray is considered boolean when the ``_is_boolean`` attribute is set to True. Examples -------- >>> from pandas.api.types import is_bool_dtype >>> is_bool_dtype(str) False >>> is_bool_dtype(int) False >>> is_bool_dtype(bool) True >>> is_bool_dtype(np.bool_) True >>> is_bool_dtype(np.array(['a', 'b'])) False >>> is_bool_dtype(pd.Series([1, 2])) False >>> is_bool_dtype(np.array([True, False])) True >>> is_bool_dtype(pd.Categorical([True, False])) True >>> is_bool_dtype(pd.arrays.SparseArray([True, False])) True """ if arr_or_dtype is None: return False try: dtype = get_dtype(arr_or_dtype) except (TypeError, ValueError): return False if isinstance(dtype, CategoricalDtype): arr_or_dtype = dtype.categories # now we use the special definition for Index if isinstance(arr_or_dtype, ABCIndex): # Allow Index[object] that is all-bools or Index["boolean"] return arr_or_dtype.inferred_type == "boolean" elif isinstance(dtype, ExtensionDtype): return getattr(dtype, "_is_boolean", False) return issubclass(dtype.type, np.bool_) The provided code snippet includes necessary dependencies for implementing the `isna_compat` function. Write a Python function `def isna_compat(arr, fill_value=np.nan) -> bool` to solve the following problem: Parameters ---------- arr: a numpy array fill_value: fill value, default to np.nan Returns ------- True if we can fill using this fill_value Here is the function: def isna_compat(arr, fill_value=np.nan) -> bool: """ Parameters ---------- arr: a numpy array fill_value: fill value, default to np.nan Returns ------- True if we can fill using this fill_value """ if isna(fill_value): dtype = arr.dtype return not (is_bool_dtype(dtype) or is_integer_dtype(dtype)) return True
Parameters ---------- arr : np.ndarray fill_value : scalar, default np.nan Returns ------- bool True if we can fill using this fill_value
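A usage sketch, assuming `isna_compat` is importable from the internal `pandas.core.dtypes.missing` module the surrounding code comes from:

import numpy as np
from pandas.core.dtypes.missing import isna_compat

isna_compat(np.array([1.5, 2.5]))             # True: float arrays can hold NaN
isna_compat(np.array([1, 2], dtype="int64"))  # False: int64 cannot represent NaN
isna_compat(np.array([1, 2]), fill_value=0)   # True: a non-NA fill value always fits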
173,069
from __future__ import annotations from decimal import Decimal from functools import partial from typing import ( TYPE_CHECKING, overload, ) import numpy as np from pandas._config import get_option from pandas._libs import lib import pandas._libs.missing as libmissing from pandas._libs.tslibs import ( NaT, iNaT, ) from pandas.core.dtypes.common import ( DT64NS_DTYPE, TD64NS_DTYPE, ensure_object, is_bool_dtype, is_categorical_dtype, is_complex_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer_dtype, is_object_dtype, is_scalar, is_string_or_object_np_dtype, needs_i8_conversion, ) from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCExtensionArray, ABCIndex, ABCMultiIndex, ABCSeries, ) from pandas.core.dtypes.inference import is_list_like def array_equivalent( left, right, strict_nan: bool = False, dtype_equal: bool = False, ) -> bool: """ True if two arrays, left and right, have equal non-NaN elements, and NaNs in corresponding locations. False otherwise. It is assumed that left and right are NumPy arrays of the same dtype. The behavior of this function (particularly with respect to NaNs) is not defined if the dtypes are different. Parameters ---------- left, right : ndarrays strict_nan : bool, default False If True, consider NaN and None to be different. dtype_equal : bool, default False Whether `left` and `right` are known to have the same dtype according to `is_dtype_equal`. Some methods like `BlockManager.equals`. require that the dtypes match. Setting this to ``True`` can improve performance, but will give different results for arrays that are equal but different dtypes. Returns ------- b : bool Returns True if the arrays are equivalent. Examples -------- >>> array_equivalent( ... np.array([1, 2, np.nan]), ... np.array([1, 2, np.nan])) True >>> array_equivalent( ... np.array([1, np.nan, 2]), ... np.array([1, 2, np.nan])) False """ left, right = np.asarray(left), np.asarray(right) # shape compat if left.shape != right.shape: return False if dtype_equal: # fastpath when we require that the dtypes match (Block.equals) if left.dtype.kind in ["f", "c"]: return _array_equivalent_float(left, right) elif needs_i8_conversion(left.dtype): return _array_equivalent_datetimelike(left, right) elif is_string_or_object_np_dtype(left.dtype): # TODO: fastpath for pandas' StringDtype return _array_equivalent_object(left, right, strict_nan) else: return np.array_equal(left, right) # Slow path when we allow comparing different dtypes. # Object arrays can contain None, NaN and NaT. # string dtypes must be come to this path for NumPy 1.7.1 compat if left.dtype.kind in "OSU" or right.dtype.kind in "OSU": # Note: `in "OSU"` is non-trivially faster than `in ["O", "S", "U"]` # or `in ("O", "S", "U")` return _array_equivalent_object(left, right, strict_nan) # NaNs can occur in float and complex arrays. 
if is_float_dtype(left.dtype) or is_complex_dtype(left.dtype): if not (left.size and right.size): return True return ((left == right) | (isna(left) & isna(right))).all() elif needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype): # datetime64, timedelta64, Period if not is_dtype_equal(left.dtype, right.dtype): return False left = left.view("i8") right = right.view("i8") # if we have structured dtypes, compare first if ( left.dtype.type is np.void or right.dtype.type is np.void ) and left.dtype != right.dtype: return False return np.array_equal(left, right) def is_dtype_equal(source, target) -> bool: """ Check if two dtypes are equal. Parameters ---------- source : The first dtype to compare target : The second dtype to compare Returns ------- boolean Whether or not the two dtypes are equal. Examples -------- >>> is_dtype_equal(int, float) False >>> is_dtype_equal("int", int) True >>> is_dtype_equal(object, "category") False >>> is_dtype_equal(CategoricalDtype(), "category") True >>> is_dtype_equal(DatetimeTZDtype(tz="UTC"), "datetime64") False """ if isinstance(target, str): if not isinstance(source, str): # GH#38516 ensure we get the same behavior from # is_dtype_equal(CDT, "category") and CDT == "category" try: src = get_dtype(source) if isinstance(src, ExtensionDtype): return src == target except (TypeError, AttributeError, ImportError): return False elif isinstance(source, str): return is_dtype_equal(target, source) try: source = get_dtype(source) target = get_dtype(target) return source == target except (TypeError, AttributeError, ImportError): # invalid comparison # object == category will hit this return False ABCExtensionArray = cast( "Type[ExtensionArray]", create_pandas_abc_type( "ABCExtensionArray", "_typ", # Note: IntervalArray and SparseArray are included bc they have _typ="extension" {"extension", "categorical", "periodarray", "datetimearray", "timedeltaarray"}, ), ) ArrayLike = Union["ExtensionArray", np.ndarray] The provided code snippet includes necessary dependencies for implementing the `array_equals` function. Write a Python function `def array_equals(left: ArrayLike, right: ArrayLike) -> bool` to solve the following problem: ExtensionArray-compatible implementation of array_equivalent. Here is the function: def array_equals(left: ArrayLike, right: ArrayLike) -> bool: """ ExtensionArray-compatible implementation of array_equivalent. """ if not is_dtype_equal(left.dtype, right.dtype): return False elif isinstance(left, ABCExtensionArray): return left.equals(right) else: return array_equivalent(left, right, dtype_equal=True)
ExtensionArray-compatible implementation of array_equivalent.
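A usage sketch under the same assumption that the helper lives in `pandas.core.dtypes.missing`:

import numpy as np
import pandas as pd
from pandas.core.dtypes.missing import array_equals

left = pd.array([1, pd.NA, 3], dtype="Int64")
right = pd.array([1, pd.NA, 3], dtype="Int64")
array_equals(left, right)  # True: dispatches to ExtensionArray.equals

array_equals(np.array([1.0, np.nan]), np.array([1.0, np.nan]))  # True, via array_equivalent
array_equals(np.array([1, 2]), np.array([1.0, 2.0]))            # False: dtypes differ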
173,070
from __future__ import annotations from decimal import Decimal from functools import partial from typing import ( TYPE_CHECKING, overload, ) import numpy as np from pandas._config import get_option from pandas._libs import lib import pandas._libs.missing as libmissing from pandas._libs.tslibs import ( NaT, iNaT, ) from pandas.core.dtypes.common import ( DT64NS_DTYPE, TD64NS_DTYPE, ensure_object, is_bool_dtype, is_categorical_dtype, is_complex_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer_dtype, is_object_dtype, is_scalar, is_string_or_object_np_dtype, needs_i8_conversion, ) from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCExtensionArray, ABCIndex, ABCMultiIndex, ABCSeries, ) from pandas.core.dtypes.inference import is_list_like DT64NS_DTYPE = conversion.DT64NS_DTYPE TD64NS_DTYPE = conversion.TD64NS_DTYPE ensure_object = algos.ensure_object def is_object_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of the object dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the object dtype. Examples -------- >>> from pandas.api.types import is_object_dtype >>> is_object_dtype(object) True >>> is_object_dtype(int) False >>> is_object_dtype(np.array([], dtype=object)) True >>> is_object_dtype(np.array([], dtype=int)) False >>> is_object_dtype([1, 2, 3]) False """ return _is_dtype_type(arr_or_dtype, classes(np.object_)) def needs_i8_conversion(arr_or_dtype) -> bool: """ Check whether the array or dtype should be converted to int64. An array-like or dtype "needs" such a conversion if the array-like or dtype is of a datetime-like dtype Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype should be converted to int64. Examples -------- >>> needs_i8_conversion(str) False >>> needs_i8_conversion(np.int64) False >>> needs_i8_conversion(np.datetime64) True >>> needs_i8_conversion(np.array(['a', 'b'])) False >>> needs_i8_conversion(pd.Series([1, 2])) False >>> needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]")) True >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) True """ if arr_or_dtype is None: return False if isinstance(arr_or_dtype, np.dtype): return arr_or_dtype.kind in ["m", "M"] elif isinstance(arr_or_dtype, ExtensionDtype): return isinstance(arr_or_dtype, (PeriodDtype, DatetimeTZDtype)) try: dtype = get_dtype(arr_or_dtype) except (TypeError, ValueError): return False if isinstance(dtype, np.dtype): return dtype.kind in ["m", "M"] return isinstance(dtype, (PeriodDtype, DatetimeTZDtype)) is_list_like = lib.is_list_like The provided code snippet includes necessary dependencies for implementing the `infer_fill_value` function. 
Write a Python function `def infer_fill_value(val)` to solve the following problem: Infer the fill value for the nan/NaT from the provided scalar/ndarray/list-like. If we are a NaT, return the correctly dtyped element to provide proper block construction. Here is the function: def infer_fill_value(val): """ Infer the fill value for the nan/NaT from the provided scalar/ndarray/list-like. If we are a NaT, return the correctly dtyped element to provide proper block construction. """ if not is_list_like(val): val = [val] val = np.array(val, copy=False) if needs_i8_conversion(val.dtype): return np.array("NaT", dtype=val.dtype) elif is_object_dtype(val.dtype): dtype = lib.infer_dtype(ensure_object(val), skipna=False) if dtype in ["datetime", "datetime64"]: return np.array("NaT", dtype=DT64NS_DTYPE) elif dtype in ["timedelta", "timedelta64"]: return np.array("NaT", dtype=TD64NS_DTYPE) return np.nan
Infer the fill value for the nan/NaT from the provided scalar/ndarray/list-like. If we are a NaT, return the correctly dtyped element to provide proper block construction.
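A usage sketch (same `pandas.core.dtypes.missing` import assumption as above):

import pandas as pd
from pandas.core.dtypes.missing import infer_fill_value

infer_fill_value(pd.Timestamp("2016-01-01"))  # array('NaT', dtype='datetime64[ns]')
infer_fill_value(pd.Timedelta("1 day"))       # array('NaT', dtype='timedelta64[ns]')
infer_fill_value(1.5)                         # nan (plain float fill)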
173,071
from __future__ import annotations from decimal import Decimal from functools import partial from typing import ( TYPE_CHECKING, overload, ) import numpy as np from pandas._config import get_option from pandas._libs import lib import pandas._libs.missing as libmissing from pandas._libs.tslibs import ( NaT, iNaT, ) from pandas.core.dtypes.common import ( DT64NS_DTYPE, TD64NS_DTYPE, ensure_object, is_bool_dtype, is_categorical_dtype, is_complex_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer_dtype, is_object_dtype, is_scalar, is_string_or_object_np_dtype, needs_i8_conversion, ) from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCExtensionArray, ABCIndex, ABCMultiIndex, ABCSeries, ) from pandas.core.dtypes.inference import is_list_like The provided code snippet includes necessary dependencies for implementing the `maybe_fill` function. Write a Python function `def maybe_fill(arr: np.ndarray) -> np.ndarray` to solve the following problem: Fill numpy.ndarray with NaN, unless we have a integer or boolean dtype. Here is the function: def maybe_fill(arr: np.ndarray) -> np.ndarray: """ Fill numpy.ndarray with NaN, unless we have a integer or boolean dtype. """ if arr.dtype.kind not in ("u", "i", "b"): arr.fill(np.nan) return arr
Fill numpy.ndarray with NaN, unless we have an integer or boolean dtype.
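A usage sketch (same import assumption); note the fill happens in place and the same array is returned:

import numpy as np
from pandas.core.dtypes.missing import maybe_fill

maybe_fill(np.empty(3, dtype="float64"))  # array([nan, nan, nan])
maybe_fill(np.empty(3, dtype="int64"))    # returned unchanged: int64 cannot hold NaN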
173,072
from __future__ import annotations from decimal import Decimal from functools import partial from typing import ( TYPE_CHECKING, overload, ) import numpy as np from pandas._config import get_option from pandas._libs import lib import pandas._libs.missing as libmissing from pandas._libs.tslibs import ( NaT, iNaT, ) from pandas.core.dtypes.common import ( DT64NS_DTYPE, TD64NS_DTYPE, ensure_object, is_bool_dtype, is_categorical_dtype, is_complex_dtype, is_dtype_equal, is_extension_array_dtype, is_float_dtype, is_integer_dtype, is_object_dtype, is_scalar, is_string_or_object_np_dtype, needs_i8_conversion, ) from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PeriodDtype, ) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCExtensionArray, ABCIndex, ABCMultiIndex, ABCSeries, ) from pandas.core.dtypes.inference import is_list_like nan_checker = np.isnan INF_AS_NA = False def _isna_array(values: ArrayLike, inf_as_na: bool = False): """ Return an array indicating which values of the input array are NaN / NA. Parameters ---------- obj: ndarray or ExtensionArray The input array whose elements are to be checked. inf_as_na: bool Whether or not to treat infinite values as NA. Returns ------- array-like Array of boolean values denoting the NA status of each element. """ dtype = values.dtype if not isinstance(values, np.ndarray): # i.e. ExtensionArray if inf_as_na and is_categorical_dtype(dtype): result = libmissing.isnaobj(values.to_numpy(), inf_as_na=inf_as_na) else: # error: Incompatible types in assignment (expression has type # "Union[ndarray[Any, Any], ExtensionArraySupportsAnyAll]", variable has # type "ndarray[Any, dtype[bool_]]") result = values.isna() # type: ignore[assignment] elif is_string_or_object_np_dtype(values.dtype): result = _isna_string_dtype(values, inf_as_na=inf_as_na) elif needs_i8_conversion(dtype): # this is the NaT pattern result = values.view("i8") == iNaT else: if inf_as_na: result = ~np.isfinite(values) else: result = np.isnan(values) return result class DatetimeTZDtype(PandasExtensionDtype): """ An ExtensionDtype for timezone-aware datetime data. **This is not an actual numpy dtype**, but a duck type. Parameters ---------- unit : str, default "ns" The precision of the datetime data. Currently limited to ``"ns"``. tz : str, int, or datetime.tzinfo The timezone. Attributes ---------- unit tz Methods ------- None Raises ------ pytz.UnknownTimeZoneError When the requested timezone cannot be found. Examples -------- >>> pd.DatetimeTZDtype(tz='UTC') datetime64[ns, UTC] >>> pd.DatetimeTZDtype(tz='dateutil/US/Central') datetime64[ns, tzfile('/usr/share/zoneinfo/US/Central')] """ type: type[Timestamp] = Timestamp kind: str_type = "M" num = 101 base = np.dtype("M8[ns]") # TODO: depend on reso? _metadata = ("unit", "tz") _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]") _cache_dtypes: dict[str_type, PandasExtensionDtype] = {} def na_value(self) -> NaTType: return NaT # error: Signature of "str" incompatible with supertype "PandasExtensionDtype" def str(self) -> str: # type: ignore[override] return f"|M8[{self.unit}]" def __init__(self, unit: str_type | DatetimeTZDtype = "ns", tz=None) -> None: if isinstance(unit, DatetimeTZDtype): # error: "str" has no attribute "tz" unit, tz = unit.unit, unit.tz # type: ignore[attr-defined] if unit != "ns": if isinstance(unit, str) and tz is None: # maybe a string like datetime64[ns, tz], which we support for # now. 
result = type(self).construct_from_string(unit) unit = result.unit tz = result.tz msg = ( f"Passing a dtype alias like 'datetime64[ns, {tz}]' " "to DatetimeTZDtype is no longer supported. Use " "'DatetimeTZDtype.construct_from_string()' instead." ) raise ValueError(msg) if unit not in ["s", "ms", "us", "ns"]: raise ValueError("DatetimeTZDtype only supports s, ms, us, ns units") if tz: tz = timezones.maybe_get_tz(tz) tz = timezones.tz_standardize(tz) elif tz is not None: raise pytz.UnknownTimeZoneError(tz) if tz is None: raise TypeError("A 'tz' is required.") self._unit = unit self._tz = tz def _creso(self) -> int: """ The NPY_DATETIMEUNIT corresponding to this dtype's resolution. """ return abbrev_to_npy_unit(self.unit) def unit(self) -> str_type: """ The precision of the datetime data. """ return self._unit def tz(self) -> tzinfo: """ The timezone. """ return self._tz def construct_array_type(cls) -> type_t[DatetimeArray]: """ Return the array type associated with this dtype. Returns ------- type """ from pandas.core.arrays import DatetimeArray return DatetimeArray def construct_from_string(cls, string: str_type) -> DatetimeTZDtype: """ Construct a DatetimeTZDtype from a string. Parameters ---------- string : str The string alias for this DatetimeTZDtype. Should be formatted like ``datetime64[ns, <tz>]``, where ``<tz>`` is the timezone name. Examples -------- >>> DatetimeTZDtype.construct_from_string('datetime64[ns, UTC]') datetime64[ns, UTC] """ if not isinstance(string, str): raise TypeError( f"'construct_from_string' expects a string, got {type(string)}" ) msg = f"Cannot construct a 'DatetimeTZDtype' from '{string}'" match = cls._match.match(string) if match: d = match.groupdict() try: return cls(unit=d["unit"], tz=d["tz"]) except (KeyError, TypeError, ValueError) as err: # KeyError if maybe_get_tz tries and fails to get a # pytz timezone (actually pytz.UnknownTimeZoneError). # TypeError if we pass a nonsense tz; # ValueError if we pass a unit other than "ns" raise TypeError(msg) from err raise TypeError(msg) def __str__(self) -> str_type: return f"datetime64[{self.unit}, {self.tz}]" def name(self) -> str_type: """A string representation of the dtype.""" return str(self) def __hash__(self) -> int: # make myself hashable # TODO: update this. return hash(str(self)) def __eq__(self, other: Any) -> bool: if isinstance(other, str): if other.startswith("M8["): other = f"datetime64[{other[3:]}" return other == self.name return ( isinstance(other, DatetimeTZDtype) and self.unit == other.unit and tz_compare(self.tz, other.tz) ) def __setstate__(self, state) -> None: # for pickle compat. __get_state__ is defined in the # PandasExtensionDtype superclass and uses the public properties to # pickle -> need to set the settable private ones here (see GH26067) self._tz = state["tz"] self._unit = state["unit"] class PeriodDtype(PeriodDtypeBase, PandasExtensionDtype): """ An ExtensionDtype for Period data. **This is not an actual numpy dtype**, but a duck type. Parameters ---------- freq : str or DateOffset The frequency of this PeriodDtype. 
Attributes ---------- freq Methods ------- None Examples -------- >>> pd.PeriodDtype(freq='D') period[D] >>> pd.PeriodDtype(freq=pd.offsets.MonthEnd()) period[M] """ type: type[Period] = Period kind: str_type = "O" str = "|O08" base = np.dtype("O") num = 102 _metadata = ("freq",) _match = re.compile(r"(P|p)eriod\[(?P<freq>.+)\]") _cache_dtypes: dict[str_type, PandasExtensionDtype] = {} def __new__(cls, freq=None): """ Parameters ---------- freq : frequency """ if isinstance(freq, PeriodDtype): return freq elif freq is None: # empty constructor for pickle compat # -10_000 corresponds to PeriodDtypeCode.UNDEFINED u = PeriodDtypeBase.__new__(cls, -10_000) u._freq = None return u if not isinstance(freq, BaseOffset): freq = cls._parse_dtype_strict(freq) try: return cls._cache_dtypes[freq.freqstr] except KeyError: dtype_code = freq._period_dtype_code u = PeriodDtypeBase.__new__(cls, dtype_code) u._freq = freq cls._cache_dtypes[freq.freqstr] = u return u def __reduce__(self): return type(self), (self.freq,) def freq(self): """ The frequency object of this PeriodDtype. """ return self._freq def _parse_dtype_strict(cls, freq: str_type) -> BaseOffset: if isinstance(freq, str): # note: freq is already of type str! if freq.startswith("period[") or freq.startswith("Period["): m = cls._match.search(freq) if m is not None: freq = m.group("freq") freq_offset = to_offset(freq) if freq_offset is not None: return freq_offset raise ValueError("could not construct PeriodDtype") def construct_from_string(cls, string: str_type) -> PeriodDtype: """ Strict construction from a string, raise a TypeError if not possible """ if ( isinstance(string, str) and (string.startswith("period[") or string.startswith("Period[")) or isinstance(string, BaseOffset) ): # do not parse string like U as period[U] # avoid tuple to be regarded as freq try: return cls(freq=string) except ValueError: pass if isinstance(string, str): msg = f"Cannot construct a 'PeriodDtype' from '{string}'" else: msg = f"'construct_from_string' expects a string, got {type(string)}" raise TypeError(msg) def __str__(self) -> str_type: return self.name def name(self) -> str_type: return f"period[{self.freq.freqstr}]" def na_value(self) -> NaTType: return NaT def __hash__(self) -> int: # make myself hashable return hash(str(self)) def __eq__(self, other: Any) -> bool: if isinstance(other, str): return other in [self.name, self.name.title()] elif isinstance(other, PeriodDtype): # For freqs that can be held by a PeriodDtype, this check is # equivalent to (and much faster than) self.freq == other.freq sfreq = self.freq ofreq = other.freq return ( sfreq.n == ofreq.n and sfreq._period_dtype_code == ofreq._period_dtype_code ) return False def __ne__(self, other: Any) -> bool: return not self.__eq__(other) def __setstate__(self, state) -> None: # for pickle compat. __getstate__ is defined in the # PandasExtensionDtype superclass and uses the public properties to # pickle -> need to set the settable private ones here (see GH26067) self._freq = state["freq"] def is_dtype(cls, dtype: object) -> bool: """ Return a boolean if we if the passed type is an actual dtype that we can match (via string or type) """ if isinstance(dtype, str): # PeriodDtype can be instantiated from freq string like "U", # but doesn't regard freq str like "U" as dtype. 
if dtype.startswith("period[") or dtype.startswith("Period["): try: return cls._parse_dtype_strict(dtype) is not None except ValueError: return False else: return False return super().is_dtype(dtype) def construct_array_type(cls) -> type_t[PeriodArray]: """ Return the array type associated with this dtype. Returns ------- type """ from pandas.core.arrays import PeriodArray return PeriodArray def __from_arrow__( self, array: pyarrow.Array | pyarrow.ChunkedArray ) -> PeriodArray: """ Construct PeriodArray from pyarrow Array/ChunkedArray. """ import pyarrow from pandas.core.arrays import PeriodArray from pandas.core.arrays.arrow._arrow_utils import ( pyarrow_array_to_numpy_and_mask, ) if isinstance(array, pyarrow.Array): chunks = [array] else: chunks = array.chunks results = [] for arr in chunks: data, mask = pyarrow_array_to_numpy_and_mask(arr, dtype=np.dtype(np.int64)) parr = PeriodArray(data.copy(), freq=self.freq, copy=False) # error: Invalid index type "ndarray[Any, dtype[bool_]]" for "PeriodArray"; # expected type "Union[int, Sequence[int], Sequence[bool], slice]" parr[~mask] = NaT # type: ignore[index] results.append(parr) if not results: return PeriodArray(np.array([], dtype="int64"), freq=self.freq, copy=False) return PeriodArray._concat_same_type(results) ArrayLike = Union["ExtensionArray", np.ndarray] The provided code snippet includes necessary dependencies for implementing the `isna_all` function. Write a Python function `def isna_all(arr: ArrayLike) -> bool` to solve the following problem: Optimized equivalent to isna(arr).all() Here is the function: def isna_all(arr: ArrayLike) -> bool: """ Optimized equivalent to isna(arr).all() """ total_len = len(arr) # Usually it's enough to check but a small fraction of values to see if # a block is NOT null, chunks should help in such cases. # parameters 1000 and 40 were chosen arbitrarily chunk_len = max(total_len // 40, 1000) dtype = arr.dtype if dtype.kind == "f" and isinstance(dtype, np.dtype): checker = nan_checker elif (isinstance(dtype, np.dtype) and dtype.kind in ["m", "M"]) or isinstance( dtype, (DatetimeTZDtype, PeriodDtype) ): # error: Incompatible types in assignment (expression has type # "Callable[[Any], Any]", variable has type "ufunc") checker = lambda x: np.asarray(x.view("i8")) == iNaT # type: ignore[assignment] else: # error: Incompatible types in assignment (expression has type "Callable[[Any], # Any]", variable has type "ufunc") checker = lambda x: _isna_array( # type: ignore[assignment] x, inf_as_na=INF_AS_NA ) return all( checker(arr[i : i + chunk_len]).all() for i in range(0, total_len, chunk_len) )
Optimized equivalent to isna(arr).all()
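A usage sketch (same import assumption), illustrating the chunked scan described in the function's comments:

import numpy as np
from pandas.core.dtypes.missing import isna_all

isna_all(np.full(100_000, np.nan))                  # True, scanned in >=1000-element chunks
isna_all(np.array(["NaT", "NaT"], dtype="M8[ns]"))  # True for all-NaT datetime64 data
isna_all(np.array([np.nan, 1.0]))                   # False: fails on the first chunk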
173,073
from __future__ import annotations from typing import TYPE_CHECKING import numpy as np from pandas._libs import lib from pandas._typing import AxisInt from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) AxisInt = int ABCSeries = cast( "Type[Series]", create_pandas_abc_type("ABCSeries", "_typ", ("series",)), ) ABCDataFrame = cast( "Type[DataFrame]", create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",)) ) class NDFrame(PandasObject, indexing.IndexingMixin): """ N-dimensional analogue of DataFrame. Store multi-dimensional in a size-mutable, labeled data structure Parameters ---------- data : BlockManager axes : list copy : bool, default False """ _internal_names: list[str] = [ "_mgr", "_cacher", "_item_cache", "_cache", "_is_copy", "_subtyp", "_name", "_default_kind", "_default_fill_value", "_metadata", "__array_struct__", "__array_interface__", "_flags", ] _internal_names_set: set[str] = set(_internal_names) _accessors: set[str] = set() _hidden_attrs: frozenset[str] = frozenset([]) _metadata: list[str] = [] _is_copy: weakref.ReferenceType[NDFrame] | None = None _mgr: Manager _attrs: dict[Hashable, Any] _typ: str # ---------------------------------------------------------------------- # Constructors def __init__( self, data: Manager, copy: bool_t = False, attrs: Mapping[Hashable, Any] | None = None, ) -> None: # copy kwarg is retained for mypy compat, is not used object.__setattr__(self, "_is_copy", None) object.__setattr__(self, "_mgr", data) object.__setattr__(self, "_item_cache", {}) if attrs is None: attrs = {} else: attrs = dict(attrs) object.__setattr__(self, "_attrs", attrs) object.__setattr__(self, "_flags", Flags(self, allows_duplicate_labels=True)) def _init_mgr( cls, mgr: Manager, axes, dtype: Dtype | None = None, copy: bool_t = False, ) -> Manager: """passed a manager and a axes dict""" for a, axe in axes.items(): if axe is not None: axe = ensure_index(axe) bm_axis = cls._get_block_manager_axis(a) mgr = mgr.reindex_axis(axe, axis=bm_axis) # make a copy if explicitly requested if copy: mgr = mgr.copy() if dtype is not None: # avoid further copies if we can if ( isinstance(mgr, BlockManager) and len(mgr.blocks) == 1 and is_dtype_equal(mgr.blocks[0].values.dtype, dtype) ): pass else: mgr = mgr.astype(dtype=dtype) return mgr def _as_manager(self: NDFrameT, typ: str, copy: bool_t = True) -> NDFrameT: """ Private helper function to create a DataFrame with specific manager. Parameters ---------- typ : {"block", "array"} copy : bool, default True Only controls whether the conversion from Block->ArrayManager copies the 1D arrays (to ensure proper/contiguous memory layout). Returns ------- DataFrame New DataFrame using specified manager type. Is not guaranteed to be a copy or not. """ new_mgr: Manager new_mgr = mgr_to_mgr(self._mgr, typ=typ, copy=copy) # fastpath of passing a manager doesn't check the option/manager class return self._constructor(new_mgr).__finalize__(self) # ---------------------------------------------------------------------- # attrs and flags def attrs(self) -> dict[Hashable, Any]: """ Dictionary of global attributes of this dataset. .. warning:: attrs is experimental and may change without warning. See Also -------- DataFrame.flags : Global flags applying to this object. """ if self._attrs is None: self._attrs = {} return self._attrs def attrs(self, value: Mapping[Hashable, Any]) -> None: self._attrs = dict(value) def flags(self) -> Flags: """ Get the properties associated with this pandas object. 
The available flags are * :attr:`Flags.allows_duplicate_labels` See Also -------- Flags : Flags that apply to pandas objects. DataFrame.attrs : Global metadata applying to this dataset. Notes ----- "Flags" differ from "metadata". Flags reflect properties of the pandas object (the Series or DataFrame). Metadata refer to properties of the dataset, and should be stored in :attr:`DataFrame.attrs`. Examples -------- >>> df = pd.DataFrame({"A": [1, 2]}) >>> df.flags <Flags(allows_duplicate_labels=True)> Flags can be get or set using ``.`` >>> df.flags.allows_duplicate_labels True >>> df.flags.allows_duplicate_labels = False Or by slicing with a key >>> df.flags["allows_duplicate_labels"] False >>> df.flags["allows_duplicate_labels"] = True """ return self._flags def set_flags( self: NDFrameT, *, copy: bool_t = False, allows_duplicate_labels: bool_t | None = None, ) -> NDFrameT: """ Return a new object with updated flags. Parameters ---------- copy : bool, default False Specify if a copy of the object should be made. allows_duplicate_labels : bool, optional Whether the returned object allows duplicate labels. Returns ------- Series or DataFrame The same type as the caller. See Also -------- DataFrame.attrs : Global metadata applying to this dataset. DataFrame.flags : Global flags applying to this object. Notes ----- This method returns a new object that's a view on the same data as the input. Mutating the input or the output values will be reflected in the other. This method is intended to be used in method chains. "Flags" differ from "metadata". Flags reflect properties of the pandas object (the Series or DataFrame). Metadata refer to properties of the dataset, and should be stored in :attr:`DataFrame.attrs`. Examples -------- >>> df = pd.DataFrame({"A": [1, 2]}) >>> df.flags.allows_duplicate_labels True >>> df2 = df.set_flags(allows_duplicate_labels=False) >>> df2.flags.allows_duplicate_labels False """ df = self.copy(deep=copy and not using_copy_on_write()) if allows_duplicate_labels is not None: df.flags["allows_duplicate_labels"] = allows_duplicate_labels return df def _validate_dtype(cls, dtype) -> DtypeObj | None: """validate the passed dtype""" if dtype is not None: dtype = pandas_dtype(dtype) # a compound dtype if dtype.kind == "V": raise NotImplementedError( "compound dtypes are not implemented " f"in the {cls.__name__} constructor" ) return dtype # ---------------------------------------------------------------------- # Construction def _constructor(self: NDFrameT) -> Callable[..., NDFrameT]: """ Used when a manipulation result has the same dimensions as the original. """ raise AbstractMethodError(self) # ---------------------------------------------------------------------- # Internals def _data(self): # GH#33054 retained because some downstream packages uses this, # e.g. 
fastparquet return self._mgr # ---------------------------------------------------------------------- # Axis _stat_axis_number = 0 _stat_axis_name = "index" _AXIS_ORDERS: list[Literal["index", "columns"]] _AXIS_TO_AXIS_NUMBER: dict[Axis, AxisInt] = {0: 0, "index": 0, "rows": 0} _info_axis_number: int _info_axis_name: Literal["index", "columns"] _AXIS_LEN: int def _construct_axes_dict(self, axes: Sequence[Axis] | None = None, **kwargs): """Return an axes dictionary for myself.""" d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)} # error: Argument 1 to "update" of "MutableMapping" has incompatible type # "Dict[str, Any]"; expected "SupportsKeysAndGetItem[Union[int, str], Any]" d.update(kwargs) # type: ignore[arg-type] return d def _get_axis_number(cls, axis: Axis) -> AxisInt: try: return cls._AXIS_TO_AXIS_NUMBER[axis] except KeyError: raise ValueError(f"No axis named {axis} for object type {cls.__name__}") def _get_axis_name(cls, axis: Axis) -> Literal["index", "columns"]: axis_number = cls._get_axis_number(axis) return cls._AXIS_ORDERS[axis_number] def _get_axis(self, axis: Axis) -> Index: axis_number = self._get_axis_number(axis) assert axis_number in {0, 1} return self.index if axis_number == 0 else self.columns def _get_block_manager_axis(cls, axis: Axis) -> AxisInt: """Map the axis to the block_manager axis.""" axis = cls._get_axis_number(axis) ndim = cls._AXIS_LEN if ndim == 2: # i.e. DataFrame return 1 - axis return axis def _get_axis_resolvers(self, axis: str) -> dict[str, Series | MultiIndex]: # index or columns axis_index = getattr(self, axis) d = {} prefix = axis[0] for i, name in enumerate(axis_index.names): if name is not None: key = level = name else: # prefix with 'i' or 'c' depending on the input axis # e.g., you must do ilevel_0 for the 0th level of an unnamed # multiiindex key = f"{prefix}level_{i}" level = i level_values = axis_index.get_level_values(level) s = level_values.to_series() s.index = axis_index d[key] = s # put the index/columns itself in the dict if isinstance(axis_index, MultiIndex): dindex = axis_index else: dindex = axis_index.to_series() d[axis] = dindex return d def _get_index_resolvers(self) -> dict[Hashable, Series | MultiIndex]: from pandas.core.computation.parsing import clean_column_name d: dict[str, Series | MultiIndex] = {} for axis_name in self._AXIS_ORDERS: d.update(self._get_axis_resolvers(axis_name)) return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)} def _get_cleaned_column_resolvers(self) -> dict[Hashable, Series]: """ Return the special character free column resolvers of a dataframe. Column names with special characters are 'cleaned up' so that they can be referred to by backtick quoting. Used in :meth:`DataFrame.eval`. 
""" from pandas.core.computation.parsing import clean_column_name if isinstance(self, ABCSeries): return {clean_column_name(self.name): self} return { clean_column_name(k): v for k, v in self.items() if not isinstance(k, int) } def _info_axis(self) -> Index: return getattr(self, self._info_axis_name) def _stat_axis(self) -> Index: return getattr(self, self._stat_axis_name) def shape(self) -> tuple[int, ...]: """ Return a tuple of axis dimensions """ return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS) def axes(self) -> list[Index]: """ Return index label(s) of the internal NDFrame """ # we do it this way because if we have reversed axes, then # the block manager shows then reversed return [self._get_axis(a) for a in self._AXIS_ORDERS] def ndim(self) -> int: """ Return an int representing the number of axes / array dimensions. Return 1 if Series. Otherwise return 2 if DataFrame. See Also -------- ndarray.ndim : Number of array dimensions. Examples -------- >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3}) >>> s.ndim 1 >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.ndim 2 """ return self._mgr.ndim def size(self) -> int: """ Return an int representing the number of elements in this object. Return the number of rows if Series. Otherwise return the number of rows times number of columns if DataFrame. See Also -------- ndarray.size : Number of elements in the array. Examples -------- >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3}) >>> s.size 3 >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.size 4 """ # error: Incompatible return value type (got "signedinteger[_64Bit]", # expected "int") [return-value] return np.prod(self.shape) # type: ignore[return-value] def set_axis( self: NDFrameT, labels, *, axis: Axis = 0, copy: bool_t | None = None, ) -> NDFrameT: """ Assign desired index to given axis. Indexes for%(extended_summary_sub)s row labels can be changed by assigning a list-like or Index. Parameters ---------- labels : list-like, Index The values for the new index. axis : %(axes_single_arg)s, default 0 The axis to update. The value 0 identifies the rows. For `Series` this parameter is unused and defaults to 0. copy : bool, default True Whether to make a copy of the underlying data. .. versionadded:: 1.5.0 Returns ------- %(klass)s An object of type %(klass)s. See Also -------- %(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s. """ return self._set_axis_nocheck(labels, axis, inplace=False, copy=copy) def _set_axis_nocheck( self, labels, axis: Axis, inplace: bool_t, copy: bool_t | None ): if inplace: setattr(self, self._get_axis_name(axis), labels) else: # With copy=False, we create a new object but don't copy the # underlying data. obj = self.copy(deep=copy and not using_copy_on_write()) setattr(obj, obj._get_axis_name(axis), labels) return obj def _set_axis(self, axis: AxisInt, labels: AnyArrayLike | list) -> None: """ This is called from the cython code when we set the `index` attribute directly, e.g. `series.index = [1, 2, 3]`. """ labels = ensure_index(labels) self._mgr.set_axis(axis, labels) self._clear_item_cache() def swapaxes( self: NDFrameT, axis1: Axis, axis2: Axis, copy: bool_t | None = None ) -> NDFrameT: """ Interchange axes and swap values axes appropriately. 
Returns ------- same as input """ i = self._get_axis_number(axis1) j = self._get_axis_number(axis2) if i == j: return self.copy(deep=copy and not using_copy_on_write()) mapping = {i: j, j: i} new_axes = [self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN)] new_values = self._values.swapaxes(i, j) # type: ignore[union-attr] if ( using_copy_on_write() and self._mgr.is_single_block and isinstance(self._mgr, BlockManager) ): # This should only get hit in case of having a single block, otherwise a # copy is made, we don't have to set up references. new_mgr = ndarray_to_mgr( new_values, new_axes[0], new_axes[1], dtype=None, copy=False, typ="block", ) assert isinstance(new_mgr, BlockManager) assert isinstance(self._mgr, BlockManager) new_mgr.blocks[0].refs = self._mgr.blocks[0].refs new_mgr.blocks[0].refs.add_reference( new_mgr.blocks[0] # type: ignore[arg-type] ) return self._constructor(new_mgr).__finalize__(self, method="swapaxes") elif (copy or copy is None) and self._mgr.is_single_block: new_values = new_values.copy() return self._constructor( new_values, *new_axes, # The no-copy case for CoW is handled above copy=False, ).__finalize__(self, method="swapaxes") def droplevel(self: NDFrameT, level: IndexLabel, axis: Axis = 0) -> NDFrameT: """ Return {klass} with requested index / column level(s) removed. Parameters ---------- level : int, str, or list-like If a string is given, must be the name of a level If list-like, elements must be names or positional indexes of levels. axis : {{0 or 'index', 1 or 'columns'}}, default 0 Axis along which the level(s) is removed: * 0 or 'index': remove level(s) in column. * 1 or 'columns': remove level(s) in row. For `Series` this parameter is unused and defaults to 0. Returns ------- {klass} {klass} with requested index / column level(s) removed. Examples -------- >>> df = pd.DataFrame([ ... [1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12] ... ]).set_index([0, 1]).rename_axis(['a', 'b']) >>> df.columns = pd.MultiIndex.from_tuples([ ... ('c', 'e'), ('d', 'f') ... ], names=['level_1', 'level_2']) >>> df level_1 c d level_2 e f a b 1 2 3 4 5 6 7 8 9 10 11 12 >>> df.droplevel('a') level_1 c d level_2 e f b 2 3 4 6 7 8 10 11 12 >>> df.droplevel('level_2', axis=1) level_1 c d a b 1 2 3 4 5 6 7 8 9 10 11 12 """ labels = self._get_axis(axis) new_labels = labels.droplevel(level) return self.set_axis(new_labels, axis=axis, copy=None) def pop(self, item: Hashable) -> Series | Any: result = self[item] del self[item] return result def squeeze(self, axis: Axis | None = None): """ Squeeze 1 dimensional axis objects into scalars. Series or DataFrames with a single element are squeezed to a scalar. DataFrames with a single column or a single row are squeezed to a Series. Otherwise the object is unchanged. This method is most useful when you don't know if your object is a Series or DataFrame, but you do know it has just a single column. In that case you can safely call `squeeze` to ensure you have a Series. Parameters ---------- axis : {0 or 'index', 1 or 'columns', None}, default None A specific axis to squeeze. By default, all length-1 axes are squeezed. For `Series` this parameter is unused and defaults to `None`. Returns ------- DataFrame, Series, or scalar The projection after squeezing `axis` or all the axes. See Also -------- Series.iloc : Integer-location based indexing for selecting scalars. DataFrame.iloc : Integer-location based indexing for selecting Series. Series.to_frame : Inverse of DataFrame.squeeze for a single-column DataFrame. 
Examples -------- >>> primes = pd.Series([2, 3, 5, 7]) Slicing might produce a Series with a single value: >>> even_primes = primes[primes % 2 == 0] >>> even_primes 0 2 dtype: int64 >>> even_primes.squeeze() 2 Squeezing objects with more than one value in every axis does nothing: >>> odd_primes = primes[primes % 2 == 1] >>> odd_primes 1 3 2 5 3 7 dtype: int64 >>> odd_primes.squeeze() 1 3 2 5 3 7 dtype: int64 Squeezing is even more effective when used with DataFrames. >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b']) >>> df a b 0 1 2 1 3 4 Slicing a single column will produce a DataFrame with the columns having only one value: >>> df_a = df[['a']] >>> df_a a 0 1 1 3 So the columns can be squeezed down, resulting in a Series: >>> df_a.squeeze('columns') 0 1 1 3 Name: a, dtype: int64 Slicing a single row from a single column will produce a single scalar DataFrame: >>> df_0a = df.loc[df.index < 1, ['a']] >>> df_0a a 0 1 Squeezing the rows produces a single scalar Series: >>> df_0a.squeeze('rows') a 1 Name: 0, dtype: int64 Squeezing all axes will project directly into a scalar: >>> df_0a.squeeze() 1 """ axes = range(self._AXIS_LEN) if axis is None else (self._get_axis_number(axis),) return self.iloc[ tuple( 0 if i in axes and len(a) == 1 else slice(None) for i, a in enumerate(self.axes) ) ] # ---------------------------------------------------------------------- # Rename def _rename( self: NDFrameT, mapper: Renamer | None = None, *, index: Renamer | None = None, columns: Renamer | None = None, axis: Axis | None = None, copy: bool_t | None = None, inplace: bool_t = False, level: Level | None = None, errors: str = "ignore", ) -> NDFrameT | None: # called by Series.rename and DataFrame.rename if mapper is None and index is None and columns is None: raise TypeError("must pass an index to rename") if index is not None or columns is not None: if axis is not None: raise TypeError( "Cannot specify both 'axis' and any of 'index' or 'columns'" ) if mapper is not None: raise TypeError( "Cannot specify both 'mapper' and any of 'index' or 'columns'" ) else: # use the mapper argument if axis and self._get_axis_number(axis) == 1: columns = mapper else: index = mapper self._check_inplace_and_allows_duplicate_labels(inplace) result = self if inplace else self.copy(deep=copy and not using_copy_on_write()) for axis_no, replacements in enumerate((index, columns)): if replacements is None: continue ax = self._get_axis(axis_no) f = common.get_rename_function(replacements) if level is not None: level = ax._get_level_number(level) # GH 13473 if not callable(replacements): if ax._is_multi and level is not None: indexer = ax.get_level_values(level).get_indexer_for(replacements) else: indexer = ax.get_indexer_for(replacements) if errors == "raise" and len(indexer[indexer == -1]): missing_labels = [ label for index, label in enumerate(replacements) if indexer[index] == -1 ] raise KeyError(f"{missing_labels} not found in axis") new_index = ax._transform_index(f, level=level) result._set_axis_nocheck(new_index, axis=axis_no, inplace=True, copy=False) result._clear_item_cache() if inplace: self._update_inplace(result) return None else: return result.__finalize__(self, method="rename") def rename_axis( self: NDFrameT, mapper: IndexLabel | lib.NoDefault = ..., *, index=..., columns=..., axis: Axis = ..., copy: bool_t | None = ..., inplace: Literal[False] = ..., ) -> NDFrameT: ... 
def rename_axis( self, mapper: IndexLabel | lib.NoDefault = ..., *, index=..., columns=..., axis: Axis = ..., copy: bool_t | None = ..., inplace: Literal[True], ) -> None: ... def rename_axis( self: NDFrameT, mapper: IndexLabel | lib.NoDefault = ..., *, index=..., columns=..., axis: Axis = ..., copy: bool_t | None = ..., inplace: bool_t = ..., ) -> NDFrameT | None: ... def rename_axis( self: NDFrameT, mapper: IndexLabel | lib.NoDefault = lib.no_default, *, index=lib.no_default, columns=lib.no_default, axis: Axis = 0, copy: bool_t | None = None, inplace: bool_t = False, ) -> NDFrameT | None: """ Set the name of the axis for the index or columns. Parameters ---------- mapper : scalar, list-like, optional Value to set the axis name attribute. index, columns : scalar, list-like, dict-like or function, optional A scalar, list-like, dict-like or functions transformations to apply to that axis' values. Note that the ``columns`` parameter is not allowed if the object is a Series. This parameter only apply for DataFrame type objects. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and/or ``columns``. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to rename. For `Series` this parameter is unused and defaults to 0. copy : bool, default None Also copy underlying data. inplace : bool, default False Modifies the object directly, instead of creating a new Series or DataFrame. Returns ------- Series, DataFrame, or None The same type as the caller or None if ``inplace=True``. See Also -------- Series.rename : Alter Series index labels or name. DataFrame.rename : Alter DataFrame index labels or name. Index.rename : Set new names on index. Notes ----- ``DataFrame.rename_axis`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` The first calling convention will only modify the names of the index and/or the names of the Index object that is the columns. In this case, the parameter ``copy`` is ignored. The second calling convention will modify the names of the corresponding index if mapper is a list or a scalar. However, if mapper is dict-like or a function, it will use the deprecated behavior of modifying the axis *labels*. We *highly* recommend using keyword arguments to clarify your intent. Examples -------- **Series** >>> s = pd.Series(["dog", "cat", "monkey"]) >>> s 0 dog 1 cat 2 monkey dtype: object >>> s.rename_axis("animal") animal 0 dog 1 cat 2 monkey dtype: object **DataFrame** >>> df = pd.DataFrame({"num_legs": [4, 4, 2], ... "num_arms": [0, 0, 2]}, ... ["dog", "cat", "monkey"]) >>> df num_legs num_arms dog 4 0 cat 4 0 monkey 2 2 >>> df = df.rename_axis("animal") >>> df num_legs num_arms animal dog 4 0 cat 4 0 monkey 2 2 >>> df = df.rename_axis("limbs", axis="columns") >>> df limbs num_legs num_arms animal dog 4 0 cat 4 0 monkey 2 2 **MultiIndex** >>> df.index = pd.MultiIndex.from_product([['mammal'], ... ['dog', 'cat', 'monkey']], ... 
names=['type', 'name']) >>> df limbs num_legs num_arms type name mammal dog 4 0 cat 4 0 monkey 2 2 >>> df.rename_axis(index={'type': 'class'}) limbs num_legs num_arms class name mammal dog 4 0 cat 4 0 monkey 2 2 >>> df.rename_axis(columns=str.upper) LIMBS num_legs num_arms type name mammal dog 4 0 cat 4 0 monkey 2 2 """ axes = {"index": index, "columns": columns} if axis is not None: axis = self._get_axis_number(axis) inplace = validate_bool_kwarg(inplace, "inplace") if copy and using_copy_on_write(): copy = False if mapper is not lib.no_default: # Use v0.23 behavior if a scalar or list non_mapper = is_scalar(mapper) or ( is_list_like(mapper) and not is_dict_like(mapper) ) if non_mapper: return self._set_axis_name( mapper, axis=axis, inplace=inplace, copy=copy ) else: raise ValueError("Use `.rename` to alter labels with a mapper.") else: # Use new behavior. Means that index and/or columns # is specified result = self if inplace else self.copy(deep=copy) for axis in range(self._AXIS_LEN): v = axes.get(self._get_axis_name(axis)) if v is lib.no_default: continue non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v)) if non_mapper: newnames = v else: f = common.get_rename_function(v) curnames = self._get_axis(axis).names newnames = [f(name) for name in curnames] result._set_axis_name(newnames, axis=axis, inplace=True, copy=copy) if not inplace: return result return None def _set_axis_name( self, name, axis: Axis = 0, inplace: bool_t = False, copy: bool_t | None = True ): """ Set the name(s) of the axis. Parameters ---------- name : str or list of str Name(s) to set. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to set the label. The value 0 or 'index' specifies index, and the value 1 or 'columns' specifies columns. inplace : bool, default False If `True`, do operation inplace and return None. copy: Whether to make a copy of the result. Returns ------- Series, DataFrame, or None The same type as the caller or `None` if `inplace` is `True`. See Also -------- DataFrame.rename : Alter the axis labels of :class:`DataFrame`. Series.rename : Alter the index labels or set the index name of :class:`Series`. Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`. Examples -------- >>> df = pd.DataFrame({"num_legs": [4, 4, 2]}, ... ["dog", "cat", "monkey"]) >>> df num_legs dog 4 cat 4 monkey 2 >>> df._set_axis_name("animal") num_legs animal dog 4 cat 4 monkey 2 >>> df.index = pd.MultiIndex.from_product( ... [["mammal"], ['dog', 'cat', 'monkey']]) >>> df._set_axis_name(["type", "name"]) num_legs type name mammal dog 4 cat 4 monkey 2 """ axis = self._get_axis_number(axis) idx = self._get_axis(axis).set_names(name) inplace = validate_bool_kwarg(inplace, "inplace") renamed = self if inplace else self.copy(deep=copy) if axis == 0: renamed.index = idx else: renamed.columns = idx if not inplace: return renamed # ---------------------------------------------------------------------- # Comparison Methods def _indexed_same(self, other) -> bool_t: return all( self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS ) def equals(self, other: object) -> bool_t: """ Test whether two objects contain the same elements. This function allows two Series or DataFrames to be compared against each other to see if they have the same shape and elements. NaNs in the same location are considered equal. The row/column index do not need to have the same type, as long as the values are considered equal. Corresponding columns must be of the same dtype. 
Parameters ---------- other : Series or DataFrame The other Series or DataFrame to be compared with the first. Returns ------- bool True if all elements are the same in both objects, False otherwise. See Also -------- Series.eq : Compare two Series objects of the same length and return a Series where each element is True if the element in each Series is equal, False otherwise. DataFrame.eq : Compare two DataFrame objects of the same shape and return a DataFrame where each element is True if the respective element in each DataFrame is equal, False otherwise. testing.assert_series_equal : Raises an AssertionError if left and right are not equal. Provides an easy interface to ignore inequality in dtypes, indexes and precision among others. testing.assert_frame_equal : Like assert_series_equal, but targets DataFrames. numpy.array_equal : Return True if two arrays have the same shape and elements, False otherwise. Examples -------- >>> df = pd.DataFrame({1: [10], 2: [20]}) >>> df 1 2 0 10 20 DataFrames df and exactly_equal have the same types and values for their elements and column labels, which will return True. >>> exactly_equal = pd.DataFrame({1: [10], 2: [20]}) >>> exactly_equal 1 2 0 10 20 >>> df.equals(exactly_equal) True DataFrames df and different_column_type have the same element types and values, but have different types for the column labels, which will still return True. >>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]}) >>> different_column_type 1.0 2.0 0 10 20 >>> df.equals(different_column_type) True DataFrames df and different_data_type have different types for the same values for their elements, and will return False even though their column labels are the same values and types. >>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]}) >>> different_data_type 1 2 0 10.0 20.0 >>> df.equals(different_data_type) False """ if not (isinstance(other, type(self)) or isinstance(self, type(other))): return False other = cast(NDFrame, other) return self._mgr.equals(other._mgr) # ------------------------------------------------------------------------- # Unary Methods def __neg__(self: NDFrameT) -> NDFrameT: def blk_func(values: ArrayLike): if is_bool_dtype(values.dtype): # error: Argument 1 to "inv" has incompatible type "Union # [ExtensionArray, ndarray[Any, Any]]"; expected # "_SupportsInversion[ndarray[Any, dtype[bool_]]]" return operator.inv(values) # type: ignore[arg-type] else: # error: Argument 1 to "neg" has incompatible type "Union # [ExtensionArray, ndarray[Any, Any]]"; expected # "_SupportsNeg[ndarray[Any, dtype[Any]]]" return operator.neg(values) # type: ignore[arg-type] new_data = self._mgr.apply(blk_func) res = self._constructor(new_data) return res.__finalize__(self, method="__neg__") def __pos__(self: NDFrameT) -> NDFrameT: def blk_func(values: ArrayLike): if is_bool_dtype(values.dtype): return values.copy() else: # error: Argument 1 to "pos" has incompatible type "Union # [ExtensionArray, ndarray[Any, Any]]"; expected # "_SupportsPos[ndarray[Any, dtype[Any]]]" return operator.pos(values) # type: ignore[arg-type] new_data = self._mgr.apply(blk_func) res = self._constructor(new_data) return res.__finalize__(self, method="__pos__") def __invert__(self: NDFrameT) -> NDFrameT: if not self.size: # inv fails with 0 len return self.copy(deep=False) new_data = self._mgr.apply(operator.invert) return self._constructor(new_data).__finalize__(self, method="__invert__") def __nonzero__(self) -> NoReturn: raise ValueError( f"The truth value of a 
{type(self).__name__} is ambiguous. " "Use a.empty, a.bool(), a.item(), a.any() or a.all()." ) __bool__ = __nonzero__ def bool(self) -> bool_t: """ Return the bool of a single element Series or DataFrame. This must be a boolean scalar value, either True or False. It will raise a ValueError if the Series or DataFrame does not have exactly 1 element, or that element is not boolean (integer values 0 and 1 will also raise an exception). Returns ------- bool The value in the Series or DataFrame. See Also -------- Series.astype : Change the data type of a Series, including to boolean. DataFrame.astype : Change the data type of a DataFrame, including to boolean. numpy.bool_ : NumPy boolean data type, used by pandas for boolean values. Examples -------- The method will only work for single element objects with a boolean value: >>> pd.Series([True]).bool() True >>> pd.Series([False]).bool() False >>> pd.DataFrame({'col': [True]}).bool() True >>> pd.DataFrame({'col': [False]}).bool() False """ v = self.squeeze() if isinstance(v, (bool, np.bool_)): return bool(v) elif is_scalar(v): raise ValueError( "bool cannot act on a non-boolean single element " f"{type(self).__name__}" ) self.__nonzero__() # for mypy (__nonzero__ raises) return True def abs(self: NDFrameT) -> NDFrameT: """ Return a Series/DataFrame with absolute numeric value of each element. This function only applies to elements that are all numeric. Returns ------- abs Series/DataFrame containing the absolute value of each element. See Also -------- numpy.absolute : Calculate the absolute value element-wise. Notes ----- For ``complex`` inputs, ``1.2 + 1j``, the absolute value is :math:`\\sqrt{ a^2 + b^2 }`. Examples -------- Absolute numeric values in a Series. >>> s = pd.Series([-1.10, 2, -3.33, 4]) >>> s.abs() 0 1.10 1 2.00 2 3.33 3 4.00 dtype: float64 Absolute numeric values in a Series with complex numbers. >>> s = pd.Series([1.2 + 1j]) >>> s.abs() 0 1.56205 dtype: float64 Absolute numeric values in a Series with a Timedelta element. >>> s = pd.Series([pd.Timedelta('1 days')]) >>> s.abs() 0 1 days dtype: timedelta64[ns] Select rows with data closest to certain value using argsort (from `StackOverflow <https://stackoverflow.com/a/17758115>`__). >>> df = pd.DataFrame({ ... 'a': [4, 5, 6, 7], ... 'b': [10, 20, 30, 40], ... 'c': [100, 50, -30, -50] ... }) >>> df a b c 0 4 10 100 1 5 20 50 2 6 30 -30 3 7 40 -50 >>> df.loc[(df.c - 43).abs().argsort()] a b c 1 5 20 50 0 4 10 100 2 6 30 -30 3 7 40 -50 """ res_mgr = self._mgr.apply(np.abs) return self._constructor(res_mgr).__finalize__(self, name="abs") def __abs__(self: NDFrameT) -> NDFrameT: return self.abs() def __round__(self: NDFrameT, decimals: int = 0) -> NDFrameT: return self.round(decimals).__finalize__(self, method="__round__") # ------------------------------------------------------------------------- # Label or Level Combination Helpers # # A collection of helper methods for DataFrame/Series operations that # accept a combination of column/index labels and levels. All such # operations should utilize/extend these methods when possible so that we # have consistent precedence and validation logic throughout the library. def _is_level_reference(self, key: Level, axis: Axis = 0) -> bool_t: """ Test whether a key is a level reference for a given axis. To be considered a level reference, `key` must be a string that: - (axis=0): Matches the name of an index level and does NOT match a column label. - (axis=1): Matches the name of a column level and does NOT match an index label. 
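For example, with ``df = pd.DataFrame({"a": [1]}, index=pd.Index([0], name="lev"))``, ``"lev"`` is a level reference for ``axis=0``, while ``"a"`` matches a column label and therefore is not.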
Parameters ---------- key : Hashable Potential level name for the given axis axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- is_level : bool """ axis_int = self._get_axis_number(axis) return ( key is not None and is_hashable(key) and key in self.axes[axis_int].names and not self._is_label_reference(key, axis=axis_int) ) def _is_label_reference(self, key: Level, axis: Axis = 0) -> bool_t: """ Test whether a key is a label reference for a given axis. To be considered a label reference, `key` must be a string that: - (axis=0): Matches a column label - (axis=1): Matches an index label Parameters ---------- key : Hashable Potential label name, i.e. Index entry. axis : int, default 0 Axis perpendicular to the axis that labels are associated with (0 means search for column labels, 1 means search for index labels) Returns ------- is_label: bool """ axis_int = self._get_axis_number(axis) other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis_int) return ( key is not None and is_hashable(key) and any(key in self.axes[ax] for ax in other_axes) ) def _is_label_or_level_reference(self, key: Level, axis: AxisInt = 0) -> bool_t: """ Test whether a key is a label or level reference for a given axis. To be considered either a label or a level reference, `key` must be a string that: - (axis=0): Matches a column label or an index level - (axis=1): Matches an index label or a column level Parameters ---------- key : Hashable Potential label or level name axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- bool """ return self._is_level_reference(key, axis=axis) or self._is_label_reference( key, axis=axis ) def _check_label_or_level_ambiguity(self, key: Level, axis: Axis = 0) -> None: """ Check whether `key` is ambiguous. By ambiguous, we mean that it matches both a level of the input `axis` and a label of the other axis. Parameters ---------- key : Hashable Label or level name. axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns). Raises ------ ValueError: `key` is ambiguous """ axis_int = self._get_axis_number(axis) other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis_int) if ( key is not None and is_hashable(key) and key in self.axes[axis_int].names and any(key in self.axes[ax] for ax in other_axes) ): # Build an informative and grammatical warning level_article, level_type = ( ("an", "index") if axis_int == 0 else ("a", "column") ) label_article, label_type = ( ("a", "column") if axis_int == 0 else ("an", "index") ) msg = ( f"'{key}' is both {level_article} {level_type} level and " f"{label_article} {label_type} label, which is ambiguous." ) raise ValueError(msg) def _get_label_or_level_values(self, key: Level, axis: AxisInt = 0) -> ArrayLike: """ Return a 1-D array of values associated with `key`, a label or level from the given `axis`. Retrieval logic: - (axis=0): Return column values if `key` matches a column label. Otherwise return index level values if `key` matches an index level. - (axis=1): Return row values if `key` matches an index label. Otherwise return column level values if 'key' matches a column level Parameters ---------- key : Hashable Label or level name. 
axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- np.ndarray or ExtensionArray Raises ------ KeyError if `key` matches neither a label nor a level ValueError if `key` matches multiple labels """ axis = self._get_axis_number(axis) other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis] if self._is_label_reference(key, axis=axis): self._check_label_or_level_ambiguity(key, axis=axis) values = self.xs(key, axis=other_axes[0])._values elif self._is_level_reference(key, axis=axis): values = self.axes[axis].get_level_values(key)._values else: raise KeyError(key) # Check for duplicates if values.ndim > 1: if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex): multi_message = ( "\n" "For a multi-index, the label must be a " "tuple with elements corresponding to each level." ) else: multi_message = "" label_axis_name = "column" if axis == 0 else "index" raise ValueError( f"The {label_axis_name} label '{key}' is not unique.{multi_message}" ) return values def _drop_labels_or_levels(self, keys, axis: AxisInt = 0): """ Drop labels and/or levels for the given `axis`. For each key in `keys`: - (axis=0): If key matches a column label then drop the column. Otherwise if key matches an index level then drop the level. - (axis=1): If key matches an index label then drop the row. Otherwise if key matches a column level then drop the level. Parameters ---------- keys : str or list of str labels or levels to drop axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- dropped: DataFrame Raises ------ ValueError if any `keys` match neither a label nor a level """ axis = self._get_axis_number(axis) # Validate keys keys = common.maybe_make_list(keys) invalid_keys = [ k for k in keys if not self._is_label_or_level_reference(k, axis=axis) ] if invalid_keys: raise ValueError( "The following keys are not valid labels or " f"levels for axis {axis}: {invalid_keys}" ) # Compute levels and labels to drop levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)] labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)] # Perform copy upfront and then use inplace operations below. # This ensures that we always perform exactly one copy. # ``copy`` and/or ``inplace`` options could be added in the future. dropped = self.copy(deep=False) if axis == 0: # Handle dropping index levels if levels_to_drop: dropped.reset_index(levels_to_drop, drop=True, inplace=True) # Handle dropping columns labels if labels_to_drop: dropped.drop(labels_to_drop, axis=1, inplace=True) else: # Handle dropping column levels if levels_to_drop: if isinstance(dropped.columns, MultiIndex): # Drop the specified levels from the MultiIndex dropped.columns = dropped.columns.droplevel(levels_to_drop) else: # Drop the last level of Index by replacing with # a RangeIndex dropped.columns = RangeIndex(dropped.columns.size) # Handle dropping index labels if labels_to_drop: dropped.drop(labels_to_drop, axis=0, inplace=True) return dropped # ---------------------------------------------------------------------- # Iteration # https://github.com/python/typeshed/issues/2148#issuecomment-520783318 # Incompatible types in assignment (expression has type "None", base class # "object" defined the type as "Callable[[object], int]") __hash__: ClassVar[None] # type: ignore[assignment] def __iter__(self) -> Iterator: """ Iterate over info axis. Returns ------- iterator Info axis as iterator. 
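Examples
--------
Iterating a DataFrame yields its column labels:

>>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
>>> for label in df:
...     print(label)
A
B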
""" return iter(self._info_axis) # can we get a better explanation of this? def keys(self) -> Index: """ Get the 'info axis' (see Indexing for more). This is index for Series, columns for DataFrame. Returns ------- Index Info axis. """ return self._info_axis def items(self): """ Iterate over (label, values) on info axis This is index for Series and columns for DataFrame. Returns ------- Generator """ for h in self._info_axis: yield h, self[h] def __len__(self) -> int: """Returns length of info axis""" return len(self._info_axis) def __contains__(self, key) -> bool_t: """True if the key is in the info axis""" return key in self._info_axis def empty(self) -> bool_t: """ Indicator whether Series/DataFrame is empty. True if Series/DataFrame is entirely empty (no items), meaning any of the axes are of length 0. Returns ------- bool If Series/DataFrame is empty, return True, if not return False. See Also -------- Series.dropna : Return series without null values. DataFrame.dropna : Return DataFrame with labels on given axis omitted where (all or any) data are missing. Notes ----- If Series/DataFrame contains only NaNs, it is still not considered empty. See the example below. Examples -------- An example of an actual empty DataFrame. Notice the index is empty: >>> df_empty = pd.DataFrame({'A' : []}) >>> df_empty Empty DataFrame Columns: [A] Index: [] >>> df_empty.empty True If we only have NaNs in our DataFrame, it is not considered empty! We will need to drop the NaNs to make the DataFrame empty: >>> df = pd.DataFrame({'A' : [np.nan]}) >>> df A 0 NaN >>> df.empty False >>> df.dropna().empty True >>> ser_empty = pd.Series({'A' : []}) >>> ser_empty A [] dtype: object >>> ser_empty.empty False >>> ser_empty = pd.Series() >>> ser_empty.empty True """ return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS) # ---------------------------------------------------------------------- # Array Interface # This is also set in IndexOpsMixin # GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented __array_priority__: int = 1000 def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: values = self._values arr = np.asarray(values, dtype=dtype) if ( astype_is_view(values.dtype, arr.dtype) and using_copy_on_write() and self._mgr.is_single_block ): # Check if both conversions can be done without a copy if astype_is_view(self.dtypes.iloc[0], values.dtype) and astype_is_view( values.dtype, arr.dtype ): arr = arr.view() arr.flags.writeable = False return arr def __array_ufunc__( self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any ): return arraylike.array_ufunc(self, ufunc, method, *inputs, **kwargs) # ---------------------------------------------------------------------- # Picklability def __getstate__(self) -> dict[str, Any]: meta = {k: getattr(self, k, None) for k in self._metadata} return { "_mgr": self._mgr, "_typ": self._typ, "_metadata": self._metadata, "attrs": self.attrs, "_flags": {k: self.flags[k] for k in self.flags._keys}, **meta, } def __setstate__(self, state) -> None: if isinstance(state, BlockManager): self._mgr = state elif isinstance(state, dict): if "_data" in state and "_mgr" not in state: # compat for older pickles state["_mgr"] = state.pop("_data") typ = state.get("_typ") if typ is not None: attrs = state.get("_attrs", {}) object.__setattr__(self, "_attrs", attrs) flags = state.get("_flags", {"allows_duplicate_labels": True}) object.__setattr__(self, "_flags", Flags(self, **flags)) # set in the order of internal names # to avoid definitional 
recursion # e.g. say fill_value needing _mgr to be # defined meta = set(self._internal_names + self._metadata) for k in list(meta): if k in state and k != "_flags": v = state[k] object.__setattr__(self, k, v) for k, v in state.items(): if k not in meta: object.__setattr__(self, k, v) else: raise NotImplementedError("Pre-0.12 pickles are no longer supported") elif len(state) == 2: raise NotImplementedError("Pre-0.12 pickles are no longer supported") self._item_cache: dict[Hashable, Series] = {} # ---------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str: # string representation based upon iterating over self # (since, by definition, `PandasContainers` are iterable) prepr = f"[{','.join(map(pprint_thing, self))}]" return f"{type(self).__name__}({prepr})" def _repr_latex_(self): """ Returns a LaTeX representation for a particular object. Mainly for use with nbconvert (jupyter notebook conversion to pdf). """ if config.get_option("styler.render.repr") == "latex": return self.to_latex() else: return None def _repr_data_resource_(self): """ Not a real Jupyter special repr method, but we use the same naming convention. """ if config.get_option("display.html.table_schema"): data = self.head(config.get_option("display.max_rows")) as_json = data.to_json(orient="table") as_json = cast(str, as_json) return loads(as_json, object_pairs_hook=collections.OrderedDict) # ---------------------------------------------------------------------- # I/O Methods @doc( klass="object", storage_options=_shared_docs["storage_options"], storage_options_versionadded="1.2.0", ) def to_excel( self, excel_writer, sheet_name: str = "Sheet1", na_rep: str = "", float_format: str | None = None, columns: Sequence[Hashable] | None = None, header: Sequence[Hashable] | bool_t = True, index: bool_t = True, index_label: IndexLabel = None, startrow: int = 0, startcol: int = 0, engine: str | None = None, merge_cells: bool_t = True, inf_rep: str = "inf", freeze_panes: tuple[int, int] | None = None, storage_options: StorageOptions = None, ) -> None: """ Write {klass} to an Excel sheet. To write a single {klass} to an Excel .xlsx file it is only necessary to specify a target file name. To write to multiple sheets it is necessary to create an `ExcelWriter` object with a target file name, and specify a sheet in the file to write to. Multiple sheets may be written to by specifying unique `sheet_name`. With all data written to the file it is necessary to save the changes. Note that creating an `ExcelWriter` object with a file name that already exists will result in the contents of the existing file being erased. Parameters ---------- excel_writer : path-like, file-like, or ExcelWriter object File path or existing ExcelWriter. sheet_name : str, default 'Sheet1' Name of sheet which will contain DataFrame. na_rep : str, default '' Missing data representation. float_format : str, optional Format string for floating point numbers. For example ``float_format="%.2f"`` will format 0.1234 to 0.12. columns : sequence or list of str, optional Columns to write. header : bool or list of str, default True Write out the column names. If a list of strings is given it is assumed to be aliases for the column names. index : bool, default True Write row names (index). index_label : str or sequence, optional Column label for index column(s) if desired. If not specified, and `header` and `index` are True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0 Upper left cell row to dump data frame. startcol : int, default 0 Upper left cell column to dump data frame. engine : str, optional Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this via the options ``io.excel.xlsx.writer`` or ``io.excel.xlsm.writer``. merge_cells : bool, default True Write MultiIndex and Hierarchical Rows as merged cells. inf_rep : str, default 'inf' Representation for infinity (there is no native representation for infinity in Excel). freeze_panes : tuple of int (length 2), optional Specifies the one-based bottommost row and rightmost column that is to be frozen. {storage_options} .. versionadded:: {storage_options_versionadded} See Also -------- to_csv : Write DataFrame to a comma-separated values (csv) file. ExcelWriter : Class for writing DataFrame objects into excel sheets. read_excel : Read an Excel file into a pandas DataFrame. read_csv : Read a comma-separated values (csv) file into DataFrame. io.formats.style.Styler.to_excel : Add styles to Excel sheet. Notes ----- For compatibility with :meth:`~DataFrame.to_csv`, to_excel serializes lists and dicts to strings before writing. Once a workbook has been saved it is not possible to write further data without rewriting the whole workbook. Examples -------- Create, write to and save a workbook: >>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']], ... index=['row 1', 'row 2'], ... columns=['col 1', 'col 2']) >>> df1.to_excel("output.xlsx") # doctest: +SKIP To specify the sheet name: >>> df1.to_excel("output.xlsx", ... sheet_name='Sheet_name_1') # doctest: +SKIP If you wish to write to more than one sheet in the workbook, it is necessary to specify an ExcelWriter object: >>> df2 = df1.copy() >>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP ... df1.to_excel(writer, sheet_name='Sheet_name_1') ... df2.to_excel(writer, sheet_name='Sheet_name_2') ExcelWriter can also be used to append to an existing Excel file: >>> with pd.ExcelWriter('output.xlsx', ... mode='a') as writer: # doctest: +SKIP ... df.to_excel(writer, sheet_name='Sheet_name_3') To set the library that is used to write the Excel file, you can pass the `engine` keyword (the default engine is automatically chosen depending on the file extension): >>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP """ df = self if isinstance(self, ABCDataFrame) else self.to_frame() from pandas.io.formats.excel import ExcelFormatter formatter = ExcelFormatter( df, na_rep=na_rep, cols=columns, header=header, float_format=float_format, index=index, index_label=index_label, merge_cells=merge_cells, inf_rep=inf_rep, ) formatter.write( excel_writer, sheet_name=sheet_name, startrow=startrow, startcol=startcol, freeze_panes=freeze_panes, engine=engine, storage_options=storage_options, ) @doc( storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buf", ) def to_json( self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, orient: str | None = None, date_format: str | None = None, double_precision: int = 10, force_ascii: bool_t = True, date_unit: str = "ms", default_handler: Callable[[Any], JSONSerializable] | None = None, lines: bool_t = False, compression: CompressionOptions = "infer", index: bool_t = True, indent: int | None = None, storage_options: StorageOptions = None, mode: Literal["a", "w"] = "w", ) -> str | None: """ Convert the object to a JSON string.
Note NaN's and None will be converted to null and datetime objects will be converted to UNIX timestamps. Parameters ---------- path_or_buf : str, path object, file-like object, or None, default None String, path object (implementing os.PathLike[str]), or file-like object implementing a write() function. If None, the result is returned as a string. orient : str Indication of expected JSON string format. * Series: - default is 'index' - allowed values are: {{'split', 'records', 'index', 'table'}}. * DataFrame: - default is 'columns' - allowed values are: {{'split', 'records', 'index', 'columns', 'values', 'table'}}. * The format of the JSON string: - 'split' : dict like {{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}} - 'records' : list like [{{column -> value}}, ... , {{column -> value}}] - 'index' : dict like {{index -> {{column -> value}}}} - 'columns' : dict like {{column -> {{index -> value}}}} - 'values' : just the values array - 'table' : dict like {{'schema': {{schema}}, 'data': {{data}}}} Describing the data, where data component is like ``orient='records'``. date_format : {{None, 'epoch', 'iso'}} Type of date conversion. 'epoch' = epoch milliseconds, 'iso' = ISO8601. The default depends on the `orient`. For ``orient='table'``, the default is 'iso'. For all other orients, the default is 'epoch'. double_precision : int, default 10 The number of decimal places to use when encoding floating point values. force_ascii : bool, default True Force encoded string to be ASCII. date_unit : str, default 'ms' (milliseconds) The time unit to encode to, governs timestamp and ISO8601 precision. One of 's', 'ms', 'us', 'ns' for second, millisecond, microsecond, and nanosecond respectively. default_handler : callable, default None Handler to call if object cannot otherwise be converted to a suitable format for JSON. Should receive a single argument which is the object to convert and return a serialisable object. lines : bool, default False If 'orient' is 'records' write out line-delimited json format. Will throw ValueError if incorrect 'orient' since others are not list-like. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. index : bool, default True Whether to include the index values in the JSON string. Not including the index (``index=False``) is only supported when orient is 'split' or 'table'. indent : int, optional Length of whitespace used to indent each record. {storage_options} .. versionadded:: 1.2.0 mode : str, default 'w' (writing) Specify the IO mode for output when supplying a path_or_buf. Accepted args are 'w' (writing) and 'a' (append) only. mode='a' is only supported when lines is True and orient is 'records'. Returns ------- None or str If path_or_buf is None, returns the resulting json format as a string. Otherwise returns None. See Also -------- read_json : Convert a JSON string to pandas object. Notes ----- The behavior of ``indent=0`` varies from the stdlib, which does not indent the output but does insert newlines. Currently, ``indent=0`` and the default ``indent=None`` are equivalent in pandas, though this may change in a future release. ``orient='table'`` contains a 'pandas_version' field under 'schema'. This stores the version of `pandas` used in the latest revision of the schema. Examples -------- >>> from json import loads, dumps >>> df = pd.DataFrame( ... [["a", "b"], ["c", "d"]], ... index=["row 1", "row 2"], ... columns=["col 1", "col 2"], ... 
) >>> result = df.to_json(orient="split") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "columns": [ "col 1", "col 2" ], "index": [ "row 1", "row 2" ], "data": [ [ "a", "b" ], [ "c", "d" ] ] }} Encoding/decoding a DataFrame using ``'records'`` formatted JSON. Note that index labels are not preserved with this encoding. >>> result = df.to_json(orient="records") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP [ {{ "col 1": "a", "col 2": "b" }}, {{ "col 1": "c", "col 2": "d" }} ] Encoding/decoding a DataFrame using ``'index'`` formatted JSON: >>> result = df.to_json(orient="index") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "row 1": {{ "col 1": "a", "col 2": "b" }}, "row 2": {{ "col 1": "c", "col 2": "d" }} }} Encoding/decoding a DataFrame using ``'columns'`` formatted JSON: >>> result = df.to_json(orient="columns") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "col 1": {{ "row 1": "a", "row 2": "c" }}, "col 2": {{ "row 1": "b", "row 2": "d" }} }} Encoding/decoding a DataFrame using ``'values'`` formatted JSON: >>> result = df.to_json(orient="values") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP [ [ "a", "b" ], [ "c", "d" ] ] Encoding with Table Schema: >>> result = df.to_json(orient="table") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "schema": {{ "fields": [ {{ "name": "index", "type": "string" }}, {{ "name": "col 1", "type": "string" }}, {{ "name": "col 2", "type": "string" }} ], "primaryKey": [ "index" ], "pandas_version": "1.4.0" }}, "data": [ {{ "index": "row 1", "col 1": "a", "col 2": "b" }}, {{ "index": "row 2", "col 1": "c", "col 2": "d" }} ] }} """ from pandas.io import json if date_format is None and orient == "table": date_format = "iso" elif date_format is None: date_format = "epoch" config.is_nonnegative_int(indent) indent = indent or 0 return json.to_json( path_or_buf=path_or_buf, obj=self, orient=orient, date_format=date_format, double_precision=double_precision, force_ascii=force_ascii, date_unit=date_unit, default_handler=default_handler, lines=lines, compression=compression, index=index, indent=indent, storage_options=storage_options, mode=mode, ) def to_hdf( self, path_or_buf: FilePath | HDFStore, key: str, mode: str = "a", complevel: int | None = None, complib: str | None = None, append: bool_t = False, format: str | None = None, index: bool_t = True, min_itemsize: int | dict[str, int] | None = None, nan_rep=None, dropna: bool_t | None = None, data_columns: Literal[True] | list[str] | None = None, errors: str = "strict", encoding: str = "UTF-8", ) -> None: """ Write the contained data to an HDF5 file using HDFStore. Hierarchical Data Format (HDF) is self-describing, allowing an application to interpret the structure and contents of a file with no outside information. One HDF file can hold a mix of related objects which can be accessed as a group or as individual objects. In order to add another DataFrame or Series to an existing HDF file please use append mode and a different key. .. warning:: One can store a subclass of ``DataFrame`` or ``Series`` to HDF5, but the type of the subclass is lost upon storing. For more information see the :ref:`user guide <io.hdf5>`. Parameters ---------- path_or_buf : str or pandas.HDFStore File path or HDFStore object. key : str Identifier for the group in the store.
mode : {'a', 'w', 'r+'}, default 'a' Mode to open file: - 'w': write, a new file is created (an existing file with the same name would be deleted). - 'a': append, an existing file is opened for reading and writing, and if the file does not exist it is created. - 'r+': similar to 'a', but the file must already exist. complevel : {0-9}, default None Specifies a compression level for data. A value of 0 or None disables compression. complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib' Specifies the compression library to be used. As of v0.20.2 these additional compressors for Blosc are supported (default if no compressor specified: 'blosc:blosclz'): {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy', 'blosc:zlib', 'blosc:zstd'}. Specifying a compression library which is not available issues a ValueError. append : bool, default False For Table formats, append the input data to the existing. format : {'fixed', 'table', None}, default 'fixed' Possible values: - 'fixed': Fixed format. Fast writing/reading. Not-appendable, nor searchable. - 'table': Table format. Write as a PyTables Table structure which may perform worse but allow more flexible operations like searching / selecting subsets of the data. - If None, pd.get_option('io.hdf.default_format') is checked, followed by fallback to "fixed". index : bool, default True Write DataFrame index as a column. min_itemsize : dict or int, optional Map column names to minimum string sizes for columns. nan_rep : Any, optional How to represent null values as str. Not allowed with append=True. dropna : bool, default False, optional Remove missing values. data_columns : list of columns or True, optional List of columns to create as indexed data columns for on-disk queries, or True to use all columns. By default only the axes of the object are indexed. See :ref:`Query via data columns<io.hdf5-query-data-columns>`. for more information. Applicable only to format='table'. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. encoding : str, default "UTF-8" See Also -------- read_hdf : Read from HDF file. DataFrame.to_orc : Write a DataFrame to the binary orc format. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. DataFrame.to_sql : Write to a SQL table. DataFrame.to_feather : Write out feather-format for DataFrames. DataFrame.to_csv : Write out to a csv file. Examples -------- >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}, ... 
index=['a', 'b', 'c']) # doctest: +SKIP >>> df.to_hdf('data.h5', key='df', mode='w') # doctest: +SKIP We can add another object to the same file: >>> s = pd.Series([1, 2, 3, 4]) # doctest: +SKIP >>> s.to_hdf('data.h5', key='s') # doctest: +SKIP Reading from HDF file: >>> pd.read_hdf('data.h5', 'df') # doctest: +SKIP A B a 1 4 b 2 5 c 3 6 >>> pd.read_hdf('data.h5', 's') # doctest: +SKIP 0 1 1 2 2 3 3 4 dtype: int64 """ from pandas.io import pytables # Argument 3 to "to_hdf" has incompatible type "NDFrame"; expected # "Union[DataFrame, Series]" [arg-type] pytables.to_hdf( path_or_buf, key, self, # type: ignore[arg-type] mode=mode, complevel=complevel, complib=complib, append=append, format=format, index=index, min_itemsize=min_itemsize, nan_rep=nan_rep, dropna=dropna, data_columns=data_columns, errors=errors, encoding=encoding, ) def to_sql( self, name: str, con, schema: str | None = None, if_exists: Literal["fail", "replace", "append"] = "fail", index: bool_t = True, index_label: IndexLabel = None, chunksize: int | None = None, dtype: DtypeArg | None = None, method: str | None = None, ) -> int | None: """ Write records stored in a DataFrame to a SQL database. Databases supported by SQLAlchemy [1]_ are supported. Tables can be newly created, appended to, or overwritten. Parameters ---------- name : str Name of SQL table. con : sqlalchemy.engine.(Engine or Connection) or sqlite3.Connection Using SQLAlchemy makes it possible to use any DB supported by that library. Legacy support is provided for sqlite3.Connection objects. The user is responsible for engine disposal and connection closure for the SQLAlchemy connectable. See `here \ <https://docs.sqlalchemy.org/en/20/core/connections.html>`_. If passing a sqlalchemy.engine.Connection which is already in a transaction, the transaction will not be committed. If passing a sqlite3.Connection, it will not be possible to roll back the record insertion. schema : str, optional Specify the schema (if database flavor supports this). If None, use default schema. if_exists : {'fail', 'replace', 'append'}, default 'fail' How to behave if the table already exists. * fail: Raise a ValueError. * replace: Drop the table before inserting new values. * append: Insert new values to the existing table. index : bool, default True Write DataFrame index as a column. Uses `index_label` as the column name in the table. index_label : str or sequence, default None Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. chunksize : int, optional Specify the number of rows in each batch to be written at a time. By default, all rows will be written at once. dtype : dict or scalar, optional Specifying the datatype for columns. If a dictionary is used, the keys should be the column names and the values should be the SQLAlchemy types or strings for the sqlite3 legacy mode. If a scalar is provided, it will be applied to all columns. method : {None, 'multi', callable}, optional Controls the SQL insertion clause used: * None : Uses standard SQL ``INSERT`` clause (one per row). * 'multi': Pass multiple values in a single ``INSERT`` clause. * callable with signature ``(pd_table, conn, keys, data_iter)``. Details and a sample callable implementation can be found in the section :ref:`insert method <io.sql.method>`. Returns ------- None or int Number of rows affected by to_sql. 
None is returned if the callable passed into ``method`` does not return an integer number of rows. The number of returned rows affected is the sum of the ``rowcount`` attribute of ``sqlite3.Cursor`` or SQLAlchemy connectable which may not reflect the exact number of written rows as stipulated in the `sqlite3 <https://docs.python.org/3/library/sqlite3.html#sqlite3.Cursor.rowcount>`__ or `SQLAlchemy <https://docs.sqlalchemy.org/en/20/core/connections.html#sqlalchemy.engine.CursorResult.rowcount>`__ documentation. .. versionadded:: 1.4.0 Raises ------ ValueError When the table already exists and `if_exists` is 'fail' (the default). See Also -------- read_sql : Read a DataFrame from a table. Notes ----- Timezone aware datetime columns will be written as ``Timestamp with timezone`` type with SQLAlchemy if supported by the database. Otherwise, the datetimes will be stored as timezone unaware timestamps local to the original timezone. References ---------- .. [1] https://docs.sqlalchemy.org .. [2] https://www.python.org/dev/peps/pep-0249/ Examples -------- Create an in-memory SQLite database. >>> from sqlalchemy import create_engine >>> engine = create_engine('sqlite://', echo=False) Create a table from scratch with 3 rows. >>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']}) >>> df name 0 User 1 1 User 2 2 User 3 >>> df.to_sql('users', con=engine) 3 >>> from sqlalchemy import text >>> with engine.connect() as conn: ... conn.execute(text("SELECT * FROM users")).fetchall() [(0, 'User 1'), (1, 'User 2'), (2, 'User 3')] An `sqlalchemy.engine.Connection` can also be passed to `con`: >>> with engine.begin() as connection: ... df1 = pd.DataFrame({'name' : ['User 4', 'User 5']}) ... df1.to_sql('users', con=connection, if_exists='append') 2 This is allowed to support operations that require that the same DBAPI connection is used for the entire operation. >>> df2 = pd.DataFrame({'name' : ['User 6', 'User 7']}) >>> df2.to_sql('users', con=engine, if_exists='append') 2 >>> with engine.connect() as conn: ... conn.execute(text("SELECT * FROM users")).fetchall() [(0, 'User 1'), (1, 'User 2'), (2, 'User 3'), (0, 'User 4'), (1, 'User 5'), (0, 'User 6'), (1, 'User 7')] Overwrite the table with just ``df2``. >>> df2.to_sql('users', con=engine, if_exists='replace', ... index_label='id') 2 >>> with engine.connect() as conn: ... conn.execute(text("SELECT * FROM users")).fetchall() [(0, 'User 6'), (1, 'User 7')] Specify the dtype (especially useful for integers with missing values). Notice that while pandas is forced to store the data as floating point, the database supports nullable integers. When fetching the data with Python, we get back integer scalars. >>> df = pd.DataFrame({"A": [1, None, 2]}) >>> df A 0 1.0 1 NaN 2 2.0 >>> from sqlalchemy.types import Integer >>> df.to_sql('integers', con=engine, index=False, ... dtype={"A": Integer()}) 3 >>> with engine.connect() as conn: ... conn.execute(text("SELECT * FROM integers")).fetchall() [(1,), (None,), (2,)] """ # noqa:E501 from pandas.io import sql return sql.to_sql( self, name, con, schema=schema, if_exists=if_exists, index=index, index_label=index_label, chunksize=chunksize, dtype=dtype, method=method, ) @doc( storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path", ) def to_pickle( self, path: FilePath | WriteBuffer[bytes], compression: CompressionOptions = "infer", protocol: int = pickle.HIGHEST_PROTOCOL, storage_options: StorageOptions = None, ) -> None: """ Pickle (serialize) object to file.
Parameters ---------- path : str, path object, or file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. File path where the pickled object will be stored. {compression_options} protocol : int Int which indicates which protocol should be used by the pickler, default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible values are 0, 1, 2, 3, 4, 5. A negative value for the protocol parameter is equivalent to setting its value to HIGHEST_PROTOCOL. .. [1] https://docs.python.org/3/library/pickle.html. {storage_options} .. versionadded:: 1.2.0 See Also -------- read_pickle : Load pickled pandas object (or any object) from file. DataFrame.to_hdf : Write DataFrame to an HDF5 file. DataFrame.to_sql : Write DataFrame to a SQL database. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Examples -------- >>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}}) # doctest: +SKIP >>> original_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> original_df.to_pickle("./dummy.pkl") # doctest: +SKIP >>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP >>> unpickled_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 """ # noqa: E501 from pandas.io.pickle import to_pickle to_pickle( self, path, compression=compression, protocol=protocol, storage_options=storage_options, ) def to_clipboard( self, excel: bool_t = True, sep: str | None = None, **kwargs ) -> None: r""" Copy object to the system clipboard. Write a text representation of object to the system clipboard. This can be pasted into Excel, for example. Parameters ---------- excel : bool, default True Produce output in a csv format for easy pasting into excel. - True, use the provided separator for csv pasting. - False, write a string representation of the object to the clipboard. sep : str, default ``'\t'`` Field delimiter. **kwargs These parameters will be passed to DataFrame.to_csv. See Also -------- DataFrame.to_csv : Write a DataFrame to a comma-separated values (csv) file. read_clipboard : Read text from clipboard and pass to read_csv. Notes ----- Requirements for your platform. - Linux : `xclip`, or `xsel` (with `PyQt4` modules) - Windows : none - macOS : none This method uses the processes developed for the package `pyperclip`. A solution to render any output string format is given in the examples. Examples -------- Copy the contents of a DataFrame to the clipboard. >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) >>> df.to_clipboard(sep=',') # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # ,A,B,C ... # 0,1,2,3 ... # 1,4,5,6 We can omit the index by passing the keyword `index` and setting it to false. >>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # A,B,C ... # 1,2,3 ... # 4,5,6 Using the original `pyperclip` package for any string output format. .. code-block:: python import pyperclip html = df.style.to_html() pyperclip.copy(html) """ from pandas.io import clipboards clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs) def to_xarray(self): """ Return an xarray object from the pandas object. Returns ------- xarray.DataArray or xarray.Dataset Data in the pandas structure converted to Dataset if the object is a DataFrame, or a DataArray if the object is a Series. See Also -------- DataFrame.to_hdf : Write DataFrame to an HDF5 file. 
DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Notes ----- See the `xarray docs <https://xarray.pydata.org/en/stable/>`__ Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2), ... ('parrot', 'bird', 24.0, 2), ... ('lion', 'mammal', 80.5, 4), ... ('monkey', 'mammal', np.nan, 4)], ... columns=['name', 'class', 'max_speed', ... 'num_legs']) >>> df name class max_speed num_legs 0 falcon bird 389.0 2 1 parrot bird 24.0 2 2 lion mammal 80.5 4 3 monkey mammal NaN 4 >>> df.to_xarray() <xarray.Dataset> Dimensions: (index: 4) Coordinates: * index (index) int64 0 1 2 3 Data variables: name (index) object 'falcon' 'parrot' 'lion' 'monkey' class (index) object 'bird' 'bird' 'mammal' 'mammal' max_speed (index) float64 389.0 24.0 80.5 nan num_legs (index) int64 2 2 4 4 >>> df['max_speed'].to_xarray() <xarray.DataArray 'max_speed' (index: 4)> array([389. , 24. , 80.5, nan]) Coordinates: * index (index) int64 0 1 2 3 >>> dates = pd.to_datetime(['2018-01-01', '2018-01-01', ... '2018-01-02', '2018-01-02']) >>> df_multiindex = pd.DataFrame({'date': dates, ... 'animal': ['falcon', 'parrot', ... 'falcon', 'parrot'], ... 'speed': [350, 18, 361, 15]}) >>> df_multiindex = df_multiindex.set_index(['date', 'animal']) >>> df_multiindex speed date animal 2018-01-01 falcon 350 parrot 18 2018-01-02 falcon 361 parrot 15 >>> df_multiindex.to_xarray() <xarray.Dataset> Dimensions: (date: 2, animal: 2) Coordinates: * date (date) datetime64[ns] 2018-01-01 2018-01-02 * animal (animal) object 'falcon' 'parrot' Data variables: speed (date, animal) int64 350 18 361 15 """ xarray = import_optional_dependency("xarray") if self.ndim == 1: return xarray.DataArray.from_series(self) else: return xarray.Dataset.from_dataframe(self) def to_latex( self, buf: None = ..., columns: Sequence[Hashable] | None = ..., header: bool_t | Sequence[str] = ..., index: bool_t = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool_t | None = ..., index_names: bool_t = ..., bold_rows: bool_t = ..., column_format: str | None = ..., longtable: bool_t | None = ..., escape: bool_t | None = ..., encoding: str | None = ..., decimal: str = ..., multicolumn: bool_t | None = ..., multicolumn_format: str | None = ..., multirow: bool_t | None = ..., caption: str | tuple[str, str] | None = ..., label: str | None = ..., position: str | None = ..., ) -> str: ... def to_latex( self, buf: FilePath | WriteBuffer[str], columns: Sequence[Hashable] | None = ..., header: bool_t | Sequence[str] = ..., index: bool_t = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool_t | None = ..., index_names: bool_t = ..., bold_rows: bool_t = ..., column_format: str | None = ..., longtable: bool_t | None = ..., escape: bool_t | None = ..., encoding: str | None = ..., decimal: str = ..., multicolumn: bool_t | None = ..., multicolumn_format: str | None = ..., multirow: bool_t | None = ..., caption: str | tuple[str, str] | None = ..., label: str | None = ..., position: str | None = ..., ) -> None: ... 
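# A minimal sketch of the two call shapes the overloads above encode, using a
# throwaway frame (the file name is illustrative only; rendering requires
# jinja2, as noted in the docstring below):
#
# >>> df = pd.DataFrame({"x": [1.0, 2.5]})
# >>> tex = df.to_latex()       # buf=None: the LaTeX source is returned as str
# >>> df.to_latex("table.tex")  # buf given: written to the buffer, returns None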
def to_latex( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[Hashable] | None = None, header: bool_t | Sequence[str] = True, index: bool_t = True, na_rep: str = "NaN", formatters: FormattersType | None = None, float_format: FloatFormatType | None = None, sparsify: bool_t | None = None, index_names: bool_t = True, bold_rows: bool_t = False, column_format: str | None = None, longtable: bool_t | None = None, escape: bool_t | None = None, encoding: str | None = None, decimal: str = ".", multicolumn: bool_t | None = None, multicolumn_format: str | None = None, multirow: bool_t | None = None, caption: str | tuple[str, str] | None = None, label: str | None = None, position: str | None = None, ) -> str | None: r""" Render object to a LaTeX tabular, longtable, or nested table. Requires ``\usepackage{{booktabs}}``. The output can be copy/pasted into a main LaTeX document or read from an external file with ``\input{{table.tex}}``. .. versionchanged:: 1.2.0 Added position argument, changed meaning of caption argument. .. versionchanged:: 2.0.0 Refactored to use the Styler implementation via jinja2 templating. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. columns : list of label, optional The subset of columns to write. Writes all columns by default. header : bool or list of str, default True Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names. index : bool, default True Write row names (index). na_rep : str, default 'NaN' Missing data representation. formatters : list of functions or dict of {{str: function}}, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function or str, optional, default None Formatter for floating point numbers. For example ``float_format="%.2f"`` and ``float_format="{{:0.2f}}".format`` will both result in 0.1234 being formatted as 0.12. sparsify : bool, optional Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. By default, the value will be read from the config module. index_names : bool, default True Prints the names of the indexes. bold_rows : bool, default False Make the row labels bold in the output. column_format : str, optional The columns format as specified in `LaTeX table format <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3 columns. By default, 'l' will be used for all columns except columns of numbers, which default to 'r'. longtable : bool, optional Use a longtable environment instead of tabular. Requires adding a \usepackage{{longtable}} to your LaTeX preamble. By default, the value will be read from the pandas config module, and set to `True` if the option ``styler.latex.environment`` is `"longtable"`. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed. escape : bool, optional By default, the value will be read from the pandas config module and set to `True` if the option ``styler.format.escape`` is `"latex"`. When set to False prevents from escaping latex special characters in column names. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed, as has the default value to `False`. encoding : str, optional A string representing the encoding to use in the output file, defaults to 'utf-8'. 
decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. multicolumn : bool, default True Use \multicolumn to enhance MultiIndex columns. The default will be read from the config module, and is set as the option ``styler.sparse.columns``. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed. multicolumn_format : str, default 'r' The alignment for multicolumns, similar to `column_format` The default will be read from the config module, and is set as the option ``styler.latex.multicol_align``. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed, as has the default value to "r". multirow : bool, default True Use \multirow to enhance MultiIndex rows. Requires adding a \usepackage{{multirow}} to your LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained rows, separating groups via clines. The default will be read from the pandas config module, and is set as the option ``styler.sparse.index``. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed, as has the default value to `True`. caption : str or tuple, optional Tuple (full_caption, short_caption), which results in ``\caption[short_caption]{{full_caption}}``; if a single string is passed, no short caption will be set. .. versionchanged:: 1.2.0 Optionally allow caption to be a tuple ``(full_caption, short_caption)``. label : str, optional The LaTeX label to be placed inside ``\label{{}}`` in the output. This is used with ``\ref{{}}`` in the main ``.tex`` file. position : str, optional The LaTeX positional argument for tables, to be placed after ``\begin{{}}`` in the output. .. versionadded:: 1.2.0 Returns ------- str or None If buf is None, returns the result as a string. Otherwise returns None. See Also -------- io.formats.style.Styler.to_latex : Render a DataFrame to LaTeX with conditional formatting. DataFrame.to_string : Render a DataFrame to a console-friendly tabular output. DataFrame.to_html : Render a DataFrame as an HTML table. Notes ----- As of v2.0.0 this method has changed to use the Styler implementation as part of :meth:`.Styler.to_latex` via ``jinja2`` templating. This means that ``jinja2`` is a requirement, and needs to be installed, for this method to function. It is advised that users switch to using Styler, since that implementation is more frequently updated and contains much more flexibility with the output. Examples -------- Convert a general DataFrame to LaTeX with formatting: >>> df = pd.DataFrame(dict(name=['Raphael', 'Donatello'], ... age=[26, 45], ... height=[181.23, 177.65])) >>> print(df.to_latex(index=False, ... formatters={"name": str.upper}, ... float_format="{:.1f}".format, ... 
)) # doctest: +SKIP \begin{tabular}{lrr} \toprule name & age & height \\ \midrule RAPHAEL & 26 & 181.2 \\ DONATELLO & 45 & 177.7 \\ \bottomrule \end{tabular} """ # Get defaults from the pandas config if self.ndim == 1: self = self.to_frame() if longtable is None: longtable = config.get_option("styler.latex.environment") == "longtable" if escape is None: escape = config.get_option("styler.format.escape") == "latex" if multicolumn is None: multicolumn = config.get_option("styler.sparse.columns") if multicolumn_format is None: multicolumn_format = config.get_option("styler.latex.multicol_align") if multirow is None: multirow = config.get_option("styler.sparse.index") if column_format is not None and not isinstance(column_format, str): raise ValueError("`column_format` must be str or unicode") length = len(self.columns) if columns is None else len(columns) if isinstance(header, (list, tuple)) and len(header) != length: raise ValueError(f"Writing {length} cols but got {len(header)} aliases") # Refactor formatters/float_format/decimal/na_rep/escape to Styler structure base_format_ = { "na_rep": na_rep, "escape": "latex" if escape else None, "decimal": decimal, } index_format_: dict[str, Any] = {"axis": 0, **base_format_} column_format_: dict[str, Any] = {"axis": 1, **base_format_} if isinstance(float_format, str): float_format_: Callable | None = lambda x: float_format % x else: float_format_ = float_format def _wrap(x, alt_format_): if isinstance(x, (float, complex)) and float_format_ is not None: return float_format_(x) else: return alt_format_(x) formatters_: list | tuple | dict | Callable | None = None if isinstance(formatters, list): formatters_ = { c: partial(_wrap, alt_format_=formatters[i]) for i, c in enumerate(self.columns) } elif isinstance(formatters, dict): index_formatter = formatters.pop("__index__", None) column_formatter = formatters.pop("__columns__", None) if index_formatter is not None: index_format_.update({"formatter": index_formatter}) if column_formatter is not None: column_format_.update({"formatter": column_formatter}) formatters_ = formatters float_columns = self.select_dtypes(include="float").columns for col in float_columns: if col not in formatters.keys(): formatters_.update({col: float_format_}) elif formatters is None and float_format is not None: formatters_ = partial(_wrap, alt_format_=lambda v: v) format_index_ = [index_format_, column_format_] # Deal with hiding indexes and relabelling column names hide_: list[dict] = [] relabel_index_: list[dict] = [] if columns: hide_.append( { "subset": [c for c in self.columns if c not in columns], "axis": "columns", } ) if header is False: hide_.append({"axis": "columns"}) elif isinstance(header, (list, tuple)): relabel_index_.append({"labels": header, "axis": "columns"}) format_index_ = [index_format_] # column_format is overwritten if index is False: hide_.append({"axis": "index"}) if index_names is False: hide_.append({"names": True, "axis": "index"}) render_kwargs_ = { "hrules": True, "sparse_index": sparsify, "sparse_columns": sparsify, "environment": "longtable" if longtable else None, "multicol_align": multicolumn_format if multicolumn else f"naive-{multicolumn_format}", "multirow_align": "t" if multirow else "naive", "encoding": encoding, "caption": caption, "label": label, "position": position, "column_format": column_format, "clines": "skip-last;data" if (multirow and isinstance(self.index, MultiIndex)) else None, "bold_rows": bold_rows, } return self._to_latex_via_styler( buf, hide=hide_, 
relabel_index=relabel_index_, format={"formatter": formatters_, **base_format_}, format_index=format_index_, render_kwargs=render_kwargs_, ) def _to_latex_via_styler( self, buf=None, *, hide: dict | list[dict] | None = None, relabel_index: dict | list[dict] | None = None, format: dict | list[dict] | None = None, format_index: dict | list[dict] | None = None, render_kwargs: dict | None = None, ): """ Render object to a LaTeX tabular, longtable, or nested table. Uses the ``Styler`` implementation with the following, ordered, method chaining: .. code-block:: python styler = Styler(DataFrame) styler.hide(**hide) styler.relabel_index(**relabel_index) styler.format(**format) styler.format_index(**format_index) styler.to_latex(buf=buf, **render_kwargs) Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. hide : dict, list of dict Keyword args to pass to the method call of ``Styler.hide``. If a list will call the method numerous times. relabel_index : dict, list of dict Keyword args to pass to the method of ``Styler.relabel_index``. If a list will call the method numerous times. format : dict, list of dict Keyword args to pass to the method call of ``Styler.format``. If a list will call the method numerous times. format_index : dict, list of dict Keyword args to pass to the method call of ``Styler.format_index``. If a list will call the method numerous times. render_kwargs : dict Keyword args to pass to the method call of ``Styler.to_latex``. Returns ------- str or None If buf is None, returns the result as a string. Otherwise returns None. """ from pandas.io.formats.style import Styler self = cast("DataFrame", self) styler = Styler(self, uuid="") for kw_name in ["hide", "relabel_index", "format", "format_index"]: kw = vars()[kw_name] if isinstance(kw, dict): getattr(styler, kw_name)(**kw) elif isinstance(kw, list): for sub_kw in kw: getattr(styler, kw_name)(**sub_kw) # bold_rows is not a direct kwarg of Styler.to_latex render_kwargs = {} if render_kwargs is None else render_kwargs if render_kwargs.pop("bold_rows"): styler.applymap_index(lambda v: "textbf:--rwrap;") return styler.to_latex(buf=buf, **render_kwargs) def to_csv( self, path_or_buf: None = ..., sep: str = ..., na_rep: str = ..., float_format: str | Callable | None = ..., columns: Sequence[Hashable] | None = ..., header: bool_t | list[str] = ..., index: bool_t = ..., index_label: IndexLabel | None = ..., mode: str = ..., encoding: str | None = ..., compression: CompressionOptions = ..., quoting: int | None = ..., quotechar: str = ..., lineterminator: str | None = ..., chunksize: int | None = ..., date_format: str | None = ..., doublequote: bool_t = ..., escapechar: str | None = ..., decimal: str = ..., errors: str = ..., storage_options: StorageOptions = ..., ) -> str: ... def to_csv( self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str], sep: str = ..., na_rep: str = ..., float_format: str | Callable | None = ..., columns: Sequence[Hashable] | None = ..., header: bool_t | list[str] = ..., index: bool_t = ..., index_label: IndexLabel | None = ..., mode: str = ..., encoding: str | None = ..., compression: CompressionOptions = ..., quoting: int | None = ..., quotechar: str = ..., lineterminator: str | None = ..., chunksize: int | None = ..., date_format: str | None = ..., doublequote: bool_t = ..., escapechar: str | None = ..., decimal: str = ..., errors: str = ..., storage_options: StorageOptions = ..., ) -> None: ... 
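    # A minimal, illustrative sketch (not executed here) of the binary-buffer
    # path described in the docstring below: with a binary file object, ``mode``
    # should contain ``"b"`` and compression is applied by pandas itself.
    # ``io.BytesIO`` and the gzip option are assumptions for the example, not
    # part of this module.
    #
    #   import io
    #   import pandas as pd
    #
    #   buf = io.BytesIO()
    #   df = pd.DataFrame({"a": [1, 2]})
    #   df.to_csv(buf, mode="wb", compression="gzip")  # gzip-compressed bytes
    #   raw = buf.getvalue()                           # in-memory CSV payload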
    @doc(
storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buf", ) def to_csv( self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, sep: str = ",", na_rep: str = "", float_format: str | Callable | None = None, columns: Sequence[Hashable] | None = None, header: bool_t | list[str] = True, index: bool_t = True, index_label: IndexLabel | None = None, mode: str = "w", encoding: str | None = None, compression: CompressionOptions = "infer", quoting: int | None = None, quotechar: str = '"', lineterminator: str | None = None, chunksize: int | None = None, date_format: str | None = None, doublequote: bool_t = True, escapechar: str | None = None, decimal: str = ".", errors: str = "strict", storage_options: StorageOptions = None, ) -> str | None: r""" Write object to a comma-separated values (csv) file. Parameters ---------- path_or_buf : str, path object, file-like object, or None, default None String, path object (implementing os.PathLike[str]), or file-like object implementing a write() function. If None, the result is returned as a string. If a non-binary file object is passed, it should be opened with `newline=''`, disabling universal newlines. If a binary file object is passed, `mode` might need to contain a `'b'`. .. versionchanged:: 1.2.0 Support for binary file objects was introduced. sep : str, default ',' String of length 1. Field delimiter for the output file. na_rep : str, default '' Missing data representation. float_format : str, Callable, default None Format string for floating point numbers. If a Callable is given, it takes precedence over other numeric formatting parameters, like decimal. columns : sequence, optional Columns to write. header : bool or list of str, default True Write out the column names. If a list of strings is given it is assumed to be aliases for the column names. index : bool, default True Write row names (index). index_label : str or sequence, or False, default None Column label for index column(s) if desired. If None is given, and `header` and `index` are True, then the index names are used. A sequence should be given if the object uses MultiIndex. If False do not print fields for index names. Use index_label=False for easier importing in R. mode : str, default 'w' Python write mode. The available write modes are the same as :py:func:`open`. encoding : str, optional A string representing the encoding to use in the output file, defaults to 'utf-8'. `encoding` is not supported if `path_or_buf` is a non-binary file object. {compression_options} .. versionchanged:: 1.0.0 May now be a dict with key 'method' as compression mode and other entries as additional compression options if compression mode is 'zip'. .. versionchanged:: 1.1.0 Passing compression options as keys in dict is supported for compression modes 'gzip', 'bz2', 'zstd', and 'zip'. .. versionchanged:: 1.2.0 Compression is supported for binary file objects. .. versionchanged:: 1.2.0 Previous versions forwarded dict entries for 'gzip' to `gzip.open` instead of `gzip.GzipFile` which prevented setting `mtime`. quoting : optional constant from csv module Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format` then floats are converted to strings and thus csv.QUOTE_NONNUMERIC will treat them as non-numeric. quotechar : str, default '\"' String of length 1. Character used to quote fields. lineterminator : str, optional The newline character or character sequence to use in the output file. 
Defaults to `os.linesep`, which depends on the OS in which this method is called ('\\n' for linux, '\\r\\n' for Windows, i.e.). .. versionchanged:: 1.5.0 Previously was line_terminator, changed for consistency with read_csv and the standard library 'csv' module. chunksize : int or None Rows to write at a time. date_format : str, default None Format string for datetime objects. doublequote : bool, default True Control quoting of `quotechar` inside a field. escapechar : str, default None String of length 1. Character used to escape `sep` and `quotechar` when appropriate. decimal : str, default '.' Character recognized as decimal separator. E.g. use ',' for European data. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 Returns ------- None or str If path_or_buf is None, returns the resulting csv format as a string. Otherwise returns None. See Also -------- read_csv : Load a CSV file into a DataFrame. to_excel : Write DataFrame to an Excel file. Examples -------- >>> df = pd.DataFrame({{'name': ['Raphael', 'Donatello'], ... 'mask': ['red', 'purple'], ... 'weapon': ['sai', 'bo staff']}}) >>> df.to_csv(index=False) 'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n' Create 'out.zip' containing 'out.csv' >>> compression_opts = dict(method='zip', ... archive_name='out.csv') # doctest: +SKIP >>> df.to_csv('out.zip', index=False, ... compression=compression_opts) # doctest: +SKIP To write a csv file to a new folder or nested folder you will first need to create it using either Pathlib or os: >>> from pathlib import Path # doctest: +SKIP >>> filepath = Path('folder/subfolder/out.csv') # doctest: +SKIP >>> filepath.parent.mkdir(parents=True, exist_ok=True) # doctest: +SKIP >>> df.to_csv(filepath) # doctest: +SKIP >>> import os # doctest: +SKIP >>> os.makedirs('folder/subfolder', exist_ok=True) # doctest: +SKIP >>> df.to_csv('folder/subfolder/out.csv') # doctest: +SKIP """ df = self if isinstance(self, ABCDataFrame) else self.to_frame() formatter = DataFrameFormatter( frame=df, header=header, index=index, na_rep=na_rep, float_format=float_format, decimal=decimal, ) return DataFrameRenderer(formatter).to_csv( path_or_buf, lineterminator=lineterminator, sep=sep, encoding=encoding, errors=errors, compression=compression, quoting=quoting, columns=columns, index_label=index_label, mode=mode, chunksize=chunksize, quotechar=quotechar, date_format=date_format, doublequote=doublequote, escapechar=escapechar, storage_options=storage_options, ) # ---------------------------------------------------------------------- # Lookup Caching def _reset_cacher(self) -> None: """ Reset the cacher. """ raise AbstractMethodError(self) def _maybe_update_cacher( self, clear: bool_t = False, verify_is_copy: bool_t = True, inplace: bool_t = False, ) -> None: """ See if we need to update our parent cacher if clear, then clear our cache. Parameters ---------- clear : bool, default False Clear the item cache. verify_is_copy : bool, default True Provide is_copy checks. 
""" if using_copy_on_write(): return if verify_is_copy: self._check_setitem_copy(t="referent") if clear: self._clear_item_cache() def _clear_item_cache(self) -> None: raise AbstractMethodError(self) # ---------------------------------------------------------------------- # Indexing Methods def take(self: NDFrameT, indices, axis: Axis = 0, **kwargs) -> NDFrameT: """ Return the elements in the given *positional* indices along an axis. This means that we are not indexing according to actual values in the index attribute of the object. We are indexing according to the actual position of the element in the object. Parameters ---------- indices : array-like An array of ints indicating which positions to take. axis : {0 or 'index', 1 or 'columns', None}, default 0 The axis on which to select elements. ``0`` means that we are selecting rows, ``1`` means that we are selecting columns. For `Series` this parameter is unused and defaults to 0. **kwargs For compatibility with :meth:`numpy.take`. Has no effect on the output. Returns ------- same type as caller An array-like containing the elements taken from the object. See Also -------- DataFrame.loc : Select a subset of a DataFrame by labels. DataFrame.iloc : Select a subset of a DataFrame by positions. numpy.take : Take elements from an array along an axis. Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey', 'mammal', np.nan)], ... columns=['name', 'class', 'max_speed'], ... index=[0, 2, 3, 1]) >>> df name class max_speed 0 falcon bird 389.0 2 parrot bird 24.0 3 lion mammal 80.5 1 monkey mammal NaN Take elements at positions 0 and 3 along the axis 0 (default). Note how the actual indices selected (0 and 1) do not correspond to our selected indices 0 and 3. That's because we are selecting the 0th and 3rd rows, not rows whose indices equal 0 and 3. >>> df.take([0, 3]) name class max_speed 0 falcon bird 389.0 1 monkey mammal NaN Take elements at indices 1 and 2 along the axis 1 (column selection). >>> df.take([1, 2], axis=1) class max_speed 0 bird 389.0 2 bird 24.0 3 mammal 80.5 1 mammal NaN We may take elements using negative integers for positive indices, starting from the end of the object, just like with Python lists. >>> df.take([-1, -2]) name class max_speed 1 monkey mammal NaN 3 lion mammal 80.5 """ nv.validate_take((), kwargs) return self._take(indices, axis) def _take( self: NDFrameT, indices, axis: Axis = 0, convert_indices: bool_t = True, ) -> NDFrameT: """ Internal version of the `take` allowing specification of additional args. See the docstring of `take` for full explanation of the parameters. """ if not isinstance(indices, slice): indices = np.asarray(indices, dtype=np.intp) if ( axis == 0 and indices.ndim == 1 and using_copy_on_write() and is_range_indexer(indices, len(self)) ): return self.copy(deep=None) new_data = self._mgr.take( indices, axis=self._get_block_manager_axis(axis), verify=True, convert_indices=convert_indices, ) return self._constructor(new_data).__finalize__(self, method="take") def _take_with_is_copy(self: NDFrameT, indices, axis: Axis = 0) -> NDFrameT: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing for the SettingWithCopyWarning). See the docstring of `take` for full explanation of the parameters. """ result = self._take(indices=indices, axis=axis) # Maybe set copy if we didn't actually change the index. 
if not result._get_axis(axis).equals(self._get_axis(axis)): result._set_is_copy(self) return result def xs( self: NDFrameT, key: IndexLabel, axis: Axis = 0, level: IndexLabel = None, drop_level: bool_t = True, ) -> NDFrameT: """ Return cross-section from the Series/DataFrame. This method takes a `key` argument to select data at a particular level of a MultiIndex. Parameters ---------- key : label or tuple of label Label contained in the index, or partially in a MultiIndex. axis : {0 or 'index', 1 or 'columns'}, default 0 Axis to retrieve cross-section on. level : object, defaults to first n levels (n=1 or len(key)) In case of a key partially contained in a MultiIndex, indicate which levels are used. Levels can be referred by label or position. drop_level : bool, default True If False, returns object with same levels as self. Returns ------- Series or DataFrame Cross-section from the original Series or DataFrame corresponding to the selected index levels. See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. DataFrame.iloc : Purely integer-location based indexing for selection by position. Notes ----- `xs` can not be used to set values. MultiIndex Slicers is a generic way to get/set values on any level or levels. It is a superset of `xs` functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`. Examples -------- >>> d = {'num_legs': [4, 4, 2, 2], ... 'num_wings': [0, 0, 2, 2], ... 'class': ['mammal', 'mammal', 'mammal', 'bird'], ... 'animal': ['cat', 'dog', 'bat', 'penguin'], ... 'locomotion': ['walks', 'walks', 'flies', 'walks']} >>> df = pd.DataFrame(data=d) >>> df = df.set_index(['class', 'animal', 'locomotion']) >>> df num_legs num_wings class animal locomotion mammal cat walks 4 0 dog walks 4 0 bat flies 2 2 bird penguin walks 2 2 Get values at specified index >>> df.xs('mammal') num_legs num_wings animal locomotion cat walks 4 0 dog walks 4 0 bat flies 2 2 Get values at several indexes >>> df.xs(('mammal', 'dog', 'walks')) num_legs 4 num_wings 0 Name: (mammal, dog, walks), dtype: int64 Get values at specified index and level >>> df.xs('cat', level=1) num_legs num_wings class locomotion mammal walks 4 0 Get values at several indexes and levels >>> df.xs(('bird', 'walks'), ... 
level=[0, 'locomotion']) num_legs num_wings animal penguin 2 2 Get values at specified column and axis >>> df.xs('num_wings', axis=1) class animal locomotion mammal cat walks 0 dog walks 0 bat flies 2 bird penguin walks 2 Name: num_wings, dtype: int64 """ axis = self._get_axis_number(axis) labels = self._get_axis(axis) if isinstance(key, list): raise TypeError("list keys are not supported in xs, pass a tuple instead") if level is not None: if not isinstance(labels, MultiIndex): raise TypeError("Index must be a MultiIndex") loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level) # create the tuple of the indexer _indexer = [slice(None)] * self.ndim _indexer[axis] = loc indexer = tuple(_indexer) result = self.iloc[indexer] setattr(result, result._get_axis_name(axis), new_ax) return result if axis == 1: if drop_level: return self[key] index = self.columns else: index = self.index if isinstance(index, MultiIndex): loc, new_index = index._get_loc_level(key, level=0) if not drop_level: if lib.is_integer(loc): new_index = index[loc : loc + 1] else: new_index = index[loc] else: loc = index.get_loc(key) if isinstance(loc, np.ndarray): if loc.dtype == np.bool_: (inds,) = loc.nonzero() return self._take_with_is_copy(inds, axis=axis) else: return self._take_with_is_copy(loc, axis=axis) if not is_scalar(loc): new_index = index[loc] if is_scalar(loc) and axis == 0: # In this case loc should be an integer if self.ndim == 1: # if we encounter an array-like and we only have 1 dim # that means that their are list/ndarrays inside the Series! # so just return them (GH 6394) return self._values[loc] new_mgr = self._mgr.fast_xs(loc) result = self._constructor_sliced( new_mgr, name=self.index[loc] ).__finalize__(self) elif is_scalar(loc): result = self.iloc[:, slice(loc, loc + 1)] elif axis == 1: result = self.iloc[:, loc] else: result = self.iloc[loc] result.index = new_index # this could be a view # but only in a single-dtyped view sliceable case result._set_is_copy(self, copy=not result._is_view) return result def __getitem__(self, item): raise AbstractMethodError(self) def _slice(self: NDFrameT, slobj: slice, axis: Axis = 0) -> NDFrameT: """ Construct a slice of this container. Slicing with this method is *always* positional. """ assert isinstance(slobj, slice), type(slobj) axis = self._get_block_manager_axis(axis) result = self._constructor(self._mgr.get_slice(slobj, axis=axis)) result = result.__finalize__(self) # this could be a view # but only in a single-dtyped view sliceable case is_copy = axis != 0 or result._is_view result._set_is_copy(self, copy=is_copy) return result def _set_is_copy(self, ref: NDFrame, copy: bool_t = True) -> None: if not copy: self._is_copy = None else: assert ref is not None self._is_copy = weakref.ref(ref) def _check_is_chained_assignment_possible(self) -> bool_t: """ Check if we are a view, have a cacher, and are of mixed type. If so, then force a setitem_copy check. Should be called just near setting a value Will return a boolean if it we are a view and are cached, but a single-dtype meaning that the cacher should be updated following setting. """ if self._is_copy: self._check_setitem_copy(t="referent") return False def _check_setitem_copy(self, t: str = "setting", force: bool_t = False): """ Parameters ---------- t : str, the type of setting error force : bool, default False If True, then force showing an error. validate if we are doing a setitem on a chained copy. 
It is technically possible to figure out that we are setting on a copy even WITH a multi-dtyped pandas object. In other words, some blocks may be views while other are not. Currently _is_view will ALWAYS return False for multi-blocks to avoid having to handle this case. df = DataFrame(np.arange(0,9), columns=['count']) df['group'] = 'b' # This technically need not raise SettingWithCopy if both are view # (which is not generally guaranteed but is usually True. However, # this is in general not a good practice and we recommend using .loc. df.iloc[0:5]['group'] = 'a' """ if using_copy_on_write(): return # return early if the check is not needed if not (force or self._is_copy): return value = config.get_option("mode.chained_assignment") if value is None: return # see if the copy is not actually referred; if so, then dissolve # the copy weakref if self._is_copy is not None and not isinstance(self._is_copy, str): r = self._is_copy() if not gc.get_referents(r) or (r is not None and r.shape == self.shape): self._is_copy = None return # a custom message if isinstance(self._is_copy, str): t = self._is_copy elif t == "referent": t = ( "\n" "A value is trying to be set on a copy of a slice from a " "DataFrame\n\n" "See the caveats in the documentation: " "https://pandas.pydata.org/pandas-docs/stable/user_guide/" "indexing.html#returning-a-view-versus-a-copy" ) else: t = ( "\n" "A value is trying to be set on a copy of a slice from a " "DataFrame.\n" "Try using .loc[row_indexer,col_indexer] = value " "instead\n\nSee the caveats in the documentation: " "https://pandas.pydata.org/pandas-docs/stable/user_guide/" "indexing.html#returning-a-view-versus-a-copy" ) if value == "raise": raise SettingWithCopyError(t) if value == "warn": warnings.warn(t, SettingWithCopyWarning, stacklevel=find_stack_level()) def __delitem__(self, key) -> None: """ Delete item """ deleted = False maybe_shortcut = False if self.ndim == 2 and isinstance(self.columns, MultiIndex): try: # By using engine's __contains__ we effectively # restrict to same-length tuples maybe_shortcut = key not in self.columns._engine except TypeError: pass if maybe_shortcut: # Allow shorthand to delete all columns whose first len(key) # elements match key: if not isinstance(key, tuple): key = (key,) for col in self.columns: if isinstance(col, tuple) and col[: len(key)] == key: del self[col] deleted = True if not deleted: # If the above loop ran and didn't delete anything because # there was no match, this call should raise the appropriate # exception: loc = self.axes[-1].get_loc(key) self._mgr = self._mgr.idelete(loc) # delete from the caches try: del self._item_cache[key] except KeyError: pass # ---------------------------------------------------------------------- # Unsorted def _check_inplace_and_allows_duplicate_labels(self, inplace): if inplace and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'inplace=True' when " "'self.flags.allows_duplicate_labels' is False." ) def get(self, key, default=None): """ Get item from object for given key (ex: DataFrame column). Returns default value if not found. Parameters ---------- key : object Returns ------- same type as items contained in object Examples -------- >>> df = pd.DataFrame( ... [ ... [24.3, 75.7, "high"], ... [31, 87.8, "high"], ... [22, 71.6, "medium"], ... [35, 95, "medium"], ... ], ... columns=["temp_celsius", "temp_fahrenheit", "windspeed"], ... index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"), ... 
) >>> df temp_celsius temp_fahrenheit windspeed 2014-02-12 24.3 75.7 high 2014-02-13 31.0 87.8 high 2014-02-14 22.0 71.6 medium 2014-02-15 35.0 95.0 medium >>> df.get(["temp_celsius", "windspeed"]) temp_celsius windspeed 2014-02-12 24.3 high 2014-02-13 31.0 high 2014-02-14 22.0 medium 2014-02-15 35.0 medium >>> ser = df['windspeed'] >>> ser.get('2014-02-13') 'high' If the key isn't found, the default value will be used. >>> df.get(["temp_celsius", "temp_kelvin"], default="default_value") 'default_value' >>> ser.get('2014-02-10', '[unknown]') '[unknown]' """ try: return self[key] except (KeyError, ValueError, IndexError): return default def _is_view(self) -> bool_t: """Return boolean indicating if self is view of another array""" return self._mgr.is_view def reindex_like( self: NDFrameT, other, method: Literal["backfill", "bfill", "pad", "ffill", "nearest"] | None = None, copy: bool_t | None = None, limit=None, tolerance=None, ) -> NDFrameT: """ Return an object with matching indices as other object. Conform the object to the same index on all axes. Optional filling logic, placing NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and copy=False. Parameters ---------- other : Object of the same data type Its row and column indices are used to define the new indices of this object. method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'} Method to use for filling holes in reindexed DataFrame. Please note: this is only applicable to DataFrames/Series with a monotonically increasing/decreasing index. * None (default): don't fill gaps * pad / ffill: propagate last valid observation forward to next valid * backfill / bfill: use next valid observation to fill gap * nearest: use nearest valid observations to fill gap. copy : bool, default True Return a new object, even if the passed indexes are the same. limit : int, default None Maximum number of consecutive labels to fill for inexact matches. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- Series or DataFrame Same type as caller, but with changed indices on each axis. See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex : Change to new indices or expand indices. Notes ----- Same as calling ``.reindex(index=other.index, columns=other.columns,...)``. Examples -------- >>> df1 = pd.DataFrame([[24.3, 75.7, 'high'], ... [31, 87.8, 'high'], ... [22, 71.6, 'medium'], ... [35, 95, 'medium']], ... columns=['temp_celsius', 'temp_fahrenheit', ... 'windspeed'], ... index=pd.date_range(start='2014-02-12', ... end='2014-02-15', freq='D')) >>> df1 temp_celsius temp_fahrenheit windspeed 2014-02-12 24.3 75.7 high 2014-02-13 31.0 87.8 high 2014-02-14 22.0 71.6 medium 2014-02-15 35.0 95.0 medium >>> df2 = pd.DataFrame([[28, 'low'], ... [30, 'low'], ... [35.1, 'medium']], ... columns=['temp_celsius', 'windspeed'], ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13', ... 
'2014-02-15'])) >>> df2 temp_celsius windspeed 2014-02-12 28.0 low 2014-02-13 30.0 low 2014-02-15 35.1 medium >>> df2.reindex_like(df1) temp_celsius temp_fahrenheit windspeed 2014-02-12 28.0 NaN low 2014-02-13 30.0 NaN low 2014-02-14 NaN NaN NaN 2014-02-15 35.1 NaN medium """ d = other._construct_axes_dict( axes=self._AXIS_ORDERS, method=method, copy=copy, limit=limit, tolerance=tolerance, ) return self.reindex(**d) def drop( self, labels: IndexLabel = ..., *, axis: Axis = ..., index: IndexLabel = ..., columns: IndexLabel = ..., level: Level | None = ..., inplace: Literal[True], errors: IgnoreRaise = ..., ) -> None: ... def drop( self: NDFrameT, labels: IndexLabel = ..., *, axis: Axis = ..., index: IndexLabel = ..., columns: IndexLabel = ..., level: Level | None = ..., inplace: Literal[False] = ..., errors: IgnoreRaise = ..., ) -> NDFrameT: ... def drop( self: NDFrameT, labels: IndexLabel = ..., *, axis: Axis = ..., index: IndexLabel = ..., columns: IndexLabel = ..., level: Level | None = ..., inplace: bool_t = ..., errors: IgnoreRaise = ..., ) -> NDFrameT | None: ... def drop( self: NDFrameT, labels: IndexLabel = None, *, axis: Axis = 0, index: IndexLabel = None, columns: IndexLabel = None, level: Level | None = None, inplace: bool_t = False, errors: IgnoreRaise = "raise", ) -> NDFrameT | None: inplace = validate_bool_kwarg(inplace, "inplace") if labels is not None: if index is not None or columns is not None: raise ValueError("Cannot specify both 'labels' and 'index'/'columns'") axis_name = self._get_axis_name(axis) axes = {axis_name: labels} elif index is not None or columns is not None: axes = {"index": index} if self.ndim == 2: axes["columns"] = columns else: raise ValueError( "Need to specify at least one of 'labels', 'index' or 'columns'" ) obj = self for axis, labels in axes.items(): if labels is not None: obj = obj._drop_axis(labels, axis, level=level, errors=errors) if inplace: self._update_inplace(obj) return None else: return obj def _drop_axis( self: NDFrameT, labels, axis, level=None, errors: IgnoreRaise = "raise", only_slice: bool_t = False, ) -> NDFrameT: """ Drop labels from specified axis. Used in the ``drop`` method internally. Parameters ---------- labels : single label or list-like axis : int or axis name level : int or level name, default None For MultiIndex errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and existing labels are dropped. only_slice : bool, default False Whether indexing along columns should be view-only. 
""" axis_num = self._get_axis_number(axis) axis = self._get_axis(axis) if axis.is_unique: if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError("axis must be a MultiIndex") new_axis = axis.drop(labels, level=level, errors=errors) else: new_axis = axis.drop(labels, errors=errors) indexer = axis.get_indexer(new_axis) # Case for non-unique axis else: is_tuple_labels = is_nested_list_like(labels) or isinstance(labels, tuple) labels = ensure_object(common.index_labels_to_array(labels)) if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError("axis must be a MultiIndex") mask = ~axis.get_level_values(level).isin(labels) # GH 18561 MultiIndex.drop should raise if label is absent if errors == "raise" and mask.all(): raise KeyError(f"{labels} not found in axis") elif ( isinstance(axis, MultiIndex) and labels.dtype == "object" and not is_tuple_labels ): # Set level to zero in case of MultiIndex and label is string, # because isin can't handle strings for MultiIndexes GH#36293 # In case of tuples we get dtype object but have to use isin GH#42771 mask = ~axis.get_level_values(0).isin(labels) else: mask = ~axis.isin(labels) # Check if label doesn't exist along axis labels_missing = (axis.get_indexer_for(labels) == -1).any() if errors == "raise" and labels_missing: raise KeyError(f"{labels} not found in axis") if is_extension_array_dtype(mask.dtype): # GH#45860 mask = mask.to_numpy(dtype=bool) indexer = mask.nonzero()[0] new_axis = axis.take(indexer) bm_axis = self.ndim - axis_num - 1 new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=bm_axis, allow_dups=True, copy=None, only_slice=only_slice, ) result = self._constructor(new_mgr) if self.ndim == 1: result.name = self.name return result.__finalize__(self) def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None: """ Replace self internals with result. Parameters ---------- result : same type as self verify_is_copy : bool, default True Provide is_copy checks. """ # NOTE: This does *not* call __finalize__ and that's an explicit # decision that we may revisit in the future. self._reset_cache() self._clear_item_cache() self._mgr = result._mgr self._maybe_update_cacher(verify_is_copy=verify_is_copy, inplace=True) def add_prefix(self: NDFrameT, prefix: str, axis: Axis | None = None) -> NDFrameT: """ Prefix labels with string `prefix`. For Series, the row labels are prefixed. For DataFrame, the column labels are prefixed. Parameters ---------- prefix : str The string to add before each label. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to add prefix on .. versionadded:: 2.0.0 Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_suffix: Suffix row labels with string `suffix`. DataFrame.add_suffix: Suffix column labels with string `suffix`. 
Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_prefix('item_') item_0 1 item_1 2 item_2 3 item_3 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_prefix('col_') col_A col_B 0 1 3 1 2 4 2 3 5 3 4 6 """ f = lambda x: f"{prefix}{x}" axis_name = self._info_axis_name if axis is not None: axis_name = self._get_axis_name(axis) mapper = {axis_name: f} # error: Incompatible return value type (got "Optional[NDFrameT]", # expected "NDFrameT") # error: Argument 1 to "rename" of "NDFrame" has incompatible type # "**Dict[str, partial[str]]"; expected "Union[str, int, None]" # error: Keywords must be strings return self._rename(**mapper) # type: ignore[return-value, arg-type, misc] def add_suffix(self: NDFrameT, suffix: str, axis: Axis | None = None) -> NDFrameT: """ Suffix labels with string `suffix`. For Series, the row labels are suffixed. For DataFrame, the column labels are suffixed. Parameters ---------- suffix : str The string to add after each label. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to add suffix on .. versionadded:: 2.0.0 Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. DataFrame.add_prefix: Prefix column labels with string `prefix`. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_suffix('_item') 0_item 1 1_item 2 2_item 3 3_item 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_suffix('_col') A_col B_col 0 1 3 1 2 4 2 3 5 3 4 6 """ f = lambda x: f"{x}{suffix}" axis_name = self._info_axis_name if axis is not None: axis_name = self._get_axis_name(axis) mapper = {axis_name: f} # error: Incompatible return value type (got "Optional[NDFrameT]", # expected "NDFrameT") # error: Argument 1 to "rename" of "NDFrame" has incompatible type # "**Dict[str, partial[str]]"; expected "Union[str, int, None]" # error: Keywords must be strings return self._rename(**mapper) # type: ignore[return-value, arg-type, misc] def sort_values( self: NDFrameT, *, axis: Axis = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[False] = ..., kind: str = ..., na_position: str = ..., ignore_index: bool_t = ..., key: ValueKeyFunc = ..., ) -> NDFrameT: ... def sort_values( self, *, axis: Axis = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[True], kind: str = ..., na_position: str = ..., ignore_index: bool_t = ..., key: ValueKeyFunc = ..., ) -> None: ... def sort_values( self: NDFrameT, *, axis: Axis = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: bool_t = ..., kind: str = ..., na_position: str = ..., ignore_index: bool_t = ..., key: ValueKeyFunc = ..., ) -> NDFrameT | None: ... def sort_values( self: NDFrameT, *, axis: Axis = 0, ascending: bool_t | Sequence[bool_t] = True, inplace: bool_t = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool_t = False, key: ValueKeyFunc = None, ) -> NDFrameT | None: """ Sort by the values along either axis. Parameters ----------%(optional_by)s axis : %(axes_single_arg)s, default 0 Axis to be sorted. ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False If True, perform operation in-place. 
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. `mergesort` and `stable` are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' Puts NaNs at the beginning if `first`; `last` puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional Apply the key function to the values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return a Series with the same shape as the input. It will be applied to each column in `by` independently. .. versionadded:: 1.1.0 Returns ------- DataFrame or None DataFrame with sorted values or None if ``inplace=True``. See Also -------- DataFrame.sort_index : Sort a DataFrame by the index. Series.sort_values : Similar method for a Series. Examples -------- >>> df = pd.DataFrame({ ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'], ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... 'col4': ['a', 'B', 'c', 'D', 'e', 'F'] ... }) >>> df col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F Sort by col1 >>> df.sort_values(by=['col1']) col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 5 C 4 3 F 4 D 7 2 e 3 NaN 8 4 D Sort by multiple columns >>> df.sort_values(by=['col1', 'col2']) col1 col2 col3 col4 1 A 1 1 B 0 A 2 0 a 2 B 9 9 c 5 C 4 3 F 4 D 7 2 e 3 NaN 8 4 D Sort Descending >>> df.sort_values(by='col1', ascending=False) col1 col2 col3 col4 4 D 7 2 e 5 C 4 3 F 2 B 9 9 c 0 A 2 0 a 1 A 1 1 B 3 NaN 8 4 D Putting NAs first >>> df.sort_values(by='col1', ascending=False, na_position='first') col1 col2 col3 col4 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F 2 B 9 9 c 0 A 2 0 a 1 A 1 1 B Sorting with a key function >>> df.sort_values(by='col4', key=lambda col: col.str.lower()) col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F Natural sort with the key argument, using the `natsort <https://github.com/SethMMorton/natsort>` package. >>> df = pd.DataFrame({ ... "time": ['0hr', '128hr', '72hr', '48hr', '96hr'], ... "value": [10, 20, 30, 40, 50] ... }) >>> df time value 0 0hr 10 1 128hr 20 2 72hr 30 3 48hr 40 4 96hr 50 >>> from natsort import index_natsorted >>> df.sort_values( ... by="time", ... key=lambda x: np.argsort(index_natsorted(df["time"])) ... ) time value 0 0hr 10 3 48hr 40 2 72hr 30 4 96hr 50 1 128hr 20 """ raise AbstractMethodError(self) def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[True], kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool_t = ..., ignore_index: bool_t = ..., key: IndexKeyFunc = ..., ) -> None: ... def sort_index( self: NDFrameT, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[False] = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool_t = ..., ignore_index: bool_t = ..., key: IndexKeyFunc = ..., ) -> NDFrameT: ... 
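    # The stub signatures above and below exist only for type checkers: with
    # ``inplace=True`` sort_index returns None, otherwise it returns the sorted
    # object, and the overloads let that distinction be expressed statically.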
    @overload
    def sort_index(
        self: NDFrameT,
        *,
        axis: Axis = ...,
        level: IndexLabel = ...,
        ascending: bool_t | Sequence[bool_t] = ...,
        inplace: bool_t = ...,
        kind: SortKind = ...,
        na_position: NaPosition = ...,
        sort_remaining: bool_t = ...,
        ignore_index: bool_t = ...,
        key: IndexKeyFunc = ...,
    ) -> NDFrameT | None:
        ...

    def sort_index(
        self: NDFrameT,
        *,
        axis: Axis = 0,
        level: IndexLabel = None,
        ascending: bool_t | Sequence[bool_t] = True,
        inplace: bool_t = False,
        kind: SortKind = "quicksort",
        na_position: NaPosition = "last",
        sort_remaining: bool_t = True,
        ignore_index: bool_t = False,
        key: IndexKeyFunc = None,
    ) -> NDFrameT | None:
        inplace = validate_bool_kwarg(inplace, "inplace")
        axis = self._get_axis_number(axis)
        ascending = validate_ascending(ascending)

        target = self._get_axis(axis)

        indexer = get_indexer_indexer(
            target, level, ascending, kind, na_position, sort_remaining, key
        )

        if indexer is None:
            if inplace:
                result = self
            else:
                result = self.copy(deep=None)

            if ignore_index:
                result.index = default_index(len(self))
            if inplace:
                return None
            else:
                return result

        baxis = self._get_block_manager_axis(axis)
        new_data = self._mgr.take(indexer, axis=baxis, verify=False)

        # reconstruct axis if needed
        new_data.set_axis(baxis, new_data.axes[baxis]._sort_levels_monotonic())

        if ignore_index:
            axis = 1 if isinstance(self, ABCDataFrame) else 0
            new_data.set_axis(axis, default_index(len(indexer)))

        result = self._constructor(new_data)

        if inplace:
            return self._update_inplace(result)
        else:
            return result.__finalize__(self, method="sort_index")

    @doc(
        klass=_shared_doc_kwargs["klass"],
        optional_reindex="",
    )
    def reindex(
        self: NDFrameT,
        labels=None,
        index=None,
        columns=None,
        axis: Axis | None = None,
        method: str | None = None,
        copy: bool_t | None = None,
        level: Level | None = None,
        fill_value: Scalar | None = np.nan,
        limit: int | None = None,
        tolerance=None,
    ) -> NDFrameT:
        """
        Conform {klass} to new index with optional filling logic.

        Places NA/NaN in locations having no value in the previous index. A new
        object is produced unless the new index is equivalent to the current
        one and ``copy=False``.

        Parameters
        ----------
        {optional_reindex}
        method : {{None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}}
            Method to use for filling holes in reindexed DataFrame.
            Please note: this is only applicable to DataFrames/Series with a
            monotonically increasing/decreasing index.

            * None (default): don't fill gaps
            * pad / ffill: Propagate last valid observation forward to next
              valid.
            * backfill / bfill: Use next valid observation to fill gap.
            * nearest: Use nearest valid observations to fill gap.

        copy : bool, default True
            Return a new object, even if the passed indexes are the same.
        level : int or name
            Broadcast across a level, matching Index values on the
            passed MultiIndex level.
        fill_value : scalar, default np.NaN
            Value to use for missing values. Defaults to NaN, but can be any
            "compatible" value.
        limit : int, default None
            Maximum number of consecutive elements to forward or backward fill.
        tolerance : optional
            Maximum distance between original and new labels for inexact
            matches. The values of the index at the matching locations must
            satisfy the equation ``abs(index[indexer] - target) <= tolerance``.

            Tolerance may be a scalar value, which applies the same tolerance
            to all values, or list-like, which applies variable tolerance per
            element. List-like includes list, tuple, array, Series, and must be
            the same size as the index and its dtype must exactly match the
            index's type.

        Returns
        -------
        {klass} with changed index.
See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex_like : Change to same indices as other DataFrame. Examples -------- ``DataFrame.reindex`` supports two calling conventions * ``(index=index_labels, columns=column_labels, ...)`` * ``(labels, axis={{'index', 'columns'}}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. Create a dataframe with some fictional data. >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror'] >>> df = pd.DataFrame({{'http_status': [200, 200, 404, 404, 301], ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}}, ... index=index) >>> df http_status response_time Firefox 200 0.04 Chrome 200 0.02 Safari 404 0.07 IE10 404 0.08 Konqueror 301 1.00 Create a new index and reindex the dataframe. By default values in the new index that do not have corresponding records in the dataframe are assigned ``NaN``. >>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10', ... 'Chrome'] >>> df.reindex(new_index) http_status response_time Safari 404.0 0.07 Iceweasel NaN NaN Comodo Dragon NaN NaN IE10 404.0 0.08 Chrome 200.0 0.02 We can fill in the missing values by passing a value to the keyword ``fill_value``. Because the index is not monotonically increasing or decreasing, we cannot use arguments to the keyword ``method`` to fill the ``NaN`` values. >>> df.reindex(new_index, fill_value=0) http_status response_time Safari 404 0.07 Iceweasel 0 0.00 Comodo Dragon 0 0.00 IE10 404 0.08 Chrome 200 0.02 >>> df.reindex(new_index, fill_value='missing') http_status response_time Safari 404 0.07 Iceweasel missing missing Comodo Dragon missing missing IE10 404 0.08 Chrome 200 0.02 We can also reindex the columns. >>> df.reindex(columns=['http_status', 'user_agent']) http_status user_agent Firefox 200 NaN Chrome 200 NaN Safari 404 NaN IE10 404 NaN Konqueror 301 NaN Or we can use "axis-style" keyword arguments >>> df.reindex(['http_status', 'user_agent'], axis="columns") http_status user_agent Firefox 200 NaN Chrome 200 NaN Safari 404 NaN IE10 404 NaN Konqueror 301 NaN To further illustrate the filling functionality in ``reindex``, we will create a dataframe with a monotonically increasing index (for example, a sequence of dates). >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D') >>> df2 = pd.DataFrame({{"prices": [100, 101, np.nan, 100, 89, 88]}}, ... index=date_index) >>> df2 prices 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 Suppose we decide to expand the dataframe to cover a wider date range. >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D') >>> df2.reindex(date_index2) prices 2009-12-29 NaN 2009-12-30 NaN 2009-12-31 NaN 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN The index entries that did not have a value in the original data frame (for example, '2009-12-29') are by default filled with ``NaN``. If desired, we can fill in the missing values using one of several options. For example, to back-propagate the last valid value to fill the ``NaN`` values, pass ``bfill`` as an argument to the ``method`` keyword. 
>>> df2.reindex(date_index2, method='bfill') prices 2009-12-29 100.0 2009-12-30 100.0 2009-12-31 100.0 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN Please note that the ``NaN`` value present in the original dataframe (at index value 2010-01-03) will not be filled by any of the value propagation schemes. This is because filling while reindexing does not look at dataframe values, but only compares the original and desired indexes. If you do want to fill in the ``NaN`` values present in the original dataframe, use the ``fillna()`` method. See the :ref:`user guide <basics.reindexing>` for more. """ # TODO: Decide if we care about having different examples for different # kinds if index is not None and columns is not None and labels is not None: raise TypeError("Cannot specify all of 'labels', 'index', 'columns'.") elif index is not None or columns is not None: if axis is not None: raise TypeError( "Cannot specify both 'axis' and any of 'index' or 'columns'" ) if labels is not None: if index is not None: columns = labels else: index = labels else: if axis and self._get_axis_number(axis) == 1: columns = labels else: index = labels axes: dict[Literal["index", "columns"], Any] = { "index": index, "columns": columns, } method = clean_reindex_fill_method(method) # if all axes that are requested to reindex are equal, then only copy # if indicated must have index names equal here as well as values if copy and using_copy_on_write(): copy = False if all( self._get_axis(axis_name).identical(ax) for axis_name, ax in axes.items() if ax is not None ): return self.copy(deep=copy) # check if we are a multi reindex if self._needs_reindex_multi(axes, method, level): return self._reindex_multi(axes, copy, fill_value) # perform the reindex on the axes return self._reindex_axes( axes, level, limit, tolerance, method, fill_value, copy ).__finalize__(self, method="reindex") def _reindex_axes( self: NDFrameT, axes, level, limit, tolerance, method, fill_value, copy ) -> NDFrameT: """Perform the reindex for all the axes.""" obj = self for a in self._AXIS_ORDERS: labels = axes[a] if labels is None: continue ax = self._get_axis(a) new_index, indexer = ax.reindex( labels, level=level, limit=limit, tolerance=tolerance, method=method ) axis = self._get_axis_number(a) obj = obj._reindex_with_indexers( {axis: [new_index, indexer]}, fill_value=fill_value, copy=copy, allow_dups=False, ) # If we've made a copy once, no need to make another one copy = False return obj def _needs_reindex_multi(self, axes, method, level) -> bool_t: """Check if we do need a multi reindex.""" return ( (common.count_not_none(*axes.values()) == self._AXIS_LEN) and method is None and level is None and not self._is_mixed_type and not ( self.ndim == 2 and len(self.dtypes) == 1 and is_extension_array_dtype(self.dtypes.iloc[0]) ) ) def _reindex_multi(self, axes, copy, fill_value): raise AbstractMethodError(self) def _reindex_with_indexers( self: NDFrameT, reindexers, fill_value=None, copy: bool_t | None = False, allow_dups: bool_t = False, ) -> NDFrameT: """allow_dups indicates an internal call here""" # reindex doing multiple operations on different axes if indicated new_data = self._mgr for axis in sorted(reindexers.keys()): index, indexer = reindexers[axis] baxis = self._get_block_manager_axis(axis) if index is None: continue index = ensure_index(index) if indexer is not None: indexer = ensure_platform_int(indexer) # TODO: speed up on homogeneous DataFrame objects (see _reindex_multi) 
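            # reindex_indexer applies the precomputed indexer along the
            # block-manager axis, filling positions the indexer marks as
            # missing (-1) with fill_value; allow_dups skips the uniqueness
            # check for internal callers.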
new_data = new_data.reindex_indexer( index, indexer, axis=baxis, fill_value=fill_value, allow_dups=allow_dups, copy=copy, ) # If we've made a copy once, no need to make another one copy = False if ( (copy or copy is None) and new_data is self._mgr and not using_copy_on_write() ): new_data = new_data.copy(deep=copy) elif using_copy_on_write() and new_data is self._mgr: new_data = new_data.copy(deep=False) return self._constructor(new_data).__finalize__(self) def filter( self: NDFrameT, items=None, like: str | None = None, regex: str | None = None, axis: Axis | None = None, ) -> NDFrameT: """ Subset the dataframe rows or columns according to the specified index labels. Note that this routine does not filter a dataframe on its contents. The filter is applied to the labels of the index. Parameters ---------- items : list-like Keep labels from axis which are in items. like : str Keep labels from axis for which "like in label == True". regex : str (regular expression) Keep labels from axis for which re.search(regex, label) == True. axis : {0 or ‘index’, 1 or ‘columns’, None}, default None The axis to filter on, expressed either as an index (int) or axis name (str). By default this is the info axis, 'columns' for DataFrame. For `Series` this parameter is unused and defaults to `None`. Returns ------- same type as input object See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. Notes ----- The ``items``, ``like``, and ``regex`` parameters are enforced to be mutually exclusive. ``axis`` defaults to the info axis that is used when indexing with ``[]``. Examples -------- >>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])), ... index=['mouse', 'rabbit'], ... columns=['one', 'two', 'three']) >>> df one two three mouse 1 2 3 rabbit 4 5 6 >>> # select columns by name >>> df.filter(items=['one', 'three']) one three mouse 1 3 rabbit 4 6 >>> # select columns by regular expression >>> df.filter(regex='e$', axis=1) one three mouse 1 3 rabbit 4 6 >>> # select rows containing 'bbi' >>> df.filter(like='bbi', axis=0) one two three rabbit 4 5 6 """ nkw = common.count_not_none(items, like, regex) if nkw > 1: raise TypeError( "Keyword arguments `items`, `like`, or `regex` " "are mutually exclusive" ) if axis is None: axis = self._info_axis_name labels = self._get_axis(axis) if items is not None: name = self._get_axis_name(axis) # error: Keywords must be strings return self.reindex( # type: ignore[misc] **{name: [r for r in items if r in labels]} # type: ignore[arg-type] ) elif like: def f(x) -> bool_t: assert like is not None # needed for mypy return like in ensure_str(x) values = labels.map(f) return self.loc(axis=axis)[values] elif regex: def f(x) -> bool_t: return matcher.search(ensure_str(x)) is not None matcher = re.compile(regex) values = labels.map(f) return self.loc(axis=axis)[values] else: raise TypeError("Must pass either `items`, `like`, or `regex`") def head(self: NDFrameT, n: int = 5) -> NDFrameT: """ Return the first `n` rows. This function returns the first `n` rows for the object based on position. It is useful for quickly testing if your object has the right type of data in it. For negative values of `n`, this function returns all rows except the last `|n|` rows, equivalent to ``df[:n]``. If n is larger than the number of rows, this function returns all rows. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- same type as caller The first `n` rows of the caller object. 
See Also -------- DataFrame.tail: Returns the last `n` rows. Examples -------- >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the first 5 lines >>> df.head() animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey Viewing the first `n` lines (three in this case) >>> df.head(3) animal 0 alligator 1 bee 2 falcon For negative values of `n` >>> df.head(-3) animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot """ return self.iloc[:n] def tail(self: NDFrameT, n: int = 5) -> NDFrameT: """ Return the last `n` rows. This function returns last `n` rows from the object based on position. It is useful for quickly verifying data, for example, after sorting or appending rows. For negative values of `n`, this function returns all rows except the first `|n|` rows, equivalent to ``df[|n|:]``. If n is larger than the number of rows, this function returns all rows. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- type of caller The last `n` rows of the caller object. See Also -------- DataFrame.head : The first `n` rows of the caller object. Examples -------- >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last 5 lines >>> df.tail() animal 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last `n` lines (three in this case) >>> df.tail(3) animal 6 shark 7 whale 8 zebra For negative values of `n` >>> df.tail(-3) animal 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra """ if n == 0: return self.iloc[0:0] return self.iloc[-n:] def sample( self: NDFrameT, n: int | None = None, frac: float | None = None, replace: bool_t = False, weights=None, random_state: RandomState | None = None, axis: Axis | None = None, ignore_index: bool_t = False, ) -> NDFrameT: """ Return a random sample of items from an axis of object. You can use `random_state` for reproducibility. Parameters ---------- n : int, optional Number of items from axis to return. Cannot be used with `frac`. Default = 1 if `frac` = None. frac : float, optional Fraction of axis items to return. Cannot be used with `n`. replace : bool, default False Allow or disallow sampling of the same row more than once. weights : str or ndarray-like, optional Default 'None' results in equal probability weighting. If passed a Series, will align with target object on index. Index values in weights not found in sampled object will be ignored and index values in sampled object not in weights will be assigned weights of zero. If called on a DataFrame, will accept the name of a column when axis = 0. Unless weights are a Series, weights must be same length as axis being sampled. If weights do not sum to 1, they will be normalized to sum to 1. Missing values in the weights column will be treated as zero. Infinite values not allowed. random_state : int, array-like, BitGenerator, np.random.RandomState, np.random.Generator, optional If int, array-like, or BitGenerator, seed for random number generator. If np.random.RandomState or np.random.Generator, use as given. .. versionchanged:: 1.1.0 array-like and BitGenerator object now passed to np.random.RandomState() as seed .. 
versionchanged:: 1.4.0 np.random.Generator objects now accepted axis : {0 or ‘index’, 1 or ‘columns’, None}, default None Axis to sample. Accepts axis number or name. Default is stat axis for given data type. For `Series` this parameter is unused and defaults to `None`. ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.3.0 Returns ------- Series or DataFrame A new object of same type as caller containing `n` items randomly sampled from the caller object. See Also -------- DataFrameGroupBy.sample: Generates random samples from each group of a DataFrame object. SeriesGroupBy.sample: Generates random samples from each group of a Series object. numpy.random.choice: Generates a random sample from a given 1-D numpy array. Notes ----- If `frac` > 1, `replacement` should be set to `True`. Examples -------- >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0], ... 'num_wings': [2, 0, 0, 0], ... 'num_specimen_seen': [10, 2, 1, 8]}, ... index=['falcon', 'dog', 'spider', 'fish']) >>> df num_legs num_wings num_specimen_seen falcon 2 2 10 dog 4 0 2 spider 8 0 1 fish 0 0 8 Extract 3 random elements from the ``Series`` ``df['num_legs']``: Note that we use `random_state` to ensure the reproducibility of the examples. >>> df['num_legs'].sample(n=3, random_state=1) fish 0 spider 8 falcon 2 Name: num_legs, dtype: int64 A random 50% sample of the ``DataFrame`` with replacement: >>> df.sample(frac=0.5, replace=True, random_state=1) num_legs num_wings num_specimen_seen dog 4 0 2 fish 0 0 8 An upsample sample of the ``DataFrame`` with replacement: Note that `replace` parameter has to be `True` for `frac` parameter > 1. >>> df.sample(frac=2, replace=True, random_state=1) num_legs num_wings num_specimen_seen dog 4 0 2 fish 0 0 8 falcon 2 2 10 falcon 2 2 10 fish 0 0 8 dog 4 0 2 fish 0 0 8 dog 4 0 2 Using a DataFrame column as weights. Rows with larger value in the `num_specimen_seen` column are more likely to be sampled. >>> df.sample(n=2, weights='num_specimen_seen', random_state=1) num_legs num_wings num_specimen_seen falcon 2 2 10 fish 0 0 8 """ # noqa:E501 if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) obj_len = self.shape[axis] # Process random_state argument rs = common.random_state(random_state) size = sample.process_sampling_size(n, frac, replace) if size is None: assert frac is not None size = round(frac * obj_len) if weights is not None: weights = sample.preprocess_weights(self, weights, axis) sampled_indices = sample.sample(obj_len, size, replace, weights, rs) result = self.take(sampled_indices, axis=axis) if ignore_index: result.index = default_index(len(result)) return result def pipe( self, func: Callable[..., T] | tuple[Callable[..., T], str], *args, **kwargs, ) -> T: r""" Apply chainable functions that expect Series or DataFrames. Parameters ---------- func : function Function to apply to the {klass}. ``args``, and ``kwargs`` are passed into ``func``. Alternatively a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the {klass}. args : iterable, optional Positional arguments passed into ``func``. kwargs : mapping, optional A dictionary of keyword arguments passed into ``func``. Returns ------- the return type of ``func``. See Also -------- DataFrame.apply : Apply a function along input axis of DataFrame. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. 
Series.map : Apply a mapping correspondence on a :class:`~pandas.Series`. Notes ----- Use ``.pipe`` when chaining together functions that expect Series, DataFrames or GroupBy objects. Instead of writing >>> func(g(h(df), arg1=a), arg2=b, arg3=c) # doctest: +SKIP You can write >>> (df.pipe(h) ... .pipe(g, arg1=a) ... .pipe(func, arg2=b, arg3=c) ... ) # doctest: +SKIP If you have a function that takes the data as (say) the second argument, pass a tuple indicating which keyword expects the data. For example, suppose ``func`` takes its data as ``arg2``: >>> (df.pipe(h) ... .pipe(g, arg1=a) ... .pipe((func, 'arg2'), arg1=a, arg3=c) ... ) # doctest: +SKIP """ if using_copy_on_write(): return common.pipe(self.copy(deep=None), func, *args, **kwargs) return common.pipe(self, func, *args, **kwargs) # ---------------------------------------------------------------------- # Attribute access def __finalize__( self: NDFrameT, other, method: str | None = None, **kwargs ) -> NDFrameT: """ Propagate metadata from other to self. Parameters ---------- other : the object from which to get the attributes that we are going to propagate method : str, optional A passed method name providing context on where ``__finalize__`` was called. .. warning:: The value passed as `method` are not currently considered stable across pandas releases. """ if isinstance(other, NDFrame): for name in other.attrs: self.attrs[name] = other.attrs[name] self.flags.allows_duplicate_labels = other.flags.allows_duplicate_labels # For subclasses using _metadata. for name in set(self._metadata) & set(other._metadata): assert isinstance(name, str) object.__setattr__(self, name, getattr(other, name, None)) if method == "concat": attrs = other.objs[0].attrs check_attrs = all(objs.attrs == attrs for objs in other.objs[1:]) if check_attrs: for name in attrs: self.attrs[name] = attrs[name] allows_duplicate_labels = all( x.flags.allows_duplicate_labels for x in other.objs ) self.flags.allows_duplicate_labels = allows_duplicate_labels return self def __getattr__(self, name: str): """ After regular attribute access, try looking up the name This allows simpler access to columns for interactive use. """ # Note: obj.x will always call obj.__getattribute__('x') prior to # calling obj.__getattr__('x'). if ( name not in self._internal_names_set and name not in self._metadata and name not in self._accessors and self._info_axis._can_hold_identifiers_and_holds_name(name) ): return self[name] return object.__getattribute__(self, name) def __setattr__(self, name: str, value) -> None: """ After regular attribute access, try setting the name This allows simpler access to columns for interactive use. """ # first try regular attribute access via __getattribute__, so that # e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify # the same attribute. try: object.__getattribute__(self, name) return object.__setattr__(self, name, value) except AttributeError: pass # if this fails, go on to more involved attribute setting # (note that this matches __getattr__, above). 
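# A hedged sketch of the fallthrough handled below, with a hypothetical
# frame (assigning to an existing column name routes through
# ``self[name] = value``, while an unknown name becomes a plain instance
# attribute and, for list-likes on a DataFrame, triggers the warning
# issued below)::
#
#     df = pd.DataFrame({"a": [1, 2]})
#     df.a = [10, 20]   # updates column "a" via self["a"] = value
#     df.b = [30, 40]   # warns; sets an attribute, not a column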
if name in self._internal_names_set: object.__setattr__(self, name, value) elif name in self._metadata: object.__setattr__(self, name, value) else: try: existing = getattr(self, name) if isinstance(existing, Index): object.__setattr__(self, name, value) elif name in self._info_axis: self[name] = value else: object.__setattr__(self, name, value) except (AttributeError, TypeError): if isinstance(self, ABCDataFrame) and (is_list_like(value)): warnings.warn( "Pandas doesn't allow columns to be " "created via a new attribute name - see " "https://pandas.pydata.org/pandas-docs/" "stable/indexing.html#attribute-access", stacklevel=find_stack_level(), ) object.__setattr__(self, name, value) def _dir_additions(self) -> set[str]: """ add the string-like attributes from the info_axis. If info_axis is a MultiIndex, its first level values are used. """ additions = super()._dir_additions() if self._info_axis._can_hold_strings: additions.update(self._info_axis._dir_additions_for_owner) return additions # ---------------------------------------------------------------------- # Consolidation of internals def _protect_consolidate(self, f): """ Consolidate _mgr -- if the blocks have changed, then clear the cache """ if isinstance(self._mgr, (ArrayManager, SingleArrayManager)): return f() blocks_before = len(self._mgr.blocks) result = f() if len(self._mgr.blocks) != blocks_before: self._clear_item_cache() return result def _consolidate_inplace(self) -> None: """Consolidate data in place and return None""" def f() -> None: self._mgr = self._mgr.consolidate() self._protect_consolidate(f) def _consolidate(self): """ Compute NDFrame with "consolidated" internals (data of each dtype grouped together in a single ndarray). Returns ------- consolidated : same type as caller """ f = lambda: self._mgr.consolidate() cons_data = self._protect_consolidate(f) return self._constructor(cons_data).__finalize__(self) def _is_mixed_type(self) -> bool_t: if self._mgr.is_single_block: return False if self._mgr.any_extension_types: # Even if they have the same dtype, we can't consolidate them, # so we pretend this is "mixed'" return True return self.dtypes.nunique() > 1 def _check_inplace_setting(self, value) -> bool_t: """check whether we allow in-place setting with this type of value""" if self._is_mixed_type and not self._mgr.is_numeric_mixed_type: # allow an actual np.nan through if is_float(value) and np.isnan(value) or value is lib.no_default: return True raise TypeError( "Cannot do inplace boolean setting on " "mixed-types with a non np.nan value" ) return True def _get_numeric_data(self: NDFrameT) -> NDFrameT: return self._constructor(self._mgr.get_numeric_data()).__finalize__(self) def _get_bool_data(self): return self._constructor(self._mgr.get_bool_data()).__finalize__(self) # ---------------------------------------------------------------------- # Internal Interface Methods def values(self): raise AbstractMethodError(self) def _values(self) -> ArrayLike: """internal implementation""" raise AbstractMethodError(self) def dtypes(self): """ Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the ``object`` dtype. See :ref:`the User Guide <basics.dtypes>` for more. Returns ------- pandas.Series The data type of each column. Examples -------- >>> df = pd.DataFrame({'float': [1.0], ... 'int': [1], ... 'datetime': [pd.Timestamp('20180310')], ... 
'string': ['foo']}) >>> df.dtypes float float64 int int64 datetime datetime64[ns] string object dtype: object """ data = self._mgr.get_dtypes() return self._constructor_sliced(data, index=self._info_axis, dtype=np.object_) def astype( self: NDFrameT, dtype, copy: bool_t | None = None, errors: IgnoreRaise = "raise" ) -> NDFrameT: """ Cast a pandas object to a specified dtype ``dtype``. Parameters ---------- dtype : str, data type, Series or Mapping of column name -> data type Use a str, numpy.dtype, pandas.ExtensionDtype or Python type to cast entire pandas object to the same type. Alternatively, use a mapping, e.g. {col: dtype, ...}, where col is a column label and dtype is a numpy.dtype or Python type to cast one or more of the DataFrame's columns to column-specific types. copy : bool, default True Return a copy when ``copy=True`` (be very careful setting ``copy=False`` as changes to values then may propagate to other pandas objects). errors : {'raise', 'ignore'}, default 'raise' Control raising of exceptions on invalid data for provided dtype. - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. On error return original object. Returns ------- same type as caller See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to a numeric type. numpy.ndarray.astype : Cast a numpy array to a specified type. Notes ----- .. versionchanged:: 2.0.0 Using ``astype`` to convert from timezone-naive dtype to timezone-aware dtype will raise an exception. Use :meth:`Series.dt.tz_localize` instead. Examples -------- Create a DataFrame: >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df.dtypes col1 int64 col2 int64 dtype: object Cast all columns to int32: >>> df.astype('int32').dtypes col1 int32 col2 int32 dtype: object Cast col1 to int32 using a dictionary: >>> df.astype({'col1': 'int32'}).dtypes col1 int32 col2 int64 dtype: object Create a series: >>> ser = pd.Series([1, 2], dtype='int32') >>> ser 0 1 1 2 dtype: int32 >>> ser.astype('int64') 0 1 1 2 dtype: int64 Convert to categorical type: >>> ser.astype('category') 0 1 1 2 dtype: category Categories (2, int32): [1, 2] Convert to ordered categorical type with custom ordering: >>> from pandas.api.types import CategoricalDtype >>> cat_dtype = CategoricalDtype( ... categories=[2, 1], ordered=True) >>> ser.astype(cat_dtype) 0 1 1 2 dtype: category Categories (2, int64): [2 < 1] Create a series of dates: >>> ser_date = pd.Series(pd.date_range('20200101', periods=3)) >>> ser_date 0 2020-01-01 1 2020-01-02 2 2020-01-03 dtype: datetime64[ns] """ if copy and using_copy_on_write(): copy = False if is_dict_like(dtype): if self.ndim == 1: # i.e. Series if len(dtype) > 1 or self.name not in dtype: raise KeyError( "Only the Series name can be used for " "the key in Series dtype mappings." ) new_type = dtype[self.name] return self.astype(new_type, copy, errors) # GH#44417 cast to Series so we can use .iat below, which will be # robust in case we from pandas import Series dtype_ser = Series(dtype, dtype=object) for col_name in dtype_ser.index: if col_name not in self: raise KeyError( "Only a column name can be used for the " "key in a dtype mappings argument. " f"'{col_name}' not found in columns." 
) dtype_ser = dtype_ser.reindex(self.columns, fill_value=None, copy=False) results = [] for i, (col_name, col) in enumerate(self.items()): cdt = dtype_ser.iat[i] if isna(cdt): res_col = col.copy(deep=copy) else: try: res_col = col.astype(dtype=cdt, copy=copy, errors=errors) except ValueError as ex: ex.args = ( f"{ex}: Error while type casting for column '{col_name}'", ) raise results.append(res_col) elif is_extension_array_dtype(dtype) and self.ndim > 1: # GH 18099/22869: columnwise conversion to extension dtype # GH 24704: use iloc to handle duplicate column names # TODO(EA2D): special case not needed with 2D EAs results = [ self.iloc[:, i].astype(dtype, copy=copy) for i in range(len(self.columns)) ] else: # else, only a single dtype is given new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors) return self._constructor(new_data).__finalize__(self, method="astype") # GH 33113: handle empty frame or series if not results: return self.copy(deep=None) # GH 19920: retain column metadata after concat result = concat(results, axis=1, copy=False) # GH#40810 retain subclass # error: Incompatible types in assignment # (expression has type "NDFrameT", variable has type "DataFrame") result = self._constructor(result) # type: ignore[assignment] result.columns = self.columns result = result.__finalize__(self, method="astype") # https://github.com/python/mypy/issues/8354 return cast(NDFrameT, result) def copy(self: NDFrameT, deep: bool_t | None = True) -> NDFrameT: """ Make a copy of this object's indices and data. When ``deep=True`` (default), a new object will be created with a copy of the calling object's data and indices. Modifications to the data or indices of the copy will not be reflected in the original object (see notes below). When ``deep=False``, a new object will be created without copying the calling object's data or index (only references to the data and index are copied). Any changes to the data of the original will be reflected in the shallow copy (and vice versa). Parameters ---------- deep : bool, default True Make a deep copy, including a copy of the data and the indices. With ``deep=False`` neither the indices nor the data are copied. Returns ------- Series or DataFrame Object type matches caller. Notes ----- When ``deep=True``, data is copied but actual Python objects will not be copied recursively, only the reference to the object. This is in contrast to `copy.deepcopy` in the Standard Library, which recursively copies object data (see examples below). While ``Index`` objects are copied when ``deep=True``, the underlying numpy array is not copied for performance reasons. Since ``Index`` is immutable, the underlying data can be safely shared and a copy is not needed. Since pandas is not thread safe, see the :ref:`gotchas <gotchas.thread-safety>` when copying in a threading environment. Examples -------- >>> s = pd.Series([1, 2], index=["a", "b"]) >>> s a 1 b 2 dtype: int64 >>> s_copy = s.copy() >>> s_copy a 1 b 2 dtype: int64 **Shallow copy versus default (deep) copy:** >>> s = pd.Series([1, 2], index=["a", "b"]) >>> deep = s.copy() >>> shallow = s.copy(deep=False) Shallow copy shares data and index with original. >>> s is shallow False >>> s.values is shallow.values and s.index is shallow.index True Deep copy has own copy of data and index. >>> s is deep False >>> s.values is deep.values or s.index is deep.index False Updates to the data shared by shallow copy and original is reflected in both; deep copy remains unchanged. 
>>> s[0] = 3 >>> shallow[1] = 4 >>> s a 3 b 4 dtype: int64 >>> shallow a 3 b 4 dtype: int64 >>> deep a 1 b 2 dtype: int64 Note that when copying an object containing Python objects, a deep copy will copy the data, but will not do so recursively. Updating a nested data object will be reflected in the deep copy. >>> s = pd.Series([[1, 2], [3, 4]]) >>> deep = s.copy() >>> s[0][0] = 10 >>> s 0 [10, 2] 1 [3, 4] dtype: object >>> deep 0 [10, 2] 1 [3, 4] dtype: object """ data = self._mgr.copy(deep=deep) self._clear_item_cache() return self._constructor(data).__finalize__(self, method="copy") def __copy__(self: NDFrameT, deep: bool_t = True) -> NDFrameT: return self.copy(deep=deep) def __deepcopy__(self: NDFrameT, memo=None) -> NDFrameT: """ Parameters ---------- memo, default None Standard signature. Unused """ return self.copy(deep=True) def infer_objects(self: NDFrameT, copy: bool_t | None = None) -> NDFrameT: """ Attempt to infer better dtypes for object columns. Attempts soft conversion of object-dtyped columns, leaving non-object and unconvertible columns unchanged. The inference rules are the same as during normal Series/DataFrame construction. Parameters ---------- copy : bool, default True Whether to make a copy for non-object or non-inferrable columns or Series. Returns ------- same type as input object See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to numeric type. convert_dtypes : Convert argument to best possible dtype. Examples -------- >>> df = pd.DataFrame({"A": ["a", 1, 2, 3]}) >>> df = df.iloc[1:] >>> df A 1 1 2 2 3 3 >>> df.dtypes A object dtype: object >>> df.infer_objects().dtypes A int64 dtype: object """ new_mgr = self._mgr.convert(copy=copy) return self._constructor(new_mgr).__finalize__(self, method="infer_objects") def convert_dtypes( self: NDFrameT, infer_objects: bool_t = True, convert_string: bool_t = True, convert_integer: bool_t = True, convert_boolean: bool_t = True, convert_floating: bool_t = True, dtype_backend: DtypeBackend = "numpy_nullable", ) -> NDFrameT: """ Convert columns to the best possible dtypes using dtypes supporting ``pd.NA``. Parameters ---------- infer_objects : bool, default True Whether object dtypes should be converted to the best possible types. convert_string : bool, default True Whether object dtypes should be converted to ``StringDtype()``. convert_integer : bool, default True Whether, if possible, conversion can be done to integer extension types. convert_boolean : bool, defaults True Whether object dtypes should be converted to ``BooleanDtypes()``. convert_floating : bool, defaults True Whether, if possible, conversion can be done to floating extension types. If `convert_integer` is also True, preference will be give to integer dtypes if the floats can be faithfully casted to integers. .. versionadded:: 1.2.0 dtype_backend : {"numpy_nullable", "pyarrow"}, default "numpy_nullable" Which dtype_backend to use, e.g. whether a DataFrame should use nullable dtypes for all dtypes that have a nullable implementation when "numpy_nullable" is set, pyarrow is used for all dtypes if "pyarrow" is set. The dtype_backends are still experimential. .. versionadded:: 2.0 Returns ------- Series or DataFrame Copy of input object with new dtype. See Also -------- infer_objects : Infer dtypes of objects. to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to a numeric type. 
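``__deepcopy__`` above simply delegates to ``copy(deep=True)``, so the standard-library entry point shows the same non-recursive behaviour described for ``copy``; a minimal sketch:

>>> import copy
>>> s = pd.Series([[1, 2], [3, 4]])
>>> deep = copy.deepcopy(s)
>>> s[0][0] = 10
>>> deep
0    [10, 2]
1     [3, 4]
dtype: object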
Notes ----- By default, ``convert_dtypes`` will attempt to convert a Series (or each Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options ``convert_string``, ``convert_integer``, ``convert_boolean`` and ``convert_floating``, it is possible to turn off individual conversions to ``StringDtype``, the integer extension types, ``BooleanDtype`` or floating extension types, respectively. For object-dtyped columns, if ``infer_objects`` is ``True``, use the inference rules as during normal Series/DataFrame construction. Then, if possible, convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer or floating extension type, otherwise leave as ``object``. If the dtype is integer, convert to an appropriate integer extension type. If the dtype is numeric, and consists of all integers, convert to an appropriate integer extension type. Otherwise, convert to an appropriate floating extension type. .. versionchanged:: 1.2 Starting with pandas 1.2, this method also converts float columns to the nullable floating extension type. In the future, as new dtypes are added that support ``pd.NA``, the results of this method will change to support those new dtypes. .. versionadded:: 2.0 The nullable dtype implementation can be configured by calling ``pd.set_option("mode.dtype_backend", "pandas")`` to use numpy-backed nullable dtypes or ``pd.set_option("mode.dtype_backend", "pyarrow")`` to use pyarrow-backed nullable dtypes (using ``pd.ArrowDtype``). Examples -------- >>> df = pd.DataFrame( ... { ... "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")), ... "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")), ... "c": pd.Series([True, False, np.nan], dtype=np.dtype("O")), ... "d": pd.Series(["h", "i", np.nan], dtype=np.dtype("O")), ... "e": pd.Series([10, np.nan, 20], dtype=np.dtype("float")), ... "f": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")), ... } ... ) Start with a DataFrame with default dtypes. >>> df a b c d e f 0 1 x True h 10.0 NaN 1 2 y False i NaN 100.5 2 3 z NaN NaN 20.0 200.0 >>> df.dtypes a int32 b object c object d object e float64 f float64 dtype: object Convert the DataFrame to use best possible dtypes. >>> dfn = df.convert_dtypes() >>> dfn a b c d e f 0 1 x True h 10 <NA> 1 2 y False i <NA> 100.5 2 3 z <NA> <NA> 20 200.0 >>> dfn.dtypes a Int32 b string[python] c boolean d string[python] e Int64 f Float64 dtype: object Start with a Series of strings and missing data represented by ``np.nan``. >>> s = pd.Series(["a", "b", np.nan]) >>> s 0 a 1 b 2 NaN dtype: object Obtain a Series with dtype ``StringDtype``. 
>>> s.convert_dtypes() 0 a 1 b 2 <NA> dtype: string """ check_dtype_backend(dtype_backend) if self.ndim == 1: return self._convert_dtypes( infer_objects, convert_string, convert_integer, convert_boolean, convert_floating, dtype_backend=dtype_backend, ) else: results = [ col._convert_dtypes( infer_objects, convert_string, convert_integer, convert_boolean, convert_floating, dtype_backend=dtype_backend, ) for col_name, col in self.items() ] if len(results) > 0: result = concat(results, axis=1, copy=False, keys=self.columns) cons = cast(Type["DataFrame"], self._constructor) result = cons(result) result = result.__finalize__(self, method="convert_dtypes") # https://github.com/python/mypy/issues/8354 return cast(NDFrameT, result) else: return self.copy(deep=None) # ---------------------------------------------------------------------- # Filling NA's def fillna( self: NDFrameT, value: Hashable | Mapping | Series | DataFrame = ..., *, method: FillnaOptions | None = ..., axis: Axis | None = ..., inplace: Literal[False] = ..., limit: int | None = ..., downcast: dict | None = ..., ) -> NDFrameT: ... def fillna( self, value: Hashable | Mapping | Series | DataFrame = ..., *, method: FillnaOptions | None = ..., axis: Axis | None = ..., inplace: Literal[True], limit: int | None = ..., downcast: dict | None = ..., ) -> None: ... def fillna( self: NDFrameT, value: Hashable | Mapping | Series | DataFrame = ..., *, method: FillnaOptions | None = ..., axis: Axis | None = ..., inplace: bool_t = ..., limit: int | None = ..., downcast: dict | None = ..., ) -> NDFrameT | None: ... def fillna( self: NDFrameT, value: Hashable | Mapping | Series | DataFrame = None, *, method: FillnaOptions | None = None, axis: Axis | None = None, inplace: bool_t = False, limit: int | None = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Fill NA/NaN values using the specified method. Parameters ---------- value : scalar, dict, Series, or DataFrame Value to use to fill holes (e.g. 0), alternately a dict/Series/DataFrame of values specifying which value to use for each index (for a Series) or column (for a DataFrame). Values not in the dict/Series/DataFrame will not be filled. This value cannot be a list. method : {{'backfill', 'bfill', 'ffill', None}}, default None Method to use for filling holes in reindexed Series: * ffill: propagate last valid observation forward to next valid. * backfill / bfill: use next valid observation to fill gap. axis : {axes_single_arg} Axis along which to fill missing values. For `Series` this parameter is unused and defaults to 0. inplace : bool, default False If True, fill in-place. Note: this will modify any other views on this object (e.g., a no-copy slice for a column in a DataFrame). limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. downcast : dict, default is None A dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate equal type (e.g. float64 to int64 if possible). Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. See Also -------- interpolate : Fill NaN values using interpolation. reindex : Conform object to new index. 
asfreq : Convert TimeSeries to specified frequency. Examples -------- >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0], ... [3, 4, np.nan, 1], ... [np.nan, np.nan, np.nan, np.nan], ... [np.nan, 3, np.nan, 4]], ... columns=list("ABCD")) >>> df A B C D 0 NaN 2.0 NaN 0.0 1 3.0 4.0 NaN 1.0 2 NaN NaN NaN NaN 3 NaN 3.0 NaN 4.0 Replace all NaN elements with 0s. >>> df.fillna(0) A B C D 0 0.0 2.0 0.0 0.0 1 3.0 4.0 0.0 1.0 2 0.0 0.0 0.0 0.0 3 0.0 3.0 0.0 4.0 We can also propagate non-null values forward or backward. >>> df.fillna(method="ffill") A B C D 0 NaN 2.0 NaN 0.0 1 3.0 4.0 NaN 1.0 2 3.0 4.0 NaN 1.0 3 3.0 3.0 NaN 4.0 Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1, 2, and 3 respectively. >>> values = {{"A": 0, "B": 1, "C": 2, "D": 3}} >>> df.fillna(value=values) A B C D 0 0.0 2.0 2.0 0.0 1 3.0 4.0 2.0 1.0 2 0.0 1.0 2.0 3.0 3 0.0 3.0 2.0 4.0 Only replace the first NaN element. >>> df.fillna(value=values, limit=1) A B C D 0 0.0 2.0 2.0 0.0 1 3.0 4.0 NaN 1.0 2 NaN 1.0 NaN 3.0 3 NaN 3.0 NaN 4.0 When filling using a DataFrame, replacement happens along the same column names and same indices >>> df2 = pd.DataFrame(np.zeros((4, 4)), columns=list("ABCE")) >>> df.fillna(df2) A B C D 0 0.0 2.0 0.0 0.0 1 3.0 4.0 0.0 1.0 2 0.0 0.0 0.0 NaN 3 0.0 3.0 0.0 4.0 Note that column D is not affected since it is not present in df2. """ inplace = validate_bool_kwarg(inplace, "inplace") value, method = validate_fillna_kwargs(value, method) # set the default here, so functions examining the signaure # can detect if something was set (e.g. in groupby) (GH9221) if axis is None: axis = 0 axis = self._get_axis_number(axis) if value is None: if not self._mgr.is_single_block and axis == 1: if inplace: raise NotImplementedError() result = self.T.fillna(method=method, limit=limit).T return result new_data = self._mgr.interpolate( method=method, axis=axis, limit=limit, inplace=inplace, downcast=downcast, ) else: if self.ndim == 1: if isinstance(value, (dict, ABCSeries)): if not len(value): # test_fillna_nonscalar if inplace: return None return self.copy(deep=None) from pandas import Series value = Series(value) value = value.reindex(self.index, copy=False) value = value._values elif not is_list_like(value): pass else: raise TypeError( '"value" parameter must be a scalar, dict ' "or Series, but you passed a " f'"{type(value).__name__}"' ) new_data = self._mgr.fillna( value=value, limit=limit, inplace=inplace, downcast=downcast ) elif isinstance(value, (dict, ABCSeries)): if axis == 1: raise NotImplementedError( "Currently only can fill " "with dict/Series column " "by column" ) if using_copy_on_write(): result = self.copy(deep=None) else: result = self if inplace else self.copy() is_dict = isinstance(downcast, dict) for k, v in value.items(): if k not in result: continue # error: Item "None" of "Optional[Dict[Any, Any]]" has no # attribute "get" downcast_k = ( downcast if not is_dict else downcast.get(k) # type: ignore[union-attr] ) res_k = result[k].fillna(v, limit=limit, downcast=downcast_k) if not inplace: result[k] = res_k else: # We can write into our existing column(s) iff dtype # was preserved. if isinstance(res_k, ABCSeries): # i.e. 'k' only shows up once in self.columns if res_k.dtype == result[k].dtype: result.loc[:, k] = res_k else: # Different dtype -> no way to do inplace. 
result[k] = res_k else: # see test_fillna_dict_inplace_nonunique_columns locs = result.columns.get_loc(k) if isinstance(locs, slice): locs = np.arange(self.shape[1])[locs] elif ( isinstance(locs, np.ndarray) and locs.dtype.kind == "b" ): locs = locs.nonzero()[0] elif not ( isinstance(locs, np.ndarray) and locs.dtype.kind == "i" ): # Should never be reached, but let's cover our bases raise NotImplementedError( "Unexpected get_loc result, please report a bug at " "https://github.com/pandas-dev/pandas" ) for i, loc in enumerate(locs): res_loc = res_k.iloc[:, i] target = self.iloc[:, loc] if res_loc.dtype == target.dtype: result.iloc[:, loc] = res_loc else: result.isetitem(loc, res_loc) if inplace: return self._update_inplace(result) else: return result elif not is_list_like(value): if axis == 1: result = self.T.fillna(value=value, limit=limit).T new_data = result else: new_data = self._mgr.fillna( value=value, limit=limit, inplace=inplace, downcast=downcast ) elif isinstance(value, ABCDataFrame) and self.ndim == 2: new_data = self.where(self.notna(), value)._mgr else: raise ValueError(f"invalid fill value with a {type(value)}") result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="fillna") def ffill( self: NDFrameT, *, axis: None | Axis = ..., inplace: Literal[False] = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT: ... def ffill( self, *, axis: None | Axis = ..., inplace: Literal[True], limit: None | int = ..., downcast: dict | None = ..., ) -> None: ... def ffill( self: NDFrameT, *, axis: None | Axis = ..., inplace: bool_t = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT | None: ... def ffill( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``. Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. """ return self.fillna( method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast ) def pad( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``. .. deprecated:: 2.0 {klass}.pad is deprecated. Use {klass}.ffill instead. Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. """ warnings.warn( "DataFrame.pad/Series.pad is deprecated. Use " "DataFrame.ffill/Series.ffill instead", FutureWarning, stacklevel=find_stack_level(), ) return self.ffill(axis=axis, inplace=inplace, limit=limit, downcast=downcast) def bfill( self: NDFrameT, *, axis: None | Axis = ..., inplace: Literal[False] = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT: ... def bfill( self, *, axis: None | Axis = ..., inplace: Literal[True], limit: None | int = ..., downcast: dict | None = ..., ) -> None: ... def bfill( self: NDFrameT, *, axis: None | Axis = ..., inplace: bool_t = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT | None: ... def bfill( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``. 
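        Like ``ffill`` and ``pad`` above, this method is a thin alias for
        ``fillna``; a minimal sketch of the equivalence, with ``df`` standing
        in for any hypothetical frame containing gaps:

        >>> df.bfill().equals(df.fillna(method="bfill"))  # doctest: +SKIP
        True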
        Returns
        -------
        {klass} or None
            Object with missing values filled or None if ``inplace=True``.
        """
        return self.fillna(
            method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
        )

    def backfill(
        self: NDFrameT,
        *,
        axis: None | Axis = None,
        inplace: bool_t = False,
        limit: None | int = None,
        downcast: dict | None = None,
    ) -> NDFrameT | None:
        """
        Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.

        .. deprecated:: 2.0
            {klass}.backfill is deprecated. Use {klass}.bfill instead.

        Returns
        -------
        {klass} or None
            Object with missing values filled or None if ``inplace=True``.
        """
        warnings.warn(
            "DataFrame.backfill/Series.backfill is deprecated. Use "
            "DataFrame.bfill/Series.bfill instead",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self.bfill(axis=axis, inplace=inplace, limit=limit, downcast=downcast)

    @overload
    def replace(
        self: NDFrameT,
        to_replace=...,
        value=...,
        *,
        inplace: Literal[False] = ...,
        limit: int | None = ...,
        regex: bool_t = ...,
        method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ...,
    ) -> NDFrameT:
        ...

    @overload
    def replace(
        self,
        to_replace=...,
        value=...,
        *,
        inplace: Literal[True],
        limit: int | None = ...,
        regex: bool_t = ...,
        method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ...,
    ) -> None:
        ...

    @overload
    def replace(
        self: NDFrameT,
        to_replace=...,
        value=...,
        *,
        inplace: bool_t = ...,
        limit: int | None = ...,
        regex: bool_t = ...,
        method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ...,
    ) -> NDFrameT | None:
        ...

    @doc(
        _shared_docs["replace"],
        klass=_shared_doc_kwargs["klass"],
        inplace=_shared_doc_kwargs["inplace"],
        replace_iloc=_shared_doc_kwargs["replace_iloc"],
    )
    def replace(
        self: NDFrameT,
        to_replace=None,
        value=lib.no_default,
        *,
        inplace: bool_t = False,
        limit: int | None = None,
        regex: bool_t = False,
        method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = lib.no_default,
    ) -> NDFrameT | None:
        if not (
            is_scalar(to_replace)
            or is_re_compilable(to_replace)
            or is_list_like(to_replace)
        ):
            raise TypeError(
                "Expecting 'to_replace' to be either a scalar, array-like, "
                "dict or None, got invalid type "
                f"{repr(type(to_replace).__name__)}"
            )

        inplace = validate_bool_kwarg(inplace, "inplace")
        if not is_bool(regex) and to_replace is not None:
            raise ValueError("'to_replace' must be 'None' if 'regex' is not a bool")

        if value is lib.no_default or method is not lib.no_default:
            # GH#36984 if the user explicitly passes value=None we want to
            # respect that. We have the corner case where the user explicitly
            # passes value=None *and* a method, which we interpret as meaning
            # they want the (documented) default behavior.
            if method is lib.no_default:
                # TODO: get this to show up as the default in the docs?
method = "pad" # passing a single value that is scalar like # when value is None (GH5319), for compat if not is_dict_like(to_replace) and not is_dict_like(regex): to_replace = [to_replace] if isinstance(to_replace, (tuple, list)): # TODO: Consider copy-on-write for non-replaced columns's here if isinstance(self, ABCDataFrame): from pandas import Series result = self.apply( Series._replace_single, args=(to_replace, method, inplace, limit), ) if inplace: return None return result return self._replace_single(to_replace, method, inplace, limit) if not is_dict_like(to_replace): if not is_dict_like(regex): raise TypeError( 'If "to_replace" and "value" are both None ' 'and "to_replace" is not a list, then ' "regex must be a mapping" ) to_replace = regex regex = True items = list(to_replace.items()) if items: keys, values = zip(*items) else: keys, values = ([], []) are_mappings = [is_dict_like(v) for v in values] if any(are_mappings): if not all(are_mappings): raise TypeError( "If a nested mapping is passed, all values " "of the top level mapping must be mappings" ) # passed a nested dict/Series to_rep_dict = {} value_dict = {} for k, v in items: keys, values = list(zip(*v.items())) or ([], []) to_rep_dict[k] = list(keys) value_dict[k] = list(values) to_replace, value = to_rep_dict, value_dict else: to_replace, value = keys, values return self.replace( to_replace, value, inplace=inplace, limit=limit, regex=regex ) else: # need a non-zero len on all axes if not self.size: if inplace: return None return self.copy(deep=None) if is_dict_like(to_replace): if is_dict_like(value): # {'A' : NA} -> {'A' : 0} # Note: Checking below for `in foo.keys()` instead of # `in foo` is needed for when we have a Series and not dict mapping = { col: (to_replace[col], value[col]) for col in to_replace.keys() if col in value.keys() and col in self } return self._replace_columnwise(mapping, inplace, regex) # {'A': NA} -> 0 elif not is_list_like(value): # Operate column-wise if self.ndim == 1: raise ValueError( "Series.replace cannot use dict-like to_replace " "and non-None value" ) mapping = { col: (to_rep, value) for col, to_rep in to_replace.items() } return self._replace_columnwise(mapping, inplace, regex) else: raise TypeError("value argument must be scalar, dict, or Series") elif is_list_like(to_replace): if not is_list_like(value): # e.g. to_replace = [NA, ''] and value is 0, # so we replace NA with 0 and then replace '' with 0 value = [value] * len(to_replace) # e.g. we have to_replace = [NA, ''] and value = [0, 'missing'] if len(to_replace) != len(value): raise ValueError( f"Replacement lists must match in length. 
" f"Expecting {len(to_replace)} got {len(value)} " ) new_data = self._mgr.replace_list( src_list=to_replace, dest_list=value, inplace=inplace, regex=regex, ) elif to_replace is None: if not ( is_re_compilable(regex) or is_list_like(regex) or is_dict_like(regex) ): raise TypeError( f"'regex' must be a string or a compiled regular expression " f"or a list or dict of strings or regular expressions, " f"you passed a {repr(type(regex).__name__)}" ) return self.replace( regex, value, inplace=inplace, limit=limit, regex=True ) else: # dest iterable dict-like if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1} # Operate column-wise if self.ndim == 1: raise ValueError( "Series.replace cannot use dict-value and " "non-None to_replace" ) mapping = {col: (to_replace, val) for col, val in value.items()} return self._replace_columnwise(mapping, inplace, regex) elif not is_list_like(value): # NA -> 0 regex = should_use_regex(regex, to_replace) if regex: new_data = self._mgr.replace_regex( to_replace=to_replace, value=value, inplace=inplace, ) else: new_data = self._mgr.replace( to_replace=to_replace, value=value, inplace=inplace ) else: raise TypeError( f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}' ) result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="replace") def interpolate( self: NDFrameT, method: str = "linear", *, axis: Axis = 0, limit: int | None = None, inplace: bool_t = False, limit_direction: str | None = None, limit_area: str | None = None, downcast: str | None = None, **kwargs, ) -> NDFrameT | None: """ Fill NaN values using an interpolation method. Please note that only ``method='linear'`` is supported for DataFrame/Series with a MultiIndex. Parameters ---------- method : str, default 'linear' Interpolation technique to use. One of: * 'linear': Ignore the index and treat the values as equally spaced. This is the only method supported on MultiIndexes. * 'time': Works on daily and higher resolution data to interpolate given length of interval. * 'index', 'values': use the actual numerical values of the index. * 'pad': Fill in NaNs using existing values. * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'barycentric', 'polynomial': Passed to `scipy.interpolate.interp1d`, whereas 'spline' is passed to `scipy.interpolate.UnivariateSpline`. These methods use the numerical values of the index. Both 'polynomial' and 'spline' require that you also specify an `order` (int), e.g. ``df.interpolate(method='polynomial', order=5)``. Note that, `slinear` method in Pandas refers to the Scipy first order `spline` instead of Pandas first order `spline`. * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima', 'cubicspline': Wrappers around the SciPy interpolation methods of similar names. See `Notes`. * 'from_derivatives': Refers to `scipy.interpolate.BPoly.from_derivatives` which replaces 'piecewise_polynomial' interpolation method in scipy 0.18. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to interpolate along. For `Series` this parameter is unused and defaults to 0. limit : int, optional Maximum number of consecutive NaNs to fill. Must be greater than 0. inplace : bool, default False Update the data in place if possible. limit_direction : {{'forward', 'backward', 'both'}}, Optional Consecutive NaNs will be filled in this direction. If limit is specified: * If 'method' is 'pad' or 'ffill', 'limit_direction' must be 'forward'. 
* If 'method' is 'backfill' or 'bfill', 'limit_direction' must be 'backwards'. If 'limit' is not specified: * If 'method' is 'backfill' or 'bfill', the default is 'backward' * else the default is 'forward' .. versionchanged:: 1.1.0 raises ValueError if `limit_direction` is 'forward' or 'both' and method is 'backfill' or 'bfill'. raises ValueError if `limit_direction` is 'backward' or 'both' and method is 'pad' or 'ffill'. limit_area : {{`None`, 'inside', 'outside'}}, default None If limit is specified, consecutive NaNs will be filled with this restriction. * ``None``: No fill restriction. * 'inside': Only fill NaNs surrounded by valid values (interpolate). * 'outside': Only fill NaNs outside valid values (extrapolate). downcast : optional, 'infer' or None, defaults to None Downcast dtypes if possible. ``**kwargs`` : optional Keyword arguments to pass on to the interpolating function. Returns ------- Series or DataFrame or None Returns the same object type as the caller, interpolated at some or all ``NaN`` values or None if ``inplace=True``. See Also -------- fillna : Fill missing values using different methods. scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials (Akima interpolator). scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the Bernstein basis. scipy.interpolate.interp1d : Interpolate a 1-D function. scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh interpolator). scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic interpolation. scipy.interpolate.CubicSpline : Cubic spline data interpolator. Notes ----- The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima' methods are wrappers around the respective SciPy implementations of similar names. These use the actual numerical values of the index. For more information on their behavior, see the `SciPy documentation <https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__. Examples -------- Filling in ``NaN`` in a :class:`~pandas.Series` via linear interpolation. >>> s = pd.Series([0, 1, np.nan, 3]) >>> s 0 0.0 1 1.0 2 NaN 3 3.0 dtype: float64 >>> s.interpolate() 0 0.0 1 1.0 2 2.0 3 3.0 dtype: float64 Filling in ``NaN`` in a Series by padding, but filling at most two consecutive ``NaN`` at a time. >>> s = pd.Series([np.nan, "single_one", np.nan, ... "fill_two_more", np.nan, np.nan, np.nan, ... 4.71, np.nan]) >>> s 0 NaN 1 single_one 2 NaN 3 fill_two_more 4 NaN 5 NaN 6 NaN 7 4.71 8 NaN dtype: object >>> s.interpolate(method='pad', limit=2) 0 NaN 1 single_one 2 single_one 3 fill_two_more 4 fill_two_more 5 fill_two_more 6 NaN 7 4.71 8 4.71 dtype: object Filling in ``NaN`` in a Series via polynomial interpolation or splines: Both 'polynomial' and 'spline' methods require that you also specify an ``order`` (int). >>> s = pd.Series([0, 2, np.nan, 8]) >>> s.interpolate(method='polynomial', order=2) 0 0.000000 1 2.000000 2 4.666667 3 8.000000 dtype: float64 Fill the DataFrame forward (that is, going down) along each column using linear interpolation. Note how the last entry in column 'a' is interpolated differently, because there is no entry after it to use for interpolation. Note how the first entry in column 'b' remains ``NaN``, because there is no entry before it to use for interpolation. >>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0), ... (np.nan, 2.0, np.nan, np.nan), ... (2.0, 3.0, np.nan, 9.0), ... (np.nan, 4.0, -4.0, 16.0)], ... 
columns=list('abcd')) >>> df a b c d 0 0.0 NaN -1.0 1.0 1 NaN 2.0 NaN NaN 2 2.0 3.0 NaN 9.0 3 NaN 4.0 -4.0 16.0 >>> df.interpolate(method='linear', limit_direction='forward', axis=0) a b c d 0 0.0 NaN -1.0 1.0 1 1.0 2.0 -2.0 5.0 2 2.0 3.0 -3.0 9.0 3 2.0 4.0 -4.0 16.0 Using polynomial interpolation. >>> df['d'].interpolate(method='polynomial', order=2) 0 1.0 1 4.0 2 9.0 3 16.0 Name: d, dtype: float64 """ inplace = validate_bool_kwarg(inplace, "inplace") axis = self._get_axis_number(axis) fillna_methods = ["ffill", "bfill", "pad", "backfill"] should_transpose = axis == 1 and method not in fillna_methods obj = self.T if should_transpose else self if obj.empty: return self.copy() if method not in fillna_methods: axis = self._info_axis_number if isinstance(obj.index, MultiIndex) and method != "linear": raise ValueError( "Only `method=linear` interpolation is supported on MultiIndexes." ) # Set `limit_direction` depending on `method` if limit_direction is None: limit_direction = ( "backward" if method in ("backfill", "bfill") else "forward" ) else: if method in ("pad", "ffill") and limit_direction != "forward": raise ValueError( f"`limit_direction` must be 'forward' for method `{method}`" ) if method in ("backfill", "bfill") and limit_direction != "backward": raise ValueError( f"`limit_direction` must be 'backward' for method `{method}`" ) if obj.ndim == 2 and np.all(obj.dtypes == np.dtype("object")): raise TypeError( "Cannot interpolate with all object-dtype columns " "in the DataFrame. Try setting at least one " "column to a numeric dtype." ) # create/use the index if method == "linear": # prior default index = Index(np.arange(len(obj.index))) else: index = obj.index methods = {"index", "values", "nearest", "time"} is_numeric_or_datetime = ( is_numeric_dtype(index.dtype) or is_datetime64_any_dtype(index.dtype) or is_timedelta64_dtype(index.dtype) ) if method not in methods and not is_numeric_or_datetime: raise ValueError( "Index column must be numeric or datetime type when " f"using {method} method other than linear. " "Try setting a numeric or datetime index column before " "interpolating." ) if isna(index).any(): raise NotImplementedError( "Interpolation with NaNs in the index " "has not been implemented. Try filling " "those NaNs before interpolating." ) new_data = obj._mgr.interpolate( method=method, axis=axis, index=index, limit=limit, limit_direction=limit_direction, limit_area=limit_area, inplace=inplace, downcast=downcast, **kwargs, ) result = self._constructor(new_data) if should_transpose: result = result.T if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="interpolate") # ---------------------------------------------------------------------- # Timeseries methods Methods def asof(self, where, subset=None): """ Return the last row(s) without any NaNs before `where`. The last row (for each element in `where`, if list) without any NaN is taken. In case of a :class:`~pandas.DataFrame`, the last row without NaN considering only the subset of columns (if not `None`) If there is no good value, NaN is returned for a Series or a Series of NaN values for a DataFrame Parameters ---------- where : date or array-like of dates Date(s) before which the last row(s) are returned. subset : str or array-like of str, default `None` For DataFrame, if not `None`, only use these columns to check for NaNs. 
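The index-aware interpolation methods above differ from the default exactly when the index is irregular; a minimal sketch of ``'linear'`` (equal spacing) against ``'index'`` (actual index values):

>>> s = pd.Series([0.0, np.nan, 10.0], index=[0, 1, 10])
>>> s.interpolate()
0      0.0
1      5.0
10    10.0
dtype: float64
>>> s.interpolate(method='index')
0      0.0
1      1.0
10    10.0
dtype: float64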
Returns ------- scalar, Series, or DataFrame The return can be: * scalar : when `self` is a Series and `where` is a scalar * Series: when `self` is a Series and `where` is an array-like, or when `self` is a DataFrame and `where` is a scalar * DataFrame : when `self` is a DataFrame and `where` is an array-like Return scalar, Series, or DataFrame. See Also -------- merge_asof : Perform an asof merge. Similar to left join. Notes ----- Dates are assumed to be sorted. Raises if this is not the case. Examples -------- A Series and a scalar `where`. >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40]) >>> s 10 1.0 20 2.0 30 NaN 40 4.0 dtype: float64 >>> s.asof(20) 2.0 For a sequence `where`, a Series is returned. The first value is NaN, because the first element of `where` is before the first index value. >>> s.asof([5, 20]) 5 NaN 20 2.0 dtype: float64 Missing values are not considered. The following is ``2.0``, not NaN, even though NaN is at the index location for ``30``. >>> s.asof(30) 2.0 Take all columns into consideration >>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50], ... 'b': [None, None, None, None, 500]}, ... index=pd.DatetimeIndex(['2018-02-27 09:01:00', ... '2018-02-27 09:02:00', ... '2018-02-27 09:03:00', ... '2018-02-27 09:04:00', ... '2018-02-27 09:05:00'])) >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', ... '2018-02-27 09:04:30'])) a b 2018-02-27 09:03:30 NaN NaN 2018-02-27 09:04:30 NaN NaN Take a single column into consideration >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', ... '2018-02-27 09:04:30']), ... subset=['a']) a b 2018-02-27 09:03:30 30 NaN 2018-02-27 09:04:30 40 NaN """ if isinstance(where, str): where = Timestamp(where) if not self.index.is_monotonic_increasing: raise ValueError("asof requires a sorted index") is_series = isinstance(self, ABCSeries) if is_series: if subset is not None: raise ValueError("subset is not valid for Series") else: if subset is None: subset = self.columns if not is_list_like(subset): subset = [subset] is_list = is_list_like(where) if not is_list: start = self.index[0] if isinstance(self.index, PeriodIndex): where = Period(where, freq=self.index.freq) if where < start: if not is_series: return self._constructor_sliced( index=self.columns, name=where, dtype=np.float64 ) return np.nan # It's always much faster to use a *while* loop here for # Series than pre-computing all the NAs. However a # *while* loop is extremely expensive for DataFrame # so we later pre-compute all the NAs and use the same # code path whether *where* is a scalar or list. # See PR: https://github.com/pandas-dev/pandas/pull/14476 if is_series: loc = self.index.searchsorted(where, side="right") if loc > 0: loc -= 1 values = self._values while loc > 0 and isna(values[loc]): loc -= 1 return values[loc] if not isinstance(where, Index): where = Index(where) if is_list else Index([where]) nulls = self.isna() if is_series else self[subset].isna().any(axis=1) if nulls.all(): if is_series: self = cast("Series", self) return self._constructor(np.nan, index=where, name=self.name) elif is_list: self = cast("DataFrame", self) return self._constructor(np.nan, index=where, columns=self.columns) else: self = cast("DataFrame", self) return self._constructor_sliced( np.nan, index=self.columns, name=where[0] ) locs = self.index.asof_locs(where, ~(nulls._values)) # mask the missing missing = locs == -1 data = self.take(locs) data.index = where if missing.any(): # GH#16063 only do this setting when necessary, otherwise # we'd cast e.g. 
bools to floats data.loc[missing] = np.nan return data if is_list else data.iloc[-1] # ---------------------------------------------------------------------- # Action Methods def isna(self: NDFrameT) -> NDFrameT: """ Detect missing values. Return a boolean same-sized object indicating if the values are NA. NA values, such as None or :attr:`numpy.NaN`, gets mapped to True values. Everything else gets mapped to False values. Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). Returns ------- {klass} Mask of bool values for each element in {klass} that indicates whether an element is an NA value. See Also -------- {klass}.isnull : Alias of isna. {klass}.notna : Boolean inverse of isna. {klass}.dropna : Omit axes labels with missing values. isna : Top-level isna. Examples -------- Show which entries in a DataFrame are NA. >>> df = pd.DataFrame(dict(age=[5, 6, np.NaN], ... born=[pd.NaT, pd.Timestamp('1939-05-27'), ... pd.Timestamp('1940-04-25')], ... name=['Alfred', 'Batman', ''], ... toy=[None, 'Batmobile', 'Joker'])) >>> df age born name toy 0 5.0 NaT Alfred None 1 6.0 1939-05-27 Batman Batmobile 2 NaN 1940-04-25 Joker >>> df.isna() age born name toy 0 False True False True 1 False False False False 2 True False False False Show which entries in a Series are NA. >>> ser = pd.Series([5, 6, np.NaN]) >>> ser 0 5.0 1 6.0 2 NaN dtype: float64 >>> ser.isna() 0 False 1 False 2 True dtype: bool """ return isna(self).__finalize__(self, method="isna") def isnull(self: NDFrameT) -> NDFrameT: return isna(self).__finalize__(self, method="isnull") def notna(self: NDFrameT) -> NDFrameT: """ Detect existing (non-missing) values. Return a boolean same-sized object indicating if the values are not NA. Non-missing values get mapped to True. Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). NA values, such as None or :attr:`numpy.NaN`, get mapped to False values. Returns ------- {klass} Mask of bool values for each element in {klass} that indicates whether an element is not an NA value. See Also -------- {klass}.notnull : Alias of notna. {klass}.isna : Boolean inverse of notna. {klass}.dropna : Omit axes labels with missing values. notna : Top-level notna. Examples -------- Show which entries in a DataFrame are not NA. >>> df = pd.DataFrame(dict(age=[5, 6, np.NaN], ... born=[pd.NaT, pd.Timestamp('1939-05-27'), ... pd.Timestamp('1940-04-25')], ... name=['Alfred', 'Batman', ''], ... toy=[None, 'Batmobile', 'Joker'])) >>> df age born name toy 0 5.0 NaT Alfred None 1 6.0 1939-05-27 Batman Batmobile 2 NaN 1940-04-25 Joker >>> df.notna() age born name toy 0 True False True False 1 True True True True 2 False True True True Show which entries in a Series are not NA. 
>>> ser = pd.Series([5, 6, np.NaN]) >>> ser 0 5.0 1 6.0 2 NaN dtype: float64 >>> ser.notna() 0 True 1 True 2 False dtype: bool """ return notna(self).__finalize__(self, method="notna") def notnull(self: NDFrameT) -> NDFrameT: return notna(self).__finalize__(self, method="notnull") def _clip_with_scalar(self, lower, upper, inplace: bool_t = False): if (lower is not None and np.any(isna(lower))) or ( upper is not None and np.any(isna(upper)) ): raise ValueError("Cannot use an NA value as a clip threshold") result = self mask = isna(self._values) with np.errstate(all="ignore"): if upper is not None: subset = self <= upper result = result.where(subset, upper, axis=None, inplace=False) if lower is not None: subset = self >= lower result = result.where(subset, lower, axis=None, inplace=False) if np.any(mask): result[mask] = np.nan if inplace: return self._update_inplace(result) else: return result def _clip_with_one_bound(self, threshold, method, axis, inplace): if axis is not None: axis = self._get_axis_number(axis) # method is self.le for upper bound and self.ge for lower bound if is_scalar(threshold) and is_number(threshold): if method.__name__ == "le": return self._clip_with_scalar(None, threshold, inplace=inplace) return self._clip_with_scalar(threshold, None, inplace=inplace) # GH #15390 # In order for where method to work, the threshold must # be transformed to NDFrame from other array like structure. if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold): if isinstance(self, ABCSeries): threshold = self._constructor(threshold, index=self.index) else: threshold = align_method_FRAME(self, threshold, axis, flex=None)[1] # GH 40420 # Treat missing thresholds as no bounds, not clipping the values if is_list_like(threshold): fill_value = np.inf if method.__name__ == "le" else -np.inf threshold_inf = threshold.fillna(fill_value) else: threshold_inf = threshold subset = method(threshold_inf, axis=axis) | isna(self) # GH 40420 return self.where(subset, threshold, axis=axis, inplace=inplace) def clip( self: NDFrameT, lower=None, upper=None, *, axis: Axis | None = None, inplace: bool_t = False, **kwargs, ) -> NDFrameT | None: """ Trim values at input threshold(s). Assigns values outside boundary to boundary values. Thresholds can be singular values or array like, and in the latter case the clipping is performed element-wise in the specified axis. Parameters ---------- lower : float or array-like, default None Minimum threshold value. All values below this threshold will be set to it. A missing threshold (e.g `NA`) will not clip the value. upper : float or array-like, default None Maximum threshold value. All values above this threshold will be set to it. A missing threshold (e.g `NA`) will not clip the value. axis : {{0 or 'index', 1 or 'columns', None}}, default None Align object with lower and upper along the given axis. For `Series` this parameter is unused and defaults to `None`. inplace : bool, default False Whether to perform the operation in place on the data. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with numpy. Returns ------- Series or DataFrame or None Same type as calling object with the values outside the clip boundaries replaced or None if ``inplace=True``. See Also -------- Series.clip : Trim values at input threshold in series. DataFrame.clip : Trim values at input threshold in dataframe. numpy.clip : Clip (limit) the values in an array. 
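A missing threshold is made inert above by filling it with the appropriate infinity before the comparison; a minimal sketch of that trick on its own:

>>> t = pd.Series([2.0, np.nan, -1.0])
>>> t.fillna(-np.inf)  # a lower bound of -inf can never clip
0    2.0
1   -inf
2   -1.0
dtype: float64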
Examples -------- >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]} >>> df = pd.DataFrame(data) >>> df col_0 col_1 0 9 -2 1 -3 -7 2 0 6 3 -1 8 4 5 -5 Clips per column using lower and upper thresholds: >>> df.clip(-4, 6) col_0 col_1 0 6 -2 1 -3 -4 2 0 6 3 -1 6 4 5 -4 Clips using specific lower and upper thresholds per column element: >>> t = pd.Series([2, -4, -1, 6, 3]) >>> t 0 2 1 -4 2 -1 3 6 4 3 dtype: int64 >>> df.clip(t, t + 4, axis=0) col_0 col_1 0 6 2 1 -3 -4 2 0 3 3 6 8 4 5 3 Clips using specific lower threshold per column element, with missing values: >>> t = pd.Series([2, -4, np.NaN, 6, 3]) >>> t 0 2.0 1 -4.0 2 NaN 3 6.0 4 3.0 dtype: float64 >>> df.clip(t, axis=0) col_0 col_1 0 9 2 1 -3 -4 2 0 6 3 6 8 4 5 3 """ inplace = validate_bool_kwarg(inplace, "inplace") axis = nv.validate_clip_with_axis(axis, (), kwargs) if axis is not None: axis = self._get_axis_number(axis) # GH 17276 # numpy doesn't like NaN as a clip value # so ignore # GH 19992 # numpy doesn't drop a list-like bound containing NaN isna_lower = isna(lower) if not is_list_like(lower): if np.any(isna_lower): lower = None elif np.all(isna_lower): lower = None isna_upper = isna(upper) if not is_list_like(upper): if np.any(isna_upper): upper = None elif np.all(isna_upper): upper = None # GH 2747 (arguments were reversed) if ( lower is not None and upper is not None and is_scalar(lower) and is_scalar(upper) ): lower, upper = min(lower, upper), max(lower, upper) # fast-path for scalars if (lower is None or (is_scalar(lower) and is_number(lower))) and ( upper is None or (is_scalar(upper) and is_number(upper)) ): return self._clip_with_scalar(lower, upper, inplace=inplace) result = self if lower is not None: result = result._clip_with_one_bound( lower, method=self.ge, axis=axis, inplace=inplace ) if upper is not None: if inplace: result = self result = result._clip_with_one_bound( upper, method=self.le, axis=axis, inplace=inplace ) return result def asfreq( self: NDFrameT, freq: Frequency, method: FillnaOptions | None = None, how: str | None = None, normalize: bool_t = False, fill_value: Hashable = None, ) -> NDFrameT: """ Convert time series to specified frequency. Returns the original data conformed to a new index with the specified frequency. If the index of this {klass} is a :class:`~pandas.PeriodIndex`, the new index is the result of transforming the original index with :meth:`PeriodIndex.asfreq <pandas.PeriodIndex.asfreq>` (so the original index will map one-to-one to the new index). Otherwise, the new index will be equivalent to ``pd.date_range(start, end, freq=freq)`` where ``start`` and ``end`` are, respectively, the first and last entries in the original index (see :func:`pandas.date_range`). The values corresponding to any timesteps in the new index which were not present in the original index will be null (``NaN``), unless a method for filling such unknowns is provided (see the ``method`` parameter below). The :meth:`resample` method is more appropriate if an operation on each group of timesteps (such as an aggregate) is necessary to represent the data at the new frequency. Parameters ---------- freq : DateOffset or str Frequency DateOffset or string. method : {{'backfill'/'bfill', 'pad'/'ffill'}}, default None Method to use for filling holes in reindexed Series (note this does not fill NaNs that already were present): * 'pad' / 'ffill': propagate last valid observation forward to next valid * 'backfill' / 'bfill': use NEXT valid observation to fill. 
how : {{'start', 'end'}}, default end For PeriodIndex only (see PeriodIndex.asfreq). normalize : bool, default False Whether to reset output index to midnight. fill_value : scalar, optional Value to use for missing values, applied during upsampling (note this does not fill NaNs that already were present). Returns ------- {klass} {klass} object reindexed to the specified frequency. See Also -------- reindex : Conform DataFrame to new index with optional filling logic. Notes ----- To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- Start by creating a series with 4 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=4, freq='T') >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index) >>> df = pd.DataFrame({{'s': series}}) >>> df s 2000-01-01 00:00:00 0.0 2000-01-01 00:01:00 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:03:00 3.0 Upsample the series into 30 second bins. >>> df.asfreq(freq='30S') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 NaN 2000-01-01 00:03:00 3.0 Upsample again, providing a ``fill value``. >>> df.asfreq(freq='30S', fill_value=9.0) s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 9.0 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 9.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 9.0 2000-01-01 00:03:00 3.0 Upsample again, providing a ``method``. >>> df.asfreq(freq='30S', method='bfill') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 2.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 3.0 2000-01-01 00:03:00 3.0 """ from pandas.core.resample import asfreq return asfreq( self, freq, method=method, how=how, normalize=normalize, fill_value=fill_value, ) def at_time( self: NDFrameT, time, asof: bool_t = False, axis: Axis | None = None ) -> NDFrameT: """ Select values at particular time of day (e.g., 9:30AM). Parameters ---------- time : datetime.time or str The values to select. axis : {0 or 'index', 1 or 'columns'}, default 0 For `Series` this parameter is unused and defaults to 0. Returns ------- Series or DataFrame Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- between_time : Select values between particular times of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_at_time : Get just the index locations for values at particular time of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='12H') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-09 12:00:00 2 2018-04-10 00:00:00 3 2018-04-10 12:00:00 4 >>> ts.at_time('12:00') A 2018-04-09 12:00:00 2 2018-04-10 12:00:00 4 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) if not isinstance(index, DatetimeIndex): raise TypeError("Index must be DatetimeIndex") indexer = index.indexer_at_time(time, asof=asof) return self._take_with_is_copy(indexer, axis=axis) def between_time( self: NDFrameT, start_time, end_time, inclusive: IntervalClosedType = "both", axis: Axis | None = None, ) -> NDFrameT: """ Select values between particular times of the day (e.g., 9:00-9:30 AM). 
By setting ``start_time`` to be later than ``end_time``, you can get the times that are *not* between the two times. Parameters ---------- start_time : datetime.time or str Initial time as a time filter limit. end_time : datetime.time or str End time as a time filter limit. inclusive : {"both", "neither", "left", "right"}, default "both" Include boundaries; whether to set each bound as closed or open. axis : {0 or 'index', 1 or 'columns'}, default 0 Determine range time on index or columns value. For `Series` this parameter is unused and defaults to 0. Returns ------- Series or DataFrame Data from the original object filtered to the specified dates range. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- at_time : Select values at a particular time of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_between_time : Get just the index locations for values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 2018-04-12 01:00:00 4 >>> ts.between_time('0:15', '0:45') A 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 You get the times that are *not* between two times by setting ``start_time`` later than ``end_time``: >>> ts.between_time('0:45', '0:15') A 2018-04-09 00:00:00 1 2018-04-12 01:00:00 4 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) if not isinstance(index, DatetimeIndex): raise TypeError("Index must be DatetimeIndex") left_inclusive, right_inclusive = validate_inclusive(inclusive) indexer = index.indexer_between_time( start_time, end_time, include_start=left_inclusive, include_end=right_inclusive, ) return self._take_with_is_copy(indexer, axis=axis) def resample( self, rule, axis: Axis = 0, closed: str | None = None, label: str | None = None, convention: str = "start", kind: str | None = None, on: Level = None, level: Level = None, origin: str | TimestampConvertibleTypes = "start_day", offset: TimedeltaConvertibleTypes | None = None, group_keys: bool_t = False, ) -> Resampler: """ Resample time-series data. Convenience method for frequency conversion and resampling of time series. The object must have a datetime-like index (`DatetimeIndex`, `PeriodIndex`, or `TimedeltaIndex`), or the caller must pass the label of a datetime-like series/index to the ``on``/``level`` keyword parameter. Parameters ---------- rule : DateOffset, Timedelta or str The offset string or object representing target conversion. axis : {{0 or 'index', 1 or 'columns'}}, default 0 Which axis to use for up- or down-sampling. For `Series` this parameter is unused and defaults to 0. Must be `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`. closed : {{'right', 'left'}}, default None Which side of bin interval is closed. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. label : {{'right', 'left'}}, default None Which bin edge label to label bucket with. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. convention : {{'start', 'end', 's', 'e'}}, default 'start' For `PeriodIndex` only, controls whether to use the start or end of `rule`. 
kind : {{'timestamp', 'period'}}, optional, default None Pass 'timestamp' to convert the resulting index to a `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`. By default the input representation is retained. on : str, optional For a DataFrame, column to use instead of index for resampling. Column must be datetime-like. level : str or int, optional For a MultiIndex, level (name or number) to use for resampling. `level` must be datetime-like. origin : Timestamp or str, default 'start_day' The timestamp on which to adjust the grouping. The timezone of origin must match the timezone of the index. If string, must be one of the following: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries .. versionadded:: 1.1.0 - 'end': `origin` is the last value of the timeseries - 'end_day': `origin` is the ceiling midnight of the last day .. versionadded:: 1.3.0 offset : Timedelta or str, default is None An offset timedelta added to the origin. .. versionadded:: 1.1.0 group_keys : bool, default False Whether to include the group keys in the result index when using ``.apply()`` on the resampled object. .. versionadded:: 1.5.0 Not specifying ``group_keys`` will retain values-dependent behavior from pandas 1.4 and earlier (see :ref:`pandas 1.5.0 Release notes <whatsnew_150.enhancements.resample_group_keys>` for examples). .. versionchanged:: 2.0.0 ``group_keys`` now defaults to ``False``. Returns ------- pandas.core.Resampler :class:`~pandas.core.Resampler` object. See Also -------- Series.resample : Resample a Series. DataFrame.resample : Resample a DataFrame. groupby : Group {klass} by mapping, function, label, or list of labels. asfreq : Reindex a {klass} with the given frequency without grouping. Notes ----- See the `user guide <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`__ for more. To learn more about the offset strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__. Examples -------- Start by creating a series with 9 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=9, freq='T') >>> series = pd.Series(range(9), index=index) >>> series 2000-01-01 00:00:00 0 2000-01-01 00:01:00 1 2000-01-01 00:02:00 2 2000-01-01 00:03:00 3 2000-01-01 00:04:00 4 2000-01-01 00:05:00 5 2000-01-01 00:06:00 6 2000-01-01 00:07:00 7 2000-01-01 00:08:00 8 Freq: T, dtype: int64 Downsample the series into 3 minute bins and sum the values of the timestamps falling into a bin. >>> series.resample('3T').sum() 2000-01-01 00:00:00 3 2000-01-01 00:03:00 12 2000-01-01 00:06:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but label each bin using the right edge instead of the left. Please note that the value in the bucket used as the label is not included in the bucket, which it labels. For example, in the original series the bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed value in the resampled bucket with the label ``2000-01-01 00:03:00`` does not include 3 (if it did, the summed value would be 6, not 3). To include this value close the right side of the bin interval as illustrated in the example below this one. 
>>> series.resample('3T', label='right').sum() 2000-01-01 00:03:00 3 2000-01-01 00:06:00 12 2000-01-01 00:09:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but close the right side of the bin interval. >>> series.resample('3T', label='right', closed='right').sum() 2000-01-01 00:00:00 0 2000-01-01 00:03:00 6 2000-01-01 00:06:00 15 2000-01-01 00:09:00 15 Freq: 3T, dtype: int64 Upsample the series into 30 second bins. >>> series.resample('30S').asfreq()[0:5] # Select first 5 rows 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 1.0 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 Freq: 30S, dtype: float64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``ffill`` method. >>> series.resample('30S').ffill()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 0 2000-01-01 00:01:00 1 2000-01-01 00:01:30 1 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``bfill`` method. >>> series.resample('30S').bfill()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 1 2000-01-01 00:01:00 1 2000-01-01 00:01:30 2 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Pass a custom function via ``apply`` >>> def custom_resampler(arraylike): ... return np.sum(arraylike) + 5 ... >>> series.resample('3T').apply(custom_resampler) 2000-01-01 00:00:00 8 2000-01-01 00:03:00 17 2000-01-01 00:06:00 26 Freq: 3T, dtype: int64 For a Series with a PeriodIndex, the keyword `convention` can be used to control whether to use the start or end of `rule`. Resample a year by quarter using 'start' `convention`. Values are assigned to the first quarter of the period. >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01', ... freq='A', ... periods=2)) >>> s 2012 1 2013 2 Freq: A-DEC, dtype: int64 >>> s.resample('Q', convention='start').asfreq() 2012Q1 1.0 2012Q2 NaN 2012Q3 NaN 2012Q4 NaN 2013Q1 2.0 2013Q2 NaN 2013Q3 NaN 2013Q4 NaN Freq: Q-DEC, dtype: float64 Resample quarters by month using 'end' `convention`. Values are assigned to the last month of the period. >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01', ... freq='Q', ... periods=4)) >>> q 2018Q1 1 2018Q2 2 2018Q3 3 2018Q4 4 Freq: Q-DEC, dtype: int64 >>> q.resample('M', convention='end').asfreq() 2018-03 1.0 2018-04 NaN 2018-05 NaN 2018-06 2.0 2018-07 NaN 2018-08 NaN 2018-09 3.0 2018-10 NaN 2018-11 NaN 2018-12 4.0 Freq: M, dtype: float64 For DataFrame objects, the keyword `on` can be used to specify the column instead of the index for resampling. >>> d = {{'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}} >>> df = pd.DataFrame(d) >>> df['week_starting'] = pd.date_range('01/01/2018', ... periods=8, ... freq='W') >>> df price volume week_starting 0 10 50 2018-01-07 1 11 60 2018-01-14 2 9 40 2018-01-21 3 13 100 2018-01-28 4 14 50 2018-02-04 5 18 100 2018-02-11 6 17 40 2018-02-18 7 19 50 2018-02-25 >>> df.resample('M', on='week_starting').mean() price volume week_starting 2018-01-31 10.75 62.5 2018-02-28 17.00 60.0 For a DataFrame with MultiIndex, the keyword `level` can be used to specify on which level the resampling needs to take place. >>> days = pd.date_range('1/1/2000', periods=4, freq='D') >>> d2 = {{'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}} >>> df2 = pd.DataFrame( ... d2, ... index=pd.MultiIndex.from_product( ... [days, ['morning', 'afternoon']] ... ) ... 
) >>> df2 price volume 2000-01-01 morning 10 50 afternoon 11 60 2000-01-02 morning 9 40 afternoon 13 100 2000-01-03 morning 14 50 afternoon 18 100 2000-01-04 morning 17 40 afternoon 19 50 >>> df2.resample('D', level=0).sum() price volume 2000-01-01 21 110 2000-01-02 22 140 2000-01-03 32 150 2000-01-04 36 90 If you want to adjust the start of the bins based on a fixed timestamp: >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00' >>> rng = pd.date_range(start, end, freq='7min') >>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng) >>> ts 2000-10-01 23:30:00 0 2000-10-01 23:37:00 3 2000-10-01 23:44:00 6 2000-10-01 23:51:00 9 2000-10-01 23:58:00 12 2000-10-02 00:05:00 15 2000-10-02 00:12:00 18 2000-10-02 00:19:00 21 2000-10-02 00:26:00 24 Freq: 7T, dtype: int64 >>> ts.resample('17min').sum() 2000-10-01 23:14:00 0 2000-10-01 23:31:00 9 2000-10-01 23:48:00 21 2000-10-02 00:05:00 54 2000-10-02 00:22:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', origin='epoch').sum() 2000-10-01 23:18:00 0 2000-10-01 23:35:00 18 2000-10-01 23:52:00 27 2000-10-02 00:09:00 39 2000-10-02 00:26:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', origin='2000-01-01').sum() 2000-10-01 23:24:00 3 2000-10-01 23:41:00 15 2000-10-01 23:58:00 45 2000-10-02 00:15:00 45 Freq: 17T, dtype: int64 If you want to adjust the start of the bins with an `offset` Timedelta, the two following lines are equivalent: >>> ts.resample('17min', origin='start').sum() 2000-10-01 23:30:00 9 2000-10-01 23:47:00 21 2000-10-02 00:04:00 54 2000-10-02 00:21:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', offset='23h30min').sum() 2000-10-01 23:30:00 9 2000-10-01 23:47:00 21 2000-10-02 00:04:00 54 2000-10-02 00:21:00 24 Freq: 17T, dtype: int64 If you want to take the largest Timestamp as the end of the bins: >>> ts.resample('17min', origin='end').sum() 2000-10-01 23:35:00 0 2000-10-01 23:52:00 18 2000-10-02 00:09:00 27 2000-10-02 00:26:00 63 Freq: 17T, dtype: int64 In contrast with the `start_day`, you can use `end_day` to take the ceiling midnight of the largest Timestamp as the end of the bins and drop the bins not containing data: >>> ts.resample('17min', origin='end_day').sum() 2000-10-01 23:38:00 3 2000-10-01 23:55:00 15 2000-10-02 00:12:00 45 2000-10-02 00:29:00 45 Freq: 17T, dtype: int64 """ from pandas.core.resample import get_resampler axis = self._get_axis_number(axis) return get_resampler( cast("Series | DataFrame", self), freq=rule, label=label, closed=closed, axis=axis, kind=kind, convention=convention, key=on, level=level, origin=origin, offset=offset, group_keys=group_keys, ) def first(self: NDFrameT, offset) -> NDFrameT: """ Select initial periods of time series data based on a date offset. When having a DataFrame with dates as index, this function can select the first few rows based on a date offset. Parameters ---------- offset : str, DateOffset or dateutil.relativedelta The offset length of the data that will be selected. For instance, '1M' will display all the rows having their index within the first month. Returns ------- Series or DataFrame A subset of the caller. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- last : Select final periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. 
Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the first 3 days: >>> ts.first('3D') A 2018-04-09 1 2018-04-11 2 Notice the data for 3 first calendar days were returned, not the first 3 days observed in the dataset, and therefore data for 2018-04-13 was not returned. """ if not isinstance(self.index, DatetimeIndex): raise TypeError("'first' only supports a DatetimeIndex index") if len(self.index) == 0: return self.copy(deep=False) offset = to_offset(offset) if not isinstance(offset, Tick) and offset.is_on_offset(self.index[0]): # GH#29623 if first value is end of period, remove offset with n = 1 # before adding the real offset end_date = end = self.index[0] - offset.base + offset else: end_date = end = self.index[0] + offset # Tick-like, e.g. 3 weeks if isinstance(offset, Tick) and end_date in self.index: end = self.index.searchsorted(end_date, side="left") return self.iloc[:end] return self.loc[:end] def last(self: NDFrameT, offset) -> NDFrameT: """ Select final periods of time series data based on a date offset. For a DataFrame with a sorted DatetimeIndex, this function selects the last few rows based on a date offset. Parameters ---------- offset : str, DateOffset, dateutil.relativedelta The offset length of the data that will be selected. For instance, '3D' will display all the rows having their index within the last 3 days. Returns ------- Series or DataFrame A subset of the caller. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- first : Select initial periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the last 3 days: >>> ts.last('3D') A 2018-04-13 3 2018-04-15 4 Notice the data for 3 last calendar days were returned, not the last 3 observed days in the dataset, and therefore data for 2018-04-11 was not returned. """ if not isinstance(self.index, DatetimeIndex): raise TypeError("'last' only supports a DatetimeIndex index") if len(self.index) == 0: return self.copy(deep=False) offset = to_offset(offset) start_date = self.index[-1] - offset start = self.index.searchsorted(start_date, side="right") return self.iloc[start:] def rank( self: NDFrameT, axis: Axis = 0, method: str = "average", numeric_only: bool_t = False, na_option: str = "keep", ascending: bool_t = True, pct: bool_t = False, ) -> NDFrameT: """ Compute numerical data ranks (1 through n) along axis. By default, equal values are assigned a rank that is the average of the ranks of those values. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 Index to direct ranking. For `Series` this parameter is unused and defaults to 0. method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' How to rank the group of records that have the same value (i.e. ties): * average: average rank of the group * min: lowest rank in the group * max: highest rank in the group * first: ranks assigned in order they appear in the array * dense: like 'min', but rank always increases by 1 between groups. numeric_only : bool, default False For DataFrame objects, rank only numeric columns if set to True. .. 
versionchanged:: 2.0.0 The default value of ``numeric_only`` is now ``False``. na_option : {'keep', 'top', 'bottom'}, default 'keep' How to rank NaN values: * keep: assign NaN rank to NaN values * top: assign lowest rank to NaN values * bottom: assign highest rank to NaN values ascending : bool, default True Whether or not the elements should be ranked in ascending order. pct : bool, default False Whether or not to display the returned rankings in percentile form. Returns ------- same type as caller Return a Series or DataFrame with data ranks as values. See Also -------- core.groupby.DataFrameGroupBy.rank : Rank of values within each group. core.groupby.SeriesGroupBy.rank : Rank of values within each group. Examples -------- >>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog', ... 'spider', 'snake'], ... 'Number_legs': [4, 2, 4, 8, np.nan]}) >>> df Animal Number_legs 0 cat 4.0 1 penguin 2.0 2 dog 4.0 3 spider 8.0 4 snake NaN Ties are assigned the mean of the ranks (by default) for the group. >>> s = pd.Series(range(5), index=list("abcde")) >>> s["d"] = s["b"] >>> s.rank() a 1.0 b 2.5 c 4.0 d 2.5 e 5.0 dtype: float64 The following example shows how the method behaves with the above parameters: * default_rank: this is the default behaviour obtained without using any parameter. * max_rank: setting ``method = 'max'`` the records that have the same values are ranked using the highest rank (e.g.: since 'cat' and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.) * NA_bottom: choosing ``na_option = 'bottom'``, if there are records with NaN values they are placed at the bottom of the ranking. * pct_rank: when setting ``pct = True``, the ranking is expressed as percentile rank. >>> df['default_rank'] = df['Number_legs'].rank() >>> df['max_rank'] = df['Number_legs'].rank(method='max') >>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom') >>> df['pct_rank'] = df['Number_legs'].rank(pct=True) >>> df Animal Number_legs default_rank max_rank NA_bottom pct_rank 0 cat 4.0 2.5 3.0 2.5 0.625 1 penguin 2.0 1.0 1.0 1.0 0.250 2 dog 4.0 2.5 3.0 2.5 0.625 3 spider 8.0 4.0 4.0 4.0 1.000 4 snake NaN NaN NaN 5.0 NaN """ axis_int = self._get_axis_number(axis) if na_option not in {"keep", "top", "bottom"}: msg = "na_option must be one of 'keep', 'top', or 'bottom'" raise ValueError(msg) def ranker(data): if data.ndim == 2: # i.e. DataFrame, we cast to ndarray values = data.values else: # i.e. Series, can dispatch to EA values = data._values if isinstance(values, ExtensionArray): ranks = values._rank( axis=axis_int, method=method, ascending=ascending, na_option=na_option, pct=pct, ) else: ranks = algos.rank( values, axis=axis_int, method=method, ascending=ascending, na_option=na_option, pct=pct, ) ranks_obj = self._constructor(ranks, **data._construct_axes_dict()) return ranks_obj.__finalize__(self, method="rank") if numeric_only: if self.ndim == 1 and not is_numeric_dtype(self.dtype): # GH#47500 raise TypeError( "Series.rank does not allow numeric_only=True with " "non-numeric dtype." 
) data = self._get_numeric_data() else: data = self return ranker(data) def compare( self, other, align_axis: Axis = 1, keep_shape: bool_t = False, keep_equal: bool_t = False, result_names: Suffixes = ("self", "other"), ): if type(self) is not type(other): cls_self, cls_other = type(self).__name__, type(other).__name__ raise TypeError( f"can only compare '{cls_self}' (not '{cls_other}') with '{cls_self}'" ) mask = ~((self == other) | (self.isna() & other.isna())) mask.fillna(True, inplace=True) if not keep_equal: self = self.where(mask) other = other.where(mask) if not keep_shape: if isinstance(self, ABCDataFrame): cmask = mask.any() rmask = mask.any(axis=1) self = self.loc[rmask, cmask] other = other.loc[rmask, cmask] else: self = self[mask] other = other[mask] if not isinstance(result_names, tuple): raise TypeError( f"Passing 'result_names' as a {type(result_names)} is not " "supported. Provide 'result_names' as a tuple instead." ) if align_axis in (1, "columns"): # This is needed for Series axis = 1 else: axis = self._get_axis_number(align_axis) diff = concat([self, other], axis=axis, keys=result_names) if axis >= self.ndim: # No need to reorganize data if stacking on new axis # This currently applies for stacking two Series on columns return diff ax = diff._get_axis(axis) ax_names = np.array(ax.names) # set index names to positions to avoid confusion ax.names = np.arange(len(ax_names)) # bring self-other to inner level order = list(range(1, ax.nlevels)) + [0] if isinstance(diff, ABCDataFrame): diff = diff.reorder_levels(order, axis=axis) else: diff = diff.reorder_levels(order) # restore the index names in order diff._get_axis(axis=axis).names = ax_names[order] # reorder axis to keep things organized indices = ( np.arange(diff.shape[axis]).reshape([2, diff.shape[axis] // 2]).T.flatten() ) diff = diff.take(indices, axis=axis) return diff def align( self: NDFrameT, other: NDFrameT, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool_t | None = None, fill_value: Hashable = None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> NDFrameT: """ Align two objects on their axes with the specified join method. Join method is specified for each axis Index. Parameters ---------- other : DataFrame or Series join : {{'outer', 'inner', 'left', 'right'}}, default 'outer' axis : allowed axis of the other object, default None Align on index (0), columns (1), or both (None). level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. copy : bool, default True Always returns new objects. If copy=False and no reindexing is required then original objects are returned. fill_value : scalar, default np.NaN Value to use for missing values. Defaults to NaN, but can be any "compatible" value. method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None Method to use for filling holes in reindexed Series: - pad / ffill: propagate last valid observation forward to next valid. - backfill / bfill: use NEXT valid observation to fill gap. limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. 
fill_axis : {axes_single_arg}, default 0 Filling axis, method and limit. broadcast_axis : {axes_single_arg}, default None Broadcast values along this axis, if aligning two objects of different dimensions. Returns ------- tuple of ({klass}, type of other) Aligned objects. Examples -------- >>> df = pd.DataFrame( ... [[1, 2, 3, 4], [6, 7, 8, 9]], columns=["D", "B", "E", "A"], index=[1, 2] ... ) >>> other = pd.DataFrame( ... [[10, 20, 30, 40], [60, 70, 80, 90], [600, 700, 800, 900]], ... columns=["A", "B", "C", "D"], ... index=[2, 3, 4], ... ) >>> df D B E A 1 1 2 3 4 2 6 7 8 9 >>> other A B C D 2 10 20 30 40 3 60 70 80 90 4 600 700 800 900 Align on columns: >>> left, right = df.align(other, join="outer", axis=1) >>> left A B C D E 1 4 2 NaN 1 3 2 9 7 NaN 6 8 >>> right A B C D E 2 10 20 30 40 NaN 3 60 70 80 90 NaN 4 600 700 800 900 NaN We can also align on the index: >>> left, right = df.align(other, join="outer", axis=0) >>> left D B E A 1 1.0 2.0 3.0 4.0 2 6.0 7.0 8.0 9.0 3 NaN NaN NaN NaN 4 NaN NaN NaN NaN >>> right A B C D 1 NaN NaN NaN NaN 2 10.0 20.0 30.0 40.0 3 60.0 70.0 80.0 90.0 4 600.0 700.0 800.0 900.0 Finally, the default `axis=None` will align on both index and columns: >>> left, right = df.align(other, join="outer", axis=None) >>> left A B C D E 1 4.0 2.0 NaN 1.0 3.0 2 9.0 7.0 NaN 6.0 8.0 3 NaN NaN NaN NaN NaN 4 NaN NaN NaN NaN NaN >>> right A B C D E 1 NaN NaN NaN NaN NaN 2 10.0 20.0 30.0 40.0 NaN 3 60.0 70.0 80.0 90.0 NaN 4 600.0 700.0 800.0 900.0 NaN """ method = clean_fill_method(method) if broadcast_axis == 1 and self.ndim != other.ndim: if isinstance(self, ABCSeries): # this means other is a DataFrame, and we need to broadcast # self cons = self._constructor_expanddim df = cons( {c: self for c in other.columns}, **other._construct_axes_dict() ) return df._align_frame( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) elif isinstance(other, ABCSeries): # this means self is a DataFrame, and we need to broadcast # other cons = other._constructor_expanddim df = cons( {c: other for c in self.columns}, **self._construct_axes_dict() ) return self._align_frame( df, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) if axis is not None: axis = self._get_axis_number(axis) if isinstance(other, ABCDataFrame): return self._align_frame( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) elif isinstance(other, ABCSeries): return self._align_series( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def _align_frame( self, other, join: AlignJoin = "outer", axis: Axis | None = None, level=None, copy: bool_t | None = None, fill_value=None, method=None, limit=None, fill_axis: Axis = 0, ): # defaults join_index, join_columns = None, None ilidx, iridx = None, None clidx, cridx = None, None is_series = isinstance(self, ABCSeries) if (axis is None or axis == 0) and not self.index.equals(other.index): join_index, ilidx, iridx = self.index.join( other.index, how=join, level=level, return_indexers=True ) if ( (axis is None or axis == 1) and not is_series and not self.columns.equals(other.columns) ): join_columns, clidx, cridx = self.columns.join( other.columns, how=join, level=level, return_indexers=True ) if is_series: reindexers 
= {0: [join_index, ilidx]} else: reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]} left = self._reindex_with_indexers( reindexers, copy=copy, fill_value=fill_value, allow_dups=True ) # other must be always DataFrame right = other._reindex_with_indexers( {0: [join_index, iridx], 1: [join_columns, cridx]}, copy=copy, fill_value=fill_value, allow_dups=True, ) if method is not None: _left = left.fillna(method=method, axis=fill_axis, limit=limit) assert _left is not None # needed for mypy left = _left right = right.fillna(method=method, axis=fill_axis, limit=limit) # if DatetimeIndex have different tz, convert to UTC left, right = _align_as_utc(left, right, join_index) return ( left.__finalize__(self), right.__finalize__(other), ) def _align_series( self, other, join: AlignJoin = "outer", axis: Axis | None = None, level=None, copy: bool_t | None = None, fill_value=None, method=None, limit=None, fill_axis: Axis = 0, ): is_series = isinstance(self, ABCSeries) if copy and using_copy_on_write(): copy = False if (not is_series and axis is None) or axis not in [None, 0, 1]: raise ValueError("Must specify axis=0 or 1") if is_series and axis == 1: raise ValueError("cannot align series to a series other than axis 0") # series/series compat, other must always be a Series if not axis: # equal if self.index.equals(other.index): join_index, lidx, ridx = None, None, None else: join_index, lidx, ridx = self.index.join( other.index, how=join, level=level, return_indexers=True ) if is_series: left = self._reindex_indexer(join_index, lidx, copy) elif lidx is None or join_index is None: left = self.copy(deep=copy) else: left = self._constructor( self._mgr.reindex_indexer(join_index, lidx, axis=1, copy=copy) ) right = other._reindex_indexer(join_index, ridx, copy) else: # one has > 1 ndim fdata = self._mgr join_index = self.axes[1] lidx, ridx = None, None if not join_index.equals(other.index): join_index, lidx, ridx = join_index.join( other.index, how=join, level=level, return_indexers=True ) if lidx is not None: bm_axis = self._get_block_manager_axis(1) fdata = fdata.reindex_indexer(join_index, lidx, axis=bm_axis) if copy and fdata is self._mgr: fdata = fdata.copy() left = self._constructor(fdata) if ridx is None: right = other.copy(deep=copy) else: right = other.reindex(join_index, level=level) # fill fill_na = notna(fill_value) or (method is not None) if fill_na: left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis) right = right.fillna(fill_value, method=method, limit=limit) # if DatetimeIndex have different tz, convert to UTC if is_series or (not is_series and axis == 0): left, right = _align_as_utc(left, right, join_index) return ( left.__finalize__(self), right.__finalize__(other), ) def _where( self, cond, other=lib.no_default, inplace: bool_t = False, axis: Axis | None = None, level=None, ): """ Equivalent to public method `where`, except that `other` is not applied as a function even if callable. Used in __setitem__. 
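
        When ``inplace=True``, the condition is inverted and the result is
        written through the underlying manager's ``putmask``; otherwise a new
        object is constructed via the manager's ``where``.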
""" inplace = validate_bool_kwarg(inplace, "inplace") if axis is not None: axis = self._get_axis_number(axis) # align the cond to same shape as myself cond = common.apply_if_callable(cond, self) if isinstance(cond, NDFrame): # CoW: Make sure reference is not kept alive cond = cond.align(self, join="right", broadcast_axis=1, copy=False)[0] else: if not hasattr(cond, "shape"): cond = np.asanyarray(cond) if cond.shape != self.shape: raise ValueError("Array conditional must be same shape as self") cond = self._constructor(cond, **self._construct_axes_dict(), copy=False) # make sure we are boolean fill_value = bool(inplace) cond = cond.fillna(fill_value) msg = "Boolean array expected for the condition, not {dtype}" if not cond.empty: if not isinstance(cond, ABCDataFrame): # This is a single-dimensional object. if not is_bool_dtype(cond): raise ValueError(msg.format(dtype=cond.dtype)) else: for _dt in cond.dtypes: if not is_bool_dtype(_dt): raise ValueError(msg.format(dtype=_dt)) else: # GH#21947 we have an empty DataFrame/Series, could be object-dtype cond = cond.astype(bool) cond = -cond if inplace else cond cond = cond.reindex(self._info_axis, axis=self._info_axis_number, copy=False) # try to align with other if isinstance(other, NDFrame): # align with me if other.ndim <= self.ndim: # CoW: Make sure reference is not kept alive other = self.align( other, join="left", axis=axis, level=level, fill_value=None, copy=False, )[1] # if we are NOT aligned, raise as we cannot where index if axis is None and not other._indexed_same(self): raise InvalidIndexError if other.ndim < self.ndim: # TODO(EA2D): avoid object-dtype cast in EA case GH#38729 other = other._values if axis == 0: other = np.reshape(other, (-1, 1)) elif axis == 1: other = np.reshape(other, (1, -1)) other = np.broadcast_to(other, self.shape) # slice me out of the other else: raise NotImplementedError( "cannot align with a higher dimensional NDFrame" ) elif not isinstance(other, (MultiIndex, NDFrame)): # mainly just catching Index here other = extract_array(other, extract_numpy=True) if isinstance(other, (np.ndarray, ExtensionArray)): if other.shape != self.shape: if self.ndim != 1: # In the ndim == 1 case we may have # other length 1, which we treat as scalar (GH#2745, GH#4192) # or len(other) == icond.sum(), which we treat like # __setitem__ (GH#3235) raise ValueError( "other must be the same shape as self when an ndarray" ) # we are the same shape, so create an actual object for alignment else: other = self._constructor( other, **self._construct_axes_dict(), copy=False ) if axis is None: axis = 0 if self.ndim == getattr(other, "ndim", 0): align = True else: align = self._get_axis_number(axis) == 1 if inplace: # we may have different type blocks come out of putmask, so # reconstruct the block manager self._check_inplace_setting(other) new_data = self._mgr.putmask(mask=cond, new=other, align=align) result = self._constructor(new_data) return self._update_inplace(result) else: new_data = self._mgr.where( other=other, cond=cond, align=align, ) result = self._constructor(new_data) return result.__finalize__(self) def where( self: NDFrameT, cond, other=..., *, inplace: Literal[False] = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT: ... def where( self, cond, other=..., *, inplace: Literal[True], axis: Axis | None = ..., level: Level = ..., ) -> None: ... def where( self: NDFrameT, cond, other=..., *, inplace: bool_t = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT | None: ... 
    @doc(
        klass=_shared_doc_kwargs["klass"],
        cond="True",
        cond_rev="False",
        name="where",
        name_other="mask",
    )
    def where(
        self: NDFrameT,
        cond,
        other=np.nan,
        *,
        inplace: bool_t = False,
        axis: Axis | None = None,
        level: Level = None,
    ) -> NDFrameT | None:
        """
        Replace values where the condition is {cond_rev}.

        Parameters
        ----------
        cond : bool {klass}, array-like, or callable
            Where `cond` is {cond}, keep the original value. Where
            {cond_rev}, replace with corresponding value from `other`.
            If `cond` is callable, it is computed on the {klass} and
            should return boolean {klass} or array. The callable must
            not change input {klass} (though pandas doesn't check it).
        other : scalar, {klass}, or callable
            Entries where `cond` is {cond_rev} are replaced with
            corresponding value from `other`.
            If other is callable, it is computed on the {klass} and
            should return scalar or {klass}. The callable must not
            change input {klass} (though pandas doesn't check it).
            If not specified, entries will be filled with the corresponding
            NULL value (``np.nan`` for numpy dtypes, ``pd.NA`` for extension
            dtypes).
        inplace : bool, default False
            Whether to perform the operation in place on the data.
        axis : int, default None
            Alignment axis if needed. For `Series` this parameter is
            unused and defaults to 0.
        level : int, default None
            Alignment level if needed.

        Returns
        -------
        Same type as caller or None if ``inplace=True``.

        See Also
        --------
        :func:`DataFrame.{name_other}` : Return an object of same shape as
            self.

        Notes
        -----
        The {name} method is an application of the if-then idiom. For each
        element in the calling DataFrame, if ``cond`` is ``{cond}`` the
        element is used; otherwise the corresponding element from the
        DataFrame ``other`` is used. If the axis of ``other`` does not align
        with axis of ``cond`` {klass}, the misaligned index positions will be
        filled with {cond_rev}.

        The signature for :func:`DataFrame.where` differs from
        :func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to
        ``np.where(m, df1, df2)``.

        For further details and examples see the ``{name}`` documentation in
        :ref:`indexing <indexing.where_mask>`.

        The dtype of the object takes precedence. The fill value is cast to
        the object's dtype, if this can be done losslessly.

        Examples
        --------
        >>> s = pd.Series(range(5))
        >>> s.where(s > 0)
        0    NaN
        1    1.0
        2    2.0
        3    3.0
        4    4.0
        dtype: float64
        >>> s.mask(s > 0)
        0    0.0
        1    NaN
        2    NaN
        3    NaN
        4    NaN
        dtype: float64

        >>> s = pd.Series(range(5))
        >>> t = pd.Series([True, False])
        >>> s.where(t, 99)
        0     0
        1    99
        2    99
        3    99
        4    99
        dtype: int64
        >>> s.mask(t, 99)
        0    99
        1     1
        2    99
        3    99
        4    99
        dtype: int64

        >>> s.where(s > 1, 10)
        0    10
        1    10
        2     2
        3     3
        4     4
        dtype: int64
        >>> s.mask(s > 1, 10)
        0     0
        1     1
        2    10
        3    10
        4    10
        dtype: int64

        >>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])
        >>> df
           A  B
        0  0  1
        1  2  3
        2  4  5
        3  6  7
        4  8  9
        >>> m = df % 3 == 0
        >>> df.where(m, -df)
           A  B
        0  0 -1
        1 -2  3
        2 -4 -5
        3  6 -7
        4 -8  9
        >>> df.where(m, -df) == np.where(m, df, -df)
              A     B
        0  True  True
        1  True  True
        2  True  True
        3  True  True
        4  True  True
        >>> df.where(m, -df) == df.mask(~m, -df)
              A     B
        0  True  True
        1  True  True
        2  True  True
        3  True  True
        4  True  True
        """
        other = common.apply_if_callable(other, self)
        return self._where(cond, other, inplace, axis, level)

    @overload
    def mask(
        self: NDFrameT,
        cond,
        other=...,
        *,
        inplace: Literal[False] = ...,
        axis: Axis | None = ...,
        level: Level = ...,
    ) -> NDFrameT:
        ...

    @overload
    def mask(
        self,
        cond,
        other=...,
        *,
        inplace: Literal[True],
        axis: Axis | None = ...,
        level: Level = ...,
    ) -> None:
        ...
    @overload
    def mask(
        self: NDFrameT,
        cond,
        other=...,
        *,
        inplace: bool_t = ...,
        axis: Axis | None = ...,
        level: Level = ...,
    ) -> NDFrameT | None:
        ...

    @doc(
        where,
        klass=_shared_doc_kwargs["klass"],
        cond="False",
        cond_rev="True",
        name="mask",
        name_other="where",
    )
    def mask(
        self: NDFrameT,
        cond,
        other=lib.no_default,
        *,
        inplace: bool_t = False,
        axis: Axis | None = None,
        level: Level = None,
    ) -> NDFrameT | None:
        inplace = validate_bool_kwarg(inplace, "inplace")
        cond = common.apply_if_callable(cond, self)

        # see gh-21891
        if not hasattr(cond, "__invert__"):
            cond = np.array(cond)

        return self.where(
            ~cond,
            other=other,
            inplace=inplace,
            axis=axis,
            level=level,
        )

    def shift(
        self: NDFrameT,
        periods: int = 1,
        freq=None,
        axis: Axis = 0,
        fill_value: Hashable = None,
    ) -> NDFrameT:
        """
        Shift index by desired number of periods with an optional time `freq`.

        When `freq` is not passed, shift the index without realigning the data.
        If `freq` is passed (in this case, the index must be date or datetime,
        or it will raise a `NotImplementedError`), the index will be
        increased using the periods and the `freq`. `freq` can be inferred
        when specified as "infer" as long as either freq or inferred_freq
        attribute is set in the index.

        Parameters
        ----------
        periods : int
            Number of periods to shift. Can be positive or negative.
        freq : DateOffset, tseries.offsets, timedelta, or str, optional
            Offset to use from the tseries module or time rule (e.g. 'EOM').
            If `freq` is specified then the index values are shifted but the
            data is not realigned. That is, use `freq` if you would like to
            extend the index when shifting and preserve the original data.
            If `freq` is specified as "infer" then it will be inferred from
            the freq or inferred_freq attributes of the index. If neither of
            those attributes exist, a ValueError is thrown.
        axis : {{0 or 'index', 1 or 'columns', None}}, default 0
            Shift direction. For `Series` this parameter is unused and
            defaults to 0.
        fill_value : object, optional
            The scalar value to use for newly introduced missing values.
            The default depends on the dtype of `self`.
            For numeric data, ``np.nan`` is used.
            For datetime, timedelta, or period data, etc. :attr:`NaT` is used.
            For extension dtypes, ``self.dtype.na_value`` is used.

            .. versionchanged:: 1.1.0

        Returns
        -------
        {klass}
            Copy of input object, shifted.

        See Also
        --------
        Index.shift : Shift values of Index.
        DatetimeIndex.shift : Shift values of DatetimeIndex.
        PeriodIndex.shift : Shift values of PeriodIndex.

        Examples
        --------
        >>> df = pd.DataFrame({{"Col1": [10, 20, 15, 30, 45],
        ...                    "Col2": [13, 23, 18, 33, 48],
        ...                    "Col3": [17, 27, 22, 37, 52]}},
        ...                    
index=pd.date_range("2020-01-01", "2020-01-05")) >>> df Col1 Col2 Col3 2020-01-01 10 13 17 2020-01-02 20 23 27 2020-01-03 15 18 22 2020-01-04 30 33 37 2020-01-05 45 48 52 >>> df.shift(periods=3) Col1 Col2 Col3 2020-01-01 NaN NaN NaN 2020-01-02 NaN NaN NaN 2020-01-03 NaN NaN NaN 2020-01-04 10.0 13.0 17.0 2020-01-05 20.0 23.0 27.0 >>> df.shift(periods=1, axis="columns") Col1 Col2 Col3 2020-01-01 NaN 10 13 2020-01-02 NaN 20 23 2020-01-03 NaN 15 18 2020-01-04 NaN 30 33 2020-01-05 NaN 45 48 >>> df.shift(periods=3, fill_value=0) Col1 Col2 Col3 2020-01-01 0 0 0 2020-01-02 0 0 0 2020-01-03 0 0 0 2020-01-04 10 13 17 2020-01-05 20 23 27 >>> df.shift(periods=3, freq="D") Col1 Col2 Col3 2020-01-04 10 13 17 2020-01-05 20 23 27 2020-01-06 15 18 22 2020-01-07 30 33 37 2020-01-08 45 48 52 >>> df.shift(periods=3, freq="infer") Col1 Col2 Col3 2020-01-04 10 13 17 2020-01-05 20 23 27 2020-01-06 15 18 22 2020-01-07 30 33 37 2020-01-08 45 48 52 """ if periods == 0: return self.copy(deep=None) if freq is None: # when freq is None, data is shifted, index is not axis = self._get_axis_number(axis) new_data = self._mgr.shift( periods=periods, axis=axis, fill_value=fill_value ) return self._constructor(new_data).__finalize__(self, method="shift") # when freq is given, index is shifted, data is not index = self._get_axis(axis) if freq == "infer": freq = getattr(index, "freq", None) if freq is None: freq = getattr(index, "inferred_freq", None) if freq is None: msg = "Freq was not set in the index hence cannot be inferred" raise ValueError(msg) elif isinstance(freq, str): freq = to_offset(freq) if isinstance(index, PeriodIndex): orig_freq = to_offset(index.freq) if freq != orig_freq: assert orig_freq is not None # for mypy raise ValueError( f"Given freq {freq.rule_code} does not match " f"PeriodIndex freq {orig_freq.rule_code}" ) new_ax = index.shift(periods) else: new_ax = index.shift(periods, freq) result = self.set_axis(new_ax, axis=axis) return result.__finalize__(self, method="shift") def truncate( self: NDFrameT, before=None, after=None, axis: Axis | None = None, copy: bool_t | None = None, ) -> NDFrameT: """ Truncate a Series or DataFrame before and after some index value. This is a useful shorthand for boolean indexing based on index values above or below certain thresholds. Parameters ---------- before : date, str, int Truncate all rows before this index value. after : date, str, int Truncate all rows after this index value. axis : {0 or 'index', 1 or 'columns'}, optional Axis to truncate. Truncates the index (rows) by default. For `Series` this parameter is unused and defaults to 0. copy : bool, default is True, Return a copy of the truncated section. Returns ------- type of caller The truncated Series or DataFrame. See Also -------- DataFrame.loc : Select a subset of a DataFrame by label. DataFrame.iloc : Select a subset of a DataFrame by position. Notes ----- If the index being truncated contains only datetime values, `before` and `after` may be specified as strings instead of Timestamps. Examples -------- >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'], ... 'B': ['f', 'g', 'h', 'i', 'j'], ... 'C': ['k', 'l', 'm', 'n', 'o']}, ... index=[1, 2, 3, 4, 5]) >>> df A B C 1 a f k 2 b g l 3 c h m 4 d i n 5 e j o >>> df.truncate(before=2, after=4) A B C 2 b g l 3 c h m 4 d i n The columns of a DataFrame can be truncated. >>> df.truncate(before="A", after="B", axis="columns") A B 1 a f 2 b g 3 c h 4 d i 5 e j For Series, only rows can be truncated. 
>>> df['A'].truncate(before=2, after=4) 2 b 3 c 4 d Name: A, dtype: object The index values in ``truncate`` can be datetimes or string dates. >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s') >>> df = pd.DataFrame(index=dates, data={'A': 1}) >>> df.tail() A 2016-01-31 23:59:56 1 2016-01-31 23:59:57 1 2016-01-31 23:59:58 1 2016-01-31 23:59:59 1 2016-02-01 00:00:00 1 >>> df.truncate(before=pd.Timestamp('2016-01-05'), ... after=pd.Timestamp('2016-01-10')).tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Because the index is a DatetimeIndex containing only dates, we can specify `before` and `after` as strings. They will be coerced to Timestamps before truncation. >>> df.truncate('2016-01-05', '2016-01-10').tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Note that ``truncate`` assumes a 0 value for any unspecified time component (midnight). This differs from partial string slicing, which returns any partially matching dates. >>> df.loc['2016-01-05':'2016-01-10', :].tail() A 2016-01-10 23:59:55 1 2016-01-10 23:59:56 1 2016-01-10 23:59:57 1 2016-01-10 23:59:58 1 2016-01-10 23:59:59 1 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) ax = self._get_axis(axis) # GH 17935 # Check that index is sorted if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing: raise ValueError("truncate requires a sorted index") # if we have a date index, convert to dates, otherwise # treat like a slice if ax._is_all_dates: from pandas.core.tools.datetimes import to_datetime before = to_datetime(before) after = to_datetime(after) if before is not None and after is not None and before > after: raise ValueError(f"Truncate: {after} must be after {before}") if len(ax) > 1 and ax.is_monotonic_decreasing and ax.nunique() > 1: before, after = after, before slicer = [slice(None, None)] * self._AXIS_LEN slicer[axis] = slice(before, after) result = self.loc[tuple(slicer)] if isinstance(ax, MultiIndex): setattr(result, self._get_axis_name(axis), ax.truncate(before, after)) result = result.copy(deep=copy and not using_copy_on_write()) return result def tz_convert( self: NDFrameT, tz, axis: Axis = 0, level=None, copy: bool_t | None = None ) -> NDFrameT: """ Convert tz-aware axis to target time zone. Parameters ---------- tz : str or tzinfo object or None Target time zone. Passing ``None`` will convert to UTC and remove the timezone information. axis : {{0 or 'index', 1 or 'columns'}}, default 0 The axis to convert level : int, str, default None If axis is a MultiIndex, convert a specific level. Otherwise must be None. copy : bool, default True Also make a copy of the underlying data. Returns ------- {klass} Object with time zone converted axis. Raises ------ TypeError If the axis is tz-naive. Examples -------- Change to another time zone: >>> s = pd.Series( ... [1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00']), ... ) >>> s.tz_convert('Asia/Shanghai') 2018-09-15 07:30:00+08:00 1 dtype: int64 Pass None to convert to UTC and get a tz-naive index: >>> s = pd.Series([1], ... 
index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00'])) >>> s.tz_convert(None) 2018-09-14 23:30:00 1 dtype: int64 """ axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_convert(ax, tz): if not hasattr(ax, "tz_convert"): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError( f"{ax_name} is not a valid DatetimeIndex or PeriodIndex" ) ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_convert(tz) return ax # if a level is given it must be a MultiIndex level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_convert(ax.levels[level], tz) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError(f"The level {level} is not valid") ax = _tz_convert(ax, tz) result = self.copy(deep=copy and not using_copy_on_write()) result = result.set_axis(ax, axis=axis, copy=False) return result.__finalize__(self, method="tz_convert") def tz_localize( self: NDFrameT, tz, axis: Axis = 0, level=None, copy: bool_t | None = None, ambiguous: TimeAmbiguous = "raise", nonexistent: TimeNonexistent = "raise", ) -> NDFrameT: """ Localize tz-naive index of a Series or DataFrame to target time zone. This operation localizes the Index. To localize the values in a timezone-naive Series, use :meth:`Series.dt.tz_localize`. Parameters ---------- tz : str or tzinfo or None Time zone to localize. Passing ``None`` will remove the time zone information and preserve local time. axis : {{0 or 'index', 1 or 'columns'}}, default 0 The axis to localize level : int, str, default None If axis ia a MultiIndex, localize a specific level. Otherwise must be None. copy : bool, default True Also make a copy of the underlying data. ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' When clocks moved backward due to DST, ambiguous times may arise. For example in Central European Time (UTC+01), when going from 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter dictates how ambiguous times should be handled. - 'infer' will attempt to infer fall dst-transition hours based on order - bool-ndarray where True signifies a DST time, False designates a non-DST time (note that this flag is only applicable for ambiguous times) - 'NaT' will return NaT where there are ambiguous times - 'raise' will raise an AmbiguousTimeError if there are ambiguous times. nonexistent : str, default 'raise' A nonexistent time does not exist in a particular timezone where clocks moved forward due to DST. Valid values are: - 'shift_forward' will shift the nonexistent time forward to the closest existing time - 'shift_backward' will shift the nonexistent time backward to the closest existing time - 'NaT' will return NaT where there are nonexistent times - timedelta objects will shift nonexistent times by the timedelta - 'raise' will raise an NonExistentTimeError if there are nonexistent times. Returns ------- {klass} Same type as the input. Raises ------ TypeError If the TimeSeries is tz-aware and tz is not None. Examples -------- Localize local times: >>> s = pd.Series( ... [1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00']), ... ) >>> s.tz_localize('CET') 2018-09-15 01:30:00+02:00 1 dtype: int64 Pass None to convert to tz-naive index and preserve local time: >>> s = pd.Series([1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00'])) >>> s.tz_localize(None) 2018-09-15 01:30:00 1 dtype: int64 Be careful with DST changes. 
When there is sequential data, pandas can infer the DST time: >>> s = pd.Series(range(7), ... index=pd.DatetimeIndex(['2018-10-28 01:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 03:00:00', ... '2018-10-28 03:30:00'])) >>> s.tz_localize('CET', ambiguous='infer') 2018-10-28 01:30:00+02:00 0 2018-10-28 02:00:00+02:00 1 2018-10-28 02:30:00+02:00 2 2018-10-28 02:00:00+01:00 3 2018-10-28 02:30:00+01:00 4 2018-10-28 03:00:00+01:00 5 2018-10-28 03:30:00+01:00 6 dtype: int64 In some cases, inferring the DST is impossible. In such cases, you can pass an ndarray to the ambiguous parameter to set the DST explicitly >>> s = pd.Series(range(3), ... index=pd.DatetimeIndex(['2018-10-28 01:20:00', ... '2018-10-28 02:36:00', ... '2018-10-28 03:46:00'])) >>> s.tz_localize('CET', ambiguous=np.array([True, True, False])) 2018-10-28 01:20:00+02:00 0 2018-10-28 02:36:00+02:00 1 2018-10-28 03:46:00+01:00 2 dtype: int64 If the DST transition causes nonexistent times, you can shift these dates forward or backward with a timedelta object or `'shift_forward'` or `'shift_backward'`. >>> s = pd.Series(range(2), ... index=pd.DatetimeIndex(['2015-03-29 02:30:00', ... '2015-03-29 03:30:00'])) >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward') 2015-03-29 03:00:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward') 2015-03-29 01:59:59.999999999+01:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H')) 2015-03-29 03:30:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 """ nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward") if nonexistent not in nonexistent_options and not isinstance( nonexistent, dt.timedelta ): raise ValueError( "The nonexistent argument must be one of 'raise', " "'NaT', 'shift_forward', 'shift_backward' or " "a timedelta object" ) axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_localize(ax, tz, ambiguous, nonexistent): if not hasattr(ax, "tz_localize"): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError( f"{ax_name} is not a valid DatetimeIndex or PeriodIndex" ) ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent) return ax # if a level is given it must be a MultiIndex level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError(f"The level {level} is not valid") ax = _tz_localize(ax, tz, ambiguous, nonexistent) result = self.copy(deep=copy and not using_copy_on_write()) result = result.set_axis(ax, axis=axis, copy=False) return result.__finalize__(self, method="tz_localize") # ---------------------------------------------------------------------- # Numeric Methods def describe( self: NDFrameT, percentiles=None, include=None, exclude=None, ) -> NDFrameT: """ Generate descriptive statistics. Descriptive statistics include those that summarize the central tendency, dispersion and shape of a dataset's distribution, excluding ``NaN`` values. Analyzes both numeric and object series, as well as ``DataFrame`` column sets of mixed data types. The output will vary depending on what is provided. Refer to the notes below for more detail. 
Parameters
        ----------
        percentiles : list-like of numbers, optional
            The percentiles to include in the output. All should
            fall between 0 and 1. The default is
            ``[.25, .5, .75]``, which returns the 25th, 50th, and
            75th percentiles.
        include : 'all', list-like of dtypes or None (default), optional
            A white list of data types to include in the result. Ignored
            for ``Series``. Here are the options:

            - 'all' : All columns of the input will be included in the output.
            - A list-like of dtypes : Limits the results to the
              provided data types.
              To limit the result to numeric types submit
              ``numpy.number``. To limit it instead to object columns submit
              the ``numpy.object`` data type. Strings
              can also be used in the style of
              ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
              select pandas categorical columns, use ``'category'``
            - None (default) : The result will include all numeric columns.
        exclude : list-like of dtypes or None (default), optional
            A black list of data types to omit from the result. Ignored
            for ``Series``. Here are the options:

            - A list-like of dtypes : Excludes the provided data types
              from the result. To exclude numeric types submit
              ``numpy.number``. To exclude object columns submit the data
              type ``numpy.object``. Strings can also be used in the style of
              ``select_dtypes`` (e.g. ``df.describe(exclude=['O'])``). To
              exclude pandas categorical columns, use ``'category'``
            - None (default) : The result will exclude nothing.

        Returns
        -------
        Series or DataFrame
            Summary statistics of the Series or DataFrame provided.

        See Also
        --------
        DataFrame.count: Count number of non-NA/null observations.
        DataFrame.max: Maximum of the values in the object.
        DataFrame.min: Minimum of the values in the object.
        DataFrame.mean: Mean of the values.
        DataFrame.std: Standard deviation of the observations.
        DataFrame.select_dtypes: Subset of a DataFrame including/excluding
            columns based on their dtype.

        Notes
        -----
        For numeric data, the result's index will include ``count``,
        ``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and
        upper percentiles. By default the lower percentile is ``25`` and the
        upper percentile is ``75``. The ``50`` percentile is the
        same as the median.

        For object data (e.g. strings or timestamps), the result's index
        will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``
        is the most common value. The ``freq`` is the most common value's
        frequency. Timestamps also include the ``first`` and ``last`` items.

        If multiple object values have the highest count, then the
        ``count`` and ``top`` results will be arbitrarily chosen from
        among those with the highest count.

        For mixed data types provided via a ``DataFrame``, the default is to
        return only an analysis of numeric columns. If the dataframe consists
        only of object and categorical data without any numeric columns, the
        default is to return an analysis of both the object and categorical
        columns. If ``include='all'`` is provided as an option, the result
        will include a union of attributes of each type.

        The `include` and `exclude` parameters can be used to limit
        which columns in a ``DataFrame`` are analyzed for the output.
        The parameters are ignored when analyzing a ``Series``.

        Examples
        --------
        Describing a numeric ``Series``.

        >>> s = pd.Series([1, 2, 3])
        >>> s.describe()
        count    3.0
        mean     2.0
        std      1.0
        min      1.0
        25%      1.5
        50%      2.0
        75%      2.5
        max      3.0
        dtype: float64

        Describing a categorical ``Series``.

        >>> s = pd.Series(['a', 'a', 'b', 'c'])
        >>> s.describe()
        count     4
        unique    3
        top       a
        freq      2
        dtype: object

        Describing a timestamp ``Series``.
>>> s = pd.Series([ ... np.datetime64("2000-01-01"), ... np.datetime64("2010-01-01"), ... np.datetime64("2010-01-01") ... ]) >>> s.describe() count 3 mean 2006-09-01 08:00:00 min 2000-01-01 00:00:00 25% 2004-12-31 12:00:00 50% 2010-01-01 00:00:00 75% 2010-01-01 00:00:00 max 2010-01-01 00:00:00 dtype: object Describing a ``DataFrame``. By default only numeric fields are returned. >>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']), ... 'numeric': [1, 2, 3], ... 'object': ['a', 'b', 'c'] ... }) >>> df.describe() numeric count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Describing all columns of a ``DataFrame`` regardless of data type. >>> df.describe(include='all') # doctest: +SKIP categorical numeric object count 3 3.0 3 unique 3 NaN 3 top f NaN a freq 1 NaN 1 mean NaN 2.0 NaN std NaN 1.0 NaN min NaN 1.0 NaN 25% NaN 1.5 NaN 50% NaN 2.0 NaN 75% NaN 2.5 NaN max NaN 3.0 NaN Describing a column from a ``DataFrame`` by accessing it as an attribute. >>> df.numeric.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Name: numeric, dtype: float64 Including only numeric columns in a ``DataFrame`` description. >>> df.describe(include=[np.number]) numeric count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Including only string columns in a ``DataFrame`` description. >>> df.describe(include=[object]) # doctest: +SKIP object count 3 unique 3 top a freq 1 Including only categorical columns from a ``DataFrame`` description. >>> df.describe(include=['category']) categorical count 3 unique 3 top d freq 1 Excluding numeric columns from a ``DataFrame`` description. >>> df.describe(exclude=[np.number]) # doctest: +SKIP categorical object count 3 3 unique 3 3 top f a freq 1 1 Excluding object columns from a ``DataFrame`` description. >>> df.describe(exclude=[object]) # doctest: +SKIP categorical numeric count 3 3.0 unique 3 NaN top f NaN freq 1 NaN mean NaN 2.0 std NaN 1.0 min NaN 1.0 25% NaN 1.5 50% NaN 2.0 75% NaN 2.5 max NaN 3.0 """ return describe_ndframe( obj=self, include=include, exclude=exclude, percentiles=percentiles, ) def pct_change( self: NDFrameT, periods: int = 1, fill_method: Literal["backfill", "bfill", "pad", "ffill"] | None = "pad", limit=None, freq=None, **kwargs, ) -> NDFrameT: """ Percentage change between the current and a prior element. Computes the percentage change from the immediately previous row by default. This is useful in comparing the percentage of change in a time series of elements. Parameters ---------- periods : int, default 1 Periods to shift for forming percent change. fill_method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad' How to handle NAs **before** computing percent changes. limit : int, default None The number of consecutive NAs to fill before stopping. freq : DateOffset, timedelta, or str, optional Increment to use from time series API (e.g. 'M' or BDay()). **kwargs Additional keyword arguments are passed into `DataFrame.shift` or `Series.shift`. Returns ------- Series or DataFrame The same type as the calling object. See Also -------- Series.diff : Compute the difference of two elements in a Series. DataFrame.diff : Compute the difference of two elements in a DataFrame. Series.shift : Shift the index by some number of periods. DataFrame.shift : Shift the index by some number of periods. 
Examples
        --------
        **Series**

        >>> s = pd.Series([90, 91, 85])
        >>> s
        0    90
        1    91
        2    85
        dtype: int64

        >>> s.pct_change()
        0         NaN
        1    0.011111
        2   -0.065934
        dtype: float64

        >>> s.pct_change(periods=2)
        0         NaN
        1         NaN
        2   -0.055556
        dtype: float64

        See the percentage change in a Series where NAs are filled with the
        last valid observation, carried forward to the next valid one.

        >>> s = pd.Series([90, 91, None, 85])
        >>> s
        0    90.0
        1    91.0
        2     NaN
        3    85.0
        dtype: float64

        >>> s.pct_change(fill_method='ffill')
        0         NaN
        1    0.011111
        2    0.000000
        3   -0.065934
        dtype: float64

        **DataFrame**

        Percentage change in French franc, Deutsche Mark, and Italian lira
        from 1980-01-01 to 1980-03-01.

        >>> df = pd.DataFrame({
        ...     'FR': [4.0405, 4.0963, 4.3149],
        ...     'GR': [1.7246, 1.7482, 1.8519],
        ...     'IT': [804.74, 810.01, 860.13]},
        ...     index=['1980-01-01', '1980-02-01', '1980-03-01'])
        >>> df
                        FR      GR      IT
        1980-01-01  4.0405  1.7246  804.74
        1980-02-01  4.0963  1.7482  810.01
        1980-03-01  4.3149  1.8519  860.13

        >>> df.pct_change()
                          FR        GR        IT
        1980-01-01       NaN       NaN       NaN
        1980-02-01  0.013810  0.013684  0.006549
        1980-03-01  0.053365  0.059318  0.061876

        Percentage change in GOOG and APPL stock volume. Shows computing
        the percentage change between columns.

        >>> df = pd.DataFrame({
        ...     '2016': [1769950, 30586265],
        ...     '2015': [1500923, 40912316],
        ...     '2014': [1371819, 41403351]},
        ...     index=['GOOG', 'APPL'])
        >>> df
                  2016      2015      2014
        GOOG   1769950   1500923   1371819
        APPL  30586265  40912316  41403351

        >>> df.pct_change(axis='columns', periods=-1)
                  2016      2015  2014
        GOOG  0.179241  0.094112   NaN
        APPL -0.252395 -0.011860   NaN
        """
        axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name))
        if fill_method is None:
            data = self
        else:
            _data = self.fillna(method=fill_method, axis=axis, limit=limit)
            assert _data is not None  # needed for mypy
            data = _data

        shifted = data.shift(periods=periods, freq=freq, axis=axis, **kwargs)
        # Unsupported left operand type for / ("NDFrameT")
        rs = data / shifted - 1  # type: ignore[operator]
        if freq is not None:
            # Shift method is implemented differently when freq is not None
            # We want to restore the original index
            rs = rs.loc[~rs.index.duplicated()]
            rs = rs.reindex_like(data)
        return rs.__finalize__(self, method="pct_change")

    def _logical_func(
        self,
        name: str,
        func,
        axis: Axis = 0,
        bool_only: bool_t = False,
        skipna: bool_t = True,
        **kwargs,
    ) -> Series | bool_t:
        nv.validate_logical_func((), kwargs, fname=name)
        validate_bool_kwarg(skipna, "skipna", none_allowed=False)

        if self.ndim > 1 and axis is None:
            # Reduce along one dimension then the other, to simplify DataFrame._reduce
            res = self._logical_func(
                name, func, axis=0, bool_only=bool_only, skipna=skipna, **kwargs
            )
            return res._logical_func(name, func, skipna=skipna, **kwargs)

        if (
            self.ndim > 1
            and axis == 1
            and len(self._mgr.arrays) > 1
            # TODO(EA2D): special-case not needed
            and all(x.ndim == 2 for x in self._mgr.arrays)
            and not kwargs
        ):
            # Fastpath avoiding potentially expensive transpose
            obj = self
            if bool_only:
                obj = self._get_bool_data()
            return obj._reduce_axis1(name, func, skipna=skipna)

        return self._reduce(
            func,
            name=name,
            axis=axis,
            skipna=skipna,
            numeric_only=bool_only,
            filter_type="bool",
        )

    def any(
        self,
        axis: Axis = 0,
        bool_only: bool_t = False,
        skipna: bool_t = True,
        **kwargs,
    ) -> DataFrame | Series | bool_t:
        return self._logical_func(
            "any", nanops.nanany, axis, bool_only, skipna, **kwargs
        )

    def all(
        self,
        axis: Axis = 0,
        bool_only: bool_t = False,
        skipna: bool_t = True,
        **kwargs,
    ) -> Series | bool_t:
        return self._logical_func(
            "all", nanops.nanall, axis, bool_only, skipna, **kwargs
        )

    def _accum_func(
        self,
        name: str,
        func,
        axis:
Axis | None = None, skipna: bool_t = True, *args, **kwargs, ): skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name) if axis is None: axis = self._stat_axis_number else: axis = self._get_axis_number(axis) if axis == 1: return self.T._accum_func( name, func, axis=0, skipna=skipna, *args, **kwargs # noqa: B026 ).T def block_accum_func(blk_values): values = blk_values.T if hasattr(blk_values, "T") else blk_values result: np.ndarray | ExtensionArray if isinstance(values, ExtensionArray): result = values._accumulate(name, skipna=skipna, **kwargs) else: result = nanops.na_accum_func(values, func, skipna=skipna) result = result.T if hasattr(result, "T") else result return result result = self._mgr.apply(block_accum_func) return self._constructor(result).__finalize__(self, method=name) def cummax(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func( "cummax", np.maximum.accumulate, axis, skipna, *args, **kwargs ) def cummin(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func( "cummin", np.minimum.accumulate, axis, skipna, *args, **kwargs ) def cumsum(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func("cumsum", np.cumsum, axis, skipna, *args, **kwargs) def cumprod(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func("cumprod", np.cumprod, axis, skipna, *args, **kwargs) def _stat_function_ddof( self, name: str, func, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: nv.validate_stat_ddof_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if axis is None: axis = self._stat_axis_number return self._reduce( func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof ) def sem( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function_ddof( "sem", nanops.nansem, axis, skipna, ddof, numeric_only, **kwargs ) def var( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function_ddof( "var", nanops.nanvar, axis, skipna, ddof, numeric_only, **kwargs ) def std( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function_ddof( "std", nanops.nanstd, axis, skipna, ddof, numeric_only, **kwargs ) def _stat_function( self, name: str, func, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): if name == "median": nv.validate_median((), kwargs) else: nv.validate_stat_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only ) def min( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return self._stat_function( "min", nanops.nanmin, axis, skipna, numeric_only, **kwargs, ) def max( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return self._stat_function( "max", nanops.nanmax, axis, skipna, numeric_only, **kwargs, ) def mean( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( 
"mean", nanops.nanmean, axis, skipna, numeric_only, **kwargs ) def median( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( "median", nanops.nanmedian, axis, skipna, numeric_only, **kwargs ) def skew( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( "skew", nanops.nanskew, axis, skipna, numeric_only, **kwargs ) def kurt( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( "kurt", nanops.nankurt, axis, skipna, numeric_only, **kwargs ) kurtosis = kurt def _min_count_stat_function( self, name: str, func, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): if name == "sum": nv.validate_sum((), kwargs) elif name == "prod": nv.validate_prod((), kwargs) else: nv.validate_stat_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if axis is None: axis = self._stat_axis_number return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count, ) def sum( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return self._min_count_stat_function( "sum", nanops.nansum, axis, skipna, numeric_only, min_count, **kwargs ) def prod( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return self._min_count_stat_function( "prod", nanops.nanprod, axis, skipna, numeric_only, min_count, **kwargs, ) product = prod def _add_numeric_operations(cls) -> None: """ Add the operations to the cls; evaluate the doc strings again """ axis_descr, name1, name2 = _doc_params(cls) _bool_doc, desc=_any_desc, name1=name1, name2=name2, axis_descr=axis_descr, see_also=_any_see_also, examples=_any_examples, empty_value=False, ) def any( self, *, axis: Axis = 0, bool_only=None, skipna: bool_t = True, **kwargs, ): return NDFrame.any( self, axis=axis, bool_only=bool_only, skipna=skipna, **kwargs, ) setattr(cls, "any", any) _bool_doc, desc=_all_desc, name1=name1, name2=name2, axis_descr=axis_descr, see_also=_all_see_also, examples=_all_examples, empty_value=True, ) def all( self, axis: Axis = 0, bool_only=None, skipna: bool_t = True, **kwargs, ): return NDFrame.all(self, axis, bool_only, skipna, **kwargs) setattr(cls, "all", all) _num_ddof_doc, desc="Return unbiased standard error of the mean over requested " "axis.\n\nNormalized by N-1 by default. This can be changed " "using the ddof argument", name1=name1, name2=name2, axis_descr=axis_descr, notes="", examples="", ) def sem( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ): return NDFrame.sem(self, axis, skipna, ddof, numeric_only, **kwargs) setattr(cls, "sem", sem) _num_ddof_doc, desc="Return unbiased variance over requested axis.\n\nNormalized by " "N-1 by default. This can be changed using the ddof argument.", name1=name1, name2=name2, axis_descr=axis_descr, notes="", examples=_var_examples, ) def var( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ): return NDFrame.var(self, axis, skipna, ddof, numeric_only, **kwargs) setattr(cls, "var", var) _num_ddof_doc, desc="Return sample standard deviation over requested axis." 
"\n\nNormalized by N-1 by default. This can be changed using the " "ddof argument.", name1=name1, name2=name2, axis_descr=axis_descr, notes=_std_notes, examples=_std_examples, ) def std( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ): return NDFrame.std(self, axis, skipna, ddof, numeric_only, **kwargs) setattr(cls, "std", std) _cnum_doc, desc="minimum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="min", examples=_cummin_examples, ) def cummin( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cummin(self, axis, skipna, *args, **kwargs) setattr(cls, "cummin", cummin) _cnum_doc, desc="maximum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="max", examples=_cummax_examples, ) def cummax( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cummax(self, axis, skipna, *args, **kwargs) setattr(cls, "cummax", cummax) _cnum_doc, desc="sum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="sum", examples=_cumsum_examples, ) def cumsum( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cumsum(self, axis, skipna, *args, **kwargs) setattr(cls, "cumsum", cumsum) _cnum_doc, desc="product", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="prod", examples=_cumprod_examples, ) def cumprod( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cumprod(self, axis, skipna, *args, **kwargs) setattr(cls, "cumprod", cumprod) # error: Untyped decorator makes function "sum" untyped _num_doc, desc="Return the sum of the values over the requested axis.\n\n" "This is equivalent to the method ``numpy.sum``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count=_min_count_stub, see_also=_stat_func_see_also, examples=_sum_examples, ) def sum( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return NDFrame.sum(self, axis, skipna, numeric_only, min_count, **kwargs) setattr(cls, "sum", sum) _num_doc, desc="Return the product of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count=_min_count_stub, see_also=_stat_func_see_also, examples=_prod_examples, ) def prod( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return NDFrame.prod(self, axis, skipna, numeric_only, min_count, **kwargs) setattr(cls, "prod", prod) cls.product = prod _num_doc, desc="Return the mean of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def mean( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.mean(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "mean", mean) _num_doc, desc="Return unbiased skew over requested axis.\n\nNormalized by N-1.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def skew( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.skew(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "skew", skew) _num_doc, desc="Return unbiased kurtosis over requested axis.\n\n" "Kurtosis obtained using Fisher's definition of\n" "kurtosis (kurtosis of normal == 0.0). 
Normalized " "by N-1.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def kurt( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.kurt(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "kurt", kurt) cls.kurtosis = kurt _num_doc, desc="Return the median of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def median( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.median(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "median", median) _num_doc, desc="Return the maximum of the values over the requested axis.\n\n" "If you want the *index* of the maximum, use ``idxmax``. This is " "the equivalent of the ``numpy.ndarray`` method ``argmax``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also=_stat_func_see_also, examples=_max_examples, ) def max( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.max(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "max", max) _num_doc, desc="Return the minimum of the values over the requested axis.\n\n" "If you want the *index* of the minimum, use ``idxmin``. This is " "the equivalent of the ``numpy.ndarray`` method ``argmin``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also=_stat_func_see_also, examples=_min_examples, ) def min( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.min(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "min", min) def rolling( self, window: int | dt.timedelta | str | BaseOffset | BaseIndexer, min_periods: int | None = None, center: bool_t = False, win_type: str | None = None, on: str | None = None, axis: Axis = 0, closed: str | None = None, step: int | None = None, method: str = "single", ) -> Window | Rolling: axis = self._get_axis_number(axis) if win_type is not None: return Window( self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, axis=axis, closed=closed, step=step, method=method, ) return Rolling( self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, axis=axis, closed=closed, step=step, method=method, ) def expanding( self, min_periods: int = 1, axis: Axis = 0, method: str = "single", ) -> Expanding: axis = self._get_axis_number(axis) return Expanding(self, min_periods=min_periods, axis=axis, method=method) def ewm( self, com: float | None = None, span: float | None = None, halflife: float | TimedeltaConvertibleTypes | None = None, alpha: float | None = None, min_periods: int | None = 0, adjust: bool_t = True, ignore_na: bool_t = False, axis: Axis = 0, times: np.ndarray | DataFrame | Series | None = None, method: str = "single", ) -> ExponentialMovingWindow: axis = self._get_axis_number(axis) return ExponentialMovingWindow( self, com=com, span=span, halflife=halflife, alpha=alpha, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, axis=axis, times=times, method=method, ) # ---------------------------------------------------------------------- # Arithmetic Methods def _inplace_method(self, other, op): """ Wrap arithmetic method to operate inplace. 
""" result = op(self, other) if ( self.ndim == 1 and result._indexed_same(self) and is_dtype_equal(result.dtype, self.dtype) ): # GH#36498 this inplace op can _actually_ be inplace. # Item "ArrayManager" of "Union[ArrayManager, SingleArrayManager, # BlockManager, SingleBlockManager]" has no attribute "setitem_inplace" self._mgr.setitem_inplace( # type: ignore[union-attr] slice(None), result._values ) return self # Delete cacher self._reset_cacher() # this makes sure that we are aligned like the input # we are updating inplace so we want to ignore is_copy self._update_inplace( result.reindex_like(self, copy=False), verify_is_copy=False ) return self def __iadd__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for + ("Type[NDFrame]") return self._inplace_method(other, type(self).__add__) # type: ignore[operator] def __isub__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for - ("Type[NDFrame]") return self._inplace_method(other, type(self).__sub__) # type: ignore[operator] def __imul__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for * ("Type[NDFrame]") return self._inplace_method(other, type(self).__mul__) # type: ignore[operator] def __itruediv__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for / ("Type[NDFrame]") return self._inplace_method( other, type(self).__truediv__ # type: ignore[operator] ) def __ifloordiv__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for // ("Type[NDFrame]") return self._inplace_method( other, type(self).__floordiv__ # type: ignore[operator] ) def __imod__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for % ("Type[NDFrame]") return self._inplace_method(other, type(self).__mod__) # type: ignore[operator] def __ipow__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for ** ("Type[NDFrame]") return self._inplace_method(other, type(self).__pow__) # type: ignore[operator] def __iand__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for & ("Type[NDFrame]") return self._inplace_method(other, type(self).__and__) # type: ignore[operator] def __ior__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for | ("Type[NDFrame]") return self._inplace_method(other, type(self).__or__) # type: ignore[operator] def __ixor__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for ^ ("Type[NDFrame]") return self._inplace_method(other, type(self).__xor__) # type: ignore[operator] # ---------------------------------------------------------------------- # Misc methods def _find_valid_index(self, *, how: str) -> Hashable | None: """ Retrieves the index of the first valid value. Parameters ---------- how : {'first', 'last'} Use this parameter to change between the first or last valid index. Returns ------- idx_first_valid : type of index """ idxpos = find_valid_index(self._values, how=how, is_valid=~isna(self._values)) if idxpos is None: return None return self.index[idxpos] def first_valid_index(self) -> Hashable | None: """ Return index for {position} non-NA value or None, if no non-NA value is found. Returns ------- type of index Notes ----- If all elements are non-NA/null, returns None. Also returns None for empty {klass}. 
""" return self._find_valid_index(how="first") def last_valid_index(self) -> Hashable | None: return self._find_valid_index(how="last") The provided code snippet includes necessary dependencies for implementing the `preprocess_weights` function. Write a Python function `def preprocess_weights(obj: NDFrame, weights, axis: AxisInt) -> np.ndarray` to solve the following problem: Process and validate the `weights` argument to `NDFrame.sample` and `.GroupBy.sample`. Returns `weights` as an ndarray[np.float64], validated except for normalizing weights (because that must be done groupwise in groupby sampling). Here is the function: def preprocess_weights(obj: NDFrame, weights, axis: AxisInt) -> np.ndarray: """ Process and validate the `weights` argument to `NDFrame.sample` and `.GroupBy.sample`. Returns `weights` as an ndarray[np.float64], validated except for normalizing weights (because that must be done groupwise in groupby sampling). """ # If a series, align with frame if isinstance(weights, ABCSeries): weights = weights.reindex(obj.axes[axis]) # Strings acceptable if a dataframe and axis = 0 if isinstance(weights, str): if isinstance(obj, ABCDataFrame): if axis == 0: try: weights = obj[weights] except KeyError as err: raise KeyError( "String passed to weights not a valid column" ) from err else: raise ValueError( "Strings can only be passed to " "weights when sampling from rows on " "a DataFrame" ) else: raise ValueError( "Strings cannot be passed as weights when sampling from a Series." ) if isinstance(obj, ABCSeries): func = obj._constructor else: func = obj._constructor_sliced weights = func(weights, dtype="float64")._values if len(weights) != obj.shape[axis]: raise ValueError("Weights and axis to be sampled must be of same length") if lib.has_infs(weights): raise ValueError("weight vector may not include `inf` values") if (weights < 0).any(): raise ValueError("weight vector many not include negative values") missing = np.isnan(weights) if missing.any(): # Don't modify weights in place weights = weights.copy() weights[missing] = 0 return weights
Process and validate the `weights` argument to `NDFrame.sample` and `.GroupBy.sample`. Returns `weights` as an ndarray[np.float64], validated except for normalizing weights (because that must be done groupwise in groupby sampling).
173,074
from __future__ import annotations from typing import TYPE_CHECKING import numpy as np from pandas._libs import lib from pandas._typing import AxisInt from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) The provided code snippet includes necessary dependencies for implementing the `process_sampling_size` function. Write a Python function `def process_sampling_size( n: int | None, frac: float | None, replace: bool ) -> int | None` to solve the following problem: Process and validate the `n` and `frac` arguments to `NDFrame.sample` and `.GroupBy.sample`. Returns None if `frac` should be used (variable sampling sizes), otherwise returns the constant sampling size. Here is the function: def process_sampling_size( n: int | None, frac: float | None, replace: bool ) -> int | None: """ Process and validate the `n` and `frac` arguments to `NDFrame.sample` and `.GroupBy.sample`. Returns None if `frac` should be used (variable sampling sizes), otherwise returns the constant sampling size. """ # If no frac or n, default to n=1. if n is None and frac is None: n = 1 elif n is not None and frac is not None: raise ValueError("Please enter a value for `frac` OR `n`, not both") elif n is not None: if n < 0: raise ValueError( "A negative number of rows requested. Please provide `n` >= 0." ) if n % 1 != 0: raise ValueError("Only integers accepted as `n` values") else: assert frac is not None # for mypy if frac > 1 and not replace: raise ValueError( "Replace has to be set to `True` when " "upsampling the population `frac` > 1." ) if frac < 0: raise ValueError( "A negative number of rows requested. Please provide `frac` >= 0." ) return n
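A hedged sketch of the `n`/`frac` contract follows, assuming the same `pandas.core.sample` module path as in the previous example (an assumption, not confirmed by this snippet).

from pandas.core.sample import process_sampling_size  # assumed import path

print(process_sampling_size(None, None, replace=False))  # 1 -> defaults to n=1
print(process_sampling_size(5, None, replace=False))     # 5 -> fixed sample size
print(process_sampling_size(None, 0.5, replace=False))   # None -> caller sizes by frac

# Upsampling (frac > 1) is only allowed when sampling with replacement.
try:
    process_sampling_size(None, 1.5, replace=False)
except ValueError as err:
    print(err)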
Process and validate the `n` and `frac` arguments to `NDFrame.sample` and `.GroupBy.sample`. Returns None if `frac` should be used (variable sampling sizes), otherwise returns the constant sampling size.
173,075
from __future__ import annotations from typing import TYPE_CHECKING import numpy as np from pandas._libs import lib from pandas._typing import AxisInt from pandas.core.dtypes.generic import ( ABCDataFrame, ABCSeries, ) The provided code snippet includes necessary dependencies for implementing the `sample` function. Write a Python function `def sample( obj_len: int, size: int, replace: bool, weights: np.ndarray | None, random_state: np.random.RandomState | np.random.Generator, ) -> np.ndarray` to solve the following problem: Randomly sample `size` indices in `np.arange(obj_len)` Parameters ---------- obj_len : int The length of the indices being considered size : int The number of values to choose replace : bool Allow or disallow sampling of the same row more than once. weights : np.ndarray[np.float64] or None If None, equal probability weighting, otherwise weights according to the vector normalized random_state: np.random.RandomState or np.random.Generator State used for the random sampling Returns ------- np.ndarray[np.intp] Here is the function: def sample( obj_len: int, size: int, replace: bool, weights: np.ndarray | None, random_state: np.random.RandomState | np.random.Generator, ) -> np.ndarray: """ Randomly sample `size` indices in `np.arange(obj_len)` Parameters ---------- obj_len : int The length of the indices being considered size : int The number of values to choose replace : bool Allow or disallow sampling of the same row more than once. weights : np.ndarray[np.float64] or None If None, equal probability weighting, otherwise weights according to the vector normalized random_state: np.random.RandomState or np.random.Generator State used for the random sampling Returns ------- np.ndarray[np.intp] """ if weights is not None: weight_sum = weights.sum() if weight_sum != 0: weights = weights / weight_sum else: raise ValueError("Invalid weights: weights sum to zero") return random_state.choice(obj_len, size=size, replace=replace, p=weights).astype( np.intp, copy=False )
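A brief usage sketch for the positional sampler; as before, the `pandas.core.sample` import path is an assumption, and the exact indices drawn depend on the seed.

import numpy as np
from pandas.core.sample import sample  # assumed import path

rng = np.random.default_rng(0)  # an np.random.Generator is accepted
weights = np.array([0.0, 0.0, 1.0, 3.0], dtype=np.float64)

# Weights are normalized internally, so they only need to be non-negative.
idx = sample(obj_len=4, size=2, replace=True, weights=weights, random_state=rng)
print(idx.dtype)  # intp; every drawn index is 2 or 3, given the zero weights

# An all-zero weight vector cannot be normalized and raises.
try:
    sample(4, 2, True, np.zeros(4, dtype=np.float64), rng)
except ValueError as err:
    print(err)  # Invalid weights: weights sum to zero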
Randomly sample `size` indices in `np.arange(obj_len)` Parameters ---------- obj_len : int The length of the indices being considered size : int The number of values to choose replace : bool Allow or disallow sampling of the same row more than once. weights : np.ndarray[np.float64] or None If None, equal probability weighting, otherwise weights according to the vector normalized random_state: np.random.RandomState or np.random.Generator State used for the random sampling Returns ------- np.ndarray[np.intp]
173,076
from __future__ import annotations import numpy as np from pandas._libs.internals import BlockPlacement from pandas._typing import Dtype from pandas.core.dtypes.common import ( is_datetime64tz_dtype, is_period_dtype, pandas_dtype, ) from pandas.core.arrays import DatetimeArray from pandas.core.construction import extract_array from pandas.core.internals.blocks import ( Block, DatetimeTZBlock, ExtensionBlock, check_ndim, ensure_block_shape, extract_pandas_array, get_block_type, maybe_coerce_values, ) def maybe_infer_ndim(values, placement: BlockPlacement, ndim: int | None) -> int: """ If `ndim` is not provided, infer it from placement and values. """ if ndim is None: # GH#38134 Block constructor now assumes ndim is not None if not isinstance(values.dtype, np.dtype): if len(placement) != 1: ndim = 1 else: ndim = 2 else: ndim = values.ndim return ndim Dtype = Union["ExtensionDtype", NpDtype] def is_datetime64tz_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of a DatetimeTZDtype dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of a DatetimeTZDtype dtype. Examples -------- >>> is_datetime64tz_dtype(object) False >>> is_datetime64tz_dtype([1, 2, 3]) False >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3])) # tz-naive False >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) True >>> dtype = DatetimeTZDtype("ns", tz="US/Eastern") >>> s = pd.Series([], dtype=dtype) >>> is_datetime64tz_dtype(dtype) True >>> is_datetime64tz_dtype(s) True """ if isinstance(arr_or_dtype, DatetimeTZDtype): # GH#33400 fastpath for dtype object # GH 34986 return True if arr_or_dtype is None: return False return DatetimeTZDtype.is_dtype(arr_or_dtype) def is_period_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of the Period dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the Period dtype. Examples -------- >>> is_period_dtype(object) False >>> is_period_dtype(PeriodDtype(freq="D")) True >>> is_period_dtype([1, 2, 3]) False >>> is_period_dtype(pd.Period("2017-01-01")) False >>> is_period_dtype(pd.PeriodIndex([], freq="A")) True """ if isinstance(arr_or_dtype, ExtensionDtype): # GH#33400 fastpath for dtype object return arr_or_dtype.type is Period if arr_or_dtype is None: return False return PeriodDtype.is_dtype(arr_or_dtype) def pandas_dtype(dtype) -> DtypeObj: """ Convert input into a pandas only dtype object or a numpy dtype object. Parameters ---------- dtype : object to be converted Returns ------- np.dtype or a pandas dtype Raises ------ TypeError if not a dtype """ # short-circuit if isinstance(dtype, np.ndarray): return dtype.dtype elif isinstance(dtype, (np.dtype, ExtensionDtype)): return dtype # registered extension types result = registry.find(dtype) if result is not None: return result # try a numpy dtype # raise a consistent TypeError if failed try: with warnings.catch_warnings(): # GH#51523 - Series.astype(np.integer) doesn't show # numpy deprication warning of np.integer # Hence enabling DeprecationWarning warnings.simplefilter("always", DeprecationWarning) npdtype = np.dtype(dtype) except SyntaxError as err: # np.dtype uses `eval` which can raise SyntaxError raise TypeError(f"data type '{dtype}' not understood") from err # Any invalid dtype (such as pd.Timestamp) should raise an error. 
# np.dtype(invalid_type).kind = 0 for such objects. However, this will # also catch some valid dtypes such as object, np.object_ and 'object' # which we safeguard against by catching them earlier and returning # np.dtype(valid_dtype) before this condition is evaluated. if is_hashable(dtype) and dtype in [object, np.object_, "object", "O"]: # check hashability to avoid errors/DeprecationWarning when we get # here and `dtype` is an array return npdtype elif npdtype.kind == "O": raise TypeError(f"dtype '{dtype}' not understood") return npdtype def extract_array( obj: Series | Index, extract_numpy: bool = ..., extract_range: bool = ... ) -> ArrayLike: ... def extract_array( obj: T, extract_numpy: bool = ..., extract_range: bool = ... ) -> T | ArrayLike: ... def extract_array( obj: T, extract_numpy: bool = False, extract_range: bool = False ) -> T | ArrayLike: """ Extract the ndarray or ExtensionArray from a Series or Index. For all other types, `obj` is just returned as is. Parameters ---------- obj : object For Series / Index, the underlying ExtensionArray is unboxed. extract_numpy : bool, default False Whether to extract the ndarray from a PandasArray. extract_range : bool, default False If we have a RangeIndex, return range._values if True (which is a materialized integer ndarray), otherwise return unchanged. Returns ------- arr : object Examples -------- >>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category')) ['a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] Other objects like lists, arrays, and DataFrames are just passed through. >>> extract_array([1, 2, 3]) [1, 2, 3] For an ndarray-backed Series / Index the ndarray is returned. >>> extract_array(pd.Series([1, 2, 3])) array([1, 2, 3]) To extract all the way down to the ndarray, pass ``extract_numpy=True``. >>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True) array([1, 2, 3]) """ if isinstance(obj, (ABCIndex, ABCSeries)): if isinstance(obj, ABCRangeIndex): if extract_range: return obj._values # https://github.com/python/mypy/issues/1081 # error: Incompatible return value type (got "RangeIndex", expected # "Union[T, Union[ExtensionArray, ndarray[Any, Any]]]") return obj # type: ignore[return-value] return obj._values elif extract_numpy and isinstance(obj, ABCPandasArray): return obj.to_numpy() return obj class Block(PandasObject): """ Canonical n-dimensional unit of homogeneous dtype contained in a pandas data structure Index-ignorant; let the container take care of that """ values: np.ndarray | ExtensionArray ndim: int refs: BlockValuesRefs __init__: Callable __slots__ = () is_numeric = False is_object = False is_extension = False _can_consolidate = True _validate_ndim = True def _consolidate_key(self): return self._can_consolidate, self.dtype.name def _can_hold_na(self) -> bool: """ Can we store NA values in this Block? """ dtype = self.dtype if isinstance(dtype, np.dtype): return dtype.kind not in ["b", "i", "u"] return dtype._can_hold_na def is_bool(self) -> bool: """ We can be bool if a) we are bool dtype or b) object dtype with bool objects. 
""" return self.values.dtype == np.dtype(bool) def external_values(self): return external_values(self.values) def fill_value(self): # Used in reindex_indexer return na_value_for_dtype(self.dtype, compat=False) def _standardize_fill_value(self, value): # if we are passed a scalar None, convert it here if self.dtype != _dtype_obj and is_valid_na_for_dtype(value, self.dtype): value = self.fill_value return value def mgr_locs(self) -> BlockPlacement: return self._mgr_locs def mgr_locs(self, new_mgr_locs: BlockPlacement) -> None: self._mgr_locs = new_mgr_locs def make_block( self, values, placement=None, refs: BlockValuesRefs | None = None ) -> Block: """ Create a new block, with type inference propagate any values that are not specified """ if placement is None: placement = self._mgr_locs if self.is_extension: values = ensure_block_shape(values, ndim=self.ndim) # TODO: perf by not going through new_block # We assume maybe_coerce_values has already been called return new_block(values, placement=placement, ndim=self.ndim, refs=refs) def make_block_same_class( self, values, placement: BlockPlacement | None = None, refs: BlockValuesRefs | None = None, ) -> Block: """Wrap given values in a block of same type as self.""" # Pre-2.0 we called ensure_wrapped_if_datetimelike because fastparquet # relied on it, as of 2.0 the caller is responsible for this. if placement is None: placement = self._mgr_locs # We assume maybe_coerce_values has already been called return type(self)(values, placement=placement, ndim=self.ndim, refs=refs) def __repr__(self) -> str: # don't want to print out all of the items here name = type(self).__name__ if self.ndim == 1: result = f"{name}: {len(self)} dtype: {self.dtype}" else: shape = " x ".join([str(s) for s in self.shape]) result = f"{name}: {self.mgr_locs.indexer}, {shape}, dtype: {self.dtype}" return result def __len__(self) -> int: return len(self.values) def getitem_block(self, slicer: slice | npt.NDArray[np.intp]) -> Block: """ Perform __getitem__-like, return result as block. Only supports slices that preserve dimensionality. """ # Note: the only place where we are called with ndarray[intp] # is from internals.concat, and we can verify that never happens # with 1-column blocks, i.e. never for ExtensionBlock. new_mgr_locs = self._mgr_locs[slicer] new_values = self._slice(slicer) refs = self.refs if isinstance(slicer, slice) else None return type(self)(new_values, new_mgr_locs, self.ndim, refs=refs) def getitem_block_columns( self, slicer: slice, new_mgr_locs: BlockPlacement ) -> Block: """ Perform __getitem__-like, return result as block. Only supports slices that preserve dimensionality. """ new_values = self._slice(slicer) if new_values.ndim != self.values.ndim: raise ValueError("Only same dim slicing is allowed") return type(self)(new_values, new_mgr_locs, self.ndim, refs=self.refs) def _can_hold_element(self, element: Any) -> bool: """require the same dtype as ourselves""" element = extract_array(element, extract_numpy=True) return can_hold_element(self.values, element) def should_store(self, value: ArrayLike) -> bool: """ Should we set self.values[indexer] = value inplace or do we need to cast? 
Parameters ---------- value : np.ndarray or ExtensionArray Returns ------- bool """ # faster equivalent to is_dtype_equal(value.dtype, self.dtype) try: return value.dtype == self.dtype except TypeError: return False # --------------------------------------------------------------------- # Apply/Reduce and Helpers def apply(self, func, **kwargs) -> list[Block]: """ apply the function to my values; return a block if we are not one """ result = func(self.values, **kwargs) return self._split_op_result(result) def reduce(self, func) -> list[Block]: # We will apply the function and reshape the result into a single-row # Block with the same mgr_locs; squeezing will be done at a higher level assert self.ndim == 2 result = func(self.values) if self.values.ndim == 1: # TODO(EA2D): special case not needed with 2D EAs res_values = np.array([[result]]) else: res_values = result.reshape(-1, 1) nb = self.make_block(res_values) return [nb] def _split_op_result(self, result: ArrayLike) -> list[Block]: # See also: split_and_operate if result.ndim > 1 and isinstance(result.dtype, ExtensionDtype): # TODO(EA2D): unnecessary with 2D EAs # if we get a 2D ExtensionArray, we need to split it into 1D pieces nbs = [] for i, loc in enumerate(self._mgr_locs): if not is_1d_only_ea_obj(result): vals = result[i : i + 1] else: vals = result[i] block = self.make_block(values=vals, placement=loc) nbs.append(block) return nbs nb = self.make_block(result) return [nb] def _split(self) -> list[Block]: """ Split a block into a list of single-column blocks. """ assert self.ndim == 2 new_blocks = [] for i, ref_loc in enumerate(self._mgr_locs): vals = self.values[slice(i, i + 1)] bp = BlockPlacement(ref_loc) nb = type(self)(vals, placement=bp, ndim=2, refs=self.refs) new_blocks.append(nb) return new_blocks def split_and_operate(self, func, *args, **kwargs) -> list[Block]: """ Split the block and apply func column-by-column. Parameters ---------- func : Block method *args **kwargs Returns ------- List[Block] """ assert self.ndim == 2 and self.shape[0] != 1 res_blocks = [] for nb in self._split(): rbs = func(nb, *args, **kwargs) res_blocks.extend(rbs) return res_blocks # --------------------------------------------------------------------- # Up/Down-casting def coerce_to_target_dtype(self, other) -> Block: """ coerce the current block to a dtype compat for other we will return a block, possibly object, and not raise we can also safely try to coerce to the same dtype and will receive the same block """ new_dtype = find_result_type(self.values, other) return self.astype(new_dtype, copy=False) def _maybe_downcast( self, blocks: list[Block], downcast=None, using_cow: bool = False ) -> list[Block]: if downcast is False: return blocks if self.dtype == _dtype_obj: # TODO: does it matter that self.dtype might not match blocks[i].dtype? # GH#44241 We downcast regardless of the argument; # respecting 'downcast=None' may be worthwhile at some point, # but ATM it breaks too much existing code. # split and convert the blocks return extend_blocks( [blk.convert(using_cow=using_cow, copy=not using_cow) for blk in blocks] ) if downcast is None: return blocks return extend_blocks([b._downcast_2d(downcast, using_cow) for b in blocks]) def _downcast_2d(self, dtype, using_cow: bool = False) -> list[Block]: """ downcast specialized to 2D case post-validation. Refactored to allow use of maybe_split. 
""" new_values = maybe_downcast_to_dtype(self.values, dtype=dtype) refs = self.refs if using_cow and new_values is self.values else None return [self.make_block(new_values, refs=refs)] def convert( self, *, copy: bool = True, using_cow: bool = False, ) -> list[Block]: """ attempt to coerce any object types to better types return a copy of the block (if copy = True) by definition we are not an ObjectBlock here! """ if not copy and using_cow: return [self.copy(deep=False)] return [self.copy()] if copy else [self] # --------------------------------------------------------------------- # Array-Like Methods def dtype(self) -> DtypeObj: return self.values.dtype def astype( self, dtype: DtypeObj, copy: bool = False, errors: IgnoreRaise = "raise", using_cow: bool = False, ) -> Block: """ Coerce to the new dtype. Parameters ---------- dtype : np.dtype or ExtensionDtype copy : bool, default False copy if indicated errors : str, {'raise', 'ignore'}, default 'raise' - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. On error return original object using_cow: bool, default False Signaling if copy on write copy logic is used. Returns ------- Block """ values = self.values new_values = astype_array_safe(values, dtype, copy=copy, errors=errors) new_values = maybe_coerce_values(new_values) refs = None if using_cow and astype_is_view(values.dtype, new_values.dtype): refs = self.refs newb = self.make_block(new_values, refs=refs) if newb.shape != self.shape: raise TypeError( f"cannot set astype for copy = [{copy}] for dtype " f"({self.dtype.name} [{self.shape}]) to different shape " f"({newb.dtype.name} [{newb.shape}])" ) return newb def to_native_types(self, na_rep: str = "nan", quoting=None, **kwargs) -> Block: """convert to our native types format""" result = to_native_types(self.values, na_rep=na_rep, quoting=quoting, **kwargs) return self.make_block(result) def copy(self, deep: bool = True) -> Block: """copy constructor""" values = self.values refs: BlockValuesRefs | None if deep: values = values.copy() refs = None else: refs = self.refs return type(self)(values, placement=self._mgr_locs, ndim=self.ndim, refs=refs) # --------------------------------------------------------------------- # Replace def replace( self, to_replace, value, inplace: bool = False, # mask may be pre-computed if we're called from replace_list mask: npt.NDArray[np.bool_] | None = None, using_cow: bool = False, ) -> list[Block]: """ replace the to_replace value with value, possible to create new blocks here this is just a call to putmask. """ # Note: the checks we do in NDFrame.replace ensure we never get # here with listlike to_replace or value, as those cases # go through replace_list values = self.values if isinstance(values, Categorical): # TODO: avoid special-casing # GH49404 if using_cow and (self.refs.has_reference() or not inplace): blk = self.copy() elif using_cow: blk = self.copy(deep=False) else: blk = self if inplace else self.copy() values = cast(Categorical, blk.values) values._replace(to_replace=to_replace, value=value, inplace=True) return [blk] if not self._can_hold_element(to_replace): # We cannot hold `to_replace`, so we know immediately that # replacing it is a no-op. # Note: If to_replace were a list, NDFrame.replace would call # replace_list instead of replace. 
if using_cow: return [self.copy(deep=False)] else: return [self] if inplace else [self.copy()] if mask is None: mask = missing.mask_missing(values, to_replace) if not mask.any(): # Note: we get here with test_replace_extension_other incorrectly # bc _can_hold_element is incorrect. if using_cow: return [self.copy(deep=False)] else: return [self] if inplace else [self.copy()] elif self._can_hold_element(value): # TODO(CoW): Maybe split here as well into columns where mask has True # and rest? if using_cow: if inplace: blk = self.copy(deep=self.refs.has_reference()) else: blk = self.copy() else: blk = self if inplace else self.copy() putmask_inplace(blk.values, mask, value) if not (self.is_object and value is None): # if the user *explicitly* gave None, we keep None, otherwise # may downcast to NaN blocks = blk.convert(copy=False, using_cow=using_cow) else: blocks = [blk] return blocks elif self.ndim == 1 or self.shape[0] == 1: if value is None or value is NA: blk = self.astype(np.dtype(object)) else: blk = self.coerce_to_target_dtype(value) return blk.replace( to_replace=to_replace, value=value, inplace=True, mask=mask, ) else: # split so that we only upcast where necessary blocks = [] for i, nb in enumerate(self._split()): blocks.extend( type(self).replace( nb, to_replace=to_replace, value=value, inplace=True, mask=mask[i : i + 1], using_cow=using_cow, ) ) return blocks def _replace_regex( self, to_replace, value, inplace: bool = False, mask=None, using_cow: bool = False, ) -> list[Block]: """ Replace elements by the given value. Parameters ---------- to_replace : object or pattern Scalar to replace or regular expression to match. value : object Replacement object. inplace : bool, default False Perform inplace modification. mask : array-like of bool, optional True indicate corresponding element is ignored. using_cow: bool, default False Specifying if copy on write is enabled. Returns ------- List[Block] """ if not self._can_hold_element(to_replace): # i.e. only ObjectBlock, but could in principle include a # String ExtensionBlock if using_cow: return [self.copy(deep=False)] return [self] if inplace else [self.copy()] rx = re.compile(to_replace) if using_cow: if inplace and not self.refs.has_reference(): refs = self.refs new_values = self.values else: refs = None new_values = self.values.copy() else: refs = None new_values = self.values if inplace else self.values.copy() replace_regex(new_values, rx, value, mask) block = self.make_block(new_values, refs=refs) return block.convert(copy=False, using_cow=using_cow) def replace_list( self, src_list: Iterable[Any], dest_list: Sequence[Any], inplace: bool = False, regex: bool = False, using_cow: bool = False, ) -> list[Block]: """ See BlockManager.replace_list docstring. 
""" values = self.values if isinstance(values, Categorical): # TODO: avoid special-casing # GH49404 if using_cow and inplace: blk = self.copy(deep=self.refs.has_reference()) else: blk = self if inplace else self.copy() values = cast(Categorical, blk.values) values._replace(to_replace=src_list, value=dest_list, inplace=True) return [blk] # Exclude anything that we know we won't contain pairs = [ (x, y) for x, y in zip(src_list, dest_list) if self._can_hold_element(x) ] if not len(pairs): if using_cow: return [self.copy(deep=False)] # shortcut, nothing to replace return [self] if inplace else [self.copy()] src_len = len(pairs) - 1 if is_string_dtype(values.dtype): # Calculate the mask once, prior to the call of comp # in order to avoid repeating the same computations na_mask = ~isna(values) masks: Iterable[npt.NDArray[np.bool_]] = ( extract_bool_array( cast( ArrayLike, compare_or_regex_search( values, s[0], regex=regex, mask=na_mask ), ) ) for s in pairs ) else: # GH#38086 faster if we know we dont need to check for regex masks = (missing.mask_missing(values, s[0]) for s in pairs) # Materialize if inplace = True, since the masks can change # as we replace if inplace: masks = list(masks) if using_cow and inplace: # Don't set up refs here, otherwise we will think that we have # references when we check again later rb = [self] else: rb = [self if inplace else self.copy()] for i, ((src, dest), mask) in enumerate(zip(pairs, masks)): convert = i == src_len # only convert once at the end new_rb: list[Block] = [] # GH-39338: _replace_coerce can split a block into # single-column blocks, so track the index so we know # where to index into the mask for blk_num, blk in enumerate(rb): if len(rb) == 1: m = mask else: mib = mask assert not isinstance(mib, bool) m = mib[blk_num : blk_num + 1] # error: Argument "mask" to "_replace_coerce" of "Block" has # incompatible type "Union[ExtensionArray, ndarray[Any, Any], bool]"; # expected "ndarray[Any, dtype[bool_]]" result = blk._replace_coerce( to_replace=src, value=dest, mask=m, inplace=inplace, regex=regex, using_cow=using_cow, ) if convert and blk.is_object and not all(x is None for x in dest_list): # GH#44498 avoid unwanted cast-back result = extend_blocks( [ b.convert(copy=True and not using_cow, using_cow=using_cow) for b in result ] ) new_rb.extend(result) rb = new_rb return rb def _replace_coerce( self, to_replace, value, mask: npt.NDArray[np.bool_], inplace: bool = True, regex: bool = False, using_cow: bool = False, ) -> list[Block]: """ Replace value corresponding to the given boolean array with another value. Parameters ---------- to_replace : object or pattern Scalar to replace or regular expression to match. value : object Replacement object. mask : np.ndarray[bool] True indicate corresponding element is ignored. inplace : bool, default True Perform inplace modification. regex : bool, default False If true, perform regular expression substitution. 
Returns ------- List[Block] """ if should_use_regex(regex, to_replace): return self._replace_regex( to_replace, value, inplace=inplace, mask=mask, ) else: if value is None: # gh-45601, gh-45836, gh-46634 if mask.any(): has_ref = self.refs.has_reference() nb = self.astype(np.dtype(object), copy=False, using_cow=using_cow) if (nb is self or using_cow) and not inplace: nb = nb.copy() elif inplace and has_ref and nb.refs.has_reference(): # no copy in astype and we had refs before nb = nb.copy() putmask_inplace(nb.values, mask, value) return [nb] if using_cow: return [self.copy(deep=False)] return [self] if inplace else [self.copy()] return self.replace( to_replace=to_replace, value=value, inplace=inplace, mask=mask, using_cow=using_cow, ) # --------------------------------------------------------------------- # 2D Methods - Shared by NumpyBlock and NDArrayBackedExtensionBlock # but not ExtensionBlock def _maybe_squeeze_arg(self, arg: np.ndarray) -> np.ndarray: """ For compatibility with 1D-only ExtensionArrays. """ return arg def _unwrap_setitem_indexer(self, indexer): """ For compatibility with 1D-only ExtensionArrays. """ return indexer # NB: this cannot be made cache_readonly because in mgr.set_values we pin # new .values that can have different shape GH#42631 def shape(self) -> Shape: return self.values.shape def iget(self, i: int | tuple[int, int] | tuple[slice, int]) -> np.ndarray: # In the case where we have a tuple[slice, int], the slice will always # be slice(None) # Note: only reached with self.ndim == 2 # Invalid index type "Union[int, Tuple[int, int], Tuple[slice, int]]" # for "Union[ndarray[Any, Any], ExtensionArray]"; expected type # "Union[int, integer[Any]]" return self.values[i] # type: ignore[index] def _slice( self, slicer: slice | npt.NDArray[np.bool_] | npt.NDArray[np.intp] ) -> ArrayLike: """return a slice of my values""" return self.values[slicer] def set_inplace(self, locs, values: ArrayLike, copy: bool = False) -> None: """ Modify block values in-place with new item value. If copy=True, first copy the underlying values in place before modifying (for Copy-on-Write). Notes ----- `set_inplace` never creates a new array or new Block, whereas `setitem` _may_ create a new array and always creates a new Block. Caller is responsible for checking values.dtype == self.dtype. """ if copy: self.values = self.values.copy() self.values[locs] = values def take_nd( self, indexer: npt.NDArray[np.intp], axis: AxisInt, new_mgr_locs: BlockPlacement | None = None, fill_value=lib.no_default, ) -> Block: """ Take values according to indexer and return them as a block. """ values = self.values if fill_value is lib.no_default: fill_value = self.fill_value allow_fill = False else: allow_fill = True # Note: algos.take_nd has upcast logic similar to coerce_to_target_dtype new_values = algos.take_nd( values, indexer, axis=axis, allow_fill=allow_fill, fill_value=fill_value ) # Called from three places in managers, all of which satisfy # these assertions if isinstance(self, ExtensionBlock): # NB: in this case, the 'axis' kwarg will be ignored in the # algos.take_nd call above. 
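        # Concrete illustration (my addition): if a caller passes
        # fill_value=np.nan for an int64 block, allow_fill becomes True and
        # algos.take_nd upcasts the result to float64, which is why the
        # dtype-equality check below may pick make_block over
        # make_block_same_class.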
assert not (self.ndim == 1 and new_mgr_locs is None) assert not (axis == 0 and new_mgr_locs is None) if new_mgr_locs is None: new_mgr_locs = self._mgr_locs if not is_dtype_equal(new_values.dtype, self.dtype): return self.make_block(new_values, new_mgr_locs) else: return self.make_block_same_class(new_values, new_mgr_locs) def _unstack( self, unstacker, fill_value, new_placement: npt.NDArray[np.intp], needs_masking: npt.NDArray[np.bool_], ): """ Return a list of unstacked blocks of self Parameters ---------- unstacker : reshape._Unstacker fill_value : int Only used in ExtensionBlock._unstack new_placement : np.ndarray[np.intp] allow_fill : bool needs_masking : np.ndarray[bool] Returns ------- blocks : list of Block New blocks of unstacked values. mask : array-like of bool The mask of columns of `blocks` we should keep. """ new_values, mask = unstacker.get_new_values( self.values.T, fill_value=fill_value ) mask = mask.any(0) # TODO: in all tests we have mask.all(); can we rely on that? # Note: these next two lines ensure that # mask.sum() == sum(len(nb.mgr_locs) for nb in blocks) # which the calling function needs in order to pass verify_integrity=False # to the BlockManager constructor new_values = new_values.T[mask] new_placement = new_placement[mask] bp = BlockPlacement(new_placement) blocks = [new_block_2d(new_values, placement=bp)] return blocks, mask # --------------------------------------------------------------------- def setitem(self, indexer, value, using_cow: bool = False) -> Block: """ Attempt self.values[indexer] = value, possibly creating a new array. Parameters ---------- indexer : tuple, list-like, array-like, slice, int The subset of self.values to set value : object The value being set using_cow: bool, default False Signaling if CoW is used. Returns ------- Block Notes ----- `indexer` is a direct slice/positional indexer. `value` must be a compatible shape. """ value = self._standardize_fill_value(value) values = cast(np.ndarray, self.values) if self.ndim == 2: values = values.T # length checking check_setitem_lengths(indexer, value, values) value = extract_array(value, extract_numpy=True) try: casted = np_can_hold_element(values.dtype, value) except LossySetitemError: # current dtype cannot store value, coerce to common dtype nb = self.coerce_to_target_dtype(value) return nb.setitem(indexer, value) else: if self.dtype == _dtype_obj: # TODO: avoid having to construct values[indexer] vi = values[indexer] if lib.is_list_like(vi): # checking lib.is_scalar here fails on # test_iloc_setitem_custom_object casted = setitem_datetimelike_compat(values, len(vi), casted) if using_cow and self.refs.has_reference(): values = values.copy() self = self.make_block_same_class( values.T if values.ndim == 2 else values ) values[indexer] = casted return self def putmask(self, mask, new, using_cow: bool = False) -> list[Block]: """ putmask the data to the block; it is possible that we may create a new dtype of block Return the resulting block(s). 
Parameters ---------- mask : np.ndarray[bool], SparseArray[bool], or BooleanArray new : a ndarray/object using_cow: bool, default False Returns ------- List[Block] """ orig_mask = mask values = cast(np.ndarray, self.values) mask, noop = validate_putmask(values.T, mask) assert not isinstance(new, (ABCIndex, ABCSeries, ABCDataFrame)) if new is lib.no_default: new = self.fill_value new = self._standardize_fill_value(new) new = extract_array(new, extract_numpy=True) if noop: if using_cow: return [self.copy(deep=False)] return [self] try: casted = np_can_hold_element(values.dtype, new) if using_cow and self.refs.has_reference(): # Do this here to avoid copying twice values = values.copy() self = self.make_block_same_class(values) putmask_without_repeat(values.T, mask, casted) if using_cow: return [self.copy(deep=False)] return [self] except LossySetitemError: if self.ndim == 1 or self.shape[0] == 1: # no need to split columns if not is_list_like(new): # using just new[indexer] can't save us the need to cast return self.coerce_to_target_dtype(new).putmask(mask, new) else: indexer = mask.nonzero()[0] nb = self.setitem(indexer, new[indexer], using_cow=using_cow) return [nb] else: is_array = isinstance(new, np.ndarray) res_blocks = [] nbs = self._split() for i, nb in enumerate(nbs): n = new if is_array: # we have a different value per-column n = new[:, i : i + 1] submask = orig_mask[:, i : i + 1] rbs = nb.putmask(submask, n, using_cow=using_cow) res_blocks.extend(rbs) return res_blocks def where( self, other, cond, _downcast: str | bool = "infer", using_cow: bool = False ) -> list[Block]: """ evaluate the block; return result block(s) from the result Parameters ---------- other : a ndarray/object cond : np.ndarray[bool], SparseArray[bool], or BooleanArray _downcast : str or None, default "infer" Private because we only specify it when calling from fillna. Returns ------- List[Block] """ assert cond.ndim == self.ndim assert not isinstance(other, (ABCIndex, ABCSeries, ABCDataFrame)) transpose = self.ndim == 2 cond = extract_bool_array(cond) # EABlocks override where values = cast(np.ndarray, self.values) orig_other = other if transpose: values = values.T icond, noop = validate_putmask(values, ~cond) if noop: # GH-39595: Always return a copy; short-circuit up/downcasting if using_cow: return [self.copy(deep=False)] return [self.copy()] if other is lib.no_default: other = self.fill_value other = self._standardize_fill_value(other) try: # try/except here is equivalent to a self._can_hold_element check, # but this gets us back 'casted' which we will re-use below; # without using 'casted', expressions.where may do unwanted upcasts. casted = np_can_hold_element(values.dtype, other) except (ValueError, TypeError, LossySetitemError): # we cannot coerce, return a compat dtype if self.ndim == 1 or self.shape[0] == 1: # no need to split columns block = self.coerce_to_target_dtype(other) blocks = block.where(orig_other, cond, using_cow=using_cow) return self._maybe_downcast( blocks, downcast=_downcast, using_cow=using_cow ) else: # since _maybe_downcast would split blocks anyway, we # can avoid some potential upcast/downcast by splitting # on the front end. 
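                # Sketch of the idea (my gloss): a 2D int64 block holding two
                # columns where only one receives a float `other` can, after
                # splitting, keep int64 for the untouched column instead of
                # upcasting the whole block.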
is_array = isinstance(other, (np.ndarray, ExtensionArray)) res_blocks = [] nbs = self._split() for i, nb in enumerate(nbs): oth = other if is_array: # we have a different value per-column oth = other[:, i : i + 1] submask = cond[:, i : i + 1] rbs = nb.where( oth, submask, _downcast=_downcast, using_cow=using_cow ) res_blocks.extend(rbs) return res_blocks else: other = casted alt = setitem_datetimelike_compat(values, icond.sum(), other) if alt is not other: if is_list_like(other) and len(other) < len(values): # call np.where with other to get the appropriate ValueError np.where(~icond, values, other) raise NotImplementedError( "This should not be reached; call to np.where above is " "expected to raise ValueError. Please report a bug at " "github.com/pandas-dev/pandas" ) result = values.copy() np.putmask(result, icond, alt) else: # By the time we get here, we should have all Series/Index # args extracted to ndarray if ( is_list_like(other) and not isinstance(other, np.ndarray) and len(other) == self.shape[-1] ): # If we don't do this broadcasting here, then expressions.where # will broadcast a 1D other to be row-like instead of # column-like. other = np.array(other).reshape(values.shape) # If lengths don't match (or len(other)==1), we will raise # inside expressions.where, see test_series_where # Note: expressions.where may upcast. result = expressions.where(~icond, values, other) # The np_can_hold_element check _should_ ensure that we always # have result.dtype == self.dtype here. if transpose: result = result.T return [self.make_block(result)] def fillna( self, value, limit: int | None = None, inplace: bool = False, downcast=None, using_cow: bool = False, ) -> list[Block]: """ fillna on the block with the value. If we fail, then convert to ObjectBlock and try again """ # Caller is responsible for validating limit; if int it is strictly positive inplace = validate_bool_kwarg(inplace, "inplace") if not self._can_hold_na: # can short-circuit the isna call noop = True else: mask = isna(self.values) mask, noop = validate_putmask(self.values, mask) if noop: # we can't process the value, but nothing to do if inplace: if using_cow: return [self.copy(deep=False)] # Arbitrarily imposing the convention that we ignore downcast # on no-op when inplace=True return [self] else: # GH#45423 consistent downcasting on no-ops. nb = self.copy(deep=not using_cow) nbs = nb._maybe_downcast([nb], downcast=downcast, using_cow=using_cow) return nbs if limit is not None: mask[mask.cumsum(self.ndim - 1) > limit] = False if inplace: nbs = self.putmask(mask.T, value, using_cow=using_cow) else: # without _downcast, we would break # test_fillna_dtype_conversion_equiv_replace nbs = self.where(value, ~mask.T, _downcast=False) # Note: blk._maybe_downcast vs self._maybe_downcast(nbs) # makes a difference bc blk may have object dtype, which has # different behavior in _maybe_downcast. 
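        # Gloss (my addition): if filling upcast a block to object dtype,
        # blk._maybe_downcast can run object-dtype inference and bring it
        # back to a tighter dtype, a path that self._maybe_downcast on the
        # original block would not take.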
return extend_blocks( [ blk._maybe_downcast([blk], downcast=downcast, using_cow=using_cow) for blk in nbs ] ) def interpolate( self, *, method: FillnaOptions = "pad", axis: AxisInt = 0, index: Index | None = None, inplace: bool = False, limit: int | None = None, limit_direction: str = "forward", limit_area: str | None = None, fill_value: Any | None = None, downcast: str | None = None, using_cow: bool = False, **kwargs, ) -> list[Block]: inplace = validate_bool_kwarg(inplace, "inplace") if not self._can_hold_na: # If there are no NAs, then interpolate is a no-op if using_cow: return [self.copy(deep=False)] return [self] if inplace else [self.copy()] try: m = missing.clean_fill_method(method) except ValueError: m = None if m is None and self.dtype.kind != "f": # only deal with floats # bc we already checked that can_hold_na, we don't have int dtype here # test_interp_basic checks that we make a copy here if using_cow: return [self.copy(deep=False)] return [self] if inplace else [self.copy()] if self.is_object and self.ndim == 2 and self.shape[0] != 1 and axis == 0: # split improves performance in ndarray.copy() return self.split_and_operate( type(self).interpolate, method=method, axis=axis, index=index, inplace=inplace, limit=limit, limit_direction=limit_direction, limit_area=limit_area, fill_value=fill_value, downcast=downcast, **kwargs, ) refs = None if inplace: if using_cow and self.refs.has_reference(): data = self.values.copy() else: data = self.values refs = self.refs else: data = self.values.copy() data = cast(np.ndarray, data) # bc overridden by ExtensionBlock missing.interpolate_array_2d( data, method=method, axis=axis, index=index, limit=limit, limit_direction=limit_direction, limit_area=limit_area, fill_value=fill_value, **kwargs, ) nb = self.make_block_same_class(data, refs=refs) return nb._maybe_downcast([nb], downcast, using_cow) def diff(self, n: int, axis: AxisInt = 1) -> list[Block]: """return block for the diff of the values""" # only reached with ndim == 2 and axis == 1 new_values = algos.diff(self.values, n, axis=axis) return [self.make_block(values=new_values)] def shift( self, periods: int, axis: AxisInt = 0, fill_value: Any = None ) -> list[Block]: """shift the block by periods, possibly upcast""" # convert integer to float if necessary. need to do a lot more than # that, handle boolean etc also # Note: periods is never 0 here, as that is handled at the top of # NDFrame.shift. If that ever changes, we can do a check for periods=0 # and possibly avoid coercing. 
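        # Hedged example (my addition): shifting an int64 block with the
        # default fill_value introduces a missing value that int64 cannot
        # hold, so np_can_hold_element below raises LossySetitemError and
        # the block is coerced (typically to float64) before shifting.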
        if not lib.is_scalar(fill_value) and self.dtype != _dtype_obj:
            # with object dtype there is nothing to promote, and the user can
            # pass pretty much any weird fill_value they like
            # see test_shift_object_non_scalar_fill
            raise ValueError("fill_value must be a scalar")

        fill_value = self._standardize_fill_value(fill_value)

        try:
            # error: Argument 1 to "np_can_hold_element" has incompatible type
            # "Union[dtype[Any], ExtensionDtype]"; expected "dtype[Any]"
            casted = np_can_hold_element(
                self.dtype, fill_value  # type: ignore[arg-type]
            )
        except LossySetitemError:
            nb = self.coerce_to_target_dtype(fill_value)
            return nb.shift(periods, axis=axis, fill_value=fill_value)
        else:
            values = cast(np.ndarray, self.values)
            new_values = shift(values, periods, axis, casted)
            return [self.make_block(new_values)]

    def quantile(
        self,
        qs: Index,  # with dtype float64
        interpolation: QuantileInterpolation = "linear",
        axis: AxisInt = 0,
    ) -> Block:
        """
        compute the quantiles of the block's values

        Parameters
        ----------
        qs : Index
            The quantiles to be computed in float64.
        interpolation : str, default 'linear'
            Type of interpolation.
        axis : int, default 0
            Axis to compute.

        Returns
        -------
        Block
        """
        # We should always have ndim == 2 because Series dispatches to DataFrame
        assert self.ndim == 2
        assert axis == 1  # only ever called this way
        assert is_list_like(qs)  # caller is responsible for this

        result = quantile_compat(self.values, np.asarray(qs._values), interpolation)
        # ensure_block_shape needed for cases where we start with EA and result
        # is ndarray, e.g. IntegerArray, SparseArray
        result = ensure_block_shape(result, ndim=2)
        return new_block_2d(result, placement=self._mgr_locs)

    def round(self, decimals: int, using_cow: bool = False) -> Block:
        """
        Rounds the values.
        If the block is not of an integer or float dtype, nothing happens.
        This is consistent with DataFrame.round behavior.
        (Note: Series.round would raise)

        Parameters
        ----------
        decimals: int,
            Number of decimal places to round to.
            Caller is responsible for validating this
        using_cow: bool,
            Whether Copy on Write is enabled right now
        """
        if not self.is_numeric or self.is_bool:
            return self.copy(deep=not using_cow)
        refs = None
        # TODO: round only defined on BaseMaskedArray
        # Series also does this, so would need to fix both places
        # error: Item "ExtensionArray" of "Union[ndarray[Any, Any], ExtensionArray]"
        # has no attribute "round"
        values = self.values.round(decimals)  # type: ignore[union-attr]
        if values is self.values:
            refs = self.refs
            if not using_cow:
                # Normally would need to do this before, but
                # numpy only returns same array when round operation
                # is no-op
                # https://github.com/numpy/numpy/blob/486878b37fc7439a3b2b87747f50db9b62fea8eb/numpy/core/src/multiarray/calculation.c#L625-L636
                values = values.copy()
        return self.make_block_same_class(values, refs=refs)

    # ---------------------------------------------------------------------
    # Abstract Methods Overridden By EABackedBlock and NumpyBlock

    def delete(self, loc) -> list[Block]:
        """Deletes the locs from the block.

        We split the block to avoid copying the underlying data. We create
        new blocks for every connected segment of the initial block that is
        not deleted. The new blocks point to the initial array.
""" if not is_list_like(loc): loc = [loc] if self.ndim == 1: values = cast(np.ndarray, self.values) values = np.delete(values, loc) mgr_locs = self._mgr_locs.delete(loc) return [type(self)(values, placement=mgr_locs, ndim=self.ndim)] if np.max(loc) >= self.values.shape[0]: raise IndexError # Add one out-of-bounds indexer as maximum to collect # all columns after our last indexer if any loc = np.concatenate([loc, [self.values.shape[0]]]) mgr_locs_arr = self._mgr_locs.as_array new_blocks: list[Block] = [] previous_loc = -1 # TODO(CoW): This is tricky, if parent block goes out of scope # all split blocks are referencing each other even though they # don't share data refs = self.refs if self.refs.has_reference() else None for idx in loc: if idx == previous_loc + 1: # There is no column between current and last idx pass else: # No overload variant of "__getitem__" of "ExtensionArray" matches # argument type "Tuple[slice, slice]" values = self.values[previous_loc + 1 : idx, :] # type: ignore[call-overload] # noqa locs = mgr_locs_arr[previous_loc + 1 : idx] nb = type(self)( values, placement=BlockPlacement(locs), ndim=self.ndim, refs=refs ) new_blocks.append(nb) previous_loc = idx return new_blocks def is_view(self) -> bool: """return a boolean if I am possibly a view""" raise AbstractMethodError(self) def array_values(self) -> ExtensionArray: """ The array that Series.array returns. Always an ExtensionArray. """ raise AbstractMethodError(self) def get_values(self, dtype: DtypeObj | None = None) -> np.ndarray: """ return an internal format, currently just the ndarray this is often overridden to handle to_dense like operations """ raise AbstractMethodError(self) def values_for_json(self) -> np.ndarray: raise AbstractMethodError(self) class ExtensionBlock(libinternals.Block, EABackedBlock): """ Block for holding extension types. Notes ----- This holds all 3rd-party extension array types. It's also the immediate parent class for our internal extension types' blocks. ExtensionArrays are limited to 1-D. 
""" _can_consolidate = False _validate_ndim = False is_extension = True values: ExtensionArray def fillna( self, value, limit: int | None = None, inplace: bool = False, downcast=None, using_cow: bool = False, ) -> list[Block]: if is_interval_dtype(self.dtype): # Block.fillna handles coercion (test_fillna_interval) return super().fillna( value=value, limit=limit, inplace=inplace, downcast=downcast, using_cow=using_cow, ) if using_cow and self._can_hold_na and not self.values._hasna: refs = self.refs new_values = self.values else: refs = None new_values = self.values.fillna(value=value, method=None, limit=limit) nb = self.make_block_same_class(new_values, refs=refs) return nb._maybe_downcast([nb], downcast, using_cow=using_cow) def shape(self) -> Shape: # TODO(EA2D): override unnecessary with 2D EAs if self.ndim == 1: return (len(self.values),) return len(self._mgr_locs), len(self.values) def iget(self, i: int | tuple[int, int] | tuple[slice, int]): # In the case where we have a tuple[slice, int], the slice will always # be slice(None) # We _could_ make the annotation more specific, but mypy would # complain about override mismatch: # Literal[0] | tuple[Literal[0], int] | tuple[slice, int] # Note: only reached with self.ndim == 2 if isinstance(i, tuple): # TODO(EA2D): unnecessary with 2D EAs col, loc = i if not com.is_null_slice(col) and col != 0: raise IndexError(f"{self} only contains one item") if isinstance(col, slice): # the is_null_slice check above assures that col is slice(None) # so what we want is a view on all our columns and row loc if loc < 0: loc += len(self.values) # Note: loc:loc+1 vs [[loc]] makes a difference when called # from fast_xs because we want to get a view back. return self.values[loc : loc + 1] return self.values[loc] else: if i != 0: raise IndexError(f"{self} only contains one item") return self.values def set_inplace(self, locs, values: ArrayLike, copy: bool = False) -> None: # When an ndarray, we should have locs.tolist() == [0] # When a BlockPlacement we should have list(locs) == [0] if copy: self.values = self.values.copy() self.values[:] = values def _maybe_squeeze_arg(self, arg): """ If necessary, squeeze a (N, 1) ndarray to (N,) """ # e.g. if we are passed a 2D mask for putmask if ( isinstance(arg, (np.ndarray, ExtensionArray)) and arg.ndim == self.values.ndim + 1 ): # TODO(EA2D): unnecessary with 2D EAs assert arg.shape[1] == 1 # error: No overload variant of "__getitem__" of "ExtensionArray" # matches argument type "Tuple[slice, int]" arg = arg[:, 0] # type: ignore[call-overload] elif isinstance(arg, ABCDataFrame): # 2022-01-06 only reached for setitem # TODO: should we avoid getting here with DataFrame? assert arg.shape[1] == 1 arg = arg._ixs(0, axis=1)._values return arg def _unwrap_setitem_indexer(self, indexer): """ Adapt a 2D-indexer to our 1D values. This is intended for 'setitem', not 'iget' or '_slice'. """ # TODO: ATM this doesn't work for iget/_slice, can we change that? if isinstance(indexer, tuple) and len(indexer) == 2: # TODO(EA2D): not needed with 2D EAs # Should never have length > 2. Caller is responsible for checking. # Length 1 is reached vis setitem_single_block and setitem_single_column # each of which pass indexer=(pi,) if all(isinstance(x, np.ndarray) and x.ndim == 2 for x in indexer): # GH#44703 went through indexing.maybe_convert_ix first, second = indexer if not ( second.size == 1 and (second == 0).all() and first.shape[1] == 1 ): raise NotImplementedError( "This should not be reached. 
Please report a bug at " "github.com/pandas-dev/pandas/" ) indexer = first[:, 0] elif lib.is_integer(indexer[1]) and indexer[1] == 0: # reached via setitem_single_block passing the whole indexer indexer = indexer[0] elif com.is_null_slice(indexer[1]): indexer = indexer[0] elif is_list_like(indexer[1]) and indexer[1][0] == 0: indexer = indexer[0] else: raise NotImplementedError( "This should not be reached. Please report a bug at " "github.com/pandas-dev/pandas/" ) return indexer def is_view(self) -> bool: """Extension arrays are never treated as views.""" return False def is_numeric(self): return self.values.dtype._is_numeric def _slice( self, slicer: slice | npt.NDArray[np.bool_] | npt.NDArray[np.intp] ) -> ExtensionArray: """ Return a slice of my values. Parameters ---------- slicer : slice, ndarray[int], or ndarray[bool] Valid (non-reducing) indexer for self.values. Returns ------- ExtensionArray """ # Notes: ndarray[bool] is only reachable when via getitem_mgr, which # is only for Series, i.e. self.ndim == 1. # return same dims as we currently have if self.ndim == 2: # reached via getitem_block via _slice_take_blocks_ax0 # TODO(EA2D): won't be necessary with 2D EAs if not isinstance(slicer, slice): raise AssertionError( "invalid slicing for a 1-ndim ExtensionArray", slicer ) # GH#32959 only full-slicers along fake-dim0 are valid # TODO(EA2D): won't be necessary with 2D EAs # range(1) instead of self._mgr_locs to avoid exception on [::-1] # see test_iloc_getitem_slice_negative_step_ea_block new_locs = range(1)[slicer] if not len(new_locs): raise AssertionError( "invalid slicing for a 1-ndim ExtensionArray", slicer ) slicer = slice(None) return self.values[slicer] def getitem_block_index(self, slicer: slice) -> ExtensionBlock: """ Perform __getitem__-like specialized to slicing along index. """ # GH#42787 in principle this is equivalent to values[..., slicer], but we don't # require subclasses of ExtensionArray to support that form (for now). new_values = self.values[slicer] return type(self)(new_values, self._mgr_locs, ndim=self.ndim, refs=self.refs) def diff(self, n: int, axis: AxisInt = 1) -> list[Block]: # only reached with ndim == 2 and axis == 1 # TODO(EA2D): Can share with NDArrayBackedExtensionBlock new_values = algos.diff(self.values, n, axis=0) return [self.make_block(values=new_values)] def shift( self, periods: int, axis: AxisInt = 0, fill_value: Any = None ) -> list[Block]: """ Shift the block by `periods`. Dispatches to underlying ExtensionArray and re-boxes in an ExtensionBlock. """ new_values = self.values.shift(periods=periods, fill_value=fill_value) return [self.make_block_same_class(new_values)] def _unstack( self, unstacker, fill_value, new_placement: npt.NDArray[np.intp], needs_masking: npt.NDArray[np.bool_], ): # ExtensionArray-safe unstack. # We override ObjectBlock._unstack, which unstacks directly on the # values of the array. For EA-backed blocks, this would require # converting to a 2-D ndarray of objects. # Instead, we unstack an ndarray of integer positions, followed by # a `take` on the actual values. 
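        # Rough sketch of the trick (my paraphrase): for values
        # ['a', 'b', 'c'] the unstacker first reshapes the positions
        # [0, 1, 2] into the 2D layout, then each output column does
        # values.take(column_positions), so the ExtensionArray is never
        # forced through a 2-D object ndarray.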
# Caller is responsible for ensuring self.shape[-1] == len(unstacker.index) new_values, mask = unstacker.arange_result # Note: these next two lines ensure that # mask.sum() == sum(len(nb.mgr_locs) for nb in blocks) # which the calling function needs in order to pass verify_integrity=False # to the BlockManager constructor new_values = new_values.T[mask] new_placement = new_placement[mask] # needs_masking[i] calculated once in BlockManager.unstack tells # us if there are any -1s in the relevant indices. When False, # that allows us to go through a faster path in 'take', among # other things avoiding e.g. Categorical._validate_scalar. blocks = [ # TODO: could cast to object depending on fill_value? type(self)( self.values.take( indices, allow_fill=needs_masking[i], fill_value=fill_value ), BlockPlacement(place), ndim=2, ) for i, (indices, place) in enumerate(zip(new_values, new_placement)) ] return blocks, mask class DatetimeTZBlock(DatetimeLikeBlock): """implement a datetime64 block with a tz attribute""" values: DatetimeArray __slots__ = () is_extension = True _validate_ndim = True _can_consolidate = False # Don't use values_for_json from DatetimeLikeBlock since it is # an invalid optimization here(drop the tz) values_for_json = NDArrayBackedExtensionBlock.values_for_json def maybe_coerce_values(values: ArrayLike) -> ArrayLike: """ Input validation for values passed to __init__. Ensure that any datetime64/timedelta64 dtypes are in nanoseconds. Ensure that we do not have string dtypes. Parameters ---------- values : np.ndarray or ExtensionArray Returns ------- values : np.ndarray or ExtensionArray """ # Caller is responsible for ensuring PandasArray is already extracted. if isinstance(values, np.ndarray): values = ensure_wrapped_if_datetimelike(values) if issubclass(values.dtype.type, str): values = np.array(values, dtype=object) if isinstance(values, (DatetimeArray, TimedeltaArray)) and values.freq is not None: # freq is only stored in DatetimeIndex/TimedeltaIndex, not in Series/DataFrame values = values._with_freq(None) return values def get_block_type(dtype: DtypeObj): """ Find the appropriate Block subclass to use for the given values and dtype. Parameters ---------- dtype : numpy or pandas dtype Returns ------- cls : class, subclass of Block """ # We use kind checks because it is much more performant # than is_foo_dtype kind = dtype.kind cls: type[Block] if isinstance(dtype, SparseDtype): # Need this first(ish) so that Sparse[datetime] is sparse cls = ExtensionBlock elif isinstance(dtype, DatetimeTZDtype): cls = DatetimeTZBlock elif isinstance(dtype, PeriodDtype): cls = NDArrayBackedExtensionBlock elif isinstance(dtype, ExtensionDtype): # Note: need to be sure PandasArray is unwrapped before we get here cls = ExtensionBlock elif kind in ["M", "m"]: cls = DatetimeLikeBlock elif kind in ["f", "c", "i", "u", "b"]: cls = NumericBlock else: cls = ObjectBlock return cls def check_ndim(values, placement: BlockPlacement, ndim: int) -> None: """ ndim inference and validation. Validates that values.ndim and ndim are consistent. Validates that len(values) and len(placement) are consistent. Parameters ---------- values : array-like placement : BlockPlacement ndim : int Raises ------ ValueError : the number of dimensions do not match """ if values.ndim > ndim: # Check for both np.ndarray and ExtensionArray raise ValueError( "Wrong number of dimensions. 
" f"values.ndim > ndim [{values.ndim} > {ndim}]" ) if not is_1d_only_ea_dtype(values.dtype): # TODO(EA2D): special case not needed with 2D EAs if values.ndim != ndim: raise ValueError( "Wrong number of dimensions. " f"values.ndim != ndim [{values.ndim} != {ndim}]" ) if len(placement) != len(values): raise ValueError( f"Wrong number of items passed {len(values)}, " f"placement implies {len(placement)}" ) elif ndim == 2 and len(placement) != 1: # TODO(EA2D): special case unnecessary with 2D EAs raise ValueError("need to split") def extract_pandas_array( values: np.ndarray | ExtensionArray, dtype: DtypeObj | None, ndim: int ) -> tuple[np.ndarray | ExtensionArray, DtypeObj | None]: """ Ensure that we don't allow PandasArray / PandasDtype in internals. """ # For now, blocks should be backed by ndarrays when possible. if isinstance(values, ABCPandasArray): values = values.to_numpy() if ndim and ndim > 1: # TODO(EA2D): special case not needed with 2D EAs values = np.atleast_2d(values) if isinstance(dtype, PandasDtype): dtype = dtype.numpy_dtype return values, dtype def ensure_block_shape(values: ArrayLike, ndim: int = 1) -> ArrayLike: """ Reshape if possible to have values.ndim == ndim. """ if values.ndim < ndim: if not is_1d_only_ea_dtype(values.dtype): # TODO(EA2D): https://github.com/pandas-dev/pandas/issues/23023 # block.shape is incorrect for "2D" ExtensionArrays # We can't, and don't need to, reshape. values = cast("np.ndarray | DatetimeArray | TimedeltaArray", values) values = values.reshape(1, -1) return values The provided code snippet includes necessary dependencies for implementing the `make_block` function. Write a Python function `def make_block( values, placement, klass=None, ndim=None, dtype: Dtype | None = None ) -> Block` to solve the following problem: This is a pseudo-public analogue to blocks.new_block. We ask that downstream libraries use this rather than any fully-internal APIs, including but not limited to: - core.internals.blocks.make_block - Block.make_block - Block.make_block_same_class - Block.__init__ Here is the function: def make_block( values, placement, klass=None, ndim=None, dtype: Dtype | None = None ) -> Block: """ This is a pseudo-public analogue to blocks.new_block. 
We ask that downstream libraries use this rather than any fully-internal APIs, including but not limited to: - core.internals.blocks.make_block - Block.make_block - Block.make_block_same_class - Block.__init__ """ if dtype is not None: dtype = pandas_dtype(dtype) values, dtype = extract_pandas_array(values, dtype, ndim) if klass is ExtensionBlock and is_period_dtype(values.dtype): # GH-44681 changed PeriodArray to be stored in the 2D # NDArrayBackedExtensionBlock instead of ExtensionBlock # -> still allow ExtensionBlock to be passed in this case for back compat klass = None if klass is None: dtype = dtype or values.dtype klass = get_block_type(dtype) elif klass is DatetimeTZBlock and not is_datetime64tz_dtype(values.dtype): # pyarrow calls get here values = DatetimeArray._simple_new(values, dtype=dtype) if not isinstance(placement, BlockPlacement): placement = BlockPlacement(placement) ndim = maybe_infer_ndim(values, placement, ndim) if is_datetime64tz_dtype(values.dtype) or is_period_dtype(values.dtype): # GH#41168 ensure we can pass 1D dt64tz values # More generally, any EA dtype that isn't is_1d_only_ea_dtype values = extract_array(values, extract_numpy=True) values = ensure_block_shape(values, ndim) check_ndim(values, placement, ndim) values = maybe_coerce_values(values) return klass(values, ndim=ndim, placement=placement)
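A minimal usage sketch (my illustration, not part of the original snippet): it assumes a pandas version where this function is importable as `pandas.core.internals.api.make_block`, and the array, placement, and expected output below are illustrative.

import numpy as np
from pandas.core.internals.api import make_block

# Two block items (frame columns) of three values each; float64 payload.
values = np.arange(6, dtype=np.float64).reshape(2, 3)

# Place the block's two items at positions 0 and 1 of the manager; a plain
# list is accepted and wrapped in a BlockPlacement internally.
blk = make_block(values, placement=[0, 1], ndim=2)

print(type(blk).__name__)    # a numeric Block subclass for float64 data
print(blk.dtype, blk.shape)  # float64 (2, 3)

Passing `klass` or `dtype` explicitly exists mainly for backward compatibility with downstream callers (e.g. the pyarrow branch above); with both left as None, the block class is inferred from the values' dtype.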
This is a pseudo-public analogue to blocks.new_block. We ask that downstream libraries use this rather than any fully-internal APIs, including but not limited to: - core.internals.blocks.make_block - Block.make_block - Block.make_block_same_class - Block.__init__
173,077
from __future__ import annotations import copy as cp import itertools from typing import ( TYPE_CHECKING, Sequence, cast, ) import numpy as np from pandas._libs import ( NaT, internals as libinternals, ) from pandas._libs.missing import NA from pandas._typing import ( ArrayLike, AxisInt, DtypeObj, Manager, Shape, ) from pandas.util._decorators import cache_readonly from pandas.core.dtypes.astype import astype_array from pandas.core.dtypes.cast import ( ensure_dtype_can_hold_na, find_common_type, ) from pandas.core.dtypes.common import ( is_1d_only_ea_dtype, is_dtype_equal, is_scalar, needs_i8_conversion, ) from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, ExtensionDtype, ) from pandas.core.dtypes.missing import ( is_valid_na_for_dtype, isna, isna_all, ) import pandas.core.algorithms as algos from pandas.core.arrays import ( DatetimeArray, ExtensionArray, ) from pandas.core.arrays.sparse import SparseDtype from pandas.core.construction import ensure_wrapped_if_datetimelike from pandas.core.internals.array_manager import ( ArrayManager, NullArrayProxy, ) from pandas.core.internals.blocks import ( ensure_block_shape, new_block_2d, ) from pandas.core.internals.managers import BlockManager def _concatenate_array_managers( mgrs_indexers, axes: list[Index], concat_axis: AxisInt, copy: bool ) -> Manager: """ Concatenate array managers into one. Parameters ---------- mgrs_indexers : list of (ArrayManager, {axis: indexer,...}) tuples axes : list of Index concat_axis : int copy : bool Returns ------- ArrayManager """ # reindex all arrays mgrs = [] for mgr, indexers in mgrs_indexers: axis1_made_copy = False for ax, indexer in indexers.items(): mgr = mgr.reindex_indexer( axes[ax], indexer, axis=ax, allow_dups=True, use_na_proxy=True ) if ax == 1 and indexer is not None: axis1_made_copy = True if copy and concat_axis == 0 and not axis1_made_copy: # for concat_axis 1 we will always get a copy through concat_arrays mgr = mgr.copy() mgrs.append(mgr) if concat_axis == 1: # concatting along the rows -> concat the reindexed arrays # TODO(ArrayManager) doesn't yet preserve the correct dtype arrays = [ concat_arrays([mgrs[i].arrays[j] for i in range(len(mgrs))]) for j in range(len(mgrs[0].arrays)) ] else: # concatting along the columns -> combine reindexed arrays in a single manager assert concat_axis == 0 arrays = list(itertools.chain.from_iterable([mgr.arrays for mgr in mgrs])) new_mgr = ArrayManager(arrays, [axes[1], axes[0]], verify_integrity=False) return new_mgr def _concat_managers_axis0( mgrs_indexers, axes: list[Index], copy: bool ) -> BlockManager: """ concat_managers specialized to concat_axis=0, with reindexing already having been done in _maybe_reindex_columns_na_proxy. 
""" had_reindexers = { i: len(mgrs_indexers[i][1]) > 0 for i in range(len(mgrs_indexers)) } mgrs_indexers = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers) mgrs = [x[0] for x in mgrs_indexers] offset = 0 blocks = [] for i, mgr in enumerate(mgrs): # If we already reindexed, then we definitely don't need another copy made_copy = had_reindexers[i] for blk in mgr.blocks: if made_copy: nb = blk.copy(deep=False) elif copy: nb = blk.copy() else: # by slicing instead of copy(deep=False), we get a new array # object, see test_concat_copy nb = blk.getitem_block(slice(None)) nb._mgr_locs = nb._mgr_locs.add(offset) blocks.append(nb) offset += len(mgr.items) result = BlockManager(tuple(blocks), axes) return result def _maybe_reindex_columns_na_proxy( axes: list[Index], mgrs_indexers: list[tuple[BlockManager, dict[int, np.ndarray]]] ) -> list[tuple[BlockManager, dict[int, np.ndarray]]]: """ Reindex along columns so that all of the BlockManagers being concatenated have matching columns. Columns added in this reindexing have dtype=np.void, indicating they should be ignored when choosing a column's final dtype. """ new_mgrs_indexers: list[tuple[BlockManager, dict[int, np.ndarray]]] = [] for mgr, indexers in mgrs_indexers: # For axis=0 (i.e. columns) we use_na_proxy and only_slice, so this # is a cheap reindexing. for i, indexer in indexers.items(): mgr = mgr.reindex_indexer( axes[i], indexers[i], axis=i, copy=False, only_slice=True, # only relevant for i==0 allow_dups=True, use_na_proxy=True, # only relevant for i==0 ) new_mgrs_indexers.append((mgr, {})) return new_mgrs_indexers def _get_mgr_concatenation_plan(mgr: BlockManager, indexers: dict[int, np.ndarray]): """ Construct concatenation plan for given block manager and indexers. Parameters ---------- mgr : BlockManager indexers : dict of {axis: indexer} Returns ------- plan : list of (BlockPlacement, JoinUnit) tuples """ assert len(indexers) == 0 # Calculate post-reindex shape, save for item axis which will be separate # for each block anyway. mgr_shape_list = list(mgr.shape) for ax, indexer in indexers.items(): mgr_shape_list[ax] = len(indexer) mgr_shape = tuple(mgr_shape_list) assert 0 not in indexers if mgr.is_single_block: blk = mgr.blocks[0] return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))] blknos = mgr.blknos blklocs = mgr.blklocs plan = [] for blkno, placements in libinternals.get_blkno_placements(blknos, group=False): assert placements.is_slice_like assert blkno != -1 join_unit_indexers = indexers.copy() shape_list = list(mgr_shape) shape_list[0] = len(placements) shape = tuple(shape_list) blk = mgr.blocks[blkno] ax0_blk_indexer = blklocs[placements.indexer] unit_no_ax0_reindexing = ( len(placements) == len(blk.mgr_locs) and # Fastpath detection of join unit not # needing to reindex its block: no ax0 # reindexing took place and block # placement was sequential before. ( (blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice.step == 1) or # Slow-ish detection: all indexer locs # are sequential (and length match is # checked above). (np.diff(ax0_blk_indexer) == 1).all() ) ) # Omit indexer if no item reindexing is required. if unit_no_ax0_reindexing: join_unit_indexers.pop(0, None) else: join_unit_indexers[0] = ax0_blk_indexer unit = JoinUnit(blk, shape, join_unit_indexers) plan.append((placements, unit)) return plan def _concatenate_join_units(join_units: list[JoinUnit], copy: bool) -> ArrayLike: """ Concatenate values from several join units along axis=1. 
""" empty_dtype = _get_empty_dtype(join_units) has_none_blocks = any(unit.block.dtype.kind == "V" for unit in join_units) upcasted_na = _dtype_to_na_value(empty_dtype, has_none_blocks) to_concat = [ ju.get_reindexed_values(empty_dtype=empty_dtype, upcasted_na=upcasted_na) for ju in join_units ] if len(to_concat) == 1: # Only one block, nothing to concatenate. concat_values = to_concat[0] if copy: if isinstance(concat_values, np.ndarray): # non-reindexed (=not yet copied) arrays are made into a view # in JoinUnit.get_reindexed_values if concat_values.base is not None: concat_values = concat_values.copy() else: concat_values = concat_values.copy() elif any(is_1d_only_ea_dtype(t.dtype) for t in to_concat): # TODO(EA2D): special case not needed if all EAs used HybridBlocks # error: No overload variant of "__getitem__" of "ExtensionArray" matches # argument type "Tuple[int, slice]" to_concat = [ t if is_1d_only_ea_dtype(t.dtype) else t[0, :] # type: ignore[call-overload] for t in to_concat ] concat_values = concat_compat(to_concat, axis=0, ea_compat_axis=True) concat_values = ensure_block_shape(concat_values, 2) else: concat_values = concat_compat(to_concat, axis=1) return concat_values def _is_uniform_join_units(join_units: list[JoinUnit]) -> bool: """ Check if the join units consist of blocks of uniform type that can be concatenated using Block.concat_same_type instead of the generic _concatenate_join_units (which uses `concat_compat`). """ first = join_units[0].block if first.dtype.kind == "V": return False return ( # exclude cases where a) ju.block is None or b) we have e.g. Int64+int64 all(type(ju.block) is type(first) for ju in join_units) and # e.g. DatetimeLikeBlock can be dt64 or td64, but these are not uniform all( is_dtype_equal(ju.block.dtype, first.dtype) # GH#42092 we only want the dtype_equal check for non-numeric blocks # (for now, may change but that would need a deprecation) or ju.block.dtype.kind in ["b", "i", "u"] for ju in join_units ) and # no blocks that would get missing values (can lead to type upcasts) # unless we're an extension dtype. all(not ju.is_na or ju.block.is_extension for ju in join_units) and # no blocks with indexers (as then the dimensions do not fit) all(not ju.indexers for ju in join_units) and # only use this path when there is something to concatenate len(join_units) > 1 ) def _combine_concat_plans(plans): """ Combine multiple concatenation plans into one. existing_plan is updated in-place. We only get here with concat_axis == 1. """ if len(plans) == 1: for p in plans[0]: yield p[0], [p[1]] else: # singleton list so we can modify it as a side-effect within _next_or_none num_ended = [0] def _next_or_none(seq): retval = next(seq, None) if retval is None: num_ended[0] += 1 return retval plans = list(map(iter, plans)) next_items = list(map(_next_or_none, plans)) while num_ended[0] != len(next_items): if num_ended[0] > 0: raise ValueError("Plan shapes are not aligned") placements, units = zip(*next_items) lengths = list(map(len, placements)) min_len, max_len = min(lengths), max(lengths) if min_len == max_len: yield placements[0], units next_items[:] = map(_next_or_none, plans) else: yielded_placement = None yielded_units = [None] * len(next_items) for i, (plc, unit) in enumerate(next_items): yielded_units[i] = unit if len(plc) > min_len: # _trim_join_unit updates unit in place, so only # placement needs to be sliced to skip min_len. 
next_items[i] = (plc[min_len:], _trim_join_unit(unit, min_len)) else: yielded_placement = plc next_items[i] = _next_or_none(plans[i]) yield yielded_placement, yielded_units AxisInt = int Manager = Union[ "ArrayManager", "SingleArrayManager", "BlockManager", "SingleBlockManager" ] def concat_compat(to_concat, axis: AxisInt = 0, ea_compat_axis: bool = False): """ provide concatenation of an array of arrays each of which is a single 'normalized' dtypes (in that for example, if it's object, then it is a non-datetimelike and provide a combined dtype for the resulting array that preserves the overall dtype if possible) Parameters ---------- to_concat : array of arrays axis : axis to provide concatenation ea_compat_axis : bool, default False For ExtensionArray compat, behave as if axis == 1 when determining whether to drop empty arrays. Returns ------- a single array, preserving the combined dtypes """ # filter empty arrays # 1-d dtypes always are included here def is_nonempty(x) -> bool: if x.ndim <= axis: return True return x.shape[axis] > 0 # If all arrays are empty, there's nothing to convert, just short-cut to # the concatenation, #3121. # # Creating an empty array directly is tempting, but the winnings would be # marginal given that it would still require shape & dtype calculation and # np.concatenate which has them both implemented is compiled. non_empties = [x for x in to_concat if is_nonempty(x)] if non_empties and axis == 0 and not ea_compat_axis: # ea_compat_axis see GH#39574 to_concat = non_empties dtypes = {obj.dtype for obj in to_concat} kinds = {obj.dtype.kind for obj in to_concat} contains_datetime = any( isinstance(dtype, (np.dtype, DatetimeTZDtype)) and dtype.kind in ["m", "M"] for dtype in dtypes ) or any(isinstance(obj, ABCExtensionArray) and obj.ndim > 1 for obj in to_concat) all_empty = not len(non_empties) single_dtype = len({x.dtype for x in to_concat}) == 1 any_ea = any(isinstance(x.dtype, ExtensionDtype) for x in to_concat) if contains_datetime: return _concat_datetime(to_concat, axis=axis) if any_ea: # we ignore axis here, as internally concatting with EAs is always # for axis=0 if not single_dtype: target_dtype = find_common_type([x.dtype for x in to_concat]) target_dtype = common_dtype_categorical_compat(to_concat, target_dtype) to_concat = [ astype_array(arr, target_dtype, copy=False) for arr in to_concat ] if isinstance(to_concat[0], ABCExtensionArray): # TODO: what about EA-backed Index? cls = type(to_concat[0]) return cls._concat_same_type(to_concat) else: return np.concatenate(to_concat) elif all_empty: # we have all empties, but may need to coerce the result dtype to # object if we have non-numeric type operands (numpy would otherwise # cast this to float) if len(kinds) != 1: if not len(kinds - {"i", "u", "f"}) or not len(kinds - {"b", "i", "u"}): # let numpy coerce pass else: # coerce to object to_concat = [x.astype("object") for x in to_concat] kinds = {"o"} result = np.concatenate(to_concat, axis=axis) if "b" in kinds and result.dtype.kind in ["i", "u", "f"]: # GH#39817 cast to object instead of casting bools to numeric result = result.astype(object, copy=False) return result def ensure_wrapped_if_datetimelike(arr): """ Wrap datetime64 and timedelta64 ndarrays in DatetimeArray/TimedeltaArray. 
""" if isinstance(arr, np.ndarray): if arr.dtype.kind == "M": from pandas.core.arrays import DatetimeArray return DatetimeArray._from_sequence(arr) elif arr.dtype.kind == "m": from pandas.core.arrays import TimedeltaArray return TimedeltaArray._from_sequence(arr) return arr class ArrayManager(BaseArrayManager): def ndim(self) -> Literal[2]: return 2 def __init__( self, arrays: list[np.ndarray | ExtensionArray], axes: list[Index], verify_integrity: bool = True, ) -> None: # Note: we are storing the axes in "_axes" in the (row, columns) order # which contrasts the order how it is stored in BlockManager self._axes = axes self.arrays = arrays if verify_integrity: self._axes = [ensure_index(ax) for ax in axes] arrays = [extract_pandas_array(x, None, 1)[0] for x in arrays] self.arrays = [maybe_coerce_values(arr) for arr in arrays] self._verify_integrity() def _verify_integrity(self) -> None: n_rows, n_columns = self.shape_proper if not len(self.arrays) == n_columns: raise ValueError( "Number of passed arrays must equal the size of the column Index: " f"{len(self.arrays)} arrays vs {n_columns} columns." ) for arr in self.arrays: if not len(arr) == n_rows: raise ValueError( "Passed arrays should have the same length as the rows Index: " f"{len(arr)} vs {n_rows} rows" ) if not isinstance(arr, (np.ndarray, ExtensionArray)): raise ValueError( "Passed arrays should be np.ndarray or ExtensionArray instances, " f"got {type(arr)} instead" ) if not arr.ndim == 1: raise ValueError( "Passed arrays should be 1-dimensional, got array with " f"{arr.ndim} dimensions instead." ) # -------------------------------------------------------------------- # Indexing def fast_xs(self, loc: int) -> SingleArrayManager: """ Return the array corresponding to `frame.iloc[loc]`. Parameters ---------- loc : int Returns ------- np.ndarray or ExtensionArray """ dtype = interleaved_dtype([arr.dtype for arr in self.arrays]) values = [arr[loc] for arr in self.arrays] if isinstance(dtype, ExtensionDtype): result = dtype.construct_array_type()._from_sequence(values, dtype=dtype) # for datetime64/timedelta64, the np.ndarray constructor cannot handle pd.NaT elif is_datetime64_ns_dtype(dtype): result = DatetimeArray._from_sequence(values, dtype=dtype)._ndarray elif is_timedelta64_ns_dtype(dtype): result = TimedeltaArray._from_sequence(values, dtype=dtype)._ndarray else: result = np.array(values, dtype=dtype) return SingleArrayManager([result], [self._axes[1]]) def get_slice(self, slobj: slice, axis: AxisInt = 0) -> ArrayManager: axis = self._normalize_axis(axis) if axis == 0: arrays = [arr[slobj] for arr in self.arrays] elif axis == 1: arrays = self.arrays[slobj] new_axes = list(self._axes) new_axes[axis] = new_axes[axis]._getitem_slice(slobj) return type(self)(arrays, new_axes, verify_integrity=False) def iget(self, i: int) -> SingleArrayManager: """ Return the data as a SingleArrayManager. """ values = self.arrays[i] return SingleArrayManager([values], [self._axes[0]]) def iget_values(self, i: int) -> ArrayLike: """ Return the data for column i as the values (ndarray or ExtensionArray). """ return self.arrays[i] def column_arrays(self) -> list[ArrayLike]: """ Used in the JSON C code to access column arrays. """ return [np.asarray(arr) for arr in self.arrays] def iset( self, loc: int | slice | np.ndarray, value: ArrayLike, inplace: bool = False ) -> None: """ Set new column(s). This changes the ArrayManager in-place, but replaces (an) existing column(s), not changing column values in-place). 
Parameters ---------- loc : integer, slice or boolean mask Positional location (already bounds checked) value : np.ndarray or ExtensionArray inplace : bool, default False Whether overwrite existing array as opposed to replacing it. """ # single column -> single integer index if lib.is_integer(loc): # TODO can we avoid needing to unpack this here? That means converting # DataFrame into 1D array when loc is an integer if isinstance(value, np.ndarray) and value.ndim == 2: assert value.shape[1] == 1 value = value[:, 0] # TODO we receive a datetime/timedelta64 ndarray from DataFrame._iset_item # but we should avoid that and pass directly the proper array value = maybe_coerce_values(value) assert isinstance(value, (np.ndarray, ExtensionArray)) assert value.ndim == 1 assert len(value) == len(self._axes[0]) self.arrays[loc] = value return # multiple columns -> convert slice or array to integer indices elif isinstance(loc, slice): indices = range( loc.start if loc.start is not None else 0, loc.stop if loc.stop is not None else self.shape_proper[1], loc.step if loc.step is not None else 1, ) else: assert isinstance(loc, np.ndarray) assert loc.dtype == "bool" # error: Incompatible types in assignment (expression has type "ndarray", # variable has type "range") indices = np.nonzero(loc)[0] # type: ignore[assignment] assert value.ndim == 2 assert value.shape[0] == len(self._axes[0]) for value_idx, mgr_idx in enumerate(indices): # error: No overload variant of "__getitem__" of "ExtensionArray" matches # argument type "Tuple[slice, int]" value_arr = value[:, value_idx] # type: ignore[call-overload] self.arrays[mgr_idx] = value_arr return def column_setitem( self, loc: int, idx: int | slice | np.ndarray, value, inplace_only: bool = False ) -> None: """ Set values ("setitem") into a single column (not setting the full column). This is a method on the ArrayManager level, to avoid creating an intermediate Series at the DataFrame level (`s = df[loc]; s[idx] = value`) """ if not is_integer(loc): raise TypeError("The column index should be an integer") arr = self.arrays[loc] mgr = SingleArrayManager([arr], [self._axes[0]]) if inplace_only: mgr.setitem_inplace(idx, value) else: new_mgr = mgr.setitem((idx,), value) # update existing ArrayManager in-place self.arrays[loc] = new_mgr.arrays[0] def insert(self, loc: int, item: Hashable, value: ArrayLike) -> None: """ Insert item at selected position. Parameters ---------- loc : int item : hashable value : np.ndarray or ExtensionArray """ # insert to the axis; this could possibly raise a TypeError new_axis = self.items.insert(loc, item) value = extract_array(value, extract_numpy=True) if value.ndim == 2: if value.shape[0] == 1: # error: No overload variant of "__getitem__" of "ExtensionArray" # matches argument type "Tuple[int, slice]" value = value[0, :] # type: ignore[call-overload] else: raise ValueError( f"Expected a 1D array, got an array with shape {value.shape}" ) value = maybe_coerce_values(value) # TODO self.arrays can be empty # assert len(value) == len(self.arrays[0]) # TODO is this copy needed? 
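        # Note on the copy (my reading): list.copy() duplicates only the
        # outer list of column arrays, not the arrays themselves, so the
        # insert stays cheap and any caller still holding the old list of
        # arrays is unaffected.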
arrays = self.arrays.copy() arrays.insert(loc, value) self.arrays = arrays self._axes[1] = new_axis def idelete(self, indexer) -> ArrayManager: """ Delete selected locations in-place (new block and array, same BlockManager) """ to_keep = np.ones(self.shape[0], dtype=np.bool_) to_keep[indexer] = False self.arrays = [self.arrays[i] for i in np.nonzero(to_keep)[0]] self._axes = [self._axes[0], self._axes[1][to_keep]] return self # -------------------------------------------------------------------- # Array-wise Operation def grouped_reduce(self: T, func: Callable) -> T: """ Apply grouped reduction function columnwise, returning a new ArrayManager. Parameters ---------- func : grouped reduction function Returns ------- ArrayManager """ result_arrays: list[np.ndarray] = [] result_indices: list[int] = [] for i, arr in enumerate(self.arrays): # grouped_reduce functions all expect 2D arrays arr = ensure_block_shape(arr, ndim=2) res = func(arr) if res.ndim == 2: # reverse of ensure_block_shape assert res.shape[0] == 1 res = res[0] result_arrays.append(res) result_indices.append(i) if len(result_arrays) == 0: nrows = 0 else: nrows = result_arrays[0].shape[0] index = Index(range(nrows)) columns = self.items # error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]"; # expected "List[Union[ndarray, ExtensionArray]]" return type(self)(result_arrays, [index, columns]) # type: ignore[arg-type] def reduce(self: T, func: Callable) -> T: """ Apply reduction function column-wise, returning a single-row ArrayManager. Parameters ---------- func : reduction function Returns ------- ArrayManager """ result_arrays: list[np.ndarray] = [] for i, arr in enumerate(self.arrays): res = func(arr, axis=0) # TODO NaT doesn't preserve dtype, so we need to ensure to create # a timedelta result array if original was timedelta # what if datetime results in timedelta? (eg std) dtype = arr.dtype if res is NaT else None result_arrays.append( sanitize_array([res], None, dtype=dtype) # type: ignore[arg-type] ) index = Index._simple_new(np.array([None], dtype=object)) # placeholder columns = self.items # error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]"; # expected "List[Union[ndarray, ExtensionArray]]" new_mgr = type(self)(result_arrays, [index, columns]) # type: ignore[arg-type] return new_mgr def operate_blockwise(self, other: ArrayManager, array_op) -> ArrayManager: """ Apply array_op blockwise with another (aligned) BlockManager. """ # TODO what if `other` is BlockManager ? left_arrays = self.arrays right_arrays = other.arrays result_arrays = [ array_op(left, right) for left, right in zip(left_arrays, right_arrays) ] return type(self)(result_arrays, self._axes) def quantile( self, *, qs: Index, # with dtype float64 axis: AxisInt = 0, transposed: bool = False, interpolation: QuantileInterpolation = "linear", ) -> ArrayManager: arrs = [ensure_block_shape(x, 2) for x in self.arrays] assert axis == 1 new_arrs = [ quantile_compat(x, np.asarray(qs._values), interpolation) for x in arrs ] for i, arr in enumerate(new_arrs): if arr.ndim == 2: assert arr.shape[0] == 1, arr.shape new_arrs[i] = arr[0] axes = [qs, self._axes[1]] return type(self)(new_arrs, axes) # ---------------------------------------------------------------- def unstack(self, unstacker, fill_value) -> ArrayManager: """ Return a BlockManager with all blocks unstacked. Parameters ---------- unstacker : reshape._Unstacker fill_value : Any fill_value for newly introduced missing values. 
Returns ------- unstacked : ArrayManager """ indexer, _ = unstacker._indexer_and_to_sort if unstacker.mask.all(): new_indexer = indexer allow_fill = False new_mask2D = None needs_masking = None else: new_indexer = np.full(unstacker.mask.shape, -1) new_indexer[unstacker.mask] = indexer allow_fill = True # calculating the full mask once and passing it to take_1d is faster # than letting take_1d calculate it in each repeated call new_mask2D = (~unstacker.mask).reshape(*unstacker.full_shape) needs_masking = new_mask2D.any(axis=0) new_indexer2D = new_indexer.reshape(*unstacker.full_shape) new_indexer2D = ensure_platform_int(new_indexer2D) new_arrays = [] for arr in self.arrays: for i in range(unstacker.full_shape[1]): if allow_fill: # error: Value of type "Optional[Any]" is not indexable [index] new_arr = take_1d( arr, new_indexer2D[:, i], allow_fill=needs_masking[i], # type: ignore[index] fill_value=fill_value, mask=new_mask2D[:, i], # type: ignore[index] ) else: new_arr = take_1d(arr, new_indexer2D[:, i], allow_fill=False) new_arrays.append(new_arr) new_index = unstacker.new_index new_columns = unstacker.get_new_columns(self._axes[1]) new_axes = [new_index, new_columns] return type(self)(new_arrays, new_axes, verify_integrity=False) def as_array( self, dtype=None, copy: bool = False, na_value: object = lib.no_default, ) -> np.ndarray: """ Convert the array manager data into a numpy array. Parameters ---------- dtype : object, default None Data type of the return array. copy : bool, default False If True then guarantee that a copy is returned. A value of False does not guarantee that the underlying data is not copied. na_value : object, default lib.no_default Value to be used as the missing value sentinel. Returns ------- arr : ndarray """ if len(self.arrays) == 0: empty_arr = np.empty(self.shape, dtype=float) return empty_arr.transpose() # We want to copy when na_value is provided to avoid # mutating the original object copy = copy or na_value is not lib.no_default if not dtype: dtype = interleaved_dtype([arr.dtype for arr in self.arrays]) if isinstance(dtype, SparseDtype): dtype = dtype.subtype elif isinstance(dtype, PandasDtype): dtype = dtype.numpy_dtype elif is_extension_array_dtype(dtype): dtype = "object" elif is_dtype_equal(dtype, str): dtype = "object" result = np.empty(self.shape_proper, dtype=dtype) for i, arr in enumerate(self.arrays): arr = arr.astype(dtype, copy=copy) result[:, i] = arr if na_value is not lib.no_default: result[isna(result)] = na_value return result def new_block_2d( values: ArrayLike, placement: BlockPlacement, refs: BlockValuesRefs | None = None ): # new_block specialized to case with # ndim=2 # isinstance(placement, BlockPlacement) # check_ndim/ensure_block_shape already checked klass = get_block_type(values.dtype) values = maybe_coerce_values(values) return klass(values, ndim=2, placement=placement, refs=refs) def ensure_block_shape(values: ArrayLike, ndim: int = 1) -> ArrayLike: """ Reshape if possible to have values.ndim == ndim. """ if values.ndim < ndim: if not is_1d_only_ea_dtype(values.dtype): # TODO(EA2D): https://github.com/pandas-dev/pandas/issues/23023 # block.shape is incorrect for "2D" ExtensionArrays # We can't, and don't need to, reshape. values = cast("np.ndarray | DatetimeArray | TimedeltaArray", values) values = values.reshape(1, -1) return values class BlockManager(libinternals.BlockManager, BaseBlockManager): """ BaseBlockManager that holds 2D blocks.
""" ndim = 2 # ---------------------------------------------------------------- # Constructors def __init__( self, blocks: Sequence[Block], axes: Sequence[Index], verify_integrity: bool = True, ) -> None: if verify_integrity: # Assertion disabled for performance # assert all(isinstance(x, Index) for x in axes) for block in blocks: if self.ndim != block.ndim: raise AssertionError( f"Number of Block dimensions ({block.ndim}) must equal " f"number of axes ({self.ndim})" ) # As of 2.0, the caller is responsible for ensuring that # DatetimeTZBlock with block.ndim == 2 has block.values.ndim ==2; # previously there was a special check for fastparquet compat. self._verify_integrity() def _verify_integrity(self) -> None: mgr_shape = self.shape tot_items = sum(len(x.mgr_locs) for x in self.blocks) for block in self.blocks: if block.shape[1:] != mgr_shape[1:]: raise_construction_error(tot_items, block.shape[1:], self.axes) if len(self.items) != tot_items: raise AssertionError( "Number of manager items must equal union of " f"block items\n# manager items: {len(self.items)}, # " f"tot_items: {tot_items}" ) def from_blocks(cls, blocks: list[Block], axes: list[Index]) -> BlockManager: """ Constructor for BlockManager and SingleBlockManager with same signature. """ return cls(blocks, axes, verify_integrity=False) # ---------------------------------------------------------------- # Indexing def fast_xs(self, loc: int) -> SingleBlockManager: """ Return the array corresponding to `frame.iloc[loc]`. Parameters ---------- loc : int Returns ------- np.ndarray or ExtensionArray """ if len(self.blocks) == 1: # TODO: this could be wrong if blk.mgr_locs is not slice(None)-like; # is this ruled out in the general case? result = self.blocks[0].iget((slice(None), loc)) # in the case of a single block, the new block is a view block = new_block( result, placement=slice(0, len(result)), ndim=1, refs=self.blocks[0].refs, ) return SingleBlockManager(block, self.axes[0]) dtype = interleaved_dtype([blk.dtype for blk in self.blocks]) n = len(self) # GH#46406 immutable_ea = isinstance(dtype, SparseDtype) if isinstance(dtype, ExtensionDtype) and not immutable_ea: cls = dtype.construct_array_type() result = cls._empty((n,), dtype=dtype) else: # error: Argument "dtype" to "empty" has incompatible type # "Union[Type[object], dtype[Any], ExtensionDtype, None]"; expected # "None" result = np.empty( n, dtype=object if immutable_ea else dtype # type: ignore[arg-type] ) result = ensure_wrapped_if_datetimelike(result) for blk in self.blocks: # Such assignment may incorrectly coerce NaT to None # result[blk.mgr_locs] = blk._slice((slice(None), loc)) for i, rl in enumerate(blk.mgr_locs): result[rl] = blk.iget((i, loc)) if immutable_ea: dtype = cast(ExtensionDtype, dtype) result = dtype.construct_array_type()._from_sequence(result, dtype=dtype) block = new_block(result, placement=slice(0, len(result)), ndim=1) return SingleBlockManager(block, self.axes[0]) def iget(self, i: int, track_ref: bool = True) -> SingleBlockManager: """ Return the data as a SingleBlockManager. """ block = self.blocks[self.blknos[i]] values = block.iget(self.blklocs[i]) # shortcut for select a single-dim from a 2-dim BM bp = BlockPlacement(slice(0, len(values))) nb = type(block)( values, placement=bp, ndim=1, refs=block.refs if track_ref else None ) return SingleBlockManager(nb, self.axes[1]) def iget_values(self, i: int) -> ArrayLike: """ Return the data for column i as the values (ndarray or ExtensionArray). Warning! 
The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution. """ # TODO(CoW) making the arrays read-only might make this safer to use? block = self.blocks[self.blknos[i]] values = block.iget(self.blklocs[i]) return values def column_arrays(self) -> list[np.ndarray]: """ Used in the JSON C code to access column arrays. This optimizes compared to using `iget_values` by converting each block only once, instead of once per column. Warning! This doesn't handle Copy-on-Write, so should be used with caution (current use case of consuming this in the JSON code is fine). """ # This is an optimized equivalent to # result = [self.iget_values(i) for i in range(len(self.items))] result: list[np.ndarray | None] = [None] * len(self.items) for blk in self.blocks: mgr_locs = blk._mgr_locs values = blk.values_for_json() if values.ndim == 1: # TODO(EA2D): special casing not needed with 2D EAs result[mgr_locs[0]] = values else: for i, loc in enumerate(mgr_locs): result[loc] = values[i] # error: Incompatible return value type (got "List[None]", # expected "List[ndarray[Any, Any]]") return result # type: ignore[return-value] def iset( self, loc: int | slice | np.ndarray, value: ArrayLike, inplace: bool = False ): """ Set new item in-place. Does not consolidate. Adds new Block if not contained in the current set of items """ # FIXME: refactor, clearly separate broadcasting & zip-like assignment # can prob also fix the various if tests for sparse/categorical if self._blklocs is None and self.ndim > 1: self._rebuild_blknos_and_blklocs() # Note: we exclude DTA/TDA here value_is_extension_type = is_1d_only_ea_dtype(value.dtype) if not value_is_extension_type: if value.ndim == 2: value = value.T else: value = ensure_block_shape(value, ndim=2) if value.shape[1:] != self.shape[1:]: raise AssertionError( "Shape of new values must be compatible with manager shape" ) if lib.is_integer(loc): # We have 6 tests where loc is _not_ an int. # In this case, get_blkno_placements will yield only one tuple, # containing (self._blknos[loc], BlockPlacement(slice(0, 1, 1))) # Check if we can use _iset_single fastpath loc = cast(int, loc) blkno = self.blknos[loc] blk = self.blocks[blkno] if len(blk._mgr_locs) == 1: # TODO: fastest way to check this? return self._iset_single( loc, value, inplace=inplace, blkno=blkno, blk=blk, ) # error: Incompatible types in assignment (expression has type # "List[Union[int, slice, ndarray]]", variable has type "Union[int, # slice, ndarray]") loc = [loc] # type: ignore[assignment] # categorical/sparse/datetimetz if value_is_extension_type: def value_getitem(placement): return value else: def value_getitem(placement): return value[placement.indexer] # Accessing public blknos ensures the public versions are initialized blknos = self.blknos[loc] blklocs = self.blklocs[loc].copy() unfit_mgr_locs = [] unfit_val_locs = [] removed_blknos = [] for blkno_l, val_locs in libinternals.get_blkno_placements(blknos, group=True): blk = self.blocks[blkno_l] blk_locs = blklocs[val_locs.indexer] if inplace and blk.should_store(value): # Updating inplace -> check if we need to do Copy-on-Write if using_copy_on_write() and not self._has_no_reference_block(blkno_l): self._iset_split_block(blkno_l, blk_locs, value_getitem(val_locs)) else: blk.set_inplace(blk_locs, value_getitem(val_locs)) continue else: unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs]) unfit_val_locs.append(val_locs) # If all block items are unfit, schedule the block for removal.
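# ("unfit" = the existing block cannot store the new values in place, so
# those column locations are collected and rebuilt into new blocks below)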
if len(val_locs) == len(blk.mgr_locs): removed_blknos.append(blkno_l) continue else: # Defer setting the new values to enable consolidation self._iset_split_block(blkno_l, blk_locs) if len(removed_blknos): # Remove blocks & update blknos accordingly is_deleted = np.zeros(self.nblocks, dtype=np.bool_) is_deleted[removed_blknos] = True new_blknos = np.empty(self.nblocks, dtype=np.intp) new_blknos.fill(-1) new_blknos[~is_deleted] = np.arange(self.nblocks - len(removed_blknos)) self._blknos = new_blknos[self._blknos] self.blocks = tuple( blk for i, blk in enumerate(self.blocks) if i not in set(removed_blknos) ) if unfit_val_locs: unfit_idxr = np.concatenate(unfit_mgr_locs) unfit_count = len(unfit_idxr) new_blocks: list[Block] = [] # TODO(CoW) is this always correct to assume that the new_blocks # are not referencing anything else? if value_is_extension_type: # This code (ab-)uses the fact that EA blocks contain only # one item. # TODO(EA2D): special casing unnecessary with 2D EAs new_blocks.extend( new_block_2d( values=value, placement=BlockPlacement(slice(mgr_loc, mgr_loc + 1)), ) for mgr_loc in unfit_idxr ) self._blknos[unfit_idxr] = np.arange(unfit_count) + len(self.blocks) self._blklocs[unfit_idxr] = 0 else: # unfit_val_locs contains BlockPlacement objects unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:]) new_blocks.append( new_block_2d( values=value_getitem(unfit_val_items), placement=BlockPlacement(unfit_idxr), ) ) self._blknos[unfit_idxr] = len(self.blocks) self._blklocs[unfit_idxr] = np.arange(unfit_count) self.blocks += tuple(new_blocks) # Newly created block's dtype may already be present. self._known_consolidated = False def _iset_split_block( self, blkno_l: int, blk_locs: np.ndarray | list[int], value: ArrayLike | None = None, ) -> None: """Removes columns from a block by splitting the block. Avoids copying the whole block through slicing and updates the manager after determining the new block structure. Optionally adds a new block, otherwise has to be done by the caller. Parameters ---------- blkno_l: The block number to operate on, relevant for updating the manager blk_locs: The locations of our block that should be deleted. value: The value to set as a replacement. """ blk = self.blocks[blkno_l] if self._blklocs is None: self._rebuild_blknos_and_blklocs() nbs_tup = tuple(blk.delete(blk_locs)) if value is not None: locs = blk.mgr_locs.as_array[blk_locs] first_nb = new_block_2d(value, BlockPlacement(locs)) else: first_nb = nbs_tup[0] nbs_tup = tuple(nbs_tup[1:]) nr_blocks = len(self.blocks) blocks_tup = ( self.blocks[:blkno_l] + (first_nb,) + self.blocks[blkno_l + 1 :] + nbs_tup ) self.blocks = blocks_tup if not nbs_tup and value is not None: # No need to update anything if split did not happen return self._blklocs[first_nb.mgr_locs.indexer] = np.arange(len(first_nb)) for i, nb in enumerate(nbs_tup): self._blklocs[nb.mgr_locs.indexer] = np.arange(len(nb)) self._blknos[nb.mgr_locs.indexer] = i + nr_blocks def _iset_single( self, loc: int, value: ArrayLike, inplace: bool, blkno: int, blk: Block ) -> None: """ Fastpath for iset when we are only setting a single position and the Block currently in that position is itself single-column. In this case we can swap out the entire Block and blklocs and blknos are unaffected.
""" # Caller is responsible for verifying value.shape if inplace and blk.should_store(value): copy = False if using_copy_on_write() and not self._has_no_reference_block(blkno): # perform Copy-on-Write and clear the reference copy = True iloc = self.blklocs[loc] blk.set_inplace(slice(iloc, iloc + 1), value, copy=copy) return nb = new_block_2d(value, placement=blk._mgr_locs) old_blocks = self.blocks new_blocks = old_blocks[:blkno] + (nb,) + old_blocks[blkno + 1 :] self.blocks = new_blocks return def column_setitem( self, loc: int, idx: int | slice | np.ndarray, value, inplace_only: bool = False ) -> None: """ Set values ("setitem") into a single column (not setting the full column). This is a method on the BlockManager level, to avoid creating an intermediate Series at the DataFrame level (`s = df[loc]; s[idx] = value`) """ if using_copy_on_write() and not self._has_no_reference(loc): blkno = self.blknos[loc] # Split blocks to only copy the column we want to modify blk_loc = self.blklocs[loc] # Copy our values values = self.blocks[blkno].values if values.ndim == 1: values = values.copy() else: # Use [blk_loc] as indexer to keep ndim=2, this already results in a # copy values = values[[blk_loc]] self._iset_split_block(blkno, [blk_loc], values) # this manager is only created temporarily to mutate the values in place # so don't track references, otherwise the `setitem` would perform CoW again col_mgr = self.iget(loc, track_ref=False) if inplace_only: col_mgr.setitem_inplace(idx, value) else: new_mgr = col_mgr.setitem((idx,), value) self.iset(loc, new_mgr._block.values, inplace=True) def insert(self, loc: int, item: Hashable, value: ArrayLike) -> None: """ Insert item at selected position. Parameters ---------- loc : int item : hashable value : np.ndarray or ExtensionArray """ # insert to the axis; this could possibly raise a TypeError new_axis = self.items.insert(loc, item) if value.ndim == 2: value = value.T if len(value) > 1: raise ValueError( f"Expected a 1D array, got an array with shape {value.T.shape}" ) else: value = ensure_block_shape(value, ndim=self.ndim) bp = BlockPlacement(slice(loc, loc + 1)) # TODO(CoW) do we always "own" the passed `value`? block = new_block_2d(values=value, placement=bp) if not len(self.blocks): # Fastpath self._blklocs = np.array([0], dtype=np.intp) self._blknos = np.array([0], dtype=np.intp) else: self._insert_update_mgr_locs(loc) self._insert_update_blklocs_and_blknos(loc) self.axes[0] = new_axis self.blocks += (block,) self._known_consolidated = False if sum(not block.is_extension for block in self.blocks) > 100: warnings.warn( "DataFrame is highly fragmented. This is usually the result " "of calling `frame.insert` many times, which has poor performance. " "Consider joining all columns at once using pd.concat(axis=1) " "instead. To get a de-fragmented frame, use `newframe = frame.copy()`", PerformanceWarning, stacklevel=find_stack_level(), ) def _insert_update_mgr_locs(self, loc) -> None: """ When inserting a new Block at location 'loc', we increment all of the mgr_locs of blocks above that by one. """ for blkno, count in _fast_count_smallints(self.blknos[loc:]): # .620 this way, .326 of which is in increment_above blk = self.blocks[blkno] blk._mgr_locs = blk._mgr_locs.increment_above(loc) def _insert_update_blklocs_and_blknos(self, loc) -> None: """ When inserting a new Block at location 'loc', we update our _blklocs and _blknos. 
""" # Accessing public blklocs ensures the public versions are initialized if loc == self.blklocs.shape[0]: # np.append is a lot faster, let's use it if we can. self._blklocs = np.append(self._blklocs, 0) self._blknos = np.append(self._blknos, len(self.blocks)) elif loc == 0: # np.append is a lot faster, let's use it if we can. self._blklocs = np.append(self._blklocs[::-1], 0)[::-1] self._blknos = np.append(self._blknos[::-1], len(self.blocks))[::-1] else: new_blklocs, new_blknos = libinternals.update_blklocs_and_blknos( self.blklocs, self.blknos, loc, len(self.blocks) ) self._blklocs = new_blklocs self._blknos = new_blknos def idelete(self, indexer) -> BlockManager: """ Delete selected locations, returning a new BlockManager. """ is_deleted = np.zeros(self.shape[0], dtype=np.bool_) is_deleted[indexer] = True taker = (~is_deleted).nonzero()[0] nbs = self._slice_take_blocks_ax0(taker, only_slice=True) new_columns = self.items[~is_deleted] axes = [new_columns, self.axes[1]] return type(self)(tuple(nbs), axes, verify_integrity=False) # ---------------------------------------------------------------- # Block-wise Operation def grouped_reduce(self: T, func: Callable) -> T: """ Apply grouped reduction function blockwise, returning a new BlockManager. Parameters ---------- func : grouped reduction function Returns ------- BlockManager """ result_blocks: list[Block] = [] for blk in self.blocks: if blk.is_object: # split on object-dtype blocks bc some columns may raise # while others do not. for sb in blk._split(): applied = sb.apply(func) result_blocks = extend_blocks(applied, result_blocks) else: applied = blk.apply(func) result_blocks = extend_blocks(applied, result_blocks) if len(result_blocks) == 0: nrows = 0 else: nrows = result_blocks[0].values.shape[-1] index = Index(range(nrows)) return type(self).from_blocks(result_blocks, [self.axes[0], index]) def reduce(self: T, func: Callable) -> T: """ Apply reduction function blockwise, returning a single-row BlockManager. Parameters ---------- func : reduction function Returns ------- BlockManager """ # If 2D, we assume that we're operating column-wise assert self.ndim == 2 res_blocks: list[Block] = [] for blk in self.blocks: nbs = blk.reduce(func) res_blocks.extend(nbs) index = Index([None]) # placeholder new_mgr = type(self).from_blocks(res_blocks, [self.items, index]) return new_mgr def operate_blockwise(self, other: BlockManager, array_op) -> BlockManager: """ Apply array_op blockwise with another (aligned) BlockManager. """ return operate_blockwise(self, other, array_op) def _equal_values(self: BlockManager, other: BlockManager) -> bool: """ Used in .equals defined in base class. Only check the column values assuming shape and indexes have already been checked. """ return blockwise_all(self, other, array_equals) def quantile( self: T, *, qs: Index, # with dtype float 64 axis: AxisInt = 0, interpolation: QuantileInterpolation = "linear", ) -> T: """ Iterate over blocks applying quantile reduction. This routine is intended for reduction type operations and will do inference on the generated blocks. Parameters ---------- axis: reduction axis, default 0 consolidate: bool, default True. 
interpolation : type of interpolation, default 'linear' Returns ------- BlockManager """ # Series dispatches to DataFrame for quantile, which allows us to # simplify some of the code here and in the blocks assert self.ndim >= 2 assert is_list_like(qs) # caller is responsible for this assert axis == 1 # only ever called this way new_axes = list(self.axes) new_axes[1] = Index(qs, dtype=np.float64) blocks = [ blk.quantile(axis=axis, qs=qs, interpolation=interpolation) for blk in self.blocks ] return type(self)(blocks, new_axes) # ---------------------------------------------------------------- def unstack(self, unstacker, fill_value) -> BlockManager: """ Return a BlockManager with all blocks unstacked. Parameters ---------- unstacker : reshape._Unstacker fill_value : Any fill_value for newly introduced missing values. Returns ------- unstacked : BlockManager """ new_columns = unstacker.get_new_columns(self.items) new_index = unstacker.new_index allow_fill = not unstacker.mask_all if allow_fill: # calculating the full mask once and passing it to Block._unstack is # faster than calculating it in each repeated call new_mask2D = (~unstacker.mask).reshape(*unstacker.full_shape) needs_masking = new_mask2D.any(axis=0) else: needs_masking = np.zeros(unstacker.full_shape[1], dtype=bool) new_blocks: list[Block] = [] columns_mask: list[np.ndarray] = [] if len(self.items) == 0: factor = 1 else: fac = len(new_columns) / len(self.items) assert fac == int(fac) factor = int(fac) for blk in self.blocks: mgr_locs = blk.mgr_locs new_placement = mgr_locs.tile_for_unstack(factor) blocks, mask = blk._unstack( unstacker, fill_value, new_placement=new_placement, needs_masking=needs_masking, ) new_blocks.extend(blocks) columns_mask.extend(mask) # Block._unstack should ensure this holds, assert mask.sum() == sum(len(nb._mgr_locs) for nb in blocks) # In turn this ensures that in the BlockManager call below # we have len(new_columns) == sum(x.shape[0] for x in new_blocks) # which suffices to allow us to pass verify_integrity=False new_columns = new_columns[columns_mask] bm = BlockManager(new_blocks, [new_columns, new_index], verify_integrity=False) return bm def to_dict(self, copy: bool = True): """ Return a dict of str(dtype) -> BlockManager Parameters ---------- copy : bool, default True Returns ------- values : a dict of dtype -> BlockManager """ bd: dict[str, list[Block]] = {} for b in self.blocks: bd.setdefault(str(b.dtype), []).append(b) # TODO(EA2D): the combine will be unnecessary with 2D EAs return {dtype: self._combine(blocks, copy=copy) for dtype, blocks in bd.items()} def as_array( self, dtype: np.dtype | None = None, copy: bool = False, na_value: object = lib.no_default, ) -> np.ndarray: """ Convert the blockmanager data into a numpy array. Parameters ---------- dtype : np.dtype or None, default None Data type of the return array. copy : bool, default False If True then guarantee that a copy is returned. A value of False does not guarantee that the underlying data is not copied. na_value : object, default lib.no_default Value to be used as the missing value sentinel.
Returns ------- arr : ndarray """ # TODO(CoW) handle case where resulting array is a view if len(self.blocks) == 0: arr = np.empty(self.shape, dtype=float) return arr.transpose() # We want to copy when na_value is provided to avoid # mutating the original object copy = copy or na_value is not lib.no_default if self.is_single_block: blk = self.blocks[0] if blk.is_extension: # Avoid implicit conversion of extension blocks to object # error: Item "ndarray" of "Union[ndarray, ExtensionArray]" has no # attribute "to_numpy" arr = blk.values.to_numpy( # type: ignore[union-attr] dtype=dtype, na_value=na_value, ).reshape(blk.shape) else: arr = np.asarray(blk.get_values()) if dtype: arr = arr.astype(dtype, copy=False) if copy: arr = arr.copy() elif using_copy_on_write(): arr = arr.view() arr.flags.writeable = False else: arr = self._interleave(dtype=dtype, na_value=na_value) # The underlying data was copied within _interleave, so no need # to further copy if copy=True or setting na_value if na_value is not lib.no_default: arr[isna(arr)] = na_value return arr.transpose() def _interleave( self, dtype: np.dtype | None = None, na_value: object = lib.no_default, ) -> np.ndarray: """ Return ndarray from blocks with specified item order. Items must be contained in the blocks """ if not dtype: # Incompatible types in assignment (expression has type # "Optional[Union[dtype[Any], ExtensionDtype]]", variable has # type "Optional[dtype[Any]]") dtype = interleaved_dtype( # type: ignore[assignment] [blk.dtype for blk in self.blocks] ) # TODO: https://github.com/pandas-dev/pandas/issues/22791 # Give EAs some input on what happens here. Sparse needs this. if isinstance(dtype, SparseDtype): dtype = dtype.subtype dtype = cast(np.dtype, dtype) elif isinstance(dtype, ExtensionDtype): dtype = np.dtype("object") elif is_dtype_equal(dtype, str): dtype = np.dtype("object") result = np.empty(self.shape, dtype=dtype) itemmask = np.zeros(self.shape[0]) if dtype == np.dtype("object") and na_value is lib.no_default: # much more performant than using to_numpy below for blk in self.blocks: rl = blk.mgr_locs arr = blk.get_values(dtype) result[rl.indexer] = arr itemmask[rl.indexer] = 1 return result for blk in self.blocks: rl = blk.mgr_locs if blk.is_extension: # Avoid implicit conversion of extension blocks to object # error: Item "ndarray" of "Union[ndarray, ExtensionArray]" has no # attribute "to_numpy" arr = blk.values.to_numpy( # type: ignore[union-attr] dtype=dtype, na_value=na_value, ) else: arr = blk.get_values(dtype) result[rl.indexer] = arr itemmask[rl.indexer] = 1 if not itemmask.all(): raise AssertionError("Some items were not contained in blocks") return result # ---------------------------------------------------------------- # Consolidation def is_consolidated(self) -> bool: """ Return True if the blocks are consolidated, i.e. no two consolidatable blocks share the same dtype """ if not self._known_consolidated: self._consolidate_check() return self._is_consolidated def _consolidate_check(self) -> None: if len(self.blocks) == 1: # fastpath self._is_consolidated = True self._known_consolidated = True return dtypes = [blk.dtype for blk in self.blocks if blk._can_consolidate] self._is_consolidated = len(dtypes) == len(set(dtypes)) self._known_consolidated = True def _consolidate_inplace(self) -> None: # In general, _consolidate_inplace should only be called via # DataFrame._consolidate_inplace, otherwise we will fail to invalidate # the DataFrame's _item_cache. The exception is for newly-created # BlockManager objects not yet attached to a DataFrame.
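# _consolidate merges same-dtype blocks into single blocks, restoring the
# invariant that _consolidate_check verifies above.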
if not self.is_consolidated(): self.blocks = _consolidate(self.blocks) self._is_consolidated = True self._known_consolidated = True self._rebuild_blknos_and_blklocs() The provided code snippet includes necessary dependencies for implementing the `concatenate_managers` function. Write a Python function `def concatenate_managers( mgrs_indexers, axes: list[Index], concat_axis: AxisInt, copy: bool ) -> Manager` to solve the following problem: Concatenate block managers into one. Parameters ---------- mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples axes : list of Index concat_axis : int copy : bool Returns ------- BlockManager Here is the function: def concatenate_managers( mgrs_indexers, axes: list[Index], concat_axis: AxisInt, copy: bool ) -> Manager: """ Concatenate block managers into one. Parameters ---------- mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples axes : list of Index concat_axis : int copy : bool Returns ------- BlockManager """ # TODO(ArrayManager) this assumes that all managers are of the same type if isinstance(mgrs_indexers[0][0], ArrayManager): return _concatenate_array_managers(mgrs_indexers, axes, concat_axis, copy) # Assertions disabled for performance # for tup in mgrs_indexers: # # caller is responsible for ensuring this # indexers = tup[1] # assert concat_axis not in indexers if concat_axis == 0: return _concat_managers_axis0(mgrs_indexers, axes, copy) mgrs_indexers = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers) concat_plans = [ _get_mgr_concatenation_plan(mgr, indexers) for mgr, indexers in mgrs_indexers ] concat_plan = _combine_concat_plans(concat_plans) blocks = [] for placement, join_units in concat_plan: unit = join_units[0] blk = unit.block if len(join_units) == 1 and not join_units[0].indexers: values = blk.values if copy: values = values.copy() else: values = values.view() fastpath = True elif _is_uniform_join_units(join_units): vals = [ju.block.values for ju in join_units] if not blk.is_extension: # _is_uniform_join_units ensures a single dtype, so # we can use np.concatenate, which is more performant # than concat_compat values = np.concatenate(vals, axis=1) else: # TODO(EA2D): special-casing not needed with 2D EAs values = concat_compat(vals, axis=1) values = ensure_block_shape(values, ndim=2) values = ensure_wrapped_if_datetimelike(values) fastpath = blk.values.dtype == values.dtype else: values = _concatenate_join_units(join_units, copy=copy) fastpath = False if fastpath: b = blk.make_block_same_class(values, placement=placement) else: b = new_block_2d(values, placement=placement) blocks.append(b) return BlockManager(tuple(blocks), axes)
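A minimal usage sketch (an addition, not part of the original prompt): `concatenate_managers` is internal, so the sketch below drives it indirectly through the public `pd.concat` API and then inspects the resulting manager. The `_mgr` attribute and the `nblocks` property are assumed from the pandas 1.5/2.0 internals shown above.

import pandas as pd

left = pd.DataFrame({"a": [1, 2, 3]})
right = pd.DataFrame({"b": [4.0, 5.0, 6.0]})

# Column-wise concat: internally pandas builds (manager, indexers) pairs
# and dispatches to concatenate_managers to stitch the blocks together.
result = pd.concat([left, right], axis=1)

# One block per dtype survives (int64 and float64 here), assuming the
# default BlockManager backend rather than ArrayManager.
print(result._mgr.nblocks)  # -> 2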
Concatenate block managers into one. Parameters ---------- mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples axes : list of Index concat_axis : int copy : bool Returns ------- BlockManager
173,078
from __future__ import annotations from typing import ( TYPE_CHECKING, Iterator, NamedTuple, ) from pandas._typing import ArrayLike def _iter_block_pairs( left: BlockManager, right: BlockManager ) -> Iterator[BlockPairInfo]: # At this point we have already checked the parent DataFrames for # assert rframe._indexed_same(lframe) for blk in left.blocks: locs = blk.mgr_locs blk_vals = blk.values left_ea = blk_vals.ndim == 1 rblks = right._slice_take_blocks_ax0(locs.indexer, only_slice=True) # Assertions are disabled for performance, but should hold: # if left_ea: # assert len(locs) == 1, locs # assert len(rblks) == 1, rblks # assert rblks[0].shape[0] == 1, rblks[0].shape for rblk in rblks: right_ea = rblk.values.ndim == 1 lvals, rvals = _get_same_shape_values(blk, rblk, left_ea, right_ea) info = BlockPairInfo(lvals, rvals, locs, left_ea, right_ea, rblk) yield info def _reset_block_mgr_locs(nbs: list[Block], locs) -> None: """ Reset mgr_locs to correspond to our original DataFrame. """ for nb in nbs: nblocs = locs[nb.mgr_locs.indexer] nb.mgr_locs = nblocs # Assertions are disabled for performance, but should hold: # assert len(nblocs) == nb.shape[0], (len(nblocs), nb.shape) # assert all(x in locs.as_array for x in nb.mgr_locs.as_array) class Block(PandasObject): """ Canonical n-dimensional unit of homogeneous dtype contained in a pandas data structure. Index-ignorant; let the container take care of that """ values: np.ndarray | ExtensionArray ndim: int refs: BlockValuesRefs __init__: Callable __slots__ = () is_numeric = False is_object = False is_extension = False _can_consolidate = True _validate_ndim = True def _consolidate_key(self): return self._can_consolidate, self.dtype.name def _can_hold_na(self) -> bool: """ Can we store NA values in this Block? """ dtype = self.dtype if isinstance(dtype, np.dtype): return dtype.kind not in ["b", "i", "u"] return dtype._can_hold_na def is_bool(self) -> bool: """ We can be bool if a) we are bool dtype or b) object dtype with bool objects. """ return self.values.dtype == np.dtype(bool) def external_values(self): return external_values(self.values) def fill_value(self): # Used in reindex_indexer return na_value_for_dtype(self.dtype, compat=False) def _standardize_fill_value(self, value): # if we are passed a scalar None, convert it here if self.dtype != _dtype_obj and is_valid_na_for_dtype(value, self.dtype): value = self.fill_value return value def mgr_locs(self) -> BlockPlacement: return self._mgr_locs def mgr_locs(self, new_mgr_locs: BlockPlacement) -> None: self._mgr_locs = new_mgr_locs def make_block( self, values, placement=None, refs: BlockValuesRefs | None = None ) -> Block: """ Create a new block, with type inference; propagate any values that are not specified """ if placement is None: placement = self._mgr_locs if self.is_extension: values = ensure_block_shape(values, ndim=self.ndim) # TODO: perf by not going through new_block # We assume maybe_coerce_values has already been called return new_block(values, placement=placement, ndim=self.ndim, refs=refs) def make_block_same_class( self, values, placement: BlockPlacement | None = None, refs: BlockValuesRefs | None = None, ) -> Block: """Wrap given values in a block of same type as self.""" # Pre-2.0 we called ensure_wrapped_if_datetimelike because fastparquet # relied on it, as of 2.0 the caller is responsible for this.
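# Unlike make_block above, there is no dtype-based re-dispatch here: the
# result is constructed directly as type(self), so the caller must ensure
# the values are compatible with this block subclass.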
if placement is None: placement = self._mgr_locs # We assume maybe_coerce_values has already been called return type(self)(values, placement=placement, ndim=self.ndim, refs=refs) def __repr__(self) -> str: # don't want to print out all of the items here name = type(self).__name__ if self.ndim == 1: result = f"{name}: {len(self)} dtype: {self.dtype}" else: shape = " x ".join([str(s) for s in self.shape]) result = f"{name}: {self.mgr_locs.indexer}, {shape}, dtype: {self.dtype}" return result def __len__(self) -> int: return len(self.values) def getitem_block(self, slicer: slice | npt.NDArray[np.intp]) -> Block: """ Perform __getitem__-like, return result as block. Only supports slices that preserve dimensionality. """ # Note: the only place where we are called with ndarray[intp] # is from internals.concat, and we can verify that never happens # with 1-column blocks, i.e. never for ExtensionBlock. new_mgr_locs = self._mgr_locs[slicer] new_values = self._slice(slicer) refs = self.refs if isinstance(slicer, slice) else None return type(self)(new_values, new_mgr_locs, self.ndim, refs=refs) def getitem_block_columns( self, slicer: slice, new_mgr_locs: BlockPlacement ) -> Block: """ Perform __getitem__-like, return result as block. Only supports slices that preserve dimensionality. """ new_values = self._slice(slicer) if new_values.ndim != self.values.ndim: raise ValueError("Only same dim slicing is allowed") return type(self)(new_values, new_mgr_locs, self.ndim, refs=self.refs) def _can_hold_element(self, element: Any) -> bool: """require the same dtype as ourselves""" element = extract_array(element, extract_numpy=True) return can_hold_element(self.values, element) def should_store(self, value: ArrayLike) -> bool: """ Should we set self.values[indexer] = value inplace or do we need to cast? Parameters ---------- value : np.ndarray or ExtensionArray Returns ------- bool """ # faster equivalent to is_dtype_equal(value.dtype, self.dtype) try: return value.dtype == self.dtype except TypeError: return False # --------------------------------------------------------------------- # Apply/Reduce and Helpers def apply(self, func, **kwargs) -> list[Block]: """ apply the function to my values; return a block if we are not one """ result = func(self.values, **kwargs) return self._split_op_result(result) def reduce(self, func) -> list[Block]: # We will apply the function and reshape the result into a single-row # Block with the same mgr_locs; squeezing will be done at a higher level assert self.ndim == 2 result = func(self.values) if self.values.ndim == 1: # TODO(EA2D): special case not needed with 2D EAs res_values = np.array([[result]]) else: res_values = result.reshape(-1, 1) nb = self.make_block(res_values) return [nb] def _split_op_result(self, result: ArrayLike) -> list[Block]: # See also: split_and_operate if result.ndim > 1 and isinstance(result.dtype, ExtensionDtype): # TODO(EA2D): unnecessary with 2D EAs # if we get a 2D ExtensionArray, we need to split it into 1D pieces nbs = [] for i, loc in enumerate(self._mgr_locs): if not is_1d_only_ea_obj(result): vals = result[i : i + 1] else: vals = result[i] block = self.make_block(values=vals, placement=loc) nbs.append(block) return nbs nb = self.make_block(result) return [nb] def _split(self) -> list[Block]: """ Split a block into a list of single-column blocks. 
""" assert self.ndim == 2 new_blocks = [] for i, ref_loc in enumerate(self._mgr_locs): vals = self.values[slice(i, i + 1)] bp = BlockPlacement(ref_loc) nb = type(self)(vals, placement=bp, ndim=2, refs=self.refs) new_blocks.append(nb) return new_blocks def split_and_operate(self, func, *args, **kwargs) -> list[Block]: """ Split the block and apply func column-by-column. Parameters ---------- func : Block method *args **kwargs Returns ------- List[Block] """ assert self.ndim == 2 and self.shape[0] != 1 res_blocks = [] for nb in self._split(): rbs = func(nb, *args, **kwargs) res_blocks.extend(rbs) return res_blocks # --------------------------------------------------------------------- # Up/Down-casting def coerce_to_target_dtype(self, other) -> Block: """ coerce the current block to a dtype compat for other we will return a block, possibly object, and not raise we can also safely try to coerce to the same dtype and will receive the same block """ new_dtype = find_result_type(self.values, other) return self.astype(new_dtype, copy=False) def _maybe_downcast( self, blocks: list[Block], downcast=None, using_cow: bool = False ) -> list[Block]: if downcast is False: return blocks if self.dtype == _dtype_obj: # TODO: does it matter that self.dtype might not match blocks[i].dtype? # GH#44241 We downcast regardless of the argument; # respecting 'downcast=None' may be worthwhile at some point, # but ATM it breaks too much existing code. # split and convert the blocks return extend_blocks( [blk.convert(using_cow=using_cow, copy=not using_cow) for blk in blocks] ) if downcast is None: return blocks return extend_blocks([b._downcast_2d(downcast, using_cow) for b in blocks]) def _downcast_2d(self, dtype, using_cow: bool = False) -> list[Block]: """ downcast specialized to 2D case post-validation. Refactored to allow use of maybe_split. """ new_values = maybe_downcast_to_dtype(self.values, dtype=dtype) refs = self.refs if using_cow and new_values is self.values else None return [self.make_block(new_values, refs=refs)] def convert( self, *, copy: bool = True, using_cow: bool = False, ) -> list[Block]: """ attempt to coerce any object types to better types return a copy of the block (if copy = True) by definition we are not an ObjectBlock here! """ if not copy and using_cow: return [self.copy(deep=False)] return [self.copy()] if copy else [self] # --------------------------------------------------------------------- # Array-Like Methods def dtype(self) -> DtypeObj: return self.values.dtype def astype( self, dtype: DtypeObj, copy: bool = False, errors: IgnoreRaise = "raise", using_cow: bool = False, ) -> Block: """ Coerce to the new dtype. Parameters ---------- dtype : np.dtype or ExtensionDtype copy : bool, default False copy if indicated errors : str, {'raise', 'ignore'}, default 'raise' - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. On error return original object using_cow: bool, default False Signaling if copy on write copy logic is used. 
Returns ------- Block """ values = self.values new_values = astype_array_safe(values, dtype, copy=copy, errors=errors) new_values = maybe_coerce_values(new_values) refs = None if using_cow and astype_is_view(values.dtype, new_values.dtype): refs = self.refs newb = self.make_block(new_values, refs=refs) if newb.shape != self.shape: raise TypeError( f"cannot set astype for copy = [{copy}] for dtype " f"({self.dtype.name} [{self.shape}]) to different shape " f"({newb.dtype.name} [{newb.shape}])" ) return newb def to_native_types(self, na_rep: str = "nan", quoting=None, **kwargs) -> Block: """convert to our native types format""" result = to_native_types(self.values, na_rep=na_rep, quoting=quoting, **kwargs) return self.make_block(result) def copy(self, deep: bool = True) -> Block: """copy constructor""" values = self.values refs: BlockValuesRefs | None if deep: values = values.copy() refs = None else: refs = self.refs return type(self)(values, placement=self._mgr_locs, ndim=self.ndim, refs=refs) # --------------------------------------------------------------------- # Replace def replace( self, to_replace, value, inplace: bool = False, # mask may be pre-computed if we're called from replace_list mask: npt.NDArray[np.bool_] | None = None, using_cow: bool = False, ) -> list[Block]: """ replace the to_replace value with value; possibly create new blocks here. This is just a call to putmask. """ # Note: the checks we do in NDFrame.replace ensure we never get # here with listlike to_replace or value, as those cases # go through replace_list values = self.values if isinstance(values, Categorical): # TODO: avoid special-casing # GH49404 if using_cow and (self.refs.has_reference() or not inplace): blk = self.copy() elif using_cow: blk = self.copy(deep=False) else: blk = self if inplace else self.copy() values = cast(Categorical, blk.values) values._replace(to_replace=to_replace, value=value, inplace=True) return [blk] if not self._can_hold_element(to_replace): # We cannot hold `to_replace`, so we know immediately that # replacing it is a no-op. # Note: If to_replace were a list, NDFrame.replace would call # replace_list instead of replace. if using_cow: return [self.copy(deep=False)] else: return [self] if inplace else [self.copy()] if mask is None: mask = missing.mask_missing(values, to_replace) if not mask.any(): # Note: we get here with test_replace_extension_other incorrectly # bc _can_hold_element is incorrect. if using_cow: return [self.copy(deep=False)] else: return [self] if inplace else [self.copy()] elif self._can_hold_element(value): # TODO(CoW): Maybe split here as well into columns where mask has True # and rest?
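# Under CoW with inplace=True, a deep copy is only made when another object
# still references these values; otherwise the existing array can be
# mutated directly by putmask_inplace below.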
if using_cow: if inplace: blk = self.copy(deep=self.refs.has_reference()) else: blk = self.copy() else: blk = self if inplace else self.copy() putmask_inplace(blk.values, mask, value) if not (self.is_object and value is None): # if the user *explicitly* gave None, we keep None, otherwise # may downcast to NaN blocks = blk.convert(copy=False, using_cow=using_cow) else: blocks = [blk] return blocks elif self.ndim == 1 or self.shape[0] == 1: if value is None or value is NA: blk = self.astype(np.dtype(object)) else: blk = self.coerce_to_target_dtype(value) return blk.replace( to_replace=to_replace, value=value, inplace=True, mask=mask, ) else: # split so that we only upcast where necessary blocks = [] for i, nb in enumerate(self._split()): blocks.extend( type(self).replace( nb, to_replace=to_replace, value=value, inplace=True, mask=mask[i : i + 1], using_cow=using_cow, ) ) return blocks def _replace_regex( self, to_replace, value, inplace: bool = False, mask=None, using_cow: bool = False, ) -> list[Block]: """ Replace elements by the given value. Parameters ---------- to_replace : object or pattern Scalar to replace or regular expression to match. value : object Replacement object. inplace : bool, default False Perform inplace modification. mask : array-like of bool, optional True indicates the corresponding element is ignored. using_cow: bool, default False Specifying if copy on write is enabled. Returns ------- List[Block] """ if not self._can_hold_element(to_replace): # i.e. only ObjectBlock, but could in principle include a # String ExtensionBlock if using_cow: return [self.copy(deep=False)] return [self] if inplace else [self.copy()] rx = re.compile(to_replace) if using_cow: if inplace and not self.refs.has_reference(): refs = self.refs new_values = self.values else: refs = None new_values = self.values.copy() else: refs = None new_values = self.values if inplace else self.values.copy() replace_regex(new_values, rx, value, mask) block = self.make_block(new_values, refs=refs) return block.convert(copy=False, using_cow=using_cow) def replace_list( self, src_list: Iterable[Any], dest_list: Sequence[Any], inplace: bool = False, regex: bool = False, using_cow: bool = False, ) -> list[Block]: """ See BlockManager.replace_list docstring.
""" values = self.values if isinstance(values, Categorical): # TODO: avoid special-casing # GH49404 if using_cow and inplace: blk = self.copy(deep=self.refs.has_reference()) else: blk = self if inplace else self.copy() values = cast(Categorical, blk.values) values._replace(to_replace=src_list, value=dest_list, inplace=True) return [blk] # Exclude anything that we know we won't contain pairs = [ (x, y) for x, y in zip(src_list, dest_list) if self._can_hold_element(x) ] if not len(pairs): if using_cow: return [self.copy(deep=False)] # shortcut, nothing to replace return [self] if inplace else [self.copy()] src_len = len(pairs) - 1 if is_string_dtype(values.dtype): # Calculate the mask once, prior to the call of comp # in order to avoid repeating the same computations na_mask = ~isna(values) masks: Iterable[npt.NDArray[np.bool_]] = ( extract_bool_array( cast( ArrayLike, compare_or_regex_search( values, s[0], regex=regex, mask=na_mask ), ) ) for s in pairs ) else: # GH#38086 faster if we know we dont need to check for regex masks = (missing.mask_missing(values, s[0]) for s in pairs) # Materialize if inplace = True, since the masks can change # as we replace if inplace: masks = list(masks) if using_cow and inplace: # Don't set up refs here, otherwise we will think that we have # references when we check again later rb = [self] else: rb = [self if inplace else self.copy()] for i, ((src, dest), mask) in enumerate(zip(pairs, masks)): convert = i == src_len # only convert once at the end new_rb: list[Block] = [] # GH-39338: _replace_coerce can split a block into # single-column blocks, so track the index so we know # where to index into the mask for blk_num, blk in enumerate(rb): if len(rb) == 1: m = mask else: mib = mask assert not isinstance(mib, bool) m = mib[blk_num : blk_num + 1] # error: Argument "mask" to "_replace_coerce" of "Block" has # incompatible type "Union[ExtensionArray, ndarray[Any, Any], bool]"; # expected "ndarray[Any, dtype[bool_]]" result = blk._replace_coerce( to_replace=src, value=dest, mask=m, inplace=inplace, regex=regex, using_cow=using_cow, ) if convert and blk.is_object and not all(x is None for x in dest_list): # GH#44498 avoid unwanted cast-back result = extend_blocks( [ b.convert(copy=True and not using_cow, using_cow=using_cow) for b in result ] ) new_rb.extend(result) rb = new_rb return rb def _replace_coerce( self, to_replace, value, mask: npt.NDArray[np.bool_], inplace: bool = True, regex: bool = False, using_cow: bool = False, ) -> list[Block]: """ Replace value corresponding to the given boolean array with another value. Parameters ---------- to_replace : object or pattern Scalar to replace or regular expression to match. value : object Replacement object. mask : np.ndarray[bool] True indicate corresponding element is ignored. inplace : bool, default True Perform inplace modification. regex : bool, default False If true, perform regular expression substitution. 
Returns ------- List[Block] """ if should_use_regex(regex, to_replace): return self._replace_regex( to_replace, value, inplace=inplace, mask=mask, ) else: if value is None: # gh-45601, gh-45836, gh-46634 if mask.any(): has_ref = self.refs.has_reference() nb = self.astype(np.dtype(object), copy=False, using_cow=using_cow) if (nb is self or using_cow) and not inplace: nb = nb.copy() elif inplace and has_ref and nb.refs.has_reference(): # no copy in astype and we had refs before nb = nb.copy() putmask_inplace(nb.values, mask, value) return [nb] if using_cow: return [self.copy(deep=False)] return [self] if inplace else [self.copy()] return self.replace( to_replace=to_replace, value=value, inplace=inplace, mask=mask, using_cow=using_cow, ) # --------------------------------------------------------------------- # 2D Methods - Shared by NumpyBlock and NDArrayBackedExtensionBlock # but not ExtensionBlock def _maybe_squeeze_arg(self, arg: np.ndarray) -> np.ndarray: """ For compatibility with 1D-only ExtensionArrays. """ return arg def _unwrap_setitem_indexer(self, indexer): """ For compatibility with 1D-only ExtensionArrays. """ return indexer # NB: this cannot be made cache_readonly because in mgr.set_values we pin # new .values that can have different shape GH#42631 def shape(self) -> Shape: return self.values.shape def iget(self, i: int | tuple[int, int] | tuple[slice, int]) -> np.ndarray: # In the case where we have a tuple[slice, int], the slice will always # be slice(None) # Note: only reached with self.ndim == 2 # Invalid index type "Union[int, Tuple[int, int], Tuple[slice, int]]" # for "Union[ndarray[Any, Any], ExtensionArray]"; expected type # "Union[int, integer[Any]]" return self.values[i] # type: ignore[index] def _slice( self, slicer: slice | npt.NDArray[np.bool_] | npt.NDArray[np.intp] ) -> ArrayLike: """return a slice of my values""" return self.values[slicer] def set_inplace(self, locs, values: ArrayLike, copy: bool = False) -> None: """ Modify block values in-place with new item value. If copy=True, first copy the underlying values in place before modifying (for Copy-on-Write). Notes ----- `set_inplace` never creates a new array or new Block, whereas `setitem` _may_ create a new array and always creates a new Block. Caller is responsible for checking values.dtype == self.dtype. """ if copy: self.values = self.values.copy() self.values[locs] = values def take_nd( self, indexer: npt.NDArray[np.intp], axis: AxisInt, new_mgr_locs: BlockPlacement | None = None, fill_value=lib.no_default, ) -> Block: """ Take values according to indexer and return them as a block. """ values = self.values if fill_value is lib.no_default: fill_value = self.fill_value allow_fill = False else: allow_fill = True # Note: algos.take_nd has upcast logic similar to coerce_to_target_dtype new_values = algos.take_nd( values, indexer, axis=axis, allow_fill=allow_fill, fill_value=fill_value ) # Called from three places in managers, all of which satisfy # these assertions if isinstance(self, ExtensionBlock): # NB: in this case, the 'axis' kwarg will be ignored in the # algos.take_nd call above. 
assert not (self.ndim == 1 and new_mgr_locs is None) assert not (axis == 0 and new_mgr_locs is None) if new_mgr_locs is None: new_mgr_locs = self._mgr_locs if not is_dtype_equal(new_values.dtype, self.dtype): return self.make_block(new_values, new_mgr_locs) else: return self.make_block_same_class(new_values, new_mgr_locs) def _unstack( self, unstacker, fill_value, new_placement: npt.NDArray[np.intp], needs_masking: npt.NDArray[np.bool_], ): """ Return a list of unstacked blocks of self Parameters ---------- unstacker : reshape._Unstacker fill_value : int Only used in ExtensionBlock._unstack new_placement : np.ndarray[np.intp] needs_masking : np.ndarray[bool] Returns ------- blocks : list of Block New blocks of unstacked values. mask : array-like of bool The mask of columns of `blocks` we should keep. """ new_values, mask = unstacker.get_new_values( self.values.T, fill_value=fill_value ) mask = mask.any(0) # TODO: in all tests we have mask.all(); can we rely on that? # Note: these next two lines ensure that # mask.sum() == sum(len(nb.mgr_locs) for nb in blocks) # which the calling function needs in order to pass verify_integrity=False # to the BlockManager constructor new_values = new_values.T[mask] new_placement = new_placement[mask] bp = BlockPlacement(new_placement) blocks = [new_block_2d(new_values, placement=bp)] return blocks, mask # --------------------------------------------------------------------- def setitem(self, indexer, value, using_cow: bool = False) -> Block: """ Attempt self.values[indexer] = value, possibly creating a new array. Parameters ---------- indexer : tuple, list-like, array-like, slice, int The subset of self.values to set value : object The value being set using_cow: bool, default False Signaling if CoW is used. Returns ------- Block Notes ----- `indexer` is a direct slice/positional indexer. `value` must be a compatible shape. """ value = self._standardize_fill_value(value) values = cast(np.ndarray, self.values) if self.ndim == 2: values = values.T # length checking check_setitem_lengths(indexer, value, values) value = extract_array(value, extract_numpy=True) try: casted = np_can_hold_element(values.dtype, value) except LossySetitemError: # current dtype cannot store value, coerce to common dtype nb = self.coerce_to_target_dtype(value) return nb.setitem(indexer, value) else: if self.dtype == _dtype_obj: # TODO: avoid having to construct values[indexer] vi = values[indexer] if lib.is_list_like(vi): # checking lib.is_scalar here fails on # test_iloc_setitem_custom_object casted = setitem_datetimelike_compat(values, len(vi), casted) if using_cow and self.refs.has_reference(): values = values.copy() self = self.make_block_same_class( values.T if values.ndim == 2 else values ) values[indexer] = casted return self def putmask(self, mask, new, using_cow: bool = False) -> list[Block]: """ putmask the data to the block; it is possible that we may create a block with a new dtype Return the resulting block(s).
Parameters ---------- mask : np.ndarray[bool], SparseArray[bool], or BooleanArray new : a ndarray/object using_cow: bool, default False Returns ------- List[Block] """ orig_mask = mask values = cast(np.ndarray, self.values) mask, noop = validate_putmask(values.T, mask) assert not isinstance(new, (ABCIndex, ABCSeries, ABCDataFrame)) if new is lib.no_default: new = self.fill_value new = self._standardize_fill_value(new) new = extract_array(new, extract_numpy=True) if noop: if using_cow: return [self.copy(deep=False)] return [self] try: casted = np_can_hold_element(values.dtype, new) if using_cow and self.refs.has_reference(): # Do this here to avoid copying twice values = values.copy() self = self.make_block_same_class(values) putmask_without_repeat(values.T, mask, casted) if using_cow: return [self.copy(deep=False)] return [self] except LossySetitemError: if self.ndim == 1 or self.shape[0] == 1: # no need to split columns if not is_list_like(new): # using just new[indexer] can't save us the need to cast return self.coerce_to_target_dtype(new).putmask(mask, new) else: indexer = mask.nonzero()[0] nb = self.setitem(indexer, new[indexer], using_cow=using_cow) return [nb] else: is_array = isinstance(new, np.ndarray) res_blocks = [] nbs = self._split() for i, nb in enumerate(nbs): n = new if is_array: # we have a different value per-column n = new[:, i : i + 1] submask = orig_mask[:, i : i + 1] rbs = nb.putmask(submask, n, using_cow=using_cow) res_blocks.extend(rbs) return res_blocks def where( self, other, cond, _downcast: str | bool = "infer", using_cow: bool = False ) -> list[Block]: """ evaluate the block; return result block(s) from the result Parameters ---------- other : a ndarray/object cond : np.ndarray[bool], SparseArray[bool], or BooleanArray _downcast : str or None, default "infer" Private because we only specify it when calling from fillna. Returns ------- List[Block] """ assert cond.ndim == self.ndim assert not isinstance(other, (ABCIndex, ABCSeries, ABCDataFrame)) transpose = self.ndim == 2 cond = extract_bool_array(cond) # EABlocks override where values = cast(np.ndarray, self.values) orig_other = other if transpose: values = values.T icond, noop = validate_putmask(values, ~cond) if noop: # GH-39595: Always return a copy; short-circuit up/downcasting if using_cow: return [self.copy(deep=False)] return [self.copy()] if other is lib.no_default: other = self.fill_value other = self._standardize_fill_value(other) try: # try/except here is equivalent to a self._can_hold_element check, # but this gets us back 'casted' which we will re-use below; # without using 'casted', expressions.where may do unwanted upcasts. casted = np_can_hold_element(values.dtype, other) except (ValueError, TypeError, LossySetitemError): # we cannot coerce, return a compat dtype if self.ndim == 1 or self.shape[0] == 1: # no need to split columns block = self.coerce_to_target_dtype(other) blocks = block.where(orig_other, cond, using_cow=using_cow) return self._maybe_downcast( blocks, downcast=_downcast, using_cow=using_cow ) else: # since _maybe_downcast would split blocks anyway, we # can avoid some potential upcast/downcast by splitting # on the front end. 
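# Splitting into single-column blocks lets each column pick its own target
# dtype, so only the columns that actually cannot hold `other` get upcast.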
is_array = isinstance(other, (np.ndarray, ExtensionArray)) res_blocks = [] nbs = self._split() for i, nb in enumerate(nbs): oth = other if is_array: # we have a different value per-column oth = other[:, i : i + 1] submask = cond[:, i : i + 1] rbs = nb.where( oth, submask, _downcast=_downcast, using_cow=using_cow ) res_blocks.extend(rbs) return res_blocks else: other = casted alt = setitem_datetimelike_compat(values, icond.sum(), other) if alt is not other: if is_list_like(other) and len(other) < len(values): # call np.where with other to get the appropriate ValueError np.where(~icond, values, other) raise NotImplementedError( "This should not be reached; call to np.where above is " "expected to raise ValueError. Please report a bug at " "github.com/pandas-dev/pandas" ) result = values.copy() np.putmask(result, icond, alt) else: # By the time we get here, we should have all Series/Index # args extracted to ndarray if ( is_list_like(other) and not isinstance(other, np.ndarray) and len(other) == self.shape[-1] ): # If we don't do this broadcasting here, then expressions.where # will broadcast a 1D other to be row-like instead of # column-like. other = np.array(other).reshape(values.shape) # If lengths don't match (or len(other)==1), we will raise # inside expressions.where, see test_series_where # Note: expressions.where may upcast. result = expressions.where(~icond, values, other) # The np_can_hold_element check _should_ ensure that we always # have result.dtype == self.dtype here. if transpose: result = result.T return [self.make_block(result)] def fillna( self, value, limit: int | None = None, inplace: bool = False, downcast=None, using_cow: bool = False, ) -> list[Block]: """ fillna on the block with the value. If we fail, then convert to ObjectBlock and try again """ # Caller is responsible for validating limit; if int it is strictly positive inplace = validate_bool_kwarg(inplace, "inplace") if not self._can_hold_na: # can short-circuit the isna call noop = True else: mask = isna(self.values) mask, noop = validate_putmask(self.values, mask) if noop: # we can't process the value, but nothing to do if inplace: if using_cow: return [self.copy(deep=False)] # Arbitrarily imposing the convention that we ignore downcast # on no-op when inplace=True return [self] else: # GH#45423 consistent downcasting on no-ops. nb = self.copy(deep=not using_cow) nbs = nb._maybe_downcast([nb], downcast=downcast, using_cow=using_cow) return nbs if limit is not None: mask[mask.cumsum(self.ndim - 1) > limit] = False if inplace: nbs = self.putmask(mask.T, value, using_cow=using_cow) else: # without _downcast, we would break # test_fillna_dtype_conversion_equiv_replace nbs = self.where(value, ~mask.T, _downcast=False) # Note: blk._maybe_downcast vs self._maybe_downcast(nbs) # makes a difference bc blk may have object dtype, which has # different behavior in _maybe_downcast. 
return extend_blocks( [ blk._maybe_downcast([blk], downcast=downcast, using_cow=using_cow) for blk in nbs ] ) def interpolate( self, *, method: FillnaOptions = "pad", axis: AxisInt = 0, index: Index | None = None, inplace: bool = False, limit: int | None = None, limit_direction: str = "forward", limit_area: str | None = None, fill_value: Any | None = None, downcast: str | None = None, using_cow: bool = False, **kwargs, ) -> list[Block]: inplace = validate_bool_kwarg(inplace, "inplace") if not self._can_hold_na: # If there are no NAs, then interpolate is a no-op if using_cow: return [self.copy(deep=False)] return [self] if inplace else [self.copy()] try: m = missing.clean_fill_method(method) except ValueError: m = None if m is None and self.dtype.kind != "f": # only deal with floats # bc we already checked that can_hold_na, we don't have int dtype here # test_interp_basic checks that we make a copy here if using_cow: return [self.copy(deep=False)] return [self] if inplace else [self.copy()] if self.is_object and self.ndim == 2 and self.shape[0] != 1 and axis == 0: # split improves performance in ndarray.copy() return self.split_and_operate( type(self).interpolate, method=method, axis=axis, index=index, inplace=inplace, limit=limit, limit_direction=limit_direction, limit_area=limit_area, fill_value=fill_value, downcast=downcast, **kwargs, ) refs = None if inplace: if using_cow and self.refs.has_reference(): data = self.values.copy() else: data = self.values refs = self.refs else: data = self.values.copy() data = cast(np.ndarray, data) # bc overridden by ExtensionBlock missing.interpolate_array_2d( data, method=method, axis=axis, index=index, limit=limit, limit_direction=limit_direction, limit_area=limit_area, fill_value=fill_value, **kwargs, ) nb = self.make_block_same_class(data, refs=refs) return nb._maybe_downcast([nb], downcast, using_cow) def diff(self, n: int, axis: AxisInt = 1) -> list[Block]: """return block for the diff of the values""" # only reached with ndim == 2 and axis == 1 new_values = algos.diff(self.values, n, axis=axis) return [self.make_block(values=new_values)] def shift( self, periods: int, axis: AxisInt = 0, fill_value: Any = None ) -> list[Block]: """shift the block by periods, possibly upcast""" # convert integer to float if necessary. need to do a lot more than # that, handle boolean etc also # Note: periods is never 0 here, as that is handled at the top of # NDFrame.shift. If that ever changes, we can do a check for periods=0 # and possibly avoid coercing. 
        if not lib.is_scalar(fill_value) and self.dtype != _dtype_obj:
            # with object dtype there is nothing to promote, and the user can
            #  pass pretty much any weird fill_value they like
            # see test_shift_object_non_scalar_fill
            raise ValueError("fill_value must be a scalar")

        fill_value = self._standardize_fill_value(fill_value)

        try:
            # error: Argument 1 to "np_can_hold_element" has incompatible type
            # "Union[dtype[Any], ExtensionDtype]"; expected "dtype[Any]"
            casted = np_can_hold_element(
                self.dtype, fill_value  # type: ignore[arg-type]
            )
        except LossySetitemError:
            nb = self.coerce_to_target_dtype(fill_value)
            return nb.shift(periods, axis=axis, fill_value=fill_value)

        else:
            values = cast(np.ndarray, self.values)
            new_values = shift(values, periods, axis, casted)
            return [self.make_block(new_values)]

    def quantile(
        self,
        qs: Index,  # with dtype float64
        interpolation: QuantileInterpolation = "linear",
        axis: AxisInt = 0,
    ) -> Block:
        """
        Compute the quantiles of the block's values.

        Parameters
        ----------
        qs : Index
            The quantiles to be computed in float64.
        interpolation : str, default 'linear'
            Type of interpolation.
        axis : int, default 0
            Axis to compute.

        Returns
        -------
        Block
        """
        # We should always have ndim == 2 because Series dispatches to DataFrame
        assert self.ndim == 2
        assert axis == 1  # only ever called this way
        assert is_list_like(qs)  # caller is responsible for this

        result = quantile_compat(self.values, np.asarray(qs._values), interpolation)
        # ensure_block_shape needed for cases where we start with EA and result
        #  is ndarray, e.g. IntegerArray, SparseArray
        result = ensure_block_shape(result, ndim=2)
        return new_block_2d(result, placement=self._mgr_locs)

    def round(self, decimals: int, using_cow: bool = False) -> Block:
        """
        Rounds the values.
        If the block is not of an integer or float dtype, nothing happens.
        This is consistent with DataFrame.round behavior.
        (Note: Series.round would raise)

        Parameters
        ----------
        decimals : int
            Number of decimal places to round to.
            Caller is responsible for validating this.
        using_cow : bool
            Whether Copy on Write is enabled right now.
        """
        if not self.is_numeric or self.is_bool:
            return self.copy(deep=not using_cow)
        refs = None
        # TODO: round only defined on BaseMaskedArray
        # Series also does this, so would need to fix both places
        # error: Item "ExtensionArray" of "Union[ndarray[Any, Any], ExtensionArray]"
        # has no attribute "round"
        values = self.values.round(decimals)  # type: ignore[union-attr]
        if values is self.values:
            refs = self.refs
            if not using_cow:
                # Normally would need to do this before, but
                # numpy only returns same array when round operation
                # is no-op
                # https://github.com/numpy/numpy/blob/486878b37fc7439a3b2b87747f50db9b62fea8eb/numpy/core/src/multiarray/calculation.c#L625-L636
                values = values.copy()
        return self.make_block_same_class(values, refs=refs)

    # ---------------------------------------------------------------------
    # Abstract Methods Overridden By EABackedBlock and NumpyBlock

    def delete(self, loc) -> list[Block]:
        """Deletes the locs from the block.

        We split the block to avoid copying the underlying data. We create new
        blocks for every connected segment of the initial block that is not deleted.
        The new blocks point to the initial array.
""" if not is_list_like(loc): loc = [loc] if self.ndim == 1: values = cast(np.ndarray, self.values) values = np.delete(values, loc) mgr_locs = self._mgr_locs.delete(loc) return [type(self)(values, placement=mgr_locs, ndim=self.ndim)] if np.max(loc) >= self.values.shape[0]: raise IndexError # Add one out-of-bounds indexer as maximum to collect # all columns after our last indexer if any loc = np.concatenate([loc, [self.values.shape[0]]]) mgr_locs_arr = self._mgr_locs.as_array new_blocks: list[Block] = [] previous_loc = -1 # TODO(CoW): This is tricky, if parent block goes out of scope # all split blocks are referencing each other even though they # don't share data refs = self.refs if self.refs.has_reference() else None for idx in loc: if idx == previous_loc + 1: # There is no column between current and last idx pass else: # No overload variant of "__getitem__" of "ExtensionArray" matches # argument type "Tuple[slice, slice]" values = self.values[previous_loc + 1 : idx, :] # type: ignore[call-overload] # noqa locs = mgr_locs_arr[previous_loc + 1 : idx] nb = type(self)( values, placement=BlockPlacement(locs), ndim=self.ndim, refs=refs ) new_blocks.append(nb) previous_loc = idx return new_blocks def is_view(self) -> bool: """return a boolean if I am possibly a view""" raise AbstractMethodError(self) def array_values(self) -> ExtensionArray: """ The array that Series.array returns. Always an ExtensionArray. """ raise AbstractMethodError(self) def get_values(self, dtype: DtypeObj | None = None) -> np.ndarray: """ return an internal format, currently just the ndarray this is often overridden to handle to_dense like operations """ raise AbstractMethodError(self) def values_for_json(self) -> np.ndarray: raise AbstractMethodError(self) class BlockManager(libinternals.BlockManager, BaseBlockManager): """ BaseBlockManager that holds 2D blocks. """ ndim = 2 # ---------------------------------------------------------------- # Constructors def __init__( self, blocks: Sequence[Block], axes: Sequence[Index], verify_integrity: bool = True, ) -> None: if verify_integrity: # Assertion disabled for performance # assert all(isinstance(x, Index) for x in axes) for block in blocks: if self.ndim != block.ndim: raise AssertionError( f"Number of Block dimensions ({block.ndim}) must equal " f"number of axes ({self.ndim})" ) # As of 2.0, the caller is responsible for ensuring that # DatetimeTZBlock with block.ndim == 2 has block.values.ndim ==2; # previously there was a special check for fastparquet compat. self._verify_integrity() def _verify_integrity(self) -> None: mgr_shape = self.shape tot_items = sum(len(x.mgr_locs) for x in self.blocks) for block in self.blocks: if block.shape[1:] != mgr_shape[1:]: raise_construction_error(tot_items, block.shape[1:], self.axes) if len(self.items) != tot_items: raise AssertionError( "Number of manager items must equal union of " f"block items\n# manager items: {len(self.items)}, # " f"tot_items: {tot_items}" ) def from_blocks(cls, blocks: list[Block], axes: list[Index]) -> BlockManager: """ Constructor for BlockManager and SingleBlockManager with same signature. """ return cls(blocks, axes, verify_integrity=False) # ---------------------------------------------------------------- # Indexing def fast_xs(self, loc: int) -> SingleBlockManager: """ Return the array corresponding to `frame.iloc[loc]`. 
        Parameters
        ----------
        loc : int

        Returns
        -------
        np.ndarray or ExtensionArray
        """
        if len(self.blocks) == 1:
            # TODO: this could be wrong if blk.mgr_locs is not slice(None)-like;
            #  is this ruled out in the general case?
            result = self.blocks[0].iget((slice(None), loc))
            # in the case of a single block, the new block is a view
            block = new_block(
                result,
                placement=slice(0, len(result)),
                ndim=1,
                refs=self.blocks[0].refs,
            )
            return SingleBlockManager(block, self.axes[0])

        dtype = interleaved_dtype([blk.dtype for blk in self.blocks])

        n = len(self)

        # GH#46406
        immutable_ea = isinstance(dtype, SparseDtype)

        if isinstance(dtype, ExtensionDtype) and not immutable_ea:
            cls = dtype.construct_array_type()
            result = cls._empty((n,), dtype=dtype)
        else:
            # error: Argument "dtype" to "empty" has incompatible type
            # "Union[Type[object], dtype[Any], ExtensionDtype, None]"; expected
            # "None"
            result = np.empty(
                n, dtype=object if immutable_ea else dtype  # type: ignore[arg-type]
            )
            result = ensure_wrapped_if_datetimelike(result)

        for blk in self.blocks:
            # Such assignment may incorrectly coerce NaT to None
            # result[blk.mgr_locs] = blk._slice((slice(None), loc))
            for i, rl in enumerate(blk.mgr_locs):
                result[rl] = blk.iget((i, loc))

        if immutable_ea:
            dtype = cast(ExtensionDtype, dtype)
            result = dtype.construct_array_type()._from_sequence(result, dtype=dtype)

        block = new_block(result, placement=slice(0, len(result)), ndim=1)
        return SingleBlockManager(block, self.axes[0])

    def iget(self, i: int, track_ref: bool = True) -> SingleBlockManager:
        """
        Return the data as a SingleBlockManager.
        """
        block = self.blocks[self.blknos[i]]
        values = block.iget(self.blklocs[i])

        # shortcut for selecting a single dimension from a 2-dim BlockManager
        bp = BlockPlacement(slice(0, len(values)))
        nb = type(block)(
            values, placement=bp, ndim=1, refs=block.refs if track_ref else None
        )
        return SingleBlockManager(nb, self.axes[1])

    def iget_values(self, i: int) -> ArrayLike:
        """
        Return the data for column i as the values (ndarray or ExtensionArray).

        Warning! The returned array is a view but doesn't handle Copy-on-Write,
        so this should be used with caution.
        """
        # TODO(CoW) making the arrays read-only might make this safer to use?
        block = self.blocks[self.blknos[i]]
        values = block.iget(self.blklocs[i])
        return values

    def column_arrays(self) -> list[np.ndarray]:
        """
        Used in the JSON C code to access column arrays.
        This optimizes compared to using `iget_values` by converting each
        block's values to a numpy array only once up front.

        Warning! This doesn't handle Copy-on-Write, so should be used with
        caution (the current use case of consuming this in the JSON code is fine).
        """
        # This is an optimized equivalent to
        #  result = [self.iget_values(i) for i in range(len(self.items))]
        result: list[np.ndarray | None] = [None] * len(self.items)

        for blk in self.blocks:
            mgr_locs = blk._mgr_locs
            values = blk.values_for_json()
            if values.ndim == 1:
                # TODO(EA2D): special casing not needed with 2D EAs
                result[mgr_locs[0]] = values
            else:
                for i, loc in enumerate(mgr_locs):
                    result[loc] = values[i]

        # error: Incompatible return value type (got "List[None]",
        # expected "List[ndarray[Any, Any]]")
        return result  # type: ignore[return-value]

    def iset(
        self, loc: int | slice | np.ndarray, value: ArrayLike, inplace: bool = False
    ):
        """
        Set new item in-place. Does not consolidate.
Adds new Block if not contained in the current set of items """ # FIXME: refactor, clearly separate broadcasting & zip-like assignment # can prob also fix the various if tests for sparse/categorical if self._blklocs is None and self.ndim > 1: self._rebuild_blknos_and_blklocs() # Note: we exclude DTA/TDA here value_is_extension_type = is_1d_only_ea_dtype(value.dtype) if not value_is_extension_type: if value.ndim == 2: value = value.T else: value = ensure_block_shape(value, ndim=2) if value.shape[1:] != self.shape[1:]: raise AssertionError( "Shape of new values must be compatible with manager shape" ) if lib.is_integer(loc): # We have 6 tests where loc is _not_ an int. # In this case, get_blkno_placements will yield only one tuple, # containing (self._blknos[loc], BlockPlacement(slice(0, 1, 1))) # Check if we can use _iset_single fastpath loc = cast(int, loc) blkno = self.blknos[loc] blk = self.blocks[blkno] if len(blk._mgr_locs) == 1: # TODO: fastest way to check this? return self._iset_single( loc, value, inplace=inplace, blkno=blkno, blk=blk, ) # error: Incompatible types in assignment (expression has type # "List[Union[int, slice, ndarray]]", variable has type "Union[int, # slice, ndarray]") loc = [loc] # type: ignore[assignment] # categorical/sparse/datetimetz if value_is_extension_type: def value_getitem(placement): return value else: def value_getitem(placement): return value[placement.indexer] # Accessing public blknos ensures the public versions are initialized blknos = self.blknos[loc] blklocs = self.blklocs[loc].copy() unfit_mgr_locs = [] unfit_val_locs = [] removed_blknos = [] for blkno_l, val_locs in libinternals.get_blkno_placements(blknos, group=True): blk = self.blocks[blkno_l] blk_locs = blklocs[val_locs.indexer] if inplace and blk.should_store(value): # Updating inplace -> check if we need to do Copy-on-Write if using_copy_on_write() and not self._has_no_reference_block(blkno_l): self._iset_split_block(blkno_l, blk_locs, value_getitem(val_locs)) else: blk.set_inplace(blk_locs, value_getitem(val_locs)) continue else: unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs]) unfit_val_locs.append(val_locs) # If all block items are unfit, schedule the block for removal. if len(val_locs) == len(blk.mgr_locs): removed_blknos.append(blkno_l) continue else: # Defer setting the new values to enable consolidation self._iset_split_block(blkno_l, blk_locs) if len(removed_blknos): # Remove blocks & update blknos accordingly is_deleted = np.zeros(self.nblocks, dtype=np.bool_) is_deleted[removed_blknos] = True new_blknos = np.empty(self.nblocks, dtype=np.intp) new_blknos.fill(-1) new_blknos[~is_deleted] = np.arange(self.nblocks - len(removed_blknos)) self._blknos = new_blknos[self._blknos] self.blocks = tuple( blk for i, blk in enumerate(self.blocks) if i not in set(removed_blknos) ) if unfit_val_locs: unfit_idxr = np.concatenate(unfit_mgr_locs) unfit_count = len(unfit_idxr) new_blocks: list[Block] = [] # TODO(CoW) is this always correct to assume that the new_blocks # are not referencing anything else? if value_is_extension_type: # This code (ab-)uses the fact that EA blocks contain only # one item. 
                # TODO(EA2D): special casing unnecessary with 2D EAs
                new_blocks.extend(
                    new_block_2d(
                        values=value,
                        placement=BlockPlacement(slice(mgr_loc, mgr_loc + 1)),
                    )
                    for mgr_loc in unfit_idxr
                )

                self._blknos[unfit_idxr] = np.arange(unfit_count) + len(self.blocks)
                self._blklocs[unfit_idxr] = 0

            else:
                # unfit_val_locs contains BlockPlacement objects
                unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:])

                new_blocks.append(
                    new_block_2d(
                        values=value_getitem(unfit_val_items),
                        placement=BlockPlacement(unfit_idxr),
                    )
                )

                self._blknos[unfit_idxr] = len(self.blocks)
                self._blklocs[unfit_idxr] = np.arange(unfit_count)

            self.blocks += tuple(new_blocks)

        # Newly created block's dtype may already be present.
        self._known_consolidated = False

    def _iset_split_block(
        self,
        blkno_l: int,
        blk_locs: np.ndarray | list[int],
        value: ArrayLike | None = None,
    ) -> None:
        """Removes columns from a block by splitting the block.

        Avoids copying the whole block through slicing and updates the manager
        after determining the new block structure. Optionally adds a new block,
        otherwise has to be done by the caller.

        Parameters
        ----------
        blkno_l: The block number to operate on, relevant for updating the manager
        blk_locs: The locations of our block that should be deleted.
        value: The value to set as a replacement.
        """
        blk = self.blocks[blkno_l]

        if self._blklocs is None:
            self._rebuild_blknos_and_blklocs()

        nbs_tup = tuple(blk.delete(blk_locs))
        if value is not None:
            locs = blk.mgr_locs.as_array[blk_locs]
            first_nb = new_block_2d(value, BlockPlacement(locs))
        else:
            first_nb = nbs_tup[0]
            nbs_tup = tuple(nbs_tup[1:])

        nr_blocks = len(self.blocks)
        blocks_tup = (
            self.blocks[:blkno_l] + (first_nb,) + self.blocks[blkno_l + 1 :] + nbs_tup
        )
        self.blocks = blocks_tup

        if not nbs_tup and value is not None:
            # No need to update anything if split did not happen
            return

        self._blklocs[first_nb.mgr_locs.indexer] = np.arange(len(first_nb))

        for i, nb in enumerate(nbs_tup):
            self._blklocs[nb.mgr_locs.indexer] = np.arange(len(nb))
            self._blknos[nb.mgr_locs.indexer] = i + nr_blocks

    def _iset_single(
        self, loc: int, value: ArrayLike, inplace: bool, blkno: int, blk: Block
    ) -> None:
        """
        Fastpath for iset when we are only setting a single position and
        the Block currently in that position is itself single-column.

        In this case we can swap out the entire Block and blklocs and blknos
        are unaffected.
        """
        # Caller is responsible for verifying value.shape

        if inplace and blk.should_store(value):
            copy = False
            if using_copy_on_write() and not self._has_no_reference_block(blkno):
                # perform Copy-on-Write and clear the reference
                copy = True
            iloc = self.blklocs[loc]
            blk.set_inplace(slice(iloc, iloc + 1), value, copy=copy)
            return

        nb = new_block_2d(value, placement=blk._mgr_locs)
        old_blocks = self.blocks
        new_blocks = old_blocks[:blkno] + (nb,) + old_blocks[blkno + 1 :]
        self.blocks = new_blocks
        return

    def column_setitem(
        self, loc: int, idx: int | slice | np.ndarray, value, inplace_only: bool = False
    ) -> None:
        """
        Set values ("setitem") into a single column (not setting the full column).
This is a method on the BlockManager level, to avoid creating an intermediate Series at the DataFrame level (`s = df[loc]; s[idx] = value`) """ if using_copy_on_write() and not self._has_no_reference(loc): blkno = self.blknos[loc] # Split blocks to only copy the column we want to modify blk_loc = self.blklocs[loc] # Copy our values values = self.blocks[blkno].values if values.ndim == 1: values = values.copy() else: # Use [blk_loc] as indexer to keep ndim=2, this already results in a # copy values = values[[blk_loc]] self._iset_split_block(blkno, [blk_loc], values) # this manager is only created temporarily to mutate the values in place # so don't track references, otherwise the `setitem` would perform CoW again col_mgr = self.iget(loc, track_ref=False) if inplace_only: col_mgr.setitem_inplace(idx, value) else: new_mgr = col_mgr.setitem((idx,), value) self.iset(loc, new_mgr._block.values, inplace=True) def insert(self, loc: int, item: Hashable, value: ArrayLike) -> None: """ Insert item at selected position. Parameters ---------- loc : int item : hashable value : np.ndarray or ExtensionArray """ # insert to the axis; this could possibly raise a TypeError new_axis = self.items.insert(loc, item) if value.ndim == 2: value = value.T if len(value) > 1: raise ValueError( f"Expected a 1D array, got an array with shape {value.T.shape}" ) else: value = ensure_block_shape(value, ndim=self.ndim) bp = BlockPlacement(slice(loc, loc + 1)) # TODO(CoW) do we always "own" the passed `value`? block = new_block_2d(values=value, placement=bp) if not len(self.blocks): # Fastpath self._blklocs = np.array([0], dtype=np.intp) self._blknos = np.array([0], dtype=np.intp) else: self._insert_update_mgr_locs(loc) self._insert_update_blklocs_and_blknos(loc) self.axes[0] = new_axis self.blocks += (block,) self._known_consolidated = False if sum(not block.is_extension for block in self.blocks) > 100: warnings.warn( "DataFrame is highly fragmented. This is usually the result " "of calling `frame.insert` many times, which has poor performance. " "Consider joining all columns at once using pd.concat(axis=1) " "instead. To get a de-fragmented frame, use `newframe = frame.copy()`", PerformanceWarning, stacklevel=find_stack_level(), ) def _insert_update_mgr_locs(self, loc) -> None: """ When inserting a new Block at location 'loc', we increment all of the mgr_locs of blocks above that by one. """ for blkno, count in _fast_count_smallints(self.blknos[loc:]): # .620 this way, .326 of which is in increment_above blk = self.blocks[blkno] blk._mgr_locs = blk._mgr_locs.increment_above(loc) def _insert_update_blklocs_and_blknos(self, loc) -> None: """ When inserting a new Block at location 'loc', we update our _blklocs and _blknos. """ # Accessing public blklocs ensures the public versions are initialized if loc == self.blklocs.shape[0]: # np.append is a lot faster, let's use it if we can. self._blklocs = np.append(self._blklocs, 0) self._blknos = np.append(self._blknos, len(self.blocks)) elif loc == 0: # np.append is a lot faster, let's use it if we can. self._blklocs = np.append(self._blklocs[::-1], 0)[::-1] self._blknos = np.append(self._blknos[::-1], len(self.blocks))[::-1] else: new_blklocs, new_blknos = libinternals.update_blklocs_and_blknos( self.blklocs, self.blknos, loc, len(self.blocks) ) self._blklocs = new_blklocs self._blknos = new_blknos def idelete(self, indexer) -> BlockManager: """ Delete selected locations, returning a new BlockManager. 
""" is_deleted = np.zeros(self.shape[0], dtype=np.bool_) is_deleted[indexer] = True taker = (~is_deleted).nonzero()[0] nbs = self._slice_take_blocks_ax0(taker, only_slice=True) new_columns = self.items[~is_deleted] axes = [new_columns, self.axes[1]] return type(self)(tuple(nbs), axes, verify_integrity=False) # ---------------------------------------------------------------- # Block-wise Operation def grouped_reduce(self: T, func: Callable) -> T: """ Apply grouped reduction function blockwise, returning a new BlockManager. Parameters ---------- func : grouped reduction function Returns ------- BlockManager """ result_blocks: list[Block] = [] for blk in self.blocks: if blk.is_object: # split on object-dtype blocks bc some columns may raise # while others do not. for sb in blk._split(): applied = sb.apply(func) result_blocks = extend_blocks(applied, result_blocks) else: applied = blk.apply(func) result_blocks = extend_blocks(applied, result_blocks) if len(result_blocks) == 0: nrows = 0 else: nrows = result_blocks[0].values.shape[-1] index = Index(range(nrows)) return type(self).from_blocks(result_blocks, [self.axes[0], index]) def reduce(self: T, func: Callable) -> T: """ Apply reduction function blockwise, returning a single-row BlockManager. Parameters ---------- func : reduction function Returns ------- BlockManager """ # If 2D, we assume that we're operating column-wise assert self.ndim == 2 res_blocks: list[Block] = [] for blk in self.blocks: nbs = blk.reduce(func) res_blocks.extend(nbs) index = Index([None]) # placeholder new_mgr = type(self).from_blocks(res_blocks, [self.items, index]) return new_mgr def operate_blockwise(self, other: BlockManager, array_op) -> BlockManager: """ Apply array_op blockwise with another (aligned) BlockManager. """ return operate_blockwise(self, other, array_op) def _equal_values(self: BlockManager, other: BlockManager) -> bool: """ Used in .equals defined in base class. Only check the column values assuming shape and indexes have already been checked. """ return blockwise_all(self, other, array_equals) def quantile( self: T, *, qs: Index, # with dtype float 64 axis: AxisInt = 0, interpolation: QuantileInterpolation = "linear", ) -> T: """ Iterate over blocks applying quantile reduction. This routine is intended for reduction type operations and will do inference on the generated blocks. Parameters ---------- axis: reduction axis, default 0 consolidate: bool, default True. Join together blocks having same dtype interpolation : type of interpolation, default 'linear' qs : list of the quantiles to be computed Returns ------- BlockManager """ # Series dispatches to DataFrame for quantile, which allows us to # simplify some of the code here and in the blocks assert self.ndim >= 2 assert is_list_like(qs) # caller is responsible for this assert axis == 1 # only ever called this way new_axes = list(self.axes) new_axes[1] = Index(qs, dtype=np.float64) blocks = [ blk.quantile(axis=axis, qs=qs, interpolation=interpolation) for blk in self.blocks ] return type(self)(blocks, new_axes) # ---------------------------------------------------------------- def unstack(self, unstacker, fill_value) -> BlockManager: """ Return a BlockManager with all blocks unstacked. Parameters ---------- unstacker : reshape._Unstacker fill_value : Any fill_value for newly introduced missing values. 
        Returns
        -------
        unstacked : BlockManager
        """
        new_columns = unstacker.get_new_columns(self.items)
        new_index = unstacker.new_index

        allow_fill = not unstacker.mask_all
        if allow_fill:
            # calculating the full mask once and passing it to Block._unstack is
            #  faster than calculating it in each repeated call
            new_mask2D = (~unstacker.mask).reshape(*unstacker.full_shape)
            needs_masking = new_mask2D.any(axis=0)
        else:
            needs_masking = np.zeros(unstacker.full_shape[1], dtype=bool)

        new_blocks: list[Block] = []
        columns_mask: list[np.ndarray] = []

        if len(self.items) == 0:
            factor = 1
        else:
            fac = len(new_columns) / len(self.items)
            assert fac == int(fac)
            factor = int(fac)

        for blk in self.blocks:
            mgr_locs = blk.mgr_locs
            new_placement = mgr_locs.tile_for_unstack(factor)

            blocks, mask = blk._unstack(
                unstacker,
                fill_value,
                new_placement=new_placement,
                needs_masking=needs_masking,
            )

            new_blocks.extend(blocks)
            columns_mask.extend(mask)

            # Block._unstack should ensure this holds,
            assert mask.sum() == sum(len(nb._mgr_locs) for nb in blocks)
            # In turn this ensures that in the BlockManager call below
            #  we have len(new_columns) == sum(x.shape[0] for x in new_blocks)
            #  which suffices to allow us to pass verify_integrity=False

        new_columns = new_columns[columns_mask]

        bm = BlockManager(new_blocks, [new_columns, new_index], verify_integrity=False)
        return bm

    def to_dict(self, copy: bool = True):
        """
        Return a dict of str(dtype) -> BlockManager

        Parameters
        ----------
        copy : bool, default True

        Returns
        -------
        values : a dict of str(dtype) -> BlockManager
        """

        bd: dict[str, list[Block]] = {}
        for b in self.blocks:
            bd.setdefault(str(b.dtype), []).append(b)

        # TODO(EA2D): the combine will be unnecessary with 2D EAs
        return {dtype: self._combine(blocks, copy=copy) for dtype, blocks in bd.items()}

    def as_array(
        self,
        dtype: np.dtype | None = None,
        copy: bool = False,
        na_value: object = lib.no_default,
    ) -> np.ndarray:
        """
        Convert the blockmanager data into a numpy array.

        Parameters
        ----------
        dtype : np.dtype or None, default None
            Data type of the return array.
        copy : bool, default False
            If True then guarantee that a copy is returned. A value of
            False does not guarantee that the underlying data is not
            copied.
        na_value : object, default lib.no_default
            Value to be used as the missing value sentinel.
        Returns
        -------
        arr : ndarray
        """
        # TODO(CoW) handle case where resulting array is a view
        if len(self.blocks) == 0:
            arr = np.empty(self.shape, dtype=float)
            return arr.transpose()

        # We want to copy when na_value is provided to avoid
        # mutating the original object
        copy = copy or na_value is not lib.no_default

        if self.is_single_block:
            blk = self.blocks[0]
            if blk.is_extension:
                # Avoid implicit conversion of extension blocks to object
                # error: Item "ndarray" of "Union[ndarray, ExtensionArray]" has no
                # attribute "to_numpy"
                arr = blk.values.to_numpy(  # type: ignore[union-attr]
                    dtype=dtype,
                    na_value=na_value,
                ).reshape(blk.shape)
            else:
                arr = np.asarray(blk.get_values())
                if dtype:
                    arr = arr.astype(dtype, copy=False)

            if copy:
                arr = arr.copy()
            elif using_copy_on_write():
                arr = arr.view()
                arr.flags.writeable = False
        else:
            arr = self._interleave(dtype=dtype, na_value=na_value)
            # The underlying data was copied within _interleave, so no need
            # to further copy if copy=True or setting na_value

        if na_value is not lib.no_default:
            arr[isna(arr)] = na_value

        return arr.transpose()

    def _interleave(
        self,
        dtype: np.dtype | None = None,
        na_value: object = lib.no_default,
    ) -> np.ndarray:
        """
        Return ndarray from blocks with specified item order
        Items must be contained in the blocks
        """
        if not dtype:
            # Incompatible types in assignment (expression has type
            # "Optional[Union[dtype[Any], ExtensionDtype]]", variable has
            # type "Optional[dtype[Any]]")
            dtype = interleaved_dtype(  # type: ignore[assignment]
                [blk.dtype for blk in self.blocks]
            )

        # TODO: https://github.com/pandas-dev/pandas/issues/22791
        # Give EAs some input on what happens here. Sparse needs this.
        if isinstance(dtype, SparseDtype):
            dtype = dtype.subtype
            dtype = cast(np.dtype, dtype)
        elif isinstance(dtype, ExtensionDtype):
            dtype = np.dtype("object")
        elif is_dtype_equal(dtype, str):
            dtype = np.dtype("object")

        result = np.empty(self.shape, dtype=dtype)

        itemmask = np.zeros(self.shape[0])

        if dtype == np.dtype("object") and na_value is lib.no_default:
            # much more performant than using to_numpy below
            for blk in self.blocks:
                rl = blk.mgr_locs
                arr = blk.get_values(dtype)
                result[rl.indexer] = arr
                itemmask[rl.indexer] = 1
            return result

        for blk in self.blocks:
            rl = blk.mgr_locs
            if blk.is_extension:
                # Avoid implicit conversion of extension blocks to object
                # error: Item "ndarray" of "Union[ndarray, ExtensionArray]" has no
                # attribute "to_numpy"
                arr = blk.values.to_numpy(  # type: ignore[union-attr]
                    dtype=dtype,
                    na_value=na_value,
                )
            else:
                arr = blk.get_values(dtype)
            result[rl.indexer] = arr
            itemmask[rl.indexer] = 1

        if not itemmask.all():
            raise AssertionError("Some items were not contained in blocks")

        return result

    # ----------------------------------------------------------------
    # Consolidation

    def is_consolidated(self) -> bool:
        """
        Return True if the blocks are consolidated, i.e. there is at most
        one block per dtype (among consolidatable blocks).
        """
        if not self._known_consolidated:
            self._consolidate_check()
        return self._is_consolidated

    def _consolidate_check(self) -> None:
        if len(self.blocks) == 1:
            # fastpath
            self._is_consolidated = True
            self._known_consolidated = True
            return
        dtypes = [blk.dtype for blk in self.blocks if blk._can_consolidate]
        self._is_consolidated = len(dtypes) == len(set(dtypes))
        self._known_consolidated = True

    def _consolidate_inplace(self) -> None:
        # In general, _consolidate_inplace should only be called via
        #  DataFrame._consolidate_inplace, otherwise we will fail to invalidate
        #  the DataFrame's _item_cache. The exception is for newly-created
        #  BlockManager objects not yet attached to a DataFrame.
if not self.is_consolidated(): self.blocks = _consolidate(self.blocks) self._is_consolidated = True self._known_consolidated = True self._rebuild_blknos_and_blklocs() def operate_blockwise( left: BlockManager, right: BlockManager, array_op ) -> BlockManager: # At this point we have already checked the parent DataFrames for # assert rframe._indexed_same(lframe) res_blks: list[Block] = [] for lvals, rvals, locs, left_ea, right_ea, rblk in _iter_block_pairs(left, right): res_values = array_op(lvals, rvals) if left_ea and not right_ea and hasattr(res_values, "reshape"): res_values = res_values.reshape(1, -1) nbs = rblk._split_op_result(res_values) # Assertions are disabled for performance, but should hold: # if right_ea or left_ea: # assert len(nbs) == 1 # else: # assert res_values.shape == lvals.shape, (res_values.shape, lvals.shape) _reset_block_mgr_locs(nbs, locs) res_blks.extend(nbs) # Assertions are disabled for performance, but should hold: # slocs = {y for nb in res_blks for y in nb.mgr_locs.as_array} # nlocs = sum(len(nb.mgr_locs.as_array) for nb in res_blks) # assert nlocs == len(left.items), (nlocs, len(left.items)) # assert len(slocs) == nlocs, (len(slocs), nlocs) # assert slocs == set(range(nlocs)), slocs new_mgr = type(right)(tuple(res_blks), axes=right.axes, verify_integrity=False) return new_mgr
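A brief usage sketch (an editor's addition, not part of the extracted pandas source): the usual public route into `operate_blockwise` is aligned frame-with-frame arithmetic, which pairs up the two managers' blocks and applies the array op once per block pair instead of interleaving the whole frame into a single array. The frames below are illustrative.

import numpy as np
import pandas as pd

# Two aligned frames; "a" lives in an int64 block, "b" in a float64 block.
left = pd.DataFrame({"a": np.arange(3, dtype=np.int64), "b": [1.5, 2.5, 3.5]})
right = pd.DataFrame({"a": [10, 20, 30], "b": [0.5, 0.5, 0.5]})

# Aligned DataFrame arithmetic dispatches to the managers'
# operate_blockwise, so each dtype's block is combined separately.
result = left + right
print(result.dtypes)  # a: int64, b: float64 -- per-block dtypes are preserved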
null
173,079
from __future__ import annotations

from typing import (
    TYPE_CHECKING,
    Iterator,
    NamedTuple,
)

from pandas._typing import ArrayLike


def _iter_block_pairs(
    left: BlockManager, right: BlockManager
) -> Iterator[BlockPairInfo]:
    # At this point we have already checked the parent DataFrames for
    #  assert rframe._indexed_same(lframe)

    for blk in left.blocks:
        locs = blk.mgr_locs
        blk_vals = blk.values

        left_ea = blk_vals.ndim == 1

        rblks = right._slice_take_blocks_ax0(locs.indexer, only_slice=True)

        # Assertions are disabled for performance, but should hold:
        #  if left_ea:
        #      assert len(locs) == 1, locs
        #      assert len(rblks) == 1, rblks
        #      assert rblks[0].shape[0] == 1, rblks[0].shape

        for rblk in rblks:
            right_ea = rblk.values.ndim == 1

            lvals, rvals = _get_same_shape_values(blk, rblk, left_ea, right_ea)
            info = BlockPairInfo(lvals, rvals, locs, left_ea, right_ea, rblk)
            yield info

The provided code snippet includes necessary dependencies for implementing the `blockwise_all` function. Write a Python function `def blockwise_all(left: BlockManager, right: BlockManager, op) -> bool` to solve the following problem:
Blockwise `all` reduction.

Here is the function:
def blockwise_all(left: BlockManager, right: BlockManager, op) -> bool:
    """
    Blockwise `all` reduction.
    """
    for info in _iter_block_pairs(left, right):
        res = op(info.lvals, info.rvals)
        if not res:
            return False
    return True
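A hedged usage sketch for `blockwise_all` (editor's addition): per the `_equal_values` method shown earlier in this document, `DataFrame.equals` is the public caller, passing `array_equals` as the op once shapes and axes have already been compared; the loop short-circuits on the first block pair whose values differ. The frames below are illustrative.

import numpy as np
import pandas as pd

df1 = pd.DataFrame({"x": [1.0, 2.0, np.nan], "y": ["a", "b", "c"]})
df2 = pd.DataFrame({"x": [1.0, 2.0, np.nan], "y": ["a", "b", "c"]})

# equals -> BlockManager._equal_values -> blockwise_all(self, other, array_equals)
print(df1.equals(df2))                            # True: NaNs in matching positions compare equal
print(df1.equals(df2.assign(x=[1.0, 2.0, 3.0])))  # False: one block pair differs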
Blockwise `all` reduction.