Dataset columns: id (int64, range 0 to 190k); prompt (string, lengths 21 to 13.4M); docstring (string, lengths 1 to 12k)
173,480
from __future__ import annotations from contextlib import contextmanager from csv import ( QUOTE_NONE, QUOTE_NONNUMERIC, ) from decimal import Decimal from functools import partial from io import StringIO import math import re from shutil import get_terminal_size from typing import ( IO, TYPE_CHECKING, Any, Callable, Final, Generator, Hashable, Iterable, List, Mapping, Sequence, cast, ) from unicodedata import east_asian_width import numpy as np from pandas._config.config import ( get_option, set_option, ) from pandas._libs import lib from pandas._libs.missing import NA from pandas._libs.tslibs import ( NaT, Timedelta, Timestamp, get_unit_from_dtype, iNaT, periods_per_day, ) from pandas._libs.tslibs.nattype import NaTType from pandas._typing import ( ArrayLike, Axes, ColspaceArgType, ColspaceType, CompressionOptions, FilePath, FloatFormatType, FormattersType, IndexLabel, StorageOptions, WriteBuffer, ) from pandas.core.dtypes.common import ( is_categorical_dtype, is_complex_dtype, is_datetime64_dtype, is_extension_array_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_list_like, is_numeric_dtype, is_scalar, is_timedelta64_dtype, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import ( isna, notna, ) from pandas.core.arrays import ( Categorical, DatetimeArray, TimedeltaArray, ) from pandas.core.arrays.string_ import StringDtype from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.construction import extract_array from pandas.core.indexes.api import ( Index, MultiIndex, PeriodIndex, ensure_index, ) from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.core.reshape.concat import concat from pandas.io.common import ( check_parent_directory, stringify_path, ) from pandas.io.formats import printing Any = object() The provided code snippet includes necessary dependencies for implementing the `get_level_lengths` function. Write a Python function `def get_level_lengths( levels: Any, sentinel: bool | object | str = "" ) -> list[dict[int, int]]` to solve the following problem: For each index in each level the function returns lengths of indexes. Parameters ---------- levels : list of lists List of values on for level. sentinel : string, optional Value which states that no new index starts on there. Returns ------- Returns list of maps. For each level returns map of indexes (key is index in row and value is length of index). Here is the function: def get_level_lengths( levels: Any, sentinel: bool | object | str = "" ) -> list[dict[int, int]]: """ For each index in each level the function returns lengths of indexes. Parameters ---------- levels : list of lists List of values on for level. sentinel : string, optional Value which states that no new index starts on there. Returns ------- Returns list of maps. For each level returns map of indexes (key is index in row and value is length of index). """ if len(levels) == 0: return [] control = [True] * len(levels[0]) result = [] for level in levels: last_index = 0 lengths = {} for i, key in enumerate(level): if control[i] and key == sentinel: pass else: control[i] = False lengths[last_index] = i - last_index last_index = i lengths[last_index] = len(level) - last_index result.append(lengths) return result
For each index in each level the function returns lengths of indexes. Parameters ---------- levels : list of lists List of values for each level. sentinel : string, optional Value which indicates that no new index starts at that position. Returns ------- Returns list of maps. For each level returns a map of indexes (key is the index in the row and value is the length of the index).
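To make the contract concrete, here is a small hand-traceable sketch; it assumes get_level_lengths as defined above is in scope, and the level values are illustrative. The default sentinel "" marks cells where the index above continues; the control list ensures that once an outer level breaks at a position, a sentinel at that position in an inner level still starts a new index.

# Levels mimic a rendered two-level MultiIndex.
levels = [
    ["a", "", "", "b", ""],    # outer level: "a" spans rows 0-2, "b" spans rows 3-4
    ["x", "y", "", "x", "y"],  # inner level: "y" at row 1 spans two rows
]
print(get_level_lengths(levels))
# [{0: 3, 3: 2}, {0: 1, 1: 2, 3: 1, 4: 1}]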
173,481
from __future__ import annotations from contextlib import contextmanager from csv import ( QUOTE_NONE, QUOTE_NONNUMERIC, ) from decimal import Decimal from functools import partial from io import StringIO import math import re from shutil import get_terminal_size from typing import ( IO, TYPE_CHECKING, Any, Callable, Final, Generator, Hashable, Iterable, List, Mapping, Sequence, cast, ) from unicodedata import east_asian_width import numpy as np from pandas._config.config import ( get_option, set_option, ) from pandas._libs import lib from pandas._libs.missing import NA from pandas._libs.tslibs import ( NaT, Timedelta, Timestamp, get_unit_from_dtype, iNaT, periods_per_day, ) from pandas._libs.tslibs.nattype import NaTType from pandas._typing import ( ArrayLike, Axes, ColspaceArgType, ColspaceType, CompressionOptions, FilePath, FloatFormatType, FormattersType, IndexLabel, StorageOptions, WriteBuffer, ) from pandas.core.dtypes.common import ( is_categorical_dtype, is_complex_dtype, is_datetime64_dtype, is_extension_array_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_list_like, is_numeric_dtype, is_scalar, is_timedelta64_dtype, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import ( isna, notna, ) from pandas.core.arrays import ( Categorical, DatetimeArray, TimedeltaArray, ) from pandas.core.arrays.string_ import StringDtype from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.construction import extract_array from pandas.core.indexes.api import ( Index, MultiIndex, PeriodIndex, ensure_index, ) from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.core.reshape.concat import concat from pandas.io.common import ( check_parent_directory, stringify_path, ) from pandas.io.formats import printing class WriteBuffer(BaseBuffer, Protocol[AnyStr_contra]): def write(self, __b: AnyStr_contra) -> Any: # for gzip.GzipFile, bz2.BZ2File ... def flush(self) -> Any: # for gzip.GzipFile, bz2.BZ2File ... The provided code snippet includes necessary dependencies for implementing the `buffer_put_lines` function. Write a Python function `def buffer_put_lines(buf: WriteBuffer[str], lines: list[str]) -> None` to solve the following problem: Appends lines to a buffer. Parameters ---------- buf The buffer to write to lines The lines to append. Here is the function: def buffer_put_lines(buf: WriteBuffer[str], lines: list[str]) -> None: """ Appends lines to a buffer. Parameters ---------- buf The buffer to write to lines The lines to append. """ if any(isinstance(x, str) for x in lines): lines = [str(x) for x in lines] buf.write("\n".join(lines))
Appends lines to a buffer. Parameters ---------- buf The buffer to write to lines The lines to append.
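A quick usage sketch, assuming buffer_put_lines is in scope; io.StringIO satisfies the WriteBuffer[str] protocol since it provides write() and flush().

from io import StringIO

buf = StringIO()
# Lines are joined with "\n" and written in a single write() call; if any
# element is already a str, every element is coerced via str() first.
buffer_put_lines(buf, ["col A", "col B", "col C"])
print(buf.getvalue())
# col A
# col B
# col C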
173,482
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from contextlib import ( ExitStack, contextmanager, ) from datetime import ( date, datetime, time, ) from functools import partial import re from typing import ( TYPE_CHECKING, Any, Iterator, Literal, cast, overload, ) import warnings import numpy as np from pandas._libs import lib from pandas._typing import ( DateTimeErrorChoices, DtypeArg, DtypeBackend, IndexLabel, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import ( AbstractMethodError, DatabaseError, ) from pandas.util._exceptions import find_stack_level from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( is_datetime64tz_dtype, is_dict_like, is_integer, is_list_like, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna from pandas import get_option from pandas.core.api import ( DataFrame, Series, ) from pandas.core.arrays import ArrowExtensionArray from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.internals.construction import convert_object_array from pandas.core.tools.datetimes import to_datetime def _parse_date_columns(data_frame, parse_dates): """ Force non-datetime columns to be read as such. Supports both string formatted and integer timestamp columns. """ parse_dates = _process_parse_dates_argument(parse_dates) # we want to coerce datetime64_tz dtypes for now to UTC # we could in theory do a 'nice' conversion from a FixedOffset tz # GH11216 for col_name, df_col in data_frame.items(): if is_datetime64tz_dtype(df_col.dtype) or col_name in parse_dates: try: fmt = parse_dates[col_name] except TypeError: fmt = None data_frame[col_name] = _handle_date_column(df_col, format=fmt) return data_frame def _convert_arrays_to_dataframe( data, columns, coerce_float: bool = True, dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", ) -> DataFrame: content = lib.to_object_array_tuples(data) arrays = convert_object_array( list(content.T), dtype=None, coerce_float=coerce_float, dtype_backend=dtype_backend, ) if dtype_backend == "pyarrow": pa = import_optional_dependency("pyarrow") arrays = [ ArrowExtensionArray(pa.array(arr, from_pandas=True)) for arr in arrays ] if arrays: return DataFrame(dict(zip(columns, arrays))) else: return DataFrame(columns=columns) Literal: _SpecialForm = ... DtypeArg = Union[Dtype, Dict[Hashable, Dtype]] DtypeBackend = Literal["pyarrow", "numpy_nullable"] The provided code snippet includes necessary dependencies for implementing the `_wrap_result` function. Write a Python function `def _wrap_result( data, columns, index_col=None, coerce_float: bool = True, parse_dates=None, dtype: DtypeArg | None = None, dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", )` to solve the following problem: Wrap result set of query in a DataFrame. Here is the function: def _wrap_result( data, columns, index_col=None, coerce_float: bool = True, parse_dates=None, dtype: DtypeArg | None = None, dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", ): """Wrap result set of query in a DataFrame.""" frame = _convert_arrays_to_dataframe(data, columns, coerce_float, dtype_backend) if dtype: frame = frame.astype(dtype) frame = _parse_date_columns(frame, parse_dates) if index_col is not None: frame = frame.set_index(index_col) return frame
Wrap result set of query in a DataFrame.
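_wrap_result is a private helper, so calling it directly is shown only to illustrate the pipeline: rows arrive from the cursor as tuples, get converted to column arrays, are optionally cast via dtype, date-parsed, and finally indexed. The column and row values below are made up.

# Raw rows as a DBAPI cursor would deliver them (a list of tuples).
rows = [(1, "2021-01-01"), (2, "2021-01-02")]
frame = _wrap_result(
    rows,
    columns=["id", "created"],
    index_col="id",            # set_index runs after dtype casting and date parsing
    parse_dates=["created"],   # handled by _parse_date_columns via to_datetime
)
print(frame.index.name, frame["created"].dtype)  # id datetime64[ns]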
173,483
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from contextlib import ( ExitStack, contextmanager, ) from datetime import ( date, datetime, time, ) from functools import partial import re from typing import ( TYPE_CHECKING, Any, Iterator, Literal, cast, overload, ) import warnings import numpy as np from pandas._libs import lib from pandas._typing import ( DateTimeErrorChoices, DtypeArg, DtypeBackend, IndexLabel, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import ( AbstractMethodError, DatabaseError, ) from pandas.util._exceptions import find_stack_level from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( is_datetime64tz_dtype, is_dict_like, is_integer, is_list_like, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna from pandas import get_option from pandas.core.api import ( DataFrame, Series, ) from pandas.core.arrays import ArrowExtensionArray from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.internals.construction import convert_object_array from pandas.core.tools.datetimes import to_datetime def pandasSQL_builder( con, schema: str | None = None, need_transaction: bool = False, ) -> PandasSQL: """ Convenience function to return the correct PandasSQL subclass based on the provided parameters. Also creates a sqlalchemy connection and transaction if necessary. """ import sqlite3 if isinstance(con, sqlite3.Connection) or con is None: return SQLiteDatabase(con) sqlalchemy = import_optional_dependency("sqlalchemy", errors="ignore") if isinstance(con, str) and sqlalchemy is None: raise ImportError("Using URI string without sqlalchemy installed.") if sqlalchemy is not None and isinstance(con, (str, sqlalchemy.engine.Connectable)): return SQLDatabase(con, schema, need_transaction) warnings.warn( "pandas only supports SQLAlchemy connectable (engine/connection) or " "database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 " "objects are not tested. Please consider using SQLAlchemy.", UserWarning, stacklevel=find_stack_level(), ) return SQLiteDatabase(con) def import_optional_dependency( name: str, extra: str = "", errors: str = "raise", min_version: str | None = None, ): """ Import an optional dependency. By default, if a dependency is missing an ImportError with a nice message will be raised. If a dependency is present, but too old, we raise. Parameters ---------- name : str The module name. extra : str Additional text to include in the ImportError message. errors : str {'raise', 'warn', 'ignore'} What to do when a dependency is not found or its version is too old. * raise : Raise an ImportError * warn : Only applicable when a module's version is to old. Warns that the version is too old and returns None * ignore: If the module is not installed, return None, otherwise, return the module, even if the version is too old. It's expected that users validate the version locally when using ``errors="ignore"`` (see. ``io/html.py``) min_version : str, default None Specify a minimum version that is different from the global pandas minimum version required. Returns ------- maybe_module : Optional[ModuleType] The imported module, when found and the version is correct. None is returned when the package is not found and `errors` is False, or when the package's version is too old and `errors` is ``'warn'``. 
""" assert errors in {"warn", "raise", "ignore"} package_name = INSTALL_MAPPING.get(name) install_name = package_name if package_name is not None else name msg = ( f"Missing optional dependency '{install_name}'. {extra} " f"Use pip or conda to install {install_name}." ) try: module = importlib.import_module(name) except ImportError: if errors == "raise": raise ImportError(msg) return None # Handle submodules: if we have submodule, grab parent module from sys.modules parent = name.split(".")[0] if parent != name: install_name = parent module_to_get = sys.modules[install_name] else: module_to_get = module minimum_version = min_version if min_version is not None else VERSIONS.get(parent) if minimum_version: version = get_version(module_to_get) if version and Version(version) < Version(minimum_version): msg = ( f"Pandas requires version '{minimum_version}' or newer of '{parent}' " f"(version '{version}' currently installed)." ) if errors == "warn": warnings.warn( msg, UserWarning, stacklevel=find_stack_level(), ) return None elif errors == "raise": raise ImportError(msg) return module def find_stack_level() -> int: """ Find the first place in the stack that is not inside pandas (tests notwithstanding). """ import pandas as pd pkg_dir = os.path.dirname(pd.__file__) test_dir = os.path.join(pkg_dir, "tests") # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow frame = inspect.currentframe() n = 0 while frame: fname = inspect.getfile(frame) if fname.startswith(pkg_dir) and not fname.startswith(test_dir): frame = frame.f_back n += 1 else: break return n The provided code snippet includes necessary dependencies for implementing the `execute` function. Write a Python function `def execute(sql, con, params=None)` to solve the following problem: Execute the given SQL query using the provided connection object. Parameters ---------- sql : string SQL query to be executed. con : SQLAlchemy connection or sqlite3 connection If a DBAPI2 object, only sqlite3 is supported. params : list or tuple, optional, default: None List of parameters to pass to execute method. Returns ------- Results Iterable Here is the function: def execute(sql, con, params=None): """ Execute the given SQL query using the provided connection object. Parameters ---------- sql : string SQL query to be executed. con : SQLAlchemy connection or sqlite3 connection If a DBAPI2 object, only sqlite3 is supported. params : list or tuple, optional, default: None List of parameters to pass to execute method. Returns ------- Results Iterable """ warnings.warn( "`pandas.io.sql.execute` is deprecated and " "will be removed in the future version.", FutureWarning, stacklevel=find_stack_level(), ) # GH50185 sqlalchemy = import_optional_dependency("sqlalchemy", errors="ignore") if sqlalchemy is not None and isinstance(con, (str, sqlalchemy.engine.Engine)): raise TypeError("pandas.io.sql.execute requires a connection") # GH50185 with pandasSQL_builder(con, need_transaction=True) as pandas_sql: return pandas_sql.execute(sql, params)
Execute the given SQL query using the provided connection object. Parameters ---------- sql : string SQL query to be executed. con : SQLAlchemy connection or sqlite3 connection If a DBAPI2 object, only sqlite3 is supported. params : list or tuple, optional, default: None List of parameters to pass to execute method. Returns ------- Results Iterable
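A minimal round trip through this deprecated wrapper, using an in-memory sqlite3 connection (the one DBAPI driver it supports); in the pandas version this snippet targets it emits a FutureWarning and hands back the live cursor.

import sqlite3
import pandas.io.sql as pd_sql

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE t (x INTEGER)")
conn.executemany("INSERT INTO t VALUES (?)", [(1,), (2,), (3,)])

# Warns about deprecation, then delegates to SQLiteDatabase.execute.
cur = pd_sql.execute("SELECT x FROM t WHERE x > ?", conn, params=[1])
print(cur.fetchall())  # [(2,), (3,)]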
173,484
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from contextlib import ( ExitStack, contextmanager, ) from datetime import ( date, datetime, time, ) from functools import partial import re from typing import ( TYPE_CHECKING, Any, Iterator, Literal, cast, overload, ) import warnings import numpy as np from pandas._libs import lib from pandas._typing import ( DateTimeErrorChoices, DtypeArg, DtypeBackend, IndexLabel, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import ( AbstractMethodError, DatabaseError, ) from pandas.util._exceptions import find_stack_level from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( is_datetime64tz_dtype, is_dict_like, is_integer, is_list_like, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna from pandas import get_option from pandas.core.api import ( DataFrame, Series, ) from pandas.core.arrays import ArrowExtensionArray from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.internals.construction import convert_object_array from pandas.core.tools.datetimes import to_datetime DtypeBackend = Literal["pyarrow", "numpy_nullable"] def read_sql_table( table_name, con, schema=..., index_col: str | list[str] | None = ..., coerce_float=..., parse_dates: list[str] | dict[str, str] | None = ..., columns: list[str] | None = ..., chunksize: None = ..., dtype_backend: DtypeBackend | lib.NoDefault = ..., ) -> DataFrame: ...
null
173,485
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from contextlib import ( ExitStack, contextmanager, ) from datetime import ( date, datetime, time, ) from functools import partial import re from typing import ( TYPE_CHECKING, Any, Iterator, Literal, cast, overload, ) import warnings import numpy as np from pandas._libs import lib from pandas._typing import ( DateTimeErrorChoices, DtypeArg, DtypeBackend, IndexLabel, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import ( AbstractMethodError, DatabaseError, ) from pandas.util._exceptions import find_stack_level from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( is_datetime64tz_dtype, is_dict_like, is_integer, is_list_like, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna from pandas import get_option from pandas.core.api import ( DataFrame, Series, ) from pandas.core.arrays import ArrowExtensionArray from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.internals.construction import convert_object_array from pandas.core.tools.datetimes import to_datetime class Iterator(Iterable[_T_co], Protocol[_T_co]): def __next__(self) -> _T_co: def __iter__(self) -> Iterator[_T_co]: DtypeBackend = Literal["pyarrow", "numpy_nullable"] def read_sql_table( table_name, con, schema=..., index_col: str | list[str] | None = ..., coerce_float=..., parse_dates: list[str] | dict[str, str] | None = ..., columns: list[str] | None = ..., chunksize: int = ..., dtype_backend: DtypeBackend | lib.NoDefault = ..., ) -> Iterator[DataFrame]: ...
null
173,486
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from contextlib import ( ExitStack, contextmanager, ) from datetime import ( date, datetime, time, ) from functools import partial import re from typing import ( TYPE_CHECKING, Any, Iterator, Literal, cast, overload, ) import warnings import numpy as np from pandas._libs import lib from pandas._typing import ( DateTimeErrorChoices, DtypeArg, DtypeBackend, IndexLabel, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import ( AbstractMethodError, DatabaseError, ) from pandas.util._exceptions import find_stack_level from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( is_datetime64tz_dtype, is_dict_like, is_integer, is_list_like, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna from pandas import get_option from pandas.core.api import ( DataFrame, Series, ) from pandas.core.arrays import ArrowExtensionArray from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.internals.construction import convert_object_array from pandas.core.tools.datetimes import to_datetime def has_table(table_name: str, con, schema: str | None = None) -> bool: """ Check if DataBase has named table. Parameters ---------- table_name: string Name of SQL table. con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. schema : string, default None Name of SQL schema in database to write to (if database flavor supports this). If None, use default schema (default). Returns ------- boolean """ with pandasSQL_builder(con, schema=schema) as pandas_sql: return pandas_sql.has_table(table_name) def pandasSQL_builder( con, schema: str | None = None, need_transaction: bool = False, ) -> PandasSQL: """ Convenience function to return the correct PandasSQL subclass based on the provided parameters. Also creates a sqlalchemy connection and transaction if necessary. """ import sqlite3 if isinstance(con, sqlite3.Connection) or con is None: return SQLiteDatabase(con) sqlalchemy = import_optional_dependency("sqlalchemy", errors="ignore") if isinstance(con, str) and sqlalchemy is None: raise ImportError("Using URI string without sqlalchemy installed.") if sqlalchemy is not None and isinstance(con, (str, sqlalchemy.engine.Connectable)): return SQLDatabase(con, schema, need_transaction) warnings.warn( "pandas only supports SQLAlchemy connectable (engine/connection) or " "database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 " "objects are not tested. Please consider using SQLAlchemy.", UserWarning, stacklevel=find_stack_level(), ) return SQLiteDatabase(con) class Iterator(Iterable[_T_co], Protocol[_T_co]): def __next__(self) -> _T_co: ... def __iter__(self) -> Iterator[_T_co]: ... DtypeBackend = Literal["pyarrow", "numpy_nullable"] def check_dtype_backend(dtype_backend) -> None: if dtype_backend is not lib.no_default: if dtype_backend not in ["numpy_nullable", "pyarrow"]: raise ValueError( f"dtype_backend {dtype_backend} is invalid, only 'numpy_nullable' and " f"'pyarrow' are allowed.", ) The provided code snippet includes necessary dependencies for implementing the `read_sql_table` function. 
Write a Python function `def read_sql_table( table_name: str, con, schema: str | None = None, index_col: str | list[str] | None = None, coerce_float: bool = True, parse_dates: list[str] | dict[str, str] | None = None, columns: list[str] | None = None, chunksize: int | None = None, dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, ) -> DataFrame | Iterator[DataFrame]` to solve the following problem: Read SQL database table into a DataFrame. Given a table name and a SQLAlchemy connectable, returns a DataFrame. This function does not support DBAPI connections. Parameters ---------- table_name : str Name of SQL table in database. con : SQLAlchemy connectable or str A database URI could be provided as str. SQLite DBAPI connection mode not supported. schema : str, default None Name of SQL schema in database to query (if database flavor supports this). Uses default schema if None (default). index_col : str or list of str, optional, default: None Column(s) to set as index(MultiIndex). coerce_float : bool, default True Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point. Can result in loss of Precision. parse_dates : list or dict, default None - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite. columns : list, default None List of column names to select from SQL table. chunksize : int, default None If specified, returns an iterator where `chunksize` is the number of rows to include in each chunk. dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy backed DataFrames Which dtype_backend to use, e.g. whether a DataFrame should have NumPy arrays, nullable dtypes are used for all dtypes that have a nullable implementation when "numpy_nullable" is set, pyarrow is used for all dtypes if "pyarrow" is set. The dtype_backends are still experimential. .. versionadded:: 2.0 Returns ------- DataFrame or Iterator[DataFrame] A SQL table is returned as two-dimensional data structure with labeled axes. See Also -------- read_sql_query : Read SQL query into a DataFrame. read_sql : Read SQL query or database table into a DataFrame. Notes ----- Any datetime values with time zone information will be converted to UTC. Examples -------- >>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP Here is the function: def read_sql_table( table_name: str, con, schema: str | None = None, index_col: str | list[str] | None = None, coerce_float: bool = True, parse_dates: list[str] | dict[str, str] | None = None, columns: list[str] | None = None, chunksize: int | None = None, dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, ) -> DataFrame | Iterator[DataFrame]: """ Read SQL database table into a DataFrame. Given a table name and a SQLAlchemy connectable, returns a DataFrame. This function does not support DBAPI connections. Parameters ---------- table_name : str Name of SQL table in database. con : SQLAlchemy connectable or str A database URI could be provided as str. SQLite DBAPI connection mode not supported. schema : str, default None Name of SQL schema in database to query (if database flavor supports this). 
Uses default schema if None (default). index_col : str or list of str, optional, default: None Column(s) to set as index(MultiIndex). coerce_float : bool, default True Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point. Can result in loss of Precision. parse_dates : list or dict, default None - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite. columns : list, default None List of column names to select from SQL table. chunksize : int, default None If specified, returns an iterator where `chunksize` is the number of rows to include in each chunk. dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy backed DataFrames Which dtype_backend to use, e.g. whether a DataFrame should have NumPy arrays, nullable dtypes are used for all dtypes that have a nullable implementation when "numpy_nullable" is set, pyarrow is used for all dtypes if "pyarrow" is set. The dtype_backends are still experimential. .. versionadded:: 2.0 Returns ------- DataFrame or Iterator[DataFrame] A SQL table is returned as two-dimensional data structure with labeled axes. See Also -------- read_sql_query : Read SQL query into a DataFrame. read_sql : Read SQL query or database table into a DataFrame. Notes ----- Any datetime values with time zone information will be converted to UTC. Examples -------- >>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP """ check_dtype_backend(dtype_backend) if dtype_backend is lib.no_default: dtype_backend = "numpy" # type: ignore[assignment] with pandasSQL_builder(con, schema=schema, need_transaction=True) as pandas_sql: if not pandas_sql.has_table(table_name): raise ValueError(f"Table {table_name} not found") table = pandas_sql.read_table( table_name, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns, chunksize=chunksize, dtype_backend=dtype_backend, ) if table is not None: return table else: raise ValueError(f"Table {table_name} not found", con)
Read SQL database table into a DataFrame. Given a table name and a SQLAlchemy connectable, returns a DataFrame. This function does not support DBAPI connections. Parameters ---------- table_name : str Name of SQL table in database. con : SQLAlchemy connectable or str A database URI could be provided as str. SQLite DBAPI connection mode not supported. schema : str, default None Name of SQL schema in database to query (if database flavor supports this). Uses default schema if None (default). index_col : str or list of str, optional, default: None Column(s) to set as index (MultiIndex). coerce_float : bool, default True Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point. Can result in loss of precision. parse_dates : list or dict, default None - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite. columns : list, default None List of column names to select from SQL table. chunksize : int, default None If specified, returns an iterator where `chunksize` is the number of rows to include in each chunk. dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy backed DataFrames Which dtype_backend to use, e.g. whether a DataFrame should have NumPy arrays, nullable dtypes are used for all dtypes that have a nullable implementation when "numpy_nullable" is set, pyarrow is used for all dtypes if "pyarrow" is set. The dtype_backends are still experimental. .. versionadded:: 2.0 Returns ------- DataFrame or Iterator[DataFrame] A SQL table is returned as a two-dimensional data structure with labeled axes. See Also -------- read_sql_query : Read SQL query into a DataFrame. read_sql : Read SQL query or database table into a DataFrame. Notes ----- Any datetime values with time zone information will be converted to UTC. Examples -------- >>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP
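A runnable sketch under the assumption that SQLAlchemy is installed (the function refuses plain DBAPI connections); the table and column names are illustrative.

import pandas as pd
from sqlalchemy import create_engine

engine = create_engine("sqlite://")  # in-memory SQLite via SQLAlchemy
pd.DataFrame({"name": ["a", "b", "c"], "value": [1, 2, 3]}).to_sql(
    "measurements", engine, index=False
)

df = pd.read_sql_table("measurements", engine, columns=["name", "value"])
print(df.shape)  # (3, 2)

# With chunksize set, an iterator of DataFrames comes back instead:
for chunk in pd.read_sql_table("measurements", engine, chunksize=2):
    print(len(chunk))  # 2, then 1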
173,487
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from contextlib import ( ExitStack, contextmanager, ) from datetime import ( date, datetime, time, ) from functools import partial import re from typing import ( TYPE_CHECKING, Any, Iterator, Literal, cast, overload, ) import warnings import numpy as np from pandas._libs import lib from pandas._typing import ( DateTimeErrorChoices, DtypeArg, DtypeBackend, IndexLabel, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import ( AbstractMethodError, DatabaseError, ) from pandas.util._exceptions import find_stack_level from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( is_datetime64tz_dtype, is_dict_like, is_integer, is_list_like, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna from pandas import get_option from pandas.core.api import ( DataFrame, Series, ) from pandas.core.arrays import ArrowExtensionArray from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.internals.construction import convert_object_array from pandas.core.tools.datetimes import to_datetime DtypeArg = Union[Dtype, Dict[Hashable, Dtype]] DtypeBackend = Literal["pyarrow", "numpy_nullable"] def read_sql_query( sql, con, index_col: str | list[str] | None = ..., coerce_float=..., params: list[str] | dict[str, str] | None = ..., parse_dates: list[str] | dict[str, str] | None = ..., chunksize: None = ..., dtype: DtypeArg | None = ..., dtype_backend: DtypeBackend | lib.NoDefault = ..., ) -> DataFrame: ...
null
173,488
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from contextlib import ( ExitStack, contextmanager, ) from datetime import ( date, datetime, time, ) from functools import partial import re from typing import ( TYPE_CHECKING, Any, Iterator, Literal, cast, overload, ) import warnings import numpy as np from pandas._libs import lib from pandas._typing import ( DateTimeErrorChoices, DtypeArg, DtypeBackend, IndexLabel, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import ( AbstractMethodError, DatabaseError, ) from pandas.util._exceptions import find_stack_level from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( is_datetime64tz_dtype, is_dict_like, is_integer, is_list_like, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna from pandas import get_option from pandas.core.api import ( DataFrame, Series, ) from pandas.core.arrays import ArrowExtensionArray from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.internals.construction import convert_object_array from pandas.core.tools.datetimes import to_datetime class Iterator(Iterable[_T_co], Protocol[_T_co]): def __next__(self) -> _T_co: ... def __iter__(self) -> Iterator[_T_co]: ... DtypeArg = Union[Dtype, Dict[Hashable, Dtype]] DtypeBackend = Literal["pyarrow", "numpy_nullable"] def read_sql_query( sql, con, index_col: str | list[str] | None = ..., coerce_float=..., params: list[str] | dict[str, str] | None = ..., parse_dates: list[str] | dict[str, str] | None = ..., chunksize: int = ..., dtype: DtypeArg | None = ..., dtype_backend: DtypeBackend | lib.NoDefault = ..., ) -> Iterator[DataFrame]: ...
null
173,489
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from contextlib import ( ExitStack, contextmanager, ) from datetime import ( date, datetime, time, ) from functools import partial import re from typing import ( TYPE_CHECKING, Any, Iterator, Literal, cast, overload, ) import warnings import numpy as np from pandas._libs import lib from pandas._typing import ( DateTimeErrorChoices, DtypeArg, DtypeBackend, IndexLabel, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import ( AbstractMethodError, DatabaseError, ) from pandas.util._exceptions import find_stack_level from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( is_datetime64tz_dtype, is_dict_like, is_integer, is_list_like, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna from pandas import get_option from pandas.core.api import ( DataFrame, Series, ) from pandas.core.arrays import ArrowExtensionArray from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.internals.construction import convert_object_array from pandas.core.tools.datetimes import to_datetime def pandasSQL_builder( con, schema: str | None = None, need_transaction: bool = False, ) -> PandasSQL: """ Convenience function to return the correct PandasSQL subclass based on the provided parameters. Also creates a sqlalchemy connection and transaction if necessary. """ import sqlite3 if isinstance(con, sqlite3.Connection) or con is None: return SQLiteDatabase(con) sqlalchemy = import_optional_dependency("sqlalchemy", errors="ignore") if isinstance(con, str) and sqlalchemy is None: raise ImportError("Using URI string without sqlalchemy installed.") if sqlalchemy is not None and isinstance(con, (str, sqlalchemy.engine.Connectable)): return SQLDatabase(con, schema, need_transaction) warnings.warn( "pandas only supports SQLAlchemy connectable (engine/connection) or " "database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 " "objects are not tested. Please consider using SQLAlchemy.", UserWarning, stacklevel=find_stack_level(), ) return SQLiteDatabase(con) class Iterator(Iterable[_T_co], Protocol[_T_co]): def __next__(self) -> _T_co: ... def __iter__(self) -> Iterator[_T_co]: ... DtypeArg = Union[Dtype, Dict[Hashable, Dtype]] DtypeBackend = Literal["pyarrow", "numpy_nullable"] def check_dtype_backend(dtype_backend) -> None: if dtype_backend is not lib.no_default: if dtype_backend not in ["numpy_nullable", "pyarrow"]: raise ValueError( f"dtype_backend {dtype_backend} is invalid, only 'numpy_nullable' and " f"'pyarrow' are allowed.", ) The provided code snippet includes necessary dependencies for implementing the `read_sql_query` function. Write a Python function `def read_sql_query( sql, con, index_col: str | list[str] | None = None, coerce_float: bool = True, params: list[str] | dict[str, str] | None = None, parse_dates: list[str] | dict[str, str] | None = None, chunksize: int | None = None, dtype: DtypeArg | None = None, dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, ) -> DataFrame | Iterator[DataFrame]` to solve the following problem: Read SQL query into a DataFrame. Returns a DataFrame corresponding to the result set of the query string. Optionally provide an `index_col` parameter to use one of the columns as the index, otherwise default integer index will be used. 
Parameters ---------- sql : str SQL query or SQLAlchemy Selectable (select or text object) SQL query to be executed. con : SQLAlchemy connectable, str, or sqlite3 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. index_col : str or list of str, optional, default: None Column(s) to set as index(MultiIndex). coerce_float : bool, default True Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point. Useful for SQL result sets. params : list, tuple or dict, optional, default: None List of parameters to pass to execute method. The syntax used to pass parameters is database driver dependent. Check your database driver documentation for which of the five syntax styles, described in PEP 249's paramstyle, is supported. Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}. parse_dates : list or dict, default: None - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times, or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite. chunksize : int, default None If specified, return an iterator where `chunksize` is the number of rows to include in each chunk. dtype : Type name or dict of columns Data type for data or columns. E.g. np.float64 or {‘a’: np.float64, ‘b’: np.int32, ‘c’: ‘Int64’}. .. versionadded:: 1.3.0 dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy backed DataFrames Which dtype_backend to use, e.g. whether a DataFrame should have NumPy arrays, nullable dtypes are used for all dtypes that have a nullable implementation when "numpy_nullable" is set, pyarrow is used for all dtypes if "pyarrow" is set. The dtype_backends are still experimential. .. versionadded:: 2.0 Returns ------- DataFrame or Iterator[DataFrame] See Also -------- read_sql_table : Read SQL database table into a DataFrame. read_sql : Read SQL query or database table into a DataFrame. Notes ----- Any datetime values with time zone information parsed via the `parse_dates` parameter will be converted to UTC. Here is the function: def read_sql_query( sql, con, index_col: str | list[str] | None = None, coerce_float: bool = True, params: list[str] | dict[str, str] | None = None, parse_dates: list[str] | dict[str, str] | None = None, chunksize: int | None = None, dtype: DtypeArg | None = None, dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, ) -> DataFrame | Iterator[DataFrame]: """ Read SQL query into a DataFrame. Returns a DataFrame corresponding to the result set of the query string. Optionally provide an `index_col` parameter to use one of the columns as the index, otherwise default integer index will be used. Parameters ---------- sql : str SQL query or SQLAlchemy Selectable (select or text object) SQL query to be executed. con : SQLAlchemy connectable, str, or sqlite3 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. index_col : str or list of str, optional, default: None Column(s) to set as index(MultiIndex). coerce_float : bool, default True Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point. 
Useful for SQL result sets. params : list, tuple or dict, optional, default: None List of parameters to pass to execute method. The syntax used to pass parameters is database driver dependent. Check your database driver documentation for which of the five syntax styles, described in PEP 249's paramstyle, is supported. Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}. parse_dates : list or dict, default: None - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times, or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite. chunksize : int, default None If specified, return an iterator where `chunksize` is the number of rows to include in each chunk. dtype : Type name or dict of columns Data type for data or columns. E.g. np.float64 or {‘a’: np.float64, ‘b’: np.int32, ‘c’: ‘Int64’}. .. versionadded:: 1.3.0 dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy backed DataFrames Which dtype_backend to use, e.g. whether a DataFrame should have NumPy arrays, nullable dtypes are used for all dtypes that have a nullable implementation when "numpy_nullable" is set, pyarrow is used for all dtypes if "pyarrow" is set. The dtype_backends are still experimential. .. versionadded:: 2.0 Returns ------- DataFrame or Iterator[DataFrame] See Also -------- read_sql_table : Read SQL database table into a DataFrame. read_sql : Read SQL query or database table into a DataFrame. Notes ----- Any datetime values with time zone information parsed via the `parse_dates` parameter will be converted to UTC. """ check_dtype_backend(dtype_backend) if dtype_backend is lib.no_default: dtype_backend = "numpy" # type: ignore[assignment] with pandasSQL_builder(con) as pandas_sql: return pandas_sql.read_query( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates, chunksize=chunksize, dtype=dtype, dtype_backend=dtype_backend, )
Read SQL query into a DataFrame. Returns a DataFrame corresponding to the result set of the query string. Optionally provide an `index_col` parameter to use one of the columns as the index, otherwise the default integer index will be used. Parameters ---------- sql : str SQL query or SQLAlchemy Selectable (select or text object) SQL query to be executed. con : SQLAlchemy connectable, str, or sqlite3 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. index_col : str or list of str, optional, default: None Column(s) to set as index (MultiIndex). coerce_float : bool, default True Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point. Useful for SQL result sets. params : list, tuple or dict, optional, default: None List of parameters to pass to execute method. The syntax used to pass parameters is database driver dependent. Check your database driver documentation for which of the five syntax styles, described in PEP 249's paramstyle, is supported. E.g. for psycopg2, uses %(name)s so use params={'name' : 'value'}. parse_dates : list or dict, default: None - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times, or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite. chunksize : int, default None If specified, return an iterator where `chunksize` is the number of rows to include in each chunk. dtype : Type name or dict of columns Data type for data or columns. E.g. np.float64 or {'a': np.float64, 'b': np.int32, 'c': 'Int64'}. .. versionadded:: 1.3.0 dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy backed DataFrames Which dtype_backend to use, e.g. whether a DataFrame should have NumPy arrays, nullable dtypes are used for all dtypes that have a nullable implementation when "numpy_nullable" is set, pyarrow is used for all dtypes if "pyarrow" is set. The dtype_backends are still experimental. .. versionadded:: 2.0 Returns ------- DataFrame or Iterator[DataFrame] See Also -------- read_sql_table : Read SQL database table into a DataFrame. read_sql : Read SQL query or database table into a DataFrame. Notes ----- Any datetime values with time zone information parsed via the `parse_dates` parameter will be converted to UTC.
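A self-contained sketch over a sqlite3 DBAPI connection, exercising params, index_col and parse_dates together; the table and column names are made up.

import sqlite3
import pandas as pd

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE events (id INTEGER, happened TEXT)")
conn.executemany(
    "INSERT INTO events VALUES (?, ?)",
    [(1, "2021-01-01"), (2, "2021-02-01")],
)

df = pd.read_sql_query(
    "SELECT id, happened FROM events WHERE id >= ?",
    conn,
    params=(1,),                # qmark style, since sqlite3 is the driver
    index_col="id",
    parse_dates=["happened"],   # string timestamps -> datetime64[ns]
)
print(df["happened"].dtype)  # datetime64[ns]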
173,490
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from contextlib import ( ExitStack, contextmanager, ) from datetime import ( date, datetime, time, ) from functools import partial import re from typing import ( TYPE_CHECKING, Any, Iterator, Literal, cast, overload, ) import warnings import numpy as np from pandas._libs import lib from pandas._typing import ( DateTimeErrorChoices, DtypeArg, DtypeBackend, IndexLabel, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import ( AbstractMethodError, DatabaseError, ) from pandas.util._exceptions import find_stack_level from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( is_datetime64tz_dtype, is_dict_like, is_integer, is_list_like, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna from pandas import get_option from pandas.core.api import ( DataFrame, Series, ) from pandas.core.arrays import ArrowExtensionArray from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.internals.construction import convert_object_array from pandas.core.tools.datetimes import to_datetime DtypeArg = Union[Dtype, Dict[Hashable, Dtype]] DtypeBackend = Literal["pyarrow", "numpy_nullable"] def read_sql( sql, con, index_col: str | list[str] | None = ..., coerce_float=..., params=..., parse_dates=..., columns: list[str] = ..., chunksize: None = ..., dtype_backend: DtypeBackend | lib.NoDefault = ..., dtype: DtypeArg | None = None, ) -> DataFrame: ...
null
173,491
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from contextlib import ( ExitStack, contextmanager, ) from datetime import ( date, datetime, time, ) from functools import partial import re from typing import ( TYPE_CHECKING, Any, Iterator, Literal, cast, overload, ) import warnings import numpy as np from pandas._libs import lib from pandas._typing import ( DateTimeErrorChoices, DtypeArg, DtypeBackend, IndexLabel, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import ( AbstractMethodError, DatabaseError, ) from pandas.util._exceptions import find_stack_level from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( is_datetime64tz_dtype, is_dict_like, is_integer, is_list_like, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna from pandas import get_option from pandas.core.api import ( DataFrame, Series, ) from pandas.core.arrays import ArrowExtensionArray from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.internals.construction import convert_object_array from pandas.core.tools.datetimes import to_datetime class Iterator(Iterable[_T_co], Protocol[_T_co]): def __next__(self) -> _T_co: ... def __iter__(self) -> Iterator[_T_co]: ... DtypeArg = Union[Dtype, Dict[Hashable, Dtype]] DtypeBackend = Literal["pyarrow", "numpy_nullable"] def read_sql( sql, con, index_col: str | list[str] | None = ..., coerce_float=..., params=..., parse_dates=..., columns: list[str] = ..., chunksize: int = ..., dtype_backend: DtypeBackend | lib.NoDefault = ..., dtype: DtypeArg | None = None, ) -> Iterator[DataFrame]: ...
null
173,492
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from contextlib import ( ExitStack, contextmanager, ) from datetime import ( date, datetime, time, ) from functools import partial import re from typing import ( TYPE_CHECKING, Any, Iterator, Literal, cast, overload, ) import warnings import numpy as np from pandas._libs import lib from pandas._typing import ( DateTimeErrorChoices, DtypeArg, DtypeBackend, IndexLabel, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import ( AbstractMethodError, DatabaseError, ) from pandas.util._exceptions import find_stack_level from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( is_datetime64tz_dtype, is_dict_like, is_integer, is_list_like, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna from pandas import get_option from pandas.core.api import ( DataFrame, Series, ) from pandas.core.arrays import ArrowExtensionArray from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.internals.construction import convert_object_array from pandas.core.tools.datetimes import to_datetime def has_table(table_name: str, con, schema: str | None = None) -> bool: """ Check if DataBase has named table. Parameters ---------- table_name: string Name of SQL table. con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. schema : string, default None Name of SQL schema in database to write to (if database flavor supports this). If None, use default schema (default). Returns ------- boolean """ with pandasSQL_builder(con, schema=schema) as pandas_sql: return pandas_sql.has_table(table_name) def pandasSQL_builder( con, schema: str | None = None, need_transaction: bool = False, ) -> PandasSQL: """ Convenience function to return the correct PandasSQL subclass based on the provided parameters. Also creates a sqlalchemy connection and transaction if necessary. """ import sqlite3 if isinstance(con, sqlite3.Connection) or con is None: return SQLiteDatabase(con) sqlalchemy = import_optional_dependency("sqlalchemy", errors="ignore") if isinstance(con, str) and sqlalchemy is None: raise ImportError("Using URI string without sqlalchemy installed.") if sqlalchemy is not None and isinstance(con, (str, sqlalchemy.engine.Connectable)): return SQLDatabase(con, schema, need_transaction) warnings.warn( "pandas only supports SQLAlchemy connectable (engine/connection) or " "database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 " "objects are not tested. Please consider using SQLAlchemy.", UserWarning, stacklevel=find_stack_level(), ) return SQLiteDatabase(con) class SQLiteDatabase(PandasSQL): """ Version of SQLDatabase to support SQLite connections (fallback without SQLAlchemy). This should only be used internally. 
Parameters ---------- con : sqlite connection object """ def __init__(self, con) -> None: self.con = con def run_transaction(self): cur = self.con.cursor() try: yield cur self.con.commit() except Exception: self.con.rollback() raise finally: cur.close() def execute(self, sql: str | Select | TextClause, params=None): if not isinstance(sql, str): raise TypeError("Query must be a string unless using sqlalchemy.") args = [] if params is None else [params] cur = self.con.cursor() try: cur.execute(sql, *args) return cur except Exception as exc: try: self.con.rollback() except Exception as inner_exc: # pragma: no cover ex = DatabaseError( f"Execution failed on sql: {sql}\n{exc}\nunable to rollback" ) raise ex from inner_exc ex = DatabaseError(f"Execution failed on sql '{sql}': {exc}") raise ex from exc def _query_iterator( cursor, chunksize: int, columns, index_col=None, coerce_float: bool = True, parse_dates=None, dtype: DtypeArg | None = None, dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", ): """Return generator through chunked result set""" has_read_data = False while True: data = cursor.fetchmany(chunksize) if type(data) == tuple: data = list(data) if not data: cursor.close() if not has_read_data: result = DataFrame.from_records( [], columns=columns, coerce_float=coerce_float ) if dtype: result = result.astype(dtype) yield result break has_read_data = True yield _wrap_result( data, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, dtype=dtype, dtype_backend=dtype_backend, ) def read_query( self, sql, index_col=None, coerce_float: bool = True, parse_dates=None, params=None, chunksize: int | None = None, dtype: DtypeArg | None = None, dtype_backend: DtypeBackend | Literal["numpy"] = "numpy", ) -> DataFrame | Iterator[DataFrame]: cursor = self.execute(sql, params) columns = [col_desc[0] for col_desc in cursor.description] if chunksize is not None: return self._query_iterator( cursor, chunksize, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, dtype=dtype, dtype_backend=dtype_backend, ) else: data = self._fetchall_as_list(cursor) cursor.close() frame = _wrap_result( data, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, dtype=dtype, dtype_backend=dtype_backend, ) return frame def _fetchall_as_list(self, cur): result = cur.fetchall() if not isinstance(result, list): result = list(result) return result def to_sql( self, frame, name, if_exists: str = "fail", index: bool = True, index_label=None, schema=None, chunksize=None, dtype: DtypeArg | None = None, method=None, engine: str = "auto", **engine_kwargs, ) -> int | None: """ Write records stored in a DataFrame to a SQL database. Parameters ---------- frame: DataFrame name: string Name of SQL table. if_exists: {'fail', 'replace', 'append'}, default 'fail' fail: If table exists, do nothing. replace: If table exists, drop it, recreate it, and insert data. append: If table exists, insert data. Create if it does not exist. index : bool, default True Write DataFrame index as a column index_label : string or sequence, default None Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. schema : string, default None Ignored parameter included for compatibility with SQLAlchemy version of ``to_sql``. chunksize : int, default None If not None, then rows will be written in batches of this size at a time. 
If None, all rows will be written at once. dtype : single type or dict of column name to SQL type, default None Optional specifying the datatype for columns. The SQL type should be a string. If all columns are of the same type, one single value can be used. method : {None, 'multi', callable}, default None Controls the SQL insertion clause used: * None : Uses standard SQL ``INSERT`` clause (one per row). * 'multi': Pass multiple values in a single ``INSERT`` clause. * callable with signature ``(pd_table, conn, keys, data_iter)``. Details and a sample callable implementation can be found in the section :ref:`insert method <io.sql.method>`. """ if dtype: if not is_dict_like(dtype): # error: Value expression in dictionary comprehension has incompatible # type "Union[ExtensionDtype, str, dtype[Any], Type[object], # Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]], # Type[str], Type[float], Type[int], Type[complex], Type[bool], # Type[object]]]]"; expected type "Union[ExtensionDtype, str, # dtype[Any], Type[object]]" dtype = {col_name: dtype for col_name in frame} # type: ignore[misc] else: dtype = cast(dict, dtype) for col, my_type in dtype.items(): if not isinstance(my_type, str): raise ValueError(f"{col} ({my_type}) not a string") table = SQLiteTable( name, self, frame=frame, index=index, if_exists=if_exists, index_label=index_label, dtype=dtype, ) table.create() return table.insert(chunksize, method) def has_table(self, name: str, schema: str | None = None) -> bool: wld = "?" query = f"SELECT name FROM sqlite_master WHERE type='table' AND name={wld};" return len(self.execute(query, [name]).fetchall()) > 0 def get_table(self, table_name: str, schema: str | None = None) -> None: return None # not supported in fallback mode def drop_table(self, name: str, schema: str | None = None) -> None: drop_sql = f"DROP TABLE {_get_valid_sqlite_name(name)}" self.execute(drop_sql) def _create_sql_schema( self, frame, table_name: str, keys=None, dtype: DtypeArg | None = None, schema: str | None = None, ): table = SQLiteTable( table_name, self, frame=frame, index=False, keys=keys, dtype=dtype, schema=schema, ) return str(table.sql_schema()) class Iterator(Iterable[_T_co], Protocol[_T_co]): def __next__(self) -> _T_co: ... def __iter__(self) -> Iterator[_T_co]: ... DtypeArg = Union[Dtype, Dict[Hashable, Dtype]] DtypeBackend = Literal["pyarrow", "numpy_nullable"] def check_dtype_backend(dtype_backend) -> None: if dtype_backend is not lib.no_default: if dtype_backend not in ["numpy_nullable", "pyarrow"]: raise ValueError( f"dtype_backend {dtype_backend} is invalid, only 'numpy_nullable' and " f"'pyarrow' are allowed.", ) The provided code snippet includes necessary dependencies for implementing the `read_sql` function. Write a Python function `def read_sql( sql, con, index_col: str | list[str] | None = None, coerce_float: bool = True, params=None, parse_dates=None, columns: list[str] | None = None, chunksize: int | None = None, dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, dtype: DtypeArg | None = None, ) -> DataFrame | Iterator[DataFrame]` to solve the following problem: Read SQL query or database table into a DataFrame. This function is a convenience wrapper around ``read_sql_table`` and ``read_sql_query`` (for backward compatibility). It will delegate to the specific function depending on the provided input. A SQL query will be routed to ``read_sql_query``, while a database table name will be routed to ``read_sql_table``. 
Note that the delegated function might have more specific notes about their functionality not listed here. Parameters ---------- sql : str or SQLAlchemy Selectable (select or text object) SQL query to be executed or a table name. con : SQLAlchemy connectable, str, or sqlite3 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible for engine disposal and connection closure for the SQLAlchemy connectable; str connections are closed automatically. See `here <https://docs.sqlalchemy.org/en/13/core/connections.html>`_. index_col : str or list of str, optional, default: None Column(s) to set as index(MultiIndex). coerce_float : bool, default True Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. params : list, tuple or dict, optional, default: None List of parameters to pass to execute method. The syntax used to pass parameters is database driver dependent. Check your database driver documentation for which of the five syntax styles, described in PEP 249's paramstyle, is supported. Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}. parse_dates : list or dict, default: None - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times, or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite. columns : list, default: None List of column names to select from SQL table (only used when reading a table). chunksize : int, default None If specified, return an iterator where `chunksize` is the number of rows to include in each chunk. dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy backed DataFrames Which dtype_backend to use, e.g. whether a DataFrame should have NumPy arrays, nullable dtypes are used for all dtypes that have a nullable implementation when "numpy_nullable" is set, pyarrow is used for all dtypes if "pyarrow" is set. The dtype_backends are still experimential. .. versionadded:: 2.0 dtype : Type name or dict of columns Data type for data or columns. E.g. np.float64 or {‘a’: np.float64, ‘b’: np.int32, ‘c’: ‘Int64’}. The argument is ignored if a table is passed instead of a query. .. versionadded:: 2.0.0 Returns ------- DataFrame or Iterator[DataFrame] See Also -------- read_sql_table : Read SQL database table into a DataFrame. read_sql_query : Read SQL query into a DataFrame. Examples -------- Read data from SQL via either a SQL query or a SQL tablename. When using a SQLite database only SQL queries are accepted, providing only the SQL tablename will result in an error. >>> from sqlite3 import connect >>> conn = connect(':memory:') >>> df = pd.DataFrame(data=[[0, '10/11/12'], [1, '12/11/10']], ... columns=['int_column', 'date_column']) >>> df.to_sql('test_data', conn) 2 >>> pd.read_sql('SELECT int_column, date_column FROM test_data', conn) int_column date_column 0 0 10/11/12 1 1 12/11/10 >>> pd.read_sql('test_data', 'postgres:///db_name') # doctest:+SKIP Apply date parsing to columns through the ``parse_dates`` argument The ``parse_dates`` argument calls ``pd.to_datetime`` on the provided columns. 
Custom argument values for applying ``pd.to_datetime`` on a column are specified via a dictionary format: >>> pd.read_sql('SELECT int_column, date_column FROM test_data', ... conn, ... parse_dates={"date_column": {"format": "%d/%m/%y"}}) int_column date_column 0 0 2012-11-10 1 1 2010-11-12 Here is the function: def read_sql( sql, con, index_col: str | list[str] | None = None, coerce_float: bool = True, params=None, parse_dates=None, columns: list[str] | None = None, chunksize: int | None = None, dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, dtype: DtypeArg | None = None, ) -> DataFrame | Iterator[DataFrame]: """ Read SQL query or database table into a DataFrame. This function is a convenience wrapper around ``read_sql_table`` and ``read_sql_query`` (for backward compatibility). It will delegate to the specific function depending on the provided input. A SQL query will be routed to ``read_sql_query``, while a database table name will be routed to ``read_sql_table``. Note that the delegated function might have more specific notes about their functionality not listed here. Parameters ---------- sql : str or SQLAlchemy Selectable (select or text object) SQL query to be executed or a table name. con : SQLAlchemy connectable, str, or sqlite3 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible for engine disposal and connection closure for the SQLAlchemy connectable; str connections are closed automatically. See `here <https://docs.sqlalchemy.org/en/13/core/connections.html>`_. index_col : str or list of str, optional, default: None Column(s) to set as index(MultiIndex). coerce_float : bool, default True Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. params : list, tuple or dict, optional, default: None List of parameters to pass to execute method. The syntax used to pass parameters is database driver dependent. Check your database driver documentation for which of the five syntax styles, described in PEP 249's paramstyle, is supported. Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}. parse_dates : list or dict, default: None - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times, or is one of (D, s, ns, ms, us) in case of parsing integer timestamps. - Dict of ``{column_name: arg dict}``, where the arg dict corresponds to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite. columns : list, default: None List of column names to select from SQL table (only used when reading a table). chunksize : int, default None If specified, return an iterator where `chunksize` is the number of rows to include in each chunk. dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy backed DataFrames Which dtype_backend to use, e.g. whether a DataFrame should have NumPy arrays, nullable dtypes are used for all dtypes that have a nullable implementation when "numpy_nullable" is set, pyarrow is used for all dtypes if "pyarrow" is set. The dtype_backends are still experimential. .. versionadded:: 2.0 dtype : Type name or dict of columns Data type for data or columns. E.g. np.float64 or {‘a’: np.float64, ‘b’: np.int32, ‘c’: ‘Int64’}. The argument is ignored if a table is passed instead of a query. .. 
versionadded:: 2.0.0 Returns ------- DataFrame or Iterator[DataFrame] See Also -------- read_sql_table : Read SQL database table into a DataFrame. read_sql_query : Read SQL query into a DataFrame. Examples -------- Read data from SQL via either a SQL query or a SQL tablename. When using a SQLite database only SQL queries are accepted, providing only the SQL tablename will result in an error. >>> from sqlite3 import connect >>> conn = connect(':memory:') >>> df = pd.DataFrame(data=[[0, '10/11/12'], [1, '12/11/10']], ... columns=['int_column', 'date_column']) >>> df.to_sql('test_data', conn) 2 >>> pd.read_sql('SELECT int_column, date_column FROM test_data', conn) int_column date_column 0 0 10/11/12 1 1 12/11/10 >>> pd.read_sql('test_data', 'postgres:///db_name') # doctest:+SKIP Apply date parsing to columns through the ``parse_dates`` argument The ``parse_dates`` argument calls ``pd.to_datetime`` on the provided columns. Custom argument values for applying ``pd.to_datetime`` on a column are specified via a dictionary format: >>> pd.read_sql('SELECT int_column, date_column FROM test_data', ... conn, ... parse_dates={"date_column": {"format": "%d/%m/%y"}}) int_column date_column 0 0 2012-11-10 1 1 2010-11-12 """ check_dtype_backend(dtype_backend) if dtype_backend is lib.no_default: dtype_backend = "numpy" # type: ignore[assignment] with pandasSQL_builder(con) as pandas_sql: if isinstance(pandas_sql, SQLiteDatabase): return pandas_sql.read_query( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates, chunksize=chunksize, dtype_backend=dtype_backend, # type: ignore[arg-type] dtype=dtype, ) try: _is_table_name = pandas_sql.has_table(sql) except Exception: # using generic exception to catch errors from sql drivers (GH24988) _is_table_name = False if _is_table_name: return pandas_sql.read_table( sql, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns, chunksize=chunksize, dtype_backend=dtype_backend, ) else: return pandas_sql.read_query( sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates, chunksize=chunksize, dtype_backend=dtype_backend, dtype=dtype, )
Read SQL query or database table into a DataFrame.

This function is a convenience wrapper around ``read_sql_table`` and
``read_sql_query`` (for backward compatibility). It will delegate to the
specific function depending on the provided input. A SQL query will be
routed to ``read_sql_query``, while a database table name will be routed
to ``read_sql_table``. Note that the delegated function might have more
specific notes about their functionality not listed here.

Parameters
----------
sql : str or SQLAlchemy Selectable (select or text object)
    SQL query to be executed or a table name.
con : SQLAlchemy connectable, str, or sqlite3 connection
    Using SQLAlchemy makes it possible to use any DB supported by that
    library. If a DBAPI2 object, only sqlite3 is supported. The user is
    responsible for engine disposal and connection closure for the
    SQLAlchemy connectable; str connections are closed automatically. See
    `here <https://docs.sqlalchemy.org/en/13/core/connections.html>`_.
index_col : str or list of str, optional, default: None
    Column(s) to set as index (MultiIndex).
coerce_float : bool, default True
    Attempts to convert values of non-string, non-numeric objects (like
    decimal.Decimal) to floating point, useful for SQL result sets.
params : list, tuple or dict, optional, default: None
    List of parameters to pass to execute method. The syntax used to pass
    parameters is database driver dependent. Check your database driver
    documentation for which of the five syntax styles, described in
    PEP 249's paramstyle, is supported. E.g. psycopg2 uses %(name)s,
    so use params={'name' : 'value'}.
parse_dates : list or dict, default: None
    - List of column names to parse as dates.
    - Dict of ``{column_name: format string}`` where format string is
      strftime compatible in case of parsing string times, or is one of
      (D, s, ns, ms, us) in case of parsing integer timestamps.
    - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
      to the keyword arguments of :func:`pandas.to_datetime`. Especially
      useful with databases without native Datetime support, such as
      SQLite.
columns : list, default: None
    List of column names to select from SQL table (only used when reading
    a table).
chunksize : int, default None
    If specified, return an iterator where `chunksize` is the number of
    rows to include in each chunk.
dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy backed DataFrames
    Which dtype_backend to use, e.g. whether a DataFrame should have NumPy
    arrays; nullable dtypes are used for all dtypes that have a nullable
    implementation when "numpy_nullable" is set, and pyarrow is used for
    all dtypes if "pyarrow" is set. The dtype_backends are still
    experimental.

    .. versionadded:: 2.0

dtype : Type name or dict of columns
    Data type for data or columns. E.g. np.float64 or
    {'a': np.float64, 'b': np.int32, 'c': 'Int64'}. The argument is
    ignored if a table is passed instead of a query.

    .. versionadded:: 2.0.0

Returns
-------
DataFrame or Iterator[DataFrame]

See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql_query : Read SQL query into a DataFrame.

Examples
--------
Read data from SQL via either a SQL query or a SQL tablename.
When using a SQLite database only SQL queries are accepted,
providing only the SQL tablename will result in an error.

>>> from sqlite3 import connect
>>> conn = connect(':memory:')
>>> df = pd.DataFrame(data=[[0, '10/11/12'], [1, '12/11/10']],
...                   columns=['int_column', 'date_column'])
>>> df.to_sql('test_data', conn)
2

>>> pd.read_sql('SELECT int_column, date_column FROM test_data', conn)
   int_column date_column
0           0    10/11/12
1           1    12/11/10

>>> pd.read_sql('test_data', 'postgres:///db_name')  # doctest:+SKIP

Apply date parsing to columns through the ``parse_dates`` argument.
The ``parse_dates`` argument calls ``pd.to_datetime`` on the provided
columns. Custom argument values for applying ``pd.to_datetime`` on a
column are specified via a dictionary format:

>>> pd.read_sql('SELECT int_column, date_column FROM test_data',
...             conn,
...             parse_dates={"date_column": {"format": "%d/%m/%y"}})
   int_column date_column
0           0  2012-11-10
1           1  2010-11-12
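A minimal usage sketch of the dispatch described above, exercising only the sqlite3 fallback path (the table name ``demo`` is illustrative, not from the source):

import sqlite3

import pandas as pd

conn = sqlite3.connect(":memory:")
pd.DataFrame({"a": [1, 2], "b": ["x", "y"]}).to_sql("demo", conn, index=False)

# With a DBAPI2 connection, read_sql always takes the read_query path,
# so a SELECT statement works but a bare table name would raise.
frame = pd.read_sql("SELECT a, b FROM demo", conn)

# Passing chunksize returns an iterator of DataFrames instead of one frame.
for chunk in pd.read_sql("SELECT a, b FROM demo", conn, chunksize=1):
    print(chunk)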
173,493
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from contextlib import ( ExitStack, contextmanager, ) from datetime import ( date, datetime, time, ) from functools import partial import re from typing import ( TYPE_CHECKING, Any, Iterator, Literal, cast, overload, ) import warnings import numpy as np from pandas._libs import lib from pandas._typing import ( DateTimeErrorChoices, DtypeArg, DtypeBackend, IndexLabel, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import ( AbstractMethodError, DatabaseError, ) from pandas.util._exceptions import find_stack_level from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( is_datetime64tz_dtype, is_dict_like, is_integer, is_list_like, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna from pandas import get_option from pandas.core.api import ( DataFrame, Series, ) from pandas.core.arrays import ArrowExtensionArray from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.internals.construction import convert_object_array from pandas.core.tools.datetimes import to_datetime def pandasSQL_builder( con, schema: str | None = None, need_transaction: bool = False, ) -> PandasSQL: """ Convenience function to return the correct PandasSQL subclass based on the provided parameters. Also creates a sqlalchemy connection and transaction if necessary. """ import sqlite3 if isinstance(con, sqlite3.Connection) or con is None: return SQLiteDatabase(con) sqlalchemy = import_optional_dependency("sqlalchemy", errors="ignore") if isinstance(con, str) and sqlalchemy is None: raise ImportError("Using URI string without sqlalchemy installed.") if sqlalchemy is not None and isinstance(con, (str, sqlalchemy.engine.Connectable)): return SQLDatabase(con, schema, need_transaction) warnings.warn( "pandas only supports SQLAlchemy connectable (engine/connection) or " "database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 " "objects are not tested. Please consider using SQLAlchemy.", UserWarning, stacklevel=find_stack_level(), ) return SQLiteDatabase(con) Literal: _SpecialForm = ... IndexLabel = Union[Hashable, Sequence[Hashable]] DtypeArg = Union[Dtype, Dict[Hashable, Dtype]] The provided code snippet includes necessary dependencies for implementing the `to_sql` function. Write a Python function `def to_sql( frame, name: str, con, schema: str | None = None, if_exists: Literal["fail", "replace", "append"] = "fail", index: bool = True, index_label: IndexLabel = None, chunksize: int | None = None, dtype: DtypeArg | None = None, method: str | None = None, engine: str = "auto", **engine_kwargs, ) -> int | None` to solve the following problem: Write records stored in a DataFrame to a SQL database. Parameters ---------- frame : DataFrame, Series name : str Name of SQL table. con : SQLAlchemy connectable(engine/connection) or database string URI or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. schema : str, optional Name of SQL schema in database to write to (if database flavor supports this). If None, use default schema (default). if_exists : {'fail', 'replace', 'append'}, default 'fail' - fail: If table exists, do nothing. - replace: If table exists, drop it, recreate it, and insert data. - append: If table exists, insert data. Create if does not exist. 
index : bool, default True Write DataFrame index as a column. index_label : str or sequence, optional Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. chunksize : int, optional Specify the number of rows in each batch to be written at a time. By default, all rows will be written at once. dtype : dict or scalar, optional Specifying the datatype for columns. If a dictionary is used, the keys should be the column names and the values should be the SQLAlchemy types or strings for the sqlite3 fallback mode. If a scalar is provided, it will be applied to all columns. method : {None, 'multi', callable}, optional Controls the SQL insertion clause used: - None : Uses standard SQL ``INSERT`` clause (one per row). - ``'multi'``: Pass multiple values in a single ``INSERT`` clause. - callable with signature ``(pd_table, conn, keys, data_iter) -> int | None``. Details and a sample callable implementation can be found in the section :ref:`insert method <io.sql.method>`. engine : {'auto', 'sqlalchemy'}, default 'auto' SQL engine library to use. If 'auto', then the option ``io.sql.engine`` is used. The default ``io.sql.engine`` behavior is 'sqlalchemy' .. versionadded:: 1.3.0 **engine_kwargs Any additional kwargs are passed to the engine. Returns ------- None or int Number of rows affected by to_sql. None is returned if the callable passed into ``method`` does not return an integer number of rows. .. versionadded:: 1.4.0 Notes ----- The returned rows affected is the sum of the ``rowcount`` attribute of ``sqlite3.Cursor`` or SQLAlchemy connectable. The returned value may not reflect the exact number of written rows as stipulated in the `sqlite3 <https://docs.python.org/3/library/sqlite3.html#sqlite3.Cursor.rowcount>`__ or `SQLAlchemy <https://docs.sqlalchemy.org/en/14/core/connections.html#sqlalchemy.engine.BaseCursorResult.rowcount>`__ Here is the function: def to_sql( frame, name: str, con, schema: str | None = None, if_exists: Literal["fail", "replace", "append"] = "fail", index: bool = True, index_label: IndexLabel = None, chunksize: int | None = None, dtype: DtypeArg | None = None, method: str | None = None, engine: str = "auto", **engine_kwargs, ) -> int | None: """ Write records stored in a DataFrame to a SQL database. Parameters ---------- frame : DataFrame, Series name : str Name of SQL table. con : SQLAlchemy connectable(engine/connection) or database string URI or sqlite3 DBAPI2 connection Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. schema : str, optional Name of SQL schema in database to write to (if database flavor supports this). If None, use default schema (default). if_exists : {'fail', 'replace', 'append'}, default 'fail' - fail: If table exists, do nothing. - replace: If table exists, drop it, recreate it, and insert data. - append: If table exists, insert data. Create if does not exist. index : bool, default True Write DataFrame index as a column. index_label : str or sequence, optional Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. chunksize : int, optional Specify the number of rows in each batch to be written at a time. By default, all rows will be written at once. dtype : dict or scalar, optional Specifying the datatype for columns. 
If a dictionary is used, the keys should be the column names and the values should be the SQLAlchemy types or strings for the sqlite3 fallback mode. If a scalar is provided, it will be applied to all columns. method : {None, 'multi', callable}, optional Controls the SQL insertion clause used: - None : Uses standard SQL ``INSERT`` clause (one per row). - ``'multi'``: Pass multiple values in a single ``INSERT`` clause. - callable with signature ``(pd_table, conn, keys, data_iter) -> int | None``. Details and a sample callable implementation can be found in the section :ref:`insert method <io.sql.method>`. engine : {'auto', 'sqlalchemy'}, default 'auto' SQL engine library to use. If 'auto', then the option ``io.sql.engine`` is used. The default ``io.sql.engine`` behavior is 'sqlalchemy' .. versionadded:: 1.3.0 **engine_kwargs Any additional kwargs are passed to the engine. Returns ------- None or int Number of rows affected by to_sql. None is returned if the callable passed into ``method`` does not return an integer number of rows. .. versionadded:: 1.4.0 Notes ----- The returned rows affected is the sum of the ``rowcount`` attribute of ``sqlite3.Cursor`` or SQLAlchemy connectable. The returned value may not reflect the exact number of written rows as stipulated in the `sqlite3 <https://docs.python.org/3/library/sqlite3.html#sqlite3.Cursor.rowcount>`__ or `SQLAlchemy <https://docs.sqlalchemy.org/en/14/core/connections.html#sqlalchemy.engine.BaseCursorResult.rowcount>`__ """ # noqa:E501 if if_exists not in ("fail", "replace", "append"): raise ValueError(f"'{if_exists}' is not valid for if_exists") if isinstance(frame, Series): frame = frame.to_frame() elif not isinstance(frame, DataFrame): raise NotImplementedError( "'frame' argument should be either a Series or a DataFrame" ) with pandasSQL_builder(con, schema=schema, need_transaction=True) as pandas_sql: return pandas_sql.to_sql( frame, name, if_exists=if_exists, index=index, index_label=index_label, schema=schema, chunksize=chunksize, dtype=dtype, method=method, engine=engine, **engine_kwargs, )
Write records stored in a DataFrame to a SQL database.

Parameters
----------
frame : DataFrame, Series
name : str
    Name of SQL table.
con : SQLAlchemy connectable (engine/connection) or database string URI
    or sqlite3 DBAPI2 connection
    Using SQLAlchemy makes it possible to use any DB supported by that
    library. If a DBAPI2 object, only sqlite3 is supported.
schema : str, optional
    Name of SQL schema in database to write to (if database flavor
    supports this). If None, use default schema (default).
if_exists : {'fail', 'replace', 'append'}, default 'fail'
    - fail: If table exists, do nothing.
    - replace: If table exists, drop it, recreate it, and insert data.
    - append: If table exists, insert data. Create if does not exist.
index : bool, default True
    Write DataFrame index as a column.
index_label : str or sequence, optional
    Column label for index column(s). If None is given (default) and
    `index` is True, then the index names are used. A sequence should be
    given if the DataFrame uses MultiIndex.
chunksize : int, optional
    Specify the number of rows in each batch to be written at a time.
    By default, all rows will be written at once.
dtype : dict or scalar, optional
    Specifying the datatype for columns. If a dictionary is used, the
    keys should be the column names and the values should be the
    SQLAlchemy types or strings for the sqlite3 fallback mode. If a
    scalar is provided, it will be applied to all columns.
method : {None, 'multi', callable}, optional
    Controls the SQL insertion clause used:

    - None : Uses standard SQL ``INSERT`` clause (one per row).
    - ``'multi'``: Pass multiple values in a single ``INSERT`` clause.
    - callable with signature ``(pd_table, conn, keys, data_iter) -> int | None``.

    Details and a sample callable implementation can be found in the
    section :ref:`insert method <io.sql.method>`.
engine : {'auto', 'sqlalchemy'}, default 'auto'
    SQL engine library to use. If 'auto', then the option
    ``io.sql.engine`` is used. The default ``io.sql.engine`` behavior is
    'sqlalchemy'.

    .. versionadded:: 1.3.0

**engine_kwargs
    Any additional kwargs are passed to the engine.

Returns
-------
None or int
    Number of rows affected by to_sql. None is returned if the callable
    passed into ``method`` does not return an integer number of rows.

    .. versionadded:: 1.4.0

Notes
-----
The returned rows affected is the sum of the ``rowcount`` attribute of
``sqlite3.Cursor`` or SQLAlchemy connectable. The returned value may not
reflect the exact number of written rows as stipulated in the
`sqlite3 <https://docs.python.org/3/library/sqlite3.html#sqlite3.Cursor.rowcount>`__
or `SQLAlchemy <https://docs.sqlalchemy.org/en/14/core/connections.html#sqlalchemy.engine.BaseCursorResult.rowcount>`__
documentation.
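Because ``method`` accepts a callable with signature ``(pd_table, conn, keys, data_iter)``, a custom insertion strategy can be plugged in. A hedged sketch against the sqlite3 fallback, where ``conn`` is a DBAPI cursor inside the library's transaction; the helper name ``insert_and_count`` and table name ``t`` are hypothetical:

import sqlite3

import pandas as pd

def insert_and_count(pd_table, conn, keys, data_iter):
    # keys are the column names and data_iter yields row tuples; returning
    # an int lets to_sql report the number of rows written.
    rows = list(data_iter)
    cols = ", ".join(f'"{k}"' for k in keys)
    wildcards = ", ".join(["?"] * len(keys))
    conn.executemany(
        f'INSERT INTO "{pd_table.name}" ({cols}) VALUES ({wildcards})', rows
    )
    return len(rows)

conn = sqlite3.connect(":memory:")
written = pd.DataFrame({"a": [1, 2, 3]}).to_sql(
    "t", conn, index=False, method=insert_and_count
)
assert written == 3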
173,494
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from contextlib import ( ExitStack, contextmanager, ) from datetime import ( date, datetime, time, ) from functools import partial import re from typing import ( TYPE_CHECKING, Any, Iterator, Literal, cast, overload, ) import warnings import numpy as np from pandas._libs import lib from pandas._typing import ( DateTimeErrorChoices, DtypeArg, DtypeBackend, IndexLabel, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import ( AbstractMethodError, DatabaseError, ) from pandas.util._exceptions import find_stack_level from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( is_datetime64tz_dtype, is_dict_like, is_integer, is_list_like, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna from pandas import get_option from pandas.core.api import ( DataFrame, Series, ) from pandas.core.arrays import ArrowExtensionArray from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.internals.construction import convert_object_array from pandas.core.tools.datetimes import to_datetime class BaseEngine: def insert_records( self, table: SQLTable, con, frame, name, index: bool | str | list[str] | None = True, schema=None, chunksize=None, method=None, **engine_kwargs, ) -> int | None: """ Inserts data into already-prepared table """ raise AbstractMethodError(self) class SQLAlchemyEngine(BaseEngine): def __init__(self) -> None: import_optional_dependency( "sqlalchemy", extra="sqlalchemy is required for SQL support." ) def insert_records( self, table: SQLTable, con, frame, name, index: bool | str | list[str] | None = True, schema=None, chunksize=None, method=None, **engine_kwargs, ) -> int | None: from sqlalchemy import exc try: return table.insert(chunksize=chunksize, method=method) except exc.StatementError as err: # GH34431 # https://stackoverflow.com/a/67358288/6067848 msg = r"""(\(1054, "Unknown column 'inf(e0)?' in 'field list'"\))(?# )|inf can not be used with MySQL""" err_text = str(err.orig) if re.search(msg, err_text): raise ValueError("inf cannot be used with MySQL") from err raise err The provided code snippet includes necessary dependencies for implementing the `get_engine` function. Write a Python function `def get_engine(engine: str) -> BaseEngine` to solve the following problem: return our implementation Here is the function: def get_engine(engine: str) -> BaseEngine: """return our implementation""" if engine == "auto": engine = get_option("io.sql.engine") if engine == "auto": # try engines in this order engine_classes = [SQLAlchemyEngine] error_msgs = "" for engine_class in engine_classes: try: return engine_class() except ImportError as err: error_msgs += "\n - " + str(err) raise ImportError( "Unable to find a usable engine; " "tried using: 'sqlalchemy'.\n" "A suitable version of " "sqlalchemy is required for sql I/O " "support.\n" "Trying to import the above resulted in these errors:" f"{error_msgs}" ) if engine == "sqlalchemy": return SQLAlchemyEngine() raise ValueError("engine must be one of 'auto', 'sqlalchemy'")
return our implementation
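The resolution order above can be exercised directly; a small sketch, assuming sqlalchemy is installed (either spelling fails with ImportError otherwise):

from pandas.io.sql import get_engine

engine = get_engine("auto")         # consults the io.sql.engine option first
engine = get_engine("sqlalchemy")   # explicit request for the only engine
# get_engine("duckdb") would raise ValueError: engine must be one of
# 'auto', 'sqlalchemy'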
173,495
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from contextlib import ( ExitStack, contextmanager, ) from datetime import ( date, datetime, time, ) from functools import partial import re from typing import ( TYPE_CHECKING, Any, Iterator, Literal, cast, overload, ) import warnings import numpy as np from pandas._libs import lib from pandas._typing import ( DateTimeErrorChoices, DtypeArg, DtypeBackend, IndexLabel, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import ( AbstractMethodError, DatabaseError, ) from pandas.util._exceptions import find_stack_level from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( is_datetime64tz_dtype, is_dict_like, is_integer, is_list_like, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna from pandas import get_option from pandas.core.api import ( DataFrame, Series, ) from pandas.core.arrays import ArrowExtensionArray from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.internals.construction import convert_object_array from pandas.core.tools.datetimes import to_datetime def _get_unicode_name(name): try: uname = str(name).encode("utf-8", "strict").decode("utf-8") except UnicodeError as err: raise ValueError(f"Cannot convert identifier to UTF-8: '{name}'") from err return uname def _get_valid_sqlite_name(name): # See https://stackoverflow.com/questions/6514274/how-do-you-escape-strings\ # -for-sqlite-table-column-names-in-python # Ensure the string can be encoded as UTF-8. # Ensure the string does not include any NUL characters. # Replace all " with "". # Wrap the entire thing in double quotes. uname = _get_unicode_name(name) if not len(uname): raise ValueError("Empty table or column name specified") nul_index = uname.find("\x00") if nul_index >= 0: raise ValueError("SQLite identifier cannot contain NULs") return '"' + uname.replace('"', '""') + '"'
null
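This record ships no docstring, so as an illustration of the quoting rules above: the helper UTF-8-validates the name, rejects empty names and embedded NULs, doubles any embedded double quotes, and wraps the result in double quotes. A sketch (these are private helpers, imported here only for demonstration):

from pandas.io.sql import _get_valid_sqlite_name

assert _get_valid_sqlite_name('my "table"') == '"my ""table"""'

# Both of these raise ValueError: empty names and embedded NULs are rejected.
for bad in ("", "a\x00b"):
    try:
        _get_valid_sqlite_name(bad)
    except ValueError as err:
        print(err)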
173,496
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from contextlib import ( ExitStack, contextmanager, ) from datetime import ( date, datetime, time, ) from functools import partial import re from typing import ( TYPE_CHECKING, Any, Iterator, Literal, cast, overload, ) import warnings import numpy as np from pandas._libs import lib from pandas._typing import ( DateTimeErrorChoices, DtypeArg, DtypeBackend, IndexLabel, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import ( AbstractMethodError, DatabaseError, ) from pandas.util._exceptions import find_stack_level from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( is_datetime64tz_dtype, is_dict_like, is_integer, is_list_like, ) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna from pandas import get_option from pandas.core.api import ( DataFrame, Series, ) from pandas.core.arrays import ArrowExtensionArray from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.internals.construction import convert_object_array from pandas.core.tools.datetimes import to_datetime def pandasSQL_builder( con, schema: str | None = None, need_transaction: bool = False, ) -> PandasSQL: """ Convenience function to return the correct PandasSQL subclass based on the provided parameters. Also creates a sqlalchemy connection and transaction if necessary. """ import sqlite3 if isinstance(con, sqlite3.Connection) or con is None: return SQLiteDatabase(con) sqlalchemy = import_optional_dependency("sqlalchemy", errors="ignore") if isinstance(con, str) and sqlalchemy is None: raise ImportError("Using URI string without sqlalchemy installed.") if sqlalchemy is not None and isinstance(con, (str, sqlalchemy.engine.Connectable)): return SQLDatabase(con, schema, need_transaction) warnings.warn( "pandas only supports SQLAlchemy connectable (engine/connection) or " "database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 " "objects are not tested. Please consider using SQLAlchemy.", UserWarning, stacklevel=find_stack_level(), ) return SQLiteDatabase(con) DtypeArg = Union[Dtype, Dict[Hashable, Dtype]] The provided code snippet includes necessary dependencies for implementing the `get_schema` function. Write a Python function `def get_schema( frame, name: str, keys=None, con=None, dtype: DtypeArg | None = None, schema: str | None = None, ) -> str` to solve the following problem: Get the SQL db table schema for the given frame. Parameters ---------- frame : DataFrame name : str name of SQL table keys : string or sequence, default: None columns to use a primary key con: an open SQL database connection object or a SQLAlchemy connectable Using SQLAlchemy makes it possible to use any DB supported by that library, default: None If a DBAPI2 object, only sqlite3 is supported. dtype : dict of column name to SQL type, default None Optional specifying the datatype for columns. The SQL type should be a SQLAlchemy type, or a string for sqlite3 fallback connection. schema: str, default: None Optional specifying the schema to be used in creating the table. .. versionadded:: 1.2.0 Here is the function: def get_schema( frame, name: str, keys=None, con=None, dtype: DtypeArg | None = None, schema: str | None = None, ) -> str: """ Get the SQL db table schema for the given frame. 
Parameters ---------- frame : DataFrame name : str name of SQL table keys : string or sequence, default: None columns to use a primary key con: an open SQL database connection object or a SQLAlchemy connectable Using SQLAlchemy makes it possible to use any DB supported by that library, default: None If a DBAPI2 object, only sqlite3 is supported. dtype : dict of column name to SQL type, default None Optional specifying the datatype for columns. The SQL type should be a SQLAlchemy type, or a string for sqlite3 fallback connection. schema: str, default: None Optional specifying the schema to be used in creating the table. .. versionadded:: 1.2.0 """ with pandasSQL_builder(con=con) as pandas_sql: return pandas_sql._create_sql_schema( frame, name, keys=keys, dtype=dtype, schema=schema )
Get the SQL db table schema for the given frame.

Parameters
----------
frame : DataFrame
name : str
    name of SQL table
keys : string or sequence, default: None
    columns to use as primary key
con : an open SQL database connection object or a SQLAlchemy connectable
    Using SQLAlchemy makes it possible to use any DB supported by that
    library, default: None. If a DBAPI2 object, only sqlite3 is supported.
dtype : dict of column name to SQL type, default None
    Optional specifying the datatype for columns. The SQL type should be a
    SQLAlchemy type, or a string for sqlite3 fallback connection.
schema : str, default: None
    Optional specifying the schema to be used in creating the table.

    .. versionadded:: 1.2.0
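A short sketch of the default behavior: with ``con=None`` the builder above hands back the sqlite fallback, so the emitted DDL uses sqlite types (the table name ``demo`` is illustrative):

import pandas as pd

from pandas.io.sql import get_schema

df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
# con=None -> SQLiteDatabase fallback; keys becomes a PRIMARY KEY constraint
# and the dtype override replaces the inferred type for column "b".
print(get_schema(df, "demo", keys="a", dtype={"b": "TEXT"}))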
173,497
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, cast, ) import warnings from pandas._libs.json import loads from pandas._libs.tslibs import timezones from pandas._typing import ( DtypeObj, JSONSerializable, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import _registry as registry from pandas.core.dtypes.common import ( is_bool_dtype, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_extension_array_dtype, is_integer_dtype, is_numeric_dtype, is_period_dtype, is_string_dtype, is_timedelta64_dtype, ) from pandas.core.dtypes.dtypes import CategoricalDtype from pandas import DataFrame import pandas.core.common as com TABLE_SCHEMA_VERSION = "1.4.0" def set_default_names(data): """Sets index names to 'index' for regular, or 'level_x' for Multi""" if com.all_not_none(*data.index.names): nms = data.index.names if len(nms) == 1 and data.index.name == "index": warnings.warn( "Index name of 'index' is not round-trippable.", stacklevel=find_stack_level(), ) elif len(nms) > 1 and any(x.startswith("level_") for x in nms): warnings.warn( "Index names beginning with 'level_' are not round-trippable.", stacklevel=find_stack_level(), ) return data data = data.copy() if data.index.nlevels > 1: data.index.names = com.fill_missing_names(data.index.names) else: data.index.name = data.index.name or "index" return data def convert_pandas_type_to_json_field(arr) -> dict[str, JSONSerializable]: dtype = arr.dtype name: JSONSerializable if arr.name is None: name = "values" else: name = arr.name field: dict[str, JSONSerializable] = { "name": name, "type": as_json_table_type(dtype), } if is_categorical_dtype(dtype): cats = dtype.categories ordered = dtype.ordered field["constraints"] = {"enum": list(cats)} field["ordered"] = ordered elif is_period_dtype(dtype): field["freq"] = dtype.freq.freqstr elif is_datetime64tz_dtype(dtype): if timezones.is_utc(dtype.tz): # timezone.utc has no "zone" attr field["tz"] = "UTC" else: field["tz"] = dtype.tz.zone elif is_extension_array_dtype(dtype): field["extDtype"] = dtype.name return field Any = object() def cast(typ: Type[_T], val: Any) -> _T: ... def cast(typ: str, val: Any) -> Any: ... def cast(typ: object, val: Any) -> Any: ... JSONSerializable = Optional[Union[PythonScalar, List, Dict]] The provided code snippet includes necessary dependencies for implementing the `build_table_schema` function. Write a Python function `def build_table_schema( data: DataFrame | Series, index: bool = True, primary_key: bool | None = None, version: bool = True, ) -> dict[str, JSONSerializable]` to solve the following problem: Create a Table schema from ``data``. Parameters ---------- data : Series, DataFrame index : bool, default True Whether to include ``data.index`` in the schema. primary_key : bool or None, default True Column names to designate as the primary key. The default `None` will set `'primaryKey'` to the index level or levels if the index is unique. version : bool, default True Whether to include a field `pandas_version` with the version of pandas that last revised the table schema. This version can be different from the installed pandas version. Returns ------- dict Notes ----- See `Table Schema <https://pandas.pydata.org/docs/user_guide/io.html#table-schema>`__ for conversion types. Timedeltas as converted to ISO8601 duration format with 9 decimal places after the seconds field for nanosecond precision. 
Categoricals are converted to the `any` dtype, and use the `enum` field constraint to list the allowed values. The `ordered` attribute is included in an `ordered` field. Examples -------- >>> from pandas.io.json._table_schema import build_table_schema >>> df = pd.DataFrame( ... {'A': [1, 2, 3], ... 'B': ['a', 'b', 'c'], ... 'C': pd.date_range('2016-01-01', freq='d', periods=3), ... }, index=pd.Index(range(3), name='idx')) >>> build_table_schema(df) {'fields': \ [{'name': 'idx', 'type': 'integer'}, \ {'name': 'A', 'type': 'integer'}, \ {'name': 'B', 'type': 'string'}, \ {'name': 'C', 'type': 'datetime'}], \ 'primaryKey': ['idx'], \ 'pandas_version': '1.4.0'} Here is the function: def build_table_schema( data: DataFrame | Series, index: bool = True, primary_key: bool | None = None, version: bool = True, ) -> dict[str, JSONSerializable]: """ Create a Table schema from ``data``. Parameters ---------- data : Series, DataFrame index : bool, default True Whether to include ``data.index`` in the schema. primary_key : bool or None, default True Column names to designate as the primary key. The default `None` will set `'primaryKey'` to the index level or levels if the index is unique. version : bool, default True Whether to include a field `pandas_version` with the version of pandas that last revised the table schema. This version can be different from the installed pandas version. Returns ------- dict Notes ----- See `Table Schema <https://pandas.pydata.org/docs/user_guide/io.html#table-schema>`__ for conversion types. Timedeltas as converted to ISO8601 duration format with 9 decimal places after the seconds field for nanosecond precision. Categoricals are converted to the `any` dtype, and use the `enum` field constraint to list the allowed values. The `ordered` attribute is included in an `ordered` field. Examples -------- >>> from pandas.io.json._table_schema import build_table_schema >>> df = pd.DataFrame( ... {'A': [1, 2, 3], ... 'B': ['a', 'b', 'c'], ... 'C': pd.date_range('2016-01-01', freq='d', periods=3), ... }, index=pd.Index(range(3), name='idx')) >>> build_table_schema(df) {'fields': \ [{'name': 'idx', 'type': 'integer'}, \ {'name': 'A', 'type': 'integer'}, \ {'name': 'B', 'type': 'string'}, \ {'name': 'C', 'type': 'datetime'}], \ 'primaryKey': ['idx'], \ 'pandas_version': '1.4.0'} """ if index is True: data = set_default_names(data) schema: dict[str, Any] = {} fields = [] if index: if data.index.nlevels > 1: data.index = cast("MultiIndex", data.index) for level, name in zip(data.index.levels, data.index.names): new_field = convert_pandas_type_to_json_field(level) new_field["name"] = name fields.append(new_field) else: fields.append(convert_pandas_type_to_json_field(data.index)) if data.ndim > 1: for column, s in data.items(): fields.append(convert_pandas_type_to_json_field(s)) else: fields.append(convert_pandas_type_to_json_field(data)) schema["fields"] = fields if index and data.index.is_unique and primary_key is None: if data.index.nlevels == 1: schema["primaryKey"] = [data.index.name] else: schema["primaryKey"] = data.index.names elif primary_key is not None: schema["primaryKey"] = primary_key if version: schema["pandas_version"] = TABLE_SCHEMA_VERSION return schema
Create a Table schema from ``data``.

Parameters
----------
data : Series, DataFrame
index : bool, default True
    Whether to include ``data.index`` in the schema.
primary_key : bool or None, default True
    Column names to designate as the primary key.
    The default `None` will set `'primaryKey'` to the index
    level or levels if the index is unique.
version : bool, default True
    Whether to include a field `pandas_version` with the version
    of pandas that last revised the table schema. This version
    can be different from the installed pandas version.

Returns
-------
dict

Notes
-----
See `Table Schema
<https://pandas.pydata.org/docs/user_guide/io.html#table-schema>`__ for
conversion types. Timedeltas are converted to ISO8601 duration format
with 9 decimal places after the seconds field for nanosecond precision.

Categoricals are converted to the `any` dtype, and use the `enum` field
constraint to list the allowed values. The `ordered` attribute is
included in an `ordered` field.

Examples
--------
>>> from pandas.io.json._table_schema import build_table_schema
>>> df = pd.DataFrame(
...     {'A': [1, 2, 3],
...      'B': ['a', 'b', 'c'],
...      'C': pd.date_range('2016-01-01', freq='d', periods=3),
...      }, index=pd.Index(range(3), name='idx'))
>>> build_table_schema(df)
{'fields': \
[{'name': 'idx', 'type': 'integer'}, \
{'name': 'A', 'type': 'integer'}, \
{'name': 'B', 'type': 'string'}, \
{'name': 'C', 'type': 'datetime'}], \
'primaryKey': ['idx'], \
'pandas_version': '1.4.0'}
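Beyond the DataFrame example in the docstring, the categorical branch described in the Notes can be seen on a bare Series; a sketch (the variable names are illustrative):

import pandas as pd

from pandas.io.json._table_schema import build_table_schema

grades = pd.Series(pd.Categorical(["a", "b", "a"], ordered=True), name="grade")
schema = build_table_schema(grades, index=False, version=False)
# Categorical -> type "any" plus an enum constraint and the ordered flag:
# {'fields': [{'name': 'grade', 'type': 'any',
#              'constraints': {'enum': ['a', 'b']}, 'ordered': True}]}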
173,498
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, cast, ) import warnings from pandas._libs.json import loads from pandas._libs.tslibs import timezones from pandas._typing import ( DtypeObj, JSONSerializable, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import _registry as registry from pandas.core.dtypes.common import ( is_bool_dtype, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_extension_array_dtype, is_integer_dtype, is_numeric_dtype, is_period_dtype, is_string_dtype, is_timedelta64_dtype, ) from pandas.core.dtypes.dtypes import CategoricalDtype from pandas import DataFrame import pandas.core.common as com def convert_json_field_to_pandas_type(field) -> str | CategoricalDtype: """ Converts a JSON field descriptor into its corresponding NumPy / pandas type Parameters ---------- field A JSON field descriptor Returns ------- dtype Raises ------ ValueError If the type of the provided field is unknown or currently unsupported Examples -------- >>> convert_json_field_to_pandas_type({"name": "an_int", "type": "integer"}) 'int64' >>> convert_json_field_to_pandas_type( ... { ... "name": "a_categorical", ... "type": "any", ... "constraints": {"enum": ["a", "b", "c"]}, ... "ordered": True, ... } ... ) CategoricalDtype(categories=['a', 'b', 'c'], ordered=True) >>> convert_json_field_to_pandas_type({"name": "a_datetime", "type": "datetime"}) 'datetime64[ns]' >>> convert_json_field_to_pandas_type( ... {"name": "a_datetime_with_tz", "type": "datetime", "tz": "US/Central"} ... ) 'datetime64[ns, US/Central]' """ typ = field["type"] if typ == "string": return "object" elif typ == "integer": return field.get("extDtype", "int64") elif typ == "number": return field.get("extDtype", "float64") elif typ == "boolean": return field.get("extDtype", "bool") elif typ == "duration": return "timedelta64" elif typ == "datetime": if field.get("tz"): return f"datetime64[ns, {field['tz']}]" elif field.get("freq"): # GH#47747 using datetime over period to minimize the change surface return f"period[{field['freq']}]" else: return "datetime64[ns]" elif typ == "any": if "constraints" in field and "ordered" in field: return CategoricalDtype( categories=field["constraints"]["enum"], ordered=field["ordered"] ) elif "extDtype" in field: return registry.find(field["extDtype"]) else: return "object" raise ValueError(f"Unsupported or invalid field type: {typ}") The provided code snippet includes necessary dependencies for implementing the `parse_table_schema` function. Write a Python function `def parse_table_schema(json, precise_float)` to solve the following problem: Builds a DataFrame from a given schema Parameters ---------- json : A JSON table schema precise_float : bool Flag controlling precision when decoding string to double values, as dictated by ``read_json`` Returns ------- df : DataFrame Raises ------ NotImplementedError If the JSON table schema contains either timezone or timedelta data Notes ----- Because :func:`DataFrame.to_json` uses the string 'index' to denote a name-less :class:`Index`, this function sets the name of the returned :class:`DataFrame` to ``None`` when said string is encountered with a normal :class:`Index`. For a :class:`MultiIndex`, the same limitation applies to any strings beginning with 'level_'. Therefore, an :class:`Index` name of 'index' and :class:`MultiIndex` names starting with 'level_' are not supported. See Also -------- build_table_schema : Inverse function. 
pandas.read_json Here is the function: def parse_table_schema(json, precise_float): """ Builds a DataFrame from a given schema Parameters ---------- json : A JSON table schema precise_float : bool Flag controlling precision when decoding string to double values, as dictated by ``read_json`` Returns ------- df : DataFrame Raises ------ NotImplementedError If the JSON table schema contains either timezone or timedelta data Notes ----- Because :func:`DataFrame.to_json` uses the string 'index' to denote a name-less :class:`Index`, this function sets the name of the returned :class:`DataFrame` to ``None`` when said string is encountered with a normal :class:`Index`. For a :class:`MultiIndex`, the same limitation applies to any strings beginning with 'level_'. Therefore, an :class:`Index` name of 'index' and :class:`MultiIndex` names starting with 'level_' are not supported. See Also -------- build_table_schema : Inverse function. pandas.read_json """ table = loads(json, precise_float=precise_float) col_order = [field["name"] for field in table["schema"]["fields"]] df = DataFrame(table["data"], columns=col_order)[col_order] dtypes = { field["name"]: convert_json_field_to_pandas_type(field) for field in table["schema"]["fields"] } # No ISO constructor for Timedelta as of yet, so need to raise if "timedelta64" in dtypes.values(): raise NotImplementedError( 'table="orient" can not yet read ISO-formatted Timedelta data' ) df = df.astype(dtypes) if "primaryKey" in table["schema"]: df = df.set_index(table["schema"]["primaryKey"]) if len(df.index.names) == 1: if df.index.name == "index": df.index.name = None else: df.index.names = [ None if x.startswith("level_") else x for x in df.index.names ] return df
Builds a DataFrame from a given schema

Parameters
----------
json :
    A JSON table schema
precise_float : bool
    Flag controlling precision when decoding string to double values, as
    dictated by ``read_json``

Returns
-------
df : DataFrame

Raises
------
NotImplementedError
    If the JSON table schema contains either timezone or timedelta data

Notes
-----
Because :func:`DataFrame.to_json` uses the string 'index' to denote a
name-less :class:`Index`, this function sets the name of the returned
:class:`DataFrame` to ``None`` when said string is encountered with a
normal :class:`Index`. For a :class:`MultiIndex`, the same limitation
applies to any strings beginning with 'level_'. Therefore, an
:class:`Index` name of 'index' and :class:`MultiIndex` names starting
with 'level_' are not supported.

See Also
--------
build_table_schema : Inverse function.
pandas.read_json
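A round-trip sketch pairing this function with its inverse, showing the 'index' naming convention from the Notes:

import pandas as pd

from pandas.io.json._table_schema import parse_table_schema

df = pd.DataFrame({"a": [1, 2]})    # unnamed index is serialized as 'index'
payload = df.to_json(orient="table")
restored = parse_table_schema(payload, precise_float=False)

# The index is rebuilt from "primaryKey" and the placeholder name 'index'
# is mapped back to None on the way out.
assert restored.index.name is None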
173,499
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from collections import abc from io import StringIO from itertools import islice from types import TracebackType from typing import ( TYPE_CHECKING, Any, Callable, Generic, Literal, Mapping, TypeVar, overload, ) import numpy as np from pandas._libs import lib from pandas._libs.json import ( dumps, loads, ) from pandas._libs.tslibs import iNaT from pandas._typing import ( CompressionOptions, DtypeArg, DtypeBackend, FilePath, IndexLabel, JSONEngine, JSONSerializable, ReadBuffer, StorageOptions, WriteBuffer, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import AbstractMethodError from pandas.util._decorators import doc from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( ensure_str, is_period_dtype, ) from pandas.core.dtypes.generic import ABCIndex from pandas import ( ArrowDtype, DataFrame, MultiIndex, Series, isna, notna, to_datetime, ) from pandas.core.reshape.concat import concat from pandas.core.shared_docs import _shared_docs from pandas.io.common import ( IOHandles, dedup_names, extension_to_compression, file_exists, get_handle, is_fsspec_url, is_potential_multi_index, is_url, stringify_path, ) from pandas.io.json._normalize import convert_to_line_delimits from pandas.io.json._table_schema import ( build_table_schema, parse_table_schema, ) from pandas.io.parsers.readers import validate_integer Any = object() class Callable(BaseTypingInstance): def py__call__(self, arguments): """ def x() -> Callable[[Callable[..., _T]], _T]: ... """ # The 0th index are the arguments. try: param_values = self._generics_manager[0] result_values = self._generics_manager[1] except IndexError: debug.warning('Callable[...] defined without two arguments') return NO_VALUES else: from jedi.inference.gradual.annotation import infer_return_for_callable return infer_return_for_callable(arguments, param_values, result_values) def py__get__(self, instance, class_value): return ValueSet([self]) Literal: _SpecialForm = ... JSONSerializable = Optional[Union[PythonScalar, List, Dict]] class WriteBuffer(BaseBuffer, Protocol[AnyStr_contra]): def write(self, __b: AnyStr_contra) -> Any: # for gzip.GzipFile, bz2.BZ2File ... def flush(self) -> Any: # for gzip.GzipFile, bz2.BZ2File ... FilePath = Union[str, "PathLike[str]"] StorageOptions = Optional[Dict[str, Any]] CompressionOptions = Optional[ Union[Literal["infer", "gzip", "bz2", "zip", "xz", "zstd", "tar"], CompressionDict] ] class NDFrame(PandasObject, indexing.IndexingMixin): """ N-dimensional analogue of DataFrame. 
Store multi-dimensional in a size-mutable, labeled data structure Parameters ---------- data : BlockManager axes : list copy : bool, default False """ _internal_names: list[str] = [ "_mgr", "_cacher", "_item_cache", "_cache", "_is_copy", "_subtyp", "_name", "_default_kind", "_default_fill_value", "_metadata", "__array_struct__", "__array_interface__", "_flags", ] _internal_names_set: set[str] = set(_internal_names) _accessors: set[str] = set() _hidden_attrs: frozenset[str] = frozenset([]) _metadata: list[str] = [] _is_copy: weakref.ReferenceType[NDFrame] | None = None _mgr: Manager _attrs: dict[Hashable, Any] _typ: str # ---------------------------------------------------------------------- # Constructors def __init__( self, data: Manager, copy: bool_t = False, attrs: Mapping[Hashable, Any] | None = None, ) -> None: # copy kwarg is retained for mypy compat, is not used object.__setattr__(self, "_is_copy", None) object.__setattr__(self, "_mgr", data) object.__setattr__(self, "_item_cache", {}) if attrs is None: attrs = {} else: attrs = dict(attrs) object.__setattr__(self, "_attrs", attrs) object.__setattr__(self, "_flags", Flags(self, allows_duplicate_labels=True)) def _init_mgr( cls, mgr: Manager, axes, dtype: Dtype | None = None, copy: bool_t = False, ) -> Manager: """passed a manager and a axes dict""" for a, axe in axes.items(): if axe is not None: axe = ensure_index(axe) bm_axis = cls._get_block_manager_axis(a) mgr = mgr.reindex_axis(axe, axis=bm_axis) # make a copy if explicitly requested if copy: mgr = mgr.copy() if dtype is not None: # avoid further copies if we can if ( isinstance(mgr, BlockManager) and len(mgr.blocks) == 1 and is_dtype_equal(mgr.blocks[0].values.dtype, dtype) ): pass else: mgr = mgr.astype(dtype=dtype) return mgr def _as_manager(self: NDFrameT, typ: str, copy: bool_t = True) -> NDFrameT: """ Private helper function to create a DataFrame with specific manager. Parameters ---------- typ : {"block", "array"} copy : bool, default True Only controls whether the conversion from Block->ArrayManager copies the 1D arrays (to ensure proper/contiguous memory layout). Returns ------- DataFrame New DataFrame using specified manager type. Is not guaranteed to be a copy or not. """ new_mgr: Manager new_mgr = mgr_to_mgr(self._mgr, typ=typ, copy=copy) # fastpath of passing a manager doesn't check the option/manager class return self._constructor(new_mgr).__finalize__(self) # ---------------------------------------------------------------------- # attrs and flags def attrs(self) -> dict[Hashable, Any]: """ Dictionary of global attributes of this dataset. .. warning:: attrs is experimental and may change without warning. See Also -------- DataFrame.flags : Global flags applying to this object. """ if self._attrs is None: self._attrs = {} return self._attrs def attrs(self, value: Mapping[Hashable, Any]) -> None: self._attrs = dict(value) def flags(self) -> Flags: """ Get the properties associated with this pandas object. The available flags are * :attr:`Flags.allows_duplicate_labels` See Also -------- Flags : Flags that apply to pandas objects. DataFrame.attrs : Global metadata applying to this dataset. Notes ----- "Flags" differ from "metadata". Flags reflect properties of the pandas object (the Series or DataFrame). Metadata refer to properties of the dataset, and should be stored in :attr:`DataFrame.attrs`. 
Examples -------- >>> df = pd.DataFrame({"A": [1, 2]}) >>> df.flags <Flags(allows_duplicate_labels=True)> Flags can be get or set using ``.`` >>> df.flags.allows_duplicate_labels True >>> df.flags.allows_duplicate_labels = False Or by slicing with a key >>> df.flags["allows_duplicate_labels"] False >>> df.flags["allows_duplicate_labels"] = True """ return self._flags def set_flags( self: NDFrameT, *, copy: bool_t = False, allows_duplicate_labels: bool_t | None = None, ) -> NDFrameT: """ Return a new object with updated flags. Parameters ---------- copy : bool, default False Specify if a copy of the object should be made. allows_duplicate_labels : bool, optional Whether the returned object allows duplicate labels. Returns ------- Series or DataFrame The same type as the caller. See Also -------- DataFrame.attrs : Global metadata applying to this dataset. DataFrame.flags : Global flags applying to this object. Notes ----- This method returns a new object that's a view on the same data as the input. Mutating the input or the output values will be reflected in the other. This method is intended to be used in method chains. "Flags" differ from "metadata". Flags reflect properties of the pandas object (the Series or DataFrame). Metadata refer to properties of the dataset, and should be stored in :attr:`DataFrame.attrs`. Examples -------- >>> df = pd.DataFrame({"A": [1, 2]}) >>> df.flags.allows_duplicate_labels True >>> df2 = df.set_flags(allows_duplicate_labels=False) >>> df2.flags.allows_duplicate_labels False """ df = self.copy(deep=copy and not using_copy_on_write()) if allows_duplicate_labels is not None: df.flags["allows_duplicate_labels"] = allows_duplicate_labels return df def _validate_dtype(cls, dtype) -> DtypeObj | None: """validate the passed dtype""" if dtype is not None: dtype = pandas_dtype(dtype) # a compound dtype if dtype.kind == "V": raise NotImplementedError( "compound dtypes are not implemented " f"in the {cls.__name__} constructor" ) return dtype # ---------------------------------------------------------------------- # Construction def _constructor(self: NDFrameT) -> Callable[..., NDFrameT]: """ Used when a manipulation result has the same dimensions as the original. """ raise AbstractMethodError(self) # ---------------------------------------------------------------------- # Internals def _data(self): # GH#33054 retained because some downstream packages uses this, # e.g. 
fastparquet return self._mgr # ---------------------------------------------------------------------- # Axis _stat_axis_number = 0 _stat_axis_name = "index" _AXIS_ORDERS: list[Literal["index", "columns"]] _AXIS_TO_AXIS_NUMBER: dict[Axis, AxisInt] = {0: 0, "index": 0, "rows": 0} _info_axis_number: int _info_axis_name: Literal["index", "columns"] _AXIS_LEN: int def _construct_axes_dict(self, axes: Sequence[Axis] | None = None, **kwargs): """Return an axes dictionary for this object.""" d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)} # error: Argument 1 to "update" of "MutableMapping" has incompatible type # "Dict[str, Any]"; expected "SupportsKeysAndGetItem[Union[int, str], Any]" d.update(kwargs) # type: ignore[arg-type] return d def _get_axis_number(cls, axis: Axis) -> AxisInt: try: return cls._AXIS_TO_AXIS_NUMBER[axis] except KeyError: raise ValueError(f"No axis named {axis} for object type {cls.__name__}") def _get_axis_name(cls, axis: Axis) -> Literal["index", "columns"]: axis_number = cls._get_axis_number(axis) return cls._AXIS_ORDERS[axis_number] def _get_axis(self, axis: Axis) -> Index: axis_number = self._get_axis_number(axis) assert axis_number in {0, 1} return self.index if axis_number == 0 else self.columns def _get_block_manager_axis(cls, axis: Axis) -> AxisInt: """Map the axis to the block_manager axis.""" axis = cls._get_axis_number(axis) ndim = cls._AXIS_LEN if ndim == 2: # i.e. DataFrame return 1 - axis return axis def _get_axis_resolvers(self, axis: str) -> dict[str, Series | MultiIndex]: # index or columns axis_index = getattr(self, axis) d = {} prefix = axis[0] for i, name in enumerate(axis_index.names): if name is not None: key = level = name else: # prefix with 'i' or 'c' depending on the input axis # e.g., you must do ilevel_0 for the 0th level of an unnamed # MultiIndex key = f"{prefix}level_{i}" level = i level_values = axis_index.get_level_values(level) s = level_values.to_series() s.index = axis_index d[key] = s # put the index/columns itself in the dict if isinstance(axis_index, MultiIndex): dindex = axis_index else: dindex = axis_index.to_series() d[axis] = dindex return d def _get_index_resolvers(self) -> dict[Hashable, Series | MultiIndex]: from pandas.core.computation.parsing import clean_column_name d: dict[str, Series | MultiIndex] = {} for axis_name in self._AXIS_ORDERS: d.update(self._get_axis_resolvers(axis_name)) return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)} def _get_cleaned_column_resolvers(self) -> dict[Hashable, Series]: """ Return the special-character-free column resolvers of a DataFrame. Column names with special characters are 'cleaned up' so that they can be referred to by backtick quoting. Used in :meth:`DataFrame.eval`.
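As a small sketch of the effect, a column whose name contains a space becomes addressable in :meth:`DataFrame.eval` through backtick quoting:

>>> df = pd.DataFrame({"col 1": [1, 2], "col 2": [3, 4]})
>>> df.eval("`col 1` + `col 2`")
0    4
1    6
dtype: int64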
""" from pandas.core.computation.parsing import clean_column_name if isinstance(self, ABCSeries): return {clean_column_name(self.name): self} return { clean_column_name(k): v for k, v in self.items() if not isinstance(k, int) } def _info_axis(self) -> Index: return getattr(self, self._info_axis_name) def _stat_axis(self) -> Index: return getattr(self, self._stat_axis_name) def shape(self) -> tuple[int, ...]: """ Return a tuple of axis dimensions """ return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS) def axes(self) -> list[Index]: """ Return index label(s) of the internal NDFrame """ # we do it this way because if we have reversed axes, then # the block manager shows then reversed return [self._get_axis(a) for a in self._AXIS_ORDERS] def ndim(self) -> int: """ Return an int representing the number of axes / array dimensions. Return 1 if Series. Otherwise return 2 if DataFrame. See Also -------- ndarray.ndim : Number of array dimensions. Examples -------- >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3}) >>> s.ndim 1 >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.ndim 2 """ return self._mgr.ndim def size(self) -> int: """ Return an int representing the number of elements in this object. Return the number of rows if Series. Otherwise return the number of rows times number of columns if DataFrame. See Also -------- ndarray.size : Number of elements in the array. Examples -------- >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3}) >>> s.size 3 >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.size 4 """ # error: Incompatible return value type (got "signedinteger[_64Bit]", # expected "int") [return-value] return np.prod(self.shape) # type: ignore[return-value] def set_axis( self: NDFrameT, labels, *, axis: Axis = 0, copy: bool_t | None = None, ) -> NDFrameT: """ Assign desired index to given axis. Indexes for%(extended_summary_sub)s row labels can be changed by assigning a list-like or Index. Parameters ---------- labels : list-like, Index The values for the new index. axis : %(axes_single_arg)s, default 0 The axis to update. The value 0 identifies the rows. For `Series` this parameter is unused and defaults to 0. copy : bool, default True Whether to make a copy of the underlying data. .. versionadded:: 1.5.0 Returns ------- %(klass)s An object of type %(klass)s. See Also -------- %(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s. """ return self._set_axis_nocheck(labels, axis, inplace=False, copy=copy) def _set_axis_nocheck( self, labels, axis: Axis, inplace: bool_t, copy: bool_t | None ): if inplace: setattr(self, self._get_axis_name(axis), labels) else: # With copy=False, we create a new object but don't copy the # underlying data. obj = self.copy(deep=copy and not using_copy_on_write()) setattr(obj, obj._get_axis_name(axis), labels) return obj def _set_axis(self, axis: AxisInt, labels: AnyArrayLike | list) -> None: """ This is called from the cython code when we set the `index` attribute directly, e.g. `series.index = [1, 2, 3]`. """ labels = ensure_index(labels) self._mgr.set_axis(axis, labels) self._clear_item_cache() def swapaxes( self: NDFrameT, axis1: Axis, axis2: Axis, copy: bool_t | None = None ) -> NDFrameT: """ Interchange axes and swap values axes appropriately. 
Returns ------- same as input """ i = self._get_axis_number(axis1) j = self._get_axis_number(axis2) if i == j: return self.copy(deep=copy and not using_copy_on_write()) mapping = {i: j, j: i} new_axes = [self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN)] new_values = self._values.swapaxes(i, j) # type: ignore[union-attr] if ( using_copy_on_write() and self._mgr.is_single_block and isinstance(self._mgr, BlockManager) ): # This should only get hit when we have a single block; otherwise a # copy is made and we don't have to set up references. new_mgr = ndarray_to_mgr( new_values, new_axes[0], new_axes[1], dtype=None, copy=False, typ="block", ) assert isinstance(new_mgr, BlockManager) assert isinstance(self._mgr, BlockManager) new_mgr.blocks[0].refs = self._mgr.blocks[0].refs new_mgr.blocks[0].refs.add_reference( new_mgr.blocks[0] # type: ignore[arg-type] ) return self._constructor(new_mgr).__finalize__(self, method="swapaxes") elif (copy or copy is None) and self._mgr.is_single_block: new_values = new_values.copy() return self._constructor( new_values, *new_axes, # The no-copy case for CoW is handled above copy=False, ).__finalize__(self, method="swapaxes") def droplevel(self: NDFrameT, level: IndexLabel, axis: Axis = 0) -> NDFrameT: """ Return {klass} with requested index / column level(s) removed. Parameters ---------- level : int, str, or list-like If a string is given, it must be the name of a level. If list-like, elements must be names or positional indexes of levels. axis : {{0 or 'index', 1 or 'columns'}}, default 0 Axis along which the level(s) are removed: * 0 or 'index': remove level(s) from the row index. * 1 or 'columns': remove level(s) from the columns. For `Series` this parameter is unused and defaults to 0. Returns ------- {klass} {klass} with requested index / column level(s) removed. Examples -------- >>> df = pd.DataFrame([ ... [1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12] ... ]).set_index([0, 1]).rename_axis(['a', 'b']) >>> df.columns = pd.MultiIndex.from_tuples([ ... ('c', 'e'), ('d', 'f') ... ], names=['level_1', 'level_2']) >>> df level_1 c d level_2 e f a b 1 2 3 4 5 6 7 8 9 10 11 12 >>> df.droplevel('a') level_1 c d level_2 e f b 2 3 4 6 7 8 10 11 12 >>> df.droplevel('level_2', axis=1) level_1 c d a b 1 2 3 4 5 6 7 8 9 10 11 12 """ labels = self._get_axis(axis) new_labels = labels.droplevel(level) return self.set_axis(new_labels, axis=axis, copy=None) def pop(self, item: Hashable) -> Series | Any: result = self[item] del self[item] return result def squeeze(self, axis: Axis | None = None): """ Squeeze 1-dimensional axis objects into scalars. Series or DataFrames with a single element are squeezed to a scalar. DataFrames with a single column or a single row are squeezed to a Series. Otherwise the object is unchanged. This method is most useful when you don't know if your object is a Series or DataFrame, but you do know it has just a single column. In that case you can safely call `squeeze` to ensure you have a Series. Parameters ---------- axis : {0 or 'index', 1 or 'columns', None}, default None A specific axis to squeeze. By default, all length-1 axes are squeezed. For `Series` this parameter is unused and defaults to `None`. Returns ------- DataFrame, Series, or scalar The projection after squeezing `axis` or all the axes. See Also -------- Series.iloc : Integer-location based indexing for selecting scalars. DataFrame.iloc : Integer-location based indexing for selecting Series. Series.to_frame : Inverse of DataFrame.squeeze for a single-column DataFrame.
Examples -------- >>> primes = pd.Series([2, 3, 5, 7]) Slicing might produce a Series with a single value: >>> even_primes = primes[primes % 2 == 0] >>> even_primes 0 2 dtype: int64 >>> even_primes.squeeze() 2 Squeezing objects with more than one value in every axis does nothing: >>> odd_primes = primes[primes % 2 == 1] >>> odd_primes 1 3 2 5 3 7 dtype: int64 >>> odd_primes.squeeze() 1 3 2 5 3 7 dtype: int64 Squeezing is even more effective when used with DataFrames. >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b']) >>> df a b 0 1 2 1 3 4 Slicing a single column will produce a DataFrame with the columns having only one value: >>> df_a = df[['a']] >>> df_a a 0 1 1 3 So the columns can be squeezed down, resulting in a Series: >>> df_a.squeeze('columns') 0 1 1 3 Name: a, dtype: int64 Slicing a single row from a single column will produce a single scalar DataFrame: >>> df_0a = df.loc[df.index < 1, ['a']] >>> df_0a a 0 1 Squeezing the rows produces a single scalar Series: >>> df_0a.squeeze('rows') a 1 Name: 0, dtype: int64 Squeezing all axes will project directly into a scalar: >>> df_0a.squeeze() 1 """ axes = range(self._AXIS_LEN) if axis is None else (self._get_axis_number(axis),) return self.iloc[ tuple( 0 if i in axes and len(a) == 1 else slice(None) for i, a in enumerate(self.axes) ) ] # ---------------------------------------------------------------------- # Rename def _rename( self: NDFrameT, mapper: Renamer | None = None, *, index: Renamer | None = None, columns: Renamer | None = None, axis: Axis | None = None, copy: bool_t | None = None, inplace: bool_t = False, level: Level | None = None, errors: str = "ignore", ) -> NDFrameT | None: # called by Series.rename and DataFrame.rename if mapper is None and index is None and columns is None: raise TypeError("must pass an index to rename") if index is not None or columns is not None: if axis is not None: raise TypeError( "Cannot specify both 'axis' and any of 'index' or 'columns'" ) if mapper is not None: raise TypeError( "Cannot specify both 'mapper' and any of 'index' or 'columns'" ) else: # use the mapper argument if axis and self._get_axis_number(axis) == 1: columns = mapper else: index = mapper self._check_inplace_and_allows_duplicate_labels(inplace) result = self if inplace else self.copy(deep=copy and not using_copy_on_write()) for axis_no, replacements in enumerate((index, columns)): if replacements is None: continue ax = self._get_axis(axis_no) f = common.get_rename_function(replacements) if level is not None: level = ax._get_level_number(level) # GH 13473 if not callable(replacements): if ax._is_multi and level is not None: indexer = ax.get_level_values(level).get_indexer_for(replacements) else: indexer = ax.get_indexer_for(replacements) if errors == "raise" and len(indexer[indexer == -1]): missing_labels = [ label for index, label in enumerate(replacements) if indexer[index] == -1 ] raise KeyError(f"{missing_labels} not found in axis") new_index = ax._transform_index(f, level=level) result._set_axis_nocheck(new_index, axis=axis_no, inplace=True, copy=False) result._clear_item_cache() if inplace: self._update_inplace(result) return None else: return result.__finalize__(self, method="rename") def rename_axis( self: NDFrameT, mapper: IndexLabel | lib.NoDefault = ..., *, index=..., columns=..., axis: Axis = ..., copy: bool_t | None = ..., inplace: Literal[False] = ..., ) -> NDFrameT: ... 
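# Typing stubs for rename_axis: the ``inplace`` literal selects the return type, # a renamed object for inplace=False and None for inplace=True; the actual # implementation follows the stubs.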
def rename_axis( self, mapper: IndexLabel | lib.NoDefault = ..., *, index=..., columns=..., axis: Axis = ..., copy: bool_t | None = ..., inplace: Literal[True], ) -> None: ... def rename_axis( self: NDFrameT, mapper: IndexLabel | lib.NoDefault = ..., *, index=..., columns=..., axis: Axis = ..., copy: bool_t | None = ..., inplace: bool_t = ..., ) -> NDFrameT | None: ... def rename_axis( self: NDFrameT, mapper: IndexLabel | lib.NoDefault = lib.no_default, *, index=lib.no_default, columns=lib.no_default, axis: Axis = 0, copy: bool_t | None = None, inplace: bool_t = False, ) -> NDFrameT | None: """ Set the name of the axis for the index or columns. Parameters ---------- mapper : scalar, list-like, optional Value to set the axis name attribute. index, columns : scalar, list-like, dict-like or function, optional A scalar, list-like, dict-like or functions transformations to apply to that axis' values. Note that the ``columns`` parameter is not allowed if the object is a Series. This parameter only apply for DataFrame type objects. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and/or ``columns``. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to rename. For `Series` this parameter is unused and defaults to 0. copy : bool, default None Also copy underlying data. inplace : bool, default False Modifies the object directly, instead of creating a new Series or DataFrame. Returns ------- Series, DataFrame, or None The same type as the caller or None if ``inplace=True``. See Also -------- Series.rename : Alter Series index labels or name. DataFrame.rename : Alter DataFrame index labels or name. Index.rename : Set new names on index. Notes ----- ``DataFrame.rename_axis`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` The first calling convention will only modify the names of the index and/or the names of the Index object that is the columns. In this case, the parameter ``copy`` is ignored. The second calling convention will modify the names of the corresponding index if mapper is a list or a scalar. However, if mapper is dict-like or a function, it will use the deprecated behavior of modifying the axis *labels*. We *highly* recommend using keyword arguments to clarify your intent. Examples -------- **Series** >>> s = pd.Series(["dog", "cat", "monkey"]) >>> s 0 dog 1 cat 2 monkey dtype: object >>> s.rename_axis("animal") animal 0 dog 1 cat 2 monkey dtype: object **DataFrame** >>> df = pd.DataFrame({"num_legs": [4, 4, 2], ... "num_arms": [0, 0, 2]}, ... ["dog", "cat", "monkey"]) >>> df num_legs num_arms dog 4 0 cat 4 0 monkey 2 2 >>> df = df.rename_axis("animal") >>> df num_legs num_arms animal dog 4 0 cat 4 0 monkey 2 2 >>> df = df.rename_axis("limbs", axis="columns") >>> df limbs num_legs num_arms animal dog 4 0 cat 4 0 monkey 2 2 **MultiIndex** >>> df.index = pd.MultiIndex.from_product([['mammal'], ... ['dog', 'cat', 'monkey']], ... 
names=['type', 'name']) >>> df limbs num_legs num_arms type name mammal dog 4 0 cat 4 0 monkey 2 2 >>> df.rename_axis(index={'type': 'class'}) limbs num_legs num_arms class name mammal dog 4 0 cat 4 0 monkey 2 2 >>> df.rename_axis(columns=str.upper) LIMBS num_legs num_arms type name mammal dog 4 0 cat 4 0 monkey 2 2 """ axes = {"index": index, "columns": columns} if axis is not None: axis = self._get_axis_number(axis) inplace = validate_bool_kwarg(inplace, "inplace") if copy and using_copy_on_write(): copy = False if mapper is not lib.no_default: # Use v0.23 behavior if a scalar or list non_mapper = is_scalar(mapper) or ( is_list_like(mapper) and not is_dict_like(mapper) ) if non_mapper: return self._set_axis_name( mapper, axis=axis, inplace=inplace, copy=copy ) else: raise ValueError("Use `.rename` to alter labels with a mapper.") else: # Use new behavior. Means that index and/or columns # is specified result = self if inplace else self.copy(deep=copy) for axis in range(self._AXIS_LEN): v = axes.get(self._get_axis_name(axis)) if v is lib.no_default: continue non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v)) if non_mapper: newnames = v else: f = common.get_rename_function(v) curnames = self._get_axis(axis).names newnames = [f(name) for name in curnames] result._set_axis_name(newnames, axis=axis, inplace=True, copy=copy) if not inplace: return result return None def _set_axis_name( self, name, axis: Axis = 0, inplace: bool_t = False, copy: bool_t | None = True ): """ Set the name(s) of the axis. Parameters ---------- name : str or list of str Name(s) to set. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to set the label. The value 0 or 'index' specifies index, and the value 1 or 'columns' specifies columns. inplace : bool, default False If `True`, do operation inplace and return None. copy: Whether to make a copy of the result. Returns ------- Series, DataFrame, or None The same type as the caller or `None` if `inplace` is `True`. See Also -------- DataFrame.rename : Alter the axis labels of :class:`DataFrame`. Series.rename : Alter the index labels or set the index name of :class:`Series`. Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`. Examples -------- >>> df = pd.DataFrame({"num_legs": [4, 4, 2]}, ... ["dog", "cat", "monkey"]) >>> df num_legs dog 4 cat 4 monkey 2 >>> df._set_axis_name("animal") num_legs animal dog 4 cat 4 monkey 2 >>> df.index = pd.MultiIndex.from_product( ... [["mammal"], ['dog', 'cat', 'monkey']]) >>> df._set_axis_name(["type", "name"]) num_legs type name mammal dog 4 cat 4 monkey 2 """ axis = self._get_axis_number(axis) idx = self._get_axis(axis).set_names(name) inplace = validate_bool_kwarg(inplace, "inplace") renamed = self if inplace else self.copy(deep=copy) if axis == 0: renamed.index = idx else: renamed.columns = idx if not inplace: return renamed # ---------------------------------------------------------------------- # Comparison Methods def _indexed_same(self, other) -> bool_t: return all( self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS ) def equals(self, other: object) -> bool_t: """ Test whether two objects contain the same elements. This function allows two Series or DataFrames to be compared against each other to see if they have the same shape and elements. NaNs in the same location are considered equal. The row/column index do not need to have the same type, as long as the values are considered equal. Corresponding columns must be of the same dtype. 
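For instance, NaNs aligned in the same location do not break equality:

>>> pd.Series([1.0, float("nan")]).equals(pd.Series([1.0, float("nan")]))
True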
Parameters ---------- other : Series or DataFrame The other Series or DataFrame to be compared with the first. Returns ------- bool True if all elements are the same in both objects, False otherwise. See Also -------- Series.eq : Compare two Series objects of the same length and return a Series where each element is True if the element in each Series is equal, False otherwise. DataFrame.eq : Compare two DataFrame objects of the same shape and return a DataFrame where each element is True if the respective element in each DataFrame is equal, False otherwise. testing.assert_series_equal : Raises an AssertionError if left and right are not equal. Provides an easy interface to ignore inequality in dtypes, indexes and precision among others. testing.assert_frame_equal : Like assert_series_equal, but targets DataFrames. numpy.array_equal : Return True if two arrays have the same shape and elements, False otherwise. Examples -------- >>> df = pd.DataFrame({1: [10], 2: [20]}) >>> df 1 2 0 10 20 DataFrames df and exactly_equal have the same types and values for their elements and column labels, which will return True. >>> exactly_equal = pd.DataFrame({1: [10], 2: [20]}) >>> exactly_equal 1 2 0 10 20 >>> df.equals(exactly_equal) True DataFrames df and different_column_type have the same element types and values, but have different types for the column labels, which will still return True. >>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]}) >>> different_column_type 1.0 2.0 0 10 20 >>> df.equals(different_column_type) True DataFrames df and different_data_type have different types for the same values for their elements, and will return False even though their column labels are the same values and types. >>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]}) >>> different_data_type 1 2 0 10.0 20.0 >>> df.equals(different_data_type) False """ if not (isinstance(other, type(self)) or isinstance(self, type(other))): return False other = cast(NDFrame, other) return self._mgr.equals(other._mgr) # ------------------------------------------------------------------------- # Unary Methods def __neg__(self: NDFrameT) -> NDFrameT: def blk_func(values: ArrayLike): if is_bool_dtype(values.dtype): # error: Argument 1 to "inv" has incompatible type "Union # [ExtensionArray, ndarray[Any, Any]]"; expected # "_SupportsInversion[ndarray[Any, dtype[bool_]]]" return operator.inv(values) # type: ignore[arg-type] else: # error: Argument 1 to "neg" has incompatible type "Union # [ExtensionArray, ndarray[Any, Any]]"; expected # "_SupportsNeg[ndarray[Any, dtype[Any]]]" return operator.neg(values) # type: ignore[arg-type] new_data = self._mgr.apply(blk_func) res = self._constructor(new_data) return res.__finalize__(self, method="__neg__") def __pos__(self: NDFrameT) -> NDFrameT: def blk_func(values: ArrayLike): if is_bool_dtype(values.dtype): return values.copy() else: # error: Argument 1 to "pos" has incompatible type "Union # [ExtensionArray, ndarray[Any, Any]]"; expected # "_SupportsPos[ndarray[Any, dtype[Any]]]" return operator.pos(values) # type: ignore[arg-type] new_data = self._mgr.apply(blk_func) res = self._constructor(new_data) return res.__finalize__(self, method="__pos__") def __invert__(self: NDFrameT) -> NDFrameT: if not self.size: # inv fails with 0 len return self.copy(deep=False) new_data = self._mgr.apply(operator.invert) return self._constructor(new_data).__finalize__(self, method="__invert__") def __nonzero__(self) -> NoReturn: raise ValueError( f"The truth value of a 
{type(self).__name__} is ambiguous. " "Use a.empty, a.bool(), a.item(), a.any() or a.all()." ) __bool__ = __nonzero__ def bool(self) -> bool_t: """ Return the bool of a single element Series or DataFrame. This must be a boolean scalar value, either True or False. It will raise a ValueError if the Series or DataFrame does not have exactly 1 element, or that element is not boolean (integer values 0 and 1 will also raise an exception). Returns ------- bool The value in the Series or DataFrame. See Also -------- Series.astype : Change the data type of a Series, including to boolean. DataFrame.astype : Change the data type of a DataFrame, including to boolean. numpy.bool_ : NumPy boolean data type, used by pandas for boolean values. Examples -------- The method will only work for single element objects with a boolean value: >>> pd.Series([True]).bool() True >>> pd.Series([False]).bool() False >>> pd.DataFrame({'col': [True]}).bool() True >>> pd.DataFrame({'col': [False]}).bool() False """ v = self.squeeze() if isinstance(v, (bool, np.bool_)): return bool(v) elif is_scalar(v): raise ValueError( "bool cannot act on a non-boolean single element " f"{type(self).__name__}" ) self.__nonzero__() # for mypy (__nonzero__ raises) return True def abs(self: NDFrameT) -> NDFrameT: """ Return a Series/DataFrame with absolute numeric value of each element. This function only applies to elements that are all numeric. Returns ------- abs Series/DataFrame containing the absolute value of each element. See Also -------- numpy.absolute : Calculate the absolute value element-wise. Notes ----- For ``complex`` inputs, ``1.2 + 1j``, the absolute value is :math:`\\sqrt{ a^2 + b^2 }`. Examples -------- Absolute numeric values in a Series. >>> s = pd.Series([-1.10, 2, -3.33, 4]) >>> s.abs() 0 1.10 1 2.00 2 3.33 3 4.00 dtype: float64 Absolute numeric values in a Series with complex numbers. >>> s = pd.Series([1.2 + 1j]) >>> s.abs() 0 1.56205 dtype: float64 Absolute numeric values in a Series with a Timedelta element. >>> s = pd.Series([pd.Timedelta('1 days')]) >>> s.abs() 0 1 days dtype: timedelta64[ns] Select rows with data closest to certain value using argsort (from `StackOverflow <https://stackoverflow.com/a/17758115>`__). >>> df = pd.DataFrame({ ... 'a': [4, 5, 6, 7], ... 'b': [10, 20, 30, 40], ... 'c': [100, 50, -30, -50] ... }) >>> df a b c 0 4 10 100 1 5 20 50 2 6 30 -30 3 7 40 -50 >>> df.loc[(df.c - 43).abs().argsort()] a b c 1 5 20 50 0 4 10 100 2 6 30 -30 3 7 40 -50 """ res_mgr = self._mgr.apply(np.abs) return self._constructor(res_mgr).__finalize__(self, name="abs") def __abs__(self: NDFrameT) -> NDFrameT: return self.abs() def __round__(self: NDFrameT, decimals: int = 0) -> NDFrameT: return self.round(decimals).__finalize__(self, method="__round__") # ------------------------------------------------------------------------- # Label or Level Combination Helpers # # A collection of helper methods for DataFrame/Series operations that # accept a combination of column/index labels and levels. All such # operations should utilize/extend these methods when possible so that we # have consistent precedence and validation logic throughout the library. def _is_level_reference(self, key: Level, axis: Axis = 0) -> bool_t: """ Test whether a key is a level reference for a given axis. To be considered a level reference, `key` must be a string that: - (axis=0): Matches the name of an index level and does NOT match a column label. - (axis=1): Matches the name of a column level and does NOT match an index label. 
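A small sketch of the rule for ``axis=0`` (the names ``idx`` and ``a`` are arbitrary):

>>> df = pd.DataFrame({"a": [1]}, index=pd.Index([0], name="idx"))
>>> df._is_level_reference("idx")  # an index level name, not a column label
True
>>> df._is_level_reference("a")  # matches a column label instead
False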
Parameters ---------- key : Hashable Potential level name for the given axis axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- is_level : bool """ axis_int = self._get_axis_number(axis) return ( key is not None and is_hashable(key) and key in self.axes[axis_int].names and not self._is_label_reference(key, axis=axis_int) ) def _is_label_reference(self, key: Level, axis: Axis = 0) -> bool_t: """ Test whether a key is a label reference for a given axis. To be considered a label reference, `key` must be a string that: - (axis=0): Matches a column label - (axis=1): Matches an index label Parameters ---------- key : Hashable Potential label name, i.e. Index entry. axis : int, default 0 Axis perpendicular to the axis that labels are associated with (0 means search for column labels, 1 means search for index labels) Returns ------- is_label: bool """ axis_int = self._get_axis_number(axis) other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis_int) return ( key is not None and is_hashable(key) and any(key in self.axes[ax] for ax in other_axes) ) def _is_label_or_level_reference(self, key: Level, axis: AxisInt = 0) -> bool_t: """ Test whether a key is a label or level reference for a given axis. To be considered either a label or a level reference, `key` must be a string that: - (axis=0): Matches a column label or an index level - (axis=1): Matches an index label or a column level Parameters ---------- key : Hashable Potential label or level name axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- bool """ return self._is_level_reference(key, axis=axis) or self._is_label_reference( key, axis=axis ) def _check_label_or_level_ambiguity(self, key: Level, axis: Axis = 0) -> None: """ Check whether `key` is ambiguous. By ambiguous, we mean that it matches both a level of the input `axis` and a label of the other axis. Parameters ---------- key : Hashable Label or level name. axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns). Raises ------ ValueError: `key` is ambiguous """ axis_int = self._get_axis_number(axis) other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis_int) if ( key is not None and is_hashable(key) and key in self.axes[axis_int].names and any(key in self.axes[ax] for ax in other_axes) ): # Build an informative and grammatical warning level_article, level_type = ( ("an", "index") if axis_int == 0 else ("a", "column") ) label_article, label_type = ( ("a", "column") if axis_int == 0 else ("an", "index") ) msg = ( f"'{key}' is both {level_article} {level_type} level and " f"{label_article} {label_type} label, which is ambiguous." ) raise ValueError(msg) def _get_label_or_level_values(self, key: Level, axis: AxisInt = 0) -> ArrayLike: """ Return a 1-D array of values associated with `key`, a label or level from the given `axis`. Retrieval logic: - (axis=0): Return column values if `key` matches a column label. Otherwise return index level values if `key` matches an index level. - (axis=1): Return row values if `key` matches an index label. Otherwise return column level values if 'key' matches a column level Parameters ---------- key : Hashable Label or level name. 
axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- np.ndarray or ExtensionArray Raises ------ KeyError if `key` matches neither a label nor a level ValueError if `key` matches multiple labels """ axis = self._get_axis_number(axis) other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis] if self._is_label_reference(key, axis=axis): self._check_label_or_level_ambiguity(key, axis=axis) values = self.xs(key, axis=other_axes[0])._values elif self._is_level_reference(key, axis=axis): values = self.axes[axis].get_level_values(key)._values else: raise KeyError(key) # Check for duplicates if values.ndim > 1: if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex): multi_message = ( "\n" "For a multi-index, the label must be a " "tuple with elements corresponding to each level." ) else: multi_message = "" label_axis_name = "column" if axis == 0 else "index" raise ValueError( f"The {label_axis_name} label '{key}' is not unique.{multi_message}" ) return values def _drop_labels_or_levels(self, keys, axis: AxisInt = 0): """ Drop labels and/or levels for the given `axis`. For each key in `keys`: - (axis=0): If key matches a column label then drop the column. Otherwise if key matches an index level then drop the level. - (axis=1): If key matches an index label then drop the row. Otherwise if key matches a column level then drop the level. Parameters ---------- keys : str or list of str labels or levels to drop axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- dropped: DataFrame Raises ------ ValueError if any `keys` match neither a label nor a level """ axis = self._get_axis_number(axis) # Validate keys keys = common.maybe_make_list(keys) invalid_keys = [ k for k in keys if not self._is_label_or_level_reference(k, axis=axis) ] if invalid_keys: raise ValueError( "The following keys are not valid labels or " f"levels for axis {axis}: {invalid_keys}" ) # Compute levels and labels to drop levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)] labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)] # Perform copy upfront and then use inplace operations below. # This ensures that we always perform exactly one copy. # ``copy`` and/or ``inplace`` options could be added in the future. dropped = self.copy(deep=False) if axis == 0: # Handle dropping index levels if levels_to_drop: dropped.reset_index(levels_to_drop, drop=True, inplace=True) # Handle dropping columns labels if labels_to_drop: dropped.drop(labels_to_drop, axis=1, inplace=True) else: # Handle dropping column levels if levels_to_drop: if isinstance(dropped.columns, MultiIndex): # Drop the specified levels from the MultiIndex dropped.columns = dropped.columns.droplevel(levels_to_drop) else: # Drop the last level of Index by replacing with # a RangeIndex dropped.columns = RangeIndex(dropped.columns.size) # Handle dropping index labels if labels_to_drop: dropped.drop(labels_to_drop, axis=0, inplace=True) return dropped # ---------------------------------------------------------------------- # Iteration # https://github.com/python/typeshed/issues/2148#issuecomment-520783318 # Incompatible types in assignment (expression has type "None", base class # "object" defined the type as "Callable[[object], int]") __hash__: ClassVar[None] # type: ignore[assignment] def __iter__(self) -> Iterator: """ Iterate over info axis. Returns ------- iterator Info axis as iterator. 
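For a DataFrame, iteration is over the column labels; a quick illustration:

>>> df = pd.DataFrame({"A": [1], "B": [2]})
>>> list(df)
['A', 'B']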
""" return iter(self._info_axis) # can we get a better explanation of this? def keys(self) -> Index: """ Get the 'info axis' (see Indexing for more). This is index for Series, columns for DataFrame. Returns ------- Index Info axis. """ return self._info_axis def items(self): """ Iterate over (label, values) on info axis This is index for Series and columns for DataFrame. Returns ------- Generator """ for h in self._info_axis: yield h, self[h] def __len__(self) -> int: """Returns length of info axis""" return len(self._info_axis) def __contains__(self, key) -> bool_t: """True if the key is in the info axis""" return key in self._info_axis def empty(self) -> bool_t: """ Indicator whether Series/DataFrame is empty. True if Series/DataFrame is entirely empty (no items), meaning any of the axes are of length 0. Returns ------- bool If Series/DataFrame is empty, return True, if not return False. See Also -------- Series.dropna : Return series without null values. DataFrame.dropna : Return DataFrame with labels on given axis omitted where (all or any) data are missing. Notes ----- If Series/DataFrame contains only NaNs, it is still not considered empty. See the example below. Examples -------- An example of an actual empty DataFrame. Notice the index is empty: >>> df_empty = pd.DataFrame({'A' : []}) >>> df_empty Empty DataFrame Columns: [A] Index: [] >>> df_empty.empty True If we only have NaNs in our DataFrame, it is not considered empty! We will need to drop the NaNs to make the DataFrame empty: >>> df = pd.DataFrame({'A' : [np.nan]}) >>> df A 0 NaN >>> df.empty False >>> df.dropna().empty True >>> ser_empty = pd.Series({'A' : []}) >>> ser_empty A [] dtype: object >>> ser_empty.empty False >>> ser_empty = pd.Series() >>> ser_empty.empty True """ return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS) # ---------------------------------------------------------------------- # Array Interface # This is also set in IndexOpsMixin # GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented __array_priority__: int = 1000 def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: values = self._values arr = np.asarray(values, dtype=dtype) if ( astype_is_view(values.dtype, arr.dtype) and using_copy_on_write() and self._mgr.is_single_block ): # Check if both conversions can be done without a copy if astype_is_view(self.dtypes.iloc[0], values.dtype) and astype_is_view( values.dtype, arr.dtype ): arr = arr.view() arr.flags.writeable = False return arr def __array_ufunc__( self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any ): return arraylike.array_ufunc(self, ufunc, method, *inputs, **kwargs) # ---------------------------------------------------------------------- # Picklability def __getstate__(self) -> dict[str, Any]: meta = {k: getattr(self, k, None) for k in self._metadata} return { "_mgr": self._mgr, "_typ": self._typ, "_metadata": self._metadata, "attrs": self.attrs, "_flags": {k: self.flags[k] for k in self.flags._keys}, **meta, } def __setstate__(self, state) -> None: if isinstance(state, BlockManager): self._mgr = state elif isinstance(state, dict): if "_data" in state and "_mgr" not in state: # compat for older pickles state["_mgr"] = state.pop("_data") typ = state.get("_typ") if typ is not None: attrs = state.get("_attrs", {}) object.__setattr__(self, "_attrs", attrs) flags = state.get("_flags", {"allows_duplicate_labels": True}) object.__setattr__(self, "_flags", Flags(self, **flags)) # set in the order of internal names # to avoid definitional 
recursion # e.g. say fill_value needing _mgr to be # defined meta = set(self._internal_names + self._metadata) for k in list(meta): if k in state and k != "_flags": v = state[k] object.__setattr__(self, k, v) for k, v in state.items(): if k not in meta: object.__setattr__(self, k, v) else: raise NotImplementedError("Pre-0.12 pickles are no longer supported") elif len(state) == 2: raise NotImplementedError("Pre-0.12 pickles are no longer supported") self._item_cache: dict[Hashable, Series] = {} # ---------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str: # string representation based upon iterating over self # (since, by definition, `PandasContainers` are iterable) prepr = f"[{','.join(map(pprint_thing, self))}]" return f"{type(self).__name__}({prepr})" def _repr_latex_(self): """ Returns a LaTeX representation for a particular object. Mainly for use with nbconvert (jupyter notebook conversion to pdf). """ if config.get_option("styler.render.repr") == "latex": return self.to_latex() else: return None def _repr_data_resource_(self): """ Not a real Jupyter special repr method, but we use the same naming convention. """ if config.get_option("display.html.table_schema"): data = self.head(config.get_option("display.max_rows")) as_json = data.to_json(orient="table") as_json = cast(str, as_json) return loads(as_json, object_pairs_hook=collections.OrderedDict) # ---------------------------------------------------------------------- # I/O Methods klass="object", storage_options=_shared_docs["storage_options"], storage_options_versionadded="1.2.0", ) def to_excel( self, excel_writer, sheet_name: str = "Sheet1", na_rep: str = "", float_format: str | None = None, columns: Sequence[Hashable] | None = None, header: Sequence[Hashable] | bool_t = True, index: bool_t = True, index_label: IndexLabel = None, startrow: int = 0, startcol: int = 0, engine: str | None = None, merge_cells: bool_t = True, inf_rep: str = "inf", freeze_panes: tuple[int, int] | None = None, storage_options: StorageOptions = None, ) -> None: """ Write {klass} to an Excel sheet. To write a single {klass} to an Excel .xlsx file it is only necessary to specify a target file name. To write to multiple sheets it is necessary to create an `ExcelWriter` object with a target file name, and specify a sheet in the file to write to. Multiple sheets may be written to by specifying unique `sheet_name`. With all data written to the file it is necessary to save the changes. Note that creating an `ExcelWriter` object with a file name that already exists will result in the contents of the existing file being erased. Parameters ---------- excel_writer : path-like, file-like, or ExcelWriter object File path or existing ExcelWriter. sheet_name : str, default 'Sheet1' Name of sheet which will contain DataFrame. na_rep : str, default '' Missing data representation. float_format : str, optional Format string for floating point numbers. For example ``float_format="%.2f"`` will format 0.1234 to 0.12. columns : sequence or list of str, optional Columns to write. header : bool or list of str, default True Write out the column names. If a list of string is given it is assumed to be aliases for the column names. index : bool, default True Write row names (index). index_label : str or sequence, optional Column label for index column(s) if desired. If not specified, and `header` and `index` are True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. 
startrow : int, default 0 Upper left cell row to dump data frame. startcol : int, default 0 Upper left cell column to dump data frame. engine : str, optional Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this via the options ``io.excel.xlsx.writer`` or ``io.excel.xlsm.writer``. merge_cells : bool, default True Write MultiIndex and Hierarchical Rows as merged cells. inf_rep : str, default 'inf' Representation for infinity (there is no native representation for infinity in Excel). freeze_panes : tuple of int (length 2), optional Specifies the one-based bottommost row and rightmost column that is to be frozen. {storage_options} .. versionadded:: {storage_options_versionadded} See Also -------- to_csv : Write DataFrame to a comma-separated values (csv) file. ExcelWriter : Class for writing DataFrame objects into excel sheets. read_excel : Read an Excel file into a pandas DataFrame. read_csv : Read a comma-separated values (csv) file into DataFrame. io.formats.style.Styler.to_excel : Add styles to Excel sheet. Notes ----- For compatibility with :meth:`~DataFrame.to_csv`, to_excel serializes lists and dicts to strings before writing. Once a workbook has been saved it is not possible to write further data without rewriting the whole workbook. Examples -------- Create, write to and save a workbook: >>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']], ... index=['row 1', 'row 2'], ... columns=['col 1', 'col 2']) >>> df1.to_excel("output.xlsx") # doctest: +SKIP To specify the sheet name: >>> df1.to_excel("output.xlsx", ... sheet_name='Sheet_name_1') # doctest: +SKIP If you wish to write to more than one sheet in the workbook, it is necessary to specify an ExcelWriter object: >>> df2 = df1.copy() >>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP ... df1.to_excel(writer, sheet_name='Sheet_name_1') ... df2.to_excel(writer, sheet_name='Sheet_name_2') ExcelWriter can also be used to append to an existing Excel file: >>> with pd.ExcelWriter('output.xlsx', ... mode='a') as writer: # doctest: +SKIP ... df.to_excel(writer, sheet_name='Sheet_name_3') To set the library that is used to write the Excel file, you can pass the `engine` keyword (the default engine is automatically chosen depending on the file extension): >>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP """ df = self if isinstance(self, ABCDataFrame) else self.to_frame() from pandas.io.formats.excel import ExcelFormatter formatter = ExcelFormatter( df, na_rep=na_rep, cols=columns, header=header, float_format=float_format, index=index, index_label=index_label, merge_cells=merge_cells, inf_rep=inf_rep, ) formatter.write( excel_writer, sheet_name=sheet_name, startrow=startrow, startcol=startcol, freeze_panes=freeze_panes, engine=engine, storage_options=storage_options, ) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buf", ) def to_json( self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, orient: str | None = None, date_format: str | None = None, double_precision: int = 10, force_ascii: bool_t = True, date_unit: str = "ms", default_handler: Callable[[Any], JSONSerializable] | None = None, lines: bool_t = False, compression: CompressionOptions = "infer", index: bool_t = True, indent: int | None = None, storage_options: StorageOptions = None, mode: Literal["a", "w"] = "w", ) -> str | None: """ Convert the object to a JSON string. 
Note NaN's and None will be converted to null and datetime objects will be converted to UNIX timestamps. Parameters ---------- path_or_buf : str, path object, file-like object, or None, default None String, path object (implementing os.PathLike[str]), or file-like object implementing a write() function. If None, the result is returned as a string. orient : str Indication of expected JSON string format. * Series: - default is 'index' - allowed values are: {{'split', 'records', 'index', 'table'}}. * DataFrame: - default is 'columns' - allowed values are: {{'split', 'records', 'index', 'columns', 'values', 'table'}}. * The format of the JSON string: - 'split' : dict like {{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}} - 'records' : list like [{{column -> value}}, ... , {{column -> value}}] - 'index' : dict like {{index -> {{column -> value}}}} - 'columns' : dict like {{column -> {{index -> value}}}} - 'values' : just the values array - 'table' : dict like {{'schema': {{schema}}, 'data': {{data}}}} Describing the data, where data component is like ``orient='records'``. date_format : {{None, 'epoch', 'iso'}} Type of date conversion. 'epoch' = epoch milliseconds, 'iso' = ISO8601. The default depends on the `orient`. For ``orient='table'``, the default is 'iso'. For all other orients, the default is 'epoch'. double_precision : int, default 10 The number of decimal places to use when encoding floating point values. force_ascii : bool, default True Force encoded string to be ASCII. date_unit : str, default 'ms' (milliseconds) The time unit to encode to, governs timestamp and ISO8601 precision. One of 's', 'ms', 'us', 'ns' for second, millisecond, microsecond, and nanosecond respectively. default_handler : callable, default None Handler to call if object cannot otherwise be converted to a suitable format for JSON. Should receive a single argument which is the object to convert and return a serialisable object. lines : bool, default False If 'orient' is 'records' write out line-delimited json format. Will throw ValueError if incorrect 'orient' since others are not list-like. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. index : bool, default True Whether to include the index values in the JSON string. Not including the index (``index=False``) is only supported when orient is 'split' or 'table'. indent : int, optional Length of whitespace used to indent each record. {storage_options} .. versionadded:: 1.2.0 mode : str, default 'w' (writing) Specify the IO mode for output when supplying a path_or_buf. Accepted args are 'w' (writing) and 'a' (append) only. mode='a' is only supported when lines is True and orient is 'records'. Returns ------- None or str If path_or_buf is None, returns the resulting json format as a string. Otherwise returns None. See Also -------- read_json : Convert a JSON string to pandas object. Notes ----- The behavior of ``indent=0`` varies from the stdlib, which does not indent the output but does insert newlines. Currently, ``indent=0`` and the default ``indent=None`` are equivalent in pandas, though this may change in a future release. ``orient='table'`` contains a 'pandas_version' field under 'schema'. This stores the version of `pandas` used in the latest revision of the schema. Examples -------- >>> from json import loads, dumps >>> df = pd.DataFrame( ... [["a", "b"], ["c", "d"]], ... index=["row 1", "row 2"], ... columns=["col 1", "col 2"], ... 
) >>> result = df.to_json(orient="split") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "columns": [ "col 1", "col 2" ], "index": [ "row 1", "row 2" ], "data": [ [ "a", "b" ], [ "c", "d" ] ] }} Encoding/decoding a Dataframe using ``'records'`` formatted JSON. Note that index labels are not preserved with this encoding. >>> result = df.to_json(orient="records") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP [ {{ "col 1": "a", "col 2": "b" }}, {{ "col 1": "c", "col 2": "d" }} ] Encoding/decoding a Dataframe using ``'index'`` formatted JSON: >>> result = df.to_json(orient="index") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "row 1": {{ "col 1": "a", "col 2": "b" }}, "row 2": {{ "col 1": "c", "col 2": "d" }} }} Encoding/decoding a Dataframe using ``'columns'`` formatted JSON: >>> result = df.to_json(orient="columns") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "col 1": {{ "row 1": "a", "row 2": "c" }}, "col 2": {{ "row 1": "b", "row 2": "d" }} }} Encoding/decoding a Dataframe using ``'values'`` formatted JSON: >>> result = df.to_json(orient="values") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP [ [ "a", "b" ], [ "c", "d" ] ] Encoding with Table Schema: >>> result = df.to_json(orient="table") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "schema": {{ "fields": [ {{ "name": "index", "type": "string" }}, {{ "name": "col 1", "type": "string" }}, {{ "name": "col 2", "type": "string" }} ], "primaryKey": [ "index" ], "pandas_version": "1.4.0" }}, "data": [ {{ "index": "row 1", "col 1": "a", "col 2": "b" }}, {{ "index": "row 2", "col 1": "c", "col 2": "d" }} ] }} """ from pandas.io import json if date_format is None and orient == "table": date_format = "iso" elif date_format is None: date_format = "epoch" config.is_nonnegative_int(indent) indent = indent or 0 return json.to_json( path_or_buf=path_or_buf, obj=self, orient=orient, date_format=date_format, double_precision=double_precision, force_ascii=force_ascii, date_unit=date_unit, default_handler=default_handler, lines=lines, compression=compression, index=index, indent=indent, storage_options=storage_options, mode=mode, ) def to_hdf( self, path_or_buf: FilePath | HDFStore, key: str, mode: str = "a", complevel: int | None = None, complib: str | None = None, append: bool_t = False, format: str | None = None, index: bool_t = True, min_itemsize: int | dict[str, int] | None = None, nan_rep=None, dropna: bool_t | None = None, data_columns: Literal[True] | list[str] | None = None, errors: str = "strict", encoding: str = "UTF-8", ) -> None: """ Write the contained data to an HDF5 file using HDFStore. Hierarchical Data Format (HDF) is self-describing, allowing an application to interpret the structure and contents of a file with no outside information. One HDF file can hold a mix of related objects which can be accessed as a group or as individual objects. In order to add another DataFrame or Series to an existing HDF file please use append mode and a different a key. .. warning:: One can store a subclass of ``DataFrame`` or ``Series`` to HDF5, but the type of the subclass is lost upon storing. For more information see the :ref:`user guide <io.hdf5>`. Parameters ---------- path_or_buf : str or pandas.HDFStore File path or HDFStore object. key : str Identifier for the group in the store. 
mode : {'a', 'w', 'r+'}, default 'a' Mode to open file: - 'w': write, a new file is created (an existing file with the same name would be deleted). - 'a': append, an existing file is opened for reading and writing, and if the file does not exist it is created. - 'r+': similar to 'a', but the file must already exist. complevel : {0-9}, default None Specifies a compression level for data. A value of 0 or None disables compression. complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib' Specifies the compression library to be used. As of v0.20.2 these additional compressors for Blosc are supported (default if no compressor specified: 'blosc:blosclz'): {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy', 'blosc:zlib', 'blosc:zstd'}. Specifying a compression library which is not available issues a ValueError. append : bool, default False For Table formats, append the input data to the existing. format : {'fixed', 'table', None}, default 'fixed' Possible values: - 'fixed': Fixed format. Fast writing/reading. Not-appendable, nor searchable. - 'table': Table format. Write as a PyTables Table structure which may perform worse but allow more flexible operations like searching / selecting subsets of the data. - If None, pd.get_option('io.hdf.default_format') is checked, followed by fallback to "fixed". index : bool, default True Write DataFrame index as a column. min_itemsize : dict or int, optional Map column names to minimum string sizes for columns. nan_rep : Any, optional How to represent null values as str. Not allowed with append=True. dropna : bool, default False, optional Remove missing values. data_columns : list of columns or True, optional List of columns to create as indexed data columns for on-disk queries, or True to use all columns. By default only the axes of the object are indexed. See :ref:`Query via data columns<io.hdf5-query-data-columns>`. for more information. Applicable only to format='table'. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. encoding : str, default "UTF-8" See Also -------- read_hdf : Read from HDF file. DataFrame.to_orc : Write a DataFrame to the binary orc format. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. DataFrame.to_sql : Write to a SQL table. DataFrame.to_feather : Write out feather-format for DataFrames. DataFrame.to_csv : Write out to a csv file. Examples -------- >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}, ... 
index=['a', 'b', 'c']) # doctest: +SKIP >>> df.to_hdf('data.h5', key='df', mode='w') # doctest: +SKIP We can add another object to the same file: >>> s = pd.Series([1, 2, 3, 4]) # doctest: +SKIP >>> s.to_hdf('data.h5', key='s') # doctest: +SKIP Reading from HDF file: >>> pd.read_hdf('data.h5', 'df') # doctest: +SKIP A B a 1 4 b 2 5 c 3 6 >>> pd.read_hdf('data.h5', 's') # doctest: +SKIP 0 1 1 2 2 3 3 4 dtype: int64 """ from pandas.io import pytables # Argument 3 to "to_hdf" has incompatible type "NDFrame"; expected # "Union[DataFrame, Series]" [arg-type] pytables.to_hdf( path_or_buf, key, self, # type: ignore[arg-type] mode=mode, complevel=complevel, complib=complib, append=append, format=format, index=index, min_itemsize=min_itemsize, nan_rep=nan_rep, dropna=dropna, data_columns=data_columns, errors=errors, encoding=encoding, ) def to_sql( self, name: str, con, schema: str | None = None, if_exists: Literal["fail", "replace", "append"] = "fail", index: bool_t = True, index_label: IndexLabel = None, chunksize: int | None = None, dtype: DtypeArg | None = None, method: str | None = None, ) -> int | None: """ Write records stored in a DataFrame to a SQL database. Databases supported by SQLAlchemy [1]_ are supported. Tables can be newly created, appended to, or overwritten. Parameters ---------- name : str Name of SQL table. con : sqlalchemy.engine.(Engine or Connection) or sqlite3.Connection Using SQLAlchemy makes it possible to use any DB supported by that library. Legacy support is provided for sqlite3.Connection objects. The user is responsible for engine disposal and connection closure for the SQLAlchemy connectable. See `here \ <https://docs.sqlalchemy.org/en/20/core/connections.html>`_. If passing a sqlalchemy.engine.Connection which is already in a transaction, the transaction will not be committed. If passing a sqlite3.Connection, it will not be possible to roll back the record insertion. schema : str, optional Specify the schema (if database flavor supports this). If None, use default schema. if_exists : {'fail', 'replace', 'append'}, default 'fail' How to behave if the table already exists. * fail: Raise a ValueError. * replace: Drop the table before inserting new values. * append: Insert new values to the existing table. index : bool, default True Write DataFrame index as a column. Uses `index_label` as the column name in the table. index_label : str or sequence, default None Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. chunksize : int, optional Specify the number of rows in each batch to be written at a time. By default, all rows will be written at once. dtype : dict or scalar, optional Specifying the datatype for columns. If a dictionary is used, the keys should be the column names and the values should be the SQLAlchemy types or strings for the sqlite3 legacy mode. If a scalar is provided, it will be applied to all columns. method : {None, 'multi', callable}, optional Controls the SQL insertion clause used: * None : Uses standard SQL ``INSERT`` clause (one per row). * 'multi': Pass multiple values in a single ``INSERT`` clause. * callable with signature ``(pd_table, conn, keys, data_iter)``. Details and a sample callable implementation can be found in the section :ref:`insert method <io.sql.method>`. Returns ------- None or int Number of rows affected by to_sql. 
            None is returned if the callable passed into ``method`` does
            not return an integer number of rows.

            The number of returned rows affected is the sum of the ``rowcount``
            attribute of ``sqlite3.Cursor`` or SQLAlchemy connectable which may
            not reflect the exact number of written rows as stipulated in the
            `sqlite3 <https://docs.python.org/3/library/sqlite3.html#sqlite3.Cursor.rowcount>`__
            or `SQLAlchemy
            <https://docs.sqlalchemy.org/en/20/core/connections.html#sqlalchemy.engine.CursorResult.rowcount>`__.

            .. versionadded:: 1.4.0

        Raises
        ------
        ValueError
            When the table already exists and `if_exists` is 'fail' (the
            default).

        See Also
        --------
        read_sql : Read a DataFrame from a table.

        Notes
        -----
        Timezone aware datetime columns will be written as
        ``Timestamp with timezone`` type with SQLAlchemy if supported by the
        database. Otherwise, the datetimes will be stored as timezone unaware
        timestamps local to the original timezone.

        References
        ----------
        .. [1] https://docs.sqlalchemy.org
        .. [2] https://www.python.org/dev/peps/pep-0249/

        Examples
        --------
        Create an in-memory SQLite database.

        >>> from sqlalchemy import create_engine
        >>> engine = create_engine('sqlite://', echo=False)

        Create a table from scratch with 3 rows.

        >>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})
        >>> df
             name
        0  User 1
        1  User 2
        2  User 3

        >>> df.to_sql('users', con=engine)
        3
        >>> from sqlalchemy import text
        >>> with engine.connect() as conn:
        ...    conn.execute(text("SELECT * FROM users")).fetchall()
        [(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]

        An `sqlalchemy.engine.Connection` can also be passed to `con`:

        >>> with engine.begin() as connection:
        ...     df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})
        ...     df1.to_sql('users', con=connection, if_exists='append')
        2

        This is allowed to support operations that require that the same
        DBAPI connection is used for the entire operation.

        >>> df2 = pd.DataFrame({'name' : ['User 6', 'User 7']})
        >>> df2.to_sql('users', con=engine, if_exists='append')
        2
        >>> with engine.connect() as conn:
        ...    conn.execute(text("SELECT * FROM users")).fetchall()
        [(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),
         (0, 'User 4'), (1, 'User 5'), (0, 'User 6'),
         (1, 'User 7')]

        Overwrite the table with just ``df2``.

        >>> df2.to_sql('users', con=engine, if_exists='replace',
        ...            index_label='id')
        2
        >>> with engine.connect() as conn:
        ...    conn.execute(text("SELECT * FROM users")).fetchall()
        [(0, 'User 6'), (1, 'User 7')]

        Specify the dtype (especially useful for integers with missing
        values). Notice that while pandas is forced to store the data as
        floating point, the database supports nullable integers. When fetching
        the data with Python, we get back integer scalars.

        >>> df = pd.DataFrame({"A": [1, None, 2]})
        >>> df
             A
        0  1.0
        1  NaN
        2  2.0

        >>> from sqlalchemy.types import Integer
        >>> df.to_sql('integers', con=engine, index=False,
        ...           dtype={"A": Integer()})
        3

        >>> with engine.connect() as conn:
        ...    conn.execute(text("SELECT * FROM integers")).fetchall()
        [(1,), (None,), (2,)]
        """  # noqa:E501
        from pandas.io import sql

        return sql.to_sql(
            self,
            name,
            con,
            schema=schema,
            if_exists=if_exists,
            index=index,
            index_label=index_label,
            chunksize=chunksize,
            dtype=dtype,
            method=method,
        )

    @doc(
        storage_options=_shared_docs["storage_options"],
        compression_options=_shared_docs["compression_options"] % "path",
    )
    def to_pickle(
        self,
        path: FilePath | WriteBuffer[bytes],
        compression: CompressionOptions = "infer",
        protocol: int = pickle.HIGHEST_PROTOCOL,
        storage_options: StorageOptions = None,
    ) -> None:
        """
        Pickle (serialize) object to file.
Parameters ---------- path : str, path object, or file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. File path where the pickled object will be stored. {compression_options} protocol : int Int which indicates which protocol should be used by the pickler, default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible values are 0, 1, 2, 3, 4, 5. A negative value for the protocol parameter is equivalent to setting its value to HIGHEST_PROTOCOL. .. [1] https://docs.python.org/3/library/pickle.html. {storage_options} .. versionadded:: 1.2.0 See Also -------- read_pickle : Load pickled pandas object (or any object) from file. DataFrame.to_hdf : Write DataFrame to an HDF5 file. DataFrame.to_sql : Write DataFrame to a SQL database. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Examples -------- >>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}}) # doctest: +SKIP >>> original_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> original_df.to_pickle("./dummy.pkl") # doctest: +SKIP >>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP >>> unpickled_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 """ # noqa: E501 from pandas.io.pickle import to_pickle to_pickle( self, path, compression=compression, protocol=protocol, storage_options=storage_options, ) def to_clipboard( self, excel: bool_t = True, sep: str | None = None, **kwargs ) -> None: r""" Copy object to the system clipboard. Write a text representation of object to the system clipboard. This can be pasted into Excel, for example. Parameters ---------- excel : bool, default True Produce output in a csv format for easy pasting into excel. - True, use the provided separator for csv pasting. - False, write a string representation of the object to the clipboard. sep : str, default ``'\t'`` Field delimiter. **kwargs These parameters will be passed to DataFrame.to_csv. See Also -------- DataFrame.to_csv : Write a DataFrame to a comma-separated values (csv) file. read_clipboard : Read text from clipboard and pass to read_csv. Notes ----- Requirements for your platform. - Linux : `xclip`, or `xsel` (with `PyQt4` modules) - Windows : none - macOS : none This method uses the processes developed for the package `pyperclip`. A solution to render any output string format is given in the examples. Examples -------- Copy the contents of a DataFrame to the clipboard. >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) >>> df.to_clipboard(sep=',') # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # ,A,B,C ... # 0,1,2,3 ... # 1,4,5,6 We can omit the index by passing the keyword `index` and setting it to false. >>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # A,B,C ... # 1,2,3 ... # 4,5,6 Using the original `pyperclip` package for any string output format. .. code-block:: python import pyperclip html = df.style.to_html() pyperclip.copy(html) """ from pandas.io import clipboards clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs) def to_xarray(self): """ Return an xarray object from the pandas object. Returns ------- xarray.DataArray or xarray.Dataset Data in the pandas structure converted to Dataset if the object is a DataFrame, or a DataArray if the object is a Series. See Also -------- DataFrame.to_hdf : Write DataFrame to an HDF5 file. 
DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Notes ----- See the `xarray docs <https://xarray.pydata.org/en/stable/>`__ Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2), ... ('parrot', 'bird', 24.0, 2), ... ('lion', 'mammal', 80.5, 4), ... ('monkey', 'mammal', np.nan, 4)], ... columns=['name', 'class', 'max_speed', ... 'num_legs']) >>> df name class max_speed num_legs 0 falcon bird 389.0 2 1 parrot bird 24.0 2 2 lion mammal 80.5 4 3 monkey mammal NaN 4 >>> df.to_xarray() <xarray.Dataset> Dimensions: (index: 4) Coordinates: * index (index) int64 0 1 2 3 Data variables: name (index) object 'falcon' 'parrot' 'lion' 'monkey' class (index) object 'bird' 'bird' 'mammal' 'mammal' max_speed (index) float64 389.0 24.0 80.5 nan num_legs (index) int64 2 2 4 4 >>> df['max_speed'].to_xarray() <xarray.DataArray 'max_speed' (index: 4)> array([389. , 24. , 80.5, nan]) Coordinates: * index (index) int64 0 1 2 3 >>> dates = pd.to_datetime(['2018-01-01', '2018-01-01', ... '2018-01-02', '2018-01-02']) >>> df_multiindex = pd.DataFrame({'date': dates, ... 'animal': ['falcon', 'parrot', ... 'falcon', 'parrot'], ... 'speed': [350, 18, 361, 15]}) >>> df_multiindex = df_multiindex.set_index(['date', 'animal']) >>> df_multiindex speed date animal 2018-01-01 falcon 350 parrot 18 2018-01-02 falcon 361 parrot 15 >>> df_multiindex.to_xarray() <xarray.Dataset> Dimensions: (date: 2, animal: 2) Coordinates: * date (date) datetime64[ns] 2018-01-01 2018-01-02 * animal (animal) object 'falcon' 'parrot' Data variables: speed (date, animal) int64 350 18 361 15 """ xarray = import_optional_dependency("xarray") if self.ndim == 1: return xarray.DataArray.from_series(self) else: return xarray.Dataset.from_dataframe(self) def to_latex( self, buf: None = ..., columns: Sequence[Hashable] | None = ..., header: bool_t | Sequence[str] = ..., index: bool_t = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool_t | None = ..., index_names: bool_t = ..., bold_rows: bool_t = ..., column_format: str | None = ..., longtable: bool_t | None = ..., escape: bool_t | None = ..., encoding: str | None = ..., decimal: str = ..., multicolumn: bool_t | None = ..., multicolumn_format: str | None = ..., multirow: bool_t | None = ..., caption: str | tuple[str, str] | None = ..., label: str | None = ..., position: str | None = ..., ) -> str: ... def to_latex( self, buf: FilePath | WriteBuffer[str], columns: Sequence[Hashable] | None = ..., header: bool_t | Sequence[str] = ..., index: bool_t = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool_t | None = ..., index_names: bool_t = ..., bold_rows: bool_t = ..., column_format: str | None = ..., longtable: bool_t | None = ..., escape: bool_t | None = ..., encoding: str | None = ..., decimal: str = ..., multicolumn: bool_t | None = ..., multicolumn_format: str | None = ..., multirow: bool_t | None = ..., caption: str | tuple[str, str] | None = ..., label: str | None = ..., position: str | None = ..., ) -> None: ... 
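    # The two bare ``to_latex`` signatures above are typing overloads: calling
    # with ``buf=None`` returns the rendered LaTeX as a ``str``, while passing
    # a path or writable buffer writes the table out and returns ``None``. A
    # minimal usage sketch ('table.tex' is a hypothetical output path):
    #
    #     tex = df.to_latex()           # LaTeX source as a string
    #     df.to_latex("table.tex")      # writes the file, returns None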
def to_latex( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[Hashable] | None = None, header: bool_t | Sequence[str] = True, index: bool_t = True, na_rep: str = "NaN", formatters: FormattersType | None = None, float_format: FloatFormatType | None = None, sparsify: bool_t | None = None, index_names: bool_t = True, bold_rows: bool_t = False, column_format: str | None = None, longtable: bool_t | None = None, escape: bool_t | None = None, encoding: str | None = None, decimal: str = ".", multicolumn: bool_t | None = None, multicolumn_format: str | None = None, multirow: bool_t | None = None, caption: str | tuple[str, str] | None = None, label: str | None = None, position: str | None = None, ) -> str | None: r""" Render object to a LaTeX tabular, longtable, or nested table. Requires ``\usepackage{{booktabs}}``. The output can be copy/pasted into a main LaTeX document or read from an external file with ``\input{{table.tex}}``. .. versionchanged:: 1.2.0 Added position argument, changed meaning of caption argument. .. versionchanged:: 2.0.0 Refactored to use the Styler implementation via jinja2 templating. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. columns : list of label, optional The subset of columns to write. Writes all columns by default. header : bool or list of str, default True Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names. index : bool, default True Write row names (index). na_rep : str, default 'NaN' Missing data representation. formatters : list of functions or dict of {{str: function}}, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function or str, optional, default None Formatter for floating point numbers. For example ``float_format="%.2f"`` and ``float_format="{{:0.2f}}".format`` will both result in 0.1234 being formatted as 0.12. sparsify : bool, optional Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. By default, the value will be read from the config module. index_names : bool, default True Prints the names of the indexes. bold_rows : bool, default False Make the row labels bold in the output. column_format : str, optional The columns format as specified in `LaTeX table format <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3 columns. By default, 'l' will be used for all columns except columns of numbers, which default to 'r'. longtable : bool, optional Use a longtable environment instead of tabular. Requires adding a \usepackage{{longtable}} to your LaTeX preamble. By default, the value will be read from the pandas config module, and set to `True` if the option ``styler.latex.environment`` is `"longtable"`. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed. escape : bool, optional By default, the value will be read from the pandas config module and set to `True` if the option ``styler.format.escape`` is `"latex"`. When set to False prevents from escaping latex special characters in column names. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed, as has the default value to `False`. encoding : str, optional A string representing the encoding to use in the output file, defaults to 'utf-8'. 
decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. multicolumn : bool, default True Use \multicolumn to enhance MultiIndex columns. The default will be read from the config module, and is set as the option ``styler.sparse.columns``. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed. multicolumn_format : str, default 'r' The alignment for multicolumns, similar to `column_format` The default will be read from the config module, and is set as the option ``styler.latex.multicol_align``. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed, as has the default value to "r". multirow : bool, default True Use \multirow to enhance MultiIndex rows. Requires adding a \usepackage{{multirow}} to your LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained rows, separating groups via clines. The default will be read from the pandas config module, and is set as the option ``styler.sparse.index``. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed, as has the default value to `True`. caption : str or tuple, optional Tuple (full_caption, short_caption), which results in ``\caption[short_caption]{{full_caption}}``; if a single string is passed, no short caption will be set. .. versionchanged:: 1.2.0 Optionally allow caption to be a tuple ``(full_caption, short_caption)``. label : str, optional The LaTeX label to be placed inside ``\label{{}}`` in the output. This is used with ``\ref{{}}`` in the main ``.tex`` file. position : str, optional The LaTeX positional argument for tables, to be placed after ``\begin{{}}`` in the output. .. versionadded:: 1.2.0 Returns ------- str or None If buf is None, returns the result as a string. Otherwise returns None. See Also -------- io.formats.style.Styler.to_latex : Render a DataFrame to LaTeX with conditional formatting. DataFrame.to_string : Render a DataFrame to a console-friendly tabular output. DataFrame.to_html : Render a DataFrame as an HTML table. Notes ----- As of v2.0.0 this method has changed to use the Styler implementation as part of :meth:`.Styler.to_latex` via ``jinja2`` templating. This means that ``jinja2`` is a requirement, and needs to be installed, for this method to function. It is advised that users switch to using Styler, since that implementation is more frequently updated and contains much more flexibility with the output. Examples -------- Convert a general DataFrame to LaTeX with formatting: >>> df = pd.DataFrame(dict(name=['Raphael', 'Donatello'], ... age=[26, 45], ... height=[181.23, 177.65])) >>> print(df.to_latex(index=False, ... formatters={"name": str.upper}, ... float_format="{:.1f}".format, ... 
)) # doctest: +SKIP \begin{tabular}{lrr} \toprule name & age & height \\ \midrule RAPHAEL & 26 & 181.2 \\ DONATELLO & 45 & 177.7 \\ \bottomrule \end{tabular} """ # Get defaults from the pandas config if self.ndim == 1: self = self.to_frame() if longtable is None: longtable = config.get_option("styler.latex.environment") == "longtable" if escape is None: escape = config.get_option("styler.format.escape") == "latex" if multicolumn is None: multicolumn = config.get_option("styler.sparse.columns") if multicolumn_format is None: multicolumn_format = config.get_option("styler.latex.multicol_align") if multirow is None: multirow = config.get_option("styler.sparse.index") if column_format is not None and not isinstance(column_format, str): raise ValueError("`column_format` must be str or unicode") length = len(self.columns) if columns is None else len(columns) if isinstance(header, (list, tuple)) and len(header) != length: raise ValueError(f"Writing {length} cols but got {len(header)} aliases") # Refactor formatters/float_format/decimal/na_rep/escape to Styler structure base_format_ = { "na_rep": na_rep, "escape": "latex" if escape else None, "decimal": decimal, } index_format_: dict[str, Any] = {"axis": 0, **base_format_} column_format_: dict[str, Any] = {"axis": 1, **base_format_} if isinstance(float_format, str): float_format_: Callable | None = lambda x: float_format % x else: float_format_ = float_format def _wrap(x, alt_format_): if isinstance(x, (float, complex)) and float_format_ is not None: return float_format_(x) else: return alt_format_(x) formatters_: list | tuple | dict | Callable | None = None if isinstance(formatters, list): formatters_ = { c: partial(_wrap, alt_format_=formatters[i]) for i, c in enumerate(self.columns) } elif isinstance(formatters, dict): index_formatter = formatters.pop("__index__", None) column_formatter = formatters.pop("__columns__", None) if index_formatter is not None: index_format_.update({"formatter": index_formatter}) if column_formatter is not None: column_format_.update({"formatter": column_formatter}) formatters_ = formatters float_columns = self.select_dtypes(include="float").columns for col in float_columns: if col not in formatters.keys(): formatters_.update({col: float_format_}) elif formatters is None and float_format is not None: formatters_ = partial(_wrap, alt_format_=lambda v: v) format_index_ = [index_format_, column_format_] # Deal with hiding indexes and relabelling column names hide_: list[dict] = [] relabel_index_: list[dict] = [] if columns: hide_.append( { "subset": [c for c in self.columns if c not in columns], "axis": "columns", } ) if header is False: hide_.append({"axis": "columns"}) elif isinstance(header, (list, tuple)): relabel_index_.append({"labels": header, "axis": "columns"}) format_index_ = [index_format_] # column_format is overwritten if index is False: hide_.append({"axis": "index"}) if index_names is False: hide_.append({"names": True, "axis": "index"}) render_kwargs_ = { "hrules": True, "sparse_index": sparsify, "sparse_columns": sparsify, "environment": "longtable" if longtable else None, "multicol_align": multicolumn_format if multicolumn else f"naive-{multicolumn_format}", "multirow_align": "t" if multirow else "naive", "encoding": encoding, "caption": caption, "label": label, "position": position, "column_format": column_format, "clines": "skip-last;data" if (multirow and isinstance(self.index, MultiIndex)) else None, "bold_rows": bold_rows, } return self._to_latex_via_styler( buf, hide=hide_, 
relabel_index=relabel_index_, format={"formatter": formatters_, **base_format_}, format_index=format_index_, render_kwargs=render_kwargs_, ) def _to_latex_via_styler( self, buf=None, *, hide: dict | list[dict] | None = None, relabel_index: dict | list[dict] | None = None, format: dict | list[dict] | None = None, format_index: dict | list[dict] | None = None, render_kwargs: dict | None = None, ): """ Render object to a LaTeX tabular, longtable, or nested table. Uses the ``Styler`` implementation with the following, ordered, method chaining: .. code-block:: python styler = Styler(DataFrame) styler.hide(**hide) styler.relabel_index(**relabel_index) styler.format(**format) styler.format_index(**format_index) styler.to_latex(buf=buf, **render_kwargs) Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. hide : dict, list of dict Keyword args to pass to the method call of ``Styler.hide``. If a list will call the method numerous times. relabel_index : dict, list of dict Keyword args to pass to the method of ``Styler.relabel_index``. If a list will call the method numerous times. format : dict, list of dict Keyword args to pass to the method call of ``Styler.format``. If a list will call the method numerous times. format_index : dict, list of dict Keyword args to pass to the method call of ``Styler.format_index``. If a list will call the method numerous times. render_kwargs : dict Keyword args to pass to the method call of ``Styler.to_latex``. Returns ------- str or None If buf is None, returns the result as a string. Otherwise returns None. """ from pandas.io.formats.style import Styler self = cast("DataFrame", self) styler = Styler(self, uuid="") for kw_name in ["hide", "relabel_index", "format", "format_index"]: kw = vars()[kw_name] if isinstance(kw, dict): getattr(styler, kw_name)(**kw) elif isinstance(kw, list): for sub_kw in kw: getattr(styler, kw_name)(**sub_kw) # bold_rows is not a direct kwarg of Styler.to_latex render_kwargs = {} if render_kwargs is None else render_kwargs if render_kwargs.pop("bold_rows"): styler.applymap_index(lambda v: "textbf:--rwrap;") return styler.to_latex(buf=buf, **render_kwargs) def to_csv( self, path_or_buf: None = ..., sep: str = ..., na_rep: str = ..., float_format: str | Callable | None = ..., columns: Sequence[Hashable] | None = ..., header: bool_t | list[str] = ..., index: bool_t = ..., index_label: IndexLabel | None = ..., mode: str = ..., encoding: str | None = ..., compression: CompressionOptions = ..., quoting: int | None = ..., quotechar: str = ..., lineterminator: str | None = ..., chunksize: int | None = ..., date_format: str | None = ..., doublequote: bool_t = ..., escapechar: str | None = ..., decimal: str = ..., errors: str = ..., storage_options: StorageOptions = ..., ) -> str: ... def to_csv( self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str], sep: str = ..., na_rep: str = ..., float_format: str | Callable | None = ..., columns: Sequence[Hashable] | None = ..., header: bool_t | list[str] = ..., index: bool_t = ..., index_label: IndexLabel | None = ..., mode: str = ..., encoding: str | None = ..., compression: CompressionOptions = ..., quoting: int | None = ..., quotechar: str = ..., lineterminator: str | None = ..., chunksize: int | None = ..., date_format: str | None = ..., doublequote: bool_t = ..., escapechar: str | None = ..., decimal: str = ..., errors: str = ..., storage_options: StorageOptions = ..., ) -> None: ... 
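    # As with ``to_latex`` above, these two bare ``to_csv`` signatures are
    # typing overloads: ``path_or_buf=None`` returns the CSV payload as a
    # ``str``, while a path or buffer target writes the file and returns
    # ``None``. A minimal sketch ('out.csv' is a hypothetical path):
    #
    #     text = df.to_csv()            # e.g. ',A\n0,1\n1,2\n'
    #     df.to_csv("out.csv")          # writes to disk, returns None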
    @doc(
        storage_options=_shared_docs["storage_options"],
        compression_options=_shared_docs["compression_options"] % "path_or_buf",
    )
    def to_csv(
        self,
        path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
        sep: str = ",",
        na_rep: str = "",
        float_format: str | Callable | None = None,
        columns: Sequence[Hashable] | None = None,
        header: bool_t | list[str] = True,
        index: bool_t = True,
        index_label: IndexLabel | None = None,
        mode: str = "w",
        encoding: str | None = None,
        compression: CompressionOptions = "infer",
        quoting: int | None = None,
        quotechar: str = '"',
        lineterminator: str | None = None,
        chunksize: int | None = None,
        date_format: str | None = None,
        doublequote: bool_t = True,
        escapechar: str | None = None,
        decimal: str = ".",
        errors: str = "strict",
        storage_options: StorageOptions = None,
    ) -> str | None:
        r"""
        Write object to a comma-separated values (csv) file.

        Parameters
        ----------
        path_or_buf : str, path object, file-like object, or None, default None
            String, path object (implementing os.PathLike[str]), or file-like
            object implementing a write() function. If None, the result is
            returned as a string. If a non-binary file object is passed, it
            should be opened with `newline=''`, disabling universal newlines.
            If a binary file object is passed, `mode` might need to contain a
            `'b'`.

            .. versionchanged:: 1.2.0

               Support for binary file objects was introduced.

        sep : str, default ','
            String of length 1. Field delimiter for the output file.
        na_rep : str, default ''
            Missing data representation.
        float_format : str, Callable, default None
            Format string for floating point numbers. If a Callable is given,
            it takes precedence over other numeric formatting parameters, like
            decimal.
        columns : sequence, optional
            Columns to write.
        header : bool or list of str, default True
            Write out the column names. If a list of strings is given it is
            assumed to be aliases for the column names.
        index : bool, default True
            Write row names (index).
        index_label : str or sequence, or False, default None
            Column label for index column(s) if desired. If None is given, and
            `header` and `index` are True, then the index names are used. A
            sequence should be given if the object uses MultiIndex. If False
            do not print fields for index names. Use index_label=False for
            easier importing in R.
        mode : str, default 'w'
            Python write mode. The available write modes are the same as
            :py:func:`open`.
        encoding : str, optional
            A string representing the encoding to use in the output file,
            defaults to 'utf-8'. `encoding` is not supported if `path_or_buf`
            is a non-binary file object.
        {compression_options}

            .. versionchanged:: 1.0.0

               May now be a dict with key 'method' as compression mode
               and other entries as additional compression options if
               compression mode is 'zip'.

            .. versionchanged:: 1.1.0

               Passing compression options as keys in dict is
               supported for compression modes 'gzip', 'bz2', 'zstd', and 'zip'.

            .. versionchanged:: 1.2.0

               Compression is supported for binary file objects.

            .. versionchanged:: 1.2.0

               Previous versions forwarded dict entries for 'gzip' to
               `gzip.open` instead of `gzip.GzipFile` which prevented
               setting `mtime`.

        quoting : optional constant from csv module
            Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
            then floats are converted to strings and thus
            csv.QUOTE_NONNUMERIC will treat them as non-numeric.
        quotechar : str, default '\"'
            String of length 1. Character used to quote fields.
        lineterminator : str, optional
            The newline character or character sequence to use in the output
            file.
Defaults to `os.linesep`, which depends on the OS in which this method is called ('\\n' for linux, '\\r\\n' for Windows, i.e.). .. versionchanged:: 1.5.0 Previously was line_terminator, changed for consistency with read_csv and the standard library 'csv' module. chunksize : int or None Rows to write at a time. date_format : str, default None Format string for datetime objects. doublequote : bool, default True Control quoting of `quotechar` inside a field. escapechar : str, default None String of length 1. Character used to escape `sep` and `quotechar` when appropriate. decimal : str, default '.' Character recognized as decimal separator. E.g. use ',' for European data. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 Returns ------- None or str If path_or_buf is None, returns the resulting csv format as a string. Otherwise returns None. See Also -------- read_csv : Load a CSV file into a DataFrame. to_excel : Write DataFrame to an Excel file. Examples -------- >>> df = pd.DataFrame({{'name': ['Raphael', 'Donatello'], ... 'mask': ['red', 'purple'], ... 'weapon': ['sai', 'bo staff']}}) >>> df.to_csv(index=False) 'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n' Create 'out.zip' containing 'out.csv' >>> compression_opts = dict(method='zip', ... archive_name='out.csv') # doctest: +SKIP >>> df.to_csv('out.zip', index=False, ... compression=compression_opts) # doctest: +SKIP To write a csv file to a new folder or nested folder you will first need to create it using either Pathlib or os: >>> from pathlib import Path # doctest: +SKIP >>> filepath = Path('folder/subfolder/out.csv') # doctest: +SKIP >>> filepath.parent.mkdir(parents=True, exist_ok=True) # doctest: +SKIP >>> df.to_csv(filepath) # doctest: +SKIP >>> import os # doctest: +SKIP >>> os.makedirs('folder/subfolder', exist_ok=True) # doctest: +SKIP >>> df.to_csv('folder/subfolder/out.csv') # doctest: +SKIP """ df = self if isinstance(self, ABCDataFrame) else self.to_frame() formatter = DataFrameFormatter( frame=df, header=header, index=index, na_rep=na_rep, float_format=float_format, decimal=decimal, ) return DataFrameRenderer(formatter).to_csv( path_or_buf, lineterminator=lineterminator, sep=sep, encoding=encoding, errors=errors, compression=compression, quoting=quoting, columns=columns, index_label=index_label, mode=mode, chunksize=chunksize, quotechar=quotechar, date_format=date_format, doublequote=doublequote, escapechar=escapechar, storage_options=storage_options, ) # ---------------------------------------------------------------------- # Lookup Caching def _reset_cacher(self) -> None: """ Reset the cacher. """ raise AbstractMethodError(self) def _maybe_update_cacher( self, clear: bool_t = False, verify_is_copy: bool_t = True, inplace: bool_t = False, ) -> None: """ See if we need to update our parent cacher if clear, then clear our cache. Parameters ---------- clear : bool, default False Clear the item cache. verify_is_copy : bool, default True Provide is_copy checks. 
""" if using_copy_on_write(): return if verify_is_copy: self._check_setitem_copy(t="referent") if clear: self._clear_item_cache() def _clear_item_cache(self) -> None: raise AbstractMethodError(self) # ---------------------------------------------------------------------- # Indexing Methods def take(self: NDFrameT, indices, axis: Axis = 0, **kwargs) -> NDFrameT: """ Return the elements in the given *positional* indices along an axis. This means that we are not indexing according to actual values in the index attribute of the object. We are indexing according to the actual position of the element in the object. Parameters ---------- indices : array-like An array of ints indicating which positions to take. axis : {0 or 'index', 1 or 'columns', None}, default 0 The axis on which to select elements. ``0`` means that we are selecting rows, ``1`` means that we are selecting columns. For `Series` this parameter is unused and defaults to 0. **kwargs For compatibility with :meth:`numpy.take`. Has no effect on the output. Returns ------- same type as caller An array-like containing the elements taken from the object. See Also -------- DataFrame.loc : Select a subset of a DataFrame by labels. DataFrame.iloc : Select a subset of a DataFrame by positions. numpy.take : Take elements from an array along an axis. Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey', 'mammal', np.nan)], ... columns=['name', 'class', 'max_speed'], ... index=[0, 2, 3, 1]) >>> df name class max_speed 0 falcon bird 389.0 2 parrot bird 24.0 3 lion mammal 80.5 1 monkey mammal NaN Take elements at positions 0 and 3 along the axis 0 (default). Note how the actual indices selected (0 and 1) do not correspond to our selected indices 0 and 3. That's because we are selecting the 0th and 3rd rows, not rows whose indices equal 0 and 3. >>> df.take([0, 3]) name class max_speed 0 falcon bird 389.0 1 monkey mammal NaN Take elements at indices 1 and 2 along the axis 1 (column selection). >>> df.take([1, 2], axis=1) class max_speed 0 bird 389.0 2 bird 24.0 3 mammal 80.5 1 mammal NaN We may take elements using negative integers for positive indices, starting from the end of the object, just like with Python lists. >>> df.take([-1, -2]) name class max_speed 1 monkey mammal NaN 3 lion mammal 80.5 """ nv.validate_take((), kwargs) return self._take(indices, axis) def _take( self: NDFrameT, indices, axis: Axis = 0, convert_indices: bool_t = True, ) -> NDFrameT: """ Internal version of the `take` allowing specification of additional args. See the docstring of `take` for full explanation of the parameters. """ if not isinstance(indices, slice): indices = np.asarray(indices, dtype=np.intp) if ( axis == 0 and indices.ndim == 1 and using_copy_on_write() and is_range_indexer(indices, len(self)) ): return self.copy(deep=None) new_data = self._mgr.take( indices, axis=self._get_block_manager_axis(axis), verify=True, convert_indices=convert_indices, ) return self._constructor(new_data).__finalize__(self, method="take") def _take_with_is_copy(self: NDFrameT, indices, axis: Axis = 0) -> NDFrameT: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing for the SettingWithCopyWarning). See the docstring of `take` for full explanation of the parameters. """ result = self._take(indices=indices, axis=axis) # Maybe set copy if we didn't actually change the index. 
if not result._get_axis(axis).equals(self._get_axis(axis)): result._set_is_copy(self) return result def xs( self: NDFrameT, key: IndexLabel, axis: Axis = 0, level: IndexLabel = None, drop_level: bool_t = True, ) -> NDFrameT: """ Return cross-section from the Series/DataFrame. This method takes a `key` argument to select data at a particular level of a MultiIndex. Parameters ---------- key : label or tuple of label Label contained in the index, or partially in a MultiIndex. axis : {0 or 'index', 1 or 'columns'}, default 0 Axis to retrieve cross-section on. level : object, defaults to first n levels (n=1 or len(key)) In case of a key partially contained in a MultiIndex, indicate which levels are used. Levels can be referred by label or position. drop_level : bool, default True If False, returns object with same levels as self. Returns ------- Series or DataFrame Cross-section from the original Series or DataFrame corresponding to the selected index levels. See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. DataFrame.iloc : Purely integer-location based indexing for selection by position. Notes ----- `xs` can not be used to set values. MultiIndex Slicers is a generic way to get/set values on any level or levels. It is a superset of `xs` functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`. Examples -------- >>> d = {'num_legs': [4, 4, 2, 2], ... 'num_wings': [0, 0, 2, 2], ... 'class': ['mammal', 'mammal', 'mammal', 'bird'], ... 'animal': ['cat', 'dog', 'bat', 'penguin'], ... 'locomotion': ['walks', 'walks', 'flies', 'walks']} >>> df = pd.DataFrame(data=d) >>> df = df.set_index(['class', 'animal', 'locomotion']) >>> df num_legs num_wings class animal locomotion mammal cat walks 4 0 dog walks 4 0 bat flies 2 2 bird penguin walks 2 2 Get values at specified index >>> df.xs('mammal') num_legs num_wings animal locomotion cat walks 4 0 dog walks 4 0 bat flies 2 2 Get values at several indexes >>> df.xs(('mammal', 'dog', 'walks')) num_legs 4 num_wings 0 Name: (mammal, dog, walks), dtype: int64 Get values at specified index and level >>> df.xs('cat', level=1) num_legs num_wings class locomotion mammal walks 4 0 Get values at several indexes and levels >>> df.xs(('bird', 'walks'), ... 
                         level=[0, 'locomotion'])
                 num_legs  num_wings
        animal
        penguin         2          2

        Get values at specified column and axis

        >>> df.xs('num_wings', axis=1)
        class   animal   locomotion
        mammal  cat      walks         0
                dog      walks         0
                bat      flies         2
        bird    penguin  walks         2
        Name: num_wings, dtype: int64
        """
        axis = self._get_axis_number(axis)
        labels = self._get_axis(axis)

        if isinstance(key, list):
            raise TypeError("list keys are not supported in xs, pass a tuple instead")

        if level is not None:
            if not isinstance(labels, MultiIndex):
                raise TypeError("Index must be a MultiIndex")
            loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level)

            # create the tuple of the indexer
            _indexer = [slice(None)] * self.ndim
            _indexer[axis] = loc
            indexer = tuple(_indexer)

            result = self.iloc[indexer]
            setattr(result, result._get_axis_name(axis), new_ax)
            return result

        if axis == 1:
            if drop_level:
                return self[key]
            index = self.columns
        else:
            index = self.index

        if isinstance(index, MultiIndex):
            loc, new_index = index._get_loc_level(key, level=0)
            if not drop_level:
                if lib.is_integer(loc):
                    new_index = index[loc : loc + 1]
                else:
                    new_index = index[loc]
        else:
            loc = index.get_loc(key)

            if isinstance(loc, np.ndarray):
                if loc.dtype == np.bool_:
                    (inds,) = loc.nonzero()
                    return self._take_with_is_copy(inds, axis=axis)
                else:
                    return self._take_with_is_copy(loc, axis=axis)

            if not is_scalar(loc):
                new_index = index[loc]

        if is_scalar(loc) and axis == 0:
            # In this case loc should be an integer
            if self.ndim == 1:
                # if we encounter an array-like and we only have 1 dim
                # that means there are list/ndarrays inside the Series!
                # so just return them (GH 6394)
                return self._values[loc]

            new_mgr = self._mgr.fast_xs(loc)

            result = self._constructor_sliced(
                new_mgr, name=self.index[loc]
            ).__finalize__(self)
        elif is_scalar(loc):
            result = self.iloc[:, slice(loc, loc + 1)]
        elif axis == 1:
            result = self.iloc[:, loc]
        else:
            result = self.iloc[loc]
            result.index = new_index

        # this could be a view
        # but only in a single-dtyped view sliceable case
        result._set_is_copy(self, copy=not result._is_view)
        return result

    def __getitem__(self, item):
        raise AbstractMethodError(self)

    def _slice(self: NDFrameT, slobj: slice, axis: Axis = 0) -> NDFrameT:
        """
        Construct a slice of this container.

        Slicing with this method is *always* positional.
        """
        assert isinstance(slobj, slice), type(slobj)

        axis = self._get_block_manager_axis(axis)
        result = self._constructor(self._mgr.get_slice(slobj, axis=axis))
        result = result.__finalize__(self)

        # this could be a view
        # but only in a single-dtyped view sliceable case
        is_copy = axis != 0 or result._is_view
        result._set_is_copy(self, copy=is_copy)
        return result

    def _set_is_copy(self, ref: NDFrame, copy: bool_t = True) -> None:
        if not copy:
            self._is_copy = None
        else:
            assert ref is not None
            self._is_copy = weakref.ref(ref)

    def _check_is_chained_assignment_possible(self) -> bool_t:
        """
        Check if we are a view, have a cacher, and are of mixed type.
        If so, then force a setitem_copy check.

        Should be called just prior to setting a value.

        Will return a boolean if we are a view and are cached, but a
        single-dtype, meaning that the cacher should be updated following
        setting.
        """
        if self._is_view and self._is_cached:
            ref = self._get_cacher()
            if ref is not None and ref._is_mixed_type:
                self._check_setitem_copy(t="referent", force=True)
            return True
        elif self._is_copy:
            self._check_setitem_copy(t="referent")
        return False

    def _check_setitem_copy(self, t: str = "setting", force: bool_t = False):
        """
        Parameters
        ----------
        t : str, the type of setting error
        force : bool, default False
            If True, then force showing an error.

        Validate if we are doing a setitem on a chained copy.
It is technically possible to figure out that we are setting on a copy even WITH a multi-dtyped pandas object. In other words, some blocks may be views while other are not. Currently _is_view will ALWAYS return False for multi-blocks to avoid having to handle this case. df = DataFrame(np.arange(0,9), columns=['count']) df['group'] = 'b' # This technically need not raise SettingWithCopy if both are view # (which is not generally guaranteed but is usually True. However, # this is in general not a good practice and we recommend using .loc. df.iloc[0:5]['group'] = 'a' """ if using_copy_on_write(): return # return early if the check is not needed if not (force or self._is_copy): return value = config.get_option("mode.chained_assignment") if value is None: return # see if the copy is not actually referred; if so, then dissolve # the copy weakref if self._is_copy is not None and not isinstance(self._is_copy, str): r = self._is_copy() if not gc.get_referents(r) or (r is not None and r.shape == self.shape): self._is_copy = None return # a custom message if isinstance(self._is_copy, str): t = self._is_copy elif t == "referent": t = ( "\n" "A value is trying to be set on a copy of a slice from a " "DataFrame\n\n" "See the caveats in the documentation: " "https://pandas.pydata.org/pandas-docs/stable/user_guide/" "indexing.html#returning-a-view-versus-a-copy" ) else: t = ( "\n" "A value is trying to be set on a copy of a slice from a " "DataFrame.\n" "Try using .loc[row_indexer,col_indexer] = value " "instead\n\nSee the caveats in the documentation: " "https://pandas.pydata.org/pandas-docs/stable/user_guide/" "indexing.html#returning-a-view-versus-a-copy" ) if value == "raise": raise SettingWithCopyError(t) if value == "warn": warnings.warn(t, SettingWithCopyWarning, stacklevel=find_stack_level()) def __delitem__(self, key) -> None: """ Delete item """ deleted = False maybe_shortcut = False if self.ndim == 2 and isinstance(self.columns, MultiIndex): try: # By using engine's __contains__ we effectively # restrict to same-length tuples maybe_shortcut = key not in self.columns._engine except TypeError: pass if maybe_shortcut: # Allow shorthand to delete all columns whose first len(key) # elements match key: if not isinstance(key, tuple): key = (key,) for col in self.columns: if isinstance(col, tuple) and col[: len(key)] == key: del self[col] deleted = True if not deleted: # If the above loop ran and didn't delete anything because # there was no match, this call should raise the appropriate # exception: loc = self.axes[-1].get_loc(key) self._mgr = self._mgr.idelete(loc) # delete from the caches try: del self._item_cache[key] except KeyError: pass # ---------------------------------------------------------------------- # Unsorted def _check_inplace_and_allows_duplicate_labels(self, inplace): if inplace and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'inplace=True' when " "'self.flags.allows_duplicate_labels' is False." ) def get(self, key, default=None): """ Get item from object for given key (ex: DataFrame column). Returns default value if not found. Parameters ---------- key : object Returns ------- same type as items contained in object Examples -------- >>> df = pd.DataFrame( ... [ ... [24.3, 75.7, "high"], ... [31, 87.8, "high"], ... [22, 71.6, "medium"], ... [35, 95, "medium"], ... ], ... columns=["temp_celsius", "temp_fahrenheit", "windspeed"], ... index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"), ... 
) >>> df temp_celsius temp_fahrenheit windspeed 2014-02-12 24.3 75.7 high 2014-02-13 31.0 87.8 high 2014-02-14 22.0 71.6 medium 2014-02-15 35.0 95.0 medium >>> df.get(["temp_celsius", "windspeed"]) temp_celsius windspeed 2014-02-12 24.3 high 2014-02-13 31.0 high 2014-02-14 22.0 medium 2014-02-15 35.0 medium >>> ser = df['windspeed'] >>> ser.get('2014-02-13') 'high' If the key isn't found, the default value will be used. >>> df.get(["temp_celsius", "temp_kelvin"], default="default_value") 'default_value' >>> ser.get('2014-02-10', '[unknown]') '[unknown]' """ try: return self[key] except (KeyError, ValueError, IndexError): return default def _is_view(self) -> bool_t: """Return boolean indicating if self is view of another array""" return self._mgr.is_view def reindex_like( self: NDFrameT, other, method: Literal["backfill", "bfill", "pad", "ffill", "nearest"] | None = None, copy: bool_t | None = None, limit=None, tolerance=None, ) -> NDFrameT: """ Return an object with matching indices as other object. Conform the object to the same index on all axes. Optional filling logic, placing NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and copy=False. Parameters ---------- other : Object of the same data type Its row and column indices are used to define the new indices of this object. method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'} Method to use for filling holes in reindexed DataFrame. Please note: this is only applicable to DataFrames/Series with a monotonically increasing/decreasing index. * None (default): don't fill gaps * pad / ffill: propagate last valid observation forward to next valid * backfill / bfill: use next valid observation to fill gap * nearest: use nearest valid observations to fill gap. copy : bool, default True Return a new object, even if the passed indexes are the same. limit : int, default None Maximum number of consecutive labels to fill for inexact matches. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- Series or DataFrame Same type as caller, but with changed indices on each axis. See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex : Change to new indices or expand indices. Notes ----- Same as calling ``.reindex(index=other.index, columns=other.columns,...)``. Examples -------- >>> df1 = pd.DataFrame([[24.3, 75.7, 'high'], ... [31, 87.8, 'high'], ... [22, 71.6, 'medium'], ... [35, 95, 'medium']], ... columns=['temp_celsius', 'temp_fahrenheit', ... 'windspeed'], ... index=pd.date_range(start='2014-02-12', ... end='2014-02-15', freq='D')) >>> df1 temp_celsius temp_fahrenheit windspeed 2014-02-12 24.3 75.7 high 2014-02-13 31.0 87.8 high 2014-02-14 22.0 71.6 medium 2014-02-15 35.0 95.0 medium >>> df2 = pd.DataFrame([[28, 'low'], ... [30, 'low'], ... [35.1, 'medium']], ... columns=['temp_celsius', 'windspeed'], ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13', ... 
'2014-02-15'])) >>> df2 temp_celsius windspeed 2014-02-12 28.0 low 2014-02-13 30.0 low 2014-02-15 35.1 medium >>> df2.reindex_like(df1) temp_celsius temp_fahrenheit windspeed 2014-02-12 28.0 NaN low 2014-02-13 30.0 NaN low 2014-02-14 NaN NaN NaN 2014-02-15 35.1 NaN medium """ d = other._construct_axes_dict( axes=self._AXIS_ORDERS, method=method, copy=copy, limit=limit, tolerance=tolerance, ) return self.reindex(**d) def drop( self, labels: IndexLabel = ..., *, axis: Axis = ..., index: IndexLabel = ..., columns: IndexLabel = ..., level: Level | None = ..., inplace: Literal[True], errors: IgnoreRaise = ..., ) -> None: ... def drop( self: NDFrameT, labels: IndexLabel = ..., *, axis: Axis = ..., index: IndexLabel = ..., columns: IndexLabel = ..., level: Level | None = ..., inplace: Literal[False] = ..., errors: IgnoreRaise = ..., ) -> NDFrameT: ... def drop( self: NDFrameT, labels: IndexLabel = ..., *, axis: Axis = ..., index: IndexLabel = ..., columns: IndexLabel = ..., level: Level | None = ..., inplace: bool_t = ..., errors: IgnoreRaise = ..., ) -> NDFrameT | None: ... def drop( self: NDFrameT, labels: IndexLabel = None, *, axis: Axis = 0, index: IndexLabel = None, columns: IndexLabel = None, level: Level | None = None, inplace: bool_t = False, errors: IgnoreRaise = "raise", ) -> NDFrameT | None: inplace = validate_bool_kwarg(inplace, "inplace") if labels is not None: if index is not None or columns is not None: raise ValueError("Cannot specify both 'labels' and 'index'/'columns'") axis_name = self._get_axis_name(axis) axes = {axis_name: labels} elif index is not None or columns is not None: axes = {"index": index} if self.ndim == 2: axes["columns"] = columns else: raise ValueError( "Need to specify at least one of 'labels', 'index' or 'columns'" ) obj = self for axis, labels in axes.items(): if labels is not None: obj = obj._drop_axis(labels, axis, level=level, errors=errors) if inplace: self._update_inplace(obj) return None else: return obj def _drop_axis( self: NDFrameT, labels, axis, level=None, errors: IgnoreRaise = "raise", only_slice: bool_t = False, ) -> NDFrameT: """ Drop labels from specified axis. Used in the ``drop`` method internally. Parameters ---------- labels : single label or list-like axis : int or axis name level : int or level name, default None For MultiIndex errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and existing labels are dropped. only_slice : bool, default False Whether indexing along columns should be view-only. 
""" axis_num = self._get_axis_number(axis) axis = self._get_axis(axis) if axis.is_unique: if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError("axis must be a MultiIndex") new_axis = axis.drop(labels, level=level, errors=errors) else: new_axis = axis.drop(labels, errors=errors) indexer = axis.get_indexer(new_axis) # Case for non-unique axis else: is_tuple_labels = is_nested_list_like(labels) or isinstance(labels, tuple) labels = ensure_object(common.index_labels_to_array(labels)) if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError("axis must be a MultiIndex") mask = ~axis.get_level_values(level).isin(labels) # GH 18561 MultiIndex.drop should raise if label is absent if errors == "raise" and mask.all(): raise KeyError(f"{labels} not found in axis") elif ( isinstance(axis, MultiIndex) and labels.dtype == "object" and not is_tuple_labels ): # Set level to zero in case of MultiIndex and label is string, # because isin can't handle strings for MultiIndexes GH#36293 # In case of tuples we get dtype object but have to use isin GH#42771 mask = ~axis.get_level_values(0).isin(labels) else: mask = ~axis.isin(labels) # Check if label doesn't exist along axis labels_missing = (axis.get_indexer_for(labels) == -1).any() if errors == "raise" and labels_missing: raise KeyError(f"{labels} not found in axis") if is_extension_array_dtype(mask.dtype): # GH#45860 mask = mask.to_numpy(dtype=bool) indexer = mask.nonzero()[0] new_axis = axis.take(indexer) bm_axis = self.ndim - axis_num - 1 new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=bm_axis, allow_dups=True, copy=None, only_slice=only_slice, ) result = self._constructor(new_mgr) if self.ndim == 1: result.name = self.name return result.__finalize__(self) def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None: """ Replace self internals with result. Parameters ---------- result : same type as self verify_is_copy : bool, default True Provide is_copy checks. """ # NOTE: This does *not* call __finalize__ and that's an explicit # decision that we may revisit in the future. self._reset_cache() self._clear_item_cache() self._mgr = result._mgr self._maybe_update_cacher(verify_is_copy=verify_is_copy, inplace=True) def add_prefix(self: NDFrameT, prefix: str, axis: Axis | None = None) -> NDFrameT: """ Prefix labels with string `prefix`. For Series, the row labels are prefixed. For DataFrame, the column labels are prefixed. Parameters ---------- prefix : str The string to add before each label. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to add prefix on .. versionadded:: 2.0.0 Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_suffix: Suffix row labels with string `suffix`. DataFrame.add_suffix: Suffix column labels with string `suffix`. 
Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_prefix('item_') item_0 1 item_1 2 item_2 3 item_3 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_prefix('col_') col_A col_B 0 1 3 1 2 4 2 3 5 3 4 6 """ f = lambda x: f"{prefix}{x}" axis_name = self._info_axis_name if axis is not None: axis_name = self._get_axis_name(axis) mapper = {axis_name: f} # error: Incompatible return value type (got "Optional[NDFrameT]", # expected "NDFrameT") # error: Argument 1 to "rename" of "NDFrame" has incompatible type # "**Dict[str, partial[str]]"; expected "Union[str, int, None]" # error: Keywords must be strings return self._rename(**mapper) # type: ignore[return-value, arg-type, misc] def add_suffix(self: NDFrameT, suffix: str, axis: Axis | None = None) -> NDFrameT: """ Suffix labels with string `suffix`. For Series, the row labels are suffixed. For DataFrame, the column labels are suffixed. Parameters ---------- suffix : str The string to add after each label. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to add suffix on .. versionadded:: 2.0.0 Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. DataFrame.add_prefix: Prefix column labels with string `prefix`. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_suffix('_item') 0_item 1 1_item 2 2_item 3 3_item 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_suffix('_col') A_col B_col 0 1 3 1 2 4 2 3 5 3 4 6 """ f = lambda x: f"{x}{suffix}" axis_name = self._info_axis_name if axis is not None: axis_name = self._get_axis_name(axis) mapper = {axis_name: f} # error: Incompatible return value type (got "Optional[NDFrameT]", # expected "NDFrameT") # error: Argument 1 to "rename" of "NDFrame" has incompatible type # "**Dict[str, partial[str]]"; expected "Union[str, int, None]" # error: Keywords must be strings return self._rename(**mapper) # type: ignore[return-value, arg-type, misc] def sort_values( self: NDFrameT, *, axis: Axis = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[False] = ..., kind: str = ..., na_position: str = ..., ignore_index: bool_t = ..., key: ValueKeyFunc = ..., ) -> NDFrameT: ... def sort_values( self, *, axis: Axis = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[True], kind: str = ..., na_position: str = ..., ignore_index: bool_t = ..., key: ValueKeyFunc = ..., ) -> None: ... def sort_values( self: NDFrameT, *, axis: Axis = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: bool_t = ..., kind: str = ..., na_position: str = ..., ignore_index: bool_t = ..., key: ValueKeyFunc = ..., ) -> NDFrameT | None: ... def sort_values( self: NDFrameT, *, axis: Axis = 0, ascending: bool_t | Sequence[bool_t] = True, inplace: bool_t = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool_t = False, key: ValueKeyFunc = None, ) -> NDFrameT | None: """ Sort by the values along either axis. Parameters ----------%(optional_by)s axis : %(axes_single_arg)s, default 0 Axis to be sorted. ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False If True, perform operation in-place. 
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. `mergesort` and `stable` are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' Puts NaNs at the beginning if `first`; `last` puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional Apply the key function to the values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return a Series with the same shape as the input. It will be applied to each column in `by` independently. .. versionadded:: 1.1.0 Returns ------- DataFrame or None DataFrame with sorted values or None if ``inplace=True``. See Also -------- DataFrame.sort_index : Sort a DataFrame by the index. Series.sort_values : Similar method for a Series. Examples -------- >>> df = pd.DataFrame({ ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'], ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... 'col4': ['a', 'B', 'c', 'D', 'e', 'F'] ... }) >>> df col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F Sort by col1 >>> df.sort_values(by=['col1']) col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 5 C 4 3 F 4 D 7 2 e 3 NaN 8 4 D Sort by multiple columns >>> df.sort_values(by=['col1', 'col2']) col1 col2 col3 col4 1 A 1 1 B 0 A 2 0 a 2 B 9 9 c 5 C 4 3 F 4 D 7 2 e 3 NaN 8 4 D Sort Descending >>> df.sort_values(by='col1', ascending=False) col1 col2 col3 col4 4 D 7 2 e 5 C 4 3 F 2 B 9 9 c 0 A 2 0 a 1 A 1 1 B 3 NaN 8 4 D Putting NAs first >>> df.sort_values(by='col1', ascending=False, na_position='first') col1 col2 col3 col4 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F 2 B 9 9 c 0 A 2 0 a 1 A 1 1 B Sorting with a key function >>> df.sort_values(by='col4', key=lambda col: col.str.lower()) col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F Natural sort with the key argument, using the `natsort <https://github.com/SethMMorton/natsort>` package. >>> df = pd.DataFrame({ ... "time": ['0hr', '128hr', '72hr', '48hr', '96hr'], ... "value": [10, 20, 30, 40, 50] ... }) >>> df time value 0 0hr 10 1 128hr 20 2 72hr 30 3 48hr 40 4 96hr 50 >>> from natsort import index_natsorted >>> df.sort_values( ... by="time", ... key=lambda x: np.argsort(index_natsorted(df["time"])) ... ) time value 0 0hr 10 3 48hr 40 2 72hr 30 4 96hr 50 1 128hr 20 """ raise AbstractMethodError(self) def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[True], kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool_t = ..., ignore_index: bool_t = ..., key: IndexKeyFunc = ..., ) -> None: ... def sort_index( self: NDFrameT, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[False] = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool_t = ..., ignore_index: bool_t = ..., key: IndexKeyFunc = ..., ) -> NDFrameT: ... 
def sort_index( self: NDFrameT, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: bool_t = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool_t = ..., ignore_index: bool_t = ..., key: IndexKeyFunc = ..., ) -> NDFrameT | None: ... def sort_index( self: NDFrameT, *, axis: Axis = 0, level: IndexLabel = None, ascending: bool_t | Sequence[bool_t] = True, inplace: bool_t = False, kind: SortKind = "quicksort", na_position: NaPosition = "last", sort_remaining: bool_t = True, ignore_index: bool_t = False, key: IndexKeyFunc = None, ) -> NDFrameT | None: inplace = validate_bool_kwarg(inplace, "inplace") axis = self._get_axis_number(axis) ascending = validate_ascending(ascending) target = self._get_axis(axis) indexer = get_indexer_indexer( target, level, ascending, kind, na_position, sort_remaining, key ) if indexer is None: if inplace: result = self else: result = self.copy(deep=None) if ignore_index: result.index = default_index(len(self)) if inplace: return None else: return result baxis = self._get_block_manager_axis(axis) new_data = self._mgr.take(indexer, axis=baxis, verify=False) # reconstruct axis if needed new_data.set_axis(baxis, new_data.axes[baxis]._sort_levels_monotonic()) if ignore_index: axis = 1 if isinstance(self, ABCDataFrame) else 0 new_data.set_axis(axis, default_index(len(indexer))) result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="sort_index") @doc( klass=_shared_doc_kwargs["klass"], optional_reindex="", ) def reindex( self: NDFrameT, labels=None, index=None, columns=None, axis: Axis | None = None, method: str | None = None, copy: bool_t | None = None, level: Level | None = None, fill_value: Scalar | None = np.nan, limit: int | None = None, tolerance=None, ) -> NDFrameT: """ Conform {klass} to new index with optional filling logic. Places NA/NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and ``copy=False``. Parameters ---------- {optional_reindex} method : {{None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}} Method to use for filling holes in reindexed DataFrame. Please note: this is only applicable to DataFrames/Series with a monotonically increasing/decreasing index. * None (default): don't fill gaps * pad / ffill: Propagate last valid observation forward to next valid. * backfill / bfill: Use next valid observation to fill gap. * nearest: Use nearest valid observations to fill gap. copy : bool, default True Return a new object, even if the passed indexes are the same. level : int or name Broadcast across a level, matching Index values on the passed MultiIndex level. fill_value : scalar, default np.NaN Value to use for missing values. Defaults to NaN, but can be any "compatible" value. limit : int, default None Maximum number of consecutive elements to forward or backward fill. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- {klass} with changed index.
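# Illustrative aside, not part of the original module: ``sort_index`` on a
# MultiIndex, sorting only the outer level. ``sort_remaining=False`` leaves
# the inner level in its original order within each outer group. Only public
# pandas APIs are assumed.
import pandas as pd

_midx = pd.MultiIndex.from_product([["b", "a"], [2, 1]])
_s = pd.Series([1, 2, 3, 4], index=_midx)
print(_s.sort_index(level=0, sort_remaining=False))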
See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex_like : Change to same indices as other DataFrame. Examples -------- ``DataFrame.reindex`` supports two calling conventions * ``(index=index_labels, columns=column_labels, ...)`` * ``(labels, axis={{'index', 'columns'}}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. Create a dataframe with some fictional data. >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror'] >>> df = pd.DataFrame({{'http_status': [200, 200, 404, 404, 301], ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}}, ... index=index) >>> df http_status response_time Firefox 200 0.04 Chrome 200 0.02 Safari 404 0.07 IE10 404 0.08 Konqueror 301 1.00 Create a new index and reindex the dataframe. By default values in the new index that do not have corresponding records in the dataframe are assigned ``NaN``. >>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10', ... 'Chrome'] >>> df.reindex(new_index) http_status response_time Safari 404.0 0.07 Iceweasel NaN NaN Comodo Dragon NaN NaN IE10 404.0 0.08 Chrome 200.0 0.02 We can fill in the missing values by passing a value to the keyword ``fill_value``. Because the index is not monotonically increasing or decreasing, we cannot use arguments to the keyword ``method`` to fill the ``NaN`` values. >>> df.reindex(new_index, fill_value=0) http_status response_time Safari 404 0.07 Iceweasel 0 0.00 Comodo Dragon 0 0.00 IE10 404 0.08 Chrome 200 0.02 >>> df.reindex(new_index, fill_value='missing') http_status response_time Safari 404 0.07 Iceweasel missing missing Comodo Dragon missing missing IE10 404 0.08 Chrome 200 0.02 We can also reindex the columns. >>> df.reindex(columns=['http_status', 'user_agent']) http_status user_agent Firefox 200 NaN Chrome 200 NaN Safari 404 NaN IE10 404 NaN Konqueror 301 NaN Or we can use "axis-style" keyword arguments >>> df.reindex(['http_status', 'user_agent'], axis="columns") http_status user_agent Firefox 200 NaN Chrome 200 NaN Safari 404 NaN IE10 404 NaN Konqueror 301 NaN To further illustrate the filling functionality in ``reindex``, we will create a dataframe with a monotonically increasing index (for example, a sequence of dates). >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D') >>> df2 = pd.DataFrame({{"prices": [100, 101, np.nan, 100, 89, 88]}}, ... index=date_index) >>> df2 prices 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 Suppose we decide to expand the dataframe to cover a wider date range. >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D') >>> df2.reindex(date_index2) prices 2009-12-29 NaN 2009-12-30 NaN 2009-12-31 NaN 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN The index entries that did not have a value in the original data frame (for example, '2009-12-29') are by default filled with ``NaN``. If desired, we can fill in the missing values using one of several options. For example, to back-propagate the last valid value to fill the ``NaN`` values, pass ``bfill`` as an argument to the ``method`` keyword. 
>>> df2.reindex(date_index2, method='bfill') prices 2009-12-29 100.0 2009-12-30 100.0 2009-12-31 100.0 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN Please note that the ``NaN`` value present in the original dataframe (at index value 2010-01-03) will not be filled by any of the value propagation schemes. This is because filling while reindexing does not look at dataframe values, but only compares the original and desired indexes. If you do want to fill in the ``NaN`` values present in the original dataframe, use the ``fillna()`` method. See the :ref:`user guide <basics.reindexing>` for more. """ # TODO: Decide if we care about having different examples for different # kinds if index is not None and columns is not None and labels is not None: raise TypeError("Cannot specify all of 'labels', 'index', 'columns'.") elif index is not None or columns is not None: if axis is not None: raise TypeError( "Cannot specify both 'axis' and any of 'index' or 'columns'" ) if labels is not None: if index is not None: columns = labels else: index = labels else: if axis and self._get_axis_number(axis) == 1: columns = labels else: index = labels axes: dict[Literal["index", "columns"], Any] = { "index": index, "columns": columns, } method = clean_reindex_fill_method(method) # if all axes that are requested to reindex are equal, then only copy # if indicated must have index names equal here as well as values if copy and using_copy_on_write(): copy = False if all( self._get_axis(axis_name).identical(ax) for axis_name, ax in axes.items() if ax is not None ): return self.copy(deep=copy) # check if we are a multi reindex if self._needs_reindex_multi(axes, method, level): return self._reindex_multi(axes, copy, fill_value) # perform the reindex on the axes return self._reindex_axes( axes, level, limit, tolerance, method, fill_value, copy ).__finalize__(self, method="reindex") def _reindex_axes( self: NDFrameT, axes, level, limit, tolerance, method, fill_value, copy ) -> NDFrameT: """Perform the reindex for all the axes.""" obj = self for a in self._AXIS_ORDERS: labels = axes[a] if labels is None: continue ax = self._get_axis(a) new_index, indexer = ax.reindex( labels, level=level, limit=limit, tolerance=tolerance, method=method ) axis = self._get_axis_number(a) obj = obj._reindex_with_indexers( {axis: [new_index, indexer]}, fill_value=fill_value, copy=copy, allow_dups=False, ) # If we've made a copy once, no need to make another one copy = False return obj def _needs_reindex_multi(self, axes, method, level) -> bool_t: """Check if we do need a multi reindex.""" return ( (common.count_not_none(*axes.values()) == self._AXIS_LEN) and method is None and level is None and not self._is_mixed_type and not ( self.ndim == 2 and len(self.dtypes) == 1 and is_extension_array_dtype(self.dtypes.iloc[0]) ) ) def _reindex_multi(self, axes, copy, fill_value): raise AbstractMethodError(self) def _reindex_with_indexers( self: NDFrameT, reindexers, fill_value=None, copy: bool_t | None = False, allow_dups: bool_t = False, ) -> NDFrameT: """allow_dups indicates an internal call here""" # reindex doing multiple operations on different axes if indicated new_data = self._mgr for axis in sorted(reindexers.keys()): index, indexer = reindexers[axis] baxis = self._get_block_manager_axis(axis) if index is None: continue index = ensure_index(index) if indexer is not None: indexer = ensure_platform_int(indexer) # TODO: speed up on homogeneous DataFrame objects (see _reindex_multi) 
new_data = new_data.reindex_indexer( index, indexer, axis=baxis, fill_value=fill_value, allow_dups=allow_dups, copy=copy, ) # If we've made a copy once, no need to make another one copy = False if ( (copy or copy is None) and new_data is self._mgr and not using_copy_on_write() ): new_data = new_data.copy(deep=copy) elif using_copy_on_write() and new_data is self._mgr: new_data = new_data.copy(deep=False) return self._constructor(new_data).__finalize__(self) def filter( self: NDFrameT, items=None, like: str | None = None, regex: str | None = None, axis: Axis | None = None, ) -> NDFrameT: """ Subset the dataframe rows or columns according to the specified index labels. Note that this routine does not filter a dataframe on its contents. The filter is applied to the labels of the index. Parameters ---------- items : list-like Keep labels from axis which are in items. like : str Keep labels from axis for which "like in label == True". regex : str (regular expression) Keep labels from axis for which re.search(regex, label) == True. axis : {0 or ‘index’, 1 or ‘columns’, None}, default None The axis to filter on, expressed either as an index (int) or axis name (str). By default this is the info axis, 'columns' for DataFrame. For `Series` this parameter is unused and defaults to `None`. Returns ------- same type as input object See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. Notes ----- The ``items``, ``like``, and ``regex`` parameters are enforced to be mutually exclusive. ``axis`` defaults to the info axis that is used when indexing with ``[]``. Examples -------- >>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])), ... index=['mouse', 'rabbit'], ... columns=['one', 'two', 'three']) >>> df one two three mouse 1 2 3 rabbit 4 5 6 >>> # select columns by name >>> df.filter(items=['one', 'three']) one three mouse 1 3 rabbit 4 6 >>> # select columns by regular expression >>> df.filter(regex='e$', axis=1) one three mouse 1 3 rabbit 4 6 >>> # select rows containing 'bbi' >>> df.filter(like='bbi', axis=0) one two three rabbit 4 5 6 """ nkw = common.count_not_none(items, like, regex) if nkw > 1: raise TypeError( "Keyword arguments `items`, `like`, or `regex` " "are mutually exclusive" ) if axis is None: axis = self._info_axis_name labels = self._get_axis(axis) if items is not None: name = self._get_axis_name(axis) # error: Keywords must be strings return self.reindex( # type: ignore[misc] **{name: [r for r in items if r in labels]} # type: ignore[arg-type] ) elif like: def f(x) -> bool_t: assert like is not None # needed for mypy return like in ensure_str(x) values = labels.map(f) return self.loc(axis=axis)[values] elif regex: def f(x) -> bool_t: return matcher.search(ensure_str(x)) is not None matcher = re.compile(regex) values = labels.map(f) return self.loc(axis=axis)[values] else: raise TypeError("Must pass either `items`, `like`, or `regex`") def head(self: NDFrameT, n: int = 5) -> NDFrameT: """ Return the first `n` rows. This function returns the first `n` rows for the object based on position. It is useful for quickly testing if your object has the right type of data in it. For negative values of `n`, this function returns all rows except the last `|n|` rows, equivalent to ``df[:n]``. If n is larger than the number of rows, this function returns all rows. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- same type as caller The first `n` rows of the caller object. 
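# Illustrative aside, not part of the original module: the three mutually
# exclusive ``filter`` selectors documented above, shown on one frame. Only
# public pandas APIs are assumed.
import pandas as pd

_df = pd.DataFrame({"one": [1, 4], "two": [2, 5], "three": [3, 6]},
                   index=["mouse", "rabbit"])
print(_df.filter(items=["one", "three"]))       # keep exact column labels
print(_df.filter(regex="^t", axis="columns"))   # columns matching a regex
print(_df.filter(like="bbi", axis="index"))     # row labels containing 'bbi'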
See Also -------- DataFrame.tail: Returns the last `n` rows. Examples -------- >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the first 5 lines >>> df.head() animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey Viewing the first `n` lines (three in this case) >>> df.head(3) animal 0 alligator 1 bee 2 falcon For negative values of `n` >>> df.head(-3) animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot """ return self.iloc[:n] def tail(self: NDFrameT, n: int = 5) -> NDFrameT: """ Return the last `n` rows. This function returns last `n` rows from the object based on position. It is useful for quickly verifying data, for example, after sorting or appending rows. For negative values of `n`, this function returns all rows except the first `|n|` rows, equivalent to ``df[|n|:]``. If n is larger than the number of rows, this function returns all rows. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- type of caller The last `n` rows of the caller object. See Also -------- DataFrame.head : The first `n` rows of the caller object. Examples -------- >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last 5 lines >>> df.tail() animal 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last `n` lines (three in this case) >>> df.tail(3) animal 6 shark 7 whale 8 zebra For negative values of `n` >>> df.tail(-3) animal 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra """ if n == 0: return self.iloc[0:0] return self.iloc[-n:] def sample( self: NDFrameT, n: int | None = None, frac: float | None = None, replace: bool_t = False, weights=None, random_state: RandomState | None = None, axis: Axis | None = None, ignore_index: bool_t = False, ) -> NDFrameT: """ Return a random sample of items from an axis of object. You can use `random_state` for reproducibility. Parameters ---------- n : int, optional Number of items from axis to return. Cannot be used with `frac`. Default = 1 if `frac` = None. frac : float, optional Fraction of axis items to return. Cannot be used with `n`. replace : bool, default False Allow or disallow sampling of the same row more than once. weights : str or ndarray-like, optional Default 'None' results in equal probability weighting. If passed a Series, will align with target object on index. Index values in weights not found in sampled object will be ignored and index values in sampled object not in weights will be assigned weights of zero. If called on a DataFrame, will accept the name of a column when axis = 0. Unless weights are a Series, weights must be same length as axis being sampled. If weights do not sum to 1, they will be normalized to sum to 1. Missing values in the weights column will be treated as zero. Infinite values not allowed. random_state : int, array-like, BitGenerator, np.random.RandomState, np.random.Generator, optional If int, array-like, or BitGenerator, seed for random number generator. If np.random.RandomState or np.random.Generator, use as given. .. versionchanged:: 1.1.0 array-like and BitGenerator object now passed to np.random.RandomState() as seed .. 
versionchanged:: 1.4.0 np.random.Generator objects now accepted axis : {0 or ‘index’, 1 or ‘columns’, None}, default None Axis to sample. Accepts axis number or name. Default is stat axis for given data type. For `Series` this parameter is unused and defaults to `None`. ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.3.0 Returns ------- Series or DataFrame A new object of same type as caller containing `n` items randomly sampled from the caller object. See Also -------- DataFrameGroupBy.sample: Generates random samples from each group of a DataFrame object. SeriesGroupBy.sample: Generates random samples from each group of a Series object. numpy.random.choice: Generates a random sample from a given 1-D numpy array. Notes ----- If `frac` > 1, `replacement` should be set to `True`. Examples -------- >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0], ... 'num_wings': [2, 0, 0, 0], ... 'num_specimen_seen': [10, 2, 1, 8]}, ... index=['falcon', 'dog', 'spider', 'fish']) >>> df num_legs num_wings num_specimen_seen falcon 2 2 10 dog 4 0 2 spider 8 0 1 fish 0 0 8 Extract 3 random elements from the ``Series`` ``df['num_legs']``: Note that we use `random_state` to ensure the reproducibility of the examples. >>> df['num_legs'].sample(n=3, random_state=1) fish 0 spider 8 falcon 2 Name: num_legs, dtype: int64 A random 50% sample of the ``DataFrame`` with replacement: >>> df.sample(frac=0.5, replace=True, random_state=1) num_legs num_wings num_specimen_seen dog 4 0 2 fish 0 0 8 An upsample sample of the ``DataFrame`` with replacement: Note that `replace` parameter has to be `True` for `frac` parameter > 1. >>> df.sample(frac=2, replace=True, random_state=1) num_legs num_wings num_specimen_seen dog 4 0 2 fish 0 0 8 falcon 2 2 10 falcon 2 2 10 fish 0 0 8 dog 4 0 2 fish 0 0 8 dog 4 0 2 Using a DataFrame column as weights. Rows with larger value in the `num_specimen_seen` column are more likely to be sampled. >>> df.sample(n=2, weights='num_specimen_seen', random_state=1) num_legs num_wings num_specimen_seen falcon 2 2 10 fish 0 0 8 """ # noqa:E501 if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) obj_len = self.shape[axis] # Process random_state argument rs = common.random_state(random_state) size = sample.process_sampling_size(n, frac, replace) if size is None: assert frac is not None size = round(frac * obj_len) if weights is not None: weights = sample.preprocess_weights(self, weights, axis) sampled_indices = sample.sample(obj_len, size, replace, weights, rs) result = self.take(sampled_indices, axis=axis) if ignore_index: result.index = default_index(len(result)) return result def pipe( self, func: Callable[..., T] | tuple[Callable[..., T], str], *args, **kwargs, ) -> T: r""" Apply chainable functions that expect Series or DataFrames. Parameters ---------- func : function Function to apply to the {klass}. ``args``, and ``kwargs`` are passed into ``func``. Alternatively a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the {klass}. args : iterable, optional Positional arguments passed into ``func``. kwargs : mapping, optional A dictionary of keyword arguments passed into ``func``. Returns ------- the return type of ``func``. See Also -------- DataFrame.apply : Apply a function along input axis of DataFrame. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. 
Series.map : Apply a mapping correspondence on a :class:`~pandas.Series`. Notes ----- Use ``.pipe`` when chaining together functions that expect Series, DataFrames or GroupBy objects. Instead of writing >>> func(g(h(df), arg1=a), arg2=b, arg3=c) # doctest: +SKIP You can write >>> (df.pipe(h) ... .pipe(g, arg1=a) ... .pipe(func, arg2=b, arg3=c) ... ) # doctest: +SKIP If you have a function that takes the data as (say) the second argument, pass a tuple indicating which keyword expects the data. For example, suppose ``func`` takes its data as ``arg2``: >>> (df.pipe(h) ... .pipe(g, arg1=a) ... .pipe((func, 'arg2'), arg1=a, arg3=c) ... ) # doctest: +SKIP """ if using_copy_on_write(): return common.pipe(self.copy(deep=None), func, *args, **kwargs) return common.pipe(self, func, *args, **kwargs) # ---------------------------------------------------------------------- # Attribute access def __finalize__( self: NDFrameT, other, method: str | None = None, **kwargs ) -> NDFrameT: """ Propagate metadata from other to self. Parameters ---------- other : the object from which to get the attributes that we are going to propagate method : str, optional A passed method name providing context on where ``__finalize__`` was called. .. warning:: The value passed as `method` are not currently considered stable across pandas releases. """ if isinstance(other, NDFrame): for name in other.attrs: self.attrs[name] = other.attrs[name] self.flags.allows_duplicate_labels = other.flags.allows_duplicate_labels # For subclasses using _metadata. for name in set(self._metadata) & set(other._metadata): assert isinstance(name, str) object.__setattr__(self, name, getattr(other, name, None)) if method == "concat": attrs = other.objs[0].attrs check_attrs = all(objs.attrs == attrs for objs in other.objs[1:]) if check_attrs: for name in attrs: self.attrs[name] = attrs[name] allows_duplicate_labels = all( x.flags.allows_duplicate_labels for x in other.objs ) self.flags.allows_duplicate_labels = allows_duplicate_labels return self def __getattr__(self, name: str): """ After regular attribute access, try looking up the name This allows simpler access to columns for interactive use. """ # Note: obj.x will always call obj.__getattribute__('x') prior to # calling obj.__getattr__('x'). if ( name not in self._internal_names_set and name not in self._metadata and name not in self._accessors and self._info_axis._can_hold_identifiers_and_holds_name(name) ): return self[name] return object.__getattribute__(self, name) def __setattr__(self, name: str, value) -> None: """ After regular attribute access, try setting the name This allows simpler access to columns for interactive use. """ # first try regular attribute access via __getattribute__, so that # e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify # the same attribute. try: object.__getattribute__(self, name) return object.__setattr__(self, name, value) except AttributeError: pass # if this fails, go on to more involved attribute setting # (note that this matches __getattr__, above). 
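# Illustrative aside, not part of the original module: the ``(callable,
# data_keyword)`` form of ``pipe`` documented above. ``_scale`` is a
# hypothetical helper that takes the frame as its *second* argument.
import pandas as pd

def _scale(factor, data):
    # receives the piped object via the 'data' keyword
    return data * factor

_df = pd.DataFrame({"a": [1, 2, 3]})
print(_df.pipe((_scale, "data"), 2))  # equivalent to _scale(2, data=_df)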
if name in self._internal_names_set: object.__setattr__(self, name, value) elif name in self._metadata: object.__setattr__(self, name, value) else: try: existing = getattr(self, name) if isinstance(existing, Index): object.__setattr__(self, name, value) elif name in self._info_axis: self[name] = value else: object.__setattr__(self, name, value) except (AttributeError, TypeError): if isinstance(self, ABCDataFrame) and (is_list_like(value)): warnings.warn( "Pandas doesn't allow columns to be " "created via a new attribute name - see " "https://pandas.pydata.org/pandas-docs/" "stable/indexing.html#attribute-access", stacklevel=find_stack_level(), ) object.__setattr__(self, name, value) def _dir_additions(self) -> set[str]: """ add the string-like attributes from the info_axis. If info_axis is a MultiIndex, its first level values are used. """ additions = super()._dir_additions() if self._info_axis._can_hold_strings: additions.update(self._info_axis._dir_additions_for_owner) return additions # ---------------------------------------------------------------------- # Consolidation of internals def _protect_consolidate(self, f): """ Consolidate _mgr -- if the blocks have changed, then clear the cache """ if isinstance(self._mgr, (ArrayManager, SingleArrayManager)): return f() blocks_before = len(self._mgr.blocks) result = f() if len(self._mgr.blocks) != blocks_before: self._clear_item_cache() return result def _consolidate_inplace(self) -> None: """Consolidate data in place and return None""" def f() -> None: self._mgr = self._mgr.consolidate() self._protect_consolidate(f) def _consolidate(self): """ Compute NDFrame with "consolidated" internals (data of each dtype grouped together in a single ndarray). Returns ------- consolidated : same type as caller """ f = lambda: self._mgr.consolidate() cons_data = self._protect_consolidate(f) return self._constructor(cons_data).__finalize__(self) def _is_mixed_type(self) -> bool_t: if self._mgr.is_single_block: return False if self._mgr.any_extension_types: # Even if they have the same dtype, we can't consolidate them, # so we pretend this is "mixed'" return True return self.dtypes.nunique() > 1 def _check_inplace_setting(self, value) -> bool_t: """check whether we allow in-place setting with this type of value""" if self._is_mixed_type and not self._mgr.is_numeric_mixed_type: # allow an actual np.nan through if is_float(value) and np.isnan(value) or value is lib.no_default: return True raise TypeError( "Cannot do inplace boolean setting on " "mixed-types with a non np.nan value" ) return True def _get_numeric_data(self: NDFrameT) -> NDFrameT: return self._constructor(self._mgr.get_numeric_data()).__finalize__(self) def _get_bool_data(self): return self._constructor(self._mgr.get_bool_data()).__finalize__(self) # ---------------------------------------------------------------------- # Internal Interface Methods def values(self): raise AbstractMethodError(self) def _values(self) -> ArrayLike: """internal implementation""" raise AbstractMethodError(self) def dtypes(self): """ Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the ``object`` dtype. See :ref:`the User Guide <basics.dtypes>` for more. Returns ------- pandas.Series The data type of each column. Examples -------- >>> df = pd.DataFrame({'float': [1.0], ... 'int': [1], ... 'datetime': [pd.Timestamp('20180310')], ... 
'string': ['foo']}) >>> df.dtypes float float64 int int64 datetime datetime64[ns] string object dtype: object """ data = self._mgr.get_dtypes() return self._constructor_sliced(data, index=self._info_axis, dtype=np.object_) def astype( self: NDFrameT, dtype, copy: bool_t | None = None, errors: IgnoreRaise = "raise" ) -> NDFrameT: """ Cast a pandas object to a specified dtype ``dtype``. Parameters ---------- dtype : str, data type, Series or Mapping of column name -> data type Use a str, numpy.dtype, pandas.ExtensionDtype or Python type to cast entire pandas object to the same type. Alternatively, use a mapping, e.g. {col: dtype, ...}, where col is a column label and dtype is a numpy.dtype or Python type to cast one or more of the DataFrame's columns to column-specific types. copy : bool, default True Return a copy when ``copy=True`` (be very careful setting ``copy=False`` as changes to values then may propagate to other pandas objects). errors : {'raise', 'ignore'}, default 'raise' Control raising of exceptions on invalid data for provided dtype. - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. On error return original object. Returns ------- same type as caller See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to a numeric type. numpy.ndarray.astype : Cast a numpy array to a specified type. Notes ----- .. versionchanged:: 2.0.0 Using ``astype`` to convert from timezone-naive dtype to timezone-aware dtype will raise an exception. Use :meth:`Series.dt.tz_localize` instead. Examples -------- Create a DataFrame: >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df.dtypes col1 int64 col2 int64 dtype: object Cast all columns to int32: >>> df.astype('int32').dtypes col1 int32 col2 int32 dtype: object Cast col1 to int32 using a dictionary: >>> df.astype({'col1': 'int32'}).dtypes col1 int32 col2 int64 dtype: object Create a series: >>> ser = pd.Series([1, 2], dtype='int32') >>> ser 0 1 1 2 dtype: int32 >>> ser.astype('int64') 0 1 1 2 dtype: int64 Convert to categorical type: >>> ser.astype('category') 0 1 1 2 dtype: category Categories (2, int32): [1, 2] Convert to ordered categorical type with custom ordering: >>> from pandas.api.types import CategoricalDtype >>> cat_dtype = CategoricalDtype( ... categories=[2, 1], ordered=True) >>> ser.astype(cat_dtype) 0 1 1 2 dtype: category Categories (2, int64): [2 < 1] Create a series of dates: >>> ser_date = pd.Series(pd.date_range('20200101', periods=3)) >>> ser_date 0 2020-01-01 1 2020-01-02 2 2020-01-03 dtype: datetime64[ns] """ if copy and using_copy_on_write(): copy = False if is_dict_like(dtype): if self.ndim == 1: # i.e. Series if len(dtype) > 1 or self.name not in dtype: raise KeyError( "Only the Series name can be used for " "the key in Series dtype mappings." ) new_type = dtype[self.name] return self.astype(new_type, copy, errors) # GH#44417 cast to Series so we can use .iat below, which will be # robust in case we from pandas import Series dtype_ser = Series(dtype, dtype=object) for col_name in dtype_ser.index: if col_name not in self: raise KeyError( "Only a column name can be used for the " "key in a dtype mappings argument. " f"'{col_name}' not found in columns." 
) dtype_ser = dtype_ser.reindex(self.columns, fill_value=None, copy=False) results = [] for i, (col_name, col) in enumerate(self.items()): cdt = dtype_ser.iat[i] if isna(cdt): res_col = col.copy(deep=copy) else: try: res_col = col.astype(dtype=cdt, copy=copy, errors=errors) except ValueError as ex: ex.args = ( f"{ex}: Error while type casting for column '{col_name}'", ) raise results.append(res_col) elif is_extension_array_dtype(dtype) and self.ndim > 1: # GH 18099/22869: columnwise conversion to extension dtype # GH 24704: use iloc to handle duplicate column names # TODO(EA2D): special case not needed with 2D EAs results = [ self.iloc[:, i].astype(dtype, copy=copy) for i in range(len(self.columns)) ] else: # else, only a single dtype is given new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors) return self._constructor(new_data).__finalize__(self, method="astype") # GH 33113: handle empty frame or series if not results: return self.copy(deep=None) # GH 19920: retain column metadata after concat result = concat(results, axis=1, copy=False) # GH#40810 retain subclass # error: Incompatible types in assignment # (expression has type "NDFrameT", variable has type "DataFrame") result = self._constructor(result) # type: ignore[assignment] result.columns = self.columns result = result.__finalize__(self, method="astype") # https://github.com/python/mypy/issues/8354 return cast(NDFrameT, result) def copy(self: NDFrameT, deep: bool_t | None = True) -> NDFrameT: """ Make a copy of this object's indices and data. When ``deep=True`` (default), a new object will be created with a copy of the calling object's data and indices. Modifications to the data or indices of the copy will not be reflected in the original object (see notes below). When ``deep=False``, a new object will be created without copying the calling object's data or index (only references to the data and index are copied). Any changes to the data of the original will be reflected in the shallow copy (and vice versa). Parameters ---------- deep : bool, default True Make a deep copy, including a copy of the data and the indices. With ``deep=False`` neither the indices nor the data are copied. Returns ------- Series or DataFrame Object type matches caller. Notes ----- When ``deep=True``, data is copied but actual Python objects will not be copied recursively, only the reference to the object. This is in contrast to `copy.deepcopy` in the Standard Library, which recursively copies object data (see examples below). While ``Index`` objects are copied when ``deep=True``, the underlying numpy array is not copied for performance reasons. Since ``Index`` is immutable, the underlying data can be safely shared and a copy is not needed. Since pandas is not thread safe, see the :ref:`gotchas <gotchas.thread-safety>` when copying in a threading environment. Examples -------- >>> s = pd.Series([1, 2], index=["a", "b"]) >>> s a 1 b 2 dtype: int64 >>> s_copy = s.copy() >>> s_copy a 1 b 2 dtype: int64 **Shallow copy versus default (deep) copy:** >>> s = pd.Series([1, 2], index=["a", "b"]) >>> deep = s.copy() >>> shallow = s.copy(deep=False) Shallow copy shares data and index with original. >>> s is shallow False >>> s.values is shallow.values and s.index is shallow.index True Deep copy has own copy of data and index. >>> s is deep False >>> s.values is deep.values or s.index is deep.index False Updates to the data shared by shallow copy and original is reflected in both; deep copy remains unchanged. 
>>> s[0] = 3 >>> shallow[1] = 4 >>> s a 3 b 4 dtype: int64 >>> shallow a 3 b 4 dtype: int64 >>> deep a 1 b 2 dtype: int64 Note that when copying an object containing Python objects, a deep copy will copy the data, but will not do so recursively. Updating a nested data object will be reflected in the deep copy. >>> s = pd.Series([[1, 2], [3, 4]]) >>> deep = s.copy() >>> s[0][0] = 10 >>> s 0 [10, 2] 1 [3, 4] dtype: object >>> deep 0 [10, 2] 1 [3, 4] dtype: object """ data = self._mgr.copy(deep=deep) self._clear_item_cache() return self._constructor(data).__finalize__(self, method="copy") def __copy__(self: NDFrameT, deep: bool_t = True) -> NDFrameT: return self.copy(deep=deep) def __deepcopy__(self: NDFrameT, memo=None) -> NDFrameT: """ Parameters ---------- memo, default None Standard signature. Unused """ return self.copy(deep=True) def infer_objects(self: NDFrameT, copy: bool_t | None = None) -> NDFrameT: """ Attempt to infer better dtypes for object columns. Attempts soft conversion of object-dtyped columns, leaving non-object and unconvertible columns unchanged. The inference rules are the same as during normal Series/DataFrame construction. Parameters ---------- copy : bool, default True Whether to make a copy for non-object or non-inferrable columns or Series. Returns ------- same type as input object See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to numeric type. convert_dtypes : Convert argument to best possible dtype. Examples -------- >>> df = pd.DataFrame({"A": ["a", 1, 2, 3]}) >>> df = df.iloc[1:] >>> df A 1 1 2 2 3 3 >>> df.dtypes A object dtype: object >>> df.infer_objects().dtypes A int64 dtype: object """ new_mgr = self._mgr.convert(copy=copy) return self._constructor(new_mgr).__finalize__(self, method="infer_objects") def convert_dtypes( self: NDFrameT, infer_objects: bool_t = True, convert_string: bool_t = True, convert_integer: bool_t = True, convert_boolean: bool_t = True, convert_floating: bool_t = True, dtype_backend: DtypeBackend = "numpy_nullable", ) -> NDFrameT: """ Convert columns to the best possible dtypes using dtypes supporting ``pd.NA``. Parameters ---------- infer_objects : bool, default True Whether object dtypes should be converted to the best possible types. convert_string : bool, default True Whether object dtypes should be converted to ``StringDtype()``. convert_integer : bool, default True Whether, if possible, conversion can be done to integer extension types. convert_boolean : bool, defaults True Whether object dtypes should be converted to ``BooleanDtypes()``. convert_floating : bool, defaults True Whether, if possible, conversion can be done to floating extension types. If `convert_integer` is also True, preference will be give to integer dtypes if the floats can be faithfully casted to integers. .. versionadded:: 1.2.0 dtype_backend : {"numpy_nullable", "pyarrow"}, default "numpy_nullable" Which dtype_backend to use, e.g. whether a DataFrame should use nullable dtypes for all dtypes that have a nullable implementation when "numpy_nullable" is set, pyarrow is used for all dtypes if "pyarrow" is set. The dtype_backends are still experimential. .. versionadded:: 2.0 Returns ------- Series or DataFrame Copy of input object with new dtype. See Also -------- infer_objects : Infer dtypes of objects. to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to a numeric type. 
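# Illustrative aside, not part of the original module: the deep/shallow
# ``copy`` distinction documented above. This sketch assumes copy-on-write
# mode is *not* enabled; under CoW, the shallow copy would no longer reflect
# writes to the original.
import pandas as pd

_s = pd.Series([1, 2], index=["a", "b"])
_shallow, _deep = _s.copy(deep=False), _s.copy(deep=True)
_s.iloc[0] = 99
print(_shallow.iloc[0])  # 99 -- the data buffer is shared
print(_deep.iloc[0])     # 1  -- the deep copy owns its data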
Notes ----- By default, ``convert_dtypes`` will attempt to convert a Series (or each Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options ``convert_string``, ``convert_integer``, ``convert_boolean`` and ``convert_floating``, it is possible to turn off individual conversions to ``StringDtype``, the integer extension types, ``BooleanDtype`` or floating extension types, respectively. For object-dtyped columns, if ``infer_objects`` is ``True``, use the inference rules as during normal Series/DataFrame construction. Then, if possible, convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer or floating extension type, otherwise leave as ``object``. If the dtype is integer, convert to an appropriate integer extension type. If the dtype is numeric, and consists of all integers, convert to an appropriate integer extension type. Otherwise, convert to an appropriate floating extension type. .. versionchanged:: 1.2 Starting with pandas 1.2, this method also converts float columns to the nullable floating extension type. In the future, as new dtypes are added that support ``pd.NA``, the results of this method will change to support those new dtypes. .. versionadded:: 2.0 The nullable dtype implementation can be configured by calling ``pd.set_option("mode.dtype_backend", "pandas")`` to use numpy-backed nullable dtypes or ``pd.set_option("mode.dtype_backend", "pyarrow")`` to use pyarrow-backed nullable dtypes (using ``pd.ArrowDtype``). Examples -------- >>> df = pd.DataFrame( ... { ... "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")), ... "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")), ... "c": pd.Series([True, False, np.nan], dtype=np.dtype("O")), ... "d": pd.Series(["h", "i", np.nan], dtype=np.dtype("O")), ... "e": pd.Series([10, np.nan, 20], dtype=np.dtype("float")), ... "f": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")), ... } ... ) Start with a DataFrame with default dtypes. >>> df a b c d e f 0 1 x True h 10.0 NaN 1 2 y False i NaN 100.5 2 3 z NaN NaN 20.0 200.0 >>> df.dtypes a int32 b object c object d object e float64 f float64 dtype: object Convert the DataFrame to use best possible dtypes. >>> dfn = df.convert_dtypes() >>> dfn a b c d e f 0 1 x True h 10 <NA> 1 2 y False i <NA> 100.5 2 3 z <NA> <NA> 20 200.0 >>> dfn.dtypes a Int32 b string[python] c boolean d string[python] e Int64 f Float64 dtype: object Start with a Series of strings and missing data represented by ``np.nan``. >>> s = pd.Series(["a", "b", np.nan]) >>> s 0 a 1 b 2 NaN dtype: object Obtain a Series with dtype ``StringDtype``. 
>>> s.convert_dtypes() 0 a 1 b 2 <NA> dtype: string """ check_dtype_backend(dtype_backend) if self.ndim == 1: return self._convert_dtypes( infer_objects, convert_string, convert_integer, convert_boolean, convert_floating, dtype_backend=dtype_backend, ) else: results = [ col._convert_dtypes( infer_objects, convert_string, convert_integer, convert_boolean, convert_floating, dtype_backend=dtype_backend, ) for col_name, col in self.items() ] if len(results) > 0: result = concat(results, axis=1, copy=False, keys=self.columns) cons = cast(Type["DataFrame"], self._constructor) result = cons(result) result = result.__finalize__(self, method="convert_dtypes") # https://github.com/python/mypy/issues/8354 return cast(NDFrameT, result) else: return self.copy(deep=None) # ---------------------------------------------------------------------- # Filling NA's def fillna( self: NDFrameT, value: Hashable | Mapping | Series | DataFrame = ..., *, method: FillnaOptions | None = ..., axis: Axis | None = ..., inplace: Literal[False] = ..., limit: int | None = ..., downcast: dict | None = ..., ) -> NDFrameT: ... def fillna( self, value: Hashable | Mapping | Series | DataFrame = ..., *, method: FillnaOptions | None = ..., axis: Axis | None = ..., inplace: Literal[True], limit: int | None = ..., downcast: dict | None = ..., ) -> None: ... def fillna( self: NDFrameT, value: Hashable | Mapping | Series | DataFrame = ..., *, method: FillnaOptions | None = ..., axis: Axis | None = ..., inplace: bool_t = ..., limit: int | None = ..., downcast: dict | None = ..., ) -> NDFrameT | None: ... def fillna( self: NDFrameT, value: Hashable | Mapping | Series | DataFrame = None, *, method: FillnaOptions | None = None, axis: Axis | None = None, inplace: bool_t = False, limit: int | None = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Fill NA/NaN values using the specified method. Parameters ---------- value : scalar, dict, Series, or DataFrame Value to use to fill holes (e.g. 0), alternately a dict/Series/DataFrame of values specifying which value to use for each index (for a Series) or column (for a DataFrame). Values not in the dict/Series/DataFrame will not be filled. This value cannot be a list. method : {{'backfill', 'bfill', 'ffill', None}}, default None Method to use for filling holes in reindexed Series: * ffill: propagate last valid observation forward to next valid. * backfill / bfill: use next valid observation to fill gap. axis : {axes_single_arg} Axis along which to fill missing values. For `Series` this parameter is unused and defaults to 0. inplace : bool, default False If True, fill in-place. Note: this will modify any other views on this object (e.g., a no-copy slice for a column in a DataFrame). limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. downcast : dict, default is None A dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate equal type (e.g. float64 to int64 if possible). Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. See Also -------- interpolate : Fill NaN values using interpolation. reindex : Conform object to new index. 
asfreq : Convert TimeSeries to specified frequency. Examples -------- >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0], ... [3, 4, np.nan, 1], ... [np.nan, np.nan, np.nan, np.nan], ... [np.nan, 3, np.nan, 4]], ... columns=list("ABCD")) >>> df A B C D 0 NaN 2.0 NaN 0.0 1 3.0 4.0 NaN 1.0 2 NaN NaN NaN NaN 3 NaN 3.0 NaN 4.0 Replace all NaN elements with 0s. >>> df.fillna(0) A B C D 0 0.0 2.0 0.0 0.0 1 3.0 4.0 0.0 1.0 2 0.0 0.0 0.0 0.0 3 0.0 3.0 0.0 4.0 We can also propagate non-null values forward or backward. >>> df.fillna(method="ffill") A B C D 0 NaN 2.0 NaN 0.0 1 3.0 4.0 NaN 1.0 2 3.0 4.0 NaN 1.0 3 3.0 3.0 NaN 4.0 Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1, 2, and 3 respectively. >>> values = {{"A": 0, "B": 1, "C": 2, "D": 3}} >>> df.fillna(value=values) A B C D 0 0.0 2.0 2.0 0.0 1 3.0 4.0 2.0 1.0 2 0.0 1.0 2.0 3.0 3 0.0 3.0 2.0 4.0 Only replace the first NaN element. >>> df.fillna(value=values, limit=1) A B C D 0 0.0 2.0 2.0 0.0 1 3.0 4.0 NaN 1.0 2 NaN 1.0 NaN 3.0 3 NaN 3.0 NaN 4.0 When filling using a DataFrame, replacement happens along the same column names and same indices >>> df2 = pd.DataFrame(np.zeros((4, 4)), columns=list("ABCE")) >>> df.fillna(df2) A B C D 0 0.0 2.0 0.0 0.0 1 3.0 4.0 0.0 1.0 2 0.0 0.0 0.0 NaN 3 0.0 3.0 0.0 4.0 Note that column D is not affected since it is not present in df2. """ inplace = validate_bool_kwarg(inplace, "inplace") value, method = validate_fillna_kwargs(value, method) # set the default here, so functions examining the signaure # can detect if something was set (e.g. in groupby) (GH9221) if axis is None: axis = 0 axis = self._get_axis_number(axis) if value is None: if not self._mgr.is_single_block and axis == 1: if inplace: raise NotImplementedError() result = self.T.fillna(method=method, limit=limit).T return result new_data = self._mgr.interpolate( method=method, axis=axis, limit=limit, inplace=inplace, downcast=downcast, ) else: if self.ndim == 1: if isinstance(value, (dict, ABCSeries)): if not len(value): # test_fillna_nonscalar if inplace: return None return self.copy(deep=None) from pandas import Series value = Series(value) value = value.reindex(self.index, copy=False) value = value._values elif not is_list_like(value): pass else: raise TypeError( '"value" parameter must be a scalar, dict ' "or Series, but you passed a " f'"{type(value).__name__}"' ) new_data = self._mgr.fillna( value=value, limit=limit, inplace=inplace, downcast=downcast ) elif isinstance(value, (dict, ABCSeries)): if axis == 1: raise NotImplementedError( "Currently only can fill " "with dict/Series column " "by column" ) if using_copy_on_write(): result = self.copy(deep=None) else: result = self if inplace else self.copy() is_dict = isinstance(downcast, dict) for k, v in value.items(): if k not in result: continue # error: Item "None" of "Optional[Dict[Any, Any]]" has no # attribute "get" downcast_k = ( downcast if not is_dict else downcast.get(k) # type: ignore[union-attr] ) res_k = result[k].fillna(v, limit=limit, downcast=downcast_k) if not inplace: result[k] = res_k else: # We can write into our existing column(s) iff dtype # was preserved. if isinstance(res_k, ABCSeries): # i.e. 'k' only shows up once in self.columns if res_k.dtype == result[k].dtype: result.loc[:, k] = res_k else: # Different dtype -> no way to do inplace. 
result[k] = res_k else: # see test_fillna_dict_inplace_nonunique_columns locs = result.columns.get_loc(k) if isinstance(locs, slice): locs = np.arange(self.shape[1])[locs] elif ( isinstance(locs, np.ndarray) and locs.dtype.kind == "b" ): locs = locs.nonzero()[0] elif not ( isinstance(locs, np.ndarray) and locs.dtype.kind == "i" ): # Should never be reached, but let's cover our bases raise NotImplementedError( "Unexpected get_loc result, please report a bug at " "https://github.com/pandas-dev/pandas" ) for i, loc in enumerate(locs): res_loc = res_k.iloc[:, i] target = self.iloc[:, loc] if res_loc.dtype == target.dtype: result.iloc[:, loc] = res_loc else: result.isetitem(loc, res_loc) if inplace: return self._update_inplace(result) else: return result elif not is_list_like(value): if axis == 1: result = self.T.fillna(value=value, limit=limit).T new_data = result else: new_data = self._mgr.fillna( value=value, limit=limit, inplace=inplace, downcast=downcast ) elif isinstance(value, ABCDataFrame) and self.ndim == 2: new_data = self.where(self.notna(), value)._mgr else: raise ValueError(f"invalid fill value with a {type(value)}") result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="fillna") def ffill( self: NDFrameT, *, axis: None | Axis = ..., inplace: Literal[False] = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT: ... def ffill( self, *, axis: None | Axis = ..., inplace: Literal[True], limit: None | int = ..., downcast: dict | None = ..., ) -> None: ... def ffill( self: NDFrameT, *, axis: None | Axis = ..., inplace: bool_t = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT | None: ... def ffill( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``. Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. """ return self.fillna( method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast ) def pad( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``. .. deprecated:: 2.0 {klass}.pad is deprecated. Use {klass}.ffill instead. Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. """ warnings.warn( "DataFrame.pad/Series.pad is deprecated. Use " "DataFrame.ffill/Series.ffill instead", FutureWarning, stacklevel=find_stack_level(), ) return self.ffill(axis=axis, inplace=inplace, limit=limit, downcast=downcast) def bfill( self: NDFrameT, *, axis: None | Axis = ..., inplace: Literal[False] = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT: ... def bfill( self, *, axis: None | Axis = ..., inplace: Literal[True], limit: None | int = ..., downcast: dict | None = ..., ) -> None: ... def bfill( self: NDFrameT, *, axis: None | Axis = ..., inplace: bool_t = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT | None: ... def bfill( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``. 
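# Illustrative aside, not part of the original module: two of the filling
# behaviors documented above -- ``fillna`` aligning a Series argument on the
# index, and ``ffill`` capped by ``limit``. Only public pandas APIs are
# assumed.
import numpy as np
import pandas as pd

_s = pd.Series([1.0, np.nan, np.nan], index=["a", "b", "c"])
print(_s.fillna(pd.Series({"b": 20.0})))  # only label 'b' is filled

_t = pd.Series([1.0, np.nan, np.nan, np.nan, 5.0])
print(_t.ffill(limit=2))  # -> [1.0, 1.0, 1.0, NaN, 5.0]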
Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. """ return self.fillna( method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast ) def backfill( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``. .. deprecated:: 2.0 {klass}.backfill is deprecated. Use {klass}.bfill instead. Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. """ warnings.warn( "DataFrame.backfill/Series.backfill is deprecated. Use " "DataFrame.bfill/Series.bfill instead", FutureWarning, stacklevel=find_stack_level(), ) return self.bfill(axis=axis, inplace=inplace, limit=limit, downcast=downcast) def replace( self: NDFrameT, to_replace=..., value=..., *, inplace: Literal[False] = ..., limit: int | None = ..., regex: bool_t = ..., method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ..., ) -> NDFrameT: ... def replace( self, to_replace=..., value=..., *, inplace: Literal[True], limit: int | None = ..., regex: bool_t = ..., method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ..., ) -> None: ... def replace( self: NDFrameT, to_replace=..., value=..., *, inplace: bool_t = ..., limit: int | None = ..., regex: bool_t = ..., method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ..., ) -> NDFrameT | None: ... @doc( _shared_docs["replace"], klass=_shared_doc_kwargs["klass"], inplace=_shared_doc_kwargs["inplace"], replace_iloc=_shared_doc_kwargs["replace_iloc"], ) def replace( self: NDFrameT, to_replace=None, value=lib.no_default, *, inplace: bool_t = False, limit: int | None = None, regex: bool_t = False, method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = lib.no_default, ) -> NDFrameT | None: if not ( is_scalar(to_replace) or is_re_compilable(to_replace) or is_list_like(to_replace) ): raise TypeError( "Expecting 'to_replace' to be either a scalar, array-like, " "dict or None, got invalid type " f"{repr(type(to_replace).__name__)}" ) inplace = validate_bool_kwarg(inplace, "inplace") if not is_bool(regex) and to_replace is not None: raise ValueError("'to_replace' must be 'None' if 'regex' is not a bool") if value is lib.no_default or method is not lib.no_default: # GH#36984 if the user explicitly passes value=None we want to # respect that. We have the corner case where the user explicitly # passes value=None *and* a method, which we interpret as meaning # they want the (documented) default behavior. if method is lib.no_default: # TODO: get this to show up as the default in the docs?
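# Illustrative aside, not part of the original module: the two main
# ``replace`` call shapes handled by the implementation below -- an
# exact-match mapping and a regex substitution via ``to_replace=None``. Only
# public pandas APIs are assumed.
import pandas as pd

_df = pd.DataFrame({"a": ["foo", "bar", "baz"]})
print(_df.replace({"foo": "qux"}))              # dict of exact replacements
print(_df.replace(regex=r"^ba", value="new_"))  # 'bar' -> 'new_r', 'baz' -> 'new_z'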
method = "pad" # passing a single value that is scalar like # when value is None (GH5319), for compat if not is_dict_like(to_replace) and not is_dict_like(regex): to_replace = [to_replace] if isinstance(to_replace, (tuple, list)): # TODO: Consider copy-on-write for non-replaced columns's here if isinstance(self, ABCDataFrame): from pandas import Series result = self.apply( Series._replace_single, args=(to_replace, method, inplace, limit), ) if inplace: return None return result return self._replace_single(to_replace, method, inplace, limit) if not is_dict_like(to_replace): if not is_dict_like(regex): raise TypeError( 'If "to_replace" and "value" are both None ' 'and "to_replace" is not a list, then ' "regex must be a mapping" ) to_replace = regex regex = True items = list(to_replace.items()) if items: keys, values = zip(*items) else: keys, values = ([], []) are_mappings = [is_dict_like(v) for v in values] if any(are_mappings): if not all(are_mappings): raise TypeError( "If a nested mapping is passed, all values " "of the top level mapping must be mappings" ) # passed a nested dict/Series to_rep_dict = {} value_dict = {} for k, v in items: keys, values = list(zip(*v.items())) or ([], []) to_rep_dict[k] = list(keys) value_dict[k] = list(values) to_replace, value = to_rep_dict, value_dict else: to_replace, value = keys, values return self.replace( to_replace, value, inplace=inplace, limit=limit, regex=regex ) else: # need a non-zero len on all axes if not self.size: if inplace: return None return self.copy(deep=None) if is_dict_like(to_replace): if is_dict_like(value): # {'A' : NA} -> {'A' : 0} # Note: Checking below for `in foo.keys()` instead of # `in foo` is needed for when we have a Series and not dict mapping = { col: (to_replace[col], value[col]) for col in to_replace.keys() if col in value.keys() and col in self } return self._replace_columnwise(mapping, inplace, regex) # {'A': NA} -> 0 elif not is_list_like(value): # Operate column-wise if self.ndim == 1: raise ValueError( "Series.replace cannot use dict-like to_replace " "and non-None value" ) mapping = { col: (to_rep, value) for col, to_rep in to_replace.items() } return self._replace_columnwise(mapping, inplace, regex) else: raise TypeError("value argument must be scalar, dict, or Series") elif is_list_like(to_replace): if not is_list_like(value): # e.g. to_replace = [NA, ''] and value is 0, # so we replace NA with 0 and then replace '' with 0 value = [value] * len(to_replace) # e.g. we have to_replace = [NA, ''] and value = [0, 'missing'] if len(to_replace) != len(value): raise ValueError( f"Replacement lists must match in length. 
" f"Expecting {len(to_replace)} got {len(value)} " ) new_data = self._mgr.replace_list( src_list=to_replace, dest_list=value, inplace=inplace, regex=regex, ) elif to_replace is None: if not ( is_re_compilable(regex) or is_list_like(regex) or is_dict_like(regex) ): raise TypeError( f"'regex' must be a string or a compiled regular expression " f"or a list or dict of strings or regular expressions, " f"you passed a {repr(type(regex).__name__)}" ) return self.replace( regex, value, inplace=inplace, limit=limit, regex=True ) else: # dest iterable dict-like if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1} # Operate column-wise if self.ndim == 1: raise ValueError( "Series.replace cannot use dict-value and " "non-None to_replace" ) mapping = {col: (to_replace, val) for col, val in value.items()} return self._replace_columnwise(mapping, inplace, regex) elif not is_list_like(value): # NA -> 0 regex = should_use_regex(regex, to_replace) if regex: new_data = self._mgr.replace_regex( to_replace=to_replace, value=value, inplace=inplace, ) else: new_data = self._mgr.replace( to_replace=to_replace, value=value, inplace=inplace ) else: raise TypeError( f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}' ) result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="replace") def interpolate( self: NDFrameT, method: str = "linear", *, axis: Axis = 0, limit: int | None = None, inplace: bool_t = False, limit_direction: str | None = None, limit_area: str | None = None, downcast: str | None = None, **kwargs, ) -> NDFrameT | None: """ Fill NaN values using an interpolation method. Please note that only ``method='linear'`` is supported for DataFrame/Series with a MultiIndex. Parameters ---------- method : str, default 'linear' Interpolation technique to use. One of: * 'linear': Ignore the index and treat the values as equally spaced. This is the only method supported on MultiIndexes. * 'time': Works on daily and higher resolution data to interpolate given length of interval. * 'index', 'values': use the actual numerical values of the index. * 'pad': Fill in NaNs using existing values. * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'barycentric', 'polynomial': Passed to `scipy.interpolate.interp1d`, whereas 'spline' is passed to `scipy.interpolate.UnivariateSpline`. These methods use the numerical values of the index. Both 'polynomial' and 'spline' require that you also specify an `order` (int), e.g. ``df.interpolate(method='polynomial', order=5)``. Note that, `slinear` method in Pandas refers to the Scipy first order `spline` instead of Pandas first order `spline`. * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima', 'cubicspline': Wrappers around the SciPy interpolation methods of similar names. See `Notes`. * 'from_derivatives': Refers to `scipy.interpolate.BPoly.from_derivatives` which replaces 'piecewise_polynomial' interpolation method in scipy 0.18. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to interpolate along. For `Series` this parameter is unused and defaults to 0. limit : int, optional Maximum number of consecutive NaNs to fill. Must be greater than 0. inplace : bool, default False Update the data in place if possible. limit_direction : {{'forward', 'backward', 'both'}}, Optional Consecutive NaNs will be filled in this direction. If limit is specified: * If 'method' is 'pad' or 'ffill', 'limit_direction' must be 'forward'. 
* If 'method' is 'backfill' or 'bfill', 'limit_direction' must be
              'backward'.

            If 'limit' is not specified:

            * If 'method' is 'backfill' or 'bfill', the default is 'backward'
            * else the default is 'forward'

            .. versionchanged:: 1.1.0
                raises ValueError if `limit_direction` is 'forward' or 'both' and
                method is 'backfill' or 'bfill'.
                raises ValueError if `limit_direction` is 'backward' or 'both' and
                method is 'pad' or 'ffill'.

        limit_area : {{`None`, 'inside', 'outside'}}, default None
            If limit is specified, consecutive NaNs will be filled with this
            restriction.

            * ``None``: No fill restriction.
            * 'inside': Only fill NaNs surrounded by valid values
              (interpolate).
            * 'outside': Only fill NaNs outside valid values (extrapolate).

        downcast : optional, 'infer' or None, defaults to None
            Downcast dtypes if possible.
        ``**kwargs`` : optional
            Keyword arguments to pass on to the interpolating function.

        Returns
        -------
        Series or DataFrame or None
            Returns the same object type as the caller, interpolated at
            some or all ``NaN`` values or None if ``inplace=True``.

        See Also
        --------
        fillna : Fill missing values using different methods.
        scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials
            (Akima interpolator).
        scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the
            Bernstein basis.
        scipy.interpolate.interp1d : Interpolate a 1-D function.
        scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh
            interpolator).
        scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic
            interpolation.
        scipy.interpolate.CubicSpline : Cubic spline data interpolator.

        Notes
        -----
        The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'
        methods are wrappers around the respective SciPy implementations of
        similar names. These use the actual numerical values of the index.
        For more information on their behavior, see the
        `SciPy documentation
        <https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__.

        Examples
        --------
        Filling in ``NaN`` in a :class:`~pandas.Series` via linear
        interpolation.

        >>> s = pd.Series([0, 1, np.nan, 3])
        >>> s
        0    0.0
        1    1.0
        2    NaN
        3    3.0
        dtype: float64
        >>> s.interpolate()
        0    0.0
        1    1.0
        2    2.0
        3    3.0
        dtype: float64

        Filling in ``NaN`` in a Series by padding, but filling at most two
        consecutive ``NaN`` at a time.

        >>> s = pd.Series([np.nan, "single_one", np.nan,
        ...                "fill_two_more", np.nan, np.nan, np.nan,
        ...                4.71, np.nan])
        >>> s
        0              NaN
        1       single_one
        2              NaN
        3    fill_two_more
        4              NaN
        5              NaN
        6              NaN
        7             4.71
        8              NaN
        dtype: object
        >>> s.interpolate(method='pad', limit=2)
        0              NaN
        1       single_one
        2       single_one
        3    fill_two_more
        4    fill_two_more
        5    fill_two_more
        6              NaN
        7             4.71
        8             4.71
        dtype: object

        Filling in ``NaN`` in a Series via polynomial interpolation or splines:
        Both 'polynomial' and 'spline' methods require that you also specify
        an ``order`` (int).

        >>> s = pd.Series([0, 2, np.nan, 8])
        >>> s.interpolate(method='polynomial', order=2)
        0    0.000000
        1    2.000000
        2    4.666667
        3    8.000000
        dtype: float64

        Fill the DataFrame forward (that is, going down) along each column
        using linear interpolation.

        Note how the last entry in column 'a' is interpolated differently,
        because there is no entry after it to use for interpolation.
        Note how the first entry in column 'b' remains ``NaN``, because there
        is no entry before it to use for interpolation.

        >>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),
        ...                    (np.nan, 2.0, np.nan, np.nan),
        ...                    (2.0, 3.0, np.nan, 9.0),
        ...                    (np.nan, 4.0, -4.0, 16.0)],
        ... 
columns=list('abcd')) >>> df a b c d 0 0.0 NaN -1.0 1.0 1 NaN 2.0 NaN NaN 2 2.0 3.0 NaN 9.0 3 NaN 4.0 -4.0 16.0 >>> df.interpolate(method='linear', limit_direction='forward', axis=0) a b c d 0 0.0 NaN -1.0 1.0 1 1.0 2.0 -2.0 5.0 2 2.0 3.0 -3.0 9.0 3 2.0 4.0 -4.0 16.0 Using polynomial interpolation. >>> df['d'].interpolate(method='polynomial', order=2) 0 1.0 1 4.0 2 9.0 3 16.0 Name: d, dtype: float64 """ inplace = validate_bool_kwarg(inplace, "inplace") axis = self._get_axis_number(axis) fillna_methods = ["ffill", "bfill", "pad", "backfill"] should_transpose = axis == 1 and method not in fillna_methods obj = self.T if should_transpose else self if obj.empty: return self.copy() if method not in fillna_methods: axis = self._info_axis_number if isinstance(obj.index, MultiIndex) and method != "linear": raise ValueError( "Only `method=linear` interpolation is supported on MultiIndexes." ) # Set `limit_direction` depending on `method` if limit_direction is None: limit_direction = ( "backward" if method in ("backfill", "bfill") else "forward" ) else: if method in ("pad", "ffill") and limit_direction != "forward": raise ValueError( f"`limit_direction` must be 'forward' for method `{method}`" ) if method in ("backfill", "bfill") and limit_direction != "backward": raise ValueError( f"`limit_direction` must be 'backward' for method `{method}`" ) if obj.ndim == 2 and np.all(obj.dtypes == np.dtype("object")): raise TypeError( "Cannot interpolate with all object-dtype columns " "in the DataFrame. Try setting at least one " "column to a numeric dtype." ) # create/use the index if method == "linear": # prior default index = Index(np.arange(len(obj.index))) else: index = obj.index methods = {"index", "values", "nearest", "time"} is_numeric_or_datetime = ( is_numeric_dtype(index.dtype) or is_datetime64_any_dtype(index.dtype) or is_timedelta64_dtype(index.dtype) ) if method not in methods and not is_numeric_or_datetime: raise ValueError( "Index column must be numeric or datetime type when " f"using {method} method other than linear. " "Try setting a numeric or datetime index column before " "interpolating." ) if isna(index).any(): raise NotImplementedError( "Interpolation with NaNs in the index " "has not been implemented. Try filling " "those NaNs before interpolating." ) new_data = obj._mgr.interpolate( method=method, axis=axis, index=index, limit=limit, limit_direction=limit_direction, limit_area=limit_area, inplace=inplace, downcast=downcast, **kwargs, ) result = self._constructor(new_data) if should_transpose: result = result.T if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="interpolate") # ---------------------------------------------------------------------- # Timeseries methods Methods def asof(self, where, subset=None): """ Return the last row(s) without any NaNs before `where`. The last row (for each element in `where`, if list) without any NaN is taken. In case of a :class:`~pandas.DataFrame`, the last row without NaN considering only the subset of columns (if not `None`) If there is no good value, NaN is returned for a Series or a Series of NaN values for a DataFrame Parameters ---------- where : date or array-like of dates Date(s) before which the last row(s) are returned. subset : str or array-like of str, default `None` For DataFrame, if not `None`, only use these columns to check for NaNs. 
Returns ------- scalar, Series, or DataFrame The return can be: * scalar : when `self` is a Series and `where` is a scalar * Series: when `self` is a Series and `where` is an array-like, or when `self` is a DataFrame and `where` is a scalar * DataFrame : when `self` is a DataFrame and `where` is an array-like Return scalar, Series, or DataFrame. See Also -------- merge_asof : Perform an asof merge. Similar to left join. Notes ----- Dates are assumed to be sorted. Raises if this is not the case. Examples -------- A Series and a scalar `where`. >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40]) >>> s 10 1.0 20 2.0 30 NaN 40 4.0 dtype: float64 >>> s.asof(20) 2.0 For a sequence `where`, a Series is returned. The first value is NaN, because the first element of `where` is before the first index value. >>> s.asof([5, 20]) 5 NaN 20 2.0 dtype: float64 Missing values are not considered. The following is ``2.0``, not NaN, even though NaN is at the index location for ``30``. >>> s.asof(30) 2.0 Take all columns into consideration >>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50], ... 'b': [None, None, None, None, 500]}, ... index=pd.DatetimeIndex(['2018-02-27 09:01:00', ... '2018-02-27 09:02:00', ... '2018-02-27 09:03:00', ... '2018-02-27 09:04:00', ... '2018-02-27 09:05:00'])) >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', ... '2018-02-27 09:04:30'])) a b 2018-02-27 09:03:30 NaN NaN 2018-02-27 09:04:30 NaN NaN Take a single column into consideration >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', ... '2018-02-27 09:04:30']), ... subset=['a']) a b 2018-02-27 09:03:30 30 NaN 2018-02-27 09:04:30 40 NaN """ if isinstance(where, str): where = Timestamp(where) if not self.index.is_monotonic_increasing: raise ValueError("asof requires a sorted index") is_series = isinstance(self, ABCSeries) if is_series: if subset is not None: raise ValueError("subset is not valid for Series") else: if subset is None: subset = self.columns if not is_list_like(subset): subset = [subset] is_list = is_list_like(where) if not is_list: start = self.index[0] if isinstance(self.index, PeriodIndex): where = Period(where, freq=self.index.freq) if where < start: if not is_series: return self._constructor_sliced( index=self.columns, name=where, dtype=np.float64 ) return np.nan # It's always much faster to use a *while* loop here for # Series than pre-computing all the NAs. However a # *while* loop is extremely expensive for DataFrame # so we later pre-compute all the NAs and use the same # code path whether *where* is a scalar or list. # See PR: https://github.com/pandas-dev/pandas/pull/14476 if is_series: loc = self.index.searchsorted(where, side="right") if loc > 0: loc -= 1 values = self._values while loc > 0 and isna(values[loc]): loc -= 1 return values[loc] if not isinstance(where, Index): where = Index(where) if is_list else Index([where]) nulls = self.isna() if is_series else self[subset].isna().any(axis=1) if nulls.all(): if is_series: self = cast("Series", self) return self._constructor(np.nan, index=where, name=self.name) elif is_list: self = cast("DataFrame", self) return self._constructor(np.nan, index=where, columns=self.columns) else: self = cast("DataFrame", self) return self._constructor_sliced( np.nan, index=self.columns, name=where[0] ) locs = self.index.asof_locs(where, ~(nulls._values)) # mask the missing missing = locs == -1 data = self.take(locs) data.index = where if missing.any(): # GH#16063 only do this setting when necessary, otherwise # we'd cast e.g. 
bools to floats
            data.loc[missing] = np.nan

        return data if is_list else data.iloc[-1]

    # ----------------------------------------------------------------------
    # Action Methods

    def isna(self: NDFrameT) -> NDFrameT:
        """
        Detect missing values.

        Return a boolean same-sized object indicating if the values are NA.
        NA values, such as None or :attr:`numpy.NaN`, get mapped to True
        values.
        Everything else gets mapped to False values. Characters such as empty
        strings ``''`` or :attr:`numpy.inf` are not considered NA values
        (unless you set ``pandas.options.mode.use_inf_as_na = True``).

        Returns
        -------
        {klass}
            Mask of bool values for each element in {klass} that
            indicates whether an element is an NA value.

        See Also
        --------
        {klass}.isnull : Alias of isna.
        {klass}.notna : Boolean inverse of isna.
        {klass}.dropna : Omit axes labels with missing values.
        isna : Top-level isna.

        Examples
        --------
        Show which entries in a DataFrame are NA.

        >>> df = pd.DataFrame(dict(age=[5, 6, np.NaN],
        ...                        born=[pd.NaT, pd.Timestamp('1939-05-27'),
        ...                              pd.Timestamp('1940-04-25')],
        ...                        name=['Alfred', 'Batman', ''],
        ...                        toy=[None, 'Batmobile', 'Joker']))
        >>> df
           age       born    name        toy
        0  5.0        NaT  Alfred       None
        1  6.0 1939-05-27  Batman  Batmobile
        2  NaN 1940-04-25              Joker

        >>> df.isna()
             age   born   name    toy
        0  False   True  False   True
        1  False  False  False  False
        2   True  False  False  False

        Show which entries in a Series are NA.

        >>> ser = pd.Series([5, 6, np.NaN])
        >>> ser
        0    5.0
        1    6.0
        2    NaN
        dtype: float64
        >>> ser.isna()
        0    False
        1    False
        2     True
        dtype: bool
        """
        return isna(self).__finalize__(self, method="isna")

    def isnull(self: NDFrameT) -> NDFrameT:
        return isna(self).__finalize__(self, method="isnull")

    def notna(self: NDFrameT) -> NDFrameT:
        """
        Detect existing (non-missing) values.

        Return a boolean same-sized object indicating if the values are not NA.
        Non-missing values get mapped to True. Characters such as empty
        strings ``''`` or :attr:`numpy.inf` are not considered NA values
        (unless you set ``pandas.options.mode.use_inf_as_na = True``).
        NA values, such as None or :attr:`numpy.NaN`, get mapped to False
        values.

        Returns
        -------
        {klass}
            Mask of bool values for each element in {klass} that
            indicates whether an element is not an NA value.

        See Also
        --------
        {klass}.notnull : Alias of notna.
        {klass}.isna : Boolean inverse of notna.
        {klass}.dropna : Omit axes labels with missing values.
        notna : Top-level notna.

        Examples
        --------
        Show which entries in a DataFrame are not NA.

        >>> df = pd.DataFrame(dict(age=[5, 6, np.NaN],
        ...                        born=[pd.NaT, pd.Timestamp('1939-05-27'),
        ...                              pd.Timestamp('1940-04-25')],
        ...                        name=['Alfred', 'Batman', ''],
        ...                        toy=[None, 'Batmobile', 'Joker']))
        >>> df
           age       born    name        toy
        0  5.0        NaT  Alfred       None
        1  6.0 1939-05-27  Batman  Batmobile
        2  NaN 1940-04-25              Joker

        >>> df.notna()
             age   born  name    toy
        0   True  False  True  False
        1   True   True  True   True
        2  False   True  True   True

        Show which entries in a Series are not NA.

>>> ser = pd.Series([5, 6, np.NaN]) >>> ser 0 5.0 1 6.0 2 NaN dtype: float64 >>> ser.notna() 0 True 1 True 2 False dtype: bool """ return notna(self).__finalize__(self, method="notna") def notnull(self: NDFrameT) -> NDFrameT: return notna(self).__finalize__(self, method="notnull") def _clip_with_scalar(self, lower, upper, inplace: bool_t = False): if (lower is not None and np.any(isna(lower))) or ( upper is not None and np.any(isna(upper)) ): raise ValueError("Cannot use an NA value as a clip threshold") result = self mask = isna(self._values) with np.errstate(all="ignore"): if upper is not None: subset = self <= upper result = result.where(subset, upper, axis=None, inplace=False) if lower is not None: subset = self >= lower result = result.where(subset, lower, axis=None, inplace=False) if np.any(mask): result[mask] = np.nan if inplace: return self._update_inplace(result) else: return result def _clip_with_one_bound(self, threshold, method, axis, inplace): if axis is not None: axis = self._get_axis_number(axis) # method is self.le for upper bound and self.ge for lower bound if is_scalar(threshold) and is_number(threshold): if method.__name__ == "le": return self._clip_with_scalar(None, threshold, inplace=inplace) return self._clip_with_scalar(threshold, None, inplace=inplace) # GH #15390 # In order for where method to work, the threshold must # be transformed to NDFrame from other array like structure. if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold): if isinstance(self, ABCSeries): threshold = self._constructor(threshold, index=self.index) else: threshold = align_method_FRAME(self, threshold, axis, flex=None)[1] # GH 40420 # Treat missing thresholds as no bounds, not clipping the values if is_list_like(threshold): fill_value = np.inf if method.__name__ == "le" else -np.inf threshold_inf = threshold.fillna(fill_value) else: threshold_inf = threshold subset = method(threshold_inf, axis=axis) | isna(self) # GH 40420 return self.where(subset, threshold, axis=axis, inplace=inplace) def clip( self: NDFrameT, lower=None, upper=None, *, axis: Axis | None = None, inplace: bool_t = False, **kwargs, ) -> NDFrameT | None: """ Trim values at input threshold(s). Assigns values outside boundary to boundary values. Thresholds can be singular values or array like, and in the latter case the clipping is performed element-wise in the specified axis. Parameters ---------- lower : float or array-like, default None Minimum threshold value. All values below this threshold will be set to it. A missing threshold (e.g `NA`) will not clip the value. upper : float or array-like, default None Maximum threshold value. All values above this threshold will be set to it. A missing threshold (e.g `NA`) will not clip the value. axis : {{0 or 'index', 1 or 'columns', None}}, default None Align object with lower and upper along the given axis. For `Series` this parameter is unused and defaults to `None`. inplace : bool, default False Whether to perform the operation in place on the data. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with numpy. Returns ------- Series or DataFrame or None Same type as calling object with the values outside the clip boundaries replaced or None if ``inplace=True``. See Also -------- Series.clip : Trim values at input threshold in series. DataFrame.clip : Trim values at input threshold in dataframe. numpy.clip : Clip (limit) the values in an array. 
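        Notes
        -----
        With scalar bounds, clipping amounts to an element-wise maximum with
        ``lower`` followed by an element-wise minimum with ``upper``. A
        minimal doctest sketch of that behaviour (values invented for
        illustration):

        >>> pd.Series([-1, 5]).clip(0, 3)
        0    0
        1    3
        dtype: int64
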
Examples -------- >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]} >>> df = pd.DataFrame(data) >>> df col_0 col_1 0 9 -2 1 -3 -7 2 0 6 3 -1 8 4 5 -5 Clips per column using lower and upper thresholds: >>> df.clip(-4, 6) col_0 col_1 0 6 -2 1 -3 -4 2 0 6 3 -1 6 4 5 -4 Clips using specific lower and upper thresholds per column element: >>> t = pd.Series([2, -4, -1, 6, 3]) >>> t 0 2 1 -4 2 -1 3 6 4 3 dtype: int64 >>> df.clip(t, t + 4, axis=0) col_0 col_1 0 6 2 1 -3 -4 2 0 3 3 6 8 4 5 3 Clips using specific lower threshold per column element, with missing values: >>> t = pd.Series([2, -4, np.NaN, 6, 3]) >>> t 0 2.0 1 -4.0 2 NaN 3 6.0 4 3.0 dtype: float64 >>> df.clip(t, axis=0) col_0 col_1 0 9 2 1 -3 -4 2 0 6 3 6 8 4 5 3 """ inplace = validate_bool_kwarg(inplace, "inplace") axis = nv.validate_clip_with_axis(axis, (), kwargs) if axis is not None: axis = self._get_axis_number(axis) # GH 17276 # numpy doesn't like NaN as a clip value # so ignore # GH 19992 # numpy doesn't drop a list-like bound containing NaN isna_lower = isna(lower) if not is_list_like(lower): if np.any(isna_lower): lower = None elif np.all(isna_lower): lower = None isna_upper = isna(upper) if not is_list_like(upper): if np.any(isna_upper): upper = None elif np.all(isna_upper): upper = None # GH 2747 (arguments were reversed) if ( lower is not None and upper is not None and is_scalar(lower) and is_scalar(upper) ): lower, upper = min(lower, upper), max(lower, upper) # fast-path for scalars if (lower is None or (is_scalar(lower) and is_number(lower))) and ( upper is None or (is_scalar(upper) and is_number(upper)) ): return self._clip_with_scalar(lower, upper, inplace=inplace) result = self if lower is not None: result = result._clip_with_one_bound( lower, method=self.ge, axis=axis, inplace=inplace ) if upper is not None: if inplace: result = self result = result._clip_with_one_bound( upper, method=self.le, axis=axis, inplace=inplace ) return result def asfreq( self: NDFrameT, freq: Frequency, method: FillnaOptions | None = None, how: str | None = None, normalize: bool_t = False, fill_value: Hashable = None, ) -> NDFrameT: """ Convert time series to specified frequency. Returns the original data conformed to a new index with the specified frequency. If the index of this {klass} is a :class:`~pandas.PeriodIndex`, the new index is the result of transforming the original index with :meth:`PeriodIndex.asfreq <pandas.PeriodIndex.asfreq>` (so the original index will map one-to-one to the new index). Otherwise, the new index will be equivalent to ``pd.date_range(start, end, freq=freq)`` where ``start`` and ``end`` are, respectively, the first and last entries in the original index (see :func:`pandas.date_range`). The values corresponding to any timesteps in the new index which were not present in the original index will be null (``NaN``), unless a method for filling such unknowns is provided (see the ``method`` parameter below). The :meth:`resample` method is more appropriate if an operation on each group of timesteps (such as an aggregate) is necessary to represent the data at the new frequency. Parameters ---------- freq : DateOffset or str Frequency DateOffset or string. method : {{'backfill'/'bfill', 'pad'/'ffill'}}, default None Method to use for filling holes in reindexed Series (note this does not fill NaNs that already were present): * 'pad' / 'ffill': propagate last valid observation forward to next valid * 'backfill' / 'bfill': use NEXT valid observation to fill. 
how : {{'start', 'end'}}, default end For PeriodIndex only (see PeriodIndex.asfreq). normalize : bool, default False Whether to reset output index to midnight. fill_value : scalar, optional Value to use for missing values, applied during upsampling (note this does not fill NaNs that already were present). Returns ------- {klass} {klass} object reindexed to the specified frequency. See Also -------- reindex : Conform DataFrame to new index with optional filling logic. Notes ----- To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- Start by creating a series with 4 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=4, freq='T') >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index) >>> df = pd.DataFrame({{'s': series}}) >>> df s 2000-01-01 00:00:00 0.0 2000-01-01 00:01:00 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:03:00 3.0 Upsample the series into 30 second bins. >>> df.asfreq(freq='30S') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 NaN 2000-01-01 00:03:00 3.0 Upsample again, providing a ``fill value``. >>> df.asfreq(freq='30S', fill_value=9.0) s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 9.0 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 9.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 9.0 2000-01-01 00:03:00 3.0 Upsample again, providing a ``method``. >>> df.asfreq(freq='30S', method='bfill') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 2.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 3.0 2000-01-01 00:03:00 3.0 """ from pandas.core.resample import asfreq return asfreq( self, freq, method=method, how=how, normalize=normalize, fill_value=fill_value, ) def at_time( self: NDFrameT, time, asof: bool_t = False, axis: Axis | None = None ) -> NDFrameT: """ Select values at particular time of day (e.g., 9:30AM). Parameters ---------- time : datetime.time or str The values to select. axis : {0 or 'index', 1 or 'columns'}, default 0 For `Series` this parameter is unused and defaults to 0. Returns ------- Series or DataFrame Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- between_time : Select values between particular times of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_at_time : Get just the index locations for values at particular time of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='12H') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-09 12:00:00 2 2018-04-10 00:00:00 3 2018-04-10 12:00:00 4 >>> ts.at_time('12:00') A 2018-04-09 12:00:00 2 2018-04-10 12:00:00 4 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) if not isinstance(index, DatetimeIndex): raise TypeError("Index must be DatetimeIndex") indexer = index.indexer_at_time(time, asof=asof) return self._take_with_is_copy(indexer, axis=axis) def between_time( self: NDFrameT, start_time, end_time, inclusive: IntervalClosedType = "both", axis: Axis | None = None, ) -> NDFrameT: """ Select values between particular times of the day (e.g., 9:00-9:30 AM). 
By setting ``start_time`` to be later than ``end_time``, you can get the times that are *not* between the two times. Parameters ---------- start_time : datetime.time or str Initial time as a time filter limit. end_time : datetime.time or str End time as a time filter limit. inclusive : {"both", "neither", "left", "right"}, default "both" Include boundaries; whether to set each bound as closed or open. axis : {0 or 'index', 1 or 'columns'}, default 0 Determine range time on index or columns value. For `Series` this parameter is unused and defaults to 0. Returns ------- Series or DataFrame Data from the original object filtered to the specified dates range. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- at_time : Select values at a particular time of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_between_time : Get just the index locations for values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 2018-04-12 01:00:00 4 >>> ts.between_time('0:15', '0:45') A 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 You get the times that are *not* between two times by setting ``start_time`` later than ``end_time``: >>> ts.between_time('0:45', '0:15') A 2018-04-09 00:00:00 1 2018-04-12 01:00:00 4 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) if not isinstance(index, DatetimeIndex): raise TypeError("Index must be DatetimeIndex") left_inclusive, right_inclusive = validate_inclusive(inclusive) indexer = index.indexer_between_time( start_time, end_time, include_start=left_inclusive, include_end=right_inclusive, ) return self._take_with_is_copy(indexer, axis=axis) def resample( self, rule, axis: Axis = 0, closed: str | None = None, label: str | None = None, convention: str = "start", kind: str | None = None, on: Level = None, level: Level = None, origin: str | TimestampConvertibleTypes = "start_day", offset: TimedeltaConvertibleTypes | None = None, group_keys: bool_t = False, ) -> Resampler: """ Resample time-series data. Convenience method for frequency conversion and resampling of time series. The object must have a datetime-like index (`DatetimeIndex`, `PeriodIndex`, or `TimedeltaIndex`), or the caller must pass the label of a datetime-like series/index to the ``on``/``level`` keyword parameter. Parameters ---------- rule : DateOffset, Timedelta or str The offset string or object representing target conversion. axis : {{0 or 'index', 1 or 'columns'}}, default 0 Which axis to use for up- or down-sampling. For `Series` this parameter is unused and defaults to 0. Must be `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`. closed : {{'right', 'left'}}, default None Which side of bin interval is closed. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. label : {{'right', 'left'}}, default None Which bin edge label to label bucket with. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. convention : {{'start', 'end', 's', 'e'}}, default 'start' For `PeriodIndex` only, controls whether to use the start or end of `rule`. 
kind : {{'timestamp', 'period'}}, optional, default None Pass 'timestamp' to convert the resulting index to a `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`. By default the input representation is retained. on : str, optional For a DataFrame, column to use instead of index for resampling. Column must be datetime-like. level : str or int, optional For a MultiIndex, level (name or number) to use for resampling. `level` must be datetime-like. origin : Timestamp or str, default 'start_day' The timestamp on which to adjust the grouping. The timezone of origin must match the timezone of the index. If string, must be one of the following: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries .. versionadded:: 1.1.0 - 'end': `origin` is the last value of the timeseries - 'end_day': `origin` is the ceiling midnight of the last day .. versionadded:: 1.3.0 offset : Timedelta or str, default is None An offset timedelta added to the origin. .. versionadded:: 1.1.0 group_keys : bool, default False Whether to include the group keys in the result index when using ``.apply()`` on the resampled object. .. versionadded:: 1.5.0 Not specifying ``group_keys`` will retain values-dependent behavior from pandas 1.4 and earlier (see :ref:`pandas 1.5.0 Release notes <whatsnew_150.enhancements.resample_group_keys>` for examples). .. versionchanged:: 2.0.0 ``group_keys`` now defaults to ``False``. Returns ------- pandas.core.Resampler :class:`~pandas.core.Resampler` object. See Also -------- Series.resample : Resample a Series. DataFrame.resample : Resample a DataFrame. groupby : Group {klass} by mapping, function, label, or list of labels. asfreq : Reindex a {klass} with the given frequency without grouping. Notes ----- See the `user guide <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`__ for more. To learn more about the offset strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__. Examples -------- Start by creating a series with 9 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=9, freq='T') >>> series = pd.Series(range(9), index=index) >>> series 2000-01-01 00:00:00 0 2000-01-01 00:01:00 1 2000-01-01 00:02:00 2 2000-01-01 00:03:00 3 2000-01-01 00:04:00 4 2000-01-01 00:05:00 5 2000-01-01 00:06:00 6 2000-01-01 00:07:00 7 2000-01-01 00:08:00 8 Freq: T, dtype: int64 Downsample the series into 3 minute bins and sum the values of the timestamps falling into a bin. >>> series.resample('3T').sum() 2000-01-01 00:00:00 3 2000-01-01 00:03:00 12 2000-01-01 00:06:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but label each bin using the right edge instead of the left. Please note that the value in the bucket used as the label is not included in the bucket, which it labels. For example, in the original series the bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed value in the resampled bucket with the label ``2000-01-01 00:03:00`` does not include 3 (if it did, the summed value would be 6, not 3). To include this value close the right side of the bin interval as illustrated in the example below this one. 
>>> series.resample('3T', label='right').sum() 2000-01-01 00:03:00 3 2000-01-01 00:06:00 12 2000-01-01 00:09:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but close the right side of the bin interval. >>> series.resample('3T', label='right', closed='right').sum() 2000-01-01 00:00:00 0 2000-01-01 00:03:00 6 2000-01-01 00:06:00 15 2000-01-01 00:09:00 15 Freq: 3T, dtype: int64 Upsample the series into 30 second bins. >>> series.resample('30S').asfreq()[0:5] # Select first 5 rows 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 1.0 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 Freq: 30S, dtype: float64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``ffill`` method. >>> series.resample('30S').ffill()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 0 2000-01-01 00:01:00 1 2000-01-01 00:01:30 1 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``bfill`` method. >>> series.resample('30S').bfill()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 1 2000-01-01 00:01:00 1 2000-01-01 00:01:30 2 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Pass a custom function via ``apply`` >>> def custom_resampler(arraylike): ... return np.sum(arraylike) + 5 ... >>> series.resample('3T').apply(custom_resampler) 2000-01-01 00:00:00 8 2000-01-01 00:03:00 17 2000-01-01 00:06:00 26 Freq: 3T, dtype: int64 For a Series with a PeriodIndex, the keyword `convention` can be used to control whether to use the start or end of `rule`. Resample a year by quarter using 'start' `convention`. Values are assigned to the first quarter of the period. >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01', ... freq='A', ... periods=2)) >>> s 2012 1 2013 2 Freq: A-DEC, dtype: int64 >>> s.resample('Q', convention='start').asfreq() 2012Q1 1.0 2012Q2 NaN 2012Q3 NaN 2012Q4 NaN 2013Q1 2.0 2013Q2 NaN 2013Q3 NaN 2013Q4 NaN Freq: Q-DEC, dtype: float64 Resample quarters by month using 'end' `convention`. Values are assigned to the last month of the period. >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01', ... freq='Q', ... periods=4)) >>> q 2018Q1 1 2018Q2 2 2018Q3 3 2018Q4 4 Freq: Q-DEC, dtype: int64 >>> q.resample('M', convention='end').asfreq() 2018-03 1.0 2018-04 NaN 2018-05 NaN 2018-06 2.0 2018-07 NaN 2018-08 NaN 2018-09 3.0 2018-10 NaN 2018-11 NaN 2018-12 4.0 Freq: M, dtype: float64 For DataFrame objects, the keyword `on` can be used to specify the column instead of the index for resampling. >>> d = {{'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}} >>> df = pd.DataFrame(d) >>> df['week_starting'] = pd.date_range('01/01/2018', ... periods=8, ... freq='W') >>> df price volume week_starting 0 10 50 2018-01-07 1 11 60 2018-01-14 2 9 40 2018-01-21 3 13 100 2018-01-28 4 14 50 2018-02-04 5 18 100 2018-02-11 6 17 40 2018-02-18 7 19 50 2018-02-25 >>> df.resample('M', on='week_starting').mean() price volume week_starting 2018-01-31 10.75 62.5 2018-02-28 17.00 60.0 For a DataFrame with MultiIndex, the keyword `level` can be used to specify on which level the resampling needs to take place. >>> days = pd.date_range('1/1/2000', periods=4, freq='D') >>> d2 = {{'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}} >>> df2 = pd.DataFrame( ... d2, ... index=pd.MultiIndex.from_product( ... [days, ['morning', 'afternoon']] ... ) ... 
) >>> df2 price volume 2000-01-01 morning 10 50 afternoon 11 60 2000-01-02 morning 9 40 afternoon 13 100 2000-01-03 morning 14 50 afternoon 18 100 2000-01-04 morning 17 40 afternoon 19 50 >>> df2.resample('D', level=0).sum() price volume 2000-01-01 21 110 2000-01-02 22 140 2000-01-03 32 150 2000-01-04 36 90 If you want to adjust the start of the bins based on a fixed timestamp: >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00' >>> rng = pd.date_range(start, end, freq='7min') >>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng) >>> ts 2000-10-01 23:30:00 0 2000-10-01 23:37:00 3 2000-10-01 23:44:00 6 2000-10-01 23:51:00 9 2000-10-01 23:58:00 12 2000-10-02 00:05:00 15 2000-10-02 00:12:00 18 2000-10-02 00:19:00 21 2000-10-02 00:26:00 24 Freq: 7T, dtype: int64 >>> ts.resample('17min').sum() 2000-10-01 23:14:00 0 2000-10-01 23:31:00 9 2000-10-01 23:48:00 21 2000-10-02 00:05:00 54 2000-10-02 00:22:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', origin='epoch').sum() 2000-10-01 23:18:00 0 2000-10-01 23:35:00 18 2000-10-01 23:52:00 27 2000-10-02 00:09:00 39 2000-10-02 00:26:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', origin='2000-01-01').sum() 2000-10-01 23:24:00 3 2000-10-01 23:41:00 15 2000-10-01 23:58:00 45 2000-10-02 00:15:00 45 Freq: 17T, dtype: int64 If you want to adjust the start of the bins with an `offset` Timedelta, the two following lines are equivalent: >>> ts.resample('17min', origin='start').sum() 2000-10-01 23:30:00 9 2000-10-01 23:47:00 21 2000-10-02 00:04:00 54 2000-10-02 00:21:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', offset='23h30min').sum() 2000-10-01 23:30:00 9 2000-10-01 23:47:00 21 2000-10-02 00:04:00 54 2000-10-02 00:21:00 24 Freq: 17T, dtype: int64 If you want to take the largest Timestamp as the end of the bins: >>> ts.resample('17min', origin='end').sum() 2000-10-01 23:35:00 0 2000-10-01 23:52:00 18 2000-10-02 00:09:00 27 2000-10-02 00:26:00 63 Freq: 17T, dtype: int64 In contrast with the `start_day`, you can use `end_day` to take the ceiling midnight of the largest Timestamp as the end of the bins and drop the bins not containing data: >>> ts.resample('17min', origin='end_day').sum() 2000-10-01 23:38:00 3 2000-10-01 23:55:00 15 2000-10-02 00:12:00 45 2000-10-02 00:29:00 45 Freq: 17T, dtype: int64 """ from pandas.core.resample import get_resampler axis = self._get_axis_number(axis) return get_resampler( cast("Series | DataFrame", self), freq=rule, label=label, closed=closed, axis=axis, kind=kind, convention=convention, key=on, level=level, origin=origin, offset=offset, group_keys=group_keys, ) def first(self: NDFrameT, offset) -> NDFrameT: """ Select initial periods of time series data based on a date offset. When having a DataFrame with dates as index, this function can select the first few rows based on a date offset. Parameters ---------- offset : str, DateOffset or dateutil.relativedelta The offset length of the data that will be selected. For instance, '1M' will display all the rows having their index within the first month. Returns ------- Series or DataFrame A subset of the caller. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- last : Select final periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. 
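        Notes
        -----
        The cutoff is derived from ``self.index[0] + to_offset(offset)``; with
        a tick-like (fixed-duration) offset such as ``'2D'``, rows falling on
        or after that cutoff are excluded. A minimal doctest sketch (data
        invented for illustration):

        >>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
        >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
        >>> ts.first('2D')
                    A
        2018-04-09  1
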
Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the first 3 days: >>> ts.first('3D') A 2018-04-09 1 2018-04-11 2 Notice the data for 3 first calendar days were returned, not the first 3 days observed in the dataset, and therefore data for 2018-04-13 was not returned. """ if not isinstance(self.index, DatetimeIndex): raise TypeError("'first' only supports a DatetimeIndex index") if len(self.index) == 0: return self.copy(deep=False) offset = to_offset(offset) if not isinstance(offset, Tick) and offset.is_on_offset(self.index[0]): # GH#29623 if first value is end of period, remove offset with n = 1 # before adding the real offset end_date = end = self.index[0] - offset.base + offset else: end_date = end = self.index[0] + offset # Tick-like, e.g. 3 weeks if isinstance(offset, Tick) and end_date in self.index: end = self.index.searchsorted(end_date, side="left") return self.iloc[:end] return self.loc[:end] def last(self: NDFrameT, offset) -> NDFrameT: """ Select final periods of time series data based on a date offset. For a DataFrame with a sorted DatetimeIndex, this function selects the last few rows based on a date offset. Parameters ---------- offset : str, DateOffset, dateutil.relativedelta The offset length of the data that will be selected. For instance, '3D' will display all the rows having their index within the last 3 days. Returns ------- Series or DataFrame A subset of the caller. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- first : Select initial periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the last 3 days: >>> ts.last('3D') A 2018-04-13 3 2018-04-15 4 Notice the data for 3 last calendar days were returned, not the last 3 observed days in the dataset, and therefore data for 2018-04-11 was not returned. """ if not isinstance(self.index, DatetimeIndex): raise TypeError("'last' only supports a DatetimeIndex index") if len(self.index) == 0: return self.copy(deep=False) offset = to_offset(offset) start_date = self.index[-1] - offset start = self.index.searchsorted(start_date, side="right") return self.iloc[start:] def rank( self: NDFrameT, axis: Axis = 0, method: str = "average", numeric_only: bool_t = False, na_option: str = "keep", ascending: bool_t = True, pct: bool_t = False, ) -> NDFrameT: """ Compute numerical data ranks (1 through n) along axis. By default, equal values are assigned a rank that is the average of the ranks of those values. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 Index to direct ranking. For `Series` this parameter is unused and defaults to 0. method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' How to rank the group of records that have the same value (i.e. ties): * average: average rank of the group * min: lowest rank in the group * max: highest rank in the group * first: ranks assigned in order they appear in the array * dense: like 'min', but rank always increases by 1 between groups. numeric_only : bool, default False For DataFrame objects, rank only numeric columns if set to True. .. 
versionchanged:: 2.0.0 The default value of ``numeric_only`` is now ``False``. na_option : {'keep', 'top', 'bottom'}, default 'keep' How to rank NaN values: * keep: assign NaN rank to NaN values * top: assign lowest rank to NaN values * bottom: assign highest rank to NaN values ascending : bool, default True Whether or not the elements should be ranked in ascending order. pct : bool, default False Whether or not to display the returned rankings in percentile form. Returns ------- same type as caller Return a Series or DataFrame with data ranks as values. See Also -------- core.groupby.DataFrameGroupBy.rank : Rank of values within each group. core.groupby.SeriesGroupBy.rank : Rank of values within each group. Examples -------- >>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog', ... 'spider', 'snake'], ... 'Number_legs': [4, 2, 4, 8, np.nan]}) >>> df Animal Number_legs 0 cat 4.0 1 penguin 2.0 2 dog 4.0 3 spider 8.0 4 snake NaN Ties are assigned the mean of the ranks (by default) for the group. >>> s = pd.Series(range(5), index=list("abcde")) >>> s["d"] = s["b"] >>> s.rank() a 1.0 b 2.5 c 4.0 d 2.5 e 5.0 dtype: float64 The following example shows how the method behaves with the above parameters: * default_rank: this is the default behaviour obtained without using any parameter. * max_rank: setting ``method = 'max'`` the records that have the same values are ranked using the highest rank (e.g.: since 'cat' and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.) * NA_bottom: choosing ``na_option = 'bottom'``, if there are records with NaN values they are placed at the bottom of the ranking. * pct_rank: when setting ``pct = True``, the ranking is expressed as percentile rank. >>> df['default_rank'] = df['Number_legs'].rank() >>> df['max_rank'] = df['Number_legs'].rank(method='max') >>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom') >>> df['pct_rank'] = df['Number_legs'].rank(pct=True) >>> df Animal Number_legs default_rank max_rank NA_bottom pct_rank 0 cat 4.0 2.5 3.0 2.5 0.625 1 penguin 2.0 1.0 1.0 1.0 0.250 2 dog 4.0 2.5 3.0 2.5 0.625 3 spider 8.0 4.0 4.0 4.0 1.000 4 snake NaN NaN NaN 5.0 NaN """ axis_int = self._get_axis_number(axis) if na_option not in {"keep", "top", "bottom"}: msg = "na_option must be one of 'keep', 'top', or 'bottom'" raise ValueError(msg) def ranker(data): if data.ndim == 2: # i.e. DataFrame, we cast to ndarray values = data.values else: # i.e. Series, can dispatch to EA values = data._values if isinstance(values, ExtensionArray): ranks = values._rank( axis=axis_int, method=method, ascending=ascending, na_option=na_option, pct=pct, ) else: ranks = algos.rank( values, axis=axis_int, method=method, ascending=ascending, na_option=na_option, pct=pct, ) ranks_obj = self._constructor(ranks, **data._construct_axes_dict()) return ranks_obj.__finalize__(self, method="rank") if numeric_only: if self.ndim == 1 and not is_numeric_dtype(self.dtype): # GH#47500 raise TypeError( "Series.rank does not allow numeric_only=True with " "non-numeric dtype." 
) data = self._get_numeric_data() else: data = self return ranker(data) def compare( self, other, align_axis: Axis = 1, keep_shape: bool_t = False, keep_equal: bool_t = False, result_names: Suffixes = ("self", "other"), ): if type(self) is not type(other): cls_self, cls_other = type(self).__name__, type(other).__name__ raise TypeError( f"can only compare '{cls_self}' (not '{cls_other}') with '{cls_self}'" ) mask = ~((self == other) | (self.isna() & other.isna())) mask.fillna(True, inplace=True) if not keep_equal: self = self.where(mask) other = other.where(mask) if not keep_shape: if isinstance(self, ABCDataFrame): cmask = mask.any() rmask = mask.any(axis=1) self = self.loc[rmask, cmask] other = other.loc[rmask, cmask] else: self = self[mask] other = other[mask] if not isinstance(result_names, tuple): raise TypeError( f"Passing 'result_names' as a {type(result_names)} is not " "supported. Provide 'result_names' as a tuple instead." ) if align_axis in (1, "columns"): # This is needed for Series axis = 1 else: axis = self._get_axis_number(align_axis) diff = concat([self, other], axis=axis, keys=result_names) if axis >= self.ndim: # No need to reorganize data if stacking on new axis # This currently applies for stacking two Series on columns return diff ax = diff._get_axis(axis) ax_names = np.array(ax.names) # set index names to positions to avoid confusion ax.names = np.arange(len(ax_names)) # bring self-other to inner level order = list(range(1, ax.nlevels)) + [0] if isinstance(diff, ABCDataFrame): diff = diff.reorder_levels(order, axis=axis) else: diff = diff.reorder_levels(order) # restore the index names in order diff._get_axis(axis=axis).names = ax_names[order] # reorder axis to keep things organized indices = ( np.arange(diff.shape[axis]).reshape([2, diff.shape[axis] // 2]).T.flatten() ) diff = diff.take(indices, axis=axis) return diff def align( self: NDFrameT, other: NDFrameT, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool_t | None = None, fill_value: Hashable = None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> NDFrameT: """ Align two objects on their axes with the specified join method. Join method is specified for each axis Index. Parameters ---------- other : DataFrame or Series join : {{'outer', 'inner', 'left', 'right'}}, default 'outer' axis : allowed axis of the other object, default None Align on index (0), columns (1), or both (None). level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. copy : bool, default True Always returns new objects. If copy=False and no reindexing is required then original objects are returned. fill_value : scalar, default np.NaN Value to use for missing values. Defaults to NaN, but can be any "compatible" value. method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None Method to use for filling holes in reindexed Series: - pad / ffill: propagate last valid observation forward to next valid. - backfill / bfill: use NEXT valid observation to fill gap. limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. 
fill_axis : {axes_single_arg}, default 0 Filling axis, method and limit. broadcast_axis : {axes_single_arg}, default None Broadcast values along this axis, if aligning two objects of different dimensions. Returns ------- tuple of ({klass}, type of other) Aligned objects. Examples -------- >>> df = pd.DataFrame( ... [[1, 2, 3, 4], [6, 7, 8, 9]], columns=["D", "B", "E", "A"], index=[1, 2] ... ) >>> other = pd.DataFrame( ... [[10, 20, 30, 40], [60, 70, 80, 90], [600, 700, 800, 900]], ... columns=["A", "B", "C", "D"], ... index=[2, 3, 4], ... ) >>> df D B E A 1 1 2 3 4 2 6 7 8 9 >>> other A B C D 2 10 20 30 40 3 60 70 80 90 4 600 700 800 900 Align on columns: >>> left, right = df.align(other, join="outer", axis=1) >>> left A B C D E 1 4 2 NaN 1 3 2 9 7 NaN 6 8 >>> right A B C D E 2 10 20 30 40 NaN 3 60 70 80 90 NaN 4 600 700 800 900 NaN We can also align on the index: >>> left, right = df.align(other, join="outer", axis=0) >>> left D B E A 1 1.0 2.0 3.0 4.0 2 6.0 7.0 8.0 9.0 3 NaN NaN NaN NaN 4 NaN NaN NaN NaN >>> right A B C D 1 NaN NaN NaN NaN 2 10.0 20.0 30.0 40.0 3 60.0 70.0 80.0 90.0 4 600.0 700.0 800.0 900.0 Finally, the default `axis=None` will align on both index and columns: >>> left, right = df.align(other, join="outer", axis=None) >>> left A B C D E 1 4.0 2.0 NaN 1.0 3.0 2 9.0 7.0 NaN 6.0 8.0 3 NaN NaN NaN NaN NaN 4 NaN NaN NaN NaN NaN >>> right A B C D E 1 NaN NaN NaN NaN NaN 2 10.0 20.0 30.0 40.0 NaN 3 60.0 70.0 80.0 90.0 NaN 4 600.0 700.0 800.0 900.0 NaN """ method = clean_fill_method(method) if broadcast_axis == 1 and self.ndim != other.ndim: if isinstance(self, ABCSeries): # this means other is a DataFrame, and we need to broadcast # self cons = self._constructor_expanddim df = cons( {c: self for c in other.columns}, **other._construct_axes_dict() ) return df._align_frame( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) elif isinstance(other, ABCSeries): # this means self is a DataFrame, and we need to broadcast # other cons = other._constructor_expanddim df = cons( {c: other for c in self.columns}, **self._construct_axes_dict() ) return self._align_frame( df, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) if axis is not None: axis = self._get_axis_number(axis) if isinstance(other, ABCDataFrame): return self._align_frame( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) elif isinstance(other, ABCSeries): return self._align_series( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def _align_frame( self, other, join: AlignJoin = "outer", axis: Axis | None = None, level=None, copy: bool_t | None = None, fill_value=None, method=None, limit=None, fill_axis: Axis = 0, ): # defaults join_index, join_columns = None, None ilidx, iridx = None, None clidx, cridx = None, None is_series = isinstance(self, ABCSeries) if (axis is None or axis == 0) and not self.index.equals(other.index): join_index, ilidx, iridx = self.index.join( other.index, how=join, level=level, return_indexers=True ) if ( (axis is None or axis == 1) and not is_series and not self.columns.equals(other.columns) ): join_columns, clidx, cridx = self.columns.join( other.columns, how=join, level=level, return_indexers=True ) if is_series: reindexers 
= {0: [join_index, ilidx]} else: reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]} left = self._reindex_with_indexers( reindexers, copy=copy, fill_value=fill_value, allow_dups=True ) # other must be always DataFrame right = other._reindex_with_indexers( {0: [join_index, iridx], 1: [join_columns, cridx]}, copy=copy, fill_value=fill_value, allow_dups=True, ) if method is not None: _left = left.fillna(method=method, axis=fill_axis, limit=limit) assert _left is not None # needed for mypy left = _left right = right.fillna(method=method, axis=fill_axis, limit=limit) # if DatetimeIndex have different tz, convert to UTC left, right = _align_as_utc(left, right, join_index) return ( left.__finalize__(self), right.__finalize__(other), ) def _align_series( self, other, join: AlignJoin = "outer", axis: Axis | None = None, level=None, copy: bool_t | None = None, fill_value=None, method=None, limit=None, fill_axis: Axis = 0, ): is_series = isinstance(self, ABCSeries) if copy and using_copy_on_write(): copy = False if (not is_series and axis is None) or axis not in [None, 0, 1]: raise ValueError("Must specify axis=0 or 1") if is_series and axis == 1: raise ValueError("cannot align series to a series other than axis 0") # series/series compat, other must always be a Series if not axis: # equal if self.index.equals(other.index): join_index, lidx, ridx = None, None, None else: join_index, lidx, ridx = self.index.join( other.index, how=join, level=level, return_indexers=True ) if is_series: left = self._reindex_indexer(join_index, lidx, copy) elif lidx is None or join_index is None: left = self.copy(deep=copy) else: left = self._constructor( self._mgr.reindex_indexer(join_index, lidx, axis=1, copy=copy) ) right = other._reindex_indexer(join_index, ridx, copy) else: # one has > 1 ndim fdata = self._mgr join_index = self.axes[1] lidx, ridx = None, None if not join_index.equals(other.index): join_index, lidx, ridx = join_index.join( other.index, how=join, level=level, return_indexers=True ) if lidx is not None: bm_axis = self._get_block_manager_axis(1) fdata = fdata.reindex_indexer(join_index, lidx, axis=bm_axis) if copy and fdata is self._mgr: fdata = fdata.copy() left = self._constructor(fdata) if ridx is None: right = other.copy(deep=copy) else: right = other.reindex(join_index, level=level) # fill fill_na = notna(fill_value) or (method is not None) if fill_na: left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis) right = right.fillna(fill_value, method=method, limit=limit) # if DatetimeIndex have different tz, convert to UTC if is_series or (not is_series and axis == 0): left, right = _align_as_utc(left, right, join_index) return ( left.__finalize__(self), right.__finalize__(other), ) def _where( self, cond, other=lib.no_default, inplace: bool_t = False, axis: Axis | None = None, level=None, ): """ Equivalent to public method `where`, except that `other` is not applied as a function even if callable. Used in __setitem__. 
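        A rough sketch of one such route (names invented for illustration):
        boolean-mask assignment like ``df[df > 0] = 1`` reaches this method
        with the condition inverted, roughly
        ``df._where(-(df > 0), 1, inplace=True)``.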
""" inplace = validate_bool_kwarg(inplace, "inplace") if axis is not None: axis = self._get_axis_number(axis) # align the cond to same shape as myself cond = common.apply_if_callable(cond, self) if isinstance(cond, NDFrame): # CoW: Make sure reference is not kept alive cond = cond.align(self, join="right", broadcast_axis=1, copy=False)[0] else: if not hasattr(cond, "shape"): cond = np.asanyarray(cond) if cond.shape != self.shape: raise ValueError("Array conditional must be same shape as self") cond = self._constructor(cond, **self._construct_axes_dict(), copy=False) # make sure we are boolean fill_value = bool(inplace) cond = cond.fillna(fill_value) msg = "Boolean array expected for the condition, not {dtype}" if not cond.empty: if not isinstance(cond, ABCDataFrame): # This is a single-dimensional object. if not is_bool_dtype(cond): raise ValueError(msg.format(dtype=cond.dtype)) else: for _dt in cond.dtypes: if not is_bool_dtype(_dt): raise ValueError(msg.format(dtype=_dt)) else: # GH#21947 we have an empty DataFrame/Series, could be object-dtype cond = cond.astype(bool) cond = -cond if inplace else cond cond = cond.reindex(self._info_axis, axis=self._info_axis_number, copy=False) # try to align with other if isinstance(other, NDFrame): # align with me if other.ndim <= self.ndim: # CoW: Make sure reference is not kept alive other = self.align( other, join="left", axis=axis, level=level, fill_value=None, copy=False, )[1] # if we are NOT aligned, raise as we cannot where index if axis is None and not other._indexed_same(self): raise InvalidIndexError if other.ndim < self.ndim: # TODO(EA2D): avoid object-dtype cast in EA case GH#38729 other = other._values if axis == 0: other = np.reshape(other, (-1, 1)) elif axis == 1: other = np.reshape(other, (1, -1)) other = np.broadcast_to(other, self.shape) # slice me out of the other else: raise NotImplementedError( "cannot align with a higher dimensional NDFrame" ) elif not isinstance(other, (MultiIndex, NDFrame)): # mainly just catching Index here other = extract_array(other, extract_numpy=True) if isinstance(other, (np.ndarray, ExtensionArray)): if other.shape != self.shape: if self.ndim != 1: # In the ndim == 1 case we may have # other length 1, which we treat as scalar (GH#2745, GH#4192) # or len(other) == icond.sum(), which we treat like # __setitem__ (GH#3235) raise ValueError( "other must be the same shape as self when an ndarray" ) # we are the same shape, so create an actual object for alignment else: other = self._constructor( other, **self._construct_axes_dict(), copy=False ) if axis is None: axis = 0 if self.ndim == getattr(other, "ndim", 0): align = True else: align = self._get_axis_number(axis) == 1 if inplace: # we may have different type blocks come out of putmask, so # reconstruct the block manager self._check_inplace_setting(other) new_data = self._mgr.putmask(mask=cond, new=other, align=align) result = self._constructor(new_data) return self._update_inplace(result) else: new_data = self._mgr.where( other=other, cond=cond, align=align, ) result = self._constructor(new_data) return result.__finalize__(self) def where( self: NDFrameT, cond, other=..., *, inplace: Literal[False] = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT: ... def where( self, cond, other=..., *, inplace: Literal[True], axis: Axis | None = ..., level: Level = ..., ) -> None: ... def where( self: NDFrameT, cond, other=..., *, inplace: bool_t = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT | None: ... 
klass=_shared_doc_kwargs["klass"], cond="True", cond_rev="False", name="where", name_other="mask", ) def where( self: NDFrameT, cond, other=np.nan, *, inplace: bool_t = False, axis: Axis | None = None, level: Level = None, ) -> NDFrameT | None: """ Replace values where the condition is {cond_rev}. Parameters ---------- cond : bool {klass}, array-like, or callable Where `cond` is {cond}, keep the original value. Where {cond_rev}, replace with corresponding value from `other`. If `cond` is callable, it is computed on the {klass} and should return boolean {klass} or array. The callable must not change input {klass} (though pandas doesn't check it). other : scalar, {klass}, or callable Entries where `cond` is {cond_rev} are replaced with corresponding value from `other`. If other is callable, it is computed on the {klass} and should return scalar or {klass}. The callable must not change input {klass} (though pandas doesn't check it). If not specified, entries will be filled with the corresponding NULL value (``np.nan`` for numpy dtypes, ``pd.NA`` for extension dtypes). inplace : bool, default False Whether to perform the operation in place on the data. axis : int, default None Alignment axis if needed. For `Series` this parameter is unused and defaults to 0. level : int, default None Alignment level if needed. Returns ------- Same type as caller or None if ``inplace=True``. See Also -------- :func:`DataFrame.{name_other}` : Return an object of same shape as self. Notes ----- The {name} method is an application of the if-then idiom. For each element in the calling DataFrame, if ``cond`` is ``{cond}`` the element is used; otherwise the corresponding element from the DataFrame ``other`` is used. If the axis of ``other`` does not align with axis of ``cond`` {klass}, the misaligned index positions will be filled with {cond_rev}. The signature for :func:`DataFrame.where` differs from :func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to ``np.where(m, df1, df2)``. For further details and examples see the ``{name}`` documentation in :ref:`indexing <indexing.where_mask>`. The dtype of the object takes precedence. The fill value is casted to the object's dtype, if this can be done losslessly. Examples -------- >>> s = pd.Series(range(5)) >>> s.where(s > 0) 0 NaN 1 1.0 2 2.0 3 3.0 4 4.0 dtype: float64 >>> s.mask(s > 0) 0 0.0 1 NaN 2 NaN 3 NaN 4 NaN dtype: float64 >>> s = pd.Series(range(5)) >>> t = pd.Series([True, False]) >>> s.where(t, 99) 0 0 1 99 2 99 3 99 4 99 dtype: int64 >>> s.mask(t, 99) 0 99 1 1 2 99 3 99 4 99 dtype: int64 >>> s.where(s > 1, 10) 0 10 1 10 2 2 3 3 4 4 dtype: int64 >>> s.mask(s > 1, 10) 0 0 1 1 2 10 3 10 4 10 dtype: int64 >>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B']) >>> df A B 0 0 1 1 2 3 2 4 5 3 6 7 4 8 9 >>> m = df % 3 == 0 >>> df.where(m, -df) A B 0 0 -1 1 -2 3 2 -4 -5 3 6 -7 4 -8 9 >>> df.where(m, -df) == np.where(m, df, -df) A B 0 True True 1 True True 2 True True 3 True True 4 True True >>> df.where(m, -df) == df.mask(~m, -df) A B 0 True True 1 True True 2 True True 3 True True 4 True True """ other = common.apply_if_callable(other, self) return self._where(cond, other, inplace, axis, level) def mask( self: NDFrameT, cond, other=..., *, inplace: Literal[False] = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT: ... def mask( self, cond, other=..., *, inplace: Literal[True], axis: Axis | None = ..., level: Level = ..., ) -> None: ... 
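# --- Illustrative note (added) ---------------------------------------------
# The overload stubs above and below encode the usual pandas typing
# convention for `inplace`: a call with `inplace=True` mutates the caller
# and is typed as returning None, while `inplace=False` returns a fresh
# object of the caller's type (NDFrameT).
# ---------------------------------------------------------------------------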
def mask( self: NDFrameT, cond, other=..., *, inplace: bool_t = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT | None: ... where, klass=_shared_doc_kwargs["klass"], cond="False", cond_rev="True", name="mask", name_other="where", ) def mask( self: NDFrameT, cond, other=lib.no_default, *, inplace: bool_t = False, axis: Axis | None = None, level: Level = None, ) -> NDFrameT | None: inplace = validate_bool_kwarg(inplace, "inplace") cond = common.apply_if_callable(cond, self) # see gh-21891 if not hasattr(cond, "__invert__"): cond = np.array(cond) return self.where( ~cond, other=other, inplace=inplace, axis=axis, level=level, ) def shift( self: NDFrameT, periods: int = 1, freq=None, axis: Axis = 0, fill_value: Hashable = None, ) -> NDFrameT: """ Shift index by desired number of periods with an optional time `freq`. When `freq` is not passed, shift the index without realigning the data. If `freq` is passed (in this case, the index must be date or datetime, or it will raise a `NotImplementedError`), the index will be increased using the periods and the `freq`. `freq` can be inferred when specified as "infer" as long as either freq or inferred_freq attribute is set in the index. Parameters ---------- periods : int Number of periods to shift. Can be positive or negative. freq : DateOffset, tseries.offsets, timedelta, or str, optional Offset to use from the tseries module or time rule (e.g. 'EOM'). If `freq` is specified then the index values are shifted but the data is not realigned. That is, use `freq` if you would like to extend the index when shifting and preserve the original data. If `freq` is specified as "infer" then it will be inferred from the freq or inferred_freq attributes of the index. If neither of those attributes exist, a ValueError is thrown. axis : {{0 or 'index', 1 or 'columns', None}}, default None Shift direction. For `Series` this parameter is unused and defaults to 0. fill_value : object, optional The scalar value to use for newly introduced missing values. the default depends on the dtype of `self`. For numeric data, ``np.nan`` is used. For datetime, timedelta, or period data, etc. :attr:`NaT` is used. For extension dtypes, ``self.dtype.na_value`` is used. .. versionchanged:: 1.1.0 Returns ------- {klass} Copy of input object, shifted. See Also -------- Index.shift : Shift values of Index. DatetimeIndex.shift : Shift values of DatetimeIndex. PeriodIndex.shift : Shift values of PeriodIndex. Examples -------- >>> df = pd.DataFrame({{"Col1": [10, 20, 15, 30, 45], ... "Col2": [13, 23, 18, 33, 48], ... "Col3": [17, 27, 22, 37, 52]}}, ... 
index=pd.date_range("2020-01-01", "2020-01-05")) >>> df Col1 Col2 Col3 2020-01-01 10 13 17 2020-01-02 20 23 27 2020-01-03 15 18 22 2020-01-04 30 33 37 2020-01-05 45 48 52 >>> df.shift(periods=3) Col1 Col2 Col3 2020-01-01 NaN NaN NaN 2020-01-02 NaN NaN NaN 2020-01-03 NaN NaN NaN 2020-01-04 10.0 13.0 17.0 2020-01-05 20.0 23.0 27.0 >>> df.shift(periods=1, axis="columns") Col1 Col2 Col3 2020-01-01 NaN 10 13 2020-01-02 NaN 20 23 2020-01-03 NaN 15 18 2020-01-04 NaN 30 33 2020-01-05 NaN 45 48 >>> df.shift(periods=3, fill_value=0) Col1 Col2 Col3 2020-01-01 0 0 0 2020-01-02 0 0 0 2020-01-03 0 0 0 2020-01-04 10 13 17 2020-01-05 20 23 27 >>> df.shift(periods=3, freq="D") Col1 Col2 Col3 2020-01-04 10 13 17 2020-01-05 20 23 27 2020-01-06 15 18 22 2020-01-07 30 33 37 2020-01-08 45 48 52 >>> df.shift(periods=3, freq="infer") Col1 Col2 Col3 2020-01-04 10 13 17 2020-01-05 20 23 27 2020-01-06 15 18 22 2020-01-07 30 33 37 2020-01-08 45 48 52 """ if periods == 0: return self.copy(deep=None) if freq is None: # when freq is None, data is shifted, index is not axis = self._get_axis_number(axis) new_data = self._mgr.shift( periods=periods, axis=axis, fill_value=fill_value ) return self._constructor(new_data).__finalize__(self, method="shift") # when freq is given, index is shifted, data is not index = self._get_axis(axis) if freq == "infer": freq = getattr(index, "freq", None) if freq is None: freq = getattr(index, "inferred_freq", None) if freq is None: msg = "Freq was not set in the index hence cannot be inferred" raise ValueError(msg) elif isinstance(freq, str): freq = to_offset(freq) if isinstance(index, PeriodIndex): orig_freq = to_offset(index.freq) if freq != orig_freq: assert orig_freq is not None # for mypy raise ValueError( f"Given freq {freq.rule_code} does not match " f"PeriodIndex freq {orig_freq.rule_code}" ) new_ax = index.shift(periods) else: new_ax = index.shift(periods, freq) result = self.set_axis(new_ax, axis=axis) return result.__finalize__(self, method="shift") def truncate( self: NDFrameT, before=None, after=None, axis: Axis | None = None, copy: bool_t | None = None, ) -> NDFrameT: """ Truncate a Series or DataFrame before and after some index value. This is a useful shorthand for boolean indexing based on index values above or below certain thresholds. Parameters ---------- before : date, str, int Truncate all rows before this index value. after : date, str, int Truncate all rows after this index value. axis : {0 or 'index', 1 or 'columns'}, optional Axis to truncate. Truncates the index (rows) by default. For `Series` this parameter is unused and defaults to 0. copy : bool, default is True, Return a copy of the truncated section. Returns ------- type of caller The truncated Series or DataFrame. See Also -------- DataFrame.loc : Select a subset of a DataFrame by label. DataFrame.iloc : Select a subset of a DataFrame by position. Notes ----- If the index being truncated contains only datetime values, `before` and `after` may be specified as strings instead of Timestamps. Examples -------- >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'], ... 'B': ['f', 'g', 'h', 'i', 'j'], ... 'C': ['k', 'l', 'm', 'n', 'o']}, ... index=[1, 2, 3, 4, 5]) >>> df A B C 1 a f k 2 b g l 3 c h m 4 d i n 5 e j o >>> df.truncate(before=2, after=4) A B C 2 b g l 3 c h m 4 d i n The columns of a DataFrame can be truncated. >>> df.truncate(before="A", after="B", axis="columns") A B 1 a f 2 b g 3 c h 4 d i 5 e j For Series, only rows can be truncated. 
>>> df['A'].truncate(before=2, after=4) 2 b 3 c 4 d Name: A, dtype: object The index values in ``truncate`` can be datetimes or string dates. >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s') >>> df = pd.DataFrame(index=dates, data={'A': 1}) >>> df.tail() A 2016-01-31 23:59:56 1 2016-01-31 23:59:57 1 2016-01-31 23:59:58 1 2016-01-31 23:59:59 1 2016-02-01 00:00:00 1 >>> df.truncate(before=pd.Timestamp('2016-01-05'), ... after=pd.Timestamp('2016-01-10')).tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Because the index is a DatetimeIndex containing only dates, we can specify `before` and `after` as strings. They will be coerced to Timestamps before truncation. >>> df.truncate('2016-01-05', '2016-01-10').tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Note that ``truncate`` assumes a 0 value for any unspecified time component (midnight). This differs from partial string slicing, which returns any partially matching dates. >>> df.loc['2016-01-05':'2016-01-10', :].tail() A 2016-01-10 23:59:55 1 2016-01-10 23:59:56 1 2016-01-10 23:59:57 1 2016-01-10 23:59:58 1 2016-01-10 23:59:59 1 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) ax = self._get_axis(axis) # GH 17935 # Check that index is sorted if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing: raise ValueError("truncate requires a sorted index") # if we have a date index, convert to dates, otherwise # treat like a slice if ax._is_all_dates: from pandas.core.tools.datetimes import to_datetime before = to_datetime(before) after = to_datetime(after) if before is not None and after is not None and before > after: raise ValueError(f"Truncate: {after} must be after {before}") if len(ax) > 1 and ax.is_monotonic_decreasing and ax.nunique() > 1: before, after = after, before slicer = [slice(None, None)] * self._AXIS_LEN slicer[axis] = slice(before, after) result = self.loc[tuple(slicer)] if isinstance(ax, MultiIndex): setattr(result, self._get_axis_name(axis), ax.truncate(before, after)) result = result.copy(deep=copy and not using_copy_on_write()) return result def tz_convert( self: NDFrameT, tz, axis: Axis = 0, level=None, copy: bool_t | None = None ) -> NDFrameT: """ Convert tz-aware axis to target time zone. Parameters ---------- tz : str or tzinfo object or None Target time zone. Passing ``None`` will convert to UTC and remove the timezone information. axis : {{0 or 'index', 1 or 'columns'}}, default 0 The axis to convert level : int, str, default None If axis is a MultiIndex, convert a specific level. Otherwise must be None. copy : bool, default True Also make a copy of the underlying data. Returns ------- {klass} Object with time zone converted axis. Raises ------ TypeError If the axis is tz-naive. Examples -------- Change to another time zone: >>> s = pd.Series( ... [1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00']), ... ) >>> s.tz_convert('Asia/Shanghai') 2018-09-15 07:30:00+08:00 1 dtype: int64 Pass None to convert to UTC and get a tz-naive index: >>> s = pd.Series([1], ... 
index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00'])) >>> s.tz_convert(None) 2018-09-14 23:30:00 1 dtype: int64 """ axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_convert(ax, tz): if not hasattr(ax, "tz_convert"): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError( f"{ax_name} is not a valid DatetimeIndex or PeriodIndex" ) ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_convert(tz) return ax # if a level is given it must be a MultiIndex level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_convert(ax.levels[level], tz) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError(f"The level {level} is not valid") ax = _tz_convert(ax, tz) result = self.copy(deep=copy and not using_copy_on_write()) result = result.set_axis(ax, axis=axis, copy=False) return result.__finalize__(self, method="tz_convert") def tz_localize( self: NDFrameT, tz, axis: Axis = 0, level=None, copy: bool_t | None = None, ambiguous: TimeAmbiguous = "raise", nonexistent: TimeNonexistent = "raise", ) -> NDFrameT: """ Localize tz-naive index of a Series or DataFrame to target time zone. This operation localizes the Index. To localize the values in a timezone-naive Series, use :meth:`Series.dt.tz_localize`. Parameters ---------- tz : str or tzinfo or None Time zone to localize. Passing ``None`` will remove the time zone information and preserve local time. axis : {{0 or 'index', 1 or 'columns'}}, default 0 The axis to localize level : int, str, default None If axis ia a MultiIndex, localize a specific level. Otherwise must be None. copy : bool, default True Also make a copy of the underlying data. ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' When clocks moved backward due to DST, ambiguous times may arise. For example in Central European Time (UTC+01), when going from 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter dictates how ambiguous times should be handled. - 'infer' will attempt to infer fall dst-transition hours based on order - bool-ndarray where True signifies a DST time, False designates a non-DST time (note that this flag is only applicable for ambiguous times) - 'NaT' will return NaT where there are ambiguous times - 'raise' will raise an AmbiguousTimeError if there are ambiguous times. nonexistent : str, default 'raise' A nonexistent time does not exist in a particular timezone where clocks moved forward due to DST. Valid values are: - 'shift_forward' will shift the nonexistent time forward to the closest existing time - 'shift_backward' will shift the nonexistent time backward to the closest existing time - 'NaT' will return NaT where there are nonexistent times - timedelta objects will shift nonexistent times by the timedelta - 'raise' will raise an NonExistentTimeError if there are nonexistent times. Returns ------- {klass} Same type as the input. Raises ------ TypeError If the TimeSeries is tz-aware and tz is not None. Examples -------- Localize local times: >>> s = pd.Series( ... [1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00']), ... ) >>> s.tz_localize('CET') 2018-09-15 01:30:00+02:00 1 dtype: int64 Pass None to convert to tz-naive index and preserve local time: >>> s = pd.Series([1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00'])) >>> s.tz_localize(None) 2018-09-15 01:30:00 1 dtype: int64 Be careful with DST changes. 
When there is sequential data, pandas can infer the DST time: >>> s = pd.Series(range(7), ... index=pd.DatetimeIndex(['2018-10-28 01:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 03:00:00', ... '2018-10-28 03:30:00'])) >>> s.tz_localize('CET', ambiguous='infer') 2018-10-28 01:30:00+02:00 0 2018-10-28 02:00:00+02:00 1 2018-10-28 02:30:00+02:00 2 2018-10-28 02:00:00+01:00 3 2018-10-28 02:30:00+01:00 4 2018-10-28 03:00:00+01:00 5 2018-10-28 03:30:00+01:00 6 dtype: int64 In some cases, inferring the DST is impossible. In such cases, you can pass an ndarray to the ambiguous parameter to set the DST explicitly >>> s = pd.Series(range(3), ... index=pd.DatetimeIndex(['2018-10-28 01:20:00', ... '2018-10-28 02:36:00', ... '2018-10-28 03:46:00'])) >>> s.tz_localize('CET', ambiguous=np.array([True, True, False])) 2018-10-28 01:20:00+02:00 0 2018-10-28 02:36:00+02:00 1 2018-10-28 03:46:00+01:00 2 dtype: int64 If the DST transition causes nonexistent times, you can shift these dates forward or backward with a timedelta object or `'shift_forward'` or `'shift_backward'`. >>> s = pd.Series(range(2), ... index=pd.DatetimeIndex(['2015-03-29 02:30:00', ... '2015-03-29 03:30:00'])) >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward') 2015-03-29 03:00:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward') 2015-03-29 01:59:59.999999999+01:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H')) 2015-03-29 03:30:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 """ nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward") if nonexistent not in nonexistent_options and not isinstance( nonexistent, dt.timedelta ): raise ValueError( "The nonexistent argument must be one of 'raise', " "'NaT', 'shift_forward', 'shift_backward' or " "a timedelta object" ) axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_localize(ax, tz, ambiguous, nonexistent): if not hasattr(ax, "tz_localize"): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError( f"{ax_name} is not a valid DatetimeIndex or PeriodIndex" ) ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent) return ax # if a level is given it must be a MultiIndex level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError(f"The level {level} is not valid") ax = _tz_localize(ax, tz, ambiguous, nonexistent) result = self.copy(deep=copy and not using_copy_on_write()) result = result.set_axis(ax, axis=axis, copy=False) return result.__finalize__(self, method="tz_localize") # ---------------------------------------------------------------------- # Numeric Methods def describe( self: NDFrameT, percentiles=None, include=None, exclude=None, ) -> NDFrameT: """ Generate descriptive statistics. Descriptive statistics include those that summarize the central tendency, dispersion and shape of a dataset's distribution, excluding ``NaN`` values. Analyzes both numeric and object series, as well as ``DataFrame`` column sets of mixed data types. The output will vary depending on what is provided. Refer to the notes below for more detail. 
Parameters ---------- percentiles : list-like of numbers, optional The percentiles to include in the output. All should fall between 0 and 1. The default is ``[.25, .5, .75]``, which returns the 25th, 50th, and 75th percentiles. include : 'all', list-like of dtypes or None (default), optional A white list of data types to include in the result. Ignored for ``Series``. Here are the options: - 'all' : All columns of the input will be included in the output. - A list-like of dtypes : Limits the results to the provided data types. To limit the result to numeric types submit ``numpy.number``. To limit it instead to object columns submit the ``numpy.object`` data type. Strings can also be used in the style of ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To select pandas categorical columns, use ``'category'`` - None (default) : The result will include all numeric columns. exclude : list-like of dtypes or None (default), optional, A black list of data types to omit from the result. Ignored for ``Series``. Here are the options: - A list-like of dtypes : Excludes the provided data types from the result. To exclude numeric types submit ``numpy.number``. To exclude object columns submit the data type ``numpy.object``. Strings can also be used in the style of ``select_dtypes`` (e.g. ``df.describe(exclude=['O'])``). To exclude pandas categorical columns, use ``'category'`` - None (default) : The result will exclude nothing. Returns ------- Series or DataFrame Summary statistics of the Series or Dataframe provided. See Also -------- DataFrame.count: Count number of non-NA/null observations. DataFrame.max: Maximum of the values in the object. DataFrame.min: Minimum of the values in the object. DataFrame.mean: Mean of the values. DataFrame.std: Standard deviation of the observations. DataFrame.select_dtypes: Subset of a DataFrame including/excluding columns based on their dtype. Notes ----- For numeric data, the result's index will include ``count``, ``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and upper percentiles. By default the lower percentile is ``25`` and the upper percentile is ``75``. The ``50`` percentile is the same as the median. For object data (e.g. strings or timestamps), the result's index will include ``count``, ``unique``, ``top``, and ``freq``. The ``top`` is the most common value. The ``freq`` is the most common value's frequency. Timestamps also include the ``first`` and ``last`` items. If multiple object values have the highest count, then the ``count`` and ``top`` results will be arbitrarily chosen from among those with the highest count. For mixed data types provided via a ``DataFrame``, the default is to return only an analysis of numeric columns. If the dataframe consists only of object and categorical data without any numeric columns, the default is to return an analysis of both the object and categorical columns. If ``include='all'`` is provided as an option, the result will include a union of attributes of each type. The `include` and `exclude` parameters can be used to limit which columns in a ``DataFrame`` are analyzed for the output. The parameters are ignored when analyzing a ``Series``. Examples -------- Describing a numeric ``Series``. >>> s = pd.Series([1, 2, 3]) >>> s.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 dtype: float64 Describing a categorical ``Series``. >>> s = pd.Series(['a', 'a', 'b', 'c']) >>> s.describe() count 4 unique 3 top a freq 2 dtype: object Describing a timestamp ``Series``. 
>>> s = pd.Series([ ... np.datetime64("2000-01-01"), ... np.datetime64("2010-01-01"), ... np.datetime64("2010-01-01") ... ]) >>> s.describe() count 3 mean 2006-09-01 08:00:00 min 2000-01-01 00:00:00 25% 2004-12-31 12:00:00 50% 2010-01-01 00:00:00 75% 2010-01-01 00:00:00 max 2010-01-01 00:00:00 dtype: object Describing a ``DataFrame``. By default only numeric fields are returned. >>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']), ... 'numeric': [1, 2, 3], ... 'object': ['a', 'b', 'c'] ... }) >>> df.describe() numeric count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Describing all columns of a ``DataFrame`` regardless of data type. >>> df.describe(include='all') # doctest: +SKIP categorical numeric object count 3 3.0 3 unique 3 NaN 3 top f NaN a freq 1 NaN 1 mean NaN 2.0 NaN std NaN 1.0 NaN min NaN 1.0 NaN 25% NaN 1.5 NaN 50% NaN 2.0 NaN 75% NaN 2.5 NaN max NaN 3.0 NaN Describing a column from a ``DataFrame`` by accessing it as an attribute. >>> df.numeric.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Name: numeric, dtype: float64 Including only numeric columns in a ``DataFrame`` description. >>> df.describe(include=[np.number]) numeric count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Including only string columns in a ``DataFrame`` description. >>> df.describe(include=[object]) # doctest: +SKIP object count 3 unique 3 top a freq 1 Including only categorical columns from a ``DataFrame`` description. >>> df.describe(include=['category']) categorical count 3 unique 3 top d freq 1 Excluding numeric columns from a ``DataFrame`` description. >>> df.describe(exclude=[np.number]) # doctest: +SKIP categorical object count 3 3 unique 3 3 top f a freq 1 1 Excluding object columns from a ``DataFrame`` description. >>> df.describe(exclude=[object]) # doctest: +SKIP categorical numeric count 3 3.0 unique 3 NaN top f NaN freq 1 NaN mean NaN 2.0 std NaN 1.0 min NaN 1.0 25% NaN 1.5 50% NaN 2.0 75% NaN 2.5 max NaN 3.0 """ return describe_ndframe( obj=self, include=include, exclude=exclude, percentiles=percentiles, ) def pct_change( self: NDFrameT, periods: int = 1, fill_method: Literal["backfill", "bfill", "pad", "ffill"] | None = "pad", limit=None, freq=None, **kwargs, ) -> NDFrameT: """ Percentage change between the current and a prior element. Computes the percentage change from the immediately previous row by default. This is useful in comparing the percentage of change in a time series of elements. Parameters ---------- periods : int, default 1 Periods to shift for forming percent change. fill_method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad' How to handle NAs **before** computing percent changes. limit : int, default None The number of consecutive NAs to fill before stopping. freq : DateOffset, timedelta, or str, optional Increment to use from time series API (e.g. 'M' or BDay()). **kwargs Additional keyword arguments are passed into `DataFrame.shift` or `Series.shift`. Returns ------- Series or DataFrame The same type as the calling object. See Also -------- Series.diff : Compute the difference of two elements in a Series. DataFrame.diff : Compute the difference of two elements in a DataFrame. Series.shift : Shift the index by some number of periods. DataFrame.shift : Shift the index by some number of periods. 
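Notes
-----
(Added note, for illustration.) After the optional NA-filling step,
``pct_change(periods=n)`` is roughly equivalent to
``obj / obj.shift(n) - 1``; the implementation below computes exactly
``data / shifted - 1``.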
Examples -------- **Series** >>> s = pd.Series([90, 91, 85]) >>> s 0 90 1 91 2 85 dtype: int64 >>> s.pct_change() 0 NaN 1 0.011111 2 -0.065934 dtype: float64 >>> s.pct_change(periods=2) 0 NaN 1 NaN 2 -0.055556 dtype: float64 See the percentage change in a Series where filling NAs with last valid observation forward to next valid. >>> s = pd.Series([90, 91, None, 85]) >>> s 0 90.0 1 91.0 2 NaN 3 85.0 dtype: float64 >>> s.pct_change(fill_method='ffill') 0 NaN 1 0.011111 2 0.000000 3 -0.065934 dtype: float64 **DataFrame** Percentage change in French franc, Deutsche Mark, and Italian lira from 1980-01-01 to 1980-03-01. >>> df = pd.DataFrame({ ... 'FR': [4.0405, 4.0963, 4.3149], ... 'GR': [1.7246, 1.7482, 1.8519], ... 'IT': [804.74, 810.01, 860.13]}, ... index=['1980-01-01', '1980-02-01', '1980-03-01']) >>> df FR GR IT 1980-01-01 4.0405 1.7246 804.74 1980-02-01 4.0963 1.7482 810.01 1980-03-01 4.3149 1.8519 860.13 >>> df.pct_change() FR GR IT 1980-01-01 NaN NaN NaN 1980-02-01 0.013810 0.013684 0.006549 1980-03-01 0.053365 0.059318 0.061876 Percentage of change in GOOG and APPL stock volume. Shows computing the percentage change between columns. >>> df = pd.DataFrame({ ... '2016': [1769950, 30586265], ... '2015': [1500923, 40912316], ... '2014': [1371819, 41403351]}, ... index=['GOOG', 'APPL']) >>> df 2016 2015 2014 GOOG 1769950 1500923 1371819 APPL 30586265 40912316 41403351 >>> df.pct_change(axis='columns', periods=-1) 2016 2015 2014 GOOG 0.179241 0.094112 NaN APPL -0.252395 -0.011860 NaN """ axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name)) if fill_method is None: data = self else: _data = self.fillna(method=fill_method, axis=axis, limit=limit) assert _data is not None # needed for mypy data = _data shifted = data.shift(periods=periods, freq=freq, axis=axis, **kwargs) # Unsupported left operand type for / ("NDFrameT") rs = data / shifted - 1 # type: ignore[operator] if freq is not None: # Shift method is implemented differently when freq is not None # We want to restore the original index rs = rs.loc[~rs.index.duplicated()] rs = rs.reindex_like(data) return rs.__finalize__(self, method="pct_change") def _logical_func( self, name: str, func, axis: Axis = 0, bool_only: bool_t = False, skipna: bool_t = True, **kwargs, ) -> Series | bool_t: nv.validate_logical_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if self.ndim > 1 and axis is None: # Reduce along one dimension then the other, to simplify DataFrame._reduce res = self._logical_func( name, func, axis=0, bool_only=bool_only, skipna=skipna, **kwargs ) return res._logical_func(name, func, skipna=skipna, **kwargs) if ( self.ndim > 1 and axis == 1 and len(self._mgr.arrays) > 1 # TODO(EA2D): special-case not needed and all(x.ndim == 2 for x in self._mgr.arrays) and not kwargs ): # Fastpath avoiding potentially expensive transpose obj = self if bool_only: obj = self._get_bool_data() return obj._reduce_axis1(name, func, skipna=skipna) return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=bool_only, filter_type="bool", ) def any( self, axis: Axis = 0, bool_only: bool_t = False, skipna: bool_t = True, **kwargs, ) -> DataFrame | Series | bool_t: return self._logical_func( "any", nanops.nanany, axis, bool_only, skipna, **kwargs ) def all( self, axis: Axis = 0, bool_only: bool_t = False, skipna: bool_t = True, **kwargs, ) -> Series | bool_t: return self._logical_func( "all", nanops.nanall, axis, bool_only, skipna, **kwargs ) def _accum_func( self, name: str, func, axis: 
Axis | None = None, skipna: bool_t = True, *args, **kwargs, ): skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name) if axis is None: axis = self._stat_axis_number else: axis = self._get_axis_number(axis) if axis == 1: return self.T._accum_func( name, func, axis=0, skipna=skipna, *args, **kwargs # noqa: B026 ).T def block_accum_func(blk_values): values = blk_values.T if hasattr(blk_values, "T") else blk_values result: np.ndarray | ExtensionArray if isinstance(values, ExtensionArray): result = values._accumulate(name, skipna=skipna, **kwargs) else: result = nanops.na_accum_func(values, func, skipna=skipna) result = result.T if hasattr(result, "T") else result return result result = self._mgr.apply(block_accum_func) return self._constructor(result).__finalize__(self, method=name) def cummax(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func( "cummax", np.maximum.accumulate, axis, skipna, *args, **kwargs ) def cummin(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func( "cummin", np.minimum.accumulate, axis, skipna, *args, **kwargs ) def cumsum(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func("cumsum", np.cumsum, axis, skipna, *args, **kwargs) def cumprod(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func("cumprod", np.cumprod, axis, skipna, *args, **kwargs) def _stat_function_ddof( self, name: str, func, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: nv.validate_stat_ddof_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if axis is None: axis = self._stat_axis_number return self._reduce( func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof ) def sem( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function_ddof( "sem", nanops.nansem, axis, skipna, ddof, numeric_only, **kwargs ) def var( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function_ddof( "var", nanops.nanvar, axis, skipna, ddof, numeric_only, **kwargs ) def std( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function_ddof( "std", nanops.nanstd, axis, skipna, ddof, numeric_only, **kwargs ) def _stat_function( self, name: str, func, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): if name == "median": nv.validate_median((), kwargs) else: nv.validate_stat_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only ) def min( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return self._stat_function( "min", nanops.nanmin, axis, skipna, numeric_only, **kwargs, ) def max( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return self._stat_function( "max", nanops.nanmax, axis, skipna, numeric_only, **kwargs, ) def mean( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( 
"mean", nanops.nanmean, axis, skipna, numeric_only, **kwargs ) def median( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( "median", nanops.nanmedian, axis, skipna, numeric_only, **kwargs ) def skew( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( "skew", nanops.nanskew, axis, skipna, numeric_only, **kwargs ) def kurt( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( "kurt", nanops.nankurt, axis, skipna, numeric_only, **kwargs ) kurtosis = kurt def _min_count_stat_function( self, name: str, func, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): if name == "sum": nv.validate_sum((), kwargs) elif name == "prod": nv.validate_prod((), kwargs) else: nv.validate_stat_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if axis is None: axis = self._stat_axis_number return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count, ) def sum( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return self._min_count_stat_function( "sum", nanops.nansum, axis, skipna, numeric_only, min_count, **kwargs ) def prod( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return self._min_count_stat_function( "prod", nanops.nanprod, axis, skipna, numeric_only, min_count, **kwargs, ) product = prod def _add_numeric_operations(cls) -> None: """ Add the operations to the cls; evaluate the doc strings again """ axis_descr, name1, name2 = _doc_params(cls) _bool_doc, desc=_any_desc, name1=name1, name2=name2, axis_descr=axis_descr, see_also=_any_see_also, examples=_any_examples, empty_value=False, ) def any( self, *, axis: Axis = 0, bool_only=None, skipna: bool_t = True, **kwargs, ): return NDFrame.any( self, axis=axis, bool_only=bool_only, skipna=skipna, **kwargs, ) setattr(cls, "any", any) _bool_doc, desc=_all_desc, name1=name1, name2=name2, axis_descr=axis_descr, see_also=_all_see_also, examples=_all_examples, empty_value=True, ) def all( self, axis: Axis = 0, bool_only=None, skipna: bool_t = True, **kwargs, ): return NDFrame.all(self, axis, bool_only, skipna, **kwargs) setattr(cls, "all", all) _num_ddof_doc, desc="Return unbiased standard error of the mean over requested " "axis.\n\nNormalized by N-1 by default. This can be changed " "using the ddof argument", name1=name1, name2=name2, axis_descr=axis_descr, notes="", examples="", ) def sem( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ): return NDFrame.sem(self, axis, skipna, ddof, numeric_only, **kwargs) setattr(cls, "sem", sem) _num_ddof_doc, desc="Return unbiased variance over requested axis.\n\nNormalized by " "N-1 by default. This can be changed using the ddof argument.", name1=name1, name2=name2, axis_descr=axis_descr, notes="", examples=_var_examples, ) def var( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ): return NDFrame.var(self, axis, skipna, ddof, numeric_only, **kwargs) setattr(cls, "var", var) _num_ddof_doc, desc="Return sample standard deviation over requested axis." 
"\n\nNormalized by N-1 by default. This can be changed using the " "ddof argument.", name1=name1, name2=name2, axis_descr=axis_descr, notes=_std_notes, examples=_std_examples, ) def std( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ): return NDFrame.std(self, axis, skipna, ddof, numeric_only, **kwargs) setattr(cls, "std", std) _cnum_doc, desc="minimum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="min", examples=_cummin_examples, ) def cummin( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cummin(self, axis, skipna, *args, **kwargs) setattr(cls, "cummin", cummin) _cnum_doc, desc="maximum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="max", examples=_cummax_examples, ) def cummax( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cummax(self, axis, skipna, *args, **kwargs) setattr(cls, "cummax", cummax) _cnum_doc, desc="sum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="sum", examples=_cumsum_examples, ) def cumsum( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cumsum(self, axis, skipna, *args, **kwargs) setattr(cls, "cumsum", cumsum) _cnum_doc, desc="product", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="prod", examples=_cumprod_examples, ) def cumprod( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cumprod(self, axis, skipna, *args, **kwargs) setattr(cls, "cumprod", cumprod) # error: Untyped decorator makes function "sum" untyped _num_doc, desc="Return the sum of the values over the requested axis.\n\n" "This is equivalent to the method ``numpy.sum``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count=_min_count_stub, see_also=_stat_func_see_also, examples=_sum_examples, ) def sum( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return NDFrame.sum(self, axis, skipna, numeric_only, min_count, **kwargs) setattr(cls, "sum", sum) _num_doc, desc="Return the product of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count=_min_count_stub, see_also=_stat_func_see_also, examples=_prod_examples, ) def prod( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return NDFrame.prod(self, axis, skipna, numeric_only, min_count, **kwargs) setattr(cls, "prod", prod) cls.product = prod _num_doc, desc="Return the mean of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def mean( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.mean(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "mean", mean) _num_doc, desc="Return unbiased skew over requested axis.\n\nNormalized by N-1.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def skew( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.skew(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "skew", skew) _num_doc, desc="Return unbiased kurtosis over requested axis.\n\n" "Kurtosis obtained using Fisher's definition of\n" "kurtosis (kurtosis of normal == 0.0). 
Normalized " "by N-1.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def kurt( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.kurt(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "kurt", kurt) cls.kurtosis = kurt _num_doc, desc="Return the median of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def median( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.median(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "median", median) _num_doc, desc="Return the maximum of the values over the requested axis.\n\n" "If you want the *index* of the maximum, use ``idxmax``. This is " "the equivalent of the ``numpy.ndarray`` method ``argmax``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also=_stat_func_see_also, examples=_max_examples, ) def max( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.max(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "max", max) _num_doc, desc="Return the minimum of the values over the requested axis.\n\n" "If you want the *index* of the minimum, use ``idxmin``. This is " "the equivalent of the ``numpy.ndarray`` method ``argmin``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also=_stat_func_see_also, examples=_min_examples, ) def min( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.min(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "min", min) def rolling( self, window: int | dt.timedelta | str | BaseOffset | BaseIndexer, min_periods: int | None = None, center: bool_t = False, win_type: str | None = None, on: str | None = None, axis: Axis = 0, closed: str | None = None, step: int | None = None, method: str = "single", ) -> Window | Rolling: axis = self._get_axis_number(axis) if win_type is not None: return Window( self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, axis=axis, closed=closed, step=step, method=method, ) return Rolling( self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, axis=axis, closed=closed, step=step, method=method, ) def expanding( self, min_periods: int = 1, axis: Axis = 0, method: str = "single", ) -> Expanding: axis = self._get_axis_number(axis) return Expanding(self, min_periods=min_periods, axis=axis, method=method) def ewm( self, com: float | None = None, span: float | None = None, halflife: float | TimedeltaConvertibleTypes | None = None, alpha: float | None = None, min_periods: int | None = 0, adjust: bool_t = True, ignore_na: bool_t = False, axis: Axis = 0, times: np.ndarray | DataFrame | Series | None = None, method: str = "single", ) -> ExponentialMovingWindow: axis = self._get_axis_number(axis) return ExponentialMovingWindow( self, com=com, span=span, halflife=halflife, alpha=alpha, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, axis=axis, times=times, method=method, ) # ---------------------------------------------------------------------- # Arithmetic Methods def _inplace_method(self, other, op): """ Wrap arithmetic method to operate inplace. 
""" result = op(self, other) if ( self.ndim == 1 and result._indexed_same(self) and is_dtype_equal(result.dtype, self.dtype) ): # GH#36498 this inplace op can _actually_ be inplace. # Item "ArrayManager" of "Union[ArrayManager, SingleArrayManager, # BlockManager, SingleBlockManager]" has no attribute "setitem_inplace" self._mgr.setitem_inplace( # type: ignore[union-attr] slice(None), result._values ) return self # Delete cacher self._reset_cacher() # this makes sure that we are aligned like the input # we are updating inplace so we want to ignore is_copy self._update_inplace( result.reindex_like(self, copy=False), verify_is_copy=False ) return self def __iadd__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for + ("Type[NDFrame]") return self._inplace_method(other, type(self).__add__) # type: ignore[operator] def __isub__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for - ("Type[NDFrame]") return self._inplace_method(other, type(self).__sub__) # type: ignore[operator] def __imul__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for * ("Type[NDFrame]") return self._inplace_method(other, type(self).__mul__) # type: ignore[operator] def __itruediv__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for / ("Type[NDFrame]") return self._inplace_method( other, type(self).__truediv__ # type: ignore[operator] ) def __ifloordiv__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for // ("Type[NDFrame]") return self._inplace_method( other, type(self).__floordiv__ # type: ignore[operator] ) def __imod__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for % ("Type[NDFrame]") return self._inplace_method(other, type(self).__mod__) # type: ignore[operator] def __ipow__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for ** ("Type[NDFrame]") return self._inplace_method(other, type(self).__pow__) # type: ignore[operator] def __iand__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for & ("Type[NDFrame]") return self._inplace_method(other, type(self).__and__) # type: ignore[operator] def __ior__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for | ("Type[NDFrame]") return self._inplace_method(other, type(self).__or__) # type: ignore[operator] def __ixor__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for ^ ("Type[NDFrame]") return self._inplace_method(other, type(self).__xor__) # type: ignore[operator] # ---------------------------------------------------------------------- # Misc methods def _find_valid_index(self, *, how: str) -> Hashable | None: """ Retrieves the index of the first valid value. Parameters ---------- how : {'first', 'last'} Use this parameter to change between the first or last valid index. Returns ------- idx_first_valid : type of index """ idxpos = find_valid_index(self._values, how=how, is_valid=~isna(self._values)) if idxpos is None: return None return self.index[idxpos] def first_valid_index(self) -> Hashable | None: """ Return index for {position} non-NA value or None, if no non-NA value is found. Returns ------- type of index Notes ----- If all elements are non-NA/null, returns None. Also returns None for empty {klass}. 
""" return self._find_valid_index(how="first") def last_valid_index(self) -> Hashable | None: return self._find_valid_index(how="last") def to_json( path_or_buf: FilePath | WriteBuffer[str] | WriteBuffer[bytes], obj: NDFrame, orient: str | None = ..., date_format: str = ..., double_precision: int = ..., force_ascii: bool = ..., date_unit: str = ..., default_handler: Callable[[Any], JSONSerializable] | None = ..., lines: bool = ..., compression: CompressionOptions = ..., index: bool = ..., indent: int = ..., storage_options: StorageOptions = ..., mode: Literal["a", "w"] = ..., ) -> None: ...
173,500
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from collections import abc from io import StringIO from itertools import islice from types import TracebackType from typing import ( TYPE_CHECKING, Any, Callable, Generic, Literal, Mapping, TypeVar, overload, ) import numpy as np from pandas._libs import lib from pandas._libs.json import ( dumps, loads, ) from pandas._libs.tslibs import iNaT from pandas._typing import ( CompressionOptions, DtypeArg, DtypeBackend, FilePath, IndexLabel, JSONEngine, JSONSerializable, ReadBuffer, StorageOptions, WriteBuffer, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import AbstractMethodError from pandas.util._decorators import doc from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( ensure_str, is_period_dtype, ) from pandas.core.dtypes.generic import ABCIndex from pandas import ( ArrowDtype, DataFrame, MultiIndex, Series, isna, notna, to_datetime, ) from pandas.core.reshape.concat import concat from pandas.core.shared_docs import _shared_docs from pandas.io.common import ( IOHandles, dedup_names, extension_to_compression, file_exists, get_handle, is_fsspec_url, is_potential_multi_index, is_url, stringify_path, ) from pandas.io.json._normalize import convert_to_line_delimits from pandas.io.json._table_schema import ( build_table_schema, parse_table_schema, ) from pandas.io.parsers.readers import validate_integer Any = object() class Callable(BaseTypingInstance): def py__call__(self, arguments): """ def x() -> Callable[[Callable[..., _T]], _T]: ... """ # The 0th index are the arguments. try: param_values = self._generics_manager[0] result_values = self._generics_manager[1] except IndexError: debug.warning('Callable[...] defined without two arguments') return NO_VALUES else: from jedi.inference.gradual.annotation import infer_return_for_callable return infer_return_for_callable(arguments, param_values, result_values) def py__get__(self, instance, class_value): return ValueSet([self]) Literal: _SpecialForm = ... JSONSerializable = Optional[Union[PythonScalar, List, Dict]] StorageOptions = Optional[Dict[str, Any]] CompressionOptions = Optional[ Union[Literal["infer", "gzip", "bz2", "zip", "xz", "zstd", "tar"], CompressionDict] ] class NDFrame(PandasObject, indexing.IndexingMixin): """ N-dimensional analogue of DataFrame. 
Store multi-dimensional in a size-mutable, labeled data structure Parameters ---------- data : BlockManager axes : list copy : bool, default False """ _internal_names: list[str] = [ "_mgr", "_cacher", "_item_cache", "_cache", "_is_copy", "_subtyp", "_name", "_default_kind", "_default_fill_value", "_metadata", "__array_struct__", "__array_interface__", "_flags", ] _internal_names_set: set[str] = set(_internal_names) _accessors: set[str] = set() _hidden_attrs: frozenset[str] = frozenset([]) _metadata: list[str] = [] _is_copy: weakref.ReferenceType[NDFrame] | None = None _mgr: Manager _attrs: dict[Hashable, Any] _typ: str # ---------------------------------------------------------------------- # Constructors def __init__( self, data: Manager, copy: bool_t = False, attrs: Mapping[Hashable, Any] | None = None, ) -> None: # copy kwarg is retained for mypy compat, is not used object.__setattr__(self, "_is_copy", None) object.__setattr__(self, "_mgr", data) object.__setattr__(self, "_item_cache", {}) if attrs is None: attrs = {} else: attrs = dict(attrs) object.__setattr__(self, "_attrs", attrs) object.__setattr__(self, "_flags", Flags(self, allows_duplicate_labels=True)) def _init_mgr( cls, mgr: Manager, axes, dtype: Dtype | None = None, copy: bool_t = False, ) -> Manager: """passed a manager and a axes dict""" for a, axe in axes.items(): if axe is not None: axe = ensure_index(axe) bm_axis = cls._get_block_manager_axis(a) mgr = mgr.reindex_axis(axe, axis=bm_axis) # make a copy if explicitly requested if copy: mgr = mgr.copy() if dtype is not None: # avoid further copies if we can if ( isinstance(mgr, BlockManager) and len(mgr.blocks) == 1 and is_dtype_equal(mgr.blocks[0].values.dtype, dtype) ): pass else: mgr = mgr.astype(dtype=dtype) return mgr def _as_manager(self: NDFrameT, typ: str, copy: bool_t = True) -> NDFrameT: """ Private helper function to create a DataFrame with specific manager. Parameters ---------- typ : {"block", "array"} copy : bool, default True Only controls whether the conversion from Block->ArrayManager copies the 1D arrays (to ensure proper/contiguous memory layout). Returns ------- DataFrame New DataFrame using specified manager type. Is not guaranteed to be a copy or not. """ new_mgr: Manager new_mgr = mgr_to_mgr(self._mgr, typ=typ, copy=copy) # fastpath of passing a manager doesn't check the option/manager class return self._constructor(new_mgr).__finalize__(self) # ---------------------------------------------------------------------- # attrs and flags def attrs(self) -> dict[Hashable, Any]: """ Dictionary of global attributes of this dataset. .. warning:: attrs is experimental and may change without warning. See Also -------- DataFrame.flags : Global flags applying to this object. """ if self._attrs is None: self._attrs = {} return self._attrs def attrs(self, value: Mapping[Hashable, Any]) -> None: self._attrs = dict(value) def flags(self) -> Flags: """ Get the properties associated with this pandas object. The available flags are * :attr:`Flags.allows_duplicate_labels` See Also -------- Flags : Flags that apply to pandas objects. DataFrame.attrs : Global metadata applying to this dataset. Notes ----- "Flags" differ from "metadata". Flags reflect properties of the pandas object (the Series or DataFrame). Metadata refer to properties of the dataset, and should be stored in :attr:`DataFrame.attrs`. 
Examples -------- >>> df = pd.DataFrame({"A": [1, 2]}) >>> df.flags <Flags(allows_duplicate_labels=True)> Flags can be get or set using ``.`` >>> df.flags.allows_duplicate_labels True >>> df.flags.allows_duplicate_labels = False Or by slicing with a key >>> df.flags["allows_duplicate_labels"] False >>> df.flags["allows_duplicate_labels"] = True """ return self._flags def set_flags( self: NDFrameT, *, copy: bool_t = False, allows_duplicate_labels: bool_t | None = None, ) -> NDFrameT: """ Return a new object with updated flags. Parameters ---------- copy : bool, default False Specify if a copy of the object should be made. allows_duplicate_labels : bool, optional Whether the returned object allows duplicate labels. Returns ------- Series or DataFrame The same type as the caller. See Also -------- DataFrame.attrs : Global metadata applying to this dataset. DataFrame.flags : Global flags applying to this object. Notes ----- This method returns a new object that's a view on the same data as the input. Mutating the input or the output values will be reflected in the other. This method is intended to be used in method chains. "Flags" differ from "metadata". Flags reflect properties of the pandas object (the Series or DataFrame). Metadata refer to properties of the dataset, and should be stored in :attr:`DataFrame.attrs`. Examples -------- >>> df = pd.DataFrame({"A": [1, 2]}) >>> df.flags.allows_duplicate_labels True >>> df2 = df.set_flags(allows_duplicate_labels=False) >>> df2.flags.allows_duplicate_labels False """ df = self.copy(deep=copy and not using_copy_on_write()) if allows_duplicate_labels is not None: df.flags["allows_duplicate_labels"] = allows_duplicate_labels return df def _validate_dtype(cls, dtype) -> DtypeObj | None: """validate the passed dtype""" if dtype is not None: dtype = pandas_dtype(dtype) # a compound dtype if dtype.kind == "V": raise NotImplementedError( "compound dtypes are not implemented " f"in the {cls.__name__} constructor" ) return dtype # ---------------------------------------------------------------------- # Construction def _constructor(self: NDFrameT) -> Callable[..., NDFrameT]: """ Used when a manipulation result has the same dimensions as the original. """ raise AbstractMethodError(self) # ---------------------------------------------------------------------- # Internals def _data(self): # GH#33054 retained because some downstream packages uses this, # e.g. 
fastparquet return self._mgr # ---------------------------------------------------------------------- # Axis _stat_axis_number = 0 _stat_axis_name = "index" _AXIS_ORDERS: list[Literal["index", "columns"]] _AXIS_TO_AXIS_NUMBER: dict[Axis, AxisInt] = {0: 0, "index": 0, "rows": 0} _info_axis_number: int _info_axis_name: Literal["index", "columns"] _AXIS_LEN: int def _construct_axes_dict(self, axes: Sequence[Axis] | None = None, **kwargs): """Return an axes dictionary for myself.""" d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)} # error: Argument 1 to "update" of "MutableMapping" has incompatible type # "Dict[str, Any]"; expected "SupportsKeysAndGetItem[Union[int, str], Any]" d.update(kwargs) # type: ignore[arg-type] return d def _get_axis_number(cls, axis: Axis) -> AxisInt: try: return cls._AXIS_TO_AXIS_NUMBER[axis] except KeyError: raise ValueError(f"No axis named {axis} for object type {cls.__name__}") def _get_axis_name(cls, axis: Axis) -> Literal["index", "columns"]: axis_number = cls._get_axis_number(axis) return cls._AXIS_ORDERS[axis_number] def _get_axis(self, axis: Axis) -> Index: axis_number = self._get_axis_number(axis) assert axis_number in {0, 1} return self.index if axis_number == 0 else self.columns def _get_block_manager_axis(cls, axis: Axis) -> AxisInt: """Map the axis to the block_manager axis.""" axis = cls._get_axis_number(axis) ndim = cls._AXIS_LEN if ndim == 2: # i.e. DataFrame return 1 - axis return axis def _get_axis_resolvers(self, axis: str) -> dict[str, Series | MultiIndex]: # index or columns axis_index = getattr(self, axis) d = {} prefix = axis[0] for i, name in enumerate(axis_index.names): if name is not None: key = level = name else: # prefix with 'i' or 'c' depending on the input axis # e.g., you must do ilevel_0 for the 0th level of an unnamed # multiiindex key = f"{prefix}level_{i}" level = i level_values = axis_index.get_level_values(level) s = level_values.to_series() s.index = axis_index d[key] = s # put the index/columns itself in the dict if isinstance(axis_index, MultiIndex): dindex = axis_index else: dindex = axis_index.to_series() d[axis] = dindex return d def _get_index_resolvers(self) -> dict[Hashable, Series | MultiIndex]: from pandas.core.computation.parsing import clean_column_name d: dict[str, Series | MultiIndex] = {} for axis_name in self._AXIS_ORDERS: d.update(self._get_axis_resolvers(axis_name)) return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)} def _get_cleaned_column_resolvers(self) -> dict[Hashable, Series]: """ Return the special character free column resolvers of a dataframe. Column names with special characters are 'cleaned up' so that they can be referred to by backtick quoting. Used in :meth:`DataFrame.eval`. 
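(Added note, for illustration.) A column named ``my col`` can then be
referred to with backticks in the expression, e.g.
``df.query("`my col` > 0")``.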
""" from pandas.core.computation.parsing import clean_column_name if isinstance(self, ABCSeries): return {clean_column_name(self.name): self} return { clean_column_name(k): v for k, v in self.items() if not isinstance(k, int) } def _info_axis(self) -> Index: return getattr(self, self._info_axis_name) def _stat_axis(self) -> Index: return getattr(self, self._stat_axis_name) def shape(self) -> tuple[int, ...]: """ Return a tuple of axis dimensions """ return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS) def axes(self) -> list[Index]: """ Return index label(s) of the internal NDFrame """ # we do it this way because if we have reversed axes, then # the block manager shows then reversed return [self._get_axis(a) for a in self._AXIS_ORDERS] def ndim(self) -> int: """ Return an int representing the number of axes / array dimensions. Return 1 if Series. Otherwise return 2 if DataFrame. See Also -------- ndarray.ndim : Number of array dimensions. Examples -------- >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3}) >>> s.ndim 1 >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.ndim 2 """ return self._mgr.ndim def size(self) -> int: """ Return an int representing the number of elements in this object. Return the number of rows if Series. Otherwise return the number of rows times number of columns if DataFrame. See Also -------- ndarray.size : Number of elements in the array. Examples -------- >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3}) >>> s.size 3 >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.size 4 """ # error: Incompatible return value type (got "signedinteger[_64Bit]", # expected "int") [return-value] return np.prod(self.shape) # type: ignore[return-value] def set_axis( self: NDFrameT, labels, *, axis: Axis = 0, copy: bool_t | None = None, ) -> NDFrameT: """ Assign desired index to given axis. Indexes for%(extended_summary_sub)s row labels can be changed by assigning a list-like or Index. Parameters ---------- labels : list-like, Index The values for the new index. axis : %(axes_single_arg)s, default 0 The axis to update. The value 0 identifies the rows. For `Series` this parameter is unused and defaults to 0. copy : bool, default True Whether to make a copy of the underlying data. .. versionadded:: 1.5.0 Returns ------- %(klass)s An object of type %(klass)s. See Also -------- %(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s. """ return self._set_axis_nocheck(labels, axis, inplace=False, copy=copy) def _set_axis_nocheck( self, labels, axis: Axis, inplace: bool_t, copy: bool_t | None ): if inplace: setattr(self, self._get_axis_name(axis), labels) else: # With copy=False, we create a new object but don't copy the # underlying data. obj = self.copy(deep=copy and not using_copy_on_write()) setattr(obj, obj._get_axis_name(axis), labels) return obj def _set_axis(self, axis: AxisInt, labels: AnyArrayLike | list) -> None: """ This is called from the cython code when we set the `index` attribute directly, e.g. `series.index = [1, 2, 3]`. """ labels = ensure_index(labels) self._mgr.set_axis(axis, labels) self._clear_item_cache() def swapaxes( self: NDFrameT, axis1: Axis, axis2: Axis, copy: bool_t | None = None ) -> NDFrameT: """ Interchange axes and swap values axes appropriately. 
Returns ------- same as input """ i = self._get_axis_number(axis1) j = self._get_axis_number(axis2) if i == j: return self.copy(deep=copy and not using_copy_on_write()) mapping = {i: j, j: i} new_axes = [self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN)] new_values = self._values.swapaxes(i, j) # type: ignore[union-attr] if ( using_copy_on_write() and self._mgr.is_single_block and isinstance(self._mgr, BlockManager) ): # This should only get hit in case of having a single block, otherwise a # copy is made, we don't have to set up references. new_mgr = ndarray_to_mgr( new_values, new_axes[0], new_axes[1], dtype=None, copy=False, typ="block", ) assert isinstance(new_mgr, BlockManager) assert isinstance(self._mgr, BlockManager) new_mgr.blocks[0].refs = self._mgr.blocks[0].refs new_mgr.blocks[0].refs.add_reference( new_mgr.blocks[0] # type: ignore[arg-type] ) return self._constructor(new_mgr).__finalize__(self, method="swapaxes") elif (copy or copy is None) and self._mgr.is_single_block: new_values = new_values.copy() return self._constructor( new_values, *new_axes, # The no-copy case for CoW is handled above copy=False, ).__finalize__(self, method="swapaxes") def droplevel(self: NDFrameT, level: IndexLabel, axis: Axis = 0) -> NDFrameT: """ Return {klass} with requested index / column level(s) removed. Parameters ---------- level : int, str, or list-like If a string is given, must be the name of a level If list-like, elements must be names or positional indexes of levels. axis : {{0 or 'index', 1 or 'columns'}}, default 0 Axis along which the level(s) is removed: * 0 or 'index': remove level(s) in column. * 1 or 'columns': remove level(s) in row. For `Series` this parameter is unused and defaults to 0. Returns ------- {klass} {klass} with requested index / column level(s) removed. Examples -------- >>> df = pd.DataFrame([ ... [1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12] ... ]).set_index([0, 1]).rename_axis(['a', 'b']) >>> df.columns = pd.MultiIndex.from_tuples([ ... ('c', 'e'), ('d', 'f') ... ], names=['level_1', 'level_2']) >>> df level_1 c d level_2 e f a b 1 2 3 4 5 6 7 8 9 10 11 12 >>> df.droplevel('a') level_1 c d level_2 e f b 2 3 4 6 7 8 10 11 12 >>> df.droplevel('level_2', axis=1) level_1 c d a b 1 2 3 4 5 6 7 8 9 10 11 12 """ labels = self._get_axis(axis) new_labels = labels.droplevel(level) return self.set_axis(new_labels, axis=axis, copy=None) def pop(self, item: Hashable) -> Series | Any: result = self[item] del self[item] return result def squeeze(self, axis: Axis | None = None): """ Squeeze 1 dimensional axis objects into scalars. Series or DataFrames with a single element are squeezed to a scalar. DataFrames with a single column or a single row are squeezed to a Series. Otherwise the object is unchanged. This method is most useful when you don't know if your object is a Series or DataFrame, but you do know it has just a single column. In that case you can safely call `squeeze` to ensure you have a Series. Parameters ---------- axis : {0 or 'index', 1 or 'columns', None}, default None A specific axis to squeeze. By default, all length-1 axes are squeezed. For `Series` this parameter is unused and defaults to `None`. Returns ------- DataFrame, Series, or scalar The projection after squeezing `axis` or all the axes. See Also -------- Series.iloc : Integer-location based indexing for selecting scalars. DataFrame.iloc : Integer-location based indexing for selecting Series. Series.to_frame : Inverse of DataFrame.squeeze for a single-column DataFrame. 
Examples -------- >>> primes = pd.Series([2, 3, 5, 7]) Slicing might produce a Series with a single value: >>> even_primes = primes[primes % 2 == 0] >>> even_primes 0 2 dtype: int64 >>> even_primes.squeeze() 2 Squeezing objects with more than one value in every axis does nothing: >>> odd_primes = primes[primes % 2 == 1] >>> odd_primes 1 3 2 5 3 7 dtype: int64 >>> odd_primes.squeeze() 1 3 2 5 3 7 dtype: int64 Squeezing is even more effective when used with DataFrames. >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b']) >>> df a b 0 1 2 1 3 4 Slicing a single column will produce a DataFrame with the columns having only one value: >>> df_a = df[['a']] >>> df_a a 0 1 1 3 So the columns can be squeezed down, resulting in a Series: >>> df_a.squeeze('columns') 0 1 1 3 Name: a, dtype: int64 Slicing a single row from a single column will produce a single scalar DataFrame: >>> df_0a = df.loc[df.index < 1, ['a']] >>> df_0a a 0 1 Squeezing the rows produces a single scalar Series: >>> df_0a.squeeze('rows') a 1 Name: 0, dtype: int64 Squeezing all axes will project directly into a scalar: >>> df_0a.squeeze() 1 """ axes = range(self._AXIS_LEN) if axis is None else (self._get_axis_number(axis),) return self.iloc[ tuple( 0 if i in axes and len(a) == 1 else slice(None) for i, a in enumerate(self.axes) ) ] # ---------------------------------------------------------------------- # Rename def _rename( self: NDFrameT, mapper: Renamer | None = None, *, index: Renamer | None = None, columns: Renamer | None = None, axis: Axis | None = None, copy: bool_t | None = None, inplace: bool_t = False, level: Level | None = None, errors: str = "ignore", ) -> NDFrameT | None: # called by Series.rename and DataFrame.rename if mapper is None and index is None and columns is None: raise TypeError("must pass an index to rename") if index is not None or columns is not None: if axis is not None: raise TypeError( "Cannot specify both 'axis' and any of 'index' or 'columns'" ) if mapper is not None: raise TypeError( "Cannot specify both 'mapper' and any of 'index' or 'columns'" ) else: # use the mapper argument if axis and self._get_axis_number(axis) == 1: columns = mapper else: index = mapper self._check_inplace_and_allows_duplicate_labels(inplace) result = self if inplace else self.copy(deep=copy and not using_copy_on_write()) for axis_no, replacements in enumerate((index, columns)): if replacements is None: continue ax = self._get_axis(axis_no) f = common.get_rename_function(replacements) if level is not None: level = ax._get_level_number(level) # GH 13473 if not callable(replacements): if ax._is_multi and level is not None: indexer = ax.get_level_values(level).get_indexer_for(replacements) else: indexer = ax.get_indexer_for(replacements) if errors == "raise" and len(indexer[indexer == -1]): missing_labels = [ label for index, label in enumerate(replacements) if indexer[index] == -1 ] raise KeyError(f"{missing_labels} not found in axis") new_index = ax._transform_index(f, level=level) result._set_axis_nocheck(new_index, axis=axis_no, inplace=True, copy=False) result._clear_item_cache() if inplace: self._update_inplace(result) return None else: return result.__finalize__(self, method="rename") def rename_axis( self: NDFrameT, mapper: IndexLabel | lib.NoDefault = ..., *, index=..., columns=..., axis: Axis = ..., copy: bool_t | None = ..., inplace: Literal[False] = ..., ) -> NDFrameT: ... 
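# A minimal usage sketch for the rename_axis overloads below, mirroring the
# examples in the docstring that follows: renaming the index axis returns a
# new object unless ``inplace=True`` is passed.
#
# >>> df = pd.DataFrame({"num_legs": [4, 2]}, index=["dog", "monkey"])
# >>> df.rename_axis("animal").index.name
# 'animal'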
def rename_axis( self, mapper: IndexLabel | lib.NoDefault = ..., *, index=..., columns=..., axis: Axis = ..., copy: bool_t | None = ..., inplace: Literal[True], ) -> None: ... def rename_axis( self: NDFrameT, mapper: IndexLabel | lib.NoDefault = ..., *, index=..., columns=..., axis: Axis = ..., copy: bool_t | None = ..., inplace: bool_t = ..., ) -> NDFrameT | None: ... def rename_axis( self: NDFrameT, mapper: IndexLabel | lib.NoDefault = lib.no_default, *, index=lib.no_default, columns=lib.no_default, axis: Axis = 0, copy: bool_t | None = None, inplace: bool_t = False, ) -> NDFrameT | None: """ Set the name of the axis for the index or columns. Parameters ---------- mapper : scalar, list-like, optional Value to set the axis name attribute. index, columns : scalar, list-like, dict-like or function, optional A scalar, list-like, dict-like or function transformations to apply to that axis' values. Note that the ``columns`` parameter is not allowed if the object is a Series. This parameter only applies to DataFrame objects. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and/or ``columns``. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to rename. For `Series` this parameter is unused and defaults to 0. copy : bool, default None Also copy underlying data. inplace : bool, default False Modifies the object directly, instead of creating a new Series or DataFrame. Returns ------- Series, DataFrame, or None The same type as the caller or None if ``inplace=True``. See Also -------- Series.rename : Alter Series index labels or name. DataFrame.rename : Alter DataFrame index labels or name. Index.rename : Set new names on index. Notes ----- ``DataFrame.rename_axis`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` The first calling convention will only modify the names of the index and/or the names of the Index object that is the columns. In this case, the parameter ``copy`` is ignored. The second calling convention will modify the names of the corresponding index if mapper is a list or a scalar. However, if mapper is dict-like or a function, it will use the deprecated behavior of modifying the axis *labels*. We *highly* recommend using keyword arguments to clarify your intent. Examples -------- **Series** >>> s = pd.Series(["dog", "cat", "monkey"]) >>> s 0 dog 1 cat 2 monkey dtype: object >>> s.rename_axis("animal") animal 0 dog 1 cat 2 monkey dtype: object **DataFrame** >>> df = pd.DataFrame({"num_legs": [4, 4, 2], ... "num_arms": [0, 0, 2]}, ... ["dog", "cat", "monkey"]) >>> df num_legs num_arms dog 4 0 cat 4 0 monkey 2 2 >>> df = df.rename_axis("animal") >>> df num_legs num_arms animal dog 4 0 cat 4 0 monkey 2 2 >>> df = df.rename_axis("limbs", axis="columns") >>> df limbs num_legs num_arms animal dog 4 0 cat 4 0 monkey 2 2 **MultiIndex** >>> df.index = pd.MultiIndex.from_product([['mammal'], ... ['dog', 'cat', 'monkey']], ...
names=['type', 'name']) >>> df limbs num_legs num_arms type name mammal dog 4 0 cat 4 0 monkey 2 2 >>> df.rename_axis(index={'type': 'class'}) limbs num_legs num_arms class name mammal dog 4 0 cat 4 0 monkey 2 2 >>> df.rename_axis(columns=str.upper) LIMBS num_legs num_arms type name mammal dog 4 0 cat 4 0 monkey 2 2 """ axes = {"index": index, "columns": columns} if axis is not None: axis = self._get_axis_number(axis) inplace = validate_bool_kwarg(inplace, "inplace") if copy and using_copy_on_write(): copy = False if mapper is not lib.no_default: # Use v0.23 behavior if a scalar or list non_mapper = is_scalar(mapper) or ( is_list_like(mapper) and not is_dict_like(mapper) ) if non_mapper: return self._set_axis_name( mapper, axis=axis, inplace=inplace, copy=copy ) else: raise ValueError("Use `.rename` to alter labels with a mapper.") else: # Use new behavior. Means that index and/or columns # is specified result = self if inplace else self.copy(deep=copy) for axis in range(self._AXIS_LEN): v = axes.get(self._get_axis_name(axis)) if v is lib.no_default: continue non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v)) if non_mapper: newnames = v else: f = common.get_rename_function(v) curnames = self._get_axis(axis).names newnames = [f(name) for name in curnames] result._set_axis_name(newnames, axis=axis, inplace=True, copy=copy) if not inplace: return result return None def _set_axis_name( self, name, axis: Axis = 0, inplace: bool_t = False, copy: bool_t | None = True ): """ Set the name(s) of the axis. Parameters ---------- name : str or list of str Name(s) to set. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to set the label. The value 0 or 'index' specifies index, and the value 1 or 'columns' specifies columns. inplace : bool, default False If `True`, do operation inplace and return None. copy: Whether to make a copy of the result. Returns ------- Series, DataFrame, or None The same type as the caller or `None` if `inplace` is `True`. See Also -------- DataFrame.rename : Alter the axis labels of :class:`DataFrame`. Series.rename : Alter the index labels or set the index name of :class:`Series`. Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`. Examples -------- >>> df = pd.DataFrame({"num_legs": [4, 4, 2]}, ... ["dog", "cat", "monkey"]) >>> df num_legs dog 4 cat 4 monkey 2 >>> df._set_axis_name("animal") num_legs animal dog 4 cat 4 monkey 2 >>> df.index = pd.MultiIndex.from_product( ... [["mammal"], ['dog', 'cat', 'monkey']]) >>> df._set_axis_name(["type", "name"]) num_legs type name mammal dog 4 cat 4 monkey 2 """ axis = self._get_axis_number(axis) idx = self._get_axis(axis).set_names(name) inplace = validate_bool_kwarg(inplace, "inplace") renamed = self if inplace else self.copy(deep=copy) if axis == 0: renamed.index = idx else: renamed.columns = idx if not inplace: return renamed # ---------------------------------------------------------------------- # Comparison Methods def _indexed_same(self, other) -> bool_t: return all( self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS ) def equals(self, other: object) -> bool_t: """ Test whether two objects contain the same elements. This function allows two Series or DataFrames to be compared against each other to see if they have the same shape and elements. NaNs in the same location are considered equal. The row/column index do not need to have the same type, as long as the values are considered equal. Corresponding columns must be of the same dtype. 
Parameters ---------- other : Series or DataFrame The other Series or DataFrame to be compared with the first. Returns ------- bool True if all elements are the same in both objects, False otherwise. See Also -------- Series.eq : Compare two Series objects of the same length and return a Series where each element is True if the element in each Series is equal, False otherwise. DataFrame.eq : Compare two DataFrame objects of the same shape and return a DataFrame where each element is True if the respective element in each DataFrame is equal, False otherwise. testing.assert_series_equal : Raises an AssertionError if left and right are not equal. Provides an easy interface to ignore inequality in dtypes, indexes and precision among others. testing.assert_frame_equal : Like assert_series_equal, but targets DataFrames. numpy.array_equal : Return True if two arrays have the same shape and elements, False otherwise. Examples -------- >>> df = pd.DataFrame({1: [10], 2: [20]}) >>> df 1 2 0 10 20 DataFrames df and exactly_equal have the same types and values for their elements and column labels, which will return True. >>> exactly_equal = pd.DataFrame({1: [10], 2: [20]}) >>> exactly_equal 1 2 0 10 20 >>> df.equals(exactly_equal) True DataFrames df and different_column_type have the same element types and values, but have different types for the column labels, which will still return True. >>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]}) >>> different_column_type 1.0 2.0 0 10 20 >>> df.equals(different_column_type) True DataFrames df and different_data_type have different types for the same values for their elements, and will return False even though their column labels are the same values and types. >>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]}) >>> different_data_type 1 2 0 10.0 20.0 >>> df.equals(different_data_type) False """ if not (isinstance(other, type(self)) or isinstance(self, type(other))): return False other = cast(NDFrame, other) return self._mgr.equals(other._mgr) # ------------------------------------------------------------------------- # Unary Methods def __neg__(self: NDFrameT) -> NDFrameT: def blk_func(values: ArrayLike): if is_bool_dtype(values.dtype): # error: Argument 1 to "inv" has incompatible type "Union # [ExtensionArray, ndarray[Any, Any]]"; expected # "_SupportsInversion[ndarray[Any, dtype[bool_]]]" return operator.inv(values) # type: ignore[arg-type] else: # error: Argument 1 to "neg" has incompatible type "Union # [ExtensionArray, ndarray[Any, Any]]"; expected # "_SupportsNeg[ndarray[Any, dtype[Any]]]" return operator.neg(values) # type: ignore[arg-type] new_data = self._mgr.apply(blk_func) res = self._constructor(new_data) return res.__finalize__(self, method="__neg__") def __pos__(self: NDFrameT) -> NDFrameT: def blk_func(values: ArrayLike): if is_bool_dtype(values.dtype): return values.copy() else: # error: Argument 1 to "pos" has incompatible type "Union # [ExtensionArray, ndarray[Any, Any]]"; expected # "_SupportsPos[ndarray[Any, dtype[Any]]]" return operator.pos(values) # type: ignore[arg-type] new_data = self._mgr.apply(blk_func) res = self._constructor(new_data) return res.__finalize__(self, method="__pos__") def __invert__(self: NDFrameT) -> NDFrameT: if not self.size: # inv fails with 0 len return self.copy(deep=False) new_data = self._mgr.apply(operator.invert) return self._constructor(new_data).__finalize__(self, method="__invert__") def __nonzero__(self) -> NoReturn: raise ValueError( f"The truth value of a 
{type(self).__name__} is ambiguous. " "Use a.empty, a.bool(), a.item(), a.any() or a.all()." ) __bool__ = __nonzero__ def bool(self) -> bool_t: """ Return the bool of a single element Series or DataFrame. This must be a boolean scalar value, either True or False. It will raise a ValueError if the Series or DataFrame does not have exactly 1 element, or that element is not boolean (integer values 0 and 1 will also raise an exception). Returns ------- bool The value in the Series or DataFrame. See Also -------- Series.astype : Change the data type of a Series, including to boolean. DataFrame.astype : Change the data type of a DataFrame, including to boolean. numpy.bool_ : NumPy boolean data type, used by pandas for boolean values. Examples -------- The method will only work for single element objects with a boolean value: >>> pd.Series([True]).bool() True >>> pd.Series([False]).bool() False >>> pd.DataFrame({'col': [True]}).bool() True >>> pd.DataFrame({'col': [False]}).bool() False """ v = self.squeeze() if isinstance(v, (bool, np.bool_)): return bool(v) elif is_scalar(v): raise ValueError( "bool cannot act on a non-boolean single element " f"{type(self).__name__}" ) self.__nonzero__() # for mypy (__nonzero__ raises) return True def abs(self: NDFrameT) -> NDFrameT: """ Return a Series/DataFrame with absolute numeric value of each element. This function only applies to elements that are all numeric. Returns ------- abs Series/DataFrame containing the absolute value of each element. See Also -------- numpy.absolute : Calculate the absolute value element-wise. Notes ----- For ``complex`` inputs, ``1.2 + 1j``, the absolute value is :math:`\\sqrt{ a^2 + b^2 }`. Examples -------- Absolute numeric values in a Series. >>> s = pd.Series([-1.10, 2, -3.33, 4]) >>> s.abs() 0 1.10 1 2.00 2 3.33 3 4.00 dtype: float64 Absolute numeric values in a Series with complex numbers. >>> s = pd.Series([1.2 + 1j]) >>> s.abs() 0 1.56205 dtype: float64 Absolute numeric values in a Series with a Timedelta element. >>> s = pd.Series([pd.Timedelta('1 days')]) >>> s.abs() 0 1 days dtype: timedelta64[ns] Select rows with data closest to certain value using argsort (from `StackOverflow <https://stackoverflow.com/a/17758115>`__). >>> df = pd.DataFrame({ ... 'a': [4, 5, 6, 7], ... 'b': [10, 20, 30, 40], ... 'c': [100, 50, -30, -50] ... }) >>> df a b c 0 4 10 100 1 5 20 50 2 6 30 -30 3 7 40 -50 >>> df.loc[(df.c - 43).abs().argsort()] a b c 1 5 20 50 0 4 10 100 2 6 30 -30 3 7 40 -50 """ res_mgr = self._mgr.apply(np.abs) return self._constructor(res_mgr).__finalize__(self, name="abs") def __abs__(self: NDFrameT) -> NDFrameT: return self.abs() def __round__(self: NDFrameT, decimals: int = 0) -> NDFrameT: return self.round(decimals).__finalize__(self, method="__round__") # ------------------------------------------------------------------------- # Label or Level Combination Helpers # # A collection of helper methods for DataFrame/Series operations that # accept a combination of column/index labels and levels. All such # operations should utilize/extend these methods when possible so that we # have consistent precedence and validation logic throughout the library. def _is_level_reference(self, key: Level, axis: Axis = 0) -> bool_t: """ Test whether a key is a level reference for a given axis. To be considered a level reference, `key` must be a string that: - (axis=0): Matches the name of an index level and does NOT match a column label. - (axis=1): Matches the name of a column level and does NOT match an index label. 
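For example (an illustrative sketch): with a frame whose index level is
named ``idx`` and whose only column is ``x``, ``"idx"`` is a level
reference on axis 0 while ``"x"`` is not:

>>> df = pd.DataFrame({"x": [1]}, index=pd.Index([10], name="idx"))
>>> df._is_level_reference("idx")
True
>>> df._is_level_reference("x")
False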
Parameters ---------- key : Hashable Potential level name for the given axis axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- is_level : bool """ axis_int = self._get_axis_number(axis) return ( key is not None and is_hashable(key) and key in self.axes[axis_int].names and not self._is_label_reference(key, axis=axis_int) ) def _is_label_reference(self, key: Level, axis: Axis = 0) -> bool_t: """ Test whether a key is a label reference for a given axis. To be considered a label reference, `key` must be a string that: - (axis=0): Matches a column label - (axis=1): Matches an index label Parameters ---------- key : Hashable Potential label name, i.e. Index entry. axis : int, default 0 Axis perpendicular to the axis that labels are associated with (0 means search for column labels, 1 means search for index labels) Returns ------- is_label: bool """ axis_int = self._get_axis_number(axis) other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis_int) return ( key is not None and is_hashable(key) and any(key in self.axes[ax] for ax in other_axes) ) def _is_label_or_level_reference(self, key: Level, axis: AxisInt = 0) -> bool_t: """ Test whether a key is a label or level reference for a given axis. To be considered either a label or a level reference, `key` must be a string that: - (axis=0): Matches a column label or an index level - (axis=1): Matches an index label or a column level Parameters ---------- key : Hashable Potential label or level name axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- bool """ return self._is_level_reference(key, axis=axis) or self._is_label_reference( key, axis=axis ) def _check_label_or_level_ambiguity(self, key: Level, axis: Axis = 0) -> None: """ Check whether `key` is ambiguous. By ambiguous, we mean that it matches both a level of the input `axis` and a label of the other axis. Parameters ---------- key : Hashable Label or level name. axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns). Raises ------ ValueError: `key` is ambiguous """ axis_int = self._get_axis_number(axis) other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis_int) if ( key is not None and is_hashable(key) and key in self.axes[axis_int].names and any(key in self.axes[ax] for ax in other_axes) ): # Build an informative and grammatical warning level_article, level_type = ( ("an", "index") if axis_int == 0 else ("a", "column") ) label_article, label_type = ( ("a", "column") if axis_int == 0 else ("an", "index") ) msg = ( f"'{key}' is both {level_article} {level_type} level and " f"{label_article} {label_type} label, which is ambiguous." ) raise ValueError(msg) def _get_label_or_level_values(self, key: Level, axis: AxisInt = 0) -> ArrayLike: """ Return a 1-D array of values associated with `key`, a label or level from the given `axis`. Retrieval logic: - (axis=0): Return column values if `key` matches a column label. Otherwise return index level values if `key` matches an index level. - (axis=1): Return row values if `key` matches an index label. Otherwise return column level values if 'key' matches a column level Parameters ---------- key : Hashable Label or level name. 
axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- np.ndarray or ExtensionArray Raises ------ KeyError if `key` matches neither a label nor a level ValueError if `key` matches multiple labels """ axis = self._get_axis_number(axis) other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis] if self._is_label_reference(key, axis=axis): self._check_label_or_level_ambiguity(key, axis=axis) values = self.xs(key, axis=other_axes[0])._values elif self._is_level_reference(key, axis=axis): values = self.axes[axis].get_level_values(key)._values else: raise KeyError(key) # Check for duplicates if values.ndim > 1: if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex): multi_message = ( "\n" "For a multi-index, the label must be a " "tuple with elements corresponding to each level." ) else: multi_message = "" label_axis_name = "column" if axis == 0 else "index" raise ValueError( f"The {label_axis_name} label '{key}' is not unique.{multi_message}" ) return values def _drop_labels_or_levels(self, keys, axis: AxisInt = 0): """ Drop labels and/or levels for the given `axis`. For each key in `keys`: - (axis=0): If key matches a column label then drop the column. Otherwise if key matches an index level then drop the level. - (axis=1): If key matches an index label then drop the row. Otherwise if key matches a column level then drop the level. Parameters ---------- keys : str or list of str labels or levels to drop axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- dropped: DataFrame Raises ------ ValueError if any `keys` match neither a label nor a level """ axis = self._get_axis_number(axis) # Validate keys keys = common.maybe_make_list(keys) invalid_keys = [ k for k in keys if not self._is_label_or_level_reference(k, axis=axis) ] if invalid_keys: raise ValueError( "The following keys are not valid labels or " f"levels for axis {axis}: {invalid_keys}" ) # Compute levels and labels to drop levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)] labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)] # Perform copy upfront and then use inplace operations below. # This ensures that we always perform exactly one copy. # ``copy`` and/or ``inplace`` options could be added in the future. dropped = self.copy(deep=False) if axis == 0: # Handle dropping index levels if levels_to_drop: dropped.reset_index(levels_to_drop, drop=True, inplace=True) # Handle dropping columns labels if labels_to_drop: dropped.drop(labels_to_drop, axis=1, inplace=True) else: # Handle dropping column levels if levels_to_drop: if isinstance(dropped.columns, MultiIndex): # Drop the specified levels from the MultiIndex dropped.columns = dropped.columns.droplevel(levels_to_drop) else: # Drop the last level of Index by replacing with # a RangeIndex dropped.columns = RangeIndex(dropped.columns.size) # Handle dropping index labels if labels_to_drop: dropped.drop(labels_to_drop, axis=0, inplace=True) return dropped # ---------------------------------------------------------------------- # Iteration # https://github.com/python/typeshed/issues/2148#issuecomment-520783318 # Incompatible types in assignment (expression has type "None", base class # "object" defined the type as "Callable[[object], int]") __hash__: ClassVar[None] # type: ignore[assignment] def __iter__(self) -> Iterator: """ Iterate over info axis. Returns ------- iterator Info axis as iterator. 
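Examples
--------
Iterating over a DataFrame yields its column labels, since columns are
the info axis of a frame:

>>> df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
>>> for label in df:
...     print(label)
A
B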
""" return iter(self._info_axis) # can we get a better explanation of this? def keys(self) -> Index: """ Get the 'info axis' (see Indexing for more). This is index for Series, columns for DataFrame. Returns ------- Index Info axis. """ return self._info_axis def items(self): """ Iterate over (label, values) on info axis This is index for Series and columns for DataFrame. Returns ------- Generator """ for h in self._info_axis: yield h, self[h] def __len__(self) -> int: """Returns length of info axis""" return len(self._info_axis) def __contains__(self, key) -> bool_t: """True if the key is in the info axis""" return key in self._info_axis def empty(self) -> bool_t: """ Indicator whether Series/DataFrame is empty. True if Series/DataFrame is entirely empty (no items), meaning any of the axes are of length 0. Returns ------- bool If Series/DataFrame is empty, return True, if not return False. See Also -------- Series.dropna : Return series without null values. DataFrame.dropna : Return DataFrame with labels on given axis omitted where (all or any) data are missing. Notes ----- If Series/DataFrame contains only NaNs, it is still not considered empty. See the example below. Examples -------- An example of an actual empty DataFrame. Notice the index is empty: >>> df_empty = pd.DataFrame({'A' : []}) >>> df_empty Empty DataFrame Columns: [A] Index: [] >>> df_empty.empty True If we only have NaNs in our DataFrame, it is not considered empty! We will need to drop the NaNs to make the DataFrame empty: >>> df = pd.DataFrame({'A' : [np.nan]}) >>> df A 0 NaN >>> df.empty False >>> df.dropna().empty True >>> ser_empty = pd.Series({'A' : []}) >>> ser_empty A [] dtype: object >>> ser_empty.empty False >>> ser_empty = pd.Series() >>> ser_empty.empty True """ return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS) # ---------------------------------------------------------------------- # Array Interface # This is also set in IndexOpsMixin # GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented __array_priority__: int = 1000 def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: values = self._values arr = np.asarray(values, dtype=dtype) if ( astype_is_view(values.dtype, arr.dtype) and using_copy_on_write() and self._mgr.is_single_block ): # Check if both conversions can be done without a copy if astype_is_view(self.dtypes.iloc[0], values.dtype) and astype_is_view( values.dtype, arr.dtype ): arr = arr.view() arr.flags.writeable = False return arr def __array_ufunc__( self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any ): return arraylike.array_ufunc(self, ufunc, method, *inputs, **kwargs) # ---------------------------------------------------------------------- # Picklability def __getstate__(self) -> dict[str, Any]: meta = {k: getattr(self, k, None) for k in self._metadata} return { "_mgr": self._mgr, "_typ": self._typ, "_metadata": self._metadata, "attrs": self.attrs, "_flags": {k: self.flags[k] for k in self.flags._keys}, **meta, } def __setstate__(self, state) -> None: if isinstance(state, BlockManager): self._mgr = state elif isinstance(state, dict): if "_data" in state and "_mgr" not in state: # compat for older pickles state["_mgr"] = state.pop("_data") typ = state.get("_typ") if typ is not None: attrs = state.get("_attrs", {}) object.__setattr__(self, "_attrs", attrs) flags = state.get("_flags", {"allows_duplicate_labels": True}) object.__setattr__(self, "_flags", Flags(self, **flags)) # set in the order of internal names # to avoid definitional 
recursion # e.g. say fill_value needing _mgr to be # defined meta = set(self._internal_names + self._metadata) for k in list(meta): if k in state and k != "_flags": v = state[k] object.__setattr__(self, k, v) for k, v in state.items(): if k not in meta: object.__setattr__(self, k, v) else: raise NotImplementedError("Pre-0.12 pickles are no longer supported") elif len(state) == 2: raise NotImplementedError("Pre-0.12 pickles are no longer supported") self._item_cache: dict[Hashable, Series] = {} # ---------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str: # string representation based upon iterating over self # (since, by definition, `PandasContainers` are iterable) prepr = f"[{','.join(map(pprint_thing, self))}]" return f"{type(self).__name__}({prepr})" def _repr_latex_(self): """ Returns a LaTeX representation for a particular object. Mainly for use with nbconvert (jupyter notebook conversion to pdf). """ if config.get_option("styler.render.repr") == "latex": return self.to_latex() else: return None def _repr_data_resource_(self): """ Not a real Jupyter special repr method, but we use the same naming convention. """ if config.get_option("display.html.table_schema"): data = self.head(config.get_option("display.max_rows")) as_json = data.to_json(orient="table") as_json = cast(str, as_json) return loads(as_json, object_pairs_hook=collections.OrderedDict) # ---------------------------------------------------------------------- # I/O Methods klass="object", storage_options=_shared_docs["storage_options"], storage_options_versionadded="1.2.0", ) def to_excel( self, excel_writer, sheet_name: str = "Sheet1", na_rep: str = "", float_format: str | None = None, columns: Sequence[Hashable] | None = None, header: Sequence[Hashable] | bool_t = True, index: bool_t = True, index_label: IndexLabel = None, startrow: int = 0, startcol: int = 0, engine: str | None = None, merge_cells: bool_t = True, inf_rep: str = "inf", freeze_panes: tuple[int, int] | None = None, storage_options: StorageOptions = None, ) -> None: """ Write {klass} to an Excel sheet. To write a single {klass} to an Excel .xlsx file it is only necessary to specify a target file name. To write to multiple sheets it is necessary to create an `ExcelWriter` object with a target file name, and specify a sheet in the file to write to. Multiple sheets may be written to by specifying unique `sheet_name`. With all data written to the file it is necessary to save the changes. Note that creating an `ExcelWriter` object with a file name that already exists will result in the contents of the existing file being erased. Parameters ---------- excel_writer : path-like, file-like, or ExcelWriter object File path or existing ExcelWriter. sheet_name : str, default 'Sheet1' Name of sheet which will contain DataFrame. na_rep : str, default '' Missing data representation. float_format : str, optional Format string for floating point numbers. For example ``float_format="%.2f"`` will format 0.1234 to 0.12. columns : sequence or list of str, optional Columns to write. header : bool or list of str, default True Write out the column names. If a list of string is given it is assumed to be aliases for the column names. index : bool, default True Write row names (index). index_label : str or sequence, optional Column label for index column(s) if desired. If not specified, and `header` and `index` are True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. 
startrow : int, default 0 Upper left cell row to dump data frame. startcol : int, default 0 Upper left cell column to dump data frame. engine : str, optional Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this via the options ``io.excel.xlsx.writer`` or ``io.excel.xlsm.writer``. merge_cells : bool, default True Write MultiIndex and Hierarchical Rows as merged cells. inf_rep : str, default 'inf' Representation for infinity (there is no native representation for infinity in Excel). freeze_panes : tuple of int (length 2), optional Specifies the one-based bottommost row and rightmost column that is to be frozen. {storage_options} .. versionadded:: {storage_options_versionadded} See Also -------- to_csv : Write DataFrame to a comma-separated values (csv) file. ExcelWriter : Class for writing DataFrame objects into excel sheets. read_excel : Read an Excel file into a pandas DataFrame. read_csv : Read a comma-separated values (csv) file into DataFrame. io.formats.style.Styler.to_excel : Add styles to Excel sheet. Notes ----- For compatibility with :meth:`~DataFrame.to_csv`, to_excel serializes lists and dicts to strings before writing. Once a workbook has been saved it is not possible to write further data without rewriting the whole workbook. Examples -------- Create, write to and save a workbook: >>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']], ... index=['row 1', 'row 2'], ... columns=['col 1', 'col 2']) >>> df1.to_excel("output.xlsx") # doctest: +SKIP To specify the sheet name: >>> df1.to_excel("output.xlsx", ... sheet_name='Sheet_name_1') # doctest: +SKIP If you wish to write to more than one sheet in the workbook, it is necessary to specify an ExcelWriter object: >>> df2 = df1.copy() >>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP ... df1.to_excel(writer, sheet_name='Sheet_name_1') ... df2.to_excel(writer, sheet_name='Sheet_name_2') ExcelWriter can also be used to append to an existing Excel file: >>> with pd.ExcelWriter('output.xlsx', ... mode='a') as writer: # doctest: +SKIP ... df.to_excel(writer, sheet_name='Sheet_name_3') To set the library that is used to write the Excel file, you can pass the `engine` keyword (the default engine is automatically chosen depending on the file extension): >>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP """ df = self if isinstance(self, ABCDataFrame) else self.to_frame() from pandas.io.formats.excel import ExcelFormatter formatter = ExcelFormatter( df, na_rep=na_rep, cols=columns, header=header, float_format=float_format, index=index, index_label=index_label, merge_cells=merge_cells, inf_rep=inf_rep, ) formatter.write( excel_writer, sheet_name=sheet_name, startrow=startrow, startcol=startcol, freeze_panes=freeze_panes, engine=engine, storage_options=storage_options, ) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buf", ) def to_json( self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, orient: str | None = None, date_format: str | None = None, double_precision: int = 10, force_ascii: bool_t = True, date_unit: str = "ms", default_handler: Callable[[Any], JSONSerializable] | None = None, lines: bool_t = False, compression: CompressionOptions = "infer", index: bool_t = True, indent: int | None = None, storage_options: StorageOptions = None, mode: Literal["a", "w"] = "w", ) -> str | None: """ Convert the object to a JSON string. 
Note NaN's and None will be converted to null and datetime objects will be converted to UNIX timestamps. Parameters ---------- path_or_buf : str, path object, file-like object, or None, default None String, path object (implementing os.PathLike[str]), or file-like object implementing a write() function. If None, the result is returned as a string. orient : str Indication of expected JSON string format. * Series: - default is 'index' - allowed values are: {{'split', 'records', 'index', 'table'}}. * DataFrame: - default is 'columns' - allowed values are: {{'split', 'records', 'index', 'columns', 'values', 'table'}}. * The format of the JSON string: - 'split' : dict like {{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}} - 'records' : list like [{{column -> value}}, ... , {{column -> value}}] - 'index' : dict like {{index -> {{column -> value}}}} - 'columns' : dict like {{column -> {{index -> value}}}} - 'values' : just the values array - 'table' : dict like {{'schema': {{schema}}, 'data': {{data}}}} Describing the data, where data component is like ``orient='records'``. date_format : {{None, 'epoch', 'iso'}} Type of date conversion. 'epoch' = epoch milliseconds, 'iso' = ISO8601. The default depends on the `orient`. For ``orient='table'``, the default is 'iso'. For all other orients, the default is 'epoch'. double_precision : int, default 10 The number of decimal places to use when encoding floating point values. force_ascii : bool, default True Force encoded string to be ASCII. date_unit : str, default 'ms' (milliseconds) The time unit to encode to, governs timestamp and ISO8601 precision. One of 's', 'ms', 'us', 'ns' for second, millisecond, microsecond, and nanosecond respectively. default_handler : callable, default None Handler to call if object cannot otherwise be converted to a suitable format for JSON. Should receive a single argument which is the object to convert and return a serialisable object. lines : bool, default False If 'orient' is 'records' write out line-delimited json format. Will throw ValueError if incorrect 'orient' since others are not list-like. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. index : bool, default True Whether to include the index values in the JSON string. Not including the index (``index=False``) is only supported when orient is 'split' or 'table'. indent : int, optional Length of whitespace used to indent each record. {storage_options} .. versionadded:: 1.2.0 mode : str, default 'w' (writing) Specify the IO mode for output when supplying a path_or_buf. Accepted args are 'w' (writing) and 'a' (append) only. mode='a' is only supported when lines is True and orient is 'records'. Returns ------- None or str If path_or_buf is None, returns the resulting json format as a string. Otherwise returns None. See Also -------- read_json : Convert a JSON string to pandas object. Notes ----- The behavior of ``indent=0`` varies from the stdlib, which does not indent the output but does insert newlines. Currently, ``indent=0`` and the default ``indent=None`` are equivalent in pandas, though this may change in a future release. ``orient='table'`` contains a 'pandas_version' field under 'schema'. This stores the version of `pandas` used in the latest revision of the schema. Examples -------- >>> from json import loads, dumps >>> df = pd.DataFrame( ... [["a", "b"], ["c", "d"]], ... index=["row 1", "row 2"], ... columns=["col 1", "col 2"], ... 
) >>> result = df.to_json(orient="split") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "columns": [ "col 1", "col 2" ], "index": [ "row 1", "row 2" ], "data": [ [ "a", "b" ], [ "c", "d" ] ] }} Encoding/decoding a Dataframe using ``'records'`` formatted JSON. Note that index labels are not preserved with this encoding. >>> result = df.to_json(orient="records") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP [ {{ "col 1": "a", "col 2": "b" }}, {{ "col 1": "c", "col 2": "d" }} ] Encoding/decoding a Dataframe using ``'index'`` formatted JSON: >>> result = df.to_json(orient="index") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "row 1": {{ "col 1": "a", "col 2": "b" }}, "row 2": {{ "col 1": "c", "col 2": "d" }} }} Encoding/decoding a Dataframe using ``'columns'`` formatted JSON: >>> result = df.to_json(orient="columns") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "col 1": {{ "row 1": "a", "row 2": "c" }}, "col 2": {{ "row 1": "b", "row 2": "d" }} }} Encoding/decoding a Dataframe using ``'values'`` formatted JSON: >>> result = df.to_json(orient="values") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP [ [ "a", "b" ], [ "c", "d" ] ] Encoding with Table Schema: >>> result = df.to_json(orient="table") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "schema": {{ "fields": [ {{ "name": "index", "type": "string" }}, {{ "name": "col 1", "type": "string" }}, {{ "name": "col 2", "type": "string" }} ], "primaryKey": [ "index" ], "pandas_version": "1.4.0" }}, "data": [ {{ "index": "row 1", "col 1": "a", "col 2": "b" }}, {{ "index": "row 2", "col 1": "c", "col 2": "d" }} ] }} """ from pandas.io import json if date_format is None and orient == "table": date_format = "iso" elif date_format is None: date_format = "epoch" config.is_nonnegative_int(indent) indent = indent or 0 return json.to_json( path_or_buf=path_or_buf, obj=self, orient=orient, date_format=date_format, double_precision=double_precision, force_ascii=force_ascii, date_unit=date_unit, default_handler=default_handler, lines=lines, compression=compression, index=index, indent=indent, storage_options=storage_options, mode=mode, ) def to_hdf( self, path_or_buf: FilePath | HDFStore, key: str, mode: str = "a", complevel: int | None = None, complib: str | None = None, append: bool_t = False, format: str | None = None, index: bool_t = True, min_itemsize: int | dict[str, int] | None = None, nan_rep=None, dropna: bool_t | None = None, data_columns: Literal[True] | list[str] | None = None, errors: str = "strict", encoding: str = "UTF-8", ) -> None: """ Write the contained data to an HDF5 file using HDFStore. Hierarchical Data Format (HDF) is self-describing, allowing an application to interpret the structure and contents of a file with no outside information. One HDF file can hold a mix of related objects which can be accessed as a group or as individual objects. In order to add another DataFrame or Series to an existing HDF file please use append mode and a different a key. .. warning:: One can store a subclass of ``DataFrame`` or ``Series`` to HDF5, but the type of the subclass is lost upon storing. For more information see the :ref:`user guide <io.hdf5>`. Parameters ---------- path_or_buf : str or pandas.HDFStore File path or HDFStore object. key : str Identifier for the group in the store. 
mode : {'a', 'w', 'r+'}, default 'a' Mode to open file: - 'w': write, a new file is created (an existing file with the same name would be deleted). - 'a': append, an existing file is opened for reading and writing, and if the file does not exist it is created. - 'r+': similar to 'a', but the file must already exist. complevel : {0-9}, default None Specifies a compression level for data. A value of 0 or None disables compression. complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib' Specifies the compression library to be used. As of v0.20.2 these additional compressors for Blosc are supported (default if no compressor specified: 'blosc:blosclz'): {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy', 'blosc:zlib', 'blosc:zstd'}. Specifying a compression library which is not available issues a ValueError. append : bool, default False For Table formats, append the input data to the existing. format : {'fixed', 'table', None}, default 'fixed' Possible values: - 'fixed': Fixed format. Fast writing/reading. Not-appendable, nor searchable. - 'table': Table format. Write as a PyTables Table structure which may perform worse but allow more flexible operations like searching / selecting subsets of the data. - If None, pd.get_option('io.hdf.default_format') is checked, followed by fallback to "fixed". index : bool, default True Write DataFrame index as a column. min_itemsize : dict or int, optional Map column names to minimum string sizes for columns. nan_rep : Any, optional How to represent null values as str. Not allowed with append=True. dropna : bool, default False, optional Remove missing values. data_columns : list of columns or True, optional List of columns to create as indexed data columns for on-disk queries, or True to use all columns. By default only the axes of the object are indexed. See :ref:`Query via data columns<io.hdf5-query-data-columns>`. for more information. Applicable only to format='table'. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. encoding : str, default "UTF-8" See Also -------- read_hdf : Read from HDF file. DataFrame.to_orc : Write a DataFrame to the binary orc format. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. DataFrame.to_sql : Write to a SQL table. DataFrame.to_feather : Write out feather-format for DataFrames. DataFrame.to_csv : Write out to a csv file. Examples -------- >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}, ... 
index=['a', 'b', 'c']) # doctest: +SKIP >>> df.to_hdf('data.h5', key='df', mode='w') # doctest: +SKIP We can add another object to the same file: >>> s = pd.Series([1, 2, 3, 4]) # doctest: +SKIP >>> s.to_hdf('data.h5', key='s') # doctest: +SKIP Reading from HDF file: >>> pd.read_hdf('data.h5', 'df') # doctest: +SKIP A B a 1 4 b 2 5 c 3 6 >>> pd.read_hdf('data.h5', 's') # doctest: +SKIP 0 1 1 2 2 3 3 4 dtype: int64 """ from pandas.io import pytables # Argument 3 to "to_hdf" has incompatible type "NDFrame"; expected # "Union[DataFrame, Series]" [arg-type] pytables.to_hdf( path_or_buf, key, self, # type: ignore[arg-type] mode=mode, complevel=complevel, complib=complib, append=append, format=format, index=index, min_itemsize=min_itemsize, nan_rep=nan_rep, dropna=dropna, data_columns=data_columns, errors=errors, encoding=encoding, ) def to_sql( self, name: str, con, schema: str | None = None, if_exists: Literal["fail", "replace", "append"] = "fail", index: bool_t = True, index_label: IndexLabel = None, chunksize: int | None = None, dtype: DtypeArg | None = None, method: str | None = None, ) -> int | None: """ Write records stored in a DataFrame to a SQL database. Databases supported by SQLAlchemy [1]_ are supported. Tables can be newly created, appended to, or overwritten. Parameters ---------- name : str Name of SQL table. con : sqlalchemy.engine.(Engine or Connection) or sqlite3.Connection Using SQLAlchemy makes it possible to use any DB supported by that library. Legacy support is provided for sqlite3.Connection objects. The user is responsible for engine disposal and connection closure for the SQLAlchemy connectable. See `here \ <https://docs.sqlalchemy.org/en/20/core/connections.html>`_. If passing a sqlalchemy.engine.Connection which is already in a transaction, the transaction will not be committed. If passing a sqlite3.Connection, it will not be possible to roll back the record insertion. schema : str, optional Specify the schema (if database flavor supports this). If None, use default schema. if_exists : {'fail', 'replace', 'append'}, default 'fail' How to behave if the table already exists. * fail: Raise a ValueError. * replace: Drop the table before inserting new values. * append: Insert new values to the existing table. index : bool, default True Write DataFrame index as a column. Uses `index_label` as the column name in the table. index_label : str or sequence, default None Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. chunksize : int, optional Specify the number of rows in each batch to be written at a time. By default, all rows will be written at once. dtype : dict or scalar, optional Specifying the datatype for columns. If a dictionary is used, the keys should be the column names and the values should be the SQLAlchemy types or strings for the sqlite3 legacy mode. If a scalar is provided, it will be applied to all columns. method : {None, 'multi', callable}, optional Controls the SQL insertion clause used: * None : Uses standard SQL ``INSERT`` clause (one per row). * 'multi': Pass multiple values in a single ``INSERT`` clause. * callable with signature ``(pd_table, conn, keys, data_iter)``. Details and a sample callable implementation can be found in the section :ref:`insert method <io.sql.method>`. Returns ------- None or int Number of rows affected by to_sql. 
None is returned if the callable passed into ``method`` does not return an integer number of rows. The number of returned rows affected is the sum of the ``rowcount`` attribute of ``sqlite3.Cursor`` or SQLAlchemy connectable which may not reflect the exact number of written rows as stipulated in the `sqlite3 <https://docs.python.org/3/library/sqlite3.html#sqlite3.Cursor.rowcount>`__ or `SQLAlchemy <https://docs.sqlalchemy.org/en/20/core/connections.html#sqlalchemy.engine.CursorResult.rowcount>`__. .. versionadded:: 1.4.0 Raises ------ ValueError When the table already exists and `if_exists` is 'fail' (the default). See Also -------- read_sql : Read a DataFrame from a table. Notes ----- Timezone aware datetime columns will be written as ``Timestamp with timezone`` type with SQLAlchemy if supported by the database. Otherwise, the datetimes will be stored as timezone unaware timestamps local to the original timezone. References ---------- .. [1] https://docs.sqlalchemy.org .. [2] https://www.python.org/dev/peps/pep-0249/ Examples -------- Create an in-memory SQLite database. >>> from sqlalchemy import create_engine >>> engine = create_engine('sqlite://', echo=False) Create a table from scratch with 3 rows. >>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']}) >>> df name 0 User 1 1 User 2 2 User 3 >>> df.to_sql('users', con=engine) 3 >>> from sqlalchemy import text >>> with engine.connect() as conn: ... conn.execute(text("SELECT * FROM users")).fetchall() [(0, 'User 1'), (1, 'User 2'), (2, 'User 3')] An `sqlalchemy.engine.Connection` can also be passed to `con`: >>> with engine.begin() as connection: ... df1 = pd.DataFrame({'name' : ['User 4', 'User 5']}) ... df1.to_sql('users', con=connection, if_exists='append') 2 This is allowed to support operations that require that the same DBAPI connection is used for the entire operation. >>> df2 = pd.DataFrame({'name' : ['User 6', 'User 7']}) >>> df2.to_sql('users', con=engine, if_exists='append') 2 >>> with engine.connect() as conn: ... conn.execute(text("SELECT * FROM users")).fetchall() [(0, 'User 1'), (1, 'User 2'), (2, 'User 3'), (0, 'User 4'), (1, 'User 5'), (0, 'User 6'), (1, 'User 7')] Overwrite the table with just ``df2``. >>> df2.to_sql('users', con=engine, if_exists='replace', ... index_label='id') 2 >>> with engine.connect() as conn: ... conn.execute(text("SELECT * FROM users")).fetchall() [(0, 'User 6'), (1, 'User 7')] Specify the dtype (especially useful for integers with missing values). Notice that while pandas is forced to store the data as floating point, the database supports nullable integers. When fetching the data with Python, we get back integer scalars. >>> df = pd.DataFrame({"A": [1, None, 2]}) >>> df A 0 1.0 1 NaN 2 2.0 >>> from sqlalchemy.types import Integer >>> df.to_sql('integers', con=engine, index=False, ... dtype={"A": Integer()}) 3 >>> with engine.connect() as conn: ... conn.execute(text("SELECT * FROM integers")).fetchall() [(1,), (None,), (2,)] """ # noqa:E501 from pandas.io import sql return sql.to_sql( self, name, con, schema=schema, if_exists=if_exists, index=index, index_label=index_label, chunksize=chunksize, dtype=dtype, method=method, ) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path", ) def to_pickle( self, path: FilePath | WriteBuffer[bytes], compression: CompressionOptions = "infer", protocol: int = pickle.HIGHEST_PROTOCOL, storage_options: StorageOptions = None, ) -> None: """ Pickle (serialize) object to file. 
Parameters ---------- path : str, path object, or file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. File path where the pickled object will be stored. {compression_options} protocol : int Int which indicates which protocol should be used by the pickler, default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible values are 0, 1, 2, 3, 4, 5. A negative value for the protocol parameter is equivalent to setting its value to HIGHEST_PROTOCOL. .. [1] https://docs.python.org/3/library/pickle.html. {storage_options} .. versionadded:: 1.2.0 See Also -------- read_pickle : Load pickled pandas object (or any object) from file. DataFrame.to_hdf : Write DataFrame to an HDF5 file. DataFrame.to_sql : Write DataFrame to a SQL database. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Examples -------- >>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}}) # doctest: +SKIP >>> original_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> original_df.to_pickle("./dummy.pkl") # doctest: +SKIP >>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP >>> unpickled_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 """ # noqa: E501 from pandas.io.pickle import to_pickle to_pickle( self, path, compression=compression, protocol=protocol, storage_options=storage_options, ) def to_clipboard( self, excel: bool_t = True, sep: str | None = None, **kwargs ) -> None: r""" Copy object to the system clipboard. Write a text representation of object to the system clipboard. This can be pasted into Excel, for example. Parameters ---------- excel : bool, default True Produce output in a csv format for easy pasting into excel. - True, use the provided separator for csv pasting. - False, write a string representation of the object to the clipboard. sep : str, default ``'\t'`` Field delimiter. **kwargs These parameters will be passed to DataFrame.to_csv. See Also -------- DataFrame.to_csv : Write a DataFrame to a comma-separated values (csv) file. read_clipboard : Read text from clipboard and pass to read_csv. Notes ----- Requirements for your platform. - Linux : `xclip`, or `xsel` (with `PyQt4` modules) - Windows : none - macOS : none This method uses the processes developed for the package `pyperclip`. A solution to render any output string format is given in the examples. Examples -------- Copy the contents of a DataFrame to the clipboard. >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) >>> df.to_clipboard(sep=',') # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # ,A,B,C ... # 0,1,2,3 ... # 1,4,5,6 We can omit the index by passing the keyword `index` and setting it to false. >>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # A,B,C ... # 1,2,3 ... # 4,5,6 Using the original `pyperclip` package for any string output format. .. code-block:: python import pyperclip html = df.style.to_html() pyperclip.copy(html) """ from pandas.io import clipboards clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs) def to_xarray(self): """ Return an xarray object from the pandas object. Returns ------- xarray.DataArray or xarray.Dataset Data in the pandas structure converted to Dataset if the object is a DataFrame, or a DataArray if the object is a Series. See Also -------- DataFrame.to_hdf : Write DataFrame to an HDF5 file. 
DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Notes ----- See the `xarray docs <https://xarray.pydata.org/en/stable/>`__ Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2), ... ('parrot', 'bird', 24.0, 2), ... ('lion', 'mammal', 80.5, 4), ... ('monkey', 'mammal', np.nan, 4)], ... columns=['name', 'class', 'max_speed', ... 'num_legs']) >>> df name class max_speed num_legs 0 falcon bird 389.0 2 1 parrot bird 24.0 2 2 lion mammal 80.5 4 3 monkey mammal NaN 4 >>> df.to_xarray() <xarray.Dataset> Dimensions: (index: 4) Coordinates: * index (index) int64 0 1 2 3 Data variables: name (index) object 'falcon' 'parrot' 'lion' 'monkey' class (index) object 'bird' 'bird' 'mammal' 'mammal' max_speed (index) float64 389.0 24.0 80.5 nan num_legs (index) int64 2 2 4 4 >>> df['max_speed'].to_xarray() <xarray.DataArray 'max_speed' (index: 4)> array([389. , 24. , 80.5, nan]) Coordinates: * index (index) int64 0 1 2 3 >>> dates = pd.to_datetime(['2018-01-01', '2018-01-01', ... '2018-01-02', '2018-01-02']) >>> df_multiindex = pd.DataFrame({'date': dates, ... 'animal': ['falcon', 'parrot', ... 'falcon', 'parrot'], ... 'speed': [350, 18, 361, 15]}) >>> df_multiindex = df_multiindex.set_index(['date', 'animal']) >>> df_multiindex speed date animal 2018-01-01 falcon 350 parrot 18 2018-01-02 falcon 361 parrot 15 >>> df_multiindex.to_xarray() <xarray.Dataset> Dimensions: (date: 2, animal: 2) Coordinates: * date (date) datetime64[ns] 2018-01-01 2018-01-02 * animal (animal) object 'falcon' 'parrot' Data variables: speed (date, animal) int64 350 18 361 15 """ xarray = import_optional_dependency("xarray") if self.ndim == 1: return xarray.DataArray.from_series(self) else: return xarray.Dataset.from_dataframe(self) def to_latex( self, buf: None = ..., columns: Sequence[Hashable] | None = ..., header: bool_t | Sequence[str] = ..., index: bool_t = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool_t | None = ..., index_names: bool_t = ..., bold_rows: bool_t = ..., column_format: str | None = ..., longtable: bool_t | None = ..., escape: bool_t | None = ..., encoding: str | None = ..., decimal: str = ..., multicolumn: bool_t | None = ..., multicolumn_format: str | None = ..., multirow: bool_t | None = ..., caption: str | tuple[str, str] | None = ..., label: str | None = ..., position: str | None = ..., ) -> str: ... def to_latex( self, buf: FilePath | WriteBuffer[str], columns: Sequence[Hashable] | None = ..., header: bool_t | Sequence[str] = ..., index: bool_t = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool_t | None = ..., index_names: bool_t = ..., bold_rows: bool_t = ..., column_format: str | None = ..., longtable: bool_t | None = ..., escape: bool_t | None = ..., encoding: str | None = ..., decimal: str = ..., multicolumn: bool_t | None = ..., multicolumn_format: str | None = ..., multirow: bool_t | None = ..., caption: str | tuple[str, str] | None = ..., label: str | None = ..., position: str | None = ..., ) -> None: ... 
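# The two stub signatures above encode the ``buf``-dependent return type:
# ``to_latex`` returns the rendered LaTeX source as ``str`` when ``buf`` is
# None, and returns ``None`` when writing to a path or buffer. A minimal
# usage sketch (``df`` is a hypothetical small DataFrame, shown for
# illustration only):
#
#     df = pd.DataFrame({"x": [1, 2]})
#     tex = df.to_latex()        # buf=None -> LaTeX source returned as str
#     df.to_latex("table.tex")   # path given -> file written, returns None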
def to_latex( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[Hashable] | None = None, header: bool_t | Sequence[str] = True, index: bool_t = True, na_rep: str = "NaN", formatters: FormattersType | None = None, float_format: FloatFormatType | None = None, sparsify: bool_t | None = None, index_names: bool_t = True, bold_rows: bool_t = False, column_format: str | None = None, longtable: bool_t | None = None, escape: bool_t | None = None, encoding: str | None = None, decimal: str = ".", multicolumn: bool_t | None = None, multicolumn_format: str | None = None, multirow: bool_t | None = None, caption: str | tuple[str, str] | None = None, label: str | None = None, position: str | None = None, ) -> str | None: r""" Render object to a LaTeX tabular, longtable, or nested table. Requires ``\usepackage{{booktabs}}``. The output can be copy/pasted into a main LaTeX document or read from an external file with ``\input{{table.tex}}``. .. versionchanged:: 1.2.0 Added position argument, changed meaning of caption argument. .. versionchanged:: 2.0.0 Refactored to use the Styler implementation via jinja2 templating. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. columns : list of label, optional The subset of columns to write. Writes all columns by default. header : bool or list of str, default True Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names. index : bool, default True Write row names (index). na_rep : str, default 'NaN' Missing data representation. formatters : list of functions or dict of {{str: function}}, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function or str, optional, default None Formatter for floating point numbers. For example ``float_format="%.2f"`` and ``float_format="{{:0.2f}}".format`` will both result in 0.1234 being formatted as 0.12. sparsify : bool, optional Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. By default, the value will be read from the config module. index_names : bool, default True Prints the names of the indexes. bold_rows : bool, default False Make the row labels bold in the output. column_format : str, optional The columns format as specified in `LaTeX table format <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3 columns. By default, 'l' will be used for all columns except columns of numbers, which default to 'r'. longtable : bool, optional Use a longtable environment instead of tabular. Requires adding a \usepackage{{longtable}} to your LaTeX preamble. By default, the value will be read from the pandas config module, and set to `True` if the option ``styler.latex.environment`` is `"longtable"`. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed. escape : bool, optional By default, the value will be read from the pandas config module and set to `True` if the option ``styler.format.escape`` is `"latex"`. When set to False prevents from escaping latex special characters in column names. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed, as has the default value to `False`. encoding : str, optional A string representing the encoding to use in the output file, defaults to 'utf-8'. 
decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. multicolumn : bool, default True Use \multicolumn to enhance MultiIndex columns. The default will be read from the config module, and is set as the option ``styler.sparse.columns``. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed. multicolumn_format : str, default 'r' The alignment for multicolumns, similar to `column_format` The default will be read from the config module, and is set as the option ``styler.latex.multicol_align``. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed, as has the default value to "r". multirow : bool, default True Use \multirow to enhance MultiIndex rows. Requires adding a \usepackage{{multirow}} to your LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained rows, separating groups via clines. The default will be read from the pandas config module, and is set as the option ``styler.sparse.index``. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed, as has the default value to `True`. caption : str or tuple, optional Tuple (full_caption, short_caption), which results in ``\caption[short_caption]{{full_caption}}``; if a single string is passed, no short caption will be set. .. versionchanged:: 1.2.0 Optionally allow caption to be a tuple ``(full_caption, short_caption)``. label : str, optional The LaTeX label to be placed inside ``\label{{}}`` in the output. This is used with ``\ref{{}}`` in the main ``.tex`` file. position : str, optional The LaTeX positional argument for tables, to be placed after ``\begin{{}}`` in the output. .. versionadded:: 1.2.0 Returns ------- str or None If buf is None, returns the result as a string. Otherwise returns None. See Also -------- io.formats.style.Styler.to_latex : Render a DataFrame to LaTeX with conditional formatting. DataFrame.to_string : Render a DataFrame to a console-friendly tabular output. DataFrame.to_html : Render a DataFrame as an HTML table. Notes ----- As of v2.0.0 this method has changed to use the Styler implementation as part of :meth:`.Styler.to_latex` via ``jinja2`` templating. This means that ``jinja2`` is a requirement, and needs to be installed, for this method to function. It is advised that users switch to using Styler, since that implementation is more frequently updated and contains much more flexibility with the output. Examples -------- Convert a general DataFrame to LaTeX with formatting: >>> df = pd.DataFrame(dict(name=['Raphael', 'Donatello'], ... age=[26, 45], ... height=[181.23, 177.65])) >>> print(df.to_latex(index=False, ... formatters={"name": str.upper}, ... float_format="{:.1f}".format, ... 
)) # doctest: +SKIP \begin{tabular}{lrr} \toprule name & age & height \\ \midrule RAPHAEL & 26 & 181.2 \\ DONATELLO & 45 & 177.7 \\ \bottomrule \end{tabular} """ # Get defaults from the pandas config if self.ndim == 1: self = self.to_frame() if longtable is None: longtable = config.get_option("styler.latex.environment") == "longtable" if escape is None: escape = config.get_option("styler.format.escape") == "latex" if multicolumn is None: multicolumn = config.get_option("styler.sparse.columns") if multicolumn_format is None: multicolumn_format = config.get_option("styler.latex.multicol_align") if multirow is None: multirow = config.get_option("styler.sparse.index") if column_format is not None and not isinstance(column_format, str): raise ValueError("`column_format` must be str or unicode") length = len(self.columns) if columns is None else len(columns) if isinstance(header, (list, tuple)) and len(header) != length: raise ValueError(f"Writing {length} cols but got {len(header)} aliases") # Refactor formatters/float_format/decimal/na_rep/escape to Styler structure base_format_ = { "na_rep": na_rep, "escape": "latex" if escape else None, "decimal": decimal, } index_format_: dict[str, Any] = {"axis": 0, **base_format_} column_format_: dict[str, Any] = {"axis": 1, **base_format_} if isinstance(float_format, str): float_format_: Callable | None = lambda x: float_format % x else: float_format_ = float_format def _wrap(x, alt_format_): if isinstance(x, (float, complex)) and float_format_ is not None: return float_format_(x) else: return alt_format_(x) formatters_: list | tuple | dict | Callable | None = None if isinstance(formatters, list): formatters_ = { c: partial(_wrap, alt_format_=formatters[i]) for i, c in enumerate(self.columns) } elif isinstance(formatters, dict): index_formatter = formatters.pop("__index__", None) column_formatter = formatters.pop("__columns__", None) if index_formatter is not None: index_format_.update({"formatter": index_formatter}) if column_formatter is not None: column_format_.update({"formatter": column_formatter}) formatters_ = formatters float_columns = self.select_dtypes(include="float").columns for col in float_columns: if col not in formatters.keys(): formatters_.update({col: float_format_}) elif formatters is None and float_format is not None: formatters_ = partial(_wrap, alt_format_=lambda v: v) format_index_ = [index_format_, column_format_] # Deal with hiding indexes and relabelling column names hide_: list[dict] = [] relabel_index_: list[dict] = [] if columns: hide_.append( { "subset": [c for c in self.columns if c not in columns], "axis": "columns", } ) if header is False: hide_.append({"axis": "columns"}) elif isinstance(header, (list, tuple)): relabel_index_.append({"labels": header, "axis": "columns"}) format_index_ = [index_format_] # column_format is overwritten if index is False: hide_.append({"axis": "index"}) if index_names is False: hide_.append({"names": True, "axis": "index"}) render_kwargs_ = { "hrules": True, "sparse_index": sparsify, "sparse_columns": sparsify, "environment": "longtable" if longtable else None, "multicol_align": multicolumn_format if multicolumn else f"naive-{multicolumn_format}", "multirow_align": "t" if multirow else "naive", "encoding": encoding, "caption": caption, "label": label, "position": position, "column_format": column_format, "clines": "skip-last;data" if (multirow and isinstance(self.index, MultiIndex)) else None, "bold_rows": bold_rows, } return self._to_latex_via_styler( buf, hide=hide_, 
relabel_index=relabel_index_, format={"formatter": formatters_, **base_format_}, format_index=format_index_, render_kwargs=render_kwargs_, ) def _to_latex_via_styler( self, buf=None, *, hide: dict | list[dict] | None = None, relabel_index: dict | list[dict] | None = None, format: dict | list[dict] | None = None, format_index: dict | list[dict] | None = None, render_kwargs: dict | None = None, ): """ Render object to a LaTeX tabular, longtable, or nested table. Uses the ``Styler`` implementation with the following, ordered, method chaining: .. code-block:: python styler = Styler(DataFrame) styler.hide(**hide) styler.relabel_index(**relabel_index) styler.format(**format) styler.format_index(**format_index) styler.to_latex(buf=buf, **render_kwargs) Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. hide : dict, list of dict Keyword args to pass to the method call of ``Styler.hide``. If a list will call the method numerous times. relabel_index : dict, list of dict Keyword args to pass to the method of ``Styler.relabel_index``. If a list will call the method numerous times. format : dict, list of dict Keyword args to pass to the method call of ``Styler.format``. If a list will call the method numerous times. format_index : dict, list of dict Keyword args to pass to the method call of ``Styler.format_index``. If a list will call the method numerous times. render_kwargs : dict Keyword args to pass to the method call of ``Styler.to_latex``. Returns ------- str or None If buf is None, returns the result as a string. Otherwise returns None. """ from pandas.io.formats.style import Styler self = cast("DataFrame", self) styler = Styler(self, uuid="") for kw_name in ["hide", "relabel_index", "format", "format_index"]: kw = vars()[kw_name] if isinstance(kw, dict): getattr(styler, kw_name)(**kw) elif isinstance(kw, list): for sub_kw in kw: getattr(styler, kw_name)(**sub_kw) # bold_rows is not a direct kwarg of Styler.to_latex render_kwargs = {} if render_kwargs is None else render_kwargs if render_kwargs.pop("bold_rows"): styler.applymap_index(lambda v: "textbf:--rwrap;") return styler.to_latex(buf=buf, **render_kwargs) def to_csv( self, path_or_buf: None = ..., sep: str = ..., na_rep: str = ..., float_format: str | Callable | None = ..., columns: Sequence[Hashable] | None = ..., header: bool_t | list[str] = ..., index: bool_t = ..., index_label: IndexLabel | None = ..., mode: str = ..., encoding: str | None = ..., compression: CompressionOptions = ..., quoting: int | None = ..., quotechar: str = ..., lineterminator: str | None = ..., chunksize: int | None = ..., date_format: str | None = ..., doublequote: bool_t = ..., escapechar: str | None = ..., decimal: str = ..., errors: str = ..., storage_options: StorageOptions = ..., ) -> str: ... def to_csv( self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str], sep: str = ..., na_rep: str = ..., float_format: str | Callable | None = ..., columns: Sequence[Hashable] | None = ..., header: bool_t | list[str] = ..., index: bool_t = ..., index_label: IndexLabel | None = ..., mode: str = ..., encoding: str | None = ..., compression: CompressionOptions = ..., quoting: int | None = ..., quotechar: str = ..., lineterminator: str | None = ..., chunksize: int | None = ..., date_format: str | None = ..., doublequote: bool_t = ..., escapechar: str | None = ..., decimal: str = ..., errors: str = ..., storage_options: StorageOptions = ..., ) -> None: ... 
@doc(
storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buf", ) def to_csv( self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, sep: str = ",", na_rep: str = "", float_format: str | Callable | None = None, columns: Sequence[Hashable] | None = None, header: bool_t | list[str] = True, index: bool_t = True, index_label: IndexLabel | None = None, mode: str = "w", encoding: str | None = None, compression: CompressionOptions = "infer", quoting: int | None = None, quotechar: str = '"', lineterminator: str | None = None, chunksize: int | None = None, date_format: str | None = None, doublequote: bool_t = True, escapechar: str | None = None, decimal: str = ".", errors: str = "strict", storage_options: StorageOptions = None, ) -> str | None: r""" Write object to a comma-separated values (csv) file. Parameters ---------- path_or_buf : str, path object, file-like object, or None, default None String, path object (implementing os.PathLike[str]), or file-like object implementing a write() function. If None, the result is returned as a string. If a non-binary file object is passed, it should be opened with `newline=''`, disabling universal newlines. If a binary file object is passed, `mode` might need to contain a `'b'`. .. versionchanged:: 1.2.0 Support for binary file objects was introduced. sep : str, default ',' String of length 1. Field delimiter for the output file. na_rep : str, default '' Missing data representation. float_format : str, Callable, default None Format string for floating point numbers. If a Callable is given, it takes precedence over other numeric formatting parameters, like decimal. columns : sequence, optional Columns to write. header : bool or list of str, default True Write out the column names. If a list of strings is given it is assumed to be aliases for the column names. index : bool, default True Write row names (index). index_label : str or sequence, or False, default None Column label for index column(s) if desired. If None is given, and `header` and `index` are True, then the index names are used. A sequence should be given if the object uses MultiIndex. If False do not print fields for index names. Use index_label=False for easier importing in R. mode : str, default 'w' Python write mode. The available write modes are the same as :py:func:`open`. encoding : str, optional A string representing the encoding to use in the output file, defaults to 'utf-8'. `encoding` is not supported if `path_or_buf` is a non-binary file object. {compression_options} .. versionchanged:: 1.0.0 May now be a dict with key 'method' as compression mode and other entries as additional compression options if compression mode is 'zip'. .. versionchanged:: 1.1.0 Passing compression options as keys in dict is supported for compression modes 'gzip', 'bz2', 'zstd', and 'zip'. .. versionchanged:: 1.2.0 Compression is supported for binary file objects. .. versionchanged:: 1.2.0 Previous versions forwarded dict entries for 'gzip' to `gzip.open` instead of `gzip.GzipFile` which prevented setting `mtime`. quoting : optional constant from csv module Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format` then floats are converted to strings and thus csv.QUOTE_NONNUMERIC will treat them as non-numeric. quotechar : str, default '\"' String of length 1. Character used to quote fields. lineterminator : str, optional The newline character or character sequence to use in the output file. 
Defaults to `os.linesep`, which depends on the OS in which this method is called ('\\n' for linux, '\\r\\n' for Windows, i.e.). .. versionchanged:: 1.5.0 Previously was line_terminator, changed for consistency with read_csv and the standard library 'csv' module. chunksize : int or None Rows to write at a time. date_format : str, default None Format string for datetime objects. doublequote : bool, default True Control quoting of `quotechar` inside a field. escapechar : str, default None String of length 1. Character used to escape `sep` and `quotechar` when appropriate. decimal : str, default '.' Character recognized as decimal separator. E.g. use ',' for European data. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 Returns ------- None or str If path_or_buf is None, returns the resulting csv format as a string. Otherwise returns None. See Also -------- read_csv : Load a CSV file into a DataFrame. to_excel : Write DataFrame to an Excel file. Examples -------- >>> df = pd.DataFrame({{'name': ['Raphael', 'Donatello'], ... 'mask': ['red', 'purple'], ... 'weapon': ['sai', 'bo staff']}}) >>> df.to_csv(index=False) 'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n' Create 'out.zip' containing 'out.csv' >>> compression_opts = dict(method='zip', ... archive_name='out.csv') # doctest: +SKIP >>> df.to_csv('out.zip', index=False, ... compression=compression_opts) # doctest: +SKIP To write a csv file to a new folder or nested folder you will first need to create it using either Pathlib or os: >>> from pathlib import Path # doctest: +SKIP >>> filepath = Path('folder/subfolder/out.csv') # doctest: +SKIP >>> filepath.parent.mkdir(parents=True, exist_ok=True) # doctest: +SKIP >>> df.to_csv(filepath) # doctest: +SKIP >>> import os # doctest: +SKIP >>> os.makedirs('folder/subfolder', exist_ok=True) # doctest: +SKIP >>> df.to_csv('folder/subfolder/out.csv') # doctest: +SKIP """ df = self if isinstance(self, ABCDataFrame) else self.to_frame() formatter = DataFrameFormatter( frame=df, header=header, index=index, na_rep=na_rep, float_format=float_format, decimal=decimal, ) return DataFrameRenderer(formatter).to_csv( path_or_buf, lineterminator=lineterminator, sep=sep, encoding=encoding, errors=errors, compression=compression, quoting=quoting, columns=columns, index_label=index_label, mode=mode, chunksize=chunksize, quotechar=quotechar, date_format=date_format, doublequote=doublequote, escapechar=escapechar, storage_options=storage_options, ) # ---------------------------------------------------------------------- # Lookup Caching def _reset_cacher(self) -> None: """ Reset the cacher. """ raise AbstractMethodError(self) def _maybe_update_cacher( self, clear: bool_t = False, verify_is_copy: bool_t = True, inplace: bool_t = False, ) -> None: """ See if we need to update our parent cacher if clear, then clear our cache. Parameters ---------- clear : bool, default False Clear the item cache. verify_is_copy : bool, default True Provide is_copy checks. 
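inplace : bool, default False
    Whether the cacher update is being performed as part of an in-place
    modification (for example, ``_update_inplace`` calls this method with
    ``inplace=True``).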
""" if using_copy_on_write(): return if verify_is_copy: self._check_setitem_copy(t="referent") if clear: self._clear_item_cache() def _clear_item_cache(self) -> None: raise AbstractMethodError(self) # ---------------------------------------------------------------------- # Indexing Methods def take(self: NDFrameT, indices, axis: Axis = 0, **kwargs) -> NDFrameT: """ Return the elements in the given *positional* indices along an axis. This means that we are not indexing according to actual values in the index attribute of the object. We are indexing according to the actual position of the element in the object. Parameters ---------- indices : array-like An array of ints indicating which positions to take. axis : {0 or 'index', 1 or 'columns', None}, default 0 The axis on which to select elements. ``0`` means that we are selecting rows, ``1`` means that we are selecting columns. For `Series` this parameter is unused and defaults to 0. **kwargs For compatibility with :meth:`numpy.take`. Has no effect on the output. Returns ------- same type as caller An array-like containing the elements taken from the object. See Also -------- DataFrame.loc : Select a subset of a DataFrame by labels. DataFrame.iloc : Select a subset of a DataFrame by positions. numpy.take : Take elements from an array along an axis. Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey', 'mammal', np.nan)], ... columns=['name', 'class', 'max_speed'], ... index=[0, 2, 3, 1]) >>> df name class max_speed 0 falcon bird 389.0 2 parrot bird 24.0 3 lion mammal 80.5 1 monkey mammal NaN Take elements at positions 0 and 3 along the axis 0 (default). Note how the actual indices selected (0 and 1) do not correspond to our selected indices 0 and 3. That's because we are selecting the 0th and 3rd rows, not rows whose indices equal 0 and 3. >>> df.take([0, 3]) name class max_speed 0 falcon bird 389.0 1 monkey mammal NaN Take elements at indices 1 and 2 along the axis 1 (column selection). >>> df.take([1, 2], axis=1) class max_speed 0 bird 389.0 2 bird 24.0 3 mammal 80.5 1 mammal NaN We may take elements using negative integers for positive indices, starting from the end of the object, just like with Python lists. >>> df.take([-1, -2]) name class max_speed 1 monkey mammal NaN 3 lion mammal 80.5 """ nv.validate_take((), kwargs) return self._take(indices, axis) def _take( self: NDFrameT, indices, axis: Axis = 0, convert_indices: bool_t = True, ) -> NDFrameT: """ Internal version of the `take` allowing specification of additional args. See the docstring of `take` for full explanation of the parameters. """ if not isinstance(indices, slice): indices = np.asarray(indices, dtype=np.intp) if ( axis == 0 and indices.ndim == 1 and using_copy_on_write() and is_range_indexer(indices, len(self)) ): return self.copy(deep=None) new_data = self._mgr.take( indices, axis=self._get_block_manager_axis(axis), verify=True, convert_indices=convert_indices, ) return self._constructor(new_data).__finalize__(self, method="take") def _take_with_is_copy(self: NDFrameT, indices, axis: Axis = 0) -> NDFrameT: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing for the SettingWithCopyWarning). See the docstring of `take` for full explanation of the parameters. """ result = self._take(indices=indices, axis=axis) # Maybe set copy if we didn't actually change the index. 
if not result._get_axis(axis).equals(self._get_axis(axis)): result._set_is_copy(self) return result def xs( self: NDFrameT, key: IndexLabel, axis: Axis = 0, level: IndexLabel = None, drop_level: bool_t = True, ) -> NDFrameT: """ Return cross-section from the Series/DataFrame. This method takes a `key` argument to select data at a particular level of a MultiIndex. Parameters ---------- key : label or tuple of label Label contained in the index, or partially in a MultiIndex. axis : {0 or 'index', 1 or 'columns'}, default 0 Axis to retrieve cross-section on. level : object, defaults to first n levels (n=1 or len(key)) In case of a key partially contained in a MultiIndex, indicate which levels are used. Levels can be referred by label or position. drop_level : bool, default True If False, returns object with same levels as self. Returns ------- Series or DataFrame Cross-section from the original Series or DataFrame corresponding to the selected index levels. See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. DataFrame.iloc : Purely integer-location based indexing for selection by position. Notes ----- `xs` can not be used to set values. MultiIndex Slicers is a generic way to get/set values on any level or levels. It is a superset of `xs` functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`. Examples -------- >>> d = {'num_legs': [4, 4, 2, 2], ... 'num_wings': [0, 0, 2, 2], ... 'class': ['mammal', 'mammal', 'mammal', 'bird'], ... 'animal': ['cat', 'dog', 'bat', 'penguin'], ... 'locomotion': ['walks', 'walks', 'flies', 'walks']} >>> df = pd.DataFrame(data=d) >>> df = df.set_index(['class', 'animal', 'locomotion']) >>> df num_legs num_wings class animal locomotion mammal cat walks 4 0 dog walks 4 0 bat flies 2 2 bird penguin walks 2 2 Get values at specified index >>> df.xs('mammal') num_legs num_wings animal locomotion cat walks 4 0 dog walks 4 0 bat flies 2 2 Get values at several indexes >>> df.xs(('mammal', 'dog', 'walks')) num_legs 4 num_wings 0 Name: (mammal, dog, walks), dtype: int64 Get values at specified index and level >>> df.xs('cat', level=1) num_legs num_wings class locomotion mammal walks 4 0 Get values at several indexes and levels >>> df.xs(('bird', 'walks'), ... 
level=[0, 'locomotion']) num_legs num_wings animal penguin 2 2 Get values at specified column and axis >>> df.xs('num_wings', axis=1) class animal locomotion mammal cat walks 0 dog walks 0 bat flies 2 bird penguin walks 2 Name: num_wings, dtype: int64 """ axis = self._get_axis_number(axis) labels = self._get_axis(axis) if isinstance(key, list): raise TypeError("list keys are not supported in xs, pass a tuple instead") if level is not None: if not isinstance(labels, MultiIndex): raise TypeError("Index must be a MultiIndex") loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level) # create the tuple of the indexer _indexer = [slice(None)] * self.ndim _indexer[axis] = loc indexer = tuple(_indexer) result = self.iloc[indexer] setattr(result, result._get_axis_name(axis), new_ax) return result if axis == 1: if drop_level: return self[key] index = self.columns else: index = self.index if isinstance(index, MultiIndex): loc, new_index = index._get_loc_level(key, level=0) if not drop_level: if lib.is_integer(loc): new_index = index[loc : loc + 1] else: new_index = index[loc] else: loc = index.get_loc(key) if isinstance(loc, np.ndarray): if loc.dtype == np.bool_: (inds,) = loc.nonzero() return self._take_with_is_copy(inds, axis=axis) else: return self._take_with_is_copy(loc, axis=axis) if not is_scalar(loc): new_index = index[loc] if is_scalar(loc) and axis == 0: # In this case loc should be an integer if self.ndim == 1: # if we encounter an array-like and we only have 1 dim # that means that their are list/ndarrays inside the Series! # so just return them (GH 6394) return self._values[loc] new_mgr = self._mgr.fast_xs(loc) result = self._constructor_sliced( new_mgr, name=self.index[loc] ).__finalize__(self) elif is_scalar(loc): result = self.iloc[:, slice(loc, loc + 1)] elif axis == 1: result = self.iloc[:, loc] else: result = self.iloc[loc] result.index = new_index # this could be a view # but only in a single-dtyped view sliceable case result._set_is_copy(self, copy=not result._is_view) return result def __getitem__(self, item): raise AbstractMethodError(self) def _slice(self: NDFrameT, slobj: slice, axis: Axis = 0) -> NDFrameT: """ Construct a slice of this container. Slicing with this method is *always* positional. """ assert isinstance(slobj, slice), type(slobj) axis = self._get_block_manager_axis(axis) result = self._constructor(self._mgr.get_slice(slobj, axis=axis)) result = result.__finalize__(self) # this could be a view # but only in a single-dtyped view sliceable case is_copy = axis != 0 or result._is_view result._set_is_copy(self, copy=is_copy) return result def _set_is_copy(self, ref: NDFrame, copy: bool_t = True) -> None: if not copy: self._is_copy = None else: assert ref is not None self._is_copy = weakref.ref(ref) def _check_is_chained_assignment_possible(self) -> bool_t: """ Check if we are a view, have a cacher, and are of mixed type. If so, then force a setitem_copy check. Should be called just near setting a value Will return a boolean if it we are a view and are cached, but a single-dtype meaning that the cacher should be updated following setting. """ if self._is_copy: self._check_setitem_copy(t="referent") return False def _check_setitem_copy(self, t: str = "setting", force: bool_t = False): """ Parameters ---------- t : str, the type of setting error force : bool, default False If True, then force showing an error. validate if we are doing a setitem on a chained copy. 
It is technically possible to figure out that we are setting on a copy even WITH a multi-dtyped pandas object. In other words, some blocks may be views while other are not. Currently _is_view will ALWAYS return False for multi-blocks to avoid having to handle this case. df = DataFrame(np.arange(0,9), columns=['count']) df['group'] = 'b' # This technically need not raise SettingWithCopy if both are view # (which is not generally guaranteed but is usually True. However, # this is in general not a good practice and we recommend using .loc. df.iloc[0:5]['group'] = 'a' """ if using_copy_on_write(): return # return early if the check is not needed if not (force or self._is_copy): return value = config.get_option("mode.chained_assignment") if value is None: return # see if the copy is not actually referred; if so, then dissolve # the copy weakref if self._is_copy is not None and not isinstance(self._is_copy, str): r = self._is_copy() if not gc.get_referents(r) or (r is not None and r.shape == self.shape): self._is_copy = None return # a custom message if isinstance(self._is_copy, str): t = self._is_copy elif t == "referent": t = ( "\n" "A value is trying to be set on a copy of a slice from a " "DataFrame\n\n" "See the caveats in the documentation: " "https://pandas.pydata.org/pandas-docs/stable/user_guide/" "indexing.html#returning-a-view-versus-a-copy" ) else: t = ( "\n" "A value is trying to be set on a copy of a slice from a " "DataFrame.\n" "Try using .loc[row_indexer,col_indexer] = value " "instead\n\nSee the caveats in the documentation: " "https://pandas.pydata.org/pandas-docs/stable/user_guide/" "indexing.html#returning-a-view-versus-a-copy" ) if value == "raise": raise SettingWithCopyError(t) if value == "warn": warnings.warn(t, SettingWithCopyWarning, stacklevel=find_stack_level()) def __delitem__(self, key) -> None: """ Delete item """ deleted = False maybe_shortcut = False if self.ndim == 2 and isinstance(self.columns, MultiIndex): try: # By using engine's __contains__ we effectively # restrict to same-length tuples maybe_shortcut = key not in self.columns._engine except TypeError: pass if maybe_shortcut: # Allow shorthand to delete all columns whose first len(key) # elements match key: if not isinstance(key, tuple): key = (key,) for col in self.columns: if isinstance(col, tuple) and col[: len(key)] == key: del self[col] deleted = True if not deleted: # If the above loop ran and didn't delete anything because # there was no match, this call should raise the appropriate # exception: loc = self.axes[-1].get_loc(key) self._mgr = self._mgr.idelete(loc) # delete from the caches try: del self._item_cache[key] except KeyError: pass # ---------------------------------------------------------------------- # Unsorted def _check_inplace_and_allows_duplicate_labels(self, inplace): if inplace and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'inplace=True' when " "'self.flags.allows_duplicate_labels' is False." ) def get(self, key, default=None): """ Get item from object for given key (ex: DataFrame column). Returns default value if not found. Parameters ---------- key : object Returns ------- same type as items contained in object Examples -------- >>> df = pd.DataFrame( ... [ ... [24.3, 75.7, "high"], ... [31, 87.8, "high"], ... [22, 71.6, "medium"], ... [35, 95, "medium"], ... ], ... columns=["temp_celsius", "temp_fahrenheit", "windspeed"], ... index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"), ... 
) >>> df temp_celsius temp_fahrenheit windspeed 2014-02-12 24.3 75.7 high 2014-02-13 31.0 87.8 high 2014-02-14 22.0 71.6 medium 2014-02-15 35.0 95.0 medium >>> df.get(["temp_celsius", "windspeed"]) temp_celsius windspeed 2014-02-12 24.3 high 2014-02-13 31.0 high 2014-02-14 22.0 medium 2014-02-15 35.0 medium >>> ser = df['windspeed'] >>> ser.get('2014-02-13') 'high' If the key isn't found, the default value will be used. >>> df.get(["temp_celsius", "temp_kelvin"], default="default_value") 'default_value' >>> ser.get('2014-02-10', '[unknown]') '[unknown]' """ try: return self[key] except (KeyError, ValueError, IndexError): return default def _is_view(self) -> bool_t: """Return boolean indicating if self is view of another array""" return self._mgr.is_view def reindex_like( self: NDFrameT, other, method: Literal["backfill", "bfill", "pad", "ffill", "nearest"] | None = None, copy: bool_t | None = None, limit=None, tolerance=None, ) -> NDFrameT: """ Return an object with matching indices as other object. Conform the object to the same index on all axes. Optional filling logic, placing NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and copy=False. Parameters ---------- other : Object of the same data type Its row and column indices are used to define the new indices of this object. method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'} Method to use for filling holes in reindexed DataFrame. Please note: this is only applicable to DataFrames/Series with a monotonically increasing/decreasing index. * None (default): don't fill gaps * pad / ffill: propagate last valid observation forward to next valid * backfill / bfill: use next valid observation to fill gap * nearest: use nearest valid observations to fill gap. copy : bool, default True Return a new object, even if the passed indexes are the same. limit : int, default None Maximum number of consecutive labels to fill for inexact matches. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- Series or DataFrame Same type as caller, but with changed indices on each axis. See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex : Change to new indices or expand indices. Notes ----- Same as calling ``.reindex(index=other.index, columns=other.columns,...)``. Examples -------- >>> df1 = pd.DataFrame([[24.3, 75.7, 'high'], ... [31, 87.8, 'high'], ... [22, 71.6, 'medium'], ... [35, 95, 'medium']], ... columns=['temp_celsius', 'temp_fahrenheit', ... 'windspeed'], ... index=pd.date_range(start='2014-02-12', ... end='2014-02-15', freq='D')) >>> df1 temp_celsius temp_fahrenheit windspeed 2014-02-12 24.3 75.7 high 2014-02-13 31.0 87.8 high 2014-02-14 22.0 71.6 medium 2014-02-15 35.0 95.0 medium >>> df2 = pd.DataFrame([[28, 'low'], ... [30, 'low'], ... [35.1, 'medium']], ... columns=['temp_celsius', 'windspeed'], ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13', ... 
'2014-02-15'])) >>> df2 temp_celsius windspeed 2014-02-12 28.0 low 2014-02-13 30.0 low 2014-02-15 35.1 medium >>> df2.reindex_like(df1) temp_celsius temp_fahrenheit windspeed 2014-02-12 28.0 NaN low 2014-02-13 30.0 NaN low 2014-02-14 NaN NaN NaN 2014-02-15 35.1 NaN medium """ d = other._construct_axes_dict( axes=self._AXIS_ORDERS, method=method, copy=copy, limit=limit, tolerance=tolerance, ) return self.reindex(**d) def drop( self, labels: IndexLabel = ..., *, axis: Axis = ..., index: IndexLabel = ..., columns: IndexLabel = ..., level: Level | None = ..., inplace: Literal[True], errors: IgnoreRaise = ..., ) -> None: ... def drop( self: NDFrameT, labels: IndexLabel = ..., *, axis: Axis = ..., index: IndexLabel = ..., columns: IndexLabel = ..., level: Level | None = ..., inplace: Literal[False] = ..., errors: IgnoreRaise = ..., ) -> NDFrameT: ... def drop( self: NDFrameT, labels: IndexLabel = ..., *, axis: Axis = ..., index: IndexLabel = ..., columns: IndexLabel = ..., level: Level | None = ..., inplace: bool_t = ..., errors: IgnoreRaise = ..., ) -> NDFrameT | None: ... def drop( self: NDFrameT, labels: IndexLabel = None, *, axis: Axis = 0, index: IndexLabel = None, columns: IndexLabel = None, level: Level | None = None, inplace: bool_t = False, errors: IgnoreRaise = "raise", ) -> NDFrameT | None: inplace = validate_bool_kwarg(inplace, "inplace") if labels is not None: if index is not None or columns is not None: raise ValueError("Cannot specify both 'labels' and 'index'/'columns'") axis_name = self._get_axis_name(axis) axes = {axis_name: labels} elif index is not None or columns is not None: axes = {"index": index} if self.ndim == 2: axes["columns"] = columns else: raise ValueError( "Need to specify at least one of 'labels', 'index' or 'columns'" ) obj = self for axis, labels in axes.items(): if labels is not None: obj = obj._drop_axis(labels, axis, level=level, errors=errors) if inplace: self._update_inplace(obj) return None else: return obj def _drop_axis( self: NDFrameT, labels, axis, level=None, errors: IgnoreRaise = "raise", only_slice: bool_t = False, ) -> NDFrameT: """ Drop labels from specified axis. Used in the ``drop`` method internally. Parameters ---------- labels : single label or list-like axis : int or axis name level : int or level name, default None For MultiIndex errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and existing labels are dropped. only_slice : bool, default False Whether indexing along columns should be view-only. 
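Returns
-------
Same type as self
    A new object with the requested labels dropped along ``axis``.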
""" axis_num = self._get_axis_number(axis) axis = self._get_axis(axis) if axis.is_unique: if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError("axis must be a MultiIndex") new_axis = axis.drop(labels, level=level, errors=errors) else: new_axis = axis.drop(labels, errors=errors) indexer = axis.get_indexer(new_axis) # Case for non-unique axis else: is_tuple_labels = is_nested_list_like(labels) or isinstance(labels, tuple) labels = ensure_object(common.index_labels_to_array(labels)) if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError("axis must be a MultiIndex") mask = ~axis.get_level_values(level).isin(labels) # GH 18561 MultiIndex.drop should raise if label is absent if errors == "raise" and mask.all(): raise KeyError(f"{labels} not found in axis") elif ( isinstance(axis, MultiIndex) and labels.dtype == "object" and not is_tuple_labels ): # Set level to zero in case of MultiIndex and label is string, # because isin can't handle strings for MultiIndexes GH#36293 # In case of tuples we get dtype object but have to use isin GH#42771 mask = ~axis.get_level_values(0).isin(labels) else: mask = ~axis.isin(labels) # Check if label doesn't exist along axis labels_missing = (axis.get_indexer_for(labels) == -1).any() if errors == "raise" and labels_missing: raise KeyError(f"{labels} not found in axis") if is_extension_array_dtype(mask.dtype): # GH#45860 mask = mask.to_numpy(dtype=bool) indexer = mask.nonzero()[0] new_axis = axis.take(indexer) bm_axis = self.ndim - axis_num - 1 new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=bm_axis, allow_dups=True, copy=None, only_slice=only_slice, ) result = self._constructor(new_mgr) if self.ndim == 1: result.name = self.name return result.__finalize__(self) def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None: """ Replace self internals with result. Parameters ---------- result : same type as self verify_is_copy : bool, default True Provide is_copy checks. """ # NOTE: This does *not* call __finalize__ and that's an explicit # decision that we may revisit in the future. self._reset_cache() self._clear_item_cache() self._mgr = result._mgr self._maybe_update_cacher(verify_is_copy=verify_is_copy, inplace=True) def add_prefix(self: NDFrameT, prefix: str, axis: Axis | None = None) -> NDFrameT: """ Prefix labels with string `prefix`. For Series, the row labels are prefixed. For DataFrame, the column labels are prefixed. Parameters ---------- prefix : str The string to add before each label. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to add prefix on .. versionadded:: 2.0.0 Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_suffix: Suffix row labels with string `suffix`. DataFrame.add_suffix: Suffix column labels with string `suffix`. 
Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_prefix('item_') item_0 1 item_1 2 item_2 3 item_3 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_prefix('col_') col_A col_B 0 1 3 1 2 4 2 3 5 3 4 6 """ f = lambda x: f"{prefix}{x}" axis_name = self._info_axis_name if axis is not None: axis_name = self._get_axis_name(axis) mapper = {axis_name: f} # error: Incompatible return value type (got "Optional[NDFrameT]", # expected "NDFrameT") # error: Argument 1 to "rename" of "NDFrame" has incompatible type # "**Dict[str, partial[str]]"; expected "Union[str, int, None]" # error: Keywords must be strings return self._rename(**mapper) # type: ignore[return-value, arg-type, misc] def add_suffix(self: NDFrameT, suffix: str, axis: Axis | None = None) -> NDFrameT: """ Suffix labels with string `suffix`. For Series, the row labels are suffixed. For DataFrame, the column labels are suffixed. Parameters ---------- suffix : str The string to add after each label. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to add suffix on .. versionadded:: 2.0.0 Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. DataFrame.add_prefix: Prefix column labels with string `prefix`. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_suffix('_item') 0_item 1 1_item 2 2_item 3 3_item 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_suffix('_col') A_col B_col 0 1 3 1 2 4 2 3 5 3 4 6 """ f = lambda x: f"{x}{suffix}" axis_name = self._info_axis_name if axis is not None: axis_name = self._get_axis_name(axis) mapper = {axis_name: f} # error: Incompatible return value type (got "Optional[NDFrameT]", # expected "NDFrameT") # error: Argument 1 to "rename" of "NDFrame" has incompatible type # "**Dict[str, partial[str]]"; expected "Union[str, int, None]" # error: Keywords must be strings return self._rename(**mapper) # type: ignore[return-value, arg-type, misc] def sort_values( self: NDFrameT, *, axis: Axis = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[False] = ..., kind: str = ..., na_position: str = ..., ignore_index: bool_t = ..., key: ValueKeyFunc = ..., ) -> NDFrameT: ... def sort_values( self, *, axis: Axis = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[True], kind: str = ..., na_position: str = ..., ignore_index: bool_t = ..., key: ValueKeyFunc = ..., ) -> None: ... def sort_values( self: NDFrameT, *, axis: Axis = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: bool_t = ..., kind: str = ..., na_position: str = ..., ignore_index: bool_t = ..., key: ValueKeyFunc = ..., ) -> NDFrameT | None: ... def sort_values( self: NDFrameT, *, axis: Axis = 0, ascending: bool_t | Sequence[bool_t] = True, inplace: bool_t = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool_t = False, key: ValueKeyFunc = None, ) -> NDFrameT | None: """ Sort by the values along either axis. Parameters ----------%(optional_by)s axis : %(axes_single_arg)s, default 0 Axis to be sorted. ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False If True, perform operation in-place. 
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. `mergesort` and `stable` are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' Puts NaNs at the beginning if `first`; `last` puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional Apply the key function to the values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return a Series with the same shape as the input. It will be applied to each column in `by` independently. .. versionadded:: 1.1.0 Returns ------- DataFrame or None DataFrame with sorted values or None if ``inplace=True``. See Also -------- DataFrame.sort_index : Sort a DataFrame by the index. Series.sort_values : Similar method for a Series. Examples -------- >>> df = pd.DataFrame({ ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'], ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... 'col4': ['a', 'B', 'c', 'D', 'e', 'F'] ... }) >>> df col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F Sort by col1 >>> df.sort_values(by=['col1']) col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 5 C 4 3 F 4 D 7 2 e 3 NaN 8 4 D Sort by multiple columns >>> df.sort_values(by=['col1', 'col2']) col1 col2 col3 col4 1 A 1 1 B 0 A 2 0 a 2 B 9 9 c 5 C 4 3 F 4 D 7 2 e 3 NaN 8 4 D Sort Descending >>> df.sort_values(by='col1', ascending=False) col1 col2 col3 col4 4 D 7 2 e 5 C 4 3 F 2 B 9 9 c 0 A 2 0 a 1 A 1 1 B 3 NaN 8 4 D Putting NAs first >>> df.sort_values(by='col1', ascending=False, na_position='first') col1 col2 col3 col4 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F 2 B 9 9 c 0 A 2 0 a 1 A 1 1 B Sorting with a key function >>> df.sort_values(by='col4', key=lambda col: col.str.lower()) col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F Natural sort with the key argument, using the `natsort <https://github.com/SethMMorton/natsort>` package. >>> df = pd.DataFrame({ ... "time": ['0hr', '128hr', '72hr', '48hr', '96hr'], ... "value": [10, 20, 30, 40, 50] ... }) >>> df time value 0 0hr 10 1 128hr 20 2 72hr 30 3 48hr 40 4 96hr 50 >>> from natsort import index_natsorted >>> df.sort_values( ... by="time", ... key=lambda x: np.argsort(index_natsorted(df["time"])) ... ) time value 0 0hr 10 3 48hr 40 2 72hr 30 4 96hr 50 1 128hr 20 """ raise AbstractMethodError(self) def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[True], kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool_t = ..., ignore_index: bool_t = ..., key: IndexKeyFunc = ..., ) -> None: ... def sort_index( self: NDFrameT, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[False] = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool_t = ..., ignore_index: bool_t = ..., key: IndexKeyFunc = ..., ) -> NDFrameT: ... 
def sort_index(
    self: NDFrameT,
    *,
    axis: Axis = ...,
    level: IndexLabel = ...,
    ascending: bool_t | Sequence[bool_t] = ...,
    inplace: bool_t = ...,
    kind: SortKind = ...,
    na_position: NaPosition = ...,
    sort_remaining: bool_t = ...,
    ignore_index: bool_t = ...,
    key: IndexKeyFunc = ...,
) -> NDFrameT | None:
    ...

def sort_index(
    self: NDFrameT,
    *,
    axis: Axis = 0,
    level: IndexLabel = None,
    ascending: bool_t | Sequence[bool_t] = True,
    inplace: bool_t = False,
    kind: SortKind = "quicksort",
    na_position: NaPosition = "last",
    sort_remaining: bool_t = True,
    ignore_index: bool_t = False,
    key: IndexKeyFunc = None,
) -> NDFrameT | None:
    inplace = validate_bool_kwarg(inplace, "inplace")
    axis = self._get_axis_number(axis)
    ascending = validate_ascending(ascending)

    target = self._get_axis(axis)

    indexer = get_indexer_indexer(
        target, level, ascending, kind, na_position, sort_remaining, key
    )

    if indexer is None:
        if inplace:
            result = self
        else:
            result = self.copy(deep=None)

        if ignore_index:
            result.index = default_index(len(self))
        if inplace:
            return None
        else:
            return result

    baxis = self._get_block_manager_axis(axis)
    new_data = self._mgr.take(indexer, axis=baxis, verify=False)

    # reconstruct axis if needed
    new_data.set_axis(baxis, new_data.axes[baxis]._sort_levels_monotonic())

    if ignore_index:
        axis = 1 if isinstance(self, ABCDataFrame) else 0
        new_data.set_axis(axis, default_index(len(indexer)))

    result = self._constructor(new_data)

    if inplace:
        return self._update_inplace(result)
    else:
        return result.__finalize__(self, method="sort_index")

@doc(
    klass=_shared_doc_kwargs["klass"],
    optional_reindex="",
)
def reindex(
    self: NDFrameT,
    labels=None,
    index=None,
    columns=None,
    axis: Axis | None = None,
    method: str | None = None,
    copy: bool_t | None = None,
    level: Level | None = None,
    fill_value: Scalar | None = np.nan,
    limit: int | None = None,
    tolerance=None,
) -> NDFrameT:
    """
    Conform {klass} to new index with optional filling logic.

    Places NA/NaN in locations having no value in the previous index. A new
    object is produced unless the new index is equivalent to the current one
    and ``copy=False``.

    Parameters
    ----------
    {optional_reindex}
    method : {{None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}}
        Method to use for filling holes in reindexed DataFrame.
        Please note: this is only applicable to DataFrames/Series with a
        monotonically increasing/decreasing index.

        * None (default): don't fill gaps
        * pad / ffill: Propagate last valid observation forward to next
          valid.
        * backfill / bfill: Use next valid observation to fill gap.
        * nearest: Use nearest valid observations to fill gap.

    copy : bool, default True
        Return a new object, even if the passed indexes are the same.
    level : int or name
        Broadcast across a level, matching Index values on the
        passed MultiIndex level.
    fill_value : scalar, default np.NaN
        Value to use for missing values. Defaults to NaN, but can be any
        "compatible" value.
    limit : int, default None
        Maximum number of consecutive elements to forward or backward fill.
    tolerance : optional
        Maximum distance between original and new labels for inexact
        matches. The values of the index at the matching locations must
        satisfy the equation ``abs(index[indexer] - target) <= tolerance``.

        Tolerance may be a scalar value, which applies the same tolerance
        to all values, or list-like, which applies variable tolerance per
        element. List-like includes list, tuple, array, Series, and must be
        the same size as the index and its dtype must exactly match the
        index's type.

    Returns
    -------
    {klass} with changed index.
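Raises
------
TypeError
    If ``labels``, ``index`` and ``columns`` are all specified, or if
    ``axis`` is given together with either ``index`` or ``columns``.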
See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex_like : Change to same indices as other DataFrame. Examples -------- ``DataFrame.reindex`` supports two calling conventions * ``(index=index_labels, columns=column_labels, ...)`` * ``(labels, axis={{'index', 'columns'}}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. Create a dataframe with some fictional data. >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror'] >>> df = pd.DataFrame({{'http_status': [200, 200, 404, 404, 301], ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}}, ... index=index) >>> df http_status response_time Firefox 200 0.04 Chrome 200 0.02 Safari 404 0.07 IE10 404 0.08 Konqueror 301 1.00 Create a new index and reindex the dataframe. By default values in the new index that do not have corresponding records in the dataframe are assigned ``NaN``. >>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10', ... 'Chrome'] >>> df.reindex(new_index) http_status response_time Safari 404.0 0.07 Iceweasel NaN NaN Comodo Dragon NaN NaN IE10 404.0 0.08 Chrome 200.0 0.02 We can fill in the missing values by passing a value to the keyword ``fill_value``. Because the index is not monotonically increasing or decreasing, we cannot use arguments to the keyword ``method`` to fill the ``NaN`` values. >>> df.reindex(new_index, fill_value=0) http_status response_time Safari 404 0.07 Iceweasel 0 0.00 Comodo Dragon 0 0.00 IE10 404 0.08 Chrome 200 0.02 >>> df.reindex(new_index, fill_value='missing') http_status response_time Safari 404 0.07 Iceweasel missing missing Comodo Dragon missing missing IE10 404 0.08 Chrome 200 0.02 We can also reindex the columns. >>> df.reindex(columns=['http_status', 'user_agent']) http_status user_agent Firefox 200 NaN Chrome 200 NaN Safari 404 NaN IE10 404 NaN Konqueror 301 NaN Or we can use "axis-style" keyword arguments >>> df.reindex(['http_status', 'user_agent'], axis="columns") http_status user_agent Firefox 200 NaN Chrome 200 NaN Safari 404 NaN IE10 404 NaN Konqueror 301 NaN To further illustrate the filling functionality in ``reindex``, we will create a dataframe with a monotonically increasing index (for example, a sequence of dates). >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D') >>> df2 = pd.DataFrame({{"prices": [100, 101, np.nan, 100, 89, 88]}}, ... index=date_index) >>> df2 prices 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 Suppose we decide to expand the dataframe to cover a wider date range. >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D') >>> df2.reindex(date_index2) prices 2009-12-29 NaN 2009-12-30 NaN 2009-12-31 NaN 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN The index entries that did not have a value in the original data frame (for example, '2009-12-29') are by default filled with ``NaN``. If desired, we can fill in the missing values using one of several options. For example, to back-propagate the last valid value to fill the ``NaN`` values, pass ``bfill`` as an argument to the ``method`` keyword. 
>>> df2.reindex(date_index2, method='bfill') prices 2009-12-29 100.0 2009-12-30 100.0 2009-12-31 100.0 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN Please note that the ``NaN`` value present in the original dataframe (at index value 2010-01-03) will not be filled by any of the value propagation schemes. This is because filling while reindexing does not look at dataframe values, but only compares the original and desired indexes. If you do want to fill in the ``NaN`` values present in the original dataframe, use the ``fillna()`` method. See the :ref:`user guide <basics.reindexing>` for more. """ # TODO: Decide if we care about having different examples for different # kinds if index is not None and columns is not None and labels is not None: raise TypeError("Cannot specify all of 'labels', 'index', 'columns'.") elif index is not None or columns is not None: if axis is not None: raise TypeError( "Cannot specify both 'axis' and any of 'index' or 'columns'" ) if labels is not None: if index is not None: columns = labels else: index = labels else: if axis and self._get_axis_number(axis) == 1: columns = labels else: index = labels axes: dict[Literal["index", "columns"], Any] = { "index": index, "columns": columns, } method = clean_reindex_fill_method(method) # if all axes that are requested to reindex are equal, then only copy # if indicated must have index names equal here as well as values if copy and using_copy_on_write(): copy = False if all( self._get_axis(axis_name).identical(ax) for axis_name, ax in axes.items() if ax is not None ): return self.copy(deep=copy) # check if we are a multi reindex if self._needs_reindex_multi(axes, method, level): return self._reindex_multi(axes, copy, fill_value) # perform the reindex on the axes return self._reindex_axes( axes, level, limit, tolerance, method, fill_value, copy ).__finalize__(self, method="reindex") def _reindex_axes( self: NDFrameT, axes, level, limit, tolerance, method, fill_value, copy ) -> NDFrameT: """Perform the reindex for all the axes.""" obj = self for a in self._AXIS_ORDERS: labels = axes[a] if labels is None: continue ax = self._get_axis(a) new_index, indexer = ax.reindex( labels, level=level, limit=limit, tolerance=tolerance, method=method ) axis = self._get_axis_number(a) obj = obj._reindex_with_indexers( {axis: [new_index, indexer]}, fill_value=fill_value, copy=copy, allow_dups=False, ) # If we've made a copy once, no need to make another one copy = False return obj def _needs_reindex_multi(self, axes, method, level) -> bool_t: """Check if we do need a multi reindex.""" return ( (common.count_not_none(*axes.values()) == self._AXIS_LEN) and method is None and level is None and not self._is_mixed_type and not ( self.ndim == 2 and len(self.dtypes) == 1 and is_extension_array_dtype(self.dtypes.iloc[0]) ) ) def _reindex_multi(self, axes, copy, fill_value): raise AbstractMethodError(self) def _reindex_with_indexers( self: NDFrameT, reindexers, fill_value=None, copy: bool_t | None = False, allow_dups: bool_t = False, ) -> NDFrameT: """allow_dups indicates an internal call here""" # reindex doing multiple operations on different axes if indicated new_data = self._mgr for axis in sorted(reindexers.keys()): index, indexer = reindexers[axis] baxis = self._get_block_manager_axis(axis) if index is None: continue index = ensure_index(index) if indexer is not None: indexer = ensure_platform_int(indexer) # TODO: speed up on homogeneous DataFrame objects (see _reindex_multi) 
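            # ``reindex_indexer`` consumes positions into the old axis, with
            # -1 marking labels that are new to ``index``; those slots are
            # filled with ``fill_value`` instead of being copied from any
            # existing block.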
new_data = new_data.reindex_indexer( index, indexer, axis=baxis, fill_value=fill_value, allow_dups=allow_dups, copy=copy, ) # If we've made a copy once, no need to make another one copy = False if ( (copy or copy is None) and new_data is self._mgr and not using_copy_on_write() ): new_data = new_data.copy(deep=copy) elif using_copy_on_write() and new_data is self._mgr: new_data = new_data.copy(deep=False) return self._constructor(new_data).__finalize__(self) def filter( self: NDFrameT, items=None, like: str | None = None, regex: str | None = None, axis: Axis | None = None, ) -> NDFrameT: """ Subset the dataframe rows or columns according to the specified index labels. Note that this routine does not filter a dataframe on its contents. The filter is applied to the labels of the index. Parameters ---------- items : list-like Keep labels from axis which are in items. like : str Keep labels from axis for which "like in label == True". regex : str (regular expression) Keep labels from axis for which re.search(regex, label) == True. axis : {0 or ‘index’, 1 or ‘columns’, None}, default None The axis to filter on, expressed either as an index (int) or axis name (str). By default this is the info axis, 'columns' for DataFrame. For `Series` this parameter is unused and defaults to `None`. Returns ------- same type as input object See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. Notes ----- The ``items``, ``like``, and ``regex`` parameters are enforced to be mutually exclusive. ``axis`` defaults to the info axis that is used when indexing with ``[]``. Examples -------- >>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])), ... index=['mouse', 'rabbit'], ... columns=['one', 'two', 'three']) >>> df one two three mouse 1 2 3 rabbit 4 5 6 >>> # select columns by name >>> df.filter(items=['one', 'three']) one three mouse 1 3 rabbit 4 6 >>> # select columns by regular expression >>> df.filter(regex='e$', axis=1) one three mouse 1 3 rabbit 4 6 >>> # select rows containing 'bbi' >>> df.filter(like='bbi', axis=0) one two three rabbit 4 5 6 """ nkw = common.count_not_none(items, like, regex) if nkw > 1: raise TypeError( "Keyword arguments `items`, `like`, or `regex` " "are mutually exclusive" ) if axis is None: axis = self._info_axis_name labels = self._get_axis(axis) if items is not None: name = self._get_axis_name(axis) # error: Keywords must be strings return self.reindex( # type: ignore[misc] **{name: [r for r in items if r in labels]} # type: ignore[arg-type] ) elif like: def f(x) -> bool_t: assert like is not None # needed for mypy return like in ensure_str(x) values = labels.map(f) return self.loc(axis=axis)[values] elif regex: def f(x) -> bool_t: return matcher.search(ensure_str(x)) is not None matcher = re.compile(regex) values = labels.map(f) return self.loc(axis=axis)[values] else: raise TypeError("Must pass either `items`, `like`, or `regex`") def head(self: NDFrameT, n: int = 5) -> NDFrameT: """ Return the first `n` rows. This function returns the first `n` rows for the object based on position. It is useful for quickly testing if your object has the right type of data in it. For negative values of `n`, this function returns all rows except the last `|n|` rows, equivalent to ``df[:n]``. If n is larger than the number of rows, this function returns all rows. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- same type as caller The first `n` rows of the caller object. 
See Also -------- DataFrame.tail: Returns the last `n` rows. Examples -------- >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the first 5 lines >>> df.head() animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey Viewing the first `n` lines (three in this case) >>> df.head(3) animal 0 alligator 1 bee 2 falcon For negative values of `n` >>> df.head(-3) animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot """ return self.iloc[:n] def tail(self: NDFrameT, n: int = 5) -> NDFrameT: """ Return the last `n` rows. This function returns last `n` rows from the object based on position. It is useful for quickly verifying data, for example, after sorting or appending rows. For negative values of `n`, this function returns all rows except the first `|n|` rows, equivalent to ``df[|n|:]``. If n is larger than the number of rows, this function returns all rows. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- type of caller The last `n` rows of the caller object. See Also -------- DataFrame.head : The first `n` rows of the caller object. Examples -------- >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last 5 lines >>> df.tail() animal 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last `n` lines (three in this case) >>> df.tail(3) animal 6 shark 7 whale 8 zebra For negative values of `n` >>> df.tail(-3) animal 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra """ if n == 0: return self.iloc[0:0] return self.iloc[-n:] def sample( self: NDFrameT, n: int | None = None, frac: float | None = None, replace: bool_t = False, weights=None, random_state: RandomState | None = None, axis: Axis | None = None, ignore_index: bool_t = False, ) -> NDFrameT: """ Return a random sample of items from an axis of object. You can use `random_state` for reproducibility. Parameters ---------- n : int, optional Number of items from axis to return. Cannot be used with `frac`. Default = 1 if `frac` = None. frac : float, optional Fraction of axis items to return. Cannot be used with `n`. replace : bool, default False Allow or disallow sampling of the same row more than once. weights : str or ndarray-like, optional Default 'None' results in equal probability weighting. If passed a Series, will align with target object on index. Index values in weights not found in sampled object will be ignored and index values in sampled object not in weights will be assigned weights of zero. If called on a DataFrame, will accept the name of a column when axis = 0. Unless weights are a Series, weights must be same length as axis being sampled. If weights do not sum to 1, they will be normalized to sum to 1. Missing values in the weights column will be treated as zero. Infinite values not allowed. random_state : int, array-like, BitGenerator, np.random.RandomState, np.random.Generator, optional If int, array-like, or BitGenerator, seed for random number generator. If np.random.RandomState or np.random.Generator, use as given. .. versionchanged:: 1.1.0 array-like and BitGenerator object now passed to np.random.RandomState() as seed .. 
versionchanged:: 1.4.0 np.random.Generator objects now accepted axis : {0 or ‘index’, 1 or ‘columns’, None}, default None Axis to sample. Accepts axis number or name. Default is stat axis for given data type. For `Series` this parameter is unused and defaults to `None`. ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.3.0 Returns ------- Series or DataFrame A new object of same type as caller containing `n` items randomly sampled from the caller object. See Also -------- DataFrameGroupBy.sample: Generates random samples from each group of a DataFrame object. SeriesGroupBy.sample: Generates random samples from each group of a Series object. numpy.random.choice: Generates a random sample from a given 1-D numpy array. Notes ----- If `frac` > 1, `replacement` should be set to `True`. Examples -------- >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0], ... 'num_wings': [2, 0, 0, 0], ... 'num_specimen_seen': [10, 2, 1, 8]}, ... index=['falcon', 'dog', 'spider', 'fish']) >>> df num_legs num_wings num_specimen_seen falcon 2 2 10 dog 4 0 2 spider 8 0 1 fish 0 0 8 Extract 3 random elements from the ``Series`` ``df['num_legs']``: Note that we use `random_state` to ensure the reproducibility of the examples. >>> df['num_legs'].sample(n=3, random_state=1) fish 0 spider 8 falcon 2 Name: num_legs, dtype: int64 A random 50% sample of the ``DataFrame`` with replacement: >>> df.sample(frac=0.5, replace=True, random_state=1) num_legs num_wings num_specimen_seen dog 4 0 2 fish 0 0 8 An upsample sample of the ``DataFrame`` with replacement: Note that `replace` parameter has to be `True` for `frac` parameter > 1. >>> df.sample(frac=2, replace=True, random_state=1) num_legs num_wings num_specimen_seen dog 4 0 2 fish 0 0 8 falcon 2 2 10 falcon 2 2 10 fish 0 0 8 dog 4 0 2 fish 0 0 8 dog 4 0 2 Using a DataFrame column as weights. Rows with larger value in the `num_specimen_seen` column are more likely to be sampled. >>> df.sample(n=2, weights='num_specimen_seen', random_state=1) num_legs num_wings num_specimen_seen falcon 2 2 10 fish 0 0 8 """ # noqa:E501 if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) obj_len = self.shape[axis] # Process random_state argument rs = common.random_state(random_state) size = sample.process_sampling_size(n, frac, replace) if size is None: assert frac is not None size = round(frac * obj_len) if weights is not None: weights = sample.preprocess_weights(self, weights, axis) sampled_indices = sample.sample(obj_len, size, replace, weights, rs) result = self.take(sampled_indices, axis=axis) if ignore_index: result.index = default_index(len(result)) return result def pipe( self, func: Callable[..., T] | tuple[Callable[..., T], str], *args, **kwargs, ) -> T: r""" Apply chainable functions that expect Series or DataFrames. Parameters ---------- func : function Function to apply to the {klass}. ``args``, and ``kwargs`` are passed into ``func``. Alternatively a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the {klass}. args : iterable, optional Positional arguments passed into ``func``. kwargs : mapping, optional A dictionary of keyword arguments passed into ``func``. Returns ------- the return type of ``func``. See Also -------- DataFrame.apply : Apply a function along input axis of DataFrame. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. 
Series.map : Apply a mapping correspondence on a :class:`~pandas.Series`. Notes ----- Use ``.pipe`` when chaining together functions that expect Series, DataFrames or GroupBy objects. Instead of writing >>> func(g(h(df), arg1=a), arg2=b, arg3=c) # doctest: +SKIP You can write >>> (df.pipe(h) ... .pipe(g, arg1=a) ... .pipe(func, arg2=b, arg3=c) ... ) # doctest: +SKIP If you have a function that takes the data as (say) the second argument, pass a tuple indicating which keyword expects the data. For example, suppose ``func`` takes its data as ``arg2``: >>> (df.pipe(h) ... .pipe(g, arg1=a) ... .pipe((func, 'arg2'), arg1=a, arg3=c) ... ) # doctest: +SKIP """ if using_copy_on_write(): return common.pipe(self.copy(deep=None), func, *args, **kwargs) return common.pipe(self, func, *args, **kwargs) # ---------------------------------------------------------------------- # Attribute access def __finalize__( self: NDFrameT, other, method: str | None = None, **kwargs ) -> NDFrameT: """ Propagate metadata from other to self. Parameters ---------- other : the object from which to get the attributes that we are going to propagate method : str, optional A passed method name providing context on where ``__finalize__`` was called. .. warning:: The value passed as `method` are not currently considered stable across pandas releases. """ if isinstance(other, NDFrame): for name in other.attrs: self.attrs[name] = other.attrs[name] self.flags.allows_duplicate_labels = other.flags.allows_duplicate_labels # For subclasses using _metadata. for name in set(self._metadata) & set(other._metadata): assert isinstance(name, str) object.__setattr__(self, name, getattr(other, name, None)) if method == "concat": attrs = other.objs[0].attrs check_attrs = all(objs.attrs == attrs for objs in other.objs[1:]) if check_attrs: for name in attrs: self.attrs[name] = attrs[name] allows_duplicate_labels = all( x.flags.allows_duplicate_labels for x in other.objs ) self.flags.allows_duplicate_labels = allows_duplicate_labels return self def __getattr__(self, name: str): """ After regular attribute access, try looking up the name This allows simpler access to columns for interactive use. """ # Note: obj.x will always call obj.__getattribute__('x') prior to # calling obj.__getattr__('x'). if ( name not in self._internal_names_set and name not in self._metadata and name not in self._accessors and self._info_axis._can_hold_identifiers_and_holds_name(name) ): return self[name] return object.__getattribute__(self, name) def __setattr__(self, name: str, value) -> None: """ After regular attribute access, try setting the name This allows simpler access to columns for interactive use. """ # first try regular attribute access via __getattribute__, so that # e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify # the same attribute. try: object.__getattribute__(self, name) return object.__setattr__(self, name, value) except AttributeError: pass # if this fails, go on to more involved attribute setting # (note that this matches __getattr__, above). 
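        # The precedence below mirrors __getattr__: internal names and
        # _metadata entries are set as ordinary attributes, an existing
        # label on the info axis is routed through __setitem__ (column
        # assignment), and anything else falls back to object.__setattr__,
        # warning on the common ``df.new_col = values`` mistake for
        # DataFrames.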
if name in self._internal_names_set: object.__setattr__(self, name, value) elif name in self._metadata: object.__setattr__(self, name, value) else: try: existing = getattr(self, name) if isinstance(existing, Index): object.__setattr__(self, name, value) elif name in self._info_axis: self[name] = value else: object.__setattr__(self, name, value) except (AttributeError, TypeError): if isinstance(self, ABCDataFrame) and (is_list_like(value)): warnings.warn( "Pandas doesn't allow columns to be " "created via a new attribute name - see " "https://pandas.pydata.org/pandas-docs/" "stable/indexing.html#attribute-access", stacklevel=find_stack_level(), ) object.__setattr__(self, name, value) def _dir_additions(self) -> set[str]: """ add the string-like attributes from the info_axis. If info_axis is a MultiIndex, its first level values are used. """ additions = super()._dir_additions() if self._info_axis._can_hold_strings: additions.update(self._info_axis._dir_additions_for_owner) return additions # ---------------------------------------------------------------------- # Consolidation of internals def _protect_consolidate(self, f): """ Consolidate _mgr -- if the blocks have changed, then clear the cache """ if isinstance(self._mgr, (ArrayManager, SingleArrayManager)): return f() blocks_before = len(self._mgr.blocks) result = f() if len(self._mgr.blocks) != blocks_before: self._clear_item_cache() return result def _consolidate_inplace(self) -> None: """Consolidate data in place and return None""" def f() -> None: self._mgr = self._mgr.consolidate() self._protect_consolidate(f) def _consolidate(self): """ Compute NDFrame with "consolidated" internals (data of each dtype grouped together in a single ndarray). Returns ------- consolidated : same type as caller """ f = lambda: self._mgr.consolidate() cons_data = self._protect_consolidate(f) return self._constructor(cons_data).__finalize__(self) def _is_mixed_type(self) -> bool_t: if self._mgr.is_single_block: return False if self._mgr.any_extension_types: # Even if they have the same dtype, we can't consolidate them, # so we pretend this is "mixed'" return True return self.dtypes.nunique() > 1 def _check_inplace_setting(self, value) -> bool_t: """check whether we allow in-place setting with this type of value""" if self._is_mixed_type and not self._mgr.is_numeric_mixed_type: # allow an actual np.nan through if is_float(value) and np.isnan(value) or value is lib.no_default: return True raise TypeError( "Cannot do inplace boolean setting on " "mixed-types with a non np.nan value" ) return True def _get_numeric_data(self: NDFrameT) -> NDFrameT: return self._constructor(self._mgr.get_numeric_data()).__finalize__(self) def _get_bool_data(self): return self._constructor(self._mgr.get_bool_data()).__finalize__(self) # ---------------------------------------------------------------------- # Internal Interface Methods def values(self): raise AbstractMethodError(self) def _values(self) -> ArrayLike: """internal implementation""" raise AbstractMethodError(self) def dtypes(self): """ Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the ``object`` dtype. See :ref:`the User Guide <basics.dtypes>` for more. Returns ------- pandas.Series The data type of each column. Examples -------- >>> df = pd.DataFrame({'float': [1.0], ... 'int': [1], ... 'datetime': [pd.Timestamp('20180310')], ... 
'string': ['foo']}) >>> df.dtypes float float64 int int64 datetime datetime64[ns] string object dtype: object """ data = self._mgr.get_dtypes() return self._constructor_sliced(data, index=self._info_axis, dtype=np.object_) def astype( self: NDFrameT, dtype, copy: bool_t | None = None, errors: IgnoreRaise = "raise" ) -> NDFrameT: """ Cast a pandas object to a specified dtype ``dtype``. Parameters ---------- dtype : str, data type, Series or Mapping of column name -> data type Use a str, numpy.dtype, pandas.ExtensionDtype or Python type to cast entire pandas object to the same type. Alternatively, use a mapping, e.g. {col: dtype, ...}, where col is a column label and dtype is a numpy.dtype or Python type to cast one or more of the DataFrame's columns to column-specific types. copy : bool, default True Return a copy when ``copy=True`` (be very careful setting ``copy=False`` as changes to values then may propagate to other pandas objects). errors : {'raise', 'ignore'}, default 'raise' Control raising of exceptions on invalid data for provided dtype. - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. On error return original object. Returns ------- same type as caller See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to a numeric type. numpy.ndarray.astype : Cast a numpy array to a specified type. Notes ----- .. versionchanged:: 2.0.0 Using ``astype`` to convert from timezone-naive dtype to timezone-aware dtype will raise an exception. Use :meth:`Series.dt.tz_localize` instead. Examples -------- Create a DataFrame: >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df.dtypes col1 int64 col2 int64 dtype: object Cast all columns to int32: >>> df.astype('int32').dtypes col1 int32 col2 int32 dtype: object Cast col1 to int32 using a dictionary: >>> df.astype({'col1': 'int32'}).dtypes col1 int32 col2 int64 dtype: object Create a series: >>> ser = pd.Series([1, 2], dtype='int32') >>> ser 0 1 1 2 dtype: int32 >>> ser.astype('int64') 0 1 1 2 dtype: int64 Convert to categorical type: >>> ser.astype('category') 0 1 1 2 dtype: category Categories (2, int32): [1, 2] Convert to ordered categorical type with custom ordering: >>> from pandas.api.types import CategoricalDtype >>> cat_dtype = CategoricalDtype( ... categories=[2, 1], ordered=True) >>> ser.astype(cat_dtype) 0 1 1 2 dtype: category Categories (2, int64): [2 < 1] Create a series of dates: >>> ser_date = pd.Series(pd.date_range('20200101', periods=3)) >>> ser_date 0 2020-01-01 1 2020-01-02 2 2020-01-03 dtype: datetime64[ns] """ if copy and using_copy_on_write(): copy = False if is_dict_like(dtype): if self.ndim == 1: # i.e. Series if len(dtype) > 1 or self.name not in dtype: raise KeyError( "Only the Series name can be used for " "the key in Series dtype mappings." ) new_type = dtype[self.name] return self.astype(new_type, copy, errors) # GH#44417 cast to Series so we can use .iat below, which will be # robust in case we from pandas import Series dtype_ser = Series(dtype, dtype=object) for col_name in dtype_ser.index: if col_name not in self: raise KeyError( "Only a column name can be used for the " "key in a dtype mappings argument. " f"'{col_name}' not found in columns." 
) dtype_ser = dtype_ser.reindex(self.columns, fill_value=None, copy=False) results = [] for i, (col_name, col) in enumerate(self.items()): cdt = dtype_ser.iat[i] if isna(cdt): res_col = col.copy(deep=copy) else: try: res_col = col.astype(dtype=cdt, copy=copy, errors=errors) except ValueError as ex: ex.args = ( f"{ex}: Error while type casting for column '{col_name}'", ) raise results.append(res_col) elif is_extension_array_dtype(dtype) and self.ndim > 1: # GH 18099/22869: columnwise conversion to extension dtype # GH 24704: use iloc to handle duplicate column names # TODO(EA2D): special case not needed with 2D EAs results = [ self.iloc[:, i].astype(dtype, copy=copy) for i in range(len(self.columns)) ] else: # else, only a single dtype is given new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors) return self._constructor(new_data).__finalize__(self, method="astype") # GH 33113: handle empty frame or series if not results: return self.copy(deep=None) # GH 19920: retain column metadata after concat result = concat(results, axis=1, copy=False) # GH#40810 retain subclass # error: Incompatible types in assignment # (expression has type "NDFrameT", variable has type "DataFrame") result = self._constructor(result) # type: ignore[assignment] result.columns = self.columns result = result.__finalize__(self, method="astype") # https://github.com/python/mypy/issues/8354 return cast(NDFrameT, result) def copy(self: NDFrameT, deep: bool_t | None = True) -> NDFrameT: """ Make a copy of this object's indices and data. When ``deep=True`` (default), a new object will be created with a copy of the calling object's data and indices. Modifications to the data or indices of the copy will not be reflected in the original object (see notes below). When ``deep=False``, a new object will be created without copying the calling object's data or index (only references to the data and index are copied). Any changes to the data of the original will be reflected in the shallow copy (and vice versa). Parameters ---------- deep : bool, default True Make a deep copy, including a copy of the data and the indices. With ``deep=False`` neither the indices nor the data are copied. Returns ------- Series or DataFrame Object type matches caller. Notes ----- When ``deep=True``, data is copied but actual Python objects will not be copied recursively, only the reference to the object. This is in contrast to `copy.deepcopy` in the Standard Library, which recursively copies object data (see examples below). While ``Index`` objects are copied when ``deep=True``, the underlying numpy array is not copied for performance reasons. Since ``Index`` is immutable, the underlying data can be safely shared and a copy is not needed. Since pandas is not thread safe, see the :ref:`gotchas <gotchas.thread-safety>` when copying in a threading environment. Examples -------- >>> s = pd.Series([1, 2], index=["a", "b"]) >>> s a 1 b 2 dtype: int64 >>> s_copy = s.copy() >>> s_copy a 1 b 2 dtype: int64 **Shallow copy versus default (deep) copy:** >>> s = pd.Series([1, 2], index=["a", "b"]) >>> deep = s.copy() >>> shallow = s.copy(deep=False) Shallow copy shares data and index with original. >>> s is shallow False >>> s.values is shallow.values and s.index is shallow.index True Deep copy has own copy of data and index. >>> s is deep False >>> s.values is deep.values or s.index is deep.index False Updates to the data shared by shallow copy and original is reflected in both; deep copy remains unchanged. 
>>> s[0] = 3 >>> shallow[1] = 4 >>> s a 3 b 4 dtype: int64 >>> shallow a 3 b 4 dtype: int64 >>> deep a 1 b 2 dtype: int64 Note that when copying an object containing Python objects, a deep copy will copy the data, but will not do so recursively. Updating a nested data object will be reflected in the deep copy. >>> s = pd.Series([[1, 2], [3, 4]]) >>> deep = s.copy() >>> s[0][0] = 10 >>> s 0 [10, 2] 1 [3, 4] dtype: object >>> deep 0 [10, 2] 1 [3, 4] dtype: object """ data = self._mgr.copy(deep=deep) self._clear_item_cache() return self._constructor(data).__finalize__(self, method="copy") def __copy__(self: NDFrameT, deep: bool_t = True) -> NDFrameT: return self.copy(deep=deep) def __deepcopy__(self: NDFrameT, memo=None) -> NDFrameT: """ Parameters ---------- memo, default None Standard signature. Unused """ return self.copy(deep=True) def infer_objects(self: NDFrameT, copy: bool_t | None = None) -> NDFrameT: """ Attempt to infer better dtypes for object columns. Attempts soft conversion of object-dtyped columns, leaving non-object and unconvertible columns unchanged. The inference rules are the same as during normal Series/DataFrame construction. Parameters ---------- copy : bool, default True Whether to make a copy for non-object or non-inferrable columns or Series. Returns ------- same type as input object See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to numeric type. convert_dtypes : Convert argument to best possible dtype. Examples -------- >>> df = pd.DataFrame({"A": ["a", 1, 2, 3]}) >>> df = df.iloc[1:] >>> df A 1 1 2 2 3 3 >>> df.dtypes A object dtype: object >>> df.infer_objects().dtypes A int64 dtype: object """ new_mgr = self._mgr.convert(copy=copy) return self._constructor(new_mgr).__finalize__(self, method="infer_objects") def convert_dtypes( self: NDFrameT, infer_objects: bool_t = True, convert_string: bool_t = True, convert_integer: bool_t = True, convert_boolean: bool_t = True, convert_floating: bool_t = True, dtype_backend: DtypeBackend = "numpy_nullable", ) -> NDFrameT: """ Convert columns to the best possible dtypes using dtypes supporting ``pd.NA``. Parameters ---------- infer_objects : bool, default True Whether object dtypes should be converted to the best possible types. convert_string : bool, default True Whether object dtypes should be converted to ``StringDtype()``. convert_integer : bool, default True Whether, if possible, conversion can be done to integer extension types. convert_boolean : bool, defaults True Whether object dtypes should be converted to ``BooleanDtypes()``. convert_floating : bool, defaults True Whether, if possible, conversion can be done to floating extension types. If `convert_integer` is also True, preference will be give to integer dtypes if the floats can be faithfully casted to integers. .. versionadded:: 1.2.0 dtype_backend : {"numpy_nullable", "pyarrow"}, default "numpy_nullable" Which dtype_backend to use, e.g. whether a DataFrame should use nullable dtypes for all dtypes that have a nullable implementation when "numpy_nullable" is set, pyarrow is used for all dtypes if "pyarrow" is set. The dtype_backends are still experimential. .. versionadded:: 2.0 Returns ------- Series or DataFrame Copy of input object with new dtype. See Also -------- infer_objects : Infer dtypes of objects. to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to a numeric type. 
Notes ----- By default, ``convert_dtypes`` will attempt to convert a Series (or each Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options ``convert_string``, ``convert_integer``, ``convert_boolean`` and ``convert_floating``, it is possible to turn off individual conversions to ``StringDtype``, the integer extension types, ``BooleanDtype`` or floating extension types, respectively. For object-dtyped columns, if ``infer_objects`` is ``True``, use the inference rules as during normal Series/DataFrame construction. Then, if possible, convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer or floating extension type, otherwise leave as ``object``. If the dtype is integer, convert to an appropriate integer extension type. If the dtype is numeric, and consists of all integers, convert to an appropriate integer extension type. Otherwise, convert to an appropriate floating extension type. .. versionchanged:: 1.2 Starting with pandas 1.2, this method also converts float columns to the nullable floating extension type. In the future, as new dtypes are added that support ``pd.NA``, the results of this method will change to support those new dtypes. .. versionadded:: 2.0 The nullable dtype implementation can be configured by calling ``pd.set_option("mode.dtype_backend", "pandas")`` to use numpy-backed nullable dtypes or ``pd.set_option("mode.dtype_backend", "pyarrow")`` to use pyarrow-backed nullable dtypes (using ``pd.ArrowDtype``). Examples -------- >>> df = pd.DataFrame( ... { ... "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")), ... "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")), ... "c": pd.Series([True, False, np.nan], dtype=np.dtype("O")), ... "d": pd.Series(["h", "i", np.nan], dtype=np.dtype("O")), ... "e": pd.Series([10, np.nan, 20], dtype=np.dtype("float")), ... "f": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")), ... } ... ) Start with a DataFrame with default dtypes. >>> df a b c d e f 0 1 x True h 10.0 NaN 1 2 y False i NaN 100.5 2 3 z NaN NaN 20.0 200.0 >>> df.dtypes a int32 b object c object d object e float64 f float64 dtype: object Convert the DataFrame to use best possible dtypes. >>> dfn = df.convert_dtypes() >>> dfn a b c d e f 0 1 x True h 10 <NA> 1 2 y False i <NA> 100.5 2 3 z <NA> <NA> 20 200.0 >>> dfn.dtypes a Int32 b string[python] c boolean d string[python] e Int64 f Float64 dtype: object Start with a Series of strings and missing data represented by ``np.nan``. >>> s = pd.Series(["a", "b", np.nan]) >>> s 0 a 1 b 2 NaN dtype: object Obtain a Series with dtype ``StringDtype``. 
>>> s.convert_dtypes() 0 a 1 b 2 <NA> dtype: string """ check_dtype_backend(dtype_backend) if self.ndim == 1: return self._convert_dtypes( infer_objects, convert_string, convert_integer, convert_boolean, convert_floating, dtype_backend=dtype_backend, ) else: results = [ col._convert_dtypes( infer_objects, convert_string, convert_integer, convert_boolean, convert_floating, dtype_backend=dtype_backend, ) for col_name, col in self.items() ] if len(results) > 0: result = concat(results, axis=1, copy=False, keys=self.columns) cons = cast(Type["DataFrame"], self._constructor) result = cons(result) result = result.__finalize__(self, method="convert_dtypes") # https://github.com/python/mypy/issues/8354 return cast(NDFrameT, result) else: return self.copy(deep=None) # ---------------------------------------------------------------------- # Filling NA's def fillna( self: NDFrameT, value: Hashable | Mapping | Series | DataFrame = ..., *, method: FillnaOptions | None = ..., axis: Axis | None = ..., inplace: Literal[False] = ..., limit: int | None = ..., downcast: dict | None = ..., ) -> NDFrameT: ... def fillna( self, value: Hashable | Mapping | Series | DataFrame = ..., *, method: FillnaOptions | None = ..., axis: Axis | None = ..., inplace: Literal[True], limit: int | None = ..., downcast: dict | None = ..., ) -> None: ... def fillna( self: NDFrameT, value: Hashable | Mapping | Series | DataFrame = ..., *, method: FillnaOptions | None = ..., axis: Axis | None = ..., inplace: bool_t = ..., limit: int | None = ..., downcast: dict | None = ..., ) -> NDFrameT | None: ... def fillna( self: NDFrameT, value: Hashable | Mapping | Series | DataFrame = None, *, method: FillnaOptions | None = None, axis: Axis | None = None, inplace: bool_t = False, limit: int | None = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Fill NA/NaN values using the specified method. Parameters ---------- value : scalar, dict, Series, or DataFrame Value to use to fill holes (e.g. 0), alternately a dict/Series/DataFrame of values specifying which value to use for each index (for a Series) or column (for a DataFrame). Values not in the dict/Series/DataFrame will not be filled. This value cannot be a list. method : {{'backfill', 'bfill', 'ffill', None}}, default None Method to use for filling holes in reindexed Series: * ffill: propagate last valid observation forward to next valid. * backfill / bfill: use next valid observation to fill gap. axis : {axes_single_arg} Axis along which to fill missing values. For `Series` this parameter is unused and defaults to 0. inplace : bool, default False If True, fill in-place. Note: this will modify any other views on this object (e.g., a no-copy slice for a column in a DataFrame). limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. downcast : dict, default is None A dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate equal type (e.g. float64 to int64 if possible). Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. See Also -------- interpolate : Fill NaN values using interpolation. reindex : Conform object to new index. 
asfreq : Convert TimeSeries to specified frequency. Examples -------- >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0], ... [3, 4, np.nan, 1], ... [np.nan, np.nan, np.nan, np.nan], ... [np.nan, 3, np.nan, 4]], ... columns=list("ABCD")) >>> df A B C D 0 NaN 2.0 NaN 0.0 1 3.0 4.0 NaN 1.0 2 NaN NaN NaN NaN 3 NaN 3.0 NaN 4.0 Replace all NaN elements with 0s. >>> df.fillna(0) A B C D 0 0.0 2.0 0.0 0.0 1 3.0 4.0 0.0 1.0 2 0.0 0.0 0.0 0.0 3 0.0 3.0 0.0 4.0 We can also propagate non-null values forward or backward. >>> df.fillna(method="ffill") A B C D 0 NaN 2.0 NaN 0.0 1 3.0 4.0 NaN 1.0 2 3.0 4.0 NaN 1.0 3 3.0 3.0 NaN 4.0 Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1, 2, and 3 respectively. >>> values = {{"A": 0, "B": 1, "C": 2, "D": 3}} >>> df.fillna(value=values) A B C D 0 0.0 2.0 2.0 0.0 1 3.0 4.0 2.0 1.0 2 0.0 1.0 2.0 3.0 3 0.0 3.0 2.0 4.0 Only replace the first NaN element. >>> df.fillna(value=values, limit=1) A B C D 0 0.0 2.0 2.0 0.0 1 3.0 4.0 NaN 1.0 2 NaN 1.0 NaN 3.0 3 NaN 3.0 NaN 4.0 When filling using a DataFrame, replacement happens along the same column names and same indices >>> df2 = pd.DataFrame(np.zeros((4, 4)), columns=list("ABCE")) >>> df.fillna(df2) A B C D 0 0.0 2.0 0.0 0.0 1 3.0 4.0 0.0 1.0 2 0.0 0.0 0.0 NaN 3 0.0 3.0 0.0 4.0 Note that column D is not affected since it is not present in df2. """ inplace = validate_bool_kwarg(inplace, "inplace") value, method = validate_fillna_kwargs(value, method) # set the default here, so functions examining the signaure # can detect if something was set (e.g. in groupby) (GH9221) if axis is None: axis = 0 axis = self._get_axis_number(axis) if value is None: if not self._mgr.is_single_block and axis == 1: if inplace: raise NotImplementedError() result = self.T.fillna(method=method, limit=limit).T return result new_data = self._mgr.interpolate( method=method, axis=axis, limit=limit, inplace=inplace, downcast=downcast, ) else: if self.ndim == 1: if isinstance(value, (dict, ABCSeries)): if not len(value): # test_fillna_nonscalar if inplace: return None return self.copy(deep=None) from pandas import Series value = Series(value) value = value.reindex(self.index, copy=False) value = value._values elif not is_list_like(value): pass else: raise TypeError( '"value" parameter must be a scalar, dict ' "or Series, but you passed a " f'"{type(value).__name__}"' ) new_data = self._mgr.fillna( value=value, limit=limit, inplace=inplace, downcast=downcast ) elif isinstance(value, (dict, ABCSeries)): if axis == 1: raise NotImplementedError( "Currently only can fill " "with dict/Series column " "by column" ) if using_copy_on_write(): result = self.copy(deep=None) else: result = self if inplace else self.copy() is_dict = isinstance(downcast, dict) for k, v in value.items(): if k not in result: continue # error: Item "None" of "Optional[Dict[Any, Any]]" has no # attribute "get" downcast_k = ( downcast if not is_dict else downcast.get(k) # type: ignore[union-attr] ) res_k = result[k].fillna(v, limit=limit, downcast=downcast_k) if not inplace: result[k] = res_k else: # We can write into our existing column(s) iff dtype # was preserved. if isinstance(res_k, ABCSeries): # i.e. 'k' only shows up once in self.columns if res_k.dtype == result[k].dtype: result.loc[:, k] = res_k else: # Different dtype -> no way to do inplace. 
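                            # __setitem__ swaps in the filled column as a new
                            # backing array; once the dtype changes there is
                            # nothing left to write into in place. (Sketch,
                            # hypothetical frame: filling the float column of
                            # ``pd.DataFrame({"a": [1.0, None]})`` with the
                            # string ``"x"`` has to produce object dtype.)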
result[k] = res_k else: # see test_fillna_dict_inplace_nonunique_columns locs = result.columns.get_loc(k) if isinstance(locs, slice): locs = np.arange(self.shape[1])[locs] elif ( isinstance(locs, np.ndarray) and locs.dtype.kind == "b" ): locs = locs.nonzero()[0] elif not ( isinstance(locs, np.ndarray) and locs.dtype.kind == "i" ): # Should never be reached, but let's cover our bases raise NotImplementedError( "Unexpected get_loc result, please report a bug at " "https://github.com/pandas-dev/pandas" ) for i, loc in enumerate(locs): res_loc = res_k.iloc[:, i] target = self.iloc[:, loc] if res_loc.dtype == target.dtype: result.iloc[:, loc] = res_loc else: result.isetitem(loc, res_loc) if inplace: return self._update_inplace(result) else: return result elif not is_list_like(value): if axis == 1: result = self.T.fillna(value=value, limit=limit).T new_data = result else: new_data = self._mgr.fillna( value=value, limit=limit, inplace=inplace, downcast=downcast ) elif isinstance(value, ABCDataFrame) and self.ndim == 2: new_data = self.where(self.notna(), value)._mgr else: raise ValueError(f"invalid fill value with a {type(value)}") result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="fillna") def ffill( self: NDFrameT, *, axis: None | Axis = ..., inplace: Literal[False] = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT: ... def ffill( self, *, axis: None | Axis = ..., inplace: Literal[True], limit: None | int = ..., downcast: dict | None = ..., ) -> None: ... def ffill( self: NDFrameT, *, axis: None | Axis = ..., inplace: bool_t = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT | None: ... def ffill( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``. Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. """ return self.fillna( method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast ) def pad( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``. .. deprecated:: 2.0 {klass}.pad is deprecated. Use {klass}.ffill instead. Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. """ warnings.warn( "DataFrame.pad/Series.pad is deprecated. Use " "DataFrame.ffill/Series.ffill instead", FutureWarning, stacklevel=find_stack_level(), ) return self.ffill(axis=axis, inplace=inplace, limit=limit, downcast=downcast) def bfill( self: NDFrameT, *, axis: None | Axis = ..., inplace: Literal[False] = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT: ... def bfill( self, *, axis: None | Axis = ..., inplace: Literal[True], limit: None | int = ..., downcast: dict | None = ..., ) -> None: ... def bfill( self: NDFrameT, *, axis: None | Axis = ..., inplace: bool_t = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT | None: ... def bfill( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``. 
        Returns
        -------
        {klass} or None
            Object with missing values filled or None if ``inplace=True``.
        """
        return self.fillna(
            method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
        )

    def backfill(
        self: NDFrameT,
        *,
        axis: None | Axis = None,
        inplace: bool_t = False,
        limit: None | int = None,
        downcast: dict | None = None,
    ) -> NDFrameT | None:
        """
        Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.

        .. deprecated:: 2.0
            {klass}.backfill is deprecated. Use {klass}.bfill instead.

        Returns
        -------
        {klass} or None
            Object with missing values filled or None if ``inplace=True``.
        """
        warnings.warn(
            "DataFrame.backfill/Series.backfill is deprecated. Use "
            "DataFrame.bfill/Series.bfill instead",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        return self.bfill(axis=axis, inplace=inplace, limit=limit, downcast=downcast)

    @overload
    def replace(
        self: NDFrameT,
        to_replace=...,
        value=...,
        *,
        inplace: Literal[False] = ...,
        limit: int | None = ...,
        regex: bool_t = ...,
        method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ...,
    ) -> NDFrameT:
        ...

    @overload
    def replace(
        self,
        to_replace=...,
        value=...,
        *,
        inplace: Literal[True],
        limit: int | None = ...,
        regex: bool_t = ...,
        method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ...,
    ) -> None:
        ...

    @overload
    def replace(
        self: NDFrameT,
        to_replace=...,
        value=...,
        *,
        inplace: bool_t = ...,
        limit: int | None = ...,
        regex: bool_t = ...,
        method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ...,
    ) -> NDFrameT | None:
        ...

    @doc(
        _shared_docs["replace"],
        klass=_shared_doc_kwargs["klass"],
        inplace=_shared_doc_kwargs["inplace"],
        replace_iloc=_shared_doc_kwargs["replace_iloc"],
    )
    def replace(
        self: NDFrameT,
        to_replace=None,
        value=lib.no_default,
        *,
        inplace: bool_t = False,
        limit: int | None = None,
        regex: bool_t = False,
        method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = lib.no_default,
    ) -> NDFrameT | None:
        if not (
            is_scalar(to_replace)
            or is_re_compilable(to_replace)
            or is_list_like(to_replace)
        ):
            raise TypeError(
                "Expecting 'to_replace' to be either a scalar, array-like, "
                "dict or None, got invalid type "
                f"{repr(type(to_replace).__name__)}"
            )

        inplace = validate_bool_kwarg(inplace, "inplace")
        if not is_bool(regex) and to_replace is not None:
            raise ValueError("'to_replace' must be 'None' if 'regex' is not a bool")

        if value is lib.no_default or method is not lib.no_default:
            # GH#36984 if the user explicitly passes value=None we want to
            # respect that. We have the corner case where the user explicitly
            # passes value=None *and* a method, which we interpret as meaning
            # they want the (documented) default behavior.
            if method is lib.no_default:
                # TODO: get this to show up as the default in the docs?
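                # Neither a replacement value nor an explicit method was
                # passed, so fall back to the documented default of forward
                # filling the matched entries. Hypothetical illustration:
                #   >>> pd.Series([0, 1, 2]).replace(1)  # same as method="pad"
                #   0    0
                #   1    0
                #   2    2
                #   dtype: int64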
method = "pad" # passing a single value that is scalar like # when value is None (GH5319), for compat if not is_dict_like(to_replace) and not is_dict_like(regex): to_replace = [to_replace] if isinstance(to_replace, (tuple, list)): # TODO: Consider copy-on-write for non-replaced columns's here if isinstance(self, ABCDataFrame): from pandas import Series result = self.apply( Series._replace_single, args=(to_replace, method, inplace, limit), ) if inplace: return None return result return self._replace_single(to_replace, method, inplace, limit) if not is_dict_like(to_replace): if not is_dict_like(regex): raise TypeError( 'If "to_replace" and "value" are both None ' 'and "to_replace" is not a list, then ' "regex must be a mapping" ) to_replace = regex regex = True items = list(to_replace.items()) if items: keys, values = zip(*items) else: keys, values = ([], []) are_mappings = [is_dict_like(v) for v in values] if any(are_mappings): if not all(are_mappings): raise TypeError( "If a nested mapping is passed, all values " "of the top level mapping must be mappings" ) # passed a nested dict/Series to_rep_dict = {} value_dict = {} for k, v in items: keys, values = list(zip(*v.items())) or ([], []) to_rep_dict[k] = list(keys) value_dict[k] = list(values) to_replace, value = to_rep_dict, value_dict else: to_replace, value = keys, values return self.replace( to_replace, value, inplace=inplace, limit=limit, regex=regex ) else: # need a non-zero len on all axes if not self.size: if inplace: return None return self.copy(deep=None) if is_dict_like(to_replace): if is_dict_like(value): # {'A' : NA} -> {'A' : 0} # Note: Checking below for `in foo.keys()` instead of # `in foo` is needed for when we have a Series and not dict mapping = { col: (to_replace[col], value[col]) for col in to_replace.keys() if col in value.keys() and col in self } return self._replace_columnwise(mapping, inplace, regex) # {'A': NA} -> 0 elif not is_list_like(value): # Operate column-wise if self.ndim == 1: raise ValueError( "Series.replace cannot use dict-like to_replace " "and non-None value" ) mapping = { col: (to_rep, value) for col, to_rep in to_replace.items() } return self._replace_columnwise(mapping, inplace, regex) else: raise TypeError("value argument must be scalar, dict, or Series") elif is_list_like(to_replace): if not is_list_like(value): # e.g. to_replace = [NA, ''] and value is 0, # so we replace NA with 0 and then replace '' with 0 value = [value] * len(to_replace) # e.g. we have to_replace = [NA, ''] and value = [0, 'missing'] if len(to_replace) != len(value): raise ValueError( f"Replacement lists must match in length. 
" f"Expecting {len(to_replace)} got {len(value)} " ) new_data = self._mgr.replace_list( src_list=to_replace, dest_list=value, inplace=inplace, regex=regex, ) elif to_replace is None: if not ( is_re_compilable(regex) or is_list_like(regex) or is_dict_like(regex) ): raise TypeError( f"'regex' must be a string or a compiled regular expression " f"or a list or dict of strings or regular expressions, " f"you passed a {repr(type(regex).__name__)}" ) return self.replace( regex, value, inplace=inplace, limit=limit, regex=True ) else: # dest iterable dict-like if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1} # Operate column-wise if self.ndim == 1: raise ValueError( "Series.replace cannot use dict-value and " "non-None to_replace" ) mapping = {col: (to_replace, val) for col, val in value.items()} return self._replace_columnwise(mapping, inplace, regex) elif not is_list_like(value): # NA -> 0 regex = should_use_regex(regex, to_replace) if regex: new_data = self._mgr.replace_regex( to_replace=to_replace, value=value, inplace=inplace, ) else: new_data = self._mgr.replace( to_replace=to_replace, value=value, inplace=inplace ) else: raise TypeError( f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}' ) result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="replace") def interpolate( self: NDFrameT, method: str = "linear", *, axis: Axis = 0, limit: int | None = None, inplace: bool_t = False, limit_direction: str | None = None, limit_area: str | None = None, downcast: str | None = None, **kwargs, ) -> NDFrameT | None: """ Fill NaN values using an interpolation method. Please note that only ``method='linear'`` is supported for DataFrame/Series with a MultiIndex. Parameters ---------- method : str, default 'linear' Interpolation technique to use. One of: * 'linear': Ignore the index and treat the values as equally spaced. This is the only method supported on MultiIndexes. * 'time': Works on daily and higher resolution data to interpolate given length of interval. * 'index', 'values': use the actual numerical values of the index. * 'pad': Fill in NaNs using existing values. * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'barycentric', 'polynomial': Passed to `scipy.interpolate.interp1d`, whereas 'spline' is passed to `scipy.interpolate.UnivariateSpline`. These methods use the numerical values of the index. Both 'polynomial' and 'spline' require that you also specify an `order` (int), e.g. ``df.interpolate(method='polynomial', order=5)``. Note that, `slinear` method in Pandas refers to the Scipy first order `spline` instead of Pandas first order `spline`. * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima', 'cubicspline': Wrappers around the SciPy interpolation methods of similar names. See `Notes`. * 'from_derivatives': Refers to `scipy.interpolate.BPoly.from_derivatives` which replaces 'piecewise_polynomial' interpolation method in scipy 0.18. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to interpolate along. For `Series` this parameter is unused and defaults to 0. limit : int, optional Maximum number of consecutive NaNs to fill. Must be greater than 0. inplace : bool, default False Update the data in place if possible. limit_direction : {{'forward', 'backward', 'both'}}, Optional Consecutive NaNs will be filled in this direction. If limit is specified: * If 'method' is 'pad' or 'ffill', 'limit_direction' must be 'forward'. 
* If 'method' is 'backfill' or 'bfill', 'limit_direction' must be 'backwards'. If 'limit' is not specified: * If 'method' is 'backfill' or 'bfill', the default is 'backward' * else the default is 'forward' .. versionchanged:: 1.1.0 raises ValueError if `limit_direction` is 'forward' or 'both' and method is 'backfill' or 'bfill'. raises ValueError if `limit_direction` is 'backward' or 'both' and method is 'pad' or 'ffill'. limit_area : {{`None`, 'inside', 'outside'}}, default None If limit is specified, consecutive NaNs will be filled with this restriction. * ``None``: No fill restriction. * 'inside': Only fill NaNs surrounded by valid values (interpolate). * 'outside': Only fill NaNs outside valid values (extrapolate). downcast : optional, 'infer' or None, defaults to None Downcast dtypes if possible. ``**kwargs`` : optional Keyword arguments to pass on to the interpolating function. Returns ------- Series or DataFrame or None Returns the same object type as the caller, interpolated at some or all ``NaN`` values or None if ``inplace=True``. See Also -------- fillna : Fill missing values using different methods. scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials (Akima interpolator). scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the Bernstein basis. scipy.interpolate.interp1d : Interpolate a 1-D function. scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh interpolator). scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic interpolation. scipy.interpolate.CubicSpline : Cubic spline data interpolator. Notes ----- The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima' methods are wrappers around the respective SciPy implementations of similar names. These use the actual numerical values of the index. For more information on their behavior, see the `SciPy documentation <https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__. Examples -------- Filling in ``NaN`` in a :class:`~pandas.Series` via linear interpolation. >>> s = pd.Series([0, 1, np.nan, 3]) >>> s 0 0.0 1 1.0 2 NaN 3 3.0 dtype: float64 >>> s.interpolate() 0 0.0 1 1.0 2 2.0 3 3.0 dtype: float64 Filling in ``NaN`` in a Series by padding, but filling at most two consecutive ``NaN`` at a time. >>> s = pd.Series([np.nan, "single_one", np.nan, ... "fill_two_more", np.nan, np.nan, np.nan, ... 4.71, np.nan]) >>> s 0 NaN 1 single_one 2 NaN 3 fill_two_more 4 NaN 5 NaN 6 NaN 7 4.71 8 NaN dtype: object >>> s.interpolate(method='pad', limit=2) 0 NaN 1 single_one 2 single_one 3 fill_two_more 4 fill_two_more 5 fill_two_more 6 NaN 7 4.71 8 4.71 dtype: object Filling in ``NaN`` in a Series via polynomial interpolation or splines: Both 'polynomial' and 'spline' methods require that you also specify an ``order`` (int). >>> s = pd.Series([0, 2, np.nan, 8]) >>> s.interpolate(method='polynomial', order=2) 0 0.000000 1 2.000000 2 4.666667 3 8.000000 dtype: float64 Fill the DataFrame forward (that is, going down) along each column using linear interpolation. Note how the last entry in column 'a' is interpolated differently, because there is no entry after it to use for interpolation. Note how the first entry in column 'b' remains ``NaN``, because there is no entry before it to use for interpolation. >>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0), ... (np.nan, 2.0, np.nan, np.nan), ... (2.0, 3.0, np.nan, 9.0), ... (np.nan, 4.0, -4.0, 16.0)], ... 
columns=list('abcd')) >>> df a b c d 0 0.0 NaN -1.0 1.0 1 NaN 2.0 NaN NaN 2 2.0 3.0 NaN 9.0 3 NaN 4.0 -4.0 16.0 >>> df.interpolate(method='linear', limit_direction='forward', axis=0) a b c d 0 0.0 NaN -1.0 1.0 1 1.0 2.0 -2.0 5.0 2 2.0 3.0 -3.0 9.0 3 2.0 4.0 -4.0 16.0 Using polynomial interpolation. >>> df['d'].interpolate(method='polynomial', order=2) 0 1.0 1 4.0 2 9.0 3 16.0 Name: d, dtype: float64 """ inplace = validate_bool_kwarg(inplace, "inplace") axis = self._get_axis_number(axis) fillna_methods = ["ffill", "bfill", "pad", "backfill"] should_transpose = axis == 1 and method not in fillna_methods obj = self.T if should_transpose else self if obj.empty: return self.copy() if method not in fillna_methods: axis = self._info_axis_number if isinstance(obj.index, MultiIndex) and method != "linear": raise ValueError( "Only `method=linear` interpolation is supported on MultiIndexes." ) # Set `limit_direction` depending on `method` if limit_direction is None: limit_direction = ( "backward" if method in ("backfill", "bfill") else "forward" ) else: if method in ("pad", "ffill") and limit_direction != "forward": raise ValueError( f"`limit_direction` must be 'forward' for method `{method}`" ) if method in ("backfill", "bfill") and limit_direction != "backward": raise ValueError( f"`limit_direction` must be 'backward' for method `{method}`" ) if obj.ndim == 2 and np.all(obj.dtypes == np.dtype("object")): raise TypeError( "Cannot interpolate with all object-dtype columns " "in the DataFrame. Try setting at least one " "column to a numeric dtype." ) # create/use the index if method == "linear": # prior default index = Index(np.arange(len(obj.index))) else: index = obj.index methods = {"index", "values", "nearest", "time"} is_numeric_or_datetime = ( is_numeric_dtype(index.dtype) or is_datetime64_any_dtype(index.dtype) or is_timedelta64_dtype(index.dtype) ) if method not in methods and not is_numeric_or_datetime: raise ValueError( "Index column must be numeric or datetime type when " f"using {method} method other than linear. " "Try setting a numeric or datetime index column before " "interpolating." ) if isna(index).any(): raise NotImplementedError( "Interpolation with NaNs in the index " "has not been implemented. Try filling " "those NaNs before interpolating." ) new_data = obj._mgr.interpolate( method=method, axis=axis, index=index, limit=limit, limit_direction=limit_direction, limit_area=limit_area, inplace=inplace, downcast=downcast, **kwargs, ) result = self._constructor(new_data) if should_transpose: result = result.T if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="interpolate") # ---------------------------------------------------------------------- # Timeseries methods Methods def asof(self, where, subset=None): """ Return the last row(s) without any NaNs before `where`. The last row (for each element in `where`, if list) without any NaN is taken. In case of a :class:`~pandas.DataFrame`, the last row without NaN considering only the subset of columns (if not `None`) If there is no good value, NaN is returned for a Series or a Series of NaN values for a DataFrame Parameters ---------- where : date or array-like of dates Date(s) before which the last row(s) are returned. subset : str or array-like of str, default `None` For DataFrame, if not `None`, only use these columns to check for NaNs. 
Returns ------- scalar, Series, or DataFrame The return can be: * scalar : when `self` is a Series and `where` is a scalar * Series: when `self` is a Series and `where` is an array-like, or when `self` is a DataFrame and `where` is a scalar * DataFrame : when `self` is a DataFrame and `where` is an array-like Return scalar, Series, or DataFrame. See Also -------- merge_asof : Perform an asof merge. Similar to left join. Notes ----- Dates are assumed to be sorted. Raises if this is not the case. Examples -------- A Series and a scalar `where`. >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40]) >>> s 10 1.0 20 2.0 30 NaN 40 4.0 dtype: float64 >>> s.asof(20) 2.0 For a sequence `where`, a Series is returned. The first value is NaN, because the first element of `where` is before the first index value. >>> s.asof([5, 20]) 5 NaN 20 2.0 dtype: float64 Missing values are not considered. The following is ``2.0``, not NaN, even though NaN is at the index location for ``30``. >>> s.asof(30) 2.0 Take all columns into consideration >>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50], ... 'b': [None, None, None, None, 500]}, ... index=pd.DatetimeIndex(['2018-02-27 09:01:00', ... '2018-02-27 09:02:00', ... '2018-02-27 09:03:00', ... '2018-02-27 09:04:00', ... '2018-02-27 09:05:00'])) >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', ... '2018-02-27 09:04:30'])) a b 2018-02-27 09:03:30 NaN NaN 2018-02-27 09:04:30 NaN NaN Take a single column into consideration >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', ... '2018-02-27 09:04:30']), ... subset=['a']) a b 2018-02-27 09:03:30 30 NaN 2018-02-27 09:04:30 40 NaN """ if isinstance(where, str): where = Timestamp(where) if not self.index.is_monotonic_increasing: raise ValueError("asof requires a sorted index") is_series = isinstance(self, ABCSeries) if is_series: if subset is not None: raise ValueError("subset is not valid for Series") else: if subset is None: subset = self.columns if not is_list_like(subset): subset = [subset] is_list = is_list_like(where) if not is_list: start = self.index[0] if isinstance(self.index, PeriodIndex): where = Period(where, freq=self.index.freq) if where < start: if not is_series: return self._constructor_sliced( index=self.columns, name=where, dtype=np.float64 ) return np.nan # It's always much faster to use a *while* loop here for # Series than pre-computing all the NAs. However a # *while* loop is extremely expensive for DataFrame # so we later pre-compute all the NAs and use the same # code path whether *where* is a scalar or list. # See PR: https://github.com/pandas-dev/pandas/pull/14476 if is_series: loc = self.index.searchsorted(where, side="right") if loc > 0: loc -= 1 values = self._values while loc > 0 and isna(values[loc]): loc -= 1 return values[loc] if not isinstance(where, Index): where = Index(where) if is_list else Index([where]) nulls = self.isna() if is_series else self[subset].isna().any(axis=1) if nulls.all(): if is_series: self = cast("Series", self) return self._constructor(np.nan, index=where, name=self.name) elif is_list: self = cast("DataFrame", self) return self._constructor(np.nan, index=where, columns=self.columns) else: self = cast("DataFrame", self) return self._constructor_sliced( np.nan, index=self.columns, name=where[0] ) locs = self.index.asof_locs(where, ~(nulls._values)) # mask the missing missing = locs == -1 data = self.take(locs) data.index = where if missing.any(): # GH#16063 only do this setting when necessary, otherwise # we'd cast e.g. 
bools to floats data.loc[missing] = np.nan return data if is_list else data.iloc[-1] # ---------------------------------------------------------------------- # Action Methods def isna(self: NDFrameT) -> NDFrameT: """ Detect missing values. Return a boolean same-sized object indicating if the values are NA. NA values, such as None or :attr:`numpy.NaN`, gets mapped to True values. Everything else gets mapped to False values. Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). Returns ------- {klass} Mask of bool values for each element in {klass} that indicates whether an element is an NA value. See Also -------- {klass}.isnull : Alias of isna. {klass}.notna : Boolean inverse of isna. {klass}.dropna : Omit axes labels with missing values. isna : Top-level isna. Examples -------- Show which entries in a DataFrame are NA. >>> df = pd.DataFrame(dict(age=[5, 6, np.NaN], ... born=[pd.NaT, pd.Timestamp('1939-05-27'), ... pd.Timestamp('1940-04-25')], ... name=['Alfred', 'Batman', ''], ... toy=[None, 'Batmobile', 'Joker'])) >>> df age born name toy 0 5.0 NaT Alfred None 1 6.0 1939-05-27 Batman Batmobile 2 NaN 1940-04-25 Joker >>> df.isna() age born name toy 0 False True False True 1 False False False False 2 True False False False Show which entries in a Series are NA. >>> ser = pd.Series([5, 6, np.NaN]) >>> ser 0 5.0 1 6.0 2 NaN dtype: float64 >>> ser.isna() 0 False 1 False 2 True dtype: bool """ return isna(self).__finalize__(self, method="isna") def isnull(self: NDFrameT) -> NDFrameT: return isna(self).__finalize__(self, method="isnull") def notna(self: NDFrameT) -> NDFrameT: """ Detect existing (non-missing) values. Return a boolean same-sized object indicating if the values are not NA. Non-missing values get mapped to True. Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). NA values, such as None or :attr:`numpy.NaN`, get mapped to False values. Returns ------- {klass} Mask of bool values for each element in {klass} that indicates whether an element is not an NA value. See Also -------- {klass}.notnull : Alias of notna. {klass}.isna : Boolean inverse of notna. {klass}.dropna : Omit axes labels with missing values. notna : Top-level notna. Examples -------- Show which entries in a DataFrame are not NA. >>> df = pd.DataFrame(dict(age=[5, 6, np.NaN], ... born=[pd.NaT, pd.Timestamp('1939-05-27'), ... pd.Timestamp('1940-04-25')], ... name=['Alfred', 'Batman', ''], ... toy=[None, 'Batmobile', 'Joker'])) >>> df age born name toy 0 5.0 NaT Alfred None 1 6.0 1939-05-27 Batman Batmobile 2 NaN 1940-04-25 Joker >>> df.notna() age born name toy 0 True False True False 1 True True True True 2 False True True True Show which entries in a Series are not NA. 
>>> ser = pd.Series([5, 6, np.NaN]) >>> ser 0 5.0 1 6.0 2 NaN dtype: float64 >>> ser.notna() 0 True 1 True 2 False dtype: bool """ return notna(self).__finalize__(self, method="notna") def notnull(self: NDFrameT) -> NDFrameT: return notna(self).__finalize__(self, method="notnull") def _clip_with_scalar(self, lower, upper, inplace: bool_t = False): if (lower is not None and np.any(isna(lower))) or ( upper is not None and np.any(isna(upper)) ): raise ValueError("Cannot use an NA value as a clip threshold") result = self mask = isna(self._values) with np.errstate(all="ignore"): if upper is not None: subset = self <= upper result = result.where(subset, upper, axis=None, inplace=False) if lower is not None: subset = self >= lower result = result.where(subset, lower, axis=None, inplace=False) if np.any(mask): result[mask] = np.nan if inplace: return self._update_inplace(result) else: return result def _clip_with_one_bound(self, threshold, method, axis, inplace): if axis is not None: axis = self._get_axis_number(axis) # method is self.le for upper bound and self.ge for lower bound if is_scalar(threshold) and is_number(threshold): if method.__name__ == "le": return self._clip_with_scalar(None, threshold, inplace=inplace) return self._clip_with_scalar(threshold, None, inplace=inplace) # GH #15390 # In order for where method to work, the threshold must # be transformed to NDFrame from other array like structure. if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold): if isinstance(self, ABCSeries): threshold = self._constructor(threshold, index=self.index) else: threshold = align_method_FRAME(self, threshold, axis, flex=None)[1] # GH 40420 # Treat missing thresholds as no bounds, not clipping the values if is_list_like(threshold): fill_value = np.inf if method.__name__ == "le" else -np.inf threshold_inf = threshold.fillna(fill_value) else: threshold_inf = threshold subset = method(threshold_inf, axis=axis) | isna(self) # GH 40420 return self.where(subset, threshold, axis=axis, inplace=inplace) def clip( self: NDFrameT, lower=None, upper=None, *, axis: Axis | None = None, inplace: bool_t = False, **kwargs, ) -> NDFrameT | None: """ Trim values at input threshold(s). Assigns values outside boundary to boundary values. Thresholds can be singular values or array like, and in the latter case the clipping is performed element-wise in the specified axis. Parameters ---------- lower : float or array-like, default None Minimum threshold value. All values below this threshold will be set to it. A missing threshold (e.g `NA`) will not clip the value. upper : float or array-like, default None Maximum threshold value. All values above this threshold will be set to it. A missing threshold (e.g `NA`) will not clip the value. axis : {{0 or 'index', 1 or 'columns', None}}, default None Align object with lower and upper along the given axis. For `Series` this parameter is unused and defaults to `None`. inplace : bool, default False Whether to perform the operation in place on the data. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with numpy. Returns ------- Series or DataFrame or None Same type as calling object with the values outside the clip boundaries replaced or None if ``inplace=True``. See Also -------- Series.clip : Trim values at input threshold in series. DataFrame.clip : Trim values at input threshold in dataframe. numpy.clip : Clip (limit) the values in an array. 
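Notes ----- When ``lower`` and ``upper`` are both scalars and ``lower > upper``, the two bounds are swapped before clipping, so the smaller value is always used as the lower bound (see GH 2747 in the implementation below).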
Examples -------- >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]} >>> df = pd.DataFrame(data) >>> df col_0 col_1 0 9 -2 1 -3 -7 2 0 6 3 -1 8 4 5 -5 Clips per column using lower and upper thresholds: >>> df.clip(-4, 6) col_0 col_1 0 6 -2 1 -3 -4 2 0 6 3 -1 6 4 5 -4 Clips using specific lower and upper thresholds per column element: >>> t = pd.Series([2, -4, -1, 6, 3]) >>> t 0 2 1 -4 2 -1 3 6 4 3 dtype: int64 >>> df.clip(t, t + 4, axis=0) col_0 col_1 0 6 2 1 -3 -4 2 0 3 3 6 8 4 5 3 Clips using specific lower threshold per column element, with missing values: >>> t = pd.Series([2, -4, np.NaN, 6, 3]) >>> t 0 2.0 1 -4.0 2 NaN 3 6.0 4 3.0 dtype: float64 >>> df.clip(t, axis=0) col_0 col_1 0 9 2 1 -3 -4 2 0 6 3 6 8 4 5 3 """ inplace = validate_bool_kwarg(inplace, "inplace") axis = nv.validate_clip_with_axis(axis, (), kwargs) if axis is not None: axis = self._get_axis_number(axis) # GH 17276 # numpy doesn't like NaN as a clip value # so ignore # GH 19992 # numpy doesn't drop a list-like bound containing NaN isna_lower = isna(lower) if not is_list_like(lower): if np.any(isna_lower): lower = None elif np.all(isna_lower): lower = None isna_upper = isna(upper) if not is_list_like(upper): if np.any(isna_upper): upper = None elif np.all(isna_upper): upper = None # GH 2747 (arguments were reversed) if ( lower is not None and upper is not None and is_scalar(lower) and is_scalar(upper) ): lower, upper = min(lower, upper), max(lower, upper) # fast-path for scalars if (lower is None or (is_scalar(lower) and is_number(lower))) and ( upper is None or (is_scalar(upper) and is_number(upper)) ): return self._clip_with_scalar(lower, upper, inplace=inplace) result = self if lower is not None: result = result._clip_with_one_bound( lower, method=self.ge, axis=axis, inplace=inplace ) if upper is not None: if inplace: result = self result = result._clip_with_one_bound( upper, method=self.le, axis=axis, inplace=inplace ) return result def asfreq( self: NDFrameT, freq: Frequency, method: FillnaOptions | None = None, how: str | None = None, normalize: bool_t = False, fill_value: Hashable = None, ) -> NDFrameT: """ Convert time series to specified frequency. Returns the original data conformed to a new index with the specified frequency. If the index of this {klass} is a :class:`~pandas.PeriodIndex`, the new index is the result of transforming the original index with :meth:`PeriodIndex.asfreq <pandas.PeriodIndex.asfreq>` (so the original index will map one-to-one to the new index). Otherwise, the new index will be equivalent to ``pd.date_range(start, end, freq=freq)`` where ``start`` and ``end`` are, respectively, the first and last entries in the original index (see :func:`pandas.date_range`). The values corresponding to any timesteps in the new index which were not present in the original index will be null (``NaN``), unless a method for filling such unknowns is provided (see the ``method`` parameter below). The :meth:`resample` method is more appropriate if an operation on each group of timesteps (such as an aggregate) is necessary to represent the data at the new frequency. Parameters ---------- freq : DateOffset or str Frequency DateOffset or string. method : {{'backfill'/'bfill', 'pad'/'ffill'}}, default None Method to use for filling holes in reindexed Series (note this does not fill NaNs that already were present): * 'pad' / 'ffill': propagate last valid observation forward to next valid * 'backfill' / 'bfill': use NEXT valid observation to fill. 
how : {{'start', 'end'}}, default end For PeriodIndex only (see PeriodIndex.asfreq). normalize : bool, default False Whether to reset output index to midnight. fill_value : scalar, optional Value to use for missing values, applied during upsampling (note this does not fill NaNs that already were present). Returns ------- {klass} {klass} object reindexed to the specified frequency. See Also -------- reindex : Conform DataFrame to new index with optional filling logic. Notes ----- To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- Start by creating a series with 4 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=4, freq='T') >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index) >>> df = pd.DataFrame({{'s': series}}) >>> df s 2000-01-01 00:00:00 0.0 2000-01-01 00:01:00 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:03:00 3.0 Upsample the series into 30 second bins. >>> df.asfreq(freq='30S') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 NaN 2000-01-01 00:03:00 3.0 Upsample again, providing a ``fill value``. >>> df.asfreq(freq='30S', fill_value=9.0) s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 9.0 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 9.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 9.0 2000-01-01 00:03:00 3.0 Upsample again, providing a ``method``. >>> df.asfreq(freq='30S', method='bfill') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 2.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 3.0 2000-01-01 00:03:00 3.0 """ from pandas.core.resample import asfreq return asfreq( self, freq, method=method, how=how, normalize=normalize, fill_value=fill_value, ) def at_time( self: NDFrameT, time, asof: bool_t = False, axis: Axis | None = None ) -> NDFrameT: """ Select values at particular time of day (e.g., 9:30AM). Parameters ---------- time : datetime.time or str The values to select. axis : {0 or 'index', 1 or 'columns'}, default 0 For `Series` this parameter is unused and defaults to 0. Returns ------- Series or DataFrame Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- between_time : Select values between particular times of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_at_time : Get just the index locations for values at particular time of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='12H') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-09 12:00:00 2 2018-04-10 00:00:00 3 2018-04-10 12:00:00 4 >>> ts.at_time('12:00') A 2018-04-09 12:00:00 2 2018-04-10 12:00:00 4 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) if not isinstance(index, DatetimeIndex): raise TypeError("Index must be DatetimeIndex") indexer = index.indexer_at_time(time, asof=asof) return self._take_with_is_copy(indexer, axis=axis) def between_time( self: NDFrameT, start_time, end_time, inclusive: IntervalClosedType = "both", axis: Axis | None = None, ) -> NDFrameT: """ Select values between particular times of the day (e.g., 9:00-9:30 AM). 
By setting ``start_time`` to be later than ``end_time``, you can get the times that are *not* between the two times. Parameters ---------- start_time : datetime.time or str Initial time as a time filter limit. end_time : datetime.time or str End time as a time filter limit. inclusive : {"both", "neither", "left", "right"}, default "both" Include boundaries; whether to set each bound as closed or open. axis : {0 or 'index', 1 or 'columns'}, default 0 Determine range time on index or columns value. For `Series` this parameter is unused and defaults to 0. Returns ------- Series or DataFrame Data from the original object filtered to the specified dates range. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- at_time : Select values at a particular time of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_between_time : Get just the index locations for values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 2018-04-12 01:00:00 4 >>> ts.between_time('0:15', '0:45') A 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 You get the times that are *not* between two times by setting ``start_time`` later than ``end_time``: >>> ts.between_time('0:45', '0:15') A 2018-04-09 00:00:00 1 2018-04-12 01:00:00 4 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) if not isinstance(index, DatetimeIndex): raise TypeError("Index must be DatetimeIndex") left_inclusive, right_inclusive = validate_inclusive(inclusive) indexer = index.indexer_between_time( start_time, end_time, include_start=left_inclusive, include_end=right_inclusive, ) return self._take_with_is_copy(indexer, axis=axis) def resample( self, rule, axis: Axis = 0, closed: str | None = None, label: str | None = None, convention: str = "start", kind: str | None = None, on: Level = None, level: Level = None, origin: str | TimestampConvertibleTypes = "start_day", offset: TimedeltaConvertibleTypes | None = None, group_keys: bool_t = False, ) -> Resampler: """ Resample time-series data. Convenience method for frequency conversion and resampling of time series. The object must have a datetime-like index (`DatetimeIndex`, `PeriodIndex`, or `TimedeltaIndex`), or the caller must pass the label of a datetime-like series/index to the ``on``/``level`` keyword parameter. Parameters ---------- rule : DateOffset, Timedelta or str The offset string or object representing target conversion. axis : {{0 or 'index', 1 or 'columns'}}, default 0 Which axis to use for up- or down-sampling. For `Series` this parameter is unused and defaults to 0. Must be `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`. closed : {{'right', 'left'}}, default None Which side of bin interval is closed. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. label : {{'right', 'left'}}, default None Which bin edge label to label bucket with. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. convention : {{'start', 'end', 's', 'e'}}, default 'start' For `PeriodIndex` only, controls whether to use the start or end of `rule`. 
kind : {{'timestamp', 'period'}}, optional, default None Pass 'timestamp' to convert the resulting index to a `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`. By default the input representation is retained. on : str, optional For a DataFrame, column to use instead of index for resampling. Column must be datetime-like. level : str or int, optional For a MultiIndex, level (name or number) to use for resampling. `level` must be datetime-like. origin : Timestamp or str, default 'start_day' The timestamp on which to adjust the grouping. The timezone of origin must match the timezone of the index. If string, must be one of the following: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries .. versionadded:: 1.1.0 - 'end': `origin` is the last value of the timeseries - 'end_day': `origin` is the ceiling midnight of the last day .. versionadded:: 1.3.0 offset : Timedelta or str, default is None An offset timedelta added to the origin. .. versionadded:: 1.1.0 group_keys : bool, default False Whether to include the group keys in the result index when using ``.apply()`` on the resampled object. .. versionadded:: 1.5.0 Not specifying ``group_keys`` will retain values-dependent behavior from pandas 1.4 and earlier (see :ref:`pandas 1.5.0 Release notes <whatsnew_150.enhancements.resample_group_keys>` for examples). .. versionchanged:: 2.0.0 ``group_keys`` now defaults to ``False``. Returns ------- pandas.core.Resampler :class:`~pandas.core.Resampler` object. See Also -------- Series.resample : Resample a Series. DataFrame.resample : Resample a DataFrame. groupby : Group {klass} by mapping, function, label, or list of labels. asfreq : Reindex a {klass} with the given frequency without grouping. Notes ----- See the `user guide <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`__ for more. To learn more about the offset strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__. Examples -------- Start by creating a series with 9 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=9, freq='T') >>> series = pd.Series(range(9), index=index) >>> series 2000-01-01 00:00:00 0 2000-01-01 00:01:00 1 2000-01-01 00:02:00 2 2000-01-01 00:03:00 3 2000-01-01 00:04:00 4 2000-01-01 00:05:00 5 2000-01-01 00:06:00 6 2000-01-01 00:07:00 7 2000-01-01 00:08:00 8 Freq: T, dtype: int64 Downsample the series into 3 minute bins and sum the values of the timestamps falling into a bin. >>> series.resample('3T').sum() 2000-01-01 00:00:00 3 2000-01-01 00:03:00 12 2000-01-01 00:06:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but label each bin using the right edge instead of the left. Please note that the value in the bucket used as the label is not included in the bucket, which it labels. For example, in the original series the bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed value in the resampled bucket with the label ``2000-01-01 00:03:00`` does not include 3 (if it did, the summed value would be 6, not 3). To include this value close the right side of the bin interval as illustrated in the example below this one. 
>>> series.resample('3T', label='right').sum() 2000-01-01 00:03:00 3 2000-01-01 00:06:00 12 2000-01-01 00:09:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but close the right side of the bin interval. >>> series.resample('3T', label='right', closed='right').sum() 2000-01-01 00:00:00 0 2000-01-01 00:03:00 6 2000-01-01 00:06:00 15 2000-01-01 00:09:00 15 Freq: 3T, dtype: int64 Upsample the series into 30 second bins. >>> series.resample('30S').asfreq()[0:5] # Select first 5 rows 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 1.0 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 Freq: 30S, dtype: float64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``ffill`` method. >>> series.resample('30S').ffill()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 0 2000-01-01 00:01:00 1 2000-01-01 00:01:30 1 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``bfill`` method. >>> series.resample('30S').bfill()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 1 2000-01-01 00:01:00 1 2000-01-01 00:01:30 2 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Pass a custom function via ``apply`` >>> def custom_resampler(arraylike): ... return np.sum(arraylike) + 5 ... >>> series.resample('3T').apply(custom_resampler) 2000-01-01 00:00:00 8 2000-01-01 00:03:00 17 2000-01-01 00:06:00 26 Freq: 3T, dtype: int64 For a Series with a PeriodIndex, the keyword `convention` can be used to control whether to use the start or end of `rule`. Resample a year by quarter using 'start' `convention`. Values are assigned to the first quarter of the period. >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01', ... freq='A', ... periods=2)) >>> s 2012 1 2013 2 Freq: A-DEC, dtype: int64 >>> s.resample('Q', convention='start').asfreq() 2012Q1 1.0 2012Q2 NaN 2012Q3 NaN 2012Q4 NaN 2013Q1 2.0 2013Q2 NaN 2013Q3 NaN 2013Q4 NaN Freq: Q-DEC, dtype: float64 Resample quarters by month using 'end' `convention`. Values are assigned to the last month of the period. >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01', ... freq='Q', ... periods=4)) >>> q 2018Q1 1 2018Q2 2 2018Q3 3 2018Q4 4 Freq: Q-DEC, dtype: int64 >>> q.resample('M', convention='end').asfreq() 2018-03 1.0 2018-04 NaN 2018-05 NaN 2018-06 2.0 2018-07 NaN 2018-08 NaN 2018-09 3.0 2018-10 NaN 2018-11 NaN 2018-12 4.0 Freq: M, dtype: float64 For DataFrame objects, the keyword `on` can be used to specify the column instead of the index for resampling. >>> d = {{'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}} >>> df = pd.DataFrame(d) >>> df['week_starting'] = pd.date_range('01/01/2018', ... periods=8, ... freq='W') >>> df price volume week_starting 0 10 50 2018-01-07 1 11 60 2018-01-14 2 9 40 2018-01-21 3 13 100 2018-01-28 4 14 50 2018-02-04 5 18 100 2018-02-11 6 17 40 2018-02-18 7 19 50 2018-02-25 >>> df.resample('M', on='week_starting').mean() price volume week_starting 2018-01-31 10.75 62.5 2018-02-28 17.00 60.0 For a DataFrame with MultiIndex, the keyword `level` can be used to specify on which level the resampling needs to take place. >>> days = pd.date_range('1/1/2000', periods=4, freq='D') >>> d2 = {{'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}} >>> df2 = pd.DataFrame( ... d2, ... index=pd.MultiIndex.from_product( ... [days, ['morning', 'afternoon']] ... ) ... 
) >>> df2 price volume 2000-01-01 morning 10 50 afternoon 11 60 2000-01-02 morning 9 40 afternoon 13 100 2000-01-03 morning 14 50 afternoon 18 100 2000-01-04 morning 17 40 afternoon 19 50 >>> df2.resample('D', level=0).sum() price volume 2000-01-01 21 110 2000-01-02 22 140 2000-01-03 32 150 2000-01-04 36 90 If you want to adjust the start of the bins based on a fixed timestamp: >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00' >>> rng = pd.date_range(start, end, freq='7min') >>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng) >>> ts 2000-10-01 23:30:00 0 2000-10-01 23:37:00 3 2000-10-01 23:44:00 6 2000-10-01 23:51:00 9 2000-10-01 23:58:00 12 2000-10-02 00:05:00 15 2000-10-02 00:12:00 18 2000-10-02 00:19:00 21 2000-10-02 00:26:00 24 Freq: 7T, dtype: int64 >>> ts.resample('17min').sum() 2000-10-01 23:14:00 0 2000-10-01 23:31:00 9 2000-10-01 23:48:00 21 2000-10-02 00:05:00 54 2000-10-02 00:22:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', origin='epoch').sum() 2000-10-01 23:18:00 0 2000-10-01 23:35:00 18 2000-10-01 23:52:00 27 2000-10-02 00:09:00 39 2000-10-02 00:26:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', origin='2000-01-01').sum() 2000-10-01 23:24:00 3 2000-10-01 23:41:00 15 2000-10-01 23:58:00 45 2000-10-02 00:15:00 45 Freq: 17T, dtype: int64 If you want to adjust the start of the bins with an `offset` Timedelta, the two following lines are equivalent: >>> ts.resample('17min', origin='start').sum() 2000-10-01 23:30:00 9 2000-10-01 23:47:00 21 2000-10-02 00:04:00 54 2000-10-02 00:21:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', offset='23h30min').sum() 2000-10-01 23:30:00 9 2000-10-01 23:47:00 21 2000-10-02 00:04:00 54 2000-10-02 00:21:00 24 Freq: 17T, dtype: int64 If you want to take the largest Timestamp as the end of the bins: >>> ts.resample('17min', origin='end').sum() 2000-10-01 23:35:00 0 2000-10-01 23:52:00 18 2000-10-02 00:09:00 27 2000-10-02 00:26:00 63 Freq: 17T, dtype: int64 In contrast with the `start_day`, you can use `end_day` to take the ceiling midnight of the largest Timestamp as the end of the bins and drop the bins not containing data: >>> ts.resample('17min', origin='end_day').sum() 2000-10-01 23:38:00 3 2000-10-01 23:55:00 15 2000-10-02 00:12:00 45 2000-10-02 00:29:00 45 Freq: 17T, dtype: int64 """ from pandas.core.resample import get_resampler axis = self._get_axis_number(axis) return get_resampler( cast("Series | DataFrame", self), freq=rule, label=label, closed=closed, axis=axis, kind=kind, convention=convention, key=on, level=level, origin=origin, offset=offset, group_keys=group_keys, ) def first(self: NDFrameT, offset) -> NDFrameT: """ Select initial periods of time series data based on a date offset. When having a DataFrame with dates as index, this function can select the first few rows based on a date offset. Parameters ---------- offset : str, DateOffset or dateutil.relativedelta The offset length of the data that will be selected. For instance, '1M' will display all the rows having their index within the first month. Returns ------- Series or DataFrame A subset of the caller. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- last : Select final periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. 
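Notes ----- The window is anchored at the first index value, so the number of rows returned depends on the calendar span of the offset, not on a fixed row count. For tick-like offsets the selection is half-open: a row falling exactly on ``self.index[0] + offset`` is excluded when that timestamp is present in the index (see the ``searchsorted`` branch in the implementation below).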
Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the first 3 days: >>> ts.first('3D') A 2018-04-09 1 2018-04-11 2 Notice the data for 3 first calendar days were returned, not the first 3 days observed in the dataset, and therefore data for 2018-04-13 was not returned. """ if not isinstance(self.index, DatetimeIndex): raise TypeError("'first' only supports a DatetimeIndex index") if len(self.index) == 0: return self.copy(deep=False) offset = to_offset(offset) if not isinstance(offset, Tick) and offset.is_on_offset(self.index[0]): # GH#29623 if first value is end of period, remove offset with n = 1 # before adding the real offset end_date = end = self.index[0] - offset.base + offset else: end_date = end = self.index[0] + offset # Tick-like, e.g. 3 weeks if isinstance(offset, Tick) and end_date in self.index: end = self.index.searchsorted(end_date, side="left") return self.iloc[:end] return self.loc[:end] def last(self: NDFrameT, offset) -> NDFrameT: """ Select final periods of time series data based on a date offset. For a DataFrame with a sorted DatetimeIndex, this function selects the last few rows based on a date offset. Parameters ---------- offset : str, DateOffset, dateutil.relativedelta The offset length of the data that will be selected. For instance, '3D' will display all the rows having their index within the last 3 days. Returns ------- Series or DataFrame A subset of the caller. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- first : Select initial periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the last 3 days: >>> ts.last('3D') A 2018-04-13 3 2018-04-15 4 Notice the data for 3 last calendar days were returned, not the last 3 observed days in the dataset, and therefore data for 2018-04-11 was not returned. """ if not isinstance(self.index, DatetimeIndex): raise TypeError("'last' only supports a DatetimeIndex index") if len(self.index) == 0: return self.copy(deep=False) offset = to_offset(offset) start_date = self.index[-1] - offset start = self.index.searchsorted(start_date, side="right") return self.iloc[start:] def rank( self: NDFrameT, axis: Axis = 0, method: str = "average", numeric_only: bool_t = False, na_option: str = "keep", ascending: bool_t = True, pct: bool_t = False, ) -> NDFrameT: """ Compute numerical data ranks (1 through n) along axis. By default, equal values are assigned a rank that is the average of the ranks of those values. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 Index to direct ranking. For `Series` this parameter is unused and defaults to 0. method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' How to rank the group of records that have the same value (i.e. ties): * average: average rank of the group * min: lowest rank in the group * max: highest rank in the group * first: ranks assigned in order they appear in the array * dense: like 'min', but rank always increases by 1 between groups. numeric_only : bool, default False For DataFrame objects, rank only numeric columns if set to True. .. 
versionchanged:: 2.0.0 The default value of ``numeric_only`` is now ``False``. na_option : {'keep', 'top', 'bottom'}, default 'keep' How to rank NaN values: * keep: assign NaN rank to NaN values * top: assign lowest rank to NaN values * bottom: assign highest rank to NaN values ascending : bool, default True Whether or not the elements should be ranked in ascending order. pct : bool, default False Whether or not to display the returned rankings in percentile form. Returns ------- same type as caller Return a Series or DataFrame with data ranks as values. See Also -------- core.groupby.DataFrameGroupBy.rank : Rank of values within each group. core.groupby.SeriesGroupBy.rank : Rank of values within each group. Examples -------- >>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog', ... 'spider', 'snake'], ... 'Number_legs': [4, 2, 4, 8, np.nan]}) >>> df Animal Number_legs 0 cat 4.0 1 penguin 2.0 2 dog 4.0 3 spider 8.0 4 snake NaN Ties are assigned the mean of the ranks (by default) for the group. >>> s = pd.Series(range(5), index=list("abcde")) >>> s["d"] = s["b"] >>> s.rank() a 1.0 b 2.5 c 4.0 d 2.5 e 5.0 dtype: float64 The following example shows how the method behaves with the above parameters: * default_rank: this is the default behaviour obtained without using any parameter. * max_rank: setting ``method = 'max'`` the records that have the same values are ranked using the highest rank (e.g.: since 'cat' and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.) * NA_bottom: choosing ``na_option = 'bottom'``, if there are records with NaN values they are placed at the bottom of the ranking. * pct_rank: when setting ``pct = True``, the ranking is expressed as percentile rank. >>> df['default_rank'] = df['Number_legs'].rank() >>> df['max_rank'] = df['Number_legs'].rank(method='max') >>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom') >>> df['pct_rank'] = df['Number_legs'].rank(pct=True) >>> df Animal Number_legs default_rank max_rank NA_bottom pct_rank 0 cat 4.0 2.5 3.0 2.5 0.625 1 penguin 2.0 1.0 1.0 1.0 0.250 2 dog 4.0 2.5 3.0 2.5 0.625 3 spider 8.0 4.0 4.0 4.0 1.000 4 snake NaN NaN NaN 5.0 NaN """ axis_int = self._get_axis_number(axis) if na_option not in {"keep", "top", "bottom"}: msg = "na_option must be one of 'keep', 'top', or 'bottom'" raise ValueError(msg) def ranker(data): if data.ndim == 2: # i.e. DataFrame, we cast to ndarray values = data.values else: # i.e. Series, can dispatch to EA values = data._values if isinstance(values, ExtensionArray): ranks = values._rank( axis=axis_int, method=method, ascending=ascending, na_option=na_option, pct=pct, ) else: ranks = algos.rank( values, axis=axis_int, method=method, ascending=ascending, na_option=na_option, pct=pct, ) ranks_obj = self._constructor(ranks, **data._construct_axes_dict()) return ranks_obj.__finalize__(self, method="rank") if numeric_only: if self.ndim == 1 and not is_numeric_dtype(self.dtype): # GH#47500 raise TypeError( "Series.rank does not allow numeric_only=True with " "non-numeric dtype." 
) data = self._get_numeric_data() else: data = self return ranker(data) def compare( self, other, align_axis: Axis = 1, keep_shape: bool_t = False, keep_equal: bool_t = False, result_names: Suffixes = ("self", "other"), ): if type(self) is not type(other): cls_self, cls_other = type(self).__name__, type(other).__name__ raise TypeError( f"can only compare '{cls_self}' (not '{cls_other}') with '{cls_self}'" ) mask = ~((self == other) | (self.isna() & other.isna())) mask.fillna(True, inplace=True) if not keep_equal: self = self.where(mask) other = other.where(mask) if not keep_shape: if isinstance(self, ABCDataFrame): cmask = mask.any() rmask = mask.any(axis=1) self = self.loc[rmask, cmask] other = other.loc[rmask, cmask] else: self = self[mask] other = other[mask] if not isinstance(result_names, tuple): raise TypeError( f"Passing 'result_names' as a {type(result_names)} is not " "supported. Provide 'result_names' as a tuple instead." ) if align_axis in (1, "columns"): # This is needed for Series axis = 1 else: axis = self._get_axis_number(align_axis) diff = concat([self, other], axis=axis, keys=result_names) if axis >= self.ndim: # No need to reorganize data if stacking on new axis # This currently applies for stacking two Series on columns return diff ax = diff._get_axis(axis) ax_names = np.array(ax.names) # set index names to positions to avoid confusion ax.names = np.arange(len(ax_names)) # bring self-other to inner level order = list(range(1, ax.nlevels)) + [0] if isinstance(diff, ABCDataFrame): diff = diff.reorder_levels(order, axis=axis) else: diff = diff.reorder_levels(order) # restore the index names in order diff._get_axis(axis=axis).names = ax_names[order] # reorder axis to keep things organized indices = ( np.arange(diff.shape[axis]).reshape([2, diff.shape[axis] // 2]).T.flatten() ) diff = diff.take(indices, axis=axis) return diff def align( self: NDFrameT, other: NDFrameT, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool_t | None = None, fill_value: Hashable = None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> NDFrameT: """ Align two objects on their axes with the specified join method. Join method is specified for each axis Index. Parameters ---------- other : DataFrame or Series join : {{'outer', 'inner', 'left', 'right'}}, default 'outer' axis : allowed axis of the other object, default None Align on index (0), columns (1), or both (None). level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. copy : bool, default True Always returns new objects. If copy=False and no reindexing is required then original objects are returned. fill_value : scalar, default np.NaN Value to use for missing values. Defaults to NaN, but can be any "compatible" value. method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None Method to use for filling holes in reindexed Series: - pad / ffill: propagate last valid observation forward to next valid. - backfill / bfill: use NEXT valid observation to fill gap. limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. 
fill_axis : {axes_single_arg}, default 0 Filling axis, method and limit. broadcast_axis : {axes_single_arg}, default None Broadcast values along this axis, if aligning two objects of different dimensions. Returns ------- tuple of ({klass}, type of other) Aligned objects. Examples -------- >>> df = pd.DataFrame( ... [[1, 2, 3, 4], [6, 7, 8, 9]], columns=["D", "B", "E", "A"], index=[1, 2] ... ) >>> other = pd.DataFrame( ... [[10, 20, 30, 40], [60, 70, 80, 90], [600, 700, 800, 900]], ... columns=["A", "B", "C", "D"], ... index=[2, 3, 4], ... ) >>> df D B E A 1 1 2 3 4 2 6 7 8 9 >>> other A B C D 2 10 20 30 40 3 60 70 80 90 4 600 700 800 900 Align on columns: >>> left, right = df.align(other, join="outer", axis=1) >>> left A B C D E 1 4 2 NaN 1 3 2 9 7 NaN 6 8 >>> right A B C D E 2 10 20 30 40 NaN 3 60 70 80 90 NaN 4 600 700 800 900 NaN We can also align on the index: >>> left, right = df.align(other, join="outer", axis=0) >>> left D B E A 1 1.0 2.0 3.0 4.0 2 6.0 7.0 8.0 9.0 3 NaN NaN NaN NaN 4 NaN NaN NaN NaN >>> right A B C D 1 NaN NaN NaN NaN 2 10.0 20.0 30.0 40.0 3 60.0 70.0 80.0 90.0 4 600.0 700.0 800.0 900.0 Finally, the default `axis=None` will align on both index and columns: >>> left, right = df.align(other, join="outer", axis=None) >>> left A B C D E 1 4.0 2.0 NaN 1.0 3.0 2 9.0 7.0 NaN 6.0 8.0 3 NaN NaN NaN NaN NaN 4 NaN NaN NaN NaN NaN >>> right A B C D E 1 NaN NaN NaN NaN NaN 2 10.0 20.0 30.0 40.0 NaN 3 60.0 70.0 80.0 90.0 NaN 4 600.0 700.0 800.0 900.0 NaN """ method = clean_fill_method(method) if broadcast_axis == 1 and self.ndim != other.ndim: if isinstance(self, ABCSeries): # this means other is a DataFrame, and we need to broadcast # self cons = self._constructor_expanddim df = cons( {c: self for c in other.columns}, **other._construct_axes_dict() ) return df._align_frame( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) elif isinstance(other, ABCSeries): # this means self is a DataFrame, and we need to broadcast # other cons = other._constructor_expanddim df = cons( {c: other for c in self.columns}, **self._construct_axes_dict() ) return self._align_frame( df, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) if axis is not None: axis = self._get_axis_number(axis) if isinstance(other, ABCDataFrame): return self._align_frame( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) elif isinstance(other, ABCSeries): return self._align_series( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def _align_frame( self, other, join: AlignJoin = "outer", axis: Axis | None = None, level=None, copy: bool_t | None = None, fill_value=None, method=None, limit=None, fill_axis: Axis = 0, ): # defaults join_index, join_columns = None, None ilidx, iridx = None, None clidx, cridx = None, None is_series = isinstance(self, ABCSeries) if (axis is None or axis == 0) and not self.index.equals(other.index): join_index, ilidx, iridx = self.index.join( other.index, how=join, level=level, return_indexers=True ) if ( (axis is None or axis == 1) and not is_series and not self.columns.equals(other.columns) ): join_columns, clidx, cridx = self.columns.join( other.columns, how=join, level=level, return_indexers=True ) if is_series: reindexers 
= {0: [join_index, ilidx]} else: reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]} left = self._reindex_with_indexers( reindexers, copy=copy, fill_value=fill_value, allow_dups=True ) # other must be always DataFrame right = other._reindex_with_indexers( {0: [join_index, iridx], 1: [join_columns, cridx]}, copy=copy, fill_value=fill_value, allow_dups=True, ) if method is not None: _left = left.fillna(method=method, axis=fill_axis, limit=limit) assert _left is not None # needed for mypy left = _left right = right.fillna(method=method, axis=fill_axis, limit=limit) # if DatetimeIndex have different tz, convert to UTC left, right = _align_as_utc(left, right, join_index) return ( left.__finalize__(self), right.__finalize__(other), ) def _align_series( self, other, join: AlignJoin = "outer", axis: Axis | None = None, level=None, copy: bool_t | None = None, fill_value=None, method=None, limit=None, fill_axis: Axis = 0, ): is_series = isinstance(self, ABCSeries) if copy and using_copy_on_write(): copy = False if (not is_series and axis is None) or axis not in [None, 0, 1]: raise ValueError("Must specify axis=0 or 1") if is_series and axis == 1: raise ValueError("cannot align series to a series other than axis 0") # series/series compat, other must always be a Series if not axis: # equal if self.index.equals(other.index): join_index, lidx, ridx = None, None, None else: join_index, lidx, ridx = self.index.join( other.index, how=join, level=level, return_indexers=True ) if is_series: left = self._reindex_indexer(join_index, lidx, copy) elif lidx is None or join_index is None: left = self.copy(deep=copy) else: left = self._constructor( self._mgr.reindex_indexer(join_index, lidx, axis=1, copy=copy) ) right = other._reindex_indexer(join_index, ridx, copy) else: # one has > 1 ndim fdata = self._mgr join_index = self.axes[1] lidx, ridx = None, None if not join_index.equals(other.index): join_index, lidx, ridx = join_index.join( other.index, how=join, level=level, return_indexers=True ) if lidx is not None: bm_axis = self._get_block_manager_axis(1) fdata = fdata.reindex_indexer(join_index, lidx, axis=bm_axis) if copy and fdata is self._mgr: fdata = fdata.copy() left = self._constructor(fdata) if ridx is None: right = other.copy(deep=copy) else: right = other.reindex(join_index, level=level) # fill fill_na = notna(fill_value) or (method is not None) if fill_na: left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis) right = right.fillna(fill_value, method=method, limit=limit) # if DatetimeIndex have different tz, convert to UTC if is_series or (not is_series and axis == 0): left, right = _align_as_utc(left, right, join_index) return ( left.__finalize__(self), right.__finalize__(other), ) def _where( self, cond, other=lib.no_default, inplace: bool_t = False, axis: Axis | None = None, level=None, ): """ Equivalent to public method `where`, except that `other` is not applied as a function even if callable. Used in __setitem__. 
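Unlike the public methods, ``cond`` is aligned to ``self`` with ``join="right"``, so the result always keeps ``self``'s row and column labels.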
""" inplace = validate_bool_kwarg(inplace, "inplace") if axis is not None: axis = self._get_axis_number(axis) # align the cond to same shape as myself cond = common.apply_if_callable(cond, self) if isinstance(cond, NDFrame): # CoW: Make sure reference is not kept alive cond = cond.align(self, join="right", broadcast_axis=1, copy=False)[0] else: if not hasattr(cond, "shape"): cond = np.asanyarray(cond) if cond.shape != self.shape: raise ValueError("Array conditional must be same shape as self") cond = self._constructor(cond, **self._construct_axes_dict(), copy=False) # make sure we are boolean fill_value = bool(inplace) cond = cond.fillna(fill_value) msg = "Boolean array expected for the condition, not {dtype}" if not cond.empty: if not isinstance(cond, ABCDataFrame): # This is a single-dimensional object. if not is_bool_dtype(cond): raise ValueError(msg.format(dtype=cond.dtype)) else: for _dt in cond.dtypes: if not is_bool_dtype(_dt): raise ValueError(msg.format(dtype=_dt)) else: # GH#21947 we have an empty DataFrame/Series, could be object-dtype cond = cond.astype(bool) cond = -cond if inplace else cond cond = cond.reindex(self._info_axis, axis=self._info_axis_number, copy=False) # try to align with other if isinstance(other, NDFrame): # align with me if other.ndim <= self.ndim: # CoW: Make sure reference is not kept alive other = self.align( other, join="left", axis=axis, level=level, fill_value=None, copy=False, )[1] # if we are NOT aligned, raise as we cannot where index if axis is None and not other._indexed_same(self): raise InvalidIndexError if other.ndim < self.ndim: # TODO(EA2D): avoid object-dtype cast in EA case GH#38729 other = other._values if axis == 0: other = np.reshape(other, (-1, 1)) elif axis == 1: other = np.reshape(other, (1, -1)) other = np.broadcast_to(other, self.shape) # slice me out of the other else: raise NotImplementedError( "cannot align with a higher dimensional NDFrame" ) elif not isinstance(other, (MultiIndex, NDFrame)): # mainly just catching Index here other = extract_array(other, extract_numpy=True) if isinstance(other, (np.ndarray, ExtensionArray)): if other.shape != self.shape: if self.ndim != 1: # In the ndim == 1 case we may have # other length 1, which we treat as scalar (GH#2745, GH#4192) # or len(other) == icond.sum(), which we treat like # __setitem__ (GH#3235) raise ValueError( "other must be the same shape as self when an ndarray" ) # we are the same shape, so create an actual object for alignment else: other = self._constructor( other, **self._construct_axes_dict(), copy=False ) if axis is None: axis = 0 if self.ndim == getattr(other, "ndim", 0): align = True else: align = self._get_axis_number(axis) == 1 if inplace: # we may have different type blocks come out of putmask, so # reconstruct the block manager self._check_inplace_setting(other) new_data = self._mgr.putmask(mask=cond, new=other, align=align) result = self._constructor(new_data) return self._update_inplace(result) else: new_data = self._mgr.where( other=other, cond=cond, align=align, ) result = self._constructor(new_data) return result.__finalize__(self) def where( self: NDFrameT, cond, other=..., *, inplace: Literal[False] = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT: ... def where( self, cond, other=..., *, inplace: Literal[True], axis: Axis | None = ..., level: Level = ..., ) -> None: ... def where( self: NDFrameT, cond, other=..., *, inplace: bool_t = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT | None: ... 
klass=_shared_doc_kwargs["klass"], cond="True", cond_rev="False", name="where", name_other="mask", ) def where( self: NDFrameT, cond, other=np.nan, *, inplace: bool_t = False, axis: Axis | None = None, level: Level = None, ) -> NDFrameT | None: """ Replace values where the condition is {cond_rev}. Parameters ---------- cond : bool {klass}, array-like, or callable Where `cond` is {cond}, keep the original value. Where {cond_rev}, replace with corresponding value from `other`. If `cond` is callable, it is computed on the {klass} and should return boolean {klass} or array. The callable must not change input {klass} (though pandas doesn't check it). other : scalar, {klass}, or callable Entries where `cond` is {cond_rev} are replaced with corresponding value from `other`. If other is callable, it is computed on the {klass} and should return scalar or {klass}. The callable must not change input {klass} (though pandas doesn't check it). If not specified, entries will be filled with the corresponding NULL value (``np.nan`` for numpy dtypes, ``pd.NA`` for extension dtypes). inplace : bool, default False Whether to perform the operation in place on the data. axis : int, default None Alignment axis if needed. For `Series` this parameter is unused and defaults to 0. level : int, default None Alignment level if needed. Returns ------- Same type as caller or None if ``inplace=True``. See Also -------- :func:`DataFrame.{name_other}` : Return an object of same shape as self. Notes ----- The {name} method is an application of the if-then idiom. For each element in the calling DataFrame, if ``cond`` is ``{cond}`` the element is used; otherwise the corresponding element from the DataFrame ``other`` is used. If the axis of ``other`` does not align with axis of ``cond`` {klass}, the misaligned index positions will be filled with {cond_rev}. The signature for :func:`DataFrame.where` differs from :func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to ``np.where(m, df1, df2)``. For further details and examples see the ``{name}`` documentation in :ref:`indexing <indexing.where_mask>`. The dtype of the object takes precedence. The fill value is casted to the object's dtype, if this can be done losslessly. Examples -------- >>> s = pd.Series(range(5)) >>> s.where(s > 0) 0 NaN 1 1.0 2 2.0 3 3.0 4 4.0 dtype: float64 >>> s.mask(s > 0) 0 0.0 1 NaN 2 NaN 3 NaN 4 NaN dtype: float64 >>> s = pd.Series(range(5)) >>> t = pd.Series([True, False]) >>> s.where(t, 99) 0 0 1 99 2 99 3 99 4 99 dtype: int64 >>> s.mask(t, 99) 0 99 1 1 2 99 3 99 4 99 dtype: int64 >>> s.where(s > 1, 10) 0 10 1 10 2 2 3 3 4 4 dtype: int64 >>> s.mask(s > 1, 10) 0 0 1 1 2 10 3 10 4 10 dtype: int64 >>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B']) >>> df A B 0 0 1 1 2 3 2 4 5 3 6 7 4 8 9 >>> m = df % 3 == 0 >>> df.where(m, -df) A B 0 0 -1 1 -2 3 2 -4 -5 3 6 -7 4 -8 9 >>> df.where(m, -df) == np.where(m, df, -df) A B 0 True True 1 True True 2 True True 3 True True 4 True True >>> df.where(m, -df) == df.mask(~m, -df) A B 0 True True 1 True True 2 True True 3 True True 4 True True """ other = common.apply_if_callable(other, self) return self._where(cond, other, inplace, axis, level) def mask( self: NDFrameT, cond, other=..., *, inplace: Literal[False] = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT: ... def mask( self, cond, other=..., *, inplace: Literal[True], axis: Axis | None = ..., level: Level = ..., ) -> None: ... 
def mask( self: NDFrameT, cond, other=..., *, inplace: bool_t = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT | None: ... where, klass=_shared_doc_kwargs["klass"], cond="False", cond_rev="True", name="mask", name_other="where", ) def mask( self: NDFrameT, cond, other=lib.no_default, *, inplace: bool_t = False, axis: Axis | None = None, level: Level = None, ) -> NDFrameT | None: inplace = validate_bool_kwarg(inplace, "inplace") cond = common.apply_if_callable(cond, self) # see gh-21891 if not hasattr(cond, "__invert__"): cond = np.array(cond) return self.where( ~cond, other=other, inplace=inplace, axis=axis, level=level, ) def shift( self: NDFrameT, periods: int = 1, freq=None, axis: Axis = 0, fill_value: Hashable = None, ) -> NDFrameT: """ Shift index by desired number of periods with an optional time `freq`. When `freq` is not passed, shift the index without realigning the data. If `freq` is passed (in this case, the index must be date or datetime, or it will raise a `NotImplementedError`), the index will be increased using the periods and the `freq`. `freq` can be inferred when specified as "infer" as long as either freq or inferred_freq attribute is set in the index. Parameters ---------- periods : int Number of periods to shift. Can be positive or negative. freq : DateOffset, tseries.offsets, timedelta, or str, optional Offset to use from the tseries module or time rule (e.g. 'EOM'). If `freq` is specified then the index values are shifted but the data is not realigned. That is, use `freq` if you would like to extend the index when shifting and preserve the original data. If `freq` is specified as "infer" then it will be inferred from the freq or inferred_freq attributes of the index. If neither of those attributes exist, a ValueError is thrown. axis : {{0 or 'index', 1 or 'columns', None}}, default None Shift direction. For `Series` this parameter is unused and defaults to 0. fill_value : object, optional The scalar value to use for newly introduced missing values. the default depends on the dtype of `self`. For numeric data, ``np.nan`` is used. For datetime, timedelta, or period data, etc. :attr:`NaT` is used. For extension dtypes, ``self.dtype.na_value`` is used. .. versionchanged:: 1.1.0 Returns ------- {klass} Copy of input object, shifted. See Also -------- Index.shift : Shift values of Index. DatetimeIndex.shift : Shift values of DatetimeIndex. PeriodIndex.shift : Shift values of PeriodIndex. Examples -------- >>> df = pd.DataFrame({{"Col1": [10, 20, 15, 30, 45], ... "Col2": [13, 23, 18, 33, 48], ... "Col3": [17, 27, 22, 37, 52]}}, ... 
index=pd.date_range("2020-01-01", "2020-01-05")) >>> df Col1 Col2 Col3 2020-01-01 10 13 17 2020-01-02 20 23 27 2020-01-03 15 18 22 2020-01-04 30 33 37 2020-01-05 45 48 52 >>> df.shift(periods=3) Col1 Col2 Col3 2020-01-01 NaN NaN NaN 2020-01-02 NaN NaN NaN 2020-01-03 NaN NaN NaN 2020-01-04 10.0 13.0 17.0 2020-01-05 20.0 23.0 27.0 >>> df.shift(periods=1, axis="columns") Col1 Col2 Col3 2020-01-01 NaN 10 13 2020-01-02 NaN 20 23 2020-01-03 NaN 15 18 2020-01-04 NaN 30 33 2020-01-05 NaN 45 48 >>> df.shift(periods=3, fill_value=0) Col1 Col2 Col3 2020-01-01 0 0 0 2020-01-02 0 0 0 2020-01-03 0 0 0 2020-01-04 10 13 17 2020-01-05 20 23 27 >>> df.shift(periods=3, freq="D") Col1 Col2 Col3 2020-01-04 10 13 17 2020-01-05 20 23 27 2020-01-06 15 18 22 2020-01-07 30 33 37 2020-01-08 45 48 52 >>> df.shift(periods=3, freq="infer") Col1 Col2 Col3 2020-01-04 10 13 17 2020-01-05 20 23 27 2020-01-06 15 18 22 2020-01-07 30 33 37 2020-01-08 45 48 52 """ if periods == 0: return self.copy(deep=None) if freq is None: # when freq is None, data is shifted, index is not axis = self._get_axis_number(axis) new_data = self._mgr.shift( periods=periods, axis=axis, fill_value=fill_value ) return self._constructor(new_data).__finalize__(self, method="shift") # when freq is given, index is shifted, data is not index = self._get_axis(axis) if freq == "infer": freq = getattr(index, "freq", None) if freq is None: freq = getattr(index, "inferred_freq", None) if freq is None: msg = "Freq was not set in the index hence cannot be inferred" raise ValueError(msg) elif isinstance(freq, str): freq = to_offset(freq) if isinstance(index, PeriodIndex): orig_freq = to_offset(index.freq) if freq != orig_freq: assert orig_freq is not None # for mypy raise ValueError( f"Given freq {freq.rule_code} does not match " f"PeriodIndex freq {orig_freq.rule_code}" ) new_ax = index.shift(periods) else: new_ax = index.shift(periods, freq) result = self.set_axis(new_ax, axis=axis) return result.__finalize__(self, method="shift") def truncate( self: NDFrameT, before=None, after=None, axis: Axis | None = None, copy: bool_t | None = None, ) -> NDFrameT: """ Truncate a Series or DataFrame before and after some index value. This is a useful shorthand for boolean indexing based on index values above or below certain thresholds. Parameters ---------- before : date, str, int Truncate all rows before this index value. after : date, str, int Truncate all rows after this index value. axis : {0 or 'index', 1 or 'columns'}, optional Axis to truncate. Truncates the index (rows) by default. For `Series` this parameter is unused and defaults to 0. copy : bool, default is True, Return a copy of the truncated section. Returns ------- type of caller The truncated Series or DataFrame. See Also -------- DataFrame.loc : Select a subset of a DataFrame by label. DataFrame.iloc : Select a subset of a DataFrame by position. Notes ----- If the index being truncated contains only datetime values, `before` and `after` may be specified as strings instead of Timestamps. Examples -------- >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'], ... 'B': ['f', 'g', 'h', 'i', 'j'], ... 'C': ['k', 'l', 'm', 'n', 'o']}, ... index=[1, 2, 3, 4, 5]) >>> df A B C 1 a f k 2 b g l 3 c h m 4 d i n 5 e j o >>> df.truncate(before=2, after=4) A B C 2 b g l 3 c h m 4 d i n The columns of a DataFrame can be truncated. >>> df.truncate(before="A", after="B", axis="columns") A B 1 a f 2 b g 3 c h 4 d i 5 e j For Series, only rows can be truncated. 
>>> df['A'].truncate(before=2, after=4) 2 b 3 c 4 d Name: A, dtype: object The index values in ``truncate`` can be datetimes or string dates. >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s') >>> df = pd.DataFrame(index=dates, data={'A': 1}) >>> df.tail() A 2016-01-31 23:59:56 1 2016-01-31 23:59:57 1 2016-01-31 23:59:58 1 2016-01-31 23:59:59 1 2016-02-01 00:00:00 1 >>> df.truncate(before=pd.Timestamp('2016-01-05'), ... after=pd.Timestamp('2016-01-10')).tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Because the index is a DatetimeIndex containing only dates, we can specify `before` and `after` as strings. They will be coerced to Timestamps before truncation. >>> df.truncate('2016-01-05', '2016-01-10').tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Note that ``truncate`` assumes a 0 value for any unspecified time component (midnight). This differs from partial string slicing, which returns any partially matching dates. >>> df.loc['2016-01-05':'2016-01-10', :].tail() A 2016-01-10 23:59:55 1 2016-01-10 23:59:56 1 2016-01-10 23:59:57 1 2016-01-10 23:59:58 1 2016-01-10 23:59:59 1 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) ax = self._get_axis(axis) # GH 17935 # Check that index is sorted if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing: raise ValueError("truncate requires a sorted index") # if we have a date index, convert to dates, otherwise # treat like a slice if ax._is_all_dates: from pandas.core.tools.datetimes import to_datetime before = to_datetime(before) after = to_datetime(after) if before is not None and after is not None and before > after: raise ValueError(f"Truncate: {after} must be after {before}") if len(ax) > 1 and ax.is_monotonic_decreasing and ax.nunique() > 1: before, after = after, before slicer = [slice(None, None)] * self._AXIS_LEN slicer[axis] = slice(before, after) result = self.loc[tuple(slicer)] if isinstance(ax, MultiIndex): setattr(result, self._get_axis_name(axis), ax.truncate(before, after)) result = result.copy(deep=copy and not using_copy_on_write()) return result def tz_convert( self: NDFrameT, tz, axis: Axis = 0, level=None, copy: bool_t | None = None ) -> NDFrameT: """ Convert tz-aware axis to target time zone. Parameters ---------- tz : str or tzinfo object or None Target time zone. Passing ``None`` will convert to UTC and remove the timezone information. axis : {{0 or 'index', 1 or 'columns'}}, default 0 The axis to convert level : int, str, default None If axis is a MultiIndex, convert a specific level. Otherwise must be None. copy : bool, default True Also make a copy of the underlying data. Returns ------- {klass} Object with time zone converted axis. Raises ------ TypeError If the axis is tz-naive. Examples -------- Change to another time zone: >>> s = pd.Series( ... [1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00']), ... ) >>> s.tz_convert('Asia/Shanghai') 2018-09-15 07:30:00+08:00 1 dtype: int64 Pass None to convert to UTC and get a tz-naive index: >>> s = pd.Series([1], ... 
index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00'])) >>> s.tz_convert(None) 2018-09-14 23:30:00 1 dtype: int64 """ axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_convert(ax, tz): if not hasattr(ax, "tz_convert"): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError( f"{ax_name} is not a valid DatetimeIndex or PeriodIndex" ) ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_convert(tz) return ax # if a level is given it must be a MultiIndex level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_convert(ax.levels[level], tz) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError(f"The level {level} is not valid") ax = _tz_convert(ax, tz) result = self.copy(deep=copy and not using_copy_on_write()) result = result.set_axis(ax, axis=axis, copy=False) return result.__finalize__(self, method="tz_convert") def tz_localize( self: NDFrameT, tz, axis: Axis = 0, level=None, copy: bool_t | None = None, ambiguous: TimeAmbiguous = "raise", nonexistent: TimeNonexistent = "raise", ) -> NDFrameT: """ Localize tz-naive index of a Series or DataFrame to target time zone. This operation localizes the Index. To localize the values in a timezone-naive Series, use :meth:`Series.dt.tz_localize`. Parameters ---------- tz : str or tzinfo or None Time zone to localize. Passing ``None`` will remove the time zone information and preserve local time. axis : {{0 or 'index', 1 or 'columns'}}, default 0 The axis to localize level : int, str, default None If axis ia a MultiIndex, localize a specific level. Otherwise must be None. copy : bool, default True Also make a copy of the underlying data. ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' When clocks moved backward due to DST, ambiguous times may arise. For example in Central European Time (UTC+01), when going from 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter dictates how ambiguous times should be handled. - 'infer' will attempt to infer fall dst-transition hours based on order - bool-ndarray where True signifies a DST time, False designates a non-DST time (note that this flag is only applicable for ambiguous times) - 'NaT' will return NaT where there are ambiguous times - 'raise' will raise an AmbiguousTimeError if there are ambiguous times. nonexistent : str, default 'raise' A nonexistent time does not exist in a particular timezone where clocks moved forward due to DST. Valid values are: - 'shift_forward' will shift the nonexistent time forward to the closest existing time - 'shift_backward' will shift the nonexistent time backward to the closest existing time - 'NaT' will return NaT where there are nonexistent times - timedelta objects will shift nonexistent times by the timedelta - 'raise' will raise an NonExistentTimeError if there are nonexistent times. Returns ------- {klass} Same type as the input. Raises ------ TypeError If the TimeSeries is tz-aware and tz is not None. Examples -------- Localize local times: >>> s = pd.Series( ... [1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00']), ... ) >>> s.tz_localize('CET') 2018-09-15 01:30:00+02:00 1 dtype: int64 Pass None to convert to tz-naive index and preserve local time: >>> s = pd.Series([1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00'])) >>> s.tz_localize(None) 2018-09-15 01:30:00 1 dtype: int64 Be careful with DST changes. 
When there is sequential data, pandas can infer the DST time: >>> s = pd.Series(range(7), ... index=pd.DatetimeIndex(['2018-10-28 01:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 03:00:00', ... '2018-10-28 03:30:00'])) >>> s.tz_localize('CET', ambiguous='infer') 2018-10-28 01:30:00+02:00 0 2018-10-28 02:00:00+02:00 1 2018-10-28 02:30:00+02:00 2 2018-10-28 02:00:00+01:00 3 2018-10-28 02:30:00+01:00 4 2018-10-28 03:00:00+01:00 5 2018-10-28 03:30:00+01:00 6 dtype: int64 In some cases, inferring the DST is impossible. In such cases, you can pass an ndarray to the ambiguous parameter to set the DST explicitly >>> s = pd.Series(range(3), ... index=pd.DatetimeIndex(['2018-10-28 01:20:00', ... '2018-10-28 02:36:00', ... '2018-10-28 03:46:00'])) >>> s.tz_localize('CET', ambiguous=np.array([True, True, False])) 2018-10-28 01:20:00+02:00 0 2018-10-28 02:36:00+02:00 1 2018-10-28 03:46:00+01:00 2 dtype: int64 If the DST transition causes nonexistent times, you can shift these dates forward or backward with a timedelta object or `'shift_forward'` or `'shift_backward'`. >>> s = pd.Series(range(2), ... index=pd.DatetimeIndex(['2015-03-29 02:30:00', ... '2015-03-29 03:30:00'])) >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward') 2015-03-29 03:00:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward') 2015-03-29 01:59:59.999999999+01:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H')) 2015-03-29 03:30:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 """ nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward") if nonexistent not in nonexistent_options and not isinstance( nonexistent, dt.timedelta ): raise ValueError( "The nonexistent argument must be one of 'raise', " "'NaT', 'shift_forward', 'shift_backward' or " "a timedelta object" ) axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_localize(ax, tz, ambiguous, nonexistent): if not hasattr(ax, "tz_localize"): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError( f"{ax_name} is not a valid DatetimeIndex or PeriodIndex" ) ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent) return ax # if a level is given it must be a MultiIndex level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError(f"The level {level} is not valid") ax = _tz_localize(ax, tz, ambiguous, nonexistent) result = self.copy(deep=copy and not using_copy_on_write()) result = result.set_axis(ax, axis=axis, copy=False) return result.__finalize__(self, method="tz_localize") # ---------------------------------------------------------------------- # Numeric Methods def describe( self: NDFrameT, percentiles=None, include=None, exclude=None, ) -> NDFrameT: """ Generate descriptive statistics. Descriptive statistics include those that summarize the central tendency, dispersion and shape of a dataset's distribution, excluding ``NaN`` values. Analyzes both numeric and object series, as well as ``DataFrame`` column sets of mixed data types. The output will vary depending on what is provided. Refer to the notes below for more detail. 
Parameters ---------- percentiles : list-like of numbers, optional The percentiles to include in the output. All should fall between 0 and 1. The default is ``[.25, .5, .75]``, which returns the 25th, 50th, and 75th percentiles. include : 'all', list-like of dtypes or None (default), optional A white list of data types to include in the result. Ignored for ``Series``. Here are the options: - 'all' : All columns of the input will be included in the output. - A list-like of dtypes : Limits the results to the provided data types. To limit the result to numeric types submit ``numpy.number``. To limit it instead to object columns submit the ``numpy.object`` data type. Strings can also be used in the style of ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To select pandas categorical columns, use ``'category'`` - None (default) : The result will include all numeric columns. exclude : list-like of dtypes or None (default), optional, A black list of data types to omit from the result. Ignored for ``Series``. Here are the options: - A list-like of dtypes : Excludes the provided data types from the result. To exclude numeric types submit ``numpy.number``. To exclude object columns submit the data type ``numpy.object``. Strings can also be used in the style of ``select_dtypes`` (e.g. ``df.describe(exclude=['O'])``). To exclude pandas categorical columns, use ``'category'`` - None (default) : The result will exclude nothing. Returns ------- Series or DataFrame Summary statistics of the Series or Dataframe provided. See Also -------- DataFrame.count: Count number of non-NA/null observations. DataFrame.max: Maximum of the values in the object. DataFrame.min: Minimum of the values in the object. DataFrame.mean: Mean of the values. DataFrame.std: Standard deviation of the observations. DataFrame.select_dtypes: Subset of a DataFrame including/excluding columns based on their dtype. Notes ----- For numeric data, the result's index will include ``count``, ``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and upper percentiles. By default the lower percentile is ``25`` and the upper percentile is ``75``. The ``50`` percentile is the same as the median. For object data (e.g. strings or timestamps), the result's index will include ``count``, ``unique``, ``top``, and ``freq``. The ``top`` is the most common value. The ``freq`` is the most common value's frequency. Timestamps also include the ``first`` and ``last`` items. If multiple object values have the highest count, then the ``count`` and ``top`` results will be arbitrarily chosen from among those with the highest count. For mixed data types provided via a ``DataFrame``, the default is to return only an analysis of numeric columns. If the dataframe consists only of object and categorical data without any numeric columns, the default is to return an analysis of both the object and categorical columns. If ``include='all'`` is provided as an option, the result will include a union of attributes of each type. The `include` and `exclude` parameters can be used to limit which columns in a ``DataFrame`` are analyzed for the output. The parameters are ignored when analyzing a ``Series``. Examples -------- Describing a numeric ``Series``. >>> s = pd.Series([1, 2, 3]) >>> s.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 dtype: float64 Describing a categorical ``Series``. >>> s = pd.Series(['a', 'a', 'b', 'c']) >>> s.describe() count 4 unique 3 top a freq 2 dtype: object Describing a timestamp ``Series``. 
>>> s = pd.Series([ ... np.datetime64("2000-01-01"), ... np.datetime64("2010-01-01"), ... np.datetime64("2010-01-01") ... ]) >>> s.describe() count 3 mean 2006-09-01 08:00:00 min 2000-01-01 00:00:00 25% 2004-12-31 12:00:00 50% 2010-01-01 00:00:00 75% 2010-01-01 00:00:00 max 2010-01-01 00:00:00 dtype: object Describing a ``DataFrame``. By default only numeric fields are returned. >>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']), ... 'numeric': [1, 2, 3], ... 'object': ['a', 'b', 'c'] ... }) >>> df.describe() numeric count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Describing all columns of a ``DataFrame`` regardless of data type. >>> df.describe(include='all') # doctest: +SKIP categorical numeric object count 3 3.0 3 unique 3 NaN 3 top f NaN a freq 1 NaN 1 mean NaN 2.0 NaN std NaN 1.0 NaN min NaN 1.0 NaN 25% NaN 1.5 NaN 50% NaN 2.0 NaN 75% NaN 2.5 NaN max NaN 3.0 NaN Describing a column from a ``DataFrame`` by accessing it as an attribute. >>> df.numeric.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Name: numeric, dtype: float64 Including only numeric columns in a ``DataFrame`` description. >>> df.describe(include=[np.number]) numeric count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Including only string columns in a ``DataFrame`` description. >>> df.describe(include=[object]) # doctest: +SKIP object count 3 unique 3 top a freq 1 Including only categorical columns from a ``DataFrame`` description. >>> df.describe(include=['category']) categorical count 3 unique 3 top d freq 1 Excluding numeric columns from a ``DataFrame`` description. >>> df.describe(exclude=[np.number]) # doctest: +SKIP categorical object count 3 3 unique 3 3 top f a freq 1 1 Excluding object columns from a ``DataFrame`` description. >>> df.describe(exclude=[object]) # doctest: +SKIP categorical numeric count 3 3.0 unique 3 NaN top f NaN freq 1 NaN mean NaN 2.0 std NaN 1.0 min NaN 1.0 25% NaN 1.5 50% NaN 2.0 75% NaN 2.5 max NaN 3.0 """ return describe_ndframe( obj=self, include=include, exclude=exclude, percentiles=percentiles, ) def pct_change( self: NDFrameT, periods: int = 1, fill_method: Literal["backfill", "bfill", "pad", "ffill"] | None = "pad", limit=None, freq=None, **kwargs, ) -> NDFrameT: """ Percentage change between the current and a prior element. Computes the percentage change from the immediately previous row by default. This is useful in comparing the percentage of change in a time series of elements. Parameters ---------- periods : int, default 1 Periods to shift for forming percent change. fill_method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad' How to handle NAs **before** computing percent changes. limit : int, default None The number of consecutive NAs to fill before stopping. freq : DateOffset, timedelta, or str, optional Increment to use from time series API (e.g. 'M' or BDay()). **kwargs Additional keyword arguments are passed into `DataFrame.shift` or `Series.shift`. Returns ------- Series or DataFrame The same type as the calling object. See Also -------- Series.diff : Compute the difference of two elements in a Series. DataFrame.diff : Compute the difference of two elements in a DataFrame. Series.shift : Shift the index by some number of periods. DataFrame.shift : Shift the index by some number of periods. 
Examples -------- **Series** >>> s = pd.Series([90, 91, 85]) >>> s 0 90 1 91 2 85 dtype: int64 >>> s.pct_change() 0 NaN 1 0.011111 2 -0.065934 dtype: float64 >>> s.pct_change(periods=2) 0 NaN 1 NaN 2 -0.055556 dtype: float64 See the percentage change in a Series where filling NAs with last valid observation forward to next valid. >>> s = pd.Series([90, 91, None, 85]) >>> s 0 90.0 1 91.0 2 NaN 3 85.0 dtype: float64 >>> s.pct_change(fill_method='ffill') 0 NaN 1 0.011111 2 0.000000 3 -0.065934 dtype: float64 **DataFrame** Percentage change in French franc, Deutsche Mark, and Italian lira from 1980-01-01 to 1980-03-01. >>> df = pd.DataFrame({ ... 'FR': [4.0405, 4.0963, 4.3149], ... 'GR': [1.7246, 1.7482, 1.8519], ... 'IT': [804.74, 810.01, 860.13]}, ... index=['1980-01-01', '1980-02-01', '1980-03-01']) >>> df FR GR IT 1980-01-01 4.0405 1.7246 804.74 1980-02-01 4.0963 1.7482 810.01 1980-03-01 4.3149 1.8519 860.13 >>> df.pct_change() FR GR IT 1980-01-01 NaN NaN NaN 1980-02-01 0.013810 0.013684 0.006549 1980-03-01 0.053365 0.059318 0.061876 Percentage of change in GOOG and APPL stock volume. Shows computing the percentage change between columns. >>> df = pd.DataFrame({ ... '2016': [1769950, 30586265], ... '2015': [1500923, 40912316], ... '2014': [1371819, 41403351]}, ... index=['GOOG', 'APPL']) >>> df 2016 2015 2014 GOOG 1769950 1500923 1371819 APPL 30586265 40912316 41403351 >>> df.pct_change(axis='columns', periods=-1) 2016 2015 2014 GOOG 0.179241 0.094112 NaN APPL -0.252395 -0.011860 NaN """ axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name)) if fill_method is None: data = self else: _data = self.fillna(method=fill_method, axis=axis, limit=limit) assert _data is not None # needed for mypy data = _data shifted = data.shift(periods=periods, freq=freq, axis=axis, **kwargs) # Unsupported left operand type for / ("NDFrameT") rs = data / shifted - 1 # type: ignore[operator] if freq is not None: # Shift method is implemented differently when freq is not None # We want to restore the original index rs = rs.loc[~rs.index.duplicated()] rs = rs.reindex_like(data) return rs.__finalize__(self, method="pct_change") def _logical_func( self, name: str, func, axis: Axis = 0, bool_only: bool_t = False, skipna: bool_t = True, **kwargs, ) -> Series | bool_t: nv.validate_logical_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if self.ndim > 1 and axis is None: # Reduce along one dimension then the other, to simplify DataFrame._reduce res = self._logical_func( name, func, axis=0, bool_only=bool_only, skipna=skipna, **kwargs ) return res._logical_func(name, func, skipna=skipna, **kwargs) if ( self.ndim > 1 and axis == 1 and len(self._mgr.arrays) > 1 # TODO(EA2D): special-case not needed and all(x.ndim == 2 for x in self._mgr.arrays) and not kwargs ): # Fastpath avoiding potentially expensive transpose obj = self if bool_only: obj = self._get_bool_data() return obj._reduce_axis1(name, func, skipna=skipna) return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=bool_only, filter_type="bool", ) def any( self, axis: Axis = 0, bool_only: bool_t = False, skipna: bool_t = True, **kwargs, ) -> DataFrame | Series | bool_t: return self._logical_func( "any", nanops.nanany, axis, bool_only, skipna, **kwargs ) def all( self, axis: Axis = 0, bool_only: bool_t = False, skipna: bool_t = True, **kwargs, ) -> Series | bool_t: return self._logical_func( "all", nanops.nanall, axis, bool_only, skipna, **kwargs ) def _accum_func( self, name: str, func, axis: 
Axis | None = None, skipna: bool_t = True, *args, **kwargs, ): skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name) if axis is None: axis = self._stat_axis_number else: axis = self._get_axis_number(axis) if axis == 1: return self.T._accum_func( name, func, axis=0, skipna=skipna, *args, **kwargs # noqa: B026 ).T def block_accum_func(blk_values): values = blk_values.T if hasattr(blk_values, "T") else blk_values result: np.ndarray | ExtensionArray if isinstance(values, ExtensionArray): result = values._accumulate(name, skipna=skipna, **kwargs) else: result = nanops.na_accum_func(values, func, skipna=skipna) result = result.T if hasattr(result, "T") else result return result result = self._mgr.apply(block_accum_func) return self._constructor(result).__finalize__(self, method=name) def cummax(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func( "cummax", np.maximum.accumulate, axis, skipna, *args, **kwargs ) def cummin(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func( "cummin", np.minimum.accumulate, axis, skipna, *args, **kwargs ) def cumsum(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func("cumsum", np.cumsum, axis, skipna, *args, **kwargs) def cumprod(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func("cumprod", np.cumprod, axis, skipna, *args, **kwargs) def _stat_function_ddof( self, name: str, func, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: nv.validate_stat_ddof_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if axis is None: axis = self._stat_axis_number return self._reduce( func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof ) def sem( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function_ddof( "sem", nanops.nansem, axis, skipna, ddof, numeric_only, **kwargs ) def var( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function_ddof( "var", nanops.nanvar, axis, skipna, ddof, numeric_only, **kwargs ) def std( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function_ddof( "std", nanops.nanstd, axis, skipna, ddof, numeric_only, **kwargs ) def _stat_function( self, name: str, func, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): if name == "median": nv.validate_median((), kwargs) else: nv.validate_stat_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only ) def min( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return self._stat_function( "min", nanops.nanmin, axis, skipna, numeric_only, **kwargs, ) def max( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return self._stat_function( "max", nanops.nanmax, axis, skipna, numeric_only, **kwargs, ) def mean( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( 
"mean", nanops.nanmean, axis, skipna, numeric_only, **kwargs ) def median( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( "median", nanops.nanmedian, axis, skipna, numeric_only, **kwargs ) def skew( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( "skew", nanops.nanskew, axis, skipna, numeric_only, **kwargs ) def kurt( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( "kurt", nanops.nankurt, axis, skipna, numeric_only, **kwargs ) kurtosis = kurt def _min_count_stat_function( self, name: str, func, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): if name == "sum": nv.validate_sum((), kwargs) elif name == "prod": nv.validate_prod((), kwargs) else: nv.validate_stat_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if axis is None: axis = self._stat_axis_number return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count, ) def sum( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return self._min_count_stat_function( "sum", nanops.nansum, axis, skipna, numeric_only, min_count, **kwargs ) def prod( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return self._min_count_stat_function( "prod", nanops.nanprod, axis, skipna, numeric_only, min_count, **kwargs, ) product = prod def _add_numeric_operations(cls) -> None: """ Add the operations to the cls; evaluate the doc strings again """ axis_descr, name1, name2 = _doc_params(cls) _bool_doc, desc=_any_desc, name1=name1, name2=name2, axis_descr=axis_descr, see_also=_any_see_also, examples=_any_examples, empty_value=False, ) def any( self, *, axis: Axis = 0, bool_only=None, skipna: bool_t = True, **kwargs, ): return NDFrame.any( self, axis=axis, bool_only=bool_only, skipna=skipna, **kwargs, ) setattr(cls, "any", any) _bool_doc, desc=_all_desc, name1=name1, name2=name2, axis_descr=axis_descr, see_also=_all_see_also, examples=_all_examples, empty_value=True, ) def all( self, axis: Axis = 0, bool_only=None, skipna: bool_t = True, **kwargs, ): return NDFrame.all(self, axis, bool_only, skipna, **kwargs) setattr(cls, "all", all) _num_ddof_doc, desc="Return unbiased standard error of the mean over requested " "axis.\n\nNormalized by N-1 by default. This can be changed " "using the ddof argument", name1=name1, name2=name2, axis_descr=axis_descr, notes="", examples="", ) def sem( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ): return NDFrame.sem(self, axis, skipna, ddof, numeric_only, **kwargs) setattr(cls, "sem", sem) _num_ddof_doc, desc="Return unbiased variance over requested axis.\n\nNormalized by " "N-1 by default. This can be changed using the ddof argument.", name1=name1, name2=name2, axis_descr=axis_descr, notes="", examples=_var_examples, ) def var( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ): return NDFrame.var(self, axis, skipna, ddof, numeric_only, **kwargs) setattr(cls, "var", var) _num_ddof_doc, desc="Return sample standard deviation over requested axis." 
"\n\nNormalized by N-1 by default. This can be changed using the " "ddof argument.", name1=name1, name2=name2, axis_descr=axis_descr, notes=_std_notes, examples=_std_examples, ) def std( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ): return NDFrame.std(self, axis, skipna, ddof, numeric_only, **kwargs) setattr(cls, "std", std) _cnum_doc, desc="minimum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="min", examples=_cummin_examples, ) def cummin( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cummin(self, axis, skipna, *args, **kwargs) setattr(cls, "cummin", cummin) _cnum_doc, desc="maximum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="max", examples=_cummax_examples, ) def cummax( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cummax(self, axis, skipna, *args, **kwargs) setattr(cls, "cummax", cummax) _cnum_doc, desc="sum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="sum", examples=_cumsum_examples, ) def cumsum( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cumsum(self, axis, skipna, *args, **kwargs) setattr(cls, "cumsum", cumsum) _cnum_doc, desc="product", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="prod", examples=_cumprod_examples, ) def cumprod( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cumprod(self, axis, skipna, *args, **kwargs) setattr(cls, "cumprod", cumprod) # error: Untyped decorator makes function "sum" untyped _num_doc, desc="Return the sum of the values over the requested axis.\n\n" "This is equivalent to the method ``numpy.sum``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count=_min_count_stub, see_also=_stat_func_see_also, examples=_sum_examples, ) def sum( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return NDFrame.sum(self, axis, skipna, numeric_only, min_count, **kwargs) setattr(cls, "sum", sum) _num_doc, desc="Return the product of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count=_min_count_stub, see_also=_stat_func_see_also, examples=_prod_examples, ) def prod( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return NDFrame.prod(self, axis, skipna, numeric_only, min_count, **kwargs) setattr(cls, "prod", prod) cls.product = prod _num_doc, desc="Return the mean of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def mean( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.mean(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "mean", mean) _num_doc, desc="Return unbiased skew over requested axis.\n\nNormalized by N-1.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def skew( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.skew(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "skew", skew) _num_doc, desc="Return unbiased kurtosis over requested axis.\n\n" "Kurtosis obtained using Fisher's definition of\n" "kurtosis (kurtosis of normal == 0.0). 
Normalized " "by N-1.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def kurt( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.kurt(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "kurt", kurt) cls.kurtosis = kurt _num_doc, desc="Return the median of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def median( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.median(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "median", median) _num_doc, desc="Return the maximum of the values over the requested axis.\n\n" "If you want the *index* of the maximum, use ``idxmax``. This is " "the equivalent of the ``numpy.ndarray`` method ``argmax``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also=_stat_func_see_also, examples=_max_examples, ) def max( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.max(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "max", max) _num_doc, desc="Return the minimum of the values over the requested axis.\n\n" "If you want the *index* of the minimum, use ``idxmin``. This is " "the equivalent of the ``numpy.ndarray`` method ``argmin``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also=_stat_func_see_also, examples=_min_examples, ) def min( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.min(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "min", min) def rolling( self, window: int | dt.timedelta | str | BaseOffset | BaseIndexer, min_periods: int | None = None, center: bool_t = False, win_type: str | None = None, on: str | None = None, axis: Axis = 0, closed: str | None = None, step: int | None = None, method: str = "single", ) -> Window | Rolling: axis = self._get_axis_number(axis) if win_type is not None: return Window( self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, axis=axis, closed=closed, step=step, method=method, ) return Rolling( self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, axis=axis, closed=closed, step=step, method=method, ) def expanding( self, min_periods: int = 1, axis: Axis = 0, method: str = "single", ) -> Expanding: axis = self._get_axis_number(axis) return Expanding(self, min_periods=min_periods, axis=axis, method=method) def ewm( self, com: float | None = None, span: float | None = None, halflife: float | TimedeltaConvertibleTypes | None = None, alpha: float | None = None, min_periods: int | None = 0, adjust: bool_t = True, ignore_na: bool_t = False, axis: Axis = 0, times: np.ndarray | DataFrame | Series | None = None, method: str = "single", ) -> ExponentialMovingWindow: axis = self._get_axis_number(axis) return ExponentialMovingWindow( self, com=com, span=span, halflife=halflife, alpha=alpha, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, axis=axis, times=times, method=method, ) # ---------------------------------------------------------------------- # Arithmetic Methods def _inplace_method(self, other, op): """ Wrap arithmetic method to operate inplace. 
""" result = op(self, other) if ( self.ndim == 1 and result._indexed_same(self) and is_dtype_equal(result.dtype, self.dtype) ): # GH#36498 this inplace op can _actually_ be inplace. # Item "ArrayManager" of "Union[ArrayManager, SingleArrayManager, # BlockManager, SingleBlockManager]" has no attribute "setitem_inplace" self._mgr.setitem_inplace( # type: ignore[union-attr] slice(None), result._values ) return self # Delete cacher self._reset_cacher() # this makes sure that we are aligned like the input # we are updating inplace so we want to ignore is_copy self._update_inplace( result.reindex_like(self, copy=False), verify_is_copy=False ) return self def __iadd__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for + ("Type[NDFrame]") return self._inplace_method(other, type(self).__add__) # type: ignore[operator] def __isub__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for - ("Type[NDFrame]") return self._inplace_method(other, type(self).__sub__) # type: ignore[operator] def __imul__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for * ("Type[NDFrame]") return self._inplace_method(other, type(self).__mul__) # type: ignore[operator] def __itruediv__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for / ("Type[NDFrame]") return self._inplace_method( other, type(self).__truediv__ # type: ignore[operator] ) def __ifloordiv__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for // ("Type[NDFrame]") return self._inplace_method( other, type(self).__floordiv__ # type: ignore[operator] ) def __imod__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for % ("Type[NDFrame]") return self._inplace_method(other, type(self).__mod__) # type: ignore[operator] def __ipow__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for ** ("Type[NDFrame]") return self._inplace_method(other, type(self).__pow__) # type: ignore[operator] def __iand__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for & ("Type[NDFrame]") return self._inplace_method(other, type(self).__and__) # type: ignore[operator] def __ior__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for | ("Type[NDFrame]") return self._inplace_method(other, type(self).__or__) # type: ignore[operator] def __ixor__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for ^ ("Type[NDFrame]") return self._inplace_method(other, type(self).__xor__) # type: ignore[operator] # ---------------------------------------------------------------------- # Misc methods def _find_valid_index(self, *, how: str) -> Hashable | None: """ Retrieves the index of the first valid value. Parameters ---------- how : {'first', 'last'} Use this parameter to change between the first or last valid index. Returns ------- idx_first_valid : type of index """ idxpos = find_valid_index(self._values, how=how, is_valid=~isna(self._values)) if idxpos is None: return None return self.index[idxpos] def first_valid_index(self) -> Hashable | None: """ Return index for {position} non-NA value or None, if no non-NA value is found. Returns ------- type of index Notes ----- If all elements are non-NA/null, returns None. Also returns None for empty {klass}. 
""" return self._find_valid_index(how="first") def last_valid_index(self) -> Hashable | None: return self._find_valid_index(how="last") def to_json( path_or_buf: None, obj: NDFrame, orient: str | None = ..., date_format: str = ..., double_precision: int = ..., force_ascii: bool = ..., date_unit: str = ..., default_handler: Callable[[Any], JSONSerializable] | None = ..., lines: bool = ..., compression: CompressionOptions = ..., index: bool = ..., indent: int = ..., storage_options: StorageOptions = ..., mode: Literal["a", "w"] = ..., ) -> str: ...
173,501
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from collections import abc from io import StringIO from itertools import islice from types import TracebackType from typing import ( TYPE_CHECKING, Any, Callable, Generic, Literal, Mapping, TypeVar, overload, ) import numpy as np from pandas._libs import lib from pandas._libs.json import ( dumps, loads, ) from pandas._libs.tslibs import iNaT from pandas._typing import ( CompressionOptions, DtypeArg, DtypeBackend, FilePath, IndexLabel, JSONEngine, JSONSerializable, ReadBuffer, StorageOptions, WriteBuffer, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import AbstractMethodError from pandas.util._decorators import doc from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( ensure_str, is_period_dtype, ) from pandas.core.dtypes.generic import ABCIndex from pandas import ( ArrowDtype, DataFrame, MultiIndex, Series, isna, notna, to_datetime, ) from pandas.core.reshape.concat import concat from pandas.core.shared_docs import _shared_docs from pandas.io.common import ( IOHandles, dedup_names, extension_to_compression, file_exists, get_handle, is_fsspec_url, is_potential_multi_index, is_url, stringify_path, ) from pandas.io.json._normalize import convert_to_line_delimits from pandas.io.json._table_schema import ( build_table_schema, parse_table_schema, ) from pandas.io.parsers.readers import validate_integer class Writer(ABC): _default_orient: str def __init__( self, obj: NDFrame, orient: str | None, date_format: str, double_precision: int, ensure_ascii: bool, date_unit: str, index: bool, default_handler: Callable[[Any], JSONSerializable] | None = None, indent: int = 0, ) -> None: self.obj = obj if orient is None: orient = self._default_orient self.orient = orient self.date_format = date_format self.double_precision = double_precision self.ensure_ascii = ensure_ascii self.date_unit = date_unit self.default_handler = default_handler self.index = index self.indent = indent self.is_copy = None self._format_axes() def _format_axes(self): raise AbstractMethodError(self) def write(self) -> str: iso_dates = self.date_format == "iso" return dumps( self.obj_to_write, orient=self.orient, double_precision=self.double_precision, ensure_ascii=self.ensure_ascii, date_unit=self.date_unit, iso_dates=iso_dates, default_handler=self.default_handler, indent=self.indent, ) def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]: """Object to write in JSON format.""" class SeriesWriter(Writer): _default_orient = "index" def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]: if not self.index and self.orient == "split": return {"name": self.obj.name, "data": self.obj.values} else: return self.obj def _format_axes(self): if not self.obj.index.is_unique and self.orient == "index": raise ValueError(f"Series index must be unique for orient='{self.orient}'") class FrameWriter(Writer): _default_orient = "columns" def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]: if not self.index and self.orient == "split": obj_to_write = self.obj.to_dict(orient="split") del obj_to_write["index"] else: obj_to_write = self.obj return obj_to_write def _format_axes(self): """ Try to format axes if they are datelike. """ if not self.obj.index.is_unique and self.orient in ("index", "columns"): raise ValueError( f"DataFrame index must be unique for orient='{self.orient}'." 
) if not self.obj.columns.is_unique and self.orient in ( "index", "columns", "records", ): raise ValueError( f"DataFrame columns must be unique for orient='{self.orient}'." ) class JSONTableWriter(FrameWriter): _default_orient = "records" def __init__( self, obj, orient: str | None, date_format: str, double_precision: int, ensure_ascii: bool, date_unit: str, index: bool, default_handler: Callable[[Any], JSONSerializable] | None = None, indent: int = 0, ) -> None: """ Adds a `schema` attribute with the Table Schema, resets the index (can't do in caller, because the schema inference needs to know what the index is, forces orient to records, and forces date_format to 'iso'. """ super().__init__( obj, orient, date_format, double_precision, ensure_ascii, date_unit, index, default_handler=default_handler, indent=indent, ) if date_format != "iso": msg = ( "Trying to write with `orient='table'` and " f"`date_format='{date_format}'`. Table Schema requires dates " "to be formatted with `date_format='iso'`" ) raise ValueError(msg) self.schema = build_table_schema(obj, index=self.index) # NotImplemented on a column MultiIndex if obj.ndim == 2 and isinstance(obj.columns, MultiIndex): raise NotImplementedError( "orient='table' is not supported for MultiIndex columns" ) # TODO: Do this timedelta properly in objToJSON.c See GH #15137 if ( (obj.ndim == 1) and (obj.name in set(obj.index.names)) or len(obj.columns.intersection(obj.index.names)) ): msg = "Overlapping names between the index and columns" raise ValueError(msg) obj = obj.copy() timedeltas = obj.select_dtypes(include=["timedelta"]).columns if len(timedeltas): obj[timedeltas] = obj[timedeltas].applymap(lambda x: x.isoformat()) # Convert PeriodIndex to datetimes before serializing if is_period_dtype(obj.index.dtype): obj.index = obj.index.to_timestamp() # exclude index from obj if index=False if not self.index: self.obj = obj.reset_index(drop=True) else: self.obj = obj.reset_index(drop=False) self.date_format = "iso" self.orient = "records" self.index = index def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]: return {"schema": self.schema, "data": self.obj} Any = object() class Callable(BaseTypingInstance): def py__call__(self, arguments): """ def x() -> Callable[[Callable[..., _T]], _T]: ... """ # The 0th index are the arguments. try: param_values = self._generics_manager[0] result_values = self._generics_manager[1] except IndexError: debug.warning('Callable[...] defined without two arguments') return NO_VALUES else: from jedi.inference.gradual.annotation import infer_return_for_callable return infer_return_for_callable(arguments, param_values, result_values) def py__get__(self, instance, class_value): return ValueSet([self]) Literal: _SpecialForm = ... JSONSerializable = Optional[Union[PythonScalar, List, Dict]] class WriteBuffer(BaseBuffer, Protocol[AnyStr_contra]): def write(self, __b: AnyStr_contra) -> Any: # for gzip.GzipFile, bz2.BZ2File ... def flush(self) -> Any: # for gzip.GzipFile, bz2.BZ2File ... FilePath = Union[str, "PathLike[str]"] StorageOptions = Optional[Dict[str, Any]] CompressionOptions = Optional[ Union[Literal["infer", "gzip", "bz2", "zip", "xz", "zstd", "tar"], CompressionDict] ] ) def get_handle( path_or_buf: FilePath | BaseBuffer, mode: str, *, encoding: str | None = ..., compression: CompressionOptions = ..., memory_map: bool = ..., is_text: Literal[False], errors: str | None = ..., storage_options: StorageOptions = ..., ) -> IOHandles[bytes]: ... 
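# Editorial sketch (not part of pandas): the overloads above and below narrow
# get_handle's return type based on is_text. Typical internal usage treats
# the returned IOHandles as a context manager so that every handle the call
# created is closed again. The filename here is hypothetical; with
# compression="infer" the ".gz" suffix selects gzip decompression, and
# is_text=True (the default) means handles.handle yields decoded str.
from pandas.io.common import get_handle

with get_handle("example.json.gz", "r", compression="infer") as handles:
    text = handles.handle.read()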
def get_handle( path_or_buf: FilePath | BaseBuffer, mode: str, *, encoding: str | None = ..., compression: CompressionOptions = ..., memory_map: bool = ..., is_text: Literal[True] = ..., errors: str | None = ..., storage_options: StorageOptions = ..., ) -> IOHandles[str]: ... def get_handle( path_or_buf: FilePath | BaseBuffer, mode: str, *, encoding: str | None = ..., compression: CompressionOptions = ..., memory_map: bool = ..., is_text: bool = ..., errors: str | None = ..., storage_options: StorageOptions = ..., ) -> IOHandles[str] | IOHandles[bytes]: ... def get_handle( path_or_buf: FilePath | BaseBuffer, mode: str, *, encoding: str | None = None, compression: CompressionOptions = None, memory_map: bool = False, is_text: bool = True, errors: str | None = None, storage_options: StorageOptions = None, ) -> IOHandles[str] | IOHandles[bytes]: """ Get file handle for given path/buffer and mode. Parameters ---------- path_or_buf : str or file handle File path or object. mode : str Mode to open path_or_buf with. encoding : str or None Encoding to use. {compression_options} .. versionchanged:: 1.0.0 May now be a dict with key 'method' as compression mode and other keys as compression options if compression mode is 'zip'. .. versionchanged:: 1.1.0 Passing compression options as keys in dict is now supported for compression modes 'gzip', 'bz2', 'zstd' and 'zip'. .. versionchanged:: 1.4.0 Zstandard support. memory_map : bool, default False See parsers._parser_params for more information. Only used by read_csv. is_text : bool, default True Whether the type of the content passed to the file/buffer is string or bytes. This is not the same as `"b" not in mode`. If a string content is passed to a binary file/buffer, a wrapper is inserted. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. storage_options: StorageOptions = None Passed to _get_filepath_or_buffer .. versionchanged:: 1.2.0 Returns the dataclass IOHandles """ # Windows does not default to utf-8. Set to utf-8 for a consistent behavior encoding = encoding or "utf-8" errors = errors or "strict" # read_csv does not know whether the buffer is opened in binary/text mode if _is_binary_mode(path_or_buf, mode) and "b" not in mode: mode += "b" # validate encoding and errors codecs.lookup(encoding) if isinstance(errors, str): codecs.lookup_error(errors) # open URLs ioargs = _get_filepath_or_buffer( path_or_buf, encoding=encoding, compression=compression, mode=mode, storage_options=storage_options, ) handle = ioargs.filepath_or_buffer handles: list[BaseBuffer] # memory mapping needs to be the first step # only used for read_csv handle, memory_map, handles = _maybe_memory_map(handle, memory_map) is_path = isinstance(handle, str) compression_args = dict(ioargs.compression) compression = compression_args.pop("method") # Only for write methods if "r" not in mode and is_path: check_parent_directory(str(handle)) if compression: if compression != "zstd": # compression libraries do not like an explicit text-mode ioargs.mode = ioargs.mode.replace("t", "") elif compression == "zstd" and "b" not in ioargs.mode: # python-zstandard defaults to text mode, but we always expect # compression libraries to use binary mode. 
ioargs.mode += "b" # GZ Compression if compression == "gzip": if isinstance(handle, str): # error: Incompatible types in assignment (expression has type # "GzipFile", variable has type "Union[str, BaseBuffer]") handle = gzip.GzipFile( # type: ignore[assignment] filename=handle, mode=ioargs.mode, **compression_args, ) else: handle = gzip.GzipFile( # No overload variant of "GzipFile" matches argument types # "Union[str, BaseBuffer]", "str", "Dict[str, Any]" fileobj=handle, # type: ignore[call-overload] mode=ioargs.mode, **compression_args, ) # BZ Compression elif compression == "bz2": # Overload of "BZ2File" to handle pickle protocol 5 # "Union[str, BaseBuffer]", "str", "Dict[str, Any]" handle = _BZ2File( # type: ignore[call-overload] handle, mode=ioargs.mode, **compression_args, ) # ZIP Compression elif compression == "zip": # error: Argument 1 to "_BytesZipFile" has incompatible type # "Union[str, BaseBuffer]"; expected "Union[Union[str, PathLike[str]], # ReadBuffer[bytes], WriteBuffer[bytes]]" handle = _BytesZipFile( handle, ioargs.mode, **compression_args # type: ignore[arg-type] ) if handle.buffer.mode == "r": handles.append(handle) zip_names = handle.buffer.namelist() if len(zip_names) == 1: handle = handle.buffer.open(zip_names.pop()) elif not zip_names: raise ValueError(f"Zero files found in ZIP file {path_or_buf}") else: raise ValueError( "Multiple files found in ZIP file. " f"Only one file per ZIP: {zip_names}" ) # TAR Encoding elif compression == "tar": compression_args.setdefault("mode", ioargs.mode) if isinstance(handle, str): handle = _BytesTarFile(name=handle, **compression_args) else: # error: Argument "fileobj" to "_BytesTarFile" has incompatible # type "BaseBuffer"; expected "Union[ReadBuffer[bytes], # WriteBuffer[bytes], None]" handle = _BytesTarFile( fileobj=handle, **compression_args # type: ignore[arg-type] ) assert isinstance(handle, _BytesTarFile) if "r" in handle.buffer.mode: handles.append(handle) files = handle.buffer.getnames() if len(files) == 1: file = handle.buffer.extractfile(files[0]) assert file is not None handle = file elif not files: raise ValueError(f"Zero files found in TAR archive {path_or_buf}") else: raise ValueError( "Multiple files found in TAR archive. " f"Only one file per TAR archive: {files}" ) # XZ Compression elif compression == "xz": # error: Argument 1 to "LZMAFile" has incompatible type "Union[str, # BaseBuffer]"; expected "Optional[Union[Union[str, bytes, PathLike[str], # PathLike[bytes]], IO[bytes]]]" handle = get_lzma_file()(handle, ioargs.mode) # type: ignore[arg-type] # Zstd Compression elif compression == "zstd": zstd = import_optional_dependency("zstandard") if "r" in ioargs.mode: open_args = {"dctx": zstd.ZstdDecompressor(**compression_args)} else: open_args = {"cctx": zstd.ZstdCompressor(**compression_args)} handle = zstd.open( handle, mode=ioargs.mode, **open_args, ) # Unrecognized Compression else: msg = f"Unrecognized compression type: {compression}" raise ValueError(msg) assert not isinstance(handle, str) handles.append(handle) elif isinstance(handle, str): # Check whether the filename is to be opened in binary mode. # Binary mode does not support 'encoding' and 'newline'. 
if ioargs.encoding and "b" not in ioargs.mode: # Encoding handle = open( handle, ioargs.mode, encoding=ioargs.encoding, errors=errors, newline="", ) else: # Binary mode handle = open(handle, ioargs.mode) handles.append(handle) # Convert BytesIO or file objects passed with an encoding is_wrapped = False if not is_text and ioargs.mode == "rb" and isinstance(handle, TextIOBase): # not added to handles as it does not open/buffer resources handle = _BytesIOWrapper( handle, encoding=ioargs.encoding, ) elif is_text and ( compression or memory_map or _is_binary_mode(handle, ioargs.mode) ): if ( not hasattr(handle, "readable") or not hasattr(handle, "writable") or not hasattr(handle, "seekable") ): handle = _IOWrapper(handle) # error: Argument 1 to "TextIOWrapper" has incompatible type # "_IOWrapper"; expected "IO[bytes]" handle = TextIOWrapper( handle, # type: ignore[arg-type] encoding=ioargs.encoding, errors=errors, newline="", ) handles.append(handle) # only marked as wrapped when the caller provided a handle is_wrapped = not ( isinstance(ioargs.filepath_or_buffer, str) or ioargs.should_close ) if "r" in ioargs.mode and not hasattr(handle, "read"): raise TypeError( "Expected file path name or file-like object, " f"got {type(ioargs.filepath_or_buffer)} type" ) handles.reverse() # close the most recently added buffer first if ioargs.should_close: assert not isinstance(ioargs.filepath_or_buffer, str) handles.append(ioargs.filepath_or_buffer) return IOHandles( # error: Argument "handle" to "IOHandles" has incompatible type # "Union[TextIOWrapper, GzipFile, BaseBuffer, typing.IO[bytes], # typing.IO[Any]]"; expected "pandas._typing.IO[Any]" handle=handle, # type: ignore[arg-type] # error: Argument "created_handles" to "IOHandles" has incompatible type # "List[BaseBuffer]"; expected "List[Union[IO[bytes], IO[str]]]" created_handles=handles, # type: ignore[arg-type] is_wrapped=is_wrapped, compression=ioargs.compression, ) def convert_to_line_delimits(s: str) -> str: """ Helper function that converts JSON lists to line delimited JSON. """ # Determine we have a JSON list to turn to lines otherwise just return the # json object, only lists can if not s[0] == "[" and s[-1] == "]": return s s = s[1:-1] return convert_json_to_lines(s) class NDFrame(PandasObject, indexing.IndexingMixin): """ N-dimensional analogue of DataFrame. 
Store multi-dimensional data in a size-mutable, labeled data structure

    Parameters
    ----------
    data : BlockManager
    axes : list
    copy : bool, default False
    """

    _internal_names: list[str] = [
        "_mgr",
        "_cacher",
        "_item_cache",
        "_cache",
        "_is_copy",
        "_subtyp",
        "_name",
        "_default_kind",
        "_default_fill_value",
        "_metadata",
        "__array_struct__",
        "__array_interface__",
        "_flags",
    ]
    _internal_names_set: set[str] = set(_internal_names)
    _accessors: set[str] = set()
    _hidden_attrs: frozenset[str] = frozenset([])
    _metadata: list[str] = []
    _is_copy: weakref.ReferenceType[NDFrame] | None = None
    _mgr: Manager
    _attrs: dict[Hashable, Any]
    _typ: str

    # ----------------------------------------------------------------------
    # Constructors

    def __init__(
        self,
        data: Manager,
        copy: bool_t = False,
        attrs: Mapping[Hashable, Any] | None = None,
    ) -> None:
        # copy kwarg is retained for mypy compat, is not used
        object.__setattr__(self, "_is_copy", None)
        object.__setattr__(self, "_mgr", data)
        object.__setattr__(self, "_item_cache", {})
        if attrs is None:
            attrs = {}
        else:
            attrs = dict(attrs)
        object.__setattr__(self, "_attrs", attrs)
        object.__setattr__(self, "_flags", Flags(self, allows_duplicate_labels=True))

    @classmethod
    def _init_mgr(
        cls,
        mgr: Manager,
        axes,
        dtype: Dtype | None = None,
        copy: bool_t = False,
    ) -> Manager:
        """passed a manager and an axes dict"""
        for a, axe in axes.items():
            if axe is not None:
                axe = ensure_index(axe)
                bm_axis = cls._get_block_manager_axis(a)
                mgr = mgr.reindex_axis(axe, axis=bm_axis)

        # make a copy if explicitly requested
        if copy:
            mgr = mgr.copy()
        if dtype is not None:
            # avoid further copies if we can
            if (
                isinstance(mgr, BlockManager)
                and len(mgr.blocks) == 1
                and is_dtype_equal(mgr.blocks[0].values.dtype, dtype)
            ):
                pass
            else:
                mgr = mgr.astype(dtype=dtype)
        return mgr

    def _as_manager(self: NDFrameT, typ: str, copy: bool_t = True) -> NDFrameT:
        """
        Private helper function to create a DataFrame with specific manager.

        Parameters
        ----------
        typ : {"block", "array"}
        copy : bool, default True
            Only controls whether the conversion from Block->ArrayManager
            copies the 1D arrays (to ensure proper/contiguous memory layout).

        Returns
        -------
        DataFrame
            New DataFrame using specified manager type. Is not guaranteed to
            be a copy or not.
        """
        new_mgr: Manager
        new_mgr = mgr_to_mgr(self._mgr, typ=typ, copy=copy)
        # fastpath of passing a manager doesn't check the option/manager class
        return self._constructor(new_mgr).__finalize__(self)

    # ----------------------------------------------------------------------
    # attrs and flags

    @property
    def attrs(self) -> dict[Hashable, Any]:
        """
        Dictionary of global attributes of this dataset.

        .. warning::

           attrs is experimental and may change without warning.

        See Also
        --------
        DataFrame.flags : Global flags applying to this object.
        """
        if self._attrs is None:
            self._attrs = {}
        return self._attrs

    @attrs.setter
    def attrs(self, value: Mapping[Hashable, Any]) -> None:
        self._attrs = dict(value)

    @property
    def flags(self) -> Flags:
        """
        Get the properties associated with this pandas object.

        The available flags are

        * :attr:`Flags.allows_duplicate_labels`

        See Also
        --------
        Flags : Flags that apply to pandas objects.
        DataFrame.attrs : Global metadata applying to this dataset.

        Notes
        -----
        "Flags" differ from "metadata". Flags reflect properties of the
        pandas object (the Series or DataFrame). Metadata refer to properties
        of the dataset, and should be stored in :attr:`DataFrame.attrs`.
Examples -------- >>> df = pd.DataFrame({"A": [1, 2]}) >>> df.flags <Flags(allows_duplicate_labels=True)> Flags can be get or set using ``.`` >>> df.flags.allows_duplicate_labels True >>> df.flags.allows_duplicate_labels = False Or by slicing with a key >>> df.flags["allows_duplicate_labels"] False >>> df.flags["allows_duplicate_labels"] = True """ return self._flags def set_flags( self: NDFrameT, *, copy: bool_t = False, allows_duplicate_labels: bool_t | None = None, ) -> NDFrameT: """ Return a new object with updated flags. Parameters ---------- copy : bool, default False Specify if a copy of the object should be made. allows_duplicate_labels : bool, optional Whether the returned object allows duplicate labels. Returns ------- Series or DataFrame The same type as the caller. See Also -------- DataFrame.attrs : Global metadata applying to this dataset. DataFrame.flags : Global flags applying to this object. Notes ----- This method returns a new object that's a view on the same data as the input. Mutating the input or the output values will be reflected in the other. This method is intended to be used in method chains. "Flags" differ from "metadata". Flags reflect properties of the pandas object (the Series or DataFrame). Metadata refer to properties of the dataset, and should be stored in :attr:`DataFrame.attrs`. Examples -------- >>> df = pd.DataFrame({"A": [1, 2]}) >>> df.flags.allows_duplicate_labels True >>> df2 = df.set_flags(allows_duplicate_labels=False) >>> df2.flags.allows_duplicate_labels False """ df = self.copy(deep=copy and not using_copy_on_write()) if allows_duplicate_labels is not None: df.flags["allows_duplicate_labels"] = allows_duplicate_labels return df def _validate_dtype(cls, dtype) -> DtypeObj | None: """validate the passed dtype""" if dtype is not None: dtype = pandas_dtype(dtype) # a compound dtype if dtype.kind == "V": raise NotImplementedError( "compound dtypes are not implemented " f"in the {cls.__name__} constructor" ) return dtype # ---------------------------------------------------------------------- # Construction def _constructor(self: NDFrameT) -> Callable[..., NDFrameT]: """ Used when a manipulation result has the same dimensions as the original. """ raise AbstractMethodError(self) # ---------------------------------------------------------------------- # Internals def _data(self): # GH#33054 retained because some downstream packages uses this, # e.g. 
fastparquet return self._mgr # ---------------------------------------------------------------------- # Axis _stat_axis_number = 0 _stat_axis_name = "index" _AXIS_ORDERS: list[Literal["index", "columns"]] _AXIS_TO_AXIS_NUMBER: dict[Axis, AxisInt] = {0: 0, "index": 0, "rows": 0} _info_axis_number: int _info_axis_name: Literal["index", "columns"] _AXIS_LEN: int def _construct_axes_dict(self, axes: Sequence[Axis] | None = None, **kwargs): """Return an axes dictionary for myself.""" d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)} # error: Argument 1 to "update" of "MutableMapping" has incompatible type # "Dict[str, Any]"; expected "SupportsKeysAndGetItem[Union[int, str], Any]" d.update(kwargs) # type: ignore[arg-type] return d def _get_axis_number(cls, axis: Axis) -> AxisInt: try: return cls._AXIS_TO_AXIS_NUMBER[axis] except KeyError: raise ValueError(f"No axis named {axis} for object type {cls.__name__}") def _get_axis_name(cls, axis: Axis) -> Literal["index", "columns"]: axis_number = cls._get_axis_number(axis) return cls._AXIS_ORDERS[axis_number] def _get_axis(self, axis: Axis) -> Index: axis_number = self._get_axis_number(axis) assert axis_number in {0, 1} return self.index if axis_number == 0 else self.columns def _get_block_manager_axis(cls, axis: Axis) -> AxisInt: """Map the axis to the block_manager axis.""" axis = cls._get_axis_number(axis) ndim = cls._AXIS_LEN if ndim == 2: # i.e. DataFrame return 1 - axis return axis def _get_axis_resolvers(self, axis: str) -> dict[str, Series | MultiIndex]: # index or columns axis_index = getattr(self, axis) d = {} prefix = axis[0] for i, name in enumerate(axis_index.names): if name is not None: key = level = name else: # prefix with 'i' or 'c' depending on the input axis # e.g., you must do ilevel_0 for the 0th level of an unnamed # multiiindex key = f"{prefix}level_{i}" level = i level_values = axis_index.get_level_values(level) s = level_values.to_series() s.index = axis_index d[key] = s # put the index/columns itself in the dict if isinstance(axis_index, MultiIndex): dindex = axis_index else: dindex = axis_index.to_series() d[axis] = dindex return d def _get_index_resolvers(self) -> dict[Hashable, Series | MultiIndex]: from pandas.core.computation.parsing import clean_column_name d: dict[str, Series | MultiIndex] = {} for axis_name in self._AXIS_ORDERS: d.update(self._get_axis_resolvers(axis_name)) return {clean_column_name(k): v for k, v in d.items() if not isinstance(k, int)} def _get_cleaned_column_resolvers(self) -> dict[Hashable, Series]: """ Return the special character free column resolvers of a dataframe. Column names with special characters are 'cleaned up' so that they can be referred to by backtick quoting. Used in :meth:`DataFrame.eval`. 
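
        For example, a backtick-quoted column in :meth:`DataFrame.eval` is
        resolved through this mapping (the column name below is purely
        illustrative):

        >>> df = pd.DataFrame({'total sales': [1, 2]})
        >>> df.eval('`total sales` + 1')  # doctest: +SKIP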
""" from pandas.core.computation.parsing import clean_column_name if isinstance(self, ABCSeries): return {clean_column_name(self.name): self} return { clean_column_name(k): v for k, v in self.items() if not isinstance(k, int) } def _info_axis(self) -> Index: return getattr(self, self._info_axis_name) def _stat_axis(self) -> Index: return getattr(self, self._stat_axis_name) def shape(self) -> tuple[int, ...]: """ Return a tuple of axis dimensions """ return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS) def axes(self) -> list[Index]: """ Return index label(s) of the internal NDFrame """ # we do it this way because if we have reversed axes, then # the block manager shows then reversed return [self._get_axis(a) for a in self._AXIS_ORDERS] def ndim(self) -> int: """ Return an int representing the number of axes / array dimensions. Return 1 if Series. Otherwise return 2 if DataFrame. See Also -------- ndarray.ndim : Number of array dimensions. Examples -------- >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3}) >>> s.ndim 1 >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.ndim 2 """ return self._mgr.ndim def size(self) -> int: """ Return an int representing the number of elements in this object. Return the number of rows if Series. Otherwise return the number of rows times number of columns if DataFrame. See Also -------- ndarray.size : Number of elements in the array. Examples -------- >>> s = pd.Series({'a': 1, 'b': 2, 'c': 3}) >>> s.size 3 >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.size 4 """ # error: Incompatible return value type (got "signedinteger[_64Bit]", # expected "int") [return-value] return np.prod(self.shape) # type: ignore[return-value] def set_axis( self: NDFrameT, labels, *, axis: Axis = 0, copy: bool_t | None = None, ) -> NDFrameT: """ Assign desired index to given axis. Indexes for%(extended_summary_sub)s row labels can be changed by assigning a list-like or Index. Parameters ---------- labels : list-like, Index The values for the new index. axis : %(axes_single_arg)s, default 0 The axis to update. The value 0 identifies the rows. For `Series` this parameter is unused and defaults to 0. copy : bool, default True Whether to make a copy of the underlying data. .. versionadded:: 1.5.0 Returns ------- %(klass)s An object of type %(klass)s. See Also -------- %(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s. """ return self._set_axis_nocheck(labels, axis, inplace=False, copy=copy) def _set_axis_nocheck( self, labels, axis: Axis, inplace: bool_t, copy: bool_t | None ): if inplace: setattr(self, self._get_axis_name(axis), labels) else: # With copy=False, we create a new object but don't copy the # underlying data. obj = self.copy(deep=copy and not using_copy_on_write()) setattr(obj, obj._get_axis_name(axis), labels) return obj def _set_axis(self, axis: AxisInt, labels: AnyArrayLike | list) -> None: """ This is called from the cython code when we set the `index` attribute directly, e.g. `series.index = [1, 2, 3]`. """ labels = ensure_index(labels) self._mgr.set_axis(axis, labels) self._clear_item_cache() def swapaxes( self: NDFrameT, axis1: Axis, axis2: Axis, copy: bool_t | None = None ) -> NDFrameT: """ Interchange axes and swap values axes appropriately. 
Returns ------- same as input """ i = self._get_axis_number(axis1) j = self._get_axis_number(axis2) if i == j: return self.copy(deep=copy and not using_copy_on_write()) mapping = {i: j, j: i} new_axes = [self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN)] new_values = self._values.swapaxes(i, j) # type: ignore[union-attr] if ( using_copy_on_write() and self._mgr.is_single_block and isinstance(self._mgr, BlockManager) ): # This should only get hit in case of having a single block, otherwise a # copy is made, we don't have to set up references. new_mgr = ndarray_to_mgr( new_values, new_axes[0], new_axes[1], dtype=None, copy=False, typ="block", ) assert isinstance(new_mgr, BlockManager) assert isinstance(self._mgr, BlockManager) new_mgr.blocks[0].refs = self._mgr.blocks[0].refs new_mgr.blocks[0].refs.add_reference( new_mgr.blocks[0] # type: ignore[arg-type] ) return self._constructor(new_mgr).__finalize__(self, method="swapaxes") elif (copy or copy is None) and self._mgr.is_single_block: new_values = new_values.copy() return self._constructor( new_values, *new_axes, # The no-copy case for CoW is handled above copy=False, ).__finalize__(self, method="swapaxes") def droplevel(self: NDFrameT, level: IndexLabel, axis: Axis = 0) -> NDFrameT: """ Return {klass} with requested index / column level(s) removed. Parameters ---------- level : int, str, or list-like If a string is given, must be the name of a level If list-like, elements must be names or positional indexes of levels. axis : {{0 or 'index', 1 or 'columns'}}, default 0 Axis along which the level(s) is removed: * 0 or 'index': remove level(s) in column. * 1 or 'columns': remove level(s) in row. For `Series` this parameter is unused and defaults to 0. Returns ------- {klass} {klass} with requested index / column level(s) removed. Examples -------- >>> df = pd.DataFrame([ ... [1, 2, 3, 4], ... [5, 6, 7, 8], ... [9, 10, 11, 12] ... ]).set_index([0, 1]).rename_axis(['a', 'b']) >>> df.columns = pd.MultiIndex.from_tuples([ ... ('c', 'e'), ('d', 'f') ... ], names=['level_1', 'level_2']) >>> df level_1 c d level_2 e f a b 1 2 3 4 5 6 7 8 9 10 11 12 >>> df.droplevel('a') level_1 c d level_2 e f b 2 3 4 6 7 8 10 11 12 >>> df.droplevel('level_2', axis=1) level_1 c d a b 1 2 3 4 5 6 7 8 9 10 11 12 """ labels = self._get_axis(axis) new_labels = labels.droplevel(level) return self.set_axis(new_labels, axis=axis, copy=None) def pop(self, item: Hashable) -> Series | Any: result = self[item] del self[item] return result def squeeze(self, axis: Axis | None = None): """ Squeeze 1 dimensional axis objects into scalars. Series or DataFrames with a single element are squeezed to a scalar. DataFrames with a single column or a single row are squeezed to a Series. Otherwise the object is unchanged. This method is most useful when you don't know if your object is a Series or DataFrame, but you do know it has just a single column. In that case you can safely call `squeeze` to ensure you have a Series. Parameters ---------- axis : {0 or 'index', 1 or 'columns', None}, default None A specific axis to squeeze. By default, all length-1 axes are squeezed. For `Series` this parameter is unused and defaults to `None`. Returns ------- DataFrame, Series, or scalar The projection after squeezing `axis` or all the axes. See Also -------- Series.iloc : Integer-location based indexing for selecting scalars. DataFrame.iloc : Integer-location based indexing for selecting Series. Series.to_frame : Inverse of DataFrame.squeeze for a single-column DataFrame. 
Examples -------- >>> primes = pd.Series([2, 3, 5, 7]) Slicing might produce a Series with a single value: >>> even_primes = primes[primes % 2 == 0] >>> even_primes 0 2 dtype: int64 >>> even_primes.squeeze() 2 Squeezing objects with more than one value in every axis does nothing: >>> odd_primes = primes[primes % 2 == 1] >>> odd_primes 1 3 2 5 3 7 dtype: int64 >>> odd_primes.squeeze() 1 3 2 5 3 7 dtype: int64 Squeezing is even more effective when used with DataFrames. >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b']) >>> df a b 0 1 2 1 3 4 Slicing a single column will produce a DataFrame with the columns having only one value: >>> df_a = df[['a']] >>> df_a a 0 1 1 3 So the columns can be squeezed down, resulting in a Series: >>> df_a.squeeze('columns') 0 1 1 3 Name: a, dtype: int64 Slicing a single row from a single column will produce a single scalar DataFrame: >>> df_0a = df.loc[df.index < 1, ['a']] >>> df_0a a 0 1 Squeezing the rows produces a single scalar Series: >>> df_0a.squeeze('rows') a 1 Name: 0, dtype: int64 Squeezing all axes will project directly into a scalar: >>> df_0a.squeeze() 1 """ axes = range(self._AXIS_LEN) if axis is None else (self._get_axis_number(axis),) return self.iloc[ tuple( 0 if i in axes and len(a) == 1 else slice(None) for i, a in enumerate(self.axes) ) ] # ---------------------------------------------------------------------- # Rename def _rename( self: NDFrameT, mapper: Renamer | None = None, *, index: Renamer | None = None, columns: Renamer | None = None, axis: Axis | None = None, copy: bool_t | None = None, inplace: bool_t = False, level: Level | None = None, errors: str = "ignore", ) -> NDFrameT | None: # called by Series.rename and DataFrame.rename if mapper is None and index is None and columns is None: raise TypeError("must pass an index to rename") if index is not None or columns is not None: if axis is not None: raise TypeError( "Cannot specify both 'axis' and any of 'index' or 'columns'" ) if mapper is not None: raise TypeError( "Cannot specify both 'mapper' and any of 'index' or 'columns'" ) else: # use the mapper argument if axis and self._get_axis_number(axis) == 1: columns = mapper else: index = mapper self._check_inplace_and_allows_duplicate_labels(inplace) result = self if inplace else self.copy(deep=copy and not using_copy_on_write()) for axis_no, replacements in enumerate((index, columns)): if replacements is None: continue ax = self._get_axis(axis_no) f = common.get_rename_function(replacements) if level is not None: level = ax._get_level_number(level) # GH 13473 if not callable(replacements): if ax._is_multi and level is not None: indexer = ax.get_level_values(level).get_indexer_for(replacements) else: indexer = ax.get_indexer_for(replacements) if errors == "raise" and len(indexer[indexer == -1]): missing_labels = [ label for index, label in enumerate(replacements) if indexer[index] == -1 ] raise KeyError(f"{missing_labels} not found in axis") new_index = ax._transform_index(f, level=level) result._set_axis_nocheck(new_index, axis=axis_no, inplace=True, copy=False) result._clear_item_cache() if inplace: self._update_inplace(result) return None else: return result.__finalize__(self, method="rename") def rename_axis( self: NDFrameT, mapper: IndexLabel | lib.NoDefault = ..., *, index=..., columns=..., axis: Axis = ..., copy: bool_t | None = ..., inplace: Literal[False] = ..., ) -> NDFrameT: ... 
def rename_axis( self, mapper: IndexLabel | lib.NoDefault = ..., *, index=..., columns=..., axis: Axis = ..., copy: bool_t | None = ..., inplace: Literal[True], ) -> None: ... def rename_axis( self: NDFrameT, mapper: IndexLabel | lib.NoDefault = ..., *, index=..., columns=..., axis: Axis = ..., copy: bool_t | None = ..., inplace: bool_t = ..., ) -> NDFrameT | None: ... def rename_axis( self: NDFrameT, mapper: IndexLabel | lib.NoDefault = lib.no_default, *, index=lib.no_default, columns=lib.no_default, axis: Axis = 0, copy: bool_t | None = None, inplace: bool_t = False, ) -> NDFrameT | None: """ Set the name of the axis for the index or columns. Parameters ---------- mapper : scalar, list-like, optional Value to set the axis name attribute. index, columns : scalar, list-like, dict-like or function, optional A scalar, list-like, dict-like or functions transformations to apply to that axis' values. Note that the ``columns`` parameter is not allowed if the object is a Series. This parameter only apply for DataFrame type objects. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and/or ``columns``. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to rename. For `Series` this parameter is unused and defaults to 0. copy : bool, default None Also copy underlying data. inplace : bool, default False Modifies the object directly, instead of creating a new Series or DataFrame. Returns ------- Series, DataFrame, or None The same type as the caller or None if ``inplace=True``. See Also -------- Series.rename : Alter Series index labels or name. DataFrame.rename : Alter DataFrame index labels or name. Index.rename : Set new names on index. Notes ----- ``DataFrame.rename_axis`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` The first calling convention will only modify the names of the index and/or the names of the Index object that is the columns. In this case, the parameter ``copy`` is ignored. The second calling convention will modify the names of the corresponding index if mapper is a list or a scalar. However, if mapper is dict-like or a function, it will use the deprecated behavior of modifying the axis *labels*. We *highly* recommend using keyword arguments to clarify your intent. Examples -------- **Series** >>> s = pd.Series(["dog", "cat", "monkey"]) >>> s 0 dog 1 cat 2 monkey dtype: object >>> s.rename_axis("animal") animal 0 dog 1 cat 2 monkey dtype: object **DataFrame** >>> df = pd.DataFrame({"num_legs": [4, 4, 2], ... "num_arms": [0, 0, 2]}, ... ["dog", "cat", "monkey"]) >>> df num_legs num_arms dog 4 0 cat 4 0 monkey 2 2 >>> df = df.rename_axis("animal") >>> df num_legs num_arms animal dog 4 0 cat 4 0 monkey 2 2 >>> df = df.rename_axis("limbs", axis="columns") >>> df limbs num_legs num_arms animal dog 4 0 cat 4 0 monkey 2 2 **MultiIndex** >>> df.index = pd.MultiIndex.from_product([['mammal'], ... ['dog', 'cat', 'monkey']], ... 
names=['type', 'name']) >>> df limbs num_legs num_arms type name mammal dog 4 0 cat 4 0 monkey 2 2 >>> df.rename_axis(index={'type': 'class'}) limbs num_legs num_arms class name mammal dog 4 0 cat 4 0 monkey 2 2 >>> df.rename_axis(columns=str.upper) LIMBS num_legs num_arms type name mammal dog 4 0 cat 4 0 monkey 2 2 """ axes = {"index": index, "columns": columns} if axis is not None: axis = self._get_axis_number(axis) inplace = validate_bool_kwarg(inplace, "inplace") if copy and using_copy_on_write(): copy = False if mapper is not lib.no_default: # Use v0.23 behavior if a scalar or list non_mapper = is_scalar(mapper) or ( is_list_like(mapper) and not is_dict_like(mapper) ) if non_mapper: return self._set_axis_name( mapper, axis=axis, inplace=inplace, copy=copy ) else: raise ValueError("Use `.rename` to alter labels with a mapper.") else: # Use new behavior. Means that index and/or columns # is specified result = self if inplace else self.copy(deep=copy) for axis in range(self._AXIS_LEN): v = axes.get(self._get_axis_name(axis)) if v is lib.no_default: continue non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v)) if non_mapper: newnames = v else: f = common.get_rename_function(v) curnames = self._get_axis(axis).names newnames = [f(name) for name in curnames] result._set_axis_name(newnames, axis=axis, inplace=True, copy=copy) if not inplace: return result return None def _set_axis_name( self, name, axis: Axis = 0, inplace: bool_t = False, copy: bool_t | None = True ): """ Set the name(s) of the axis. Parameters ---------- name : str or list of str Name(s) to set. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to set the label. The value 0 or 'index' specifies index, and the value 1 or 'columns' specifies columns. inplace : bool, default False If `True`, do operation inplace and return None. copy: Whether to make a copy of the result. Returns ------- Series, DataFrame, or None The same type as the caller or `None` if `inplace` is `True`. See Also -------- DataFrame.rename : Alter the axis labels of :class:`DataFrame`. Series.rename : Alter the index labels or set the index name of :class:`Series`. Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`. Examples -------- >>> df = pd.DataFrame({"num_legs": [4, 4, 2]}, ... ["dog", "cat", "monkey"]) >>> df num_legs dog 4 cat 4 monkey 2 >>> df._set_axis_name("animal") num_legs animal dog 4 cat 4 monkey 2 >>> df.index = pd.MultiIndex.from_product( ... [["mammal"], ['dog', 'cat', 'monkey']]) >>> df._set_axis_name(["type", "name"]) num_legs type name mammal dog 4 cat 4 monkey 2 """ axis = self._get_axis_number(axis) idx = self._get_axis(axis).set_names(name) inplace = validate_bool_kwarg(inplace, "inplace") renamed = self if inplace else self.copy(deep=copy) if axis == 0: renamed.index = idx else: renamed.columns = idx if not inplace: return renamed # ---------------------------------------------------------------------- # Comparison Methods def _indexed_same(self, other) -> bool_t: return all( self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS ) def equals(self, other: object) -> bool_t: """ Test whether two objects contain the same elements. This function allows two Series or DataFrames to be compared against each other to see if they have the same shape and elements. NaNs in the same location are considered equal. The row/column index do not need to have the same type, as long as the values are considered equal. Corresponding columns must be of the same dtype. 
Parameters ---------- other : Series or DataFrame The other Series or DataFrame to be compared with the first. Returns ------- bool True if all elements are the same in both objects, False otherwise. See Also -------- Series.eq : Compare two Series objects of the same length and return a Series where each element is True if the element in each Series is equal, False otherwise. DataFrame.eq : Compare two DataFrame objects of the same shape and return a DataFrame where each element is True if the respective element in each DataFrame is equal, False otherwise. testing.assert_series_equal : Raises an AssertionError if left and right are not equal. Provides an easy interface to ignore inequality in dtypes, indexes and precision among others. testing.assert_frame_equal : Like assert_series_equal, but targets DataFrames. numpy.array_equal : Return True if two arrays have the same shape and elements, False otherwise. Examples -------- >>> df = pd.DataFrame({1: [10], 2: [20]}) >>> df 1 2 0 10 20 DataFrames df and exactly_equal have the same types and values for their elements and column labels, which will return True. >>> exactly_equal = pd.DataFrame({1: [10], 2: [20]}) >>> exactly_equal 1 2 0 10 20 >>> df.equals(exactly_equal) True DataFrames df and different_column_type have the same element types and values, but have different types for the column labels, which will still return True. >>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]}) >>> different_column_type 1.0 2.0 0 10 20 >>> df.equals(different_column_type) True DataFrames df and different_data_type have different types for the same values for their elements, and will return False even though their column labels are the same values and types. >>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]}) >>> different_data_type 1 2 0 10.0 20.0 >>> df.equals(different_data_type) False """ if not (isinstance(other, type(self)) or isinstance(self, type(other))): return False other = cast(NDFrame, other) return self._mgr.equals(other._mgr) # ------------------------------------------------------------------------- # Unary Methods def __neg__(self: NDFrameT) -> NDFrameT: def blk_func(values: ArrayLike): if is_bool_dtype(values.dtype): # error: Argument 1 to "inv" has incompatible type "Union # [ExtensionArray, ndarray[Any, Any]]"; expected # "_SupportsInversion[ndarray[Any, dtype[bool_]]]" return operator.inv(values) # type: ignore[arg-type] else: # error: Argument 1 to "neg" has incompatible type "Union # [ExtensionArray, ndarray[Any, Any]]"; expected # "_SupportsNeg[ndarray[Any, dtype[Any]]]" return operator.neg(values) # type: ignore[arg-type] new_data = self._mgr.apply(blk_func) res = self._constructor(new_data) return res.__finalize__(self, method="__neg__") def __pos__(self: NDFrameT) -> NDFrameT: def blk_func(values: ArrayLike): if is_bool_dtype(values.dtype): return values.copy() else: # error: Argument 1 to "pos" has incompatible type "Union # [ExtensionArray, ndarray[Any, Any]]"; expected # "_SupportsPos[ndarray[Any, dtype[Any]]]" return operator.pos(values) # type: ignore[arg-type] new_data = self._mgr.apply(blk_func) res = self._constructor(new_data) return res.__finalize__(self, method="__pos__") def __invert__(self: NDFrameT) -> NDFrameT: if not self.size: # inv fails with 0 len return self.copy(deep=False) new_data = self._mgr.apply(operator.invert) return self._constructor(new_data).__finalize__(self, method="__invert__") def __nonzero__(self) -> NoReturn: raise ValueError( f"The truth value of a 
{type(self).__name__} is ambiguous. " "Use a.empty, a.bool(), a.item(), a.any() or a.all()." ) __bool__ = __nonzero__ def bool(self) -> bool_t: """ Return the bool of a single element Series or DataFrame. This must be a boolean scalar value, either True or False. It will raise a ValueError if the Series or DataFrame does not have exactly 1 element, or that element is not boolean (integer values 0 and 1 will also raise an exception). Returns ------- bool The value in the Series or DataFrame. See Also -------- Series.astype : Change the data type of a Series, including to boolean. DataFrame.astype : Change the data type of a DataFrame, including to boolean. numpy.bool_ : NumPy boolean data type, used by pandas for boolean values. Examples -------- The method will only work for single element objects with a boolean value: >>> pd.Series([True]).bool() True >>> pd.Series([False]).bool() False >>> pd.DataFrame({'col': [True]}).bool() True >>> pd.DataFrame({'col': [False]}).bool() False """ v = self.squeeze() if isinstance(v, (bool, np.bool_)): return bool(v) elif is_scalar(v): raise ValueError( "bool cannot act on a non-boolean single element " f"{type(self).__name__}" ) self.__nonzero__() # for mypy (__nonzero__ raises) return True def abs(self: NDFrameT) -> NDFrameT: """ Return a Series/DataFrame with absolute numeric value of each element. This function only applies to elements that are all numeric. Returns ------- abs Series/DataFrame containing the absolute value of each element. See Also -------- numpy.absolute : Calculate the absolute value element-wise. Notes ----- For ``complex`` inputs, ``1.2 + 1j``, the absolute value is :math:`\\sqrt{ a^2 + b^2 }`. Examples -------- Absolute numeric values in a Series. >>> s = pd.Series([-1.10, 2, -3.33, 4]) >>> s.abs() 0 1.10 1 2.00 2 3.33 3 4.00 dtype: float64 Absolute numeric values in a Series with complex numbers. >>> s = pd.Series([1.2 + 1j]) >>> s.abs() 0 1.56205 dtype: float64 Absolute numeric values in a Series with a Timedelta element. >>> s = pd.Series([pd.Timedelta('1 days')]) >>> s.abs() 0 1 days dtype: timedelta64[ns] Select rows with data closest to certain value using argsort (from `StackOverflow <https://stackoverflow.com/a/17758115>`__). >>> df = pd.DataFrame({ ... 'a': [4, 5, 6, 7], ... 'b': [10, 20, 30, 40], ... 'c': [100, 50, -30, -50] ... }) >>> df a b c 0 4 10 100 1 5 20 50 2 6 30 -30 3 7 40 -50 >>> df.loc[(df.c - 43).abs().argsort()] a b c 1 5 20 50 0 4 10 100 2 6 30 -30 3 7 40 -50 """ res_mgr = self._mgr.apply(np.abs) return self._constructor(res_mgr).__finalize__(self, name="abs") def __abs__(self: NDFrameT) -> NDFrameT: return self.abs() def __round__(self: NDFrameT, decimals: int = 0) -> NDFrameT: return self.round(decimals).__finalize__(self, method="__round__") # ------------------------------------------------------------------------- # Label or Level Combination Helpers # # A collection of helper methods for DataFrame/Series operations that # accept a combination of column/index labels and levels. All such # operations should utilize/extend these methods when possible so that we # have consistent precedence and validation logic throughout the library. def _is_level_reference(self, key: Level, axis: Axis = 0) -> bool_t: """ Test whether a key is a level reference for a given axis. To be considered a level reference, `key` must be a string that: - (axis=0): Matches the name of an index level and does NOT match a column label. - (axis=1): Matches the name of a column level and does NOT match an index label. 
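
        For example, if a frame's index is named ``'id'`` and no column is
        labeled ``'id'``, then ``'id'`` is a level reference for ``axis=0``.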
Parameters ---------- key : Hashable Potential level name for the given axis axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- is_level : bool """ axis_int = self._get_axis_number(axis) return ( key is not None and is_hashable(key) and key in self.axes[axis_int].names and not self._is_label_reference(key, axis=axis_int) ) def _is_label_reference(self, key: Level, axis: Axis = 0) -> bool_t: """ Test whether a key is a label reference for a given axis. To be considered a label reference, `key` must be a string that: - (axis=0): Matches a column label - (axis=1): Matches an index label Parameters ---------- key : Hashable Potential label name, i.e. Index entry. axis : int, default 0 Axis perpendicular to the axis that labels are associated with (0 means search for column labels, 1 means search for index labels) Returns ------- is_label: bool """ axis_int = self._get_axis_number(axis) other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis_int) return ( key is not None and is_hashable(key) and any(key in self.axes[ax] for ax in other_axes) ) def _is_label_or_level_reference(self, key: Level, axis: AxisInt = 0) -> bool_t: """ Test whether a key is a label or level reference for a given axis. To be considered either a label or a level reference, `key` must be a string that: - (axis=0): Matches a column label or an index level - (axis=1): Matches an index label or a column level Parameters ---------- key : Hashable Potential label or level name axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- bool """ return self._is_level_reference(key, axis=axis) or self._is_label_reference( key, axis=axis ) def _check_label_or_level_ambiguity(self, key: Level, axis: Axis = 0) -> None: """ Check whether `key` is ambiguous. By ambiguous, we mean that it matches both a level of the input `axis` and a label of the other axis. Parameters ---------- key : Hashable Label or level name. axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns). Raises ------ ValueError: `key` is ambiguous """ axis_int = self._get_axis_number(axis) other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis_int) if ( key is not None and is_hashable(key) and key in self.axes[axis_int].names and any(key in self.axes[ax] for ax in other_axes) ): # Build an informative and grammatical warning level_article, level_type = ( ("an", "index") if axis_int == 0 else ("a", "column") ) label_article, label_type = ( ("a", "column") if axis_int == 0 else ("an", "index") ) msg = ( f"'{key}' is both {level_article} {level_type} level and " f"{label_article} {label_type} label, which is ambiguous." ) raise ValueError(msg) def _get_label_or_level_values(self, key: Level, axis: AxisInt = 0) -> ArrayLike: """ Return a 1-D array of values associated with `key`, a label or level from the given `axis`. Retrieval logic: - (axis=0): Return column values if `key` matches a column label. Otherwise return index level values if `key` matches an index level. - (axis=1): Return row values if `key` matches an index label. Otherwise return column level values if 'key' matches a column level Parameters ---------- key : Hashable Label or level name. 
axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- np.ndarray or ExtensionArray Raises ------ KeyError if `key` matches neither a label nor a level ValueError if `key` matches multiple labels """ axis = self._get_axis_number(axis) other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis] if self._is_label_reference(key, axis=axis): self._check_label_or_level_ambiguity(key, axis=axis) values = self.xs(key, axis=other_axes[0])._values elif self._is_level_reference(key, axis=axis): values = self.axes[axis].get_level_values(key)._values else: raise KeyError(key) # Check for duplicates if values.ndim > 1: if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex): multi_message = ( "\n" "For a multi-index, the label must be a " "tuple with elements corresponding to each level." ) else: multi_message = "" label_axis_name = "column" if axis == 0 else "index" raise ValueError( f"The {label_axis_name} label '{key}' is not unique.{multi_message}" ) return values def _drop_labels_or_levels(self, keys, axis: AxisInt = 0): """ Drop labels and/or levels for the given `axis`. For each key in `keys`: - (axis=0): If key matches a column label then drop the column. Otherwise if key matches an index level then drop the level. - (axis=1): If key matches an index label then drop the row. Otherwise if key matches a column level then drop the level. Parameters ---------- keys : str or list of str labels or levels to drop axis : int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Returns ------- dropped: DataFrame Raises ------ ValueError if any `keys` match neither a label nor a level """ axis = self._get_axis_number(axis) # Validate keys keys = common.maybe_make_list(keys) invalid_keys = [ k for k in keys if not self._is_label_or_level_reference(k, axis=axis) ] if invalid_keys: raise ValueError( "The following keys are not valid labels or " f"levels for axis {axis}: {invalid_keys}" ) # Compute levels and labels to drop levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)] labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)] # Perform copy upfront and then use inplace operations below. # This ensures that we always perform exactly one copy. # ``copy`` and/or ``inplace`` options could be added in the future. dropped = self.copy(deep=False) if axis == 0: # Handle dropping index levels if levels_to_drop: dropped.reset_index(levels_to_drop, drop=True, inplace=True) # Handle dropping columns labels if labels_to_drop: dropped.drop(labels_to_drop, axis=1, inplace=True) else: # Handle dropping column levels if levels_to_drop: if isinstance(dropped.columns, MultiIndex): # Drop the specified levels from the MultiIndex dropped.columns = dropped.columns.droplevel(levels_to_drop) else: # Drop the last level of Index by replacing with # a RangeIndex dropped.columns = RangeIndex(dropped.columns.size) # Handle dropping index labels if labels_to_drop: dropped.drop(labels_to_drop, axis=0, inplace=True) return dropped # ---------------------------------------------------------------------- # Iteration # https://github.com/python/typeshed/issues/2148#issuecomment-520783318 # Incompatible types in assignment (expression has type "None", base class # "object" defined the type as "Callable[[object], int]") __hash__: ClassVar[None] # type: ignore[assignment] def __iter__(self) -> Iterator: """ Iterate over info axis. Returns ------- iterator Info axis as iterator. 
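
        Examples
        --------
        Iterating over a DataFrame yields its column labels:

        >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
        >>> list(df)
        ['A', 'B']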
""" return iter(self._info_axis) # can we get a better explanation of this? def keys(self) -> Index: """ Get the 'info axis' (see Indexing for more). This is index for Series, columns for DataFrame. Returns ------- Index Info axis. """ return self._info_axis def items(self): """ Iterate over (label, values) on info axis This is index for Series and columns for DataFrame. Returns ------- Generator """ for h in self._info_axis: yield h, self[h] def __len__(self) -> int: """Returns length of info axis""" return len(self._info_axis) def __contains__(self, key) -> bool_t: """True if the key is in the info axis""" return key in self._info_axis def empty(self) -> bool_t: """ Indicator whether Series/DataFrame is empty. True if Series/DataFrame is entirely empty (no items), meaning any of the axes are of length 0. Returns ------- bool If Series/DataFrame is empty, return True, if not return False. See Also -------- Series.dropna : Return series without null values. DataFrame.dropna : Return DataFrame with labels on given axis omitted where (all or any) data are missing. Notes ----- If Series/DataFrame contains only NaNs, it is still not considered empty. See the example below. Examples -------- An example of an actual empty DataFrame. Notice the index is empty: >>> df_empty = pd.DataFrame({'A' : []}) >>> df_empty Empty DataFrame Columns: [A] Index: [] >>> df_empty.empty True If we only have NaNs in our DataFrame, it is not considered empty! We will need to drop the NaNs to make the DataFrame empty: >>> df = pd.DataFrame({'A' : [np.nan]}) >>> df A 0 NaN >>> df.empty False >>> df.dropna().empty True >>> ser_empty = pd.Series({'A' : []}) >>> ser_empty A [] dtype: object >>> ser_empty.empty False >>> ser_empty = pd.Series() >>> ser_empty.empty True """ return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS) # ---------------------------------------------------------------------- # Array Interface # This is also set in IndexOpsMixin # GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented __array_priority__: int = 1000 def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: values = self._values arr = np.asarray(values, dtype=dtype) if ( astype_is_view(values.dtype, arr.dtype) and using_copy_on_write() and self._mgr.is_single_block ): # Check if both conversions can be done without a copy if astype_is_view(self.dtypes.iloc[0], values.dtype) and astype_is_view( values.dtype, arr.dtype ): arr = arr.view() arr.flags.writeable = False return arr def __array_ufunc__( self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any ): return arraylike.array_ufunc(self, ufunc, method, *inputs, **kwargs) # ---------------------------------------------------------------------- # Picklability def __getstate__(self) -> dict[str, Any]: meta = {k: getattr(self, k, None) for k in self._metadata} return { "_mgr": self._mgr, "_typ": self._typ, "_metadata": self._metadata, "attrs": self.attrs, "_flags": {k: self.flags[k] for k in self.flags._keys}, **meta, } def __setstate__(self, state) -> None: if isinstance(state, BlockManager): self._mgr = state elif isinstance(state, dict): if "_data" in state and "_mgr" not in state: # compat for older pickles state["_mgr"] = state.pop("_data") typ = state.get("_typ") if typ is not None: attrs = state.get("_attrs", {}) object.__setattr__(self, "_attrs", attrs) flags = state.get("_flags", {"allows_duplicate_labels": True}) object.__setattr__(self, "_flags", Flags(self, **flags)) # set in the order of internal names # to avoid definitional 
recursion
                # e.g. say fill_value needing _mgr to be
                # defined
                meta = set(self._internal_names + self._metadata)
                for k in list(meta):
                    if k in state and k != "_flags":
                        v = state[k]
                        object.__setattr__(self, k, v)

                for k, v in state.items():
                    if k not in meta:
                        object.__setattr__(self, k, v)

            else:
                raise NotImplementedError("Pre-0.12 pickles are no longer supported")
        elif len(state) == 2:
            raise NotImplementedError("Pre-0.12 pickles are no longer supported")

        self._item_cache: dict[Hashable, Series] = {}

    # ----------------------------------------------------------------------
    # Rendering Methods

    def __repr__(self) -> str:
        # string representation based upon iterating over self
        # (since, by definition, `PandasContainers` are iterable)
        prepr = f"[{','.join(map(pprint_thing, self))}]"
        return f"{type(self).__name__}({prepr})"

    def _repr_latex_(self):
        """
        Returns a LaTeX representation for a particular object.
        Mainly for use with nbconvert (jupyter notebook conversion to pdf).
        """
        if config.get_option("styler.render.repr") == "latex":
            return self.to_latex()
        else:
            return None

    def _repr_data_resource_(self):
        """
        Not a real Jupyter special repr method, but we use the same
        naming convention.
        """
        if config.get_option("display.html.table_schema"):
            data = self.head(config.get_option("display.max_rows"))

            as_json = data.to_json(orient="table")
            as_json = cast(str, as_json)
            return loads(as_json, object_pairs_hook=collections.OrderedDict)

    # ----------------------------------------------------------------------
    # I/O Methods

    @doc(
        klass="object",
        storage_options=_shared_docs["storage_options"],
        storage_options_versionadded="1.2.0",
    )
    def to_excel(
        self,
        excel_writer,
        sheet_name: str = "Sheet1",
        na_rep: str = "",
        float_format: str | None = None,
        columns: Sequence[Hashable] | None = None,
        header: Sequence[Hashable] | bool_t = True,
        index: bool_t = True,
        index_label: IndexLabel = None,
        startrow: int = 0,
        startcol: int = 0,
        engine: str | None = None,
        merge_cells: bool_t = True,
        inf_rep: str = "inf",
        freeze_panes: tuple[int, int] | None = None,
        storage_options: StorageOptions = None,
    ) -> None:
        """
        Write {klass} to an Excel sheet.

        To write a single {klass} to an Excel .xlsx file it is only necessary to
        specify a target file name. To write to multiple sheets it is necessary to
        create an `ExcelWriter` object with a target file name, and specify a sheet
        in the file to write to.

        Multiple sheets may be written to by specifying unique `sheet_name`.
        With all data written to the file it is necessary to save the changes.
        Note that creating an `ExcelWriter` object with a file name that already
        exists will result in the contents of the existing file being erased.

        Parameters
        ----------
        excel_writer : path-like, file-like, or ExcelWriter object
            File path or existing ExcelWriter.
        sheet_name : str, default 'Sheet1'
            Name of sheet which will contain DataFrame.
        na_rep : str, default ''
            Missing data representation.
        float_format : str, optional
            Format string for floating point numbers. For example
            ``float_format="%.2f"`` will format 0.1234 to 0.12.
        columns : sequence or list of str, optional
            Columns to write.
        header : bool or list of str, default True
            Write out the column names. If a list of string is given it is
            assumed to be aliases for the column names.
        index : bool, default True
            Write row names (index).
        index_label : str or sequence, optional
            Column label for index column(s) if desired. If not specified, and
            `header` and `index` are True, then the index names are used. A
            sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0 Upper left cell row to dump data frame. startcol : int, default 0 Upper left cell column to dump data frame. engine : str, optional Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this via the options ``io.excel.xlsx.writer`` or ``io.excel.xlsm.writer``. merge_cells : bool, default True Write MultiIndex and Hierarchical Rows as merged cells. inf_rep : str, default 'inf' Representation for infinity (there is no native representation for infinity in Excel). freeze_panes : tuple of int (length 2), optional Specifies the one-based bottommost row and rightmost column that is to be frozen. {storage_options} .. versionadded:: {storage_options_versionadded} See Also -------- to_csv : Write DataFrame to a comma-separated values (csv) file. ExcelWriter : Class for writing DataFrame objects into excel sheets. read_excel : Read an Excel file into a pandas DataFrame. read_csv : Read a comma-separated values (csv) file into DataFrame. io.formats.style.Styler.to_excel : Add styles to Excel sheet. Notes ----- For compatibility with :meth:`~DataFrame.to_csv`, to_excel serializes lists and dicts to strings before writing. Once a workbook has been saved it is not possible to write further data without rewriting the whole workbook. Examples -------- Create, write to and save a workbook: >>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']], ... index=['row 1', 'row 2'], ... columns=['col 1', 'col 2']) >>> df1.to_excel("output.xlsx") # doctest: +SKIP To specify the sheet name: >>> df1.to_excel("output.xlsx", ... sheet_name='Sheet_name_1') # doctest: +SKIP If you wish to write to more than one sheet in the workbook, it is necessary to specify an ExcelWriter object: >>> df2 = df1.copy() >>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP ... df1.to_excel(writer, sheet_name='Sheet_name_1') ... df2.to_excel(writer, sheet_name='Sheet_name_2') ExcelWriter can also be used to append to an existing Excel file: >>> with pd.ExcelWriter('output.xlsx', ... mode='a') as writer: # doctest: +SKIP ... df.to_excel(writer, sheet_name='Sheet_name_3') To set the library that is used to write the Excel file, you can pass the `engine` keyword (the default engine is automatically chosen depending on the file extension): >>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP """ df = self if isinstance(self, ABCDataFrame) else self.to_frame() from pandas.io.formats.excel import ExcelFormatter formatter = ExcelFormatter( df, na_rep=na_rep, cols=columns, header=header, float_format=float_format, index=index, index_label=index_label, merge_cells=merge_cells, inf_rep=inf_rep, ) formatter.write( excel_writer, sheet_name=sheet_name, startrow=startrow, startcol=startcol, freeze_panes=freeze_panes, engine=engine, storage_options=storage_options, ) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buf", ) def to_json( self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, orient: str | None = None, date_format: str | None = None, double_precision: int = 10, force_ascii: bool_t = True, date_unit: str = "ms", default_handler: Callable[[Any], JSONSerializable] | None = None, lines: bool_t = False, compression: CompressionOptions = "infer", index: bool_t = True, indent: int | None = None, storage_options: StorageOptions = None, mode: Literal["a", "w"] = "w", ) -> str | None: """ Convert the object to a JSON string. 
Note NaN's and None will be converted to null and datetime objects will be converted to UNIX timestamps. Parameters ---------- path_or_buf : str, path object, file-like object, or None, default None String, path object (implementing os.PathLike[str]), or file-like object implementing a write() function. If None, the result is returned as a string. orient : str Indication of expected JSON string format. * Series: - default is 'index' - allowed values are: {{'split', 'records', 'index', 'table'}}. * DataFrame: - default is 'columns' - allowed values are: {{'split', 'records', 'index', 'columns', 'values', 'table'}}. * The format of the JSON string: - 'split' : dict like {{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}} - 'records' : list like [{{column -> value}}, ... , {{column -> value}}] - 'index' : dict like {{index -> {{column -> value}}}} - 'columns' : dict like {{column -> {{index -> value}}}} - 'values' : just the values array - 'table' : dict like {{'schema': {{schema}}, 'data': {{data}}}} Describing the data, where data component is like ``orient='records'``. date_format : {{None, 'epoch', 'iso'}} Type of date conversion. 'epoch' = epoch milliseconds, 'iso' = ISO8601. The default depends on the `orient`. For ``orient='table'``, the default is 'iso'. For all other orients, the default is 'epoch'. double_precision : int, default 10 The number of decimal places to use when encoding floating point values. force_ascii : bool, default True Force encoded string to be ASCII. date_unit : str, default 'ms' (milliseconds) The time unit to encode to, governs timestamp and ISO8601 precision. One of 's', 'ms', 'us', 'ns' for second, millisecond, microsecond, and nanosecond respectively. default_handler : callable, default None Handler to call if object cannot otherwise be converted to a suitable format for JSON. Should receive a single argument which is the object to convert and return a serialisable object. lines : bool, default False If 'orient' is 'records' write out line-delimited json format. Will throw ValueError if incorrect 'orient' since others are not list-like. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. index : bool, default True Whether to include the index values in the JSON string. Not including the index (``index=False``) is only supported when orient is 'split' or 'table'. indent : int, optional Length of whitespace used to indent each record. {storage_options} .. versionadded:: 1.2.0 mode : str, default 'w' (writing) Specify the IO mode for output when supplying a path_or_buf. Accepted args are 'w' (writing) and 'a' (append) only. mode='a' is only supported when lines is True and orient is 'records'. Returns ------- None or str If path_or_buf is None, returns the resulting json format as a string. Otherwise returns None. See Also -------- read_json : Convert a JSON string to pandas object. Notes ----- The behavior of ``indent=0`` varies from the stdlib, which does not indent the output but does insert newlines. Currently, ``indent=0`` and the default ``indent=None`` are equivalent in pandas, though this may change in a future release. ``orient='table'`` contains a 'pandas_version' field under 'schema'. This stores the version of `pandas` used in the latest revision of the schema. Examples -------- >>> from json import loads, dumps >>> df = pd.DataFrame( ... [["a", "b"], ["c", "d"]], ... index=["row 1", "row 2"], ... columns=["col 1", "col 2"], ... 
) >>> result = df.to_json(orient="split") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "columns": [ "col 1", "col 2" ], "index": [ "row 1", "row 2" ], "data": [ [ "a", "b" ], [ "c", "d" ] ] }} Encoding/decoding a Dataframe using ``'records'`` formatted JSON. Note that index labels are not preserved with this encoding. >>> result = df.to_json(orient="records") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP [ {{ "col 1": "a", "col 2": "b" }}, {{ "col 1": "c", "col 2": "d" }} ] Encoding/decoding a Dataframe using ``'index'`` formatted JSON: >>> result = df.to_json(orient="index") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "row 1": {{ "col 1": "a", "col 2": "b" }}, "row 2": {{ "col 1": "c", "col 2": "d" }} }} Encoding/decoding a Dataframe using ``'columns'`` formatted JSON: >>> result = df.to_json(orient="columns") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "col 1": {{ "row 1": "a", "row 2": "c" }}, "col 2": {{ "row 1": "b", "row 2": "d" }} }} Encoding/decoding a Dataframe using ``'values'`` formatted JSON: >>> result = df.to_json(orient="values") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP [ [ "a", "b" ], [ "c", "d" ] ] Encoding with Table Schema: >>> result = df.to_json(orient="table") >>> parsed = loads(result) >>> dumps(parsed, indent=4) # doctest: +SKIP {{ "schema": {{ "fields": [ {{ "name": "index", "type": "string" }}, {{ "name": "col 1", "type": "string" }}, {{ "name": "col 2", "type": "string" }} ], "primaryKey": [ "index" ], "pandas_version": "1.4.0" }}, "data": [ {{ "index": "row 1", "col 1": "a", "col 2": "b" }}, {{ "index": "row 2", "col 1": "c", "col 2": "d" }} ] }} """ from pandas.io import json if date_format is None and orient == "table": date_format = "iso" elif date_format is None: date_format = "epoch" config.is_nonnegative_int(indent) indent = indent or 0 return json.to_json( path_or_buf=path_or_buf, obj=self, orient=orient, date_format=date_format, double_precision=double_precision, force_ascii=force_ascii, date_unit=date_unit, default_handler=default_handler, lines=lines, compression=compression, index=index, indent=indent, storage_options=storage_options, mode=mode, ) def to_hdf( self, path_or_buf: FilePath | HDFStore, key: str, mode: str = "a", complevel: int | None = None, complib: str | None = None, append: bool_t = False, format: str | None = None, index: bool_t = True, min_itemsize: int | dict[str, int] | None = None, nan_rep=None, dropna: bool_t | None = None, data_columns: Literal[True] | list[str] | None = None, errors: str = "strict", encoding: str = "UTF-8", ) -> None: """ Write the contained data to an HDF5 file using HDFStore. Hierarchical Data Format (HDF) is self-describing, allowing an application to interpret the structure and contents of a file with no outside information. One HDF file can hold a mix of related objects which can be accessed as a group or as individual objects. In order to add another DataFrame or Series to an existing HDF file please use append mode and a different a key. .. warning:: One can store a subclass of ``DataFrame`` or ``Series`` to HDF5, but the type of the subclass is lost upon storing. For more information see the :ref:`user guide <io.hdf5>`. Parameters ---------- path_or_buf : str or pandas.HDFStore File path or HDFStore object. key : str Identifier for the group in the store. 
mode : {'a', 'w', 'r+'}, default 'a' Mode to open file: - 'w': write, a new file is created (an existing file with the same name would be deleted). - 'a': append, an existing file is opened for reading and writing, and if the file does not exist it is created. - 'r+': similar to 'a', but the file must already exist. complevel : {0-9}, default None Specifies a compression level for data. A value of 0 or None disables compression. complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib' Specifies the compression library to be used. As of v0.20.2 these additional compressors for Blosc are supported (default if no compressor specified: 'blosc:blosclz'): {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy', 'blosc:zlib', 'blosc:zstd'}. Specifying a compression library which is not available issues a ValueError. append : bool, default False For Table formats, append the input data to the existing. format : {'fixed', 'table', None}, default 'fixed' Possible values: - 'fixed': Fixed format. Fast writing/reading. Not-appendable, nor searchable. - 'table': Table format. Write as a PyTables Table structure which may perform worse but allow more flexible operations like searching / selecting subsets of the data. - If None, pd.get_option('io.hdf.default_format') is checked, followed by fallback to "fixed". index : bool, default True Write DataFrame index as a column. min_itemsize : dict or int, optional Map column names to minimum string sizes for columns. nan_rep : Any, optional How to represent null values as str. Not allowed with append=True. dropna : bool, default False, optional Remove missing values. data_columns : list of columns or True, optional List of columns to create as indexed data columns for on-disk queries, or True to use all columns. By default only the axes of the object are indexed. See :ref:`Query via data columns<io.hdf5-query-data-columns>`. for more information. Applicable only to format='table'. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. encoding : str, default "UTF-8" See Also -------- read_hdf : Read from HDF file. DataFrame.to_orc : Write a DataFrame to the binary orc format. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. DataFrame.to_sql : Write to a SQL table. DataFrame.to_feather : Write out feather-format for DataFrames. DataFrame.to_csv : Write out to a csv file. Examples -------- >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}, ... 
index=['a', 'b', 'c']) # doctest: +SKIP >>> df.to_hdf('data.h5', key='df', mode='w') # doctest: +SKIP We can add another object to the same file: >>> s = pd.Series([1, 2, 3, 4]) # doctest: +SKIP >>> s.to_hdf('data.h5', key='s') # doctest: +SKIP Reading from HDF file: >>> pd.read_hdf('data.h5', 'df') # doctest: +SKIP A B a 1 4 b 2 5 c 3 6 >>> pd.read_hdf('data.h5', 's') # doctest: +SKIP 0 1 1 2 2 3 3 4 dtype: int64 """ from pandas.io import pytables # Argument 3 to "to_hdf" has incompatible type "NDFrame"; expected # "Union[DataFrame, Series]" [arg-type] pytables.to_hdf( path_or_buf, key, self, # type: ignore[arg-type] mode=mode, complevel=complevel, complib=complib, append=append, format=format, index=index, min_itemsize=min_itemsize, nan_rep=nan_rep, dropna=dropna, data_columns=data_columns, errors=errors, encoding=encoding, ) def to_sql( self, name: str, con, schema: str | None = None, if_exists: Literal["fail", "replace", "append"] = "fail", index: bool_t = True, index_label: IndexLabel = None, chunksize: int | None = None, dtype: DtypeArg | None = None, method: str | None = None, ) -> int | None: """ Write records stored in a DataFrame to a SQL database. Databases supported by SQLAlchemy [1]_ are supported. Tables can be newly created, appended to, or overwritten. Parameters ---------- name : str Name of SQL table. con : sqlalchemy.engine.(Engine or Connection) or sqlite3.Connection Using SQLAlchemy makes it possible to use any DB supported by that library. Legacy support is provided for sqlite3.Connection objects. The user is responsible for engine disposal and connection closure for the SQLAlchemy connectable. See `here \ <https://docs.sqlalchemy.org/en/20/core/connections.html>`_. If passing a sqlalchemy.engine.Connection which is already in a transaction, the transaction will not be committed. If passing a sqlite3.Connection, it will not be possible to roll back the record insertion. schema : str, optional Specify the schema (if database flavor supports this). If None, use default schema. if_exists : {'fail', 'replace', 'append'}, default 'fail' How to behave if the table already exists. * fail: Raise a ValueError. * replace: Drop the table before inserting new values. * append: Insert new values to the existing table. index : bool, default True Write DataFrame index as a column. Uses `index_label` as the column name in the table. index_label : str or sequence, default None Column label for index column(s). If None is given (default) and `index` is True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. chunksize : int, optional Specify the number of rows in each batch to be written at a time. By default, all rows will be written at once. dtype : dict or scalar, optional Specifying the datatype for columns. If a dictionary is used, the keys should be the column names and the values should be the SQLAlchemy types or strings for the sqlite3 legacy mode. If a scalar is provided, it will be applied to all columns. method : {None, 'multi', callable}, optional Controls the SQL insertion clause used: * None : Uses standard SQL ``INSERT`` clause (one per row). * 'multi': Pass multiple values in a single ``INSERT`` clause. * callable with signature ``(pd_table, conn, keys, data_iter)``. Details and a sample callable implementation can be found in the section :ref:`insert method <io.sql.method>`. Returns ------- None or int Number of rows affected by to_sql. 
None is returned if the callable passed into ``method`` does not return an integer number of rows. The number of returned rows affected is the sum of the ``rowcount`` attribute of ``sqlite3.Cursor`` or SQLAlchemy connectable which may not reflect the exact number of written rows as stipulated in the `sqlite3 <https://docs.python.org/3/library/sqlite3.html#sqlite3.Cursor.rowcount>`__ or `SQLAlchemy <https://docs.sqlalchemy.org/en/20/core/connections.html#sqlalchemy.engine.CursorResult.rowcount>`__. .. versionadded:: 1.4.0 Raises ------ ValueError When the table already exists and `if_exists` is 'fail' (the default). See Also -------- read_sql : Read a DataFrame from a table. Notes ----- Timezone aware datetime columns will be written as ``Timestamp with timezone`` type with SQLAlchemy if supported by the database. Otherwise, the datetimes will be stored as timezone unaware timestamps local to the original timezone. References ---------- .. [1] https://docs.sqlalchemy.org .. [2] https://www.python.org/dev/peps/pep-0249/ Examples -------- Create an in-memory SQLite database. >>> from sqlalchemy import create_engine >>> engine = create_engine('sqlite://', echo=False) Create a table from scratch with 3 rows. >>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']}) >>> df name 0 User 1 1 User 2 2 User 3 >>> df.to_sql('users', con=engine) 3 >>> from sqlalchemy import text >>> with engine.connect() as conn: ... conn.execute(text("SELECT * FROM users")).fetchall() [(0, 'User 1'), (1, 'User 2'), (2, 'User 3')] An `sqlalchemy.engine.Connection` can also be passed to `con`: >>> with engine.begin() as connection: ... df1 = pd.DataFrame({'name' : ['User 4', 'User 5']}) ... df1.to_sql('users', con=connection, if_exists='append') 2 This is allowed to support operations that require that the same DBAPI connection is used for the entire operation. >>> df2 = pd.DataFrame({'name' : ['User 6', 'User 7']}) >>> df2.to_sql('users', con=engine, if_exists='append') 2 >>> with engine.connect() as conn: ... conn.execute(text("SELECT * FROM users")).fetchall() [(0, 'User 1'), (1, 'User 2'), (2, 'User 3'), (0, 'User 4'), (1, 'User 5'), (0, 'User 6'), (1, 'User 7')] Overwrite the table with just ``df2``. >>> df2.to_sql('users', con=engine, if_exists='replace', ... index_label='id') 2 >>> with engine.connect() as conn: ... conn.execute(text("SELECT * FROM users")).fetchall() [(0, 'User 6'), (1, 'User 7')] Specify the dtype (especially useful for integers with missing values). Notice that while pandas is forced to store the data as floating point, the database supports nullable integers. When fetching the data with Python, we get back integer scalars. >>> df = pd.DataFrame({"A": [1, None, 2]}) >>> df A 0 1.0 1 NaN 2 2.0 >>> from sqlalchemy.types import Integer >>> df.to_sql('integers', con=engine, index=False, ... dtype={"A": Integer()}) 3 >>> with engine.connect() as conn: ... conn.execute(text("SELECT * FROM integers")).fetchall() [(1,), (None,), (2,)] """ # noqa:E501 from pandas.io import sql return sql.to_sql( self, name, con, schema=schema, if_exists=if_exists, index=index, index_label=index_label, chunksize=chunksize, dtype=dtype, method=method, ) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path", ) def to_pickle( self, path: FilePath | WriteBuffer[bytes], compression: CompressionOptions = "infer", protocol: int = pickle.HIGHEST_PROTOCOL, storage_options: StorageOptions = None, ) -> None: """ Pickle (serialize) object to file. 
Parameters ---------- path : str, path object, or file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. File path where the pickled object will be stored. {compression_options} protocol : int Int which indicates which protocol should be used by the pickler, default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible values are 0, 1, 2, 3, 4, 5. A negative value for the protocol parameter is equivalent to setting its value to HIGHEST_PROTOCOL. .. [1] https://docs.python.org/3/library/pickle.html. {storage_options} .. versionadded:: 1.2.0 See Also -------- read_pickle : Load pickled pandas object (or any object) from file. DataFrame.to_hdf : Write DataFrame to an HDF5 file. DataFrame.to_sql : Write DataFrame to a SQL database. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Examples -------- >>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}}) # doctest: +SKIP >>> original_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> original_df.to_pickle("./dummy.pkl") # doctest: +SKIP >>> unpickled_df = pd.read_pickle("./dummy.pkl") # doctest: +SKIP >>> unpickled_df # doctest: +SKIP foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 """ # noqa: E501 from pandas.io.pickle import to_pickle to_pickle( self, path, compression=compression, protocol=protocol, storage_options=storage_options, ) def to_clipboard( self, excel: bool_t = True, sep: str | None = None, **kwargs ) -> None: r""" Copy object to the system clipboard. Write a text representation of object to the system clipboard. This can be pasted into Excel, for example. Parameters ---------- excel : bool, default True Produce output in a csv format for easy pasting into excel. - True, use the provided separator for csv pasting. - False, write a string representation of the object to the clipboard. sep : str, default ``'\t'`` Field delimiter. **kwargs These parameters will be passed to DataFrame.to_csv. See Also -------- DataFrame.to_csv : Write a DataFrame to a comma-separated values (csv) file. read_clipboard : Read text from clipboard and pass to read_csv. Notes ----- Requirements for your platform. - Linux : `xclip`, or `xsel` (with `PyQt4` modules) - Windows : none - macOS : none This method uses the processes developed for the package `pyperclip`. A solution to render any output string format is given in the examples. Examples -------- Copy the contents of a DataFrame to the clipboard. >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) >>> df.to_clipboard(sep=',') # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # ,A,B,C ... # 0,1,2,3 ... # 1,4,5,6 We can omit the index by passing the keyword `index` and setting it to false. >>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # A,B,C ... # 1,2,3 ... # 4,5,6 Using the original `pyperclip` package for any string output format. .. code-block:: python import pyperclip html = df.style.to_html() pyperclip.copy(html) """ from pandas.io import clipboards clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs) def to_xarray(self): """ Return an xarray object from the pandas object. Returns ------- xarray.DataArray or xarray.Dataset Data in the pandas structure converted to Dataset if the object is a DataFrame, or a DataArray if the object is a Series. See Also -------- DataFrame.to_hdf : Write DataFrame to an HDF5 file. 
DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Notes ----- See the `xarray docs <https://xarray.pydata.org/en/stable/>`__ Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2), ... ('parrot', 'bird', 24.0, 2), ... ('lion', 'mammal', 80.5, 4), ... ('monkey', 'mammal', np.nan, 4)], ... columns=['name', 'class', 'max_speed', ... 'num_legs']) >>> df name class max_speed num_legs 0 falcon bird 389.0 2 1 parrot bird 24.0 2 2 lion mammal 80.5 4 3 monkey mammal NaN 4 >>> df.to_xarray() <xarray.Dataset> Dimensions: (index: 4) Coordinates: * index (index) int64 0 1 2 3 Data variables: name (index) object 'falcon' 'parrot' 'lion' 'monkey' class (index) object 'bird' 'bird' 'mammal' 'mammal' max_speed (index) float64 389.0 24.0 80.5 nan num_legs (index) int64 2 2 4 4 >>> df['max_speed'].to_xarray() <xarray.DataArray 'max_speed' (index: 4)> array([389. , 24. , 80.5, nan]) Coordinates: * index (index) int64 0 1 2 3 >>> dates = pd.to_datetime(['2018-01-01', '2018-01-01', ... '2018-01-02', '2018-01-02']) >>> df_multiindex = pd.DataFrame({'date': dates, ... 'animal': ['falcon', 'parrot', ... 'falcon', 'parrot'], ... 'speed': [350, 18, 361, 15]}) >>> df_multiindex = df_multiindex.set_index(['date', 'animal']) >>> df_multiindex speed date animal 2018-01-01 falcon 350 parrot 18 2018-01-02 falcon 361 parrot 15 >>> df_multiindex.to_xarray() <xarray.Dataset> Dimensions: (date: 2, animal: 2) Coordinates: * date (date) datetime64[ns] 2018-01-01 2018-01-02 * animal (animal) object 'falcon' 'parrot' Data variables: speed (date, animal) int64 350 18 361 15 """ xarray = import_optional_dependency("xarray") if self.ndim == 1: return xarray.DataArray.from_series(self) else: return xarray.Dataset.from_dataframe(self) def to_latex( self, buf: None = ..., columns: Sequence[Hashable] | None = ..., header: bool_t | Sequence[str] = ..., index: bool_t = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool_t | None = ..., index_names: bool_t = ..., bold_rows: bool_t = ..., column_format: str | None = ..., longtable: bool_t | None = ..., escape: bool_t | None = ..., encoding: str | None = ..., decimal: str = ..., multicolumn: bool_t | None = ..., multicolumn_format: str | None = ..., multirow: bool_t | None = ..., caption: str | tuple[str, str] | None = ..., label: str | None = ..., position: str | None = ..., ) -> str: ... def to_latex( self, buf: FilePath | WriteBuffer[str], columns: Sequence[Hashable] | None = ..., header: bool_t | Sequence[str] = ..., index: bool_t = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool_t | None = ..., index_names: bool_t = ..., bold_rows: bool_t = ..., column_format: str | None = ..., longtable: bool_t | None = ..., escape: bool_t | None = ..., encoding: str | None = ..., decimal: str = ..., multicolumn: bool_t | None = ..., multicolumn_format: str | None = ..., multirow: bool_t | None = ..., caption: str | tuple[str, str] | None = ..., label: str | None = ..., position: str | None = ..., ) -> None: ... 
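    # A minimal usage sketch of the contract encoded by the two stubs above
    # (illustrative only, not part of the library source): with ``buf=None``
    # the rendered LaTeX is returned as ``str``; with a path or writable
    # buffer the output is written and ``None`` is returned.
    #
    #   df = pd.DataFrame({"name": ["Raphael"], "age": [26]})
    #   tex = df.to_latex(index=False)         # LaTeX source returned as str
    #   df.to_latex("table.tex", index=False)  # file written, returns None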
def to_latex( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[Hashable] | None = None, header: bool_t | Sequence[str] = True, index: bool_t = True, na_rep: str = "NaN", formatters: FormattersType | None = None, float_format: FloatFormatType | None = None, sparsify: bool_t | None = None, index_names: bool_t = True, bold_rows: bool_t = False, column_format: str | None = None, longtable: bool_t | None = None, escape: bool_t | None = None, encoding: str | None = None, decimal: str = ".", multicolumn: bool_t | None = None, multicolumn_format: str | None = None, multirow: bool_t | None = None, caption: str | tuple[str, str] | None = None, label: str | None = None, position: str | None = None, ) -> str | None: r""" Render object to a LaTeX tabular, longtable, or nested table. Requires ``\usepackage{{booktabs}}``. The output can be copy/pasted into a main LaTeX document or read from an external file with ``\input{{table.tex}}``. .. versionchanged:: 1.2.0 Added position argument, changed meaning of caption argument. .. versionchanged:: 2.0.0 Refactored to use the Styler implementation via jinja2 templating. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. columns : list of label, optional The subset of columns to write. Writes all columns by default. header : bool or list of str, default True Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names. index : bool, default True Write row names (index). na_rep : str, default 'NaN' Missing data representation. formatters : list of functions or dict of {{str: function}}, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function or str, optional, default None Formatter for floating point numbers. For example ``float_format="%.2f"`` and ``float_format="{{:0.2f}}".format`` will both result in 0.1234 being formatted as 0.12. sparsify : bool, optional Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. By default, the value will be read from the config module. index_names : bool, default True Prints the names of the indexes. bold_rows : bool, default False Make the row labels bold in the output. column_format : str, optional The columns format as specified in `LaTeX table format <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3 columns. By default, 'l' will be used for all columns except columns of numbers, which default to 'r'. longtable : bool, optional Use a longtable environment instead of tabular. Requires adding a \usepackage{{longtable}} to your LaTeX preamble. By default, the value will be read from the pandas config module, and set to `True` if the option ``styler.latex.environment`` is `"longtable"`. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed. escape : bool, optional By default, the value will be read from the pandas config module and set to `True` if the option ``styler.format.escape`` is `"latex"`. When set to False prevents from escaping latex special characters in column names. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed, as has the default value to `False`. encoding : str, optional A string representing the encoding to use in the output file, defaults to 'utf-8'. 
decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. multicolumn : bool, default True Use \multicolumn to enhance MultiIndex columns. The default will be read from the config module, and is set as the option ``styler.sparse.columns``. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed. multicolumn_format : str, default 'r' The alignment for multicolumns, similar to `column_format` The default will be read from the config module, and is set as the option ``styler.latex.multicol_align``. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed, as has the default value to "r". multirow : bool, default True Use \multirow to enhance MultiIndex rows. Requires adding a \usepackage{{multirow}} to your LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained rows, separating groups via clines. The default will be read from the pandas config module, and is set as the option ``styler.sparse.index``. .. versionchanged:: 2.0.0 The pandas option affecting this argument has changed, as has the default value to `True`. caption : str or tuple, optional Tuple (full_caption, short_caption), which results in ``\caption[short_caption]{{full_caption}}``; if a single string is passed, no short caption will be set. .. versionchanged:: 1.2.0 Optionally allow caption to be a tuple ``(full_caption, short_caption)``. label : str, optional The LaTeX label to be placed inside ``\label{{}}`` in the output. This is used with ``\ref{{}}`` in the main ``.tex`` file. position : str, optional The LaTeX positional argument for tables, to be placed after ``\begin{{}}`` in the output. .. versionadded:: 1.2.0 Returns ------- str or None If buf is None, returns the result as a string. Otherwise returns None. See Also -------- io.formats.style.Styler.to_latex : Render a DataFrame to LaTeX with conditional formatting. DataFrame.to_string : Render a DataFrame to a console-friendly tabular output. DataFrame.to_html : Render a DataFrame as an HTML table. Notes ----- As of v2.0.0 this method has changed to use the Styler implementation as part of :meth:`.Styler.to_latex` via ``jinja2`` templating. This means that ``jinja2`` is a requirement, and needs to be installed, for this method to function. It is advised that users switch to using Styler, since that implementation is more frequently updated and contains much more flexibility with the output. Examples -------- Convert a general DataFrame to LaTeX with formatting: >>> df = pd.DataFrame(dict(name=['Raphael', 'Donatello'], ... age=[26, 45], ... height=[181.23, 177.65])) >>> print(df.to_latex(index=False, ... formatters={"name": str.upper}, ... float_format="{:.1f}".format, ... 
)) # doctest: +SKIP \begin{tabular}{lrr} \toprule name & age & height \\ \midrule RAPHAEL & 26 & 181.2 \\ DONATELLO & 45 & 177.7 \\ \bottomrule \end{tabular} """ # Get defaults from the pandas config if self.ndim == 1: self = self.to_frame() if longtable is None: longtable = config.get_option("styler.latex.environment") == "longtable" if escape is None: escape = config.get_option("styler.format.escape") == "latex" if multicolumn is None: multicolumn = config.get_option("styler.sparse.columns") if multicolumn_format is None: multicolumn_format = config.get_option("styler.latex.multicol_align") if multirow is None: multirow = config.get_option("styler.sparse.index") if column_format is not None and not isinstance(column_format, str): raise ValueError("`column_format` must be str or unicode") length = len(self.columns) if columns is None else len(columns) if isinstance(header, (list, tuple)) and len(header) != length: raise ValueError(f"Writing {length} cols but got {len(header)} aliases") # Refactor formatters/float_format/decimal/na_rep/escape to Styler structure base_format_ = { "na_rep": na_rep, "escape": "latex" if escape else None, "decimal": decimal, } index_format_: dict[str, Any] = {"axis": 0, **base_format_} column_format_: dict[str, Any] = {"axis": 1, **base_format_} if isinstance(float_format, str): float_format_: Callable | None = lambda x: float_format % x else: float_format_ = float_format def _wrap(x, alt_format_): if isinstance(x, (float, complex)) and float_format_ is not None: return float_format_(x) else: return alt_format_(x) formatters_: list | tuple | dict | Callable | None = None if isinstance(formatters, list): formatters_ = { c: partial(_wrap, alt_format_=formatters[i]) for i, c in enumerate(self.columns) } elif isinstance(formatters, dict): index_formatter = formatters.pop("__index__", None) column_formatter = formatters.pop("__columns__", None) if index_formatter is not None: index_format_.update({"formatter": index_formatter}) if column_formatter is not None: column_format_.update({"formatter": column_formatter}) formatters_ = formatters float_columns = self.select_dtypes(include="float").columns for col in float_columns: if col not in formatters.keys(): formatters_.update({col: float_format_}) elif formatters is None and float_format is not None: formatters_ = partial(_wrap, alt_format_=lambda v: v) format_index_ = [index_format_, column_format_] # Deal with hiding indexes and relabelling column names hide_: list[dict] = [] relabel_index_: list[dict] = [] if columns: hide_.append( { "subset": [c for c in self.columns if c not in columns], "axis": "columns", } ) if header is False: hide_.append({"axis": "columns"}) elif isinstance(header, (list, tuple)): relabel_index_.append({"labels": header, "axis": "columns"}) format_index_ = [index_format_] # column_format is overwritten if index is False: hide_.append({"axis": "index"}) if index_names is False: hide_.append({"names": True, "axis": "index"}) render_kwargs_ = { "hrules": True, "sparse_index": sparsify, "sparse_columns": sparsify, "environment": "longtable" if longtable else None, "multicol_align": multicolumn_format if multicolumn else f"naive-{multicolumn_format}", "multirow_align": "t" if multirow else "naive", "encoding": encoding, "caption": caption, "label": label, "position": position, "column_format": column_format, "clines": "skip-last;data" if (multirow and isinstance(self.index, MultiIndex)) else None, "bold_rows": bold_rows, } return self._to_latex_via_styler( buf, hide=hide_, 
relabel_index=relabel_index_, format={"formatter": formatters_, **base_format_}, format_index=format_index_, render_kwargs=render_kwargs_, ) def _to_latex_via_styler( self, buf=None, *, hide: dict | list[dict] | None = None, relabel_index: dict | list[dict] | None = None, format: dict | list[dict] | None = None, format_index: dict | list[dict] | None = None, render_kwargs: dict | None = None, ): """ Render object to a LaTeX tabular, longtable, or nested table. Uses the ``Styler`` implementation with the following, ordered, method chaining: .. code-block:: python styler = Styler(DataFrame) styler.hide(**hide) styler.relabel_index(**relabel_index) styler.format(**format) styler.format_index(**format_index) styler.to_latex(buf=buf, **render_kwargs) Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. hide : dict, list of dict Keyword args to pass to the method call of ``Styler.hide``. If a list will call the method numerous times. relabel_index : dict, list of dict Keyword args to pass to the method of ``Styler.relabel_index``. If a list will call the method numerous times. format : dict, list of dict Keyword args to pass to the method call of ``Styler.format``. If a list will call the method numerous times. format_index : dict, list of dict Keyword args to pass to the method call of ``Styler.format_index``. If a list will call the method numerous times. render_kwargs : dict Keyword args to pass to the method call of ``Styler.to_latex``. Returns ------- str or None If buf is None, returns the result as a string. Otherwise returns None. """ from pandas.io.formats.style import Styler self = cast("DataFrame", self) styler = Styler(self, uuid="") for kw_name in ["hide", "relabel_index", "format", "format_index"]: kw = vars()[kw_name] if isinstance(kw, dict): getattr(styler, kw_name)(**kw) elif isinstance(kw, list): for sub_kw in kw: getattr(styler, kw_name)(**sub_kw) # bold_rows is not a direct kwarg of Styler.to_latex render_kwargs = {} if render_kwargs is None else render_kwargs if render_kwargs.pop("bold_rows"): styler.applymap_index(lambda v: "textbf:--rwrap;") return styler.to_latex(buf=buf, **render_kwargs) def to_csv( self, path_or_buf: None = ..., sep: str = ..., na_rep: str = ..., float_format: str | Callable | None = ..., columns: Sequence[Hashable] | None = ..., header: bool_t | list[str] = ..., index: bool_t = ..., index_label: IndexLabel | None = ..., mode: str = ..., encoding: str | None = ..., compression: CompressionOptions = ..., quoting: int | None = ..., quotechar: str = ..., lineterminator: str | None = ..., chunksize: int | None = ..., date_format: str | None = ..., doublequote: bool_t = ..., escapechar: str | None = ..., decimal: str = ..., errors: str = ..., storage_options: StorageOptions = ..., ) -> str: ... def to_csv( self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str], sep: str = ..., na_rep: str = ..., float_format: str | Callable | None = ..., columns: Sequence[Hashable] | None = ..., header: bool_t | list[str] = ..., index: bool_t = ..., index_label: IndexLabel | None = ..., mode: str = ..., encoding: str | None = ..., compression: CompressionOptions = ..., quoting: int | None = ..., quotechar: str = ..., lineterminator: str | None = ..., chunksize: int | None = ..., date_format: str | None = ..., doublequote: bool_t = ..., escapechar: str | None = ..., decimal: str = ..., errors: str = ..., storage_options: StorageOptions = ..., ) -> None: ... 
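    # A minimal usage sketch of the overload contract above (illustrative
    # only): ``path_or_buf=None`` (the default) returns the CSV text as
    # ``str``; any other target is written to and ``None`` is returned.
    #
    #   df = pd.DataFrame({"name": ["Raphael", "Donatello"]})
    #   text = df.to_csv(index=False)      # 'name\nRaphael\nDonatello\n'
    #   df.to_csv("out.csv", index=False)  # file written, returns None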
    @doc(
storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buf", ) def to_csv( self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, sep: str = ",", na_rep: str = "", float_format: str | Callable | None = None, columns: Sequence[Hashable] | None = None, header: bool_t | list[str] = True, index: bool_t = True, index_label: IndexLabel | None = None, mode: str = "w", encoding: str | None = None, compression: CompressionOptions = "infer", quoting: int | None = None, quotechar: str = '"', lineterminator: str | None = None, chunksize: int | None = None, date_format: str | None = None, doublequote: bool_t = True, escapechar: str | None = None, decimal: str = ".", errors: str = "strict", storage_options: StorageOptions = None, ) -> str | None: r""" Write object to a comma-separated values (csv) file. Parameters ---------- path_or_buf : str, path object, file-like object, or None, default None String, path object (implementing os.PathLike[str]), or file-like object implementing a write() function. If None, the result is returned as a string. If a non-binary file object is passed, it should be opened with `newline=''`, disabling universal newlines. If a binary file object is passed, `mode` might need to contain a `'b'`. .. versionchanged:: 1.2.0 Support for binary file objects was introduced. sep : str, default ',' String of length 1. Field delimiter for the output file. na_rep : str, default '' Missing data representation. float_format : str, Callable, default None Format string for floating point numbers. If a Callable is given, it takes precedence over other numeric formatting parameters, like decimal. columns : sequence, optional Columns to write. header : bool or list of str, default True Write out the column names. If a list of strings is given it is assumed to be aliases for the column names. index : bool, default True Write row names (index). index_label : str or sequence, or False, default None Column label for index column(s) if desired. If None is given, and `header` and `index` are True, then the index names are used. A sequence should be given if the object uses MultiIndex. If False do not print fields for index names. Use index_label=False for easier importing in R. mode : str, default 'w' Python write mode. The available write modes are the same as :py:func:`open`. encoding : str, optional A string representing the encoding to use in the output file, defaults to 'utf-8'. `encoding` is not supported if `path_or_buf` is a non-binary file object. {compression_options} .. versionchanged:: 1.0.0 May now be a dict with key 'method' as compression mode and other entries as additional compression options if compression mode is 'zip'. .. versionchanged:: 1.1.0 Passing compression options as keys in dict is supported for compression modes 'gzip', 'bz2', 'zstd', and 'zip'. .. versionchanged:: 1.2.0 Compression is supported for binary file objects. .. versionchanged:: 1.2.0 Previous versions forwarded dict entries for 'gzip' to `gzip.open` instead of `gzip.GzipFile` which prevented setting `mtime`. quoting : optional constant from csv module Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format` then floats are converted to strings and thus csv.QUOTE_NONNUMERIC will treat them as non-numeric. quotechar : str, default '\"' String of length 1. Character used to quote fields. lineterminator : str, optional The newline character or character sequence to use in the output file. 
Defaults to `os.linesep`, which depends on the OS in which this method is called ('\\n' for linux, '\\r\\n' for Windows, i.e.). .. versionchanged:: 1.5.0 Previously was line_terminator, changed for consistency with read_csv and the standard library 'csv' module. chunksize : int or None Rows to write at a time. date_format : str, default None Format string for datetime objects. doublequote : bool, default True Control quoting of `quotechar` inside a field. escapechar : str, default None String of length 1. Character used to escape `sep` and `quotechar` when appropriate. decimal : str, default '.' Character recognized as decimal separator. E.g. use ',' for European data. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 Returns ------- None or str If path_or_buf is None, returns the resulting csv format as a string. Otherwise returns None. See Also -------- read_csv : Load a CSV file into a DataFrame. to_excel : Write DataFrame to an Excel file. Examples -------- >>> df = pd.DataFrame({{'name': ['Raphael', 'Donatello'], ... 'mask': ['red', 'purple'], ... 'weapon': ['sai', 'bo staff']}}) >>> df.to_csv(index=False) 'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n' Create 'out.zip' containing 'out.csv' >>> compression_opts = dict(method='zip', ... archive_name='out.csv') # doctest: +SKIP >>> df.to_csv('out.zip', index=False, ... compression=compression_opts) # doctest: +SKIP To write a csv file to a new folder or nested folder you will first need to create it using either Pathlib or os: >>> from pathlib import Path # doctest: +SKIP >>> filepath = Path('folder/subfolder/out.csv') # doctest: +SKIP >>> filepath.parent.mkdir(parents=True, exist_ok=True) # doctest: +SKIP >>> df.to_csv(filepath) # doctest: +SKIP >>> import os # doctest: +SKIP >>> os.makedirs('folder/subfolder', exist_ok=True) # doctest: +SKIP >>> df.to_csv('folder/subfolder/out.csv') # doctest: +SKIP """ df = self if isinstance(self, ABCDataFrame) else self.to_frame() formatter = DataFrameFormatter( frame=df, header=header, index=index, na_rep=na_rep, float_format=float_format, decimal=decimal, ) return DataFrameRenderer(formatter).to_csv( path_or_buf, lineterminator=lineterminator, sep=sep, encoding=encoding, errors=errors, compression=compression, quoting=quoting, columns=columns, index_label=index_label, mode=mode, chunksize=chunksize, quotechar=quotechar, date_format=date_format, doublequote=doublequote, escapechar=escapechar, storage_options=storage_options, ) # ---------------------------------------------------------------------- # Lookup Caching def _reset_cacher(self) -> None: """ Reset the cacher. """ raise AbstractMethodError(self) def _maybe_update_cacher( self, clear: bool_t = False, verify_is_copy: bool_t = True, inplace: bool_t = False, ) -> None: """ See if we need to update our parent cacher if clear, then clear our cache. Parameters ---------- clear : bool, default False Clear the item cache. verify_is_copy : bool, default True Provide is_copy checks. 
""" if using_copy_on_write(): return if verify_is_copy: self._check_setitem_copy(t="referent") if clear: self._clear_item_cache() def _clear_item_cache(self) -> None: raise AbstractMethodError(self) # ---------------------------------------------------------------------- # Indexing Methods def take(self: NDFrameT, indices, axis: Axis = 0, **kwargs) -> NDFrameT: """ Return the elements in the given *positional* indices along an axis. This means that we are not indexing according to actual values in the index attribute of the object. We are indexing according to the actual position of the element in the object. Parameters ---------- indices : array-like An array of ints indicating which positions to take. axis : {0 or 'index', 1 or 'columns', None}, default 0 The axis on which to select elements. ``0`` means that we are selecting rows, ``1`` means that we are selecting columns. For `Series` this parameter is unused and defaults to 0. **kwargs For compatibility with :meth:`numpy.take`. Has no effect on the output. Returns ------- same type as caller An array-like containing the elements taken from the object. See Also -------- DataFrame.loc : Select a subset of a DataFrame by labels. DataFrame.iloc : Select a subset of a DataFrame by positions. numpy.take : Take elements from an array along an axis. Examples -------- >>> df = pd.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey', 'mammal', np.nan)], ... columns=['name', 'class', 'max_speed'], ... index=[0, 2, 3, 1]) >>> df name class max_speed 0 falcon bird 389.0 2 parrot bird 24.0 3 lion mammal 80.5 1 monkey mammal NaN Take elements at positions 0 and 3 along the axis 0 (default). Note how the actual indices selected (0 and 1) do not correspond to our selected indices 0 and 3. That's because we are selecting the 0th and 3rd rows, not rows whose indices equal 0 and 3. >>> df.take([0, 3]) name class max_speed 0 falcon bird 389.0 1 monkey mammal NaN Take elements at indices 1 and 2 along the axis 1 (column selection). >>> df.take([1, 2], axis=1) class max_speed 0 bird 389.0 2 bird 24.0 3 mammal 80.5 1 mammal NaN We may take elements using negative integers for positive indices, starting from the end of the object, just like with Python lists. >>> df.take([-1, -2]) name class max_speed 1 monkey mammal NaN 3 lion mammal 80.5 """ nv.validate_take((), kwargs) return self._take(indices, axis) def _take( self: NDFrameT, indices, axis: Axis = 0, convert_indices: bool_t = True, ) -> NDFrameT: """ Internal version of the `take` allowing specification of additional args. See the docstring of `take` for full explanation of the parameters. """ if not isinstance(indices, slice): indices = np.asarray(indices, dtype=np.intp) if ( axis == 0 and indices.ndim == 1 and using_copy_on_write() and is_range_indexer(indices, len(self)) ): return self.copy(deep=None) new_data = self._mgr.take( indices, axis=self._get_block_manager_axis(axis), verify=True, convert_indices=convert_indices, ) return self._constructor(new_data).__finalize__(self, method="take") def _take_with_is_copy(self: NDFrameT, indices, axis: Axis = 0) -> NDFrameT: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing for the SettingWithCopyWarning). See the docstring of `take` for full explanation of the parameters. """ result = self._take(indices=indices, axis=axis) # Maybe set copy if we didn't actually change the index. 
if not result._get_axis(axis).equals(self._get_axis(axis)): result._set_is_copy(self) return result def xs( self: NDFrameT, key: IndexLabel, axis: Axis = 0, level: IndexLabel = None, drop_level: bool_t = True, ) -> NDFrameT: """ Return cross-section from the Series/DataFrame. This method takes a `key` argument to select data at a particular level of a MultiIndex. Parameters ---------- key : label or tuple of label Label contained in the index, or partially in a MultiIndex. axis : {0 or 'index', 1 or 'columns'}, default 0 Axis to retrieve cross-section on. level : object, defaults to first n levels (n=1 or len(key)) In case of a key partially contained in a MultiIndex, indicate which levels are used. Levels can be referred by label or position. drop_level : bool, default True If False, returns object with same levels as self. Returns ------- Series or DataFrame Cross-section from the original Series or DataFrame corresponding to the selected index levels. See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. DataFrame.iloc : Purely integer-location based indexing for selection by position. Notes ----- `xs` can not be used to set values. MultiIndex Slicers is a generic way to get/set values on any level or levels. It is a superset of `xs` functionality, see :ref:`MultiIndex Slicers <advanced.mi_slicers>`. Examples -------- >>> d = {'num_legs': [4, 4, 2, 2], ... 'num_wings': [0, 0, 2, 2], ... 'class': ['mammal', 'mammal', 'mammal', 'bird'], ... 'animal': ['cat', 'dog', 'bat', 'penguin'], ... 'locomotion': ['walks', 'walks', 'flies', 'walks']} >>> df = pd.DataFrame(data=d) >>> df = df.set_index(['class', 'animal', 'locomotion']) >>> df num_legs num_wings class animal locomotion mammal cat walks 4 0 dog walks 4 0 bat flies 2 2 bird penguin walks 2 2 Get values at specified index >>> df.xs('mammal') num_legs num_wings animal locomotion cat walks 4 0 dog walks 4 0 bat flies 2 2 Get values at several indexes >>> df.xs(('mammal', 'dog', 'walks')) num_legs 4 num_wings 0 Name: (mammal, dog, walks), dtype: int64 Get values at specified index and level >>> df.xs('cat', level=1) num_legs num_wings class locomotion mammal walks 4 0 Get values at several indexes and levels >>> df.xs(('bird', 'walks'), ... 
level=[0, 'locomotion']) num_legs num_wings animal penguin 2 2 Get values at specified column and axis >>> df.xs('num_wings', axis=1) class animal locomotion mammal cat walks 0 dog walks 0 bat flies 2 bird penguin walks 2 Name: num_wings, dtype: int64 """ axis = self._get_axis_number(axis) labels = self._get_axis(axis) if isinstance(key, list): raise TypeError("list keys are not supported in xs, pass a tuple instead") if level is not None: if not isinstance(labels, MultiIndex): raise TypeError("Index must be a MultiIndex") loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level) # create the tuple of the indexer _indexer = [slice(None)] * self.ndim _indexer[axis] = loc indexer = tuple(_indexer) result = self.iloc[indexer] setattr(result, result._get_axis_name(axis), new_ax) return result if axis == 1: if drop_level: return self[key] index = self.columns else: index = self.index if isinstance(index, MultiIndex): loc, new_index = index._get_loc_level(key, level=0) if not drop_level: if lib.is_integer(loc): new_index = index[loc : loc + 1] else: new_index = index[loc] else: loc = index.get_loc(key) if isinstance(loc, np.ndarray): if loc.dtype == np.bool_: (inds,) = loc.nonzero() return self._take_with_is_copy(inds, axis=axis) else: return self._take_with_is_copy(loc, axis=axis) if not is_scalar(loc): new_index = index[loc] if is_scalar(loc) and axis == 0: # In this case loc should be an integer if self.ndim == 1: # if we encounter an array-like and we only have 1 dim # that means that their are list/ndarrays inside the Series! # so just return them (GH 6394) return self._values[loc] new_mgr = self._mgr.fast_xs(loc) result = self._constructor_sliced( new_mgr, name=self.index[loc] ).__finalize__(self) elif is_scalar(loc): result = self.iloc[:, slice(loc, loc + 1)] elif axis == 1: result = self.iloc[:, loc] else: result = self.iloc[loc] result.index = new_index # this could be a view # but only in a single-dtyped view sliceable case result._set_is_copy(self, copy=not result._is_view) return result def __getitem__(self, item): raise AbstractMethodError(self) def _slice(self: NDFrameT, slobj: slice, axis: Axis = 0) -> NDFrameT: """ Construct a slice of this container. Slicing with this method is *always* positional. """ assert isinstance(slobj, slice), type(slobj) axis = self._get_block_manager_axis(axis) result = self._constructor(self._mgr.get_slice(slobj, axis=axis)) result = result.__finalize__(self) # this could be a view # but only in a single-dtyped view sliceable case is_copy = axis != 0 or result._is_view result._set_is_copy(self, copy=is_copy) return result def _set_is_copy(self, ref: NDFrame, copy: bool_t = True) -> None: if not copy: self._is_copy = None else: assert ref is not None self._is_copy = weakref.ref(ref) def _check_is_chained_assignment_possible(self) -> bool_t: """ Check if we are a view, have a cacher, and are of mixed type. If so, then force a setitem_copy check. Should be called just near setting a value Will return a boolean if it we are a view and are cached, but a single-dtype meaning that the cacher should be updated following setting. """ if self._is_copy: self._check_setitem_copy(t="referent") return False def _check_setitem_copy(self, t: str = "setting", force: bool_t = False): """ Parameters ---------- t : str, the type of setting error force : bool, default False If True, then force showing an error. validate if we are doing a setitem on a chained copy. 
It is technically possible to figure out that we are setting on a copy even WITH a multi-dtyped pandas object. In other words, some blocks may be views while other are not. Currently _is_view will ALWAYS return False for multi-blocks to avoid having to handle this case. df = DataFrame(np.arange(0,9), columns=['count']) df['group'] = 'b' # This technically need not raise SettingWithCopy if both are view # (which is not generally guaranteed but is usually True. However, # this is in general not a good practice and we recommend using .loc. df.iloc[0:5]['group'] = 'a' """ if using_copy_on_write(): return # return early if the check is not needed if not (force or self._is_copy): return value = config.get_option("mode.chained_assignment") if value is None: return # see if the copy is not actually referred; if so, then dissolve # the copy weakref if self._is_copy is not None and not isinstance(self._is_copy, str): r = self._is_copy() if not gc.get_referents(r) or (r is not None and r.shape == self.shape): self._is_copy = None return # a custom message if isinstance(self._is_copy, str): t = self._is_copy elif t == "referent": t = ( "\n" "A value is trying to be set on a copy of a slice from a " "DataFrame\n\n" "See the caveats in the documentation: " "https://pandas.pydata.org/pandas-docs/stable/user_guide/" "indexing.html#returning-a-view-versus-a-copy" ) else: t = ( "\n" "A value is trying to be set on a copy of a slice from a " "DataFrame.\n" "Try using .loc[row_indexer,col_indexer] = value " "instead\n\nSee the caveats in the documentation: " "https://pandas.pydata.org/pandas-docs/stable/user_guide/" "indexing.html#returning-a-view-versus-a-copy" ) if value == "raise": raise SettingWithCopyError(t) if value == "warn": warnings.warn(t, SettingWithCopyWarning, stacklevel=find_stack_level()) def __delitem__(self, key) -> None: """ Delete item """ deleted = False maybe_shortcut = False if self.ndim == 2 and isinstance(self.columns, MultiIndex): try: # By using engine's __contains__ we effectively # restrict to same-length tuples maybe_shortcut = key not in self.columns._engine except TypeError: pass if maybe_shortcut: # Allow shorthand to delete all columns whose first len(key) # elements match key: if not isinstance(key, tuple): key = (key,) for col in self.columns: if isinstance(col, tuple) and col[: len(key)] == key: del self[col] deleted = True if not deleted: # If the above loop ran and didn't delete anything because # there was no match, this call should raise the appropriate # exception: loc = self.axes[-1].get_loc(key) self._mgr = self._mgr.idelete(loc) # delete from the caches try: del self._item_cache[key] except KeyError: pass # ---------------------------------------------------------------------- # Unsorted def _check_inplace_and_allows_duplicate_labels(self, inplace): if inplace and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'inplace=True' when " "'self.flags.allows_duplicate_labels' is False." ) def get(self, key, default=None): """ Get item from object for given key (ex: DataFrame column). Returns default value if not found. Parameters ---------- key : object Returns ------- same type as items contained in object Examples -------- >>> df = pd.DataFrame( ... [ ... [24.3, 75.7, "high"], ... [31, 87.8, "high"], ... [22, 71.6, "medium"], ... [35, 95, "medium"], ... ], ... columns=["temp_celsius", "temp_fahrenheit", "windspeed"], ... index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"), ... 
) >>> df temp_celsius temp_fahrenheit windspeed 2014-02-12 24.3 75.7 high 2014-02-13 31.0 87.8 high 2014-02-14 22.0 71.6 medium 2014-02-15 35.0 95.0 medium >>> df.get(["temp_celsius", "windspeed"]) temp_celsius windspeed 2014-02-12 24.3 high 2014-02-13 31.0 high 2014-02-14 22.0 medium 2014-02-15 35.0 medium >>> ser = df['windspeed'] >>> ser.get('2014-02-13') 'high' If the key isn't found, the default value will be used. >>> df.get(["temp_celsius", "temp_kelvin"], default="default_value") 'default_value' >>> ser.get('2014-02-10', '[unknown]') '[unknown]' """ try: return self[key] except (KeyError, ValueError, IndexError): return default def _is_view(self) -> bool_t: """Return boolean indicating if self is view of another array""" return self._mgr.is_view def reindex_like( self: NDFrameT, other, method: Literal["backfill", "bfill", "pad", "ffill", "nearest"] | None = None, copy: bool_t | None = None, limit=None, tolerance=None, ) -> NDFrameT: """ Return an object with matching indices as other object. Conform the object to the same index on all axes. Optional filling logic, placing NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and copy=False. Parameters ---------- other : Object of the same data type Its row and column indices are used to define the new indices of this object. method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'} Method to use for filling holes in reindexed DataFrame. Please note: this is only applicable to DataFrames/Series with a monotonically increasing/decreasing index. * None (default): don't fill gaps * pad / ffill: propagate last valid observation forward to next valid * backfill / bfill: use next valid observation to fill gap * nearest: use nearest valid observations to fill gap. copy : bool, default True Return a new object, even if the passed indexes are the same. limit : int, default None Maximum number of consecutive labels to fill for inexact matches. tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Tolerance may be a scalar value, which applies the same tolerance to all values, or list-like, which applies variable tolerance per element. List-like includes list, tuple, array, Series, and must be the same size as the index and its dtype must exactly match the index's type. Returns ------- Series or DataFrame Same type as caller, but with changed indices on each axis. See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex : Change to new indices or expand indices. Notes ----- Same as calling ``.reindex(index=other.index, columns=other.columns,...)``. Examples -------- >>> df1 = pd.DataFrame([[24.3, 75.7, 'high'], ... [31, 87.8, 'high'], ... [22, 71.6, 'medium'], ... [35, 95, 'medium']], ... columns=['temp_celsius', 'temp_fahrenheit', ... 'windspeed'], ... index=pd.date_range(start='2014-02-12', ... end='2014-02-15', freq='D')) >>> df1 temp_celsius temp_fahrenheit windspeed 2014-02-12 24.3 75.7 high 2014-02-13 31.0 87.8 high 2014-02-14 22.0 71.6 medium 2014-02-15 35.0 95.0 medium >>> df2 = pd.DataFrame([[28, 'low'], ... [30, 'low'], ... [35.1, 'medium']], ... columns=['temp_celsius', 'windspeed'], ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13', ... 
'2014-02-15'])) >>> df2 temp_celsius windspeed 2014-02-12 28.0 low 2014-02-13 30.0 low 2014-02-15 35.1 medium >>> df2.reindex_like(df1) temp_celsius temp_fahrenheit windspeed 2014-02-12 28.0 NaN low 2014-02-13 30.0 NaN low 2014-02-14 NaN NaN NaN 2014-02-15 35.1 NaN medium """ d = other._construct_axes_dict( axes=self._AXIS_ORDERS, method=method, copy=copy, limit=limit, tolerance=tolerance, ) return self.reindex(**d) def drop( self, labels: IndexLabel = ..., *, axis: Axis = ..., index: IndexLabel = ..., columns: IndexLabel = ..., level: Level | None = ..., inplace: Literal[True], errors: IgnoreRaise = ..., ) -> None: ... def drop( self: NDFrameT, labels: IndexLabel = ..., *, axis: Axis = ..., index: IndexLabel = ..., columns: IndexLabel = ..., level: Level | None = ..., inplace: Literal[False] = ..., errors: IgnoreRaise = ..., ) -> NDFrameT: ... def drop( self: NDFrameT, labels: IndexLabel = ..., *, axis: Axis = ..., index: IndexLabel = ..., columns: IndexLabel = ..., level: Level | None = ..., inplace: bool_t = ..., errors: IgnoreRaise = ..., ) -> NDFrameT | None: ... def drop( self: NDFrameT, labels: IndexLabel = None, *, axis: Axis = 0, index: IndexLabel = None, columns: IndexLabel = None, level: Level | None = None, inplace: bool_t = False, errors: IgnoreRaise = "raise", ) -> NDFrameT | None: inplace = validate_bool_kwarg(inplace, "inplace") if labels is not None: if index is not None or columns is not None: raise ValueError("Cannot specify both 'labels' and 'index'/'columns'") axis_name = self._get_axis_name(axis) axes = {axis_name: labels} elif index is not None or columns is not None: axes = {"index": index} if self.ndim == 2: axes["columns"] = columns else: raise ValueError( "Need to specify at least one of 'labels', 'index' or 'columns'" ) obj = self for axis, labels in axes.items(): if labels is not None: obj = obj._drop_axis(labels, axis, level=level, errors=errors) if inplace: self._update_inplace(obj) return None else: return obj def _drop_axis( self: NDFrameT, labels, axis, level=None, errors: IgnoreRaise = "raise", only_slice: bool_t = False, ) -> NDFrameT: """ Drop labels from specified axis. Used in the ``drop`` method internally. Parameters ---------- labels : single label or list-like axis : int or axis name level : int or level name, default None For MultiIndex errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and existing labels are dropped. only_slice : bool, default False Whether indexing along columns should be view-only. 
""" axis_num = self._get_axis_number(axis) axis = self._get_axis(axis) if axis.is_unique: if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError("axis must be a MultiIndex") new_axis = axis.drop(labels, level=level, errors=errors) else: new_axis = axis.drop(labels, errors=errors) indexer = axis.get_indexer(new_axis) # Case for non-unique axis else: is_tuple_labels = is_nested_list_like(labels) or isinstance(labels, tuple) labels = ensure_object(common.index_labels_to_array(labels)) if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError("axis must be a MultiIndex") mask = ~axis.get_level_values(level).isin(labels) # GH 18561 MultiIndex.drop should raise if label is absent if errors == "raise" and mask.all(): raise KeyError(f"{labels} not found in axis") elif ( isinstance(axis, MultiIndex) and labels.dtype == "object" and not is_tuple_labels ): # Set level to zero in case of MultiIndex and label is string, # because isin can't handle strings for MultiIndexes GH#36293 # In case of tuples we get dtype object but have to use isin GH#42771 mask = ~axis.get_level_values(0).isin(labels) else: mask = ~axis.isin(labels) # Check if label doesn't exist along axis labels_missing = (axis.get_indexer_for(labels) == -1).any() if errors == "raise" and labels_missing: raise KeyError(f"{labels} not found in axis") if is_extension_array_dtype(mask.dtype): # GH#45860 mask = mask.to_numpy(dtype=bool) indexer = mask.nonzero()[0] new_axis = axis.take(indexer) bm_axis = self.ndim - axis_num - 1 new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=bm_axis, allow_dups=True, copy=None, only_slice=only_slice, ) result = self._constructor(new_mgr) if self.ndim == 1: result.name = self.name return result.__finalize__(self) def _update_inplace(self, result, verify_is_copy: bool_t = True) -> None: """ Replace self internals with result. Parameters ---------- result : same type as self verify_is_copy : bool, default True Provide is_copy checks. """ # NOTE: This does *not* call __finalize__ and that's an explicit # decision that we may revisit in the future. self._reset_cache() self._clear_item_cache() self._mgr = result._mgr self._maybe_update_cacher(verify_is_copy=verify_is_copy, inplace=True) def add_prefix(self: NDFrameT, prefix: str, axis: Axis | None = None) -> NDFrameT: """ Prefix labels with string `prefix`. For Series, the row labels are prefixed. For DataFrame, the column labels are prefixed. Parameters ---------- prefix : str The string to add before each label. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to add prefix on .. versionadded:: 2.0.0 Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_suffix: Suffix row labels with string `suffix`. DataFrame.add_suffix: Suffix column labels with string `suffix`. 
Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_prefix('item_') item_0 1 item_1 2 item_2 3 item_3 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_prefix('col_') col_A col_B 0 1 3 1 2 4 2 3 5 3 4 6 """ f = lambda x: f"{prefix}{x}" axis_name = self._info_axis_name if axis is not None: axis_name = self._get_axis_name(axis) mapper = {axis_name: f} # error: Incompatible return value type (got "Optional[NDFrameT]", # expected "NDFrameT") # error: Argument 1 to "rename" of "NDFrame" has incompatible type # "**Dict[str, partial[str]]"; expected "Union[str, int, None]" # error: Keywords must be strings return self._rename(**mapper) # type: ignore[return-value, arg-type, misc] def add_suffix(self: NDFrameT, suffix: str, axis: Axis | None = None) -> NDFrameT: """ Suffix labels with string `suffix`. For Series, the row labels are suffixed. For DataFrame, the column labels are suffixed. Parameters ---------- suffix : str The string to add after each label. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to add suffix on .. versionadded:: 2.0.0 Returns ------- Series or DataFrame New Series or DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. DataFrame.add_prefix: Prefix column labels with string `prefix`. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.add_suffix('_item') 0_item 1 1_item 2 2_item 3 3_item 4 dtype: int64 >>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_suffix('_col') A_col B_col 0 1 3 1 2 4 2 3 5 3 4 6 """ f = lambda x: f"{x}{suffix}" axis_name = self._info_axis_name if axis is not None: axis_name = self._get_axis_name(axis) mapper = {axis_name: f} # error: Incompatible return value type (got "Optional[NDFrameT]", # expected "NDFrameT") # error: Argument 1 to "rename" of "NDFrame" has incompatible type # "**Dict[str, partial[str]]"; expected "Union[str, int, None]" # error: Keywords must be strings return self._rename(**mapper) # type: ignore[return-value, arg-type, misc] def sort_values( self: NDFrameT, *, axis: Axis = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[False] = ..., kind: str = ..., na_position: str = ..., ignore_index: bool_t = ..., key: ValueKeyFunc = ..., ) -> NDFrameT: ... def sort_values( self, *, axis: Axis = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[True], kind: str = ..., na_position: str = ..., ignore_index: bool_t = ..., key: ValueKeyFunc = ..., ) -> None: ... def sort_values( self: NDFrameT, *, axis: Axis = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: bool_t = ..., kind: str = ..., na_position: str = ..., ignore_index: bool_t = ..., key: ValueKeyFunc = ..., ) -> NDFrameT | None: ... def sort_values( self: NDFrameT, *, axis: Axis = 0, ascending: bool_t | Sequence[bool_t] = True, inplace: bool_t = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool_t = False, key: ValueKeyFunc = None, ) -> NDFrameT | None: """ Sort by the values along either axis. Parameters ----------%(optional_by)s axis : %(axes_single_arg)s, default 0 Axis to be sorted. ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False If True, perform operation in-place. 
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. `mergesort` and `stable` are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' Puts NaNs at the beginning if `first`; `last` puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional Apply the key function to the values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return a Series with the same shape as the input. It will be applied to each column in `by` independently. .. versionadded:: 1.1.0 Returns ------- DataFrame or None DataFrame with sorted values or None if ``inplace=True``. See Also -------- DataFrame.sort_index : Sort a DataFrame by the index. Series.sort_values : Similar method for a Series. Examples -------- >>> df = pd.DataFrame({ ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'], ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... 'col4': ['a', 'B', 'c', 'D', 'e', 'F'] ... }) >>> df col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F Sort by col1 >>> df.sort_values(by=['col1']) col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 5 C 4 3 F 4 D 7 2 e 3 NaN 8 4 D Sort by multiple columns >>> df.sort_values(by=['col1', 'col2']) col1 col2 col3 col4 1 A 1 1 B 0 A 2 0 a 2 B 9 9 c 5 C 4 3 F 4 D 7 2 e 3 NaN 8 4 D Sort Descending >>> df.sort_values(by='col1', ascending=False) col1 col2 col3 col4 4 D 7 2 e 5 C 4 3 F 2 B 9 9 c 0 A 2 0 a 1 A 1 1 B 3 NaN 8 4 D Putting NAs first >>> df.sort_values(by='col1', ascending=False, na_position='first') col1 col2 col3 col4 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F 2 B 9 9 c 0 A 2 0 a 1 A 1 1 B Sorting with a key function >>> df.sort_values(by='col4', key=lambda col: col.str.lower()) col1 col2 col3 col4 0 A 2 0 a 1 A 1 1 B 2 B 9 9 c 3 NaN 8 4 D 4 D 7 2 e 5 C 4 3 F Natural sort with the key argument, using the `natsort <https://github.com/SethMMorton/natsort>` package. >>> df = pd.DataFrame({ ... "time": ['0hr', '128hr', '72hr', '48hr', '96hr'], ... "value": [10, 20, 30, 40, 50] ... }) >>> df time value 0 0hr 10 1 128hr 20 2 72hr 30 3 48hr 40 4 96hr 50 >>> from natsort import index_natsorted >>> df.sort_values( ... by="time", ... key=lambda x: np.argsort(index_natsorted(df["time"])) ... ) time value 0 0hr 10 3 48hr 40 2 72hr 30 4 96hr 50 1 128hr 20 """ raise AbstractMethodError(self) def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[True], kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool_t = ..., ignore_index: bool_t = ..., key: IndexKeyFunc = ..., ) -> None: ... def sort_index( self: NDFrameT, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool_t | Sequence[bool_t] = ..., inplace: Literal[False] = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool_t = ..., ignore_index: bool_t = ..., key: IndexKeyFunc = ..., ) -> NDFrameT: ... 
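    # A minimal usage sketch for the ``sort_index`` overloads above and the
    # implementation below (hypothetical data, not part of the pandas test
    # suite): ``inplace=False`` returns a new object, ``inplace=True``
    # returns None.
    #
    #   df = pd.DataFrame({"a": [1, 2]}, index=["b", "a"])
    #   sorted_df = df.sort_index()   # new DataFrame, index-sorted
    #   df.sort_index(inplace=True)   # sorts df itself, returns None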
    def sort_index(
        self: NDFrameT,
        *,
        axis: Axis = ...,
        level: IndexLabel = ...,
        ascending: bool_t | Sequence[bool_t] = ...,
        inplace: bool_t = ...,
        kind: SortKind = ...,
        na_position: NaPosition = ...,
        sort_remaining: bool_t = ...,
        ignore_index: bool_t = ...,
        key: IndexKeyFunc = ...,
    ) -> NDFrameT | None:
        ...

    def sort_index(
        self: NDFrameT,
        *,
        axis: Axis = 0,
        level: IndexLabel = None,
        ascending: bool_t | Sequence[bool_t] = True,
        inplace: bool_t = False,
        kind: SortKind = "quicksort",
        na_position: NaPosition = "last",
        sort_remaining: bool_t = True,
        ignore_index: bool_t = False,
        key: IndexKeyFunc = None,
    ) -> NDFrameT | None:
        inplace = validate_bool_kwarg(inplace, "inplace")
        axis = self._get_axis_number(axis)
        ascending = validate_ascending(ascending)

        target = self._get_axis(axis)

        indexer = get_indexer_indexer(
            target, level, ascending, kind, na_position, sort_remaining, key
        )

        if indexer is None:
            if inplace:
                result = self
            else:
                result = self.copy(deep=None)

            if ignore_index:
                result.index = default_index(len(self))
            if inplace:
                return None
            else:
                return result

        baxis = self._get_block_manager_axis(axis)
        new_data = self._mgr.take(indexer, axis=baxis, verify=False)

        # reconstruct axis if needed
        new_data.set_axis(baxis, new_data.axes[baxis]._sort_levels_monotonic())

        if ignore_index:
            axis = 1 if isinstance(self, ABCDataFrame) else 0
            new_data.set_axis(axis, default_index(len(indexer)))

        result = self._constructor(new_data)

        if inplace:
            return self._update_inplace(result)
        else:
            return result.__finalize__(self, method="sort_index")

    @doc(
        klass=_shared_doc_kwargs["klass"],
        optional_reindex="",
    )
    def reindex(
        self: NDFrameT,
        labels=None,
        index=None,
        columns=None,
        axis: Axis | None = None,
        method: str | None = None,
        copy: bool_t | None = None,
        level: Level | None = None,
        fill_value: Scalar | None = np.nan,
        limit: int | None = None,
        tolerance=None,
    ) -> NDFrameT:
        """
        Conform {klass} to new index with optional filling logic.

        Places NA/NaN in locations having no value in the previous index. A new
        object is produced unless the new index is equivalent to the current one
        and ``copy=False``.

        Parameters
        ----------
        {optional_reindex}
        method : {{None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}}
            Method to use for filling holes in reindexed DataFrame.
            Please note: this is only applicable to DataFrames/Series with a
            monotonically increasing/decreasing index.

            * None (default): don't fill gaps
            * pad / ffill: Propagate last valid observation forward to next
              valid.
            * backfill / bfill: Use next valid observation to fill gap.
            * nearest: Use nearest valid observations to fill gap.

        copy : bool, default True
            Return a new object, even if the passed indexes are the same.
        level : int or name
            Broadcast across a level, matching Index values on the
            passed MultiIndex level.
        fill_value : scalar, default np.NaN
            Value to use for missing values. Defaults to NaN, but can be any
            "compatible" value.
        limit : int, default None
            Maximum number of consecutive elements to forward or backward fill.
        tolerance : optional
            Maximum distance between original and new labels for inexact
            matches. The values of the index at the matching locations must
            satisfy the equation ``abs(index[indexer] - target) <= tolerance``.

            Tolerance may be a scalar value, which applies the same tolerance
            to all values, or list-like, which applies variable tolerance per
            element. List-like includes list, tuple, array, Series, and must be
            the same size as the index and its dtype must exactly match the
            index's type.

        Returns
        -------
        {klass} with changed index.
See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. DataFrame.reindex_like : Change to same indices as other DataFrame. Examples -------- ``DataFrame.reindex`` supports two calling conventions * ``(index=index_labels, columns=column_labels, ...)`` * ``(labels, axis={{'index', 'columns'}}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. Create a dataframe with some fictional data. >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror'] >>> df = pd.DataFrame({{'http_status': [200, 200, 404, 404, 301], ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}}, ... index=index) >>> df http_status response_time Firefox 200 0.04 Chrome 200 0.02 Safari 404 0.07 IE10 404 0.08 Konqueror 301 1.00 Create a new index and reindex the dataframe. By default values in the new index that do not have corresponding records in the dataframe are assigned ``NaN``. >>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10', ... 'Chrome'] >>> df.reindex(new_index) http_status response_time Safari 404.0 0.07 Iceweasel NaN NaN Comodo Dragon NaN NaN IE10 404.0 0.08 Chrome 200.0 0.02 We can fill in the missing values by passing a value to the keyword ``fill_value``. Because the index is not monotonically increasing or decreasing, we cannot use arguments to the keyword ``method`` to fill the ``NaN`` values. >>> df.reindex(new_index, fill_value=0) http_status response_time Safari 404 0.07 Iceweasel 0 0.00 Comodo Dragon 0 0.00 IE10 404 0.08 Chrome 200 0.02 >>> df.reindex(new_index, fill_value='missing') http_status response_time Safari 404 0.07 Iceweasel missing missing Comodo Dragon missing missing IE10 404 0.08 Chrome 200 0.02 We can also reindex the columns. >>> df.reindex(columns=['http_status', 'user_agent']) http_status user_agent Firefox 200 NaN Chrome 200 NaN Safari 404 NaN IE10 404 NaN Konqueror 301 NaN Or we can use "axis-style" keyword arguments >>> df.reindex(['http_status', 'user_agent'], axis="columns") http_status user_agent Firefox 200 NaN Chrome 200 NaN Safari 404 NaN IE10 404 NaN Konqueror 301 NaN To further illustrate the filling functionality in ``reindex``, we will create a dataframe with a monotonically increasing index (for example, a sequence of dates). >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D') >>> df2 = pd.DataFrame({{"prices": [100, 101, np.nan, 100, 89, 88]}}, ... index=date_index) >>> df2 prices 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 Suppose we decide to expand the dataframe to cover a wider date range. >>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D') >>> df2.reindex(date_index2) prices 2009-12-29 NaN 2009-12-30 NaN 2009-12-31 NaN 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN The index entries that did not have a value in the original data frame (for example, '2009-12-29') are by default filled with ``NaN``. If desired, we can fill in the missing values using one of several options. For example, to back-propagate the last valid value to fill the ``NaN`` values, pass ``bfill`` as an argument to the ``method`` keyword. 
>>> df2.reindex(date_index2, method='bfill') prices 2009-12-29 100.0 2009-12-30 100.0 2009-12-31 100.0 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN Please note that the ``NaN`` value present in the original dataframe (at index value 2010-01-03) will not be filled by any of the value propagation schemes. This is because filling while reindexing does not look at dataframe values, but only compares the original and desired indexes. If you do want to fill in the ``NaN`` values present in the original dataframe, use the ``fillna()`` method. See the :ref:`user guide <basics.reindexing>` for more. """ # TODO: Decide if we care about having different examples for different # kinds if index is not None and columns is not None and labels is not None: raise TypeError("Cannot specify all of 'labels', 'index', 'columns'.") elif index is not None or columns is not None: if axis is not None: raise TypeError( "Cannot specify both 'axis' and any of 'index' or 'columns'" ) if labels is not None: if index is not None: columns = labels else: index = labels else: if axis and self._get_axis_number(axis) == 1: columns = labels else: index = labels axes: dict[Literal["index", "columns"], Any] = { "index": index, "columns": columns, } method = clean_reindex_fill_method(method) # if all axes that are requested to reindex are equal, then only copy # if indicated must have index names equal here as well as values if copy and using_copy_on_write(): copy = False if all( self._get_axis(axis_name).identical(ax) for axis_name, ax in axes.items() if ax is not None ): return self.copy(deep=copy) # check if we are a multi reindex if self._needs_reindex_multi(axes, method, level): return self._reindex_multi(axes, copy, fill_value) # perform the reindex on the axes return self._reindex_axes( axes, level, limit, tolerance, method, fill_value, copy ).__finalize__(self, method="reindex") def _reindex_axes( self: NDFrameT, axes, level, limit, tolerance, method, fill_value, copy ) -> NDFrameT: """Perform the reindex for all the axes.""" obj = self for a in self._AXIS_ORDERS: labels = axes[a] if labels is None: continue ax = self._get_axis(a) new_index, indexer = ax.reindex( labels, level=level, limit=limit, tolerance=tolerance, method=method ) axis = self._get_axis_number(a) obj = obj._reindex_with_indexers( {axis: [new_index, indexer]}, fill_value=fill_value, copy=copy, allow_dups=False, ) # If we've made a copy once, no need to make another one copy = False return obj def _needs_reindex_multi(self, axes, method, level) -> bool_t: """Check if we do need a multi reindex.""" return ( (common.count_not_none(*axes.values()) == self._AXIS_LEN) and method is None and level is None and not self._is_mixed_type and not ( self.ndim == 2 and len(self.dtypes) == 1 and is_extension_array_dtype(self.dtypes.iloc[0]) ) ) def _reindex_multi(self, axes, copy, fill_value): raise AbstractMethodError(self) def _reindex_with_indexers( self: NDFrameT, reindexers, fill_value=None, copy: bool_t | None = False, allow_dups: bool_t = False, ) -> NDFrameT: """allow_dups indicates an internal call here""" # reindex doing multiple operations on different axes if indicated new_data = self._mgr for axis in sorted(reindexers.keys()): index, indexer = reindexers[axis] baxis = self._get_block_manager_axis(axis) if index is None: continue index = ensure_index(index) if indexer is not None: indexer = ensure_platform_int(indexer) # TODO: speed up on homogeneous DataFrame objects (see _reindex_multi) 
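            # Delegate to the block manager: ``reindex_indexer`` lays out the
            # existing blocks according to ``indexer`` along the block-manager
            # axis ``baxis`` and fills positions where ``indexer == -1`` with
            # ``fill_value``.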
new_data = new_data.reindex_indexer( index, indexer, axis=baxis, fill_value=fill_value, allow_dups=allow_dups, copy=copy, ) # If we've made a copy once, no need to make another one copy = False if ( (copy or copy is None) and new_data is self._mgr and not using_copy_on_write() ): new_data = new_data.copy(deep=copy) elif using_copy_on_write() and new_data is self._mgr: new_data = new_data.copy(deep=False) return self._constructor(new_data).__finalize__(self) def filter( self: NDFrameT, items=None, like: str | None = None, regex: str | None = None, axis: Axis | None = None, ) -> NDFrameT: """ Subset the dataframe rows or columns according to the specified index labels. Note that this routine does not filter a dataframe on its contents. The filter is applied to the labels of the index. Parameters ---------- items : list-like Keep labels from axis which are in items. like : str Keep labels from axis for which "like in label == True". regex : str (regular expression) Keep labels from axis for which re.search(regex, label) == True. axis : {0 or ‘index’, 1 or ‘columns’, None}, default None The axis to filter on, expressed either as an index (int) or axis name (str). By default this is the info axis, 'columns' for DataFrame. For `Series` this parameter is unused and defaults to `None`. Returns ------- same type as input object See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. Notes ----- The ``items``, ``like``, and ``regex`` parameters are enforced to be mutually exclusive. ``axis`` defaults to the info axis that is used when indexing with ``[]``. Examples -------- >>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])), ... index=['mouse', 'rabbit'], ... columns=['one', 'two', 'three']) >>> df one two three mouse 1 2 3 rabbit 4 5 6 >>> # select columns by name >>> df.filter(items=['one', 'three']) one three mouse 1 3 rabbit 4 6 >>> # select columns by regular expression >>> df.filter(regex='e$', axis=1) one three mouse 1 3 rabbit 4 6 >>> # select rows containing 'bbi' >>> df.filter(like='bbi', axis=0) one two three rabbit 4 5 6 """ nkw = common.count_not_none(items, like, regex) if nkw > 1: raise TypeError( "Keyword arguments `items`, `like`, or `regex` " "are mutually exclusive" ) if axis is None: axis = self._info_axis_name labels = self._get_axis(axis) if items is not None: name = self._get_axis_name(axis) # error: Keywords must be strings return self.reindex( # type: ignore[misc] **{name: [r for r in items if r in labels]} # type: ignore[arg-type] ) elif like: def f(x) -> bool_t: assert like is not None # needed for mypy return like in ensure_str(x) values = labels.map(f) return self.loc(axis=axis)[values] elif regex: def f(x) -> bool_t: return matcher.search(ensure_str(x)) is not None matcher = re.compile(regex) values = labels.map(f) return self.loc(axis=axis)[values] else: raise TypeError("Must pass either `items`, `like`, or `regex`") def head(self: NDFrameT, n: int = 5) -> NDFrameT: """ Return the first `n` rows. This function returns the first `n` rows for the object based on position. It is useful for quickly testing if your object has the right type of data in it. For negative values of `n`, this function returns all rows except the last `|n|` rows, equivalent to ``df[:n]``. If n is larger than the number of rows, this function returns all rows. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- same type as caller The first `n` rows of the caller object. 
See Also -------- DataFrame.tail: Returns the last `n` rows. Examples -------- >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the first 5 lines >>> df.head() animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey Viewing the first `n` lines (three in this case) >>> df.head(3) animal 0 alligator 1 bee 2 falcon For negative values of `n` >>> df.head(-3) animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot """ return self.iloc[:n] def tail(self: NDFrameT, n: int = 5) -> NDFrameT: """ Return the last `n` rows. This function returns last `n` rows from the object based on position. It is useful for quickly verifying data, for example, after sorting or appending rows. For negative values of `n`, this function returns all rows except the first `|n|` rows, equivalent to ``df[|n|:]``. If n is larger than the number of rows, this function returns all rows. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- type of caller The last `n` rows of the caller object. See Also -------- DataFrame.head : The first `n` rows of the caller object. Examples -------- >>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last 5 lines >>> df.tail() animal 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the last `n` lines (three in this case) >>> df.tail(3) animal 6 shark 7 whale 8 zebra For negative values of `n` >>> df.tail(-3) animal 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra """ if n == 0: return self.iloc[0:0] return self.iloc[-n:] def sample( self: NDFrameT, n: int | None = None, frac: float | None = None, replace: bool_t = False, weights=None, random_state: RandomState | None = None, axis: Axis | None = None, ignore_index: bool_t = False, ) -> NDFrameT: """ Return a random sample of items from an axis of object. You can use `random_state` for reproducibility. Parameters ---------- n : int, optional Number of items from axis to return. Cannot be used with `frac`. Default = 1 if `frac` = None. frac : float, optional Fraction of axis items to return. Cannot be used with `n`. replace : bool, default False Allow or disallow sampling of the same row more than once. weights : str or ndarray-like, optional Default 'None' results in equal probability weighting. If passed a Series, will align with target object on index. Index values in weights not found in sampled object will be ignored and index values in sampled object not in weights will be assigned weights of zero. If called on a DataFrame, will accept the name of a column when axis = 0. Unless weights are a Series, weights must be same length as axis being sampled. If weights do not sum to 1, they will be normalized to sum to 1. Missing values in the weights column will be treated as zero. Infinite values not allowed. random_state : int, array-like, BitGenerator, np.random.RandomState, np.random.Generator, optional If int, array-like, or BitGenerator, seed for random number generator. If np.random.RandomState or np.random.Generator, use as given. .. versionchanged:: 1.1.0 array-like and BitGenerator object now passed to np.random.RandomState() as seed .. 
versionchanged:: 1.4.0 np.random.Generator objects now accepted axis : {0 or ‘index’, 1 or ‘columns’, None}, default None Axis to sample. Accepts axis number or name. Default is stat axis for given data type. For `Series` this parameter is unused and defaults to `None`. ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.3.0 Returns ------- Series or DataFrame A new object of same type as caller containing `n` items randomly sampled from the caller object. See Also -------- DataFrameGroupBy.sample: Generates random samples from each group of a DataFrame object. SeriesGroupBy.sample: Generates random samples from each group of a Series object. numpy.random.choice: Generates a random sample from a given 1-D numpy array. Notes ----- If `frac` > 1, `replacement` should be set to `True`. Examples -------- >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0], ... 'num_wings': [2, 0, 0, 0], ... 'num_specimen_seen': [10, 2, 1, 8]}, ... index=['falcon', 'dog', 'spider', 'fish']) >>> df num_legs num_wings num_specimen_seen falcon 2 2 10 dog 4 0 2 spider 8 0 1 fish 0 0 8 Extract 3 random elements from the ``Series`` ``df['num_legs']``: Note that we use `random_state` to ensure the reproducibility of the examples. >>> df['num_legs'].sample(n=3, random_state=1) fish 0 spider 8 falcon 2 Name: num_legs, dtype: int64 A random 50% sample of the ``DataFrame`` with replacement: >>> df.sample(frac=0.5, replace=True, random_state=1) num_legs num_wings num_specimen_seen dog 4 0 2 fish 0 0 8 An upsample sample of the ``DataFrame`` with replacement: Note that `replace` parameter has to be `True` for `frac` parameter > 1. >>> df.sample(frac=2, replace=True, random_state=1) num_legs num_wings num_specimen_seen dog 4 0 2 fish 0 0 8 falcon 2 2 10 falcon 2 2 10 fish 0 0 8 dog 4 0 2 fish 0 0 8 dog 4 0 2 Using a DataFrame column as weights. Rows with larger value in the `num_specimen_seen` column are more likely to be sampled. >>> df.sample(n=2, weights='num_specimen_seen', random_state=1) num_legs num_wings num_specimen_seen falcon 2 2 10 fish 0 0 8 """ # noqa:E501 if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) obj_len = self.shape[axis] # Process random_state argument rs = common.random_state(random_state) size = sample.process_sampling_size(n, frac, replace) if size is None: assert frac is not None size = round(frac * obj_len) if weights is not None: weights = sample.preprocess_weights(self, weights, axis) sampled_indices = sample.sample(obj_len, size, replace, weights, rs) result = self.take(sampled_indices, axis=axis) if ignore_index: result.index = default_index(len(result)) return result def pipe( self, func: Callable[..., T] | tuple[Callable[..., T], str], *args, **kwargs, ) -> T: r""" Apply chainable functions that expect Series or DataFrames. Parameters ---------- func : function Function to apply to the {klass}. ``args``, and ``kwargs`` are passed into ``func``. Alternatively a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of ``callable`` that expects the {klass}. args : iterable, optional Positional arguments passed into ``func``. kwargs : mapping, optional A dictionary of keyword arguments passed into ``func``. Returns ------- the return type of ``func``. See Also -------- DataFrame.apply : Apply a function along input axis of DataFrame. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. 
Series.map : Apply a mapping correspondence on a :class:`~pandas.Series`. Notes ----- Use ``.pipe`` when chaining together functions that expect Series, DataFrames or GroupBy objects. Instead of writing >>> func(g(h(df), arg1=a), arg2=b, arg3=c) # doctest: +SKIP You can write >>> (df.pipe(h) ... .pipe(g, arg1=a) ... .pipe(func, arg2=b, arg3=c) ... ) # doctest: +SKIP If you have a function that takes the data as (say) the second argument, pass a tuple indicating which keyword expects the data. For example, suppose ``func`` takes its data as ``arg2``: >>> (df.pipe(h) ... .pipe(g, arg1=a) ... .pipe((func, 'arg2'), arg1=a, arg3=c) ... ) # doctest: +SKIP """ if using_copy_on_write(): return common.pipe(self.copy(deep=None), func, *args, **kwargs) return common.pipe(self, func, *args, **kwargs) # ---------------------------------------------------------------------- # Attribute access def __finalize__( self: NDFrameT, other, method: str | None = None, **kwargs ) -> NDFrameT: """ Propagate metadata from other to self. Parameters ---------- other : the object from which to get the attributes that we are going to propagate method : str, optional A passed method name providing context on where ``__finalize__`` was called. .. warning:: The value passed as `method` are not currently considered stable across pandas releases. """ if isinstance(other, NDFrame): for name in other.attrs: self.attrs[name] = other.attrs[name] self.flags.allows_duplicate_labels = other.flags.allows_duplicate_labels # For subclasses using _metadata. for name in set(self._metadata) & set(other._metadata): assert isinstance(name, str) object.__setattr__(self, name, getattr(other, name, None)) if method == "concat": attrs = other.objs[0].attrs check_attrs = all(objs.attrs == attrs for objs in other.objs[1:]) if check_attrs: for name in attrs: self.attrs[name] = attrs[name] allows_duplicate_labels = all( x.flags.allows_duplicate_labels for x in other.objs ) self.flags.allows_duplicate_labels = allows_duplicate_labels return self def __getattr__(self, name: str): """ After regular attribute access, try looking up the name This allows simpler access to columns for interactive use. """ # Note: obj.x will always call obj.__getattribute__('x') prior to # calling obj.__getattr__('x'). if ( name not in self._internal_names_set and name not in self._metadata and name not in self._accessors and self._info_axis._can_hold_identifiers_and_holds_name(name) ): return self[name] return object.__getattribute__(self, name) def __setattr__(self, name: str, value) -> None: """ After regular attribute access, try setting the name This allows simpler access to columns for interactive use. """ # first try regular attribute access via __getattribute__, so that # e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify # the same attribute. try: object.__getattribute__(self, name) return object.__setattr__(self, name, value) except AttributeError: pass # if this fails, go on to more involved attribute setting # (note that this matches __getattr__, above). 
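        # Route the assignment: internal names and ``_metadata`` entries go
        # straight onto the instance; an existing axis label goes through
        # ``__setitem__`` so the column/row is updated; anything else becomes
        # a plain instance attribute (with a warning for list-likes on
        # DataFrames, where users usually intended to create a column).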
        if name in self._internal_names_set:
            object.__setattr__(self, name, value)
        elif name in self._metadata:
            object.__setattr__(self, name, value)
        else:
            try:
                existing = getattr(self, name)
                if isinstance(existing, Index):
                    object.__setattr__(self, name, value)
                elif name in self._info_axis:
                    self[name] = value
                else:
                    object.__setattr__(self, name, value)
            except (AttributeError, TypeError):
                if isinstance(self, ABCDataFrame) and (is_list_like(value)):
                    warnings.warn(
                        "Pandas doesn't allow columns to be "
                        "created via a new attribute name - see "
                        "https://pandas.pydata.org/pandas-docs/"
                        "stable/indexing.html#attribute-access",
                        stacklevel=find_stack_level(),
                    )
                object.__setattr__(self, name, value)

    def _dir_additions(self) -> set[str]:
        """
        add the string-like attributes from the info_axis.
        If info_axis is a MultiIndex, its first level values are used.
        """
        additions = super()._dir_additions()
        if self._info_axis._can_hold_strings:
            additions.update(self._info_axis._dir_additions_for_owner)
        return additions

    # ----------------------------------------------------------------------
    # Consolidation of internals

    def _protect_consolidate(self, f):
        """
        Consolidate _mgr -- if the blocks have changed, then clear the
        cache
        """
        if isinstance(self._mgr, (ArrayManager, SingleArrayManager)):
            return f()
        blocks_before = len(self._mgr.blocks)
        result = f()
        if len(self._mgr.blocks) != blocks_before:
            self._clear_item_cache()
        return result

    def _consolidate_inplace(self) -> None:
        """Consolidate data in place and return None"""

        def f() -> None:
            self._mgr = self._mgr.consolidate()

        self._protect_consolidate(f)

    def _consolidate(self):
        """
        Compute NDFrame with "consolidated" internals (data of each dtype
        grouped together in a single ndarray).

        Returns
        -------
        consolidated : same type as caller
        """
        f = lambda: self._mgr.consolidate()
        cons_data = self._protect_consolidate(f)
        return self._constructor(cons_data).__finalize__(self)

    @property
    def _is_mixed_type(self) -> bool_t:
        if self._mgr.is_single_block:
            return False

        if self._mgr.any_extension_types:
            # Even if they have the same dtype, we can't consolidate them,
            # so we pretend this is "mixed"
            return True

        return self.dtypes.nunique() > 1

    def _check_inplace_setting(self, value) -> bool_t:
        """check whether we allow in-place setting with this type of value"""
        if self._is_mixed_type and not self._mgr.is_numeric_mixed_type:
            # allow an actual np.nan through
            if is_float(value) and np.isnan(value) or value is lib.no_default:
                return True

            raise TypeError(
                "Cannot do inplace boolean setting on "
                "mixed-types with a non np.nan value"
            )

        return True

    def _get_numeric_data(self: NDFrameT) -> NDFrameT:
        return self._constructor(self._mgr.get_numeric_data()).__finalize__(self)

    def _get_bool_data(self):
        return self._constructor(self._mgr.get_bool_data()).__finalize__(self)

    # ----------------------------------------------------------------------
    # Internal Interface Methods

    @property
    def values(self):
        raise AbstractMethodError(self)

    @property
    def _values(self) -> ArrayLike:
        """internal implementation"""
        raise AbstractMethodError(self)

    @property
    def dtypes(self):
        """
        Return the dtypes in the DataFrame.

        This returns a Series with the data type of each column.
        The result's index is the original DataFrame's columns. Columns
        with mixed types are stored with the ``object`` dtype. See
        :ref:`the User Guide <basics.dtypes>` for more.

        Returns
        -------
        pandas.Series
            The data type of each column.

        Examples
        --------
        >>> df = pd.DataFrame({'float': [1.0],
        ...                    'int': [1],
        ...                    'datetime': [pd.Timestamp('20180310')],
        ...
'string': ['foo']}) >>> df.dtypes float float64 int int64 datetime datetime64[ns] string object dtype: object """ data = self._mgr.get_dtypes() return self._constructor_sliced(data, index=self._info_axis, dtype=np.object_) def astype( self: NDFrameT, dtype, copy: bool_t | None = None, errors: IgnoreRaise = "raise" ) -> NDFrameT: """ Cast a pandas object to a specified dtype ``dtype``. Parameters ---------- dtype : str, data type, Series or Mapping of column name -> data type Use a str, numpy.dtype, pandas.ExtensionDtype or Python type to cast entire pandas object to the same type. Alternatively, use a mapping, e.g. {col: dtype, ...}, where col is a column label and dtype is a numpy.dtype or Python type to cast one or more of the DataFrame's columns to column-specific types. copy : bool, default True Return a copy when ``copy=True`` (be very careful setting ``copy=False`` as changes to values then may propagate to other pandas objects). errors : {'raise', 'ignore'}, default 'raise' Control raising of exceptions on invalid data for provided dtype. - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. On error return original object. Returns ------- same type as caller See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to a numeric type. numpy.ndarray.astype : Cast a numpy array to a specified type. Notes ----- .. versionchanged:: 2.0.0 Using ``astype`` to convert from timezone-naive dtype to timezone-aware dtype will raise an exception. Use :meth:`Series.dt.tz_localize` instead. Examples -------- Create a DataFrame: >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df.dtypes col1 int64 col2 int64 dtype: object Cast all columns to int32: >>> df.astype('int32').dtypes col1 int32 col2 int32 dtype: object Cast col1 to int32 using a dictionary: >>> df.astype({'col1': 'int32'}).dtypes col1 int32 col2 int64 dtype: object Create a series: >>> ser = pd.Series([1, 2], dtype='int32') >>> ser 0 1 1 2 dtype: int32 >>> ser.astype('int64') 0 1 1 2 dtype: int64 Convert to categorical type: >>> ser.astype('category') 0 1 1 2 dtype: category Categories (2, int32): [1, 2] Convert to ordered categorical type with custom ordering: >>> from pandas.api.types import CategoricalDtype >>> cat_dtype = CategoricalDtype( ... categories=[2, 1], ordered=True) >>> ser.astype(cat_dtype) 0 1 1 2 dtype: category Categories (2, int64): [2 < 1] Create a series of dates: >>> ser_date = pd.Series(pd.date_range('20200101', periods=3)) >>> ser_date 0 2020-01-01 1 2020-01-02 2 2020-01-03 dtype: datetime64[ns] """ if copy and using_copy_on_write(): copy = False if is_dict_like(dtype): if self.ndim == 1: # i.e. Series if len(dtype) > 1 or self.name not in dtype: raise KeyError( "Only the Series name can be used for " "the key in Series dtype mappings." ) new_type = dtype[self.name] return self.astype(new_type, copy, errors) # GH#44417 cast to Series so we can use .iat below, which will be # robust in case we from pandas import Series dtype_ser = Series(dtype, dtype=object) for col_name in dtype_ser.index: if col_name not in self: raise KeyError( "Only a column name can be used for the " "key in a dtype mappings argument. " f"'{col_name}' not found in columns." 
) dtype_ser = dtype_ser.reindex(self.columns, fill_value=None, copy=False) results = [] for i, (col_name, col) in enumerate(self.items()): cdt = dtype_ser.iat[i] if isna(cdt): res_col = col.copy(deep=copy) else: try: res_col = col.astype(dtype=cdt, copy=copy, errors=errors) except ValueError as ex: ex.args = ( f"{ex}: Error while type casting for column '{col_name}'", ) raise results.append(res_col) elif is_extension_array_dtype(dtype) and self.ndim > 1: # GH 18099/22869: columnwise conversion to extension dtype # GH 24704: use iloc to handle duplicate column names # TODO(EA2D): special case not needed with 2D EAs results = [ self.iloc[:, i].astype(dtype, copy=copy) for i in range(len(self.columns)) ] else: # else, only a single dtype is given new_data = self._mgr.astype(dtype=dtype, copy=copy, errors=errors) return self._constructor(new_data).__finalize__(self, method="astype") # GH 33113: handle empty frame or series if not results: return self.copy(deep=None) # GH 19920: retain column metadata after concat result = concat(results, axis=1, copy=False) # GH#40810 retain subclass # error: Incompatible types in assignment # (expression has type "NDFrameT", variable has type "DataFrame") result = self._constructor(result) # type: ignore[assignment] result.columns = self.columns result = result.__finalize__(self, method="astype") # https://github.com/python/mypy/issues/8354 return cast(NDFrameT, result) def copy(self: NDFrameT, deep: bool_t | None = True) -> NDFrameT: """ Make a copy of this object's indices and data. When ``deep=True`` (default), a new object will be created with a copy of the calling object's data and indices. Modifications to the data or indices of the copy will not be reflected in the original object (see notes below). When ``deep=False``, a new object will be created without copying the calling object's data or index (only references to the data and index are copied). Any changes to the data of the original will be reflected in the shallow copy (and vice versa). Parameters ---------- deep : bool, default True Make a deep copy, including a copy of the data and the indices. With ``deep=False`` neither the indices nor the data are copied. Returns ------- Series or DataFrame Object type matches caller. Notes ----- When ``deep=True``, data is copied but actual Python objects will not be copied recursively, only the reference to the object. This is in contrast to `copy.deepcopy` in the Standard Library, which recursively copies object data (see examples below). While ``Index`` objects are copied when ``deep=True``, the underlying numpy array is not copied for performance reasons. Since ``Index`` is immutable, the underlying data can be safely shared and a copy is not needed. Since pandas is not thread safe, see the :ref:`gotchas <gotchas.thread-safety>` when copying in a threading environment. Examples -------- >>> s = pd.Series([1, 2], index=["a", "b"]) >>> s a 1 b 2 dtype: int64 >>> s_copy = s.copy() >>> s_copy a 1 b 2 dtype: int64 **Shallow copy versus default (deep) copy:** >>> s = pd.Series([1, 2], index=["a", "b"]) >>> deep = s.copy() >>> shallow = s.copy(deep=False) Shallow copy shares data and index with original. >>> s is shallow False >>> s.values is shallow.values and s.index is shallow.index True Deep copy has own copy of data and index. >>> s is deep False >>> s.values is deep.values or s.index is deep.index False Updates to the data shared by shallow copy and original is reflected in both; deep copy remains unchanged. 
        >>> s[0] = 3
        >>> shallow[1] = 4
        >>> s
        a    3
        b    4
        dtype: int64
        >>> shallow
        a    3
        b    4
        dtype: int64
        >>> deep
        a    1
        b    2
        dtype: int64

        Note that when copying an object containing Python objects, a deep copy
        will copy the data, but will not do so recursively. Updating a nested
        data object will be reflected in the deep copy.

        >>> s = pd.Series([[1, 2], [3, 4]])
        >>> deep = s.copy()
        >>> s[0][0] = 10
        >>> s
        0    [10, 2]
        1     [3, 4]
        dtype: object
        >>> deep
        0    [10, 2]
        1     [3, 4]
        dtype: object
        """
        data = self._mgr.copy(deep=deep)
        self._clear_item_cache()
        return self._constructor(data).__finalize__(self, method="copy")

    def __copy__(self: NDFrameT, deep: bool_t = True) -> NDFrameT:
        return self.copy(deep=deep)

    def __deepcopy__(self: NDFrameT, memo=None) -> NDFrameT:
        """
        Parameters
        ----------
        memo, default None
            Standard signature. Unused
        """
        return self.copy(deep=True)

    def infer_objects(self: NDFrameT, copy: bool_t | None = None) -> NDFrameT:
        """
        Attempt to infer better dtypes for object columns.

        Attempts soft conversion of object-dtyped columns, leaving non-object
        and unconvertible columns unchanged. The inference rules are the same
        as during normal Series/DataFrame construction.

        Parameters
        ----------
        copy : bool, default True
            Whether to make a copy for non-object or non-inferrable columns
            or Series.

        Returns
        -------
        same type as input object

        See Also
        --------
        to_datetime : Convert argument to datetime.
        to_timedelta : Convert argument to timedelta.
        to_numeric : Convert argument to numeric type.
        convert_dtypes : Convert argument to best possible dtype.

        Examples
        --------
        >>> df = pd.DataFrame({"A": ["a", 1, 2, 3]})
        >>> df = df.iloc[1:]
        >>> df
           A
        1  1
        2  2
        3  3

        >>> df.dtypes
        A    object
        dtype: object

        >>> df.infer_objects().dtypes
        A    int64
        dtype: object
        """
        new_mgr = self._mgr.convert(copy=copy)
        return self._constructor(new_mgr).__finalize__(self, method="infer_objects")

    def convert_dtypes(
        self: NDFrameT,
        infer_objects: bool_t = True,
        convert_string: bool_t = True,
        convert_integer: bool_t = True,
        convert_boolean: bool_t = True,
        convert_floating: bool_t = True,
        dtype_backend: DtypeBackend = "numpy_nullable",
    ) -> NDFrameT:
        """
        Convert columns to the best possible dtypes using dtypes supporting ``pd.NA``.

        Parameters
        ----------
        infer_objects : bool, default True
            Whether object dtypes should be converted to the best possible types.
        convert_string : bool, default True
            Whether object dtypes should be converted to ``StringDtype()``.
        convert_integer : bool, default True
            Whether, if possible, conversion can be done to integer extension types.
        convert_boolean : bool, default True
            Whether object dtypes should be converted to ``BooleanDtypes()``.
        convert_floating : bool, default True
            Whether, if possible, conversion can be done to floating extension
            types. If `convert_integer` is also True, preference will be given
            to integer dtypes if the floats can be faithfully cast to integers.

            .. versionadded:: 1.2.0
        dtype_backend : {"numpy_nullable", "pyarrow"}, default "numpy_nullable"
            Which dtype_backend to use, e.g. whether a DataFrame should use
            nullable dtypes for all dtypes that have a nullable implementation
            when "numpy_nullable" is set, pyarrow is used for all dtypes if
            "pyarrow" is set.

            The dtype_backends are still experimental.

            .. versionadded:: 2.0

        Returns
        -------
        Series or DataFrame
            Copy of input object with new dtype.

        See Also
        --------
        infer_objects : Infer dtypes of objects.
        to_datetime : Convert argument to datetime.
        to_timedelta : Convert argument to timedelta.
        to_numeric : Convert argument to a numeric type.
Notes ----- By default, ``convert_dtypes`` will attempt to convert a Series (or each Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options ``convert_string``, ``convert_integer``, ``convert_boolean`` and ``convert_floating``, it is possible to turn off individual conversions to ``StringDtype``, the integer extension types, ``BooleanDtype`` or floating extension types, respectively. For object-dtyped columns, if ``infer_objects`` is ``True``, use the inference rules as during normal Series/DataFrame construction. Then, if possible, convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer or floating extension type, otherwise leave as ``object``. If the dtype is integer, convert to an appropriate integer extension type. If the dtype is numeric, and consists of all integers, convert to an appropriate integer extension type. Otherwise, convert to an appropriate floating extension type. .. versionchanged:: 1.2 Starting with pandas 1.2, this method also converts float columns to the nullable floating extension type. In the future, as new dtypes are added that support ``pd.NA``, the results of this method will change to support those new dtypes. .. versionadded:: 2.0 The nullable dtype implementation can be configured by calling ``pd.set_option("mode.dtype_backend", "pandas")`` to use numpy-backed nullable dtypes or ``pd.set_option("mode.dtype_backend", "pyarrow")`` to use pyarrow-backed nullable dtypes (using ``pd.ArrowDtype``). Examples -------- >>> df = pd.DataFrame( ... { ... "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")), ... "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")), ... "c": pd.Series([True, False, np.nan], dtype=np.dtype("O")), ... "d": pd.Series(["h", "i", np.nan], dtype=np.dtype("O")), ... "e": pd.Series([10, np.nan, 20], dtype=np.dtype("float")), ... "f": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")), ... } ... ) Start with a DataFrame with default dtypes. >>> df a b c d e f 0 1 x True h 10.0 NaN 1 2 y False i NaN 100.5 2 3 z NaN NaN 20.0 200.0 >>> df.dtypes a int32 b object c object d object e float64 f float64 dtype: object Convert the DataFrame to use best possible dtypes. >>> dfn = df.convert_dtypes() >>> dfn a b c d e f 0 1 x True h 10 <NA> 1 2 y False i <NA> 100.5 2 3 z <NA> <NA> 20 200.0 >>> dfn.dtypes a Int32 b string[python] c boolean d string[python] e Int64 f Float64 dtype: object Start with a Series of strings and missing data represented by ``np.nan``. >>> s = pd.Series(["a", "b", np.nan]) >>> s 0 a 1 b 2 NaN dtype: object Obtain a Series with dtype ``StringDtype``. 
>>> s.convert_dtypes() 0 a 1 b 2 <NA> dtype: string """ check_dtype_backend(dtype_backend) if self.ndim == 1: return self._convert_dtypes( infer_objects, convert_string, convert_integer, convert_boolean, convert_floating, dtype_backend=dtype_backend, ) else: results = [ col._convert_dtypes( infer_objects, convert_string, convert_integer, convert_boolean, convert_floating, dtype_backend=dtype_backend, ) for col_name, col in self.items() ] if len(results) > 0: result = concat(results, axis=1, copy=False, keys=self.columns) cons = cast(Type["DataFrame"], self._constructor) result = cons(result) result = result.__finalize__(self, method="convert_dtypes") # https://github.com/python/mypy/issues/8354 return cast(NDFrameT, result) else: return self.copy(deep=None) # ---------------------------------------------------------------------- # Filling NA's def fillna( self: NDFrameT, value: Hashable | Mapping | Series | DataFrame = ..., *, method: FillnaOptions | None = ..., axis: Axis | None = ..., inplace: Literal[False] = ..., limit: int | None = ..., downcast: dict | None = ..., ) -> NDFrameT: ... def fillna( self, value: Hashable | Mapping | Series | DataFrame = ..., *, method: FillnaOptions | None = ..., axis: Axis | None = ..., inplace: Literal[True], limit: int | None = ..., downcast: dict | None = ..., ) -> None: ... def fillna( self: NDFrameT, value: Hashable | Mapping | Series | DataFrame = ..., *, method: FillnaOptions | None = ..., axis: Axis | None = ..., inplace: bool_t = ..., limit: int | None = ..., downcast: dict | None = ..., ) -> NDFrameT | None: ... def fillna( self: NDFrameT, value: Hashable | Mapping | Series | DataFrame = None, *, method: FillnaOptions | None = None, axis: Axis | None = None, inplace: bool_t = False, limit: int | None = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Fill NA/NaN values using the specified method. Parameters ---------- value : scalar, dict, Series, or DataFrame Value to use to fill holes (e.g. 0), alternately a dict/Series/DataFrame of values specifying which value to use for each index (for a Series) or column (for a DataFrame). Values not in the dict/Series/DataFrame will not be filled. This value cannot be a list. method : {{'backfill', 'bfill', 'ffill', None}}, default None Method to use for filling holes in reindexed Series: * ffill: propagate last valid observation forward to next valid. * backfill / bfill: use next valid observation to fill gap. axis : {axes_single_arg} Axis along which to fill missing values. For `Series` this parameter is unused and defaults to 0. inplace : bool, default False If True, fill in-place. Note: this will modify any other views on this object (e.g., a no-copy slice for a column in a DataFrame). limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. downcast : dict, default is None A dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate equal type (e.g. float64 to int64 if possible). Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. See Also -------- interpolate : Fill NaN values using interpolation. reindex : Conform object to new index. 
        asfreq : Convert TimeSeries to specified frequency.

        Examples
        --------
        >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],
        ...                    [3, 4, np.nan, 1],
        ...                    [np.nan, np.nan, np.nan, np.nan],
        ...                    [np.nan, 3, np.nan, 4]],
        ...                   columns=list("ABCD"))
        >>> df
             A    B   C    D
        0  NaN  2.0 NaN  0.0
        1  3.0  4.0 NaN  1.0
        2  NaN  NaN NaN  NaN
        3  NaN  3.0 NaN  4.0

        Replace all NaN elements with 0s.

        >>> df.fillna(0)
             A    B    C    D
        0  0.0  2.0  0.0  0.0
        1  3.0  4.0  0.0  1.0
        2  0.0  0.0  0.0  0.0
        3  0.0  3.0  0.0  4.0

        We can also propagate non-null values forward or backward.

        >>> df.fillna(method="ffill")
             A    B   C    D
        0  NaN  2.0 NaN  0.0
        1  3.0  4.0 NaN  1.0
        2  3.0  4.0 NaN  1.0
        3  3.0  3.0 NaN  4.0

        Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
        2, and 3 respectively.

        >>> values = {{"A": 0, "B": 1, "C": 2, "D": 3}}
        >>> df.fillna(value=values)
             A    B    C    D
        0  0.0  2.0  2.0  0.0
        1  3.0  4.0  2.0  1.0
        2  0.0  1.0  2.0  3.0
        3  0.0  3.0  2.0  4.0

        Only replace the first NaN element.

        >>> df.fillna(value=values, limit=1)
             A    B    C    D
        0  0.0  2.0  2.0  0.0
        1  3.0  4.0  NaN  1.0
        2  NaN  1.0  NaN  3.0
        3  NaN  3.0  NaN  4.0

        When filling using a DataFrame, replacement happens along
        the same column names and same indices

        >>> df2 = pd.DataFrame(np.zeros((4, 4)), columns=list("ABCE"))
        >>> df.fillna(df2)
             A    B    C    D
        0  0.0  2.0  0.0  0.0
        1  3.0  4.0  0.0  1.0
        2  0.0  0.0  0.0  NaN
        3  0.0  3.0  0.0  4.0

        Note that column D is not affected since it is not present in df2.
        """
        inplace = validate_bool_kwarg(inplace, "inplace")
        value, method = validate_fillna_kwargs(value, method)

        # set the default here, so functions examining the signature
        # can detect if something was set (e.g. in groupby) (GH9221)
        if axis is None:
            axis = 0
        axis = self._get_axis_number(axis)

        if value is None:
            if not self._mgr.is_single_block and axis == 1:
                if inplace:
                    raise NotImplementedError()
                result = self.T.fillna(method=method, limit=limit).T

                return result

            new_data = self._mgr.interpolate(
                method=method,
                axis=axis,
                limit=limit,
                inplace=inplace,
                downcast=downcast,
            )
        else:
            if self.ndim == 1:
                if isinstance(value, (dict, ABCSeries)):
                    if not len(value):
                        # test_fillna_nonscalar
                        if inplace:
                            return None
                        return self.copy(deep=None)
                    from pandas import Series

                    value = Series(value)
                    value = value.reindex(self.index, copy=False)
                    value = value._values
                elif not is_list_like(value):
                    pass
                else:
                    raise TypeError(
                        '"value" parameter must be a scalar, dict '
                        "or Series, but you passed a "
                        f'"{type(value).__name__}"'
                    )

                new_data = self._mgr.fillna(
                    value=value, limit=limit, inplace=inplace, downcast=downcast
                )

            elif isinstance(value, (dict, ABCSeries)):
                if axis == 1:
                    raise NotImplementedError(
                        "Currently only can fill "
                        "with dict/Series column "
                        "by column"
                    )
                if using_copy_on_write():
                    result = self.copy(deep=None)
                else:
                    result = self if inplace else self.copy()
                is_dict = isinstance(downcast, dict)
                for k, v in value.items():
                    if k not in result:
                        continue

                    # error: Item "None" of "Optional[Dict[Any, Any]]" has no
                    # attribute "get"
                    downcast_k = (
                        downcast
                        if not is_dict
                        else downcast.get(k)  # type: ignore[union-attr]
                    )

                    res_k = result[k].fillna(v, limit=limit, downcast=downcast_k)

                    if not inplace:
                        result[k] = res_k
                    else:
                        # We can write into our existing column(s) iff dtype
                        # was preserved.
                        if isinstance(res_k, ABCSeries):
                            # i.e. 'k' only shows up once in self.columns
                            if res_k.dtype == result[k].dtype:
                                result.loc[:, k] = res_k
                            else:
                                # Different dtype -> no way to do inplace.
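                                # Fall back to a regular ``__setitem__``,
                                # which replaces the column's block instead
                                # of writing into it.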
result[k] = res_k else: # see test_fillna_dict_inplace_nonunique_columns locs = result.columns.get_loc(k) if isinstance(locs, slice): locs = np.arange(self.shape[1])[locs] elif ( isinstance(locs, np.ndarray) and locs.dtype.kind == "b" ): locs = locs.nonzero()[0] elif not ( isinstance(locs, np.ndarray) and locs.dtype.kind == "i" ): # Should never be reached, but let's cover our bases raise NotImplementedError( "Unexpected get_loc result, please report a bug at " "https://github.com/pandas-dev/pandas" ) for i, loc in enumerate(locs): res_loc = res_k.iloc[:, i] target = self.iloc[:, loc] if res_loc.dtype == target.dtype: result.iloc[:, loc] = res_loc else: result.isetitem(loc, res_loc) if inplace: return self._update_inplace(result) else: return result elif not is_list_like(value): if axis == 1: result = self.T.fillna(value=value, limit=limit).T new_data = result else: new_data = self._mgr.fillna( value=value, limit=limit, inplace=inplace, downcast=downcast ) elif isinstance(value, ABCDataFrame) and self.ndim == 2: new_data = self.where(self.notna(), value)._mgr else: raise ValueError(f"invalid fill value with a {type(value)}") result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="fillna") def ffill( self: NDFrameT, *, axis: None | Axis = ..., inplace: Literal[False] = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT: ... def ffill( self, *, axis: None | Axis = ..., inplace: Literal[True], limit: None | int = ..., downcast: dict | None = ..., ) -> None: ... def ffill( self: NDFrameT, *, axis: None | Axis = ..., inplace: bool_t = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT | None: ... def ffill( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``. Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. """ return self.fillna( method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast ) def pad( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``. .. deprecated:: 2.0 {klass}.pad is deprecated. Use {klass}.ffill instead. Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. """ warnings.warn( "DataFrame.pad/Series.pad is deprecated. Use " "DataFrame.ffill/Series.ffill instead", FutureWarning, stacklevel=find_stack_level(), ) return self.ffill(axis=axis, inplace=inplace, limit=limit, downcast=downcast) def bfill( self: NDFrameT, *, axis: None | Axis = ..., inplace: Literal[False] = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT: ... def bfill( self, *, axis: None | Axis = ..., inplace: Literal[True], limit: None | int = ..., downcast: dict | None = ..., ) -> None: ... def bfill( self: NDFrameT, *, axis: None | Axis = ..., inplace: bool_t = ..., limit: None | int = ..., downcast: dict | None = ..., ) -> NDFrameT | None: ... def bfill( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``. 
Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. """ return self.fillna( method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast ) def backfill( self: NDFrameT, *, axis: None | Axis = None, inplace: bool_t = False, limit: None | int = None, downcast: dict | None = None, ) -> NDFrameT | None: """ Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``. .. deprecated:: 2.0 {klass}.backfill is deprecated. Use {klass}.bfill instead. Returns ------- {klass} or None Object with missing values filled or None if ``inplace=True``. """ warnings.warn( "DataFrame.backfill/Series.backfill is deprecated. Use " "DataFrame.bfill/Series.bfill instead", FutureWarning, stacklevel=find_stack_level(), ) return self.bfill(axis=axis, inplace=inplace, limit=limit, downcast=downcast) def replace( self: NDFrameT, to_replace=..., value=..., *, inplace: Literal[False] = ..., limit: int | None = ..., regex: bool_t = ..., method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ..., ) -> NDFrameT: ... def replace( self, to_replace=..., value=..., *, inplace: Literal[True], limit: int | None = ..., regex: bool_t = ..., method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ..., ) -> None: ... def replace( self: NDFrameT, to_replace=..., value=..., *, inplace: bool_t = ..., limit: int | None = ..., regex: bool_t = ..., method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ..., ) -> NDFrameT | None: ... _shared_docs["replace"], klass=_shared_doc_kwargs["klass"], inplace=_shared_doc_kwargs["inplace"], replace_iloc=_shared_doc_kwargs["replace_iloc"], ) def replace( self: NDFrameT, to_replace=None, value=lib.no_default, *, inplace: bool_t = False, limit: int | None = None, regex: bool_t = False, method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = lib.no_default, ) -> NDFrameT | None: if not ( is_scalar(to_replace) or is_re_compilable(to_replace) or is_list_like(to_replace) ): raise TypeError( "Expecting 'to_replace' to be either a scalar, array-like, " "dict or None, got invalid type " f"{repr(type(to_replace).__name__)}" ) inplace = validate_bool_kwarg(inplace, "inplace") if not is_bool(regex) and to_replace is not None: raise ValueError("'to_replace' must be 'None' if 'regex' is not a bool") if value is lib.no_default or method is not lib.no_default: # GH#36984 if the user explicitly passes value=None we want to # respect that. We have the corner case where the user explicitly # passes value=None *and* a method, which we interpret as meaning # they want the (documented) default behavior. if method is lib.no_default: # TODO: get this to show up as the default in the docs? 
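                # Illustration (editor's sketch, not from the original
                # source): with ``value`` omitted, matched entries are
                # filled from their neighbours, e.g.
                #   pd.Series([0, 1, 2, 1]).replace(1)
                # forward-fills each match under the default method,
                # giving 0, 0, 2, 2.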
method = "pad" # passing a single value that is scalar like # when value is None (GH5319), for compat if not is_dict_like(to_replace) and not is_dict_like(regex): to_replace = [to_replace] if isinstance(to_replace, (tuple, list)): # TODO: Consider copy-on-write for non-replaced columns's here if isinstance(self, ABCDataFrame): from pandas import Series result = self.apply( Series._replace_single, args=(to_replace, method, inplace, limit), ) if inplace: return None return result return self._replace_single(to_replace, method, inplace, limit) if not is_dict_like(to_replace): if not is_dict_like(regex): raise TypeError( 'If "to_replace" and "value" are both None ' 'and "to_replace" is not a list, then ' "regex must be a mapping" ) to_replace = regex regex = True items = list(to_replace.items()) if items: keys, values = zip(*items) else: keys, values = ([], []) are_mappings = [is_dict_like(v) for v in values] if any(are_mappings): if not all(are_mappings): raise TypeError( "If a nested mapping is passed, all values " "of the top level mapping must be mappings" ) # passed a nested dict/Series to_rep_dict = {} value_dict = {} for k, v in items: keys, values = list(zip(*v.items())) or ([], []) to_rep_dict[k] = list(keys) value_dict[k] = list(values) to_replace, value = to_rep_dict, value_dict else: to_replace, value = keys, values return self.replace( to_replace, value, inplace=inplace, limit=limit, regex=regex ) else: # need a non-zero len on all axes if not self.size: if inplace: return None return self.copy(deep=None) if is_dict_like(to_replace): if is_dict_like(value): # {'A' : NA} -> {'A' : 0} # Note: Checking below for `in foo.keys()` instead of # `in foo` is needed for when we have a Series and not dict mapping = { col: (to_replace[col], value[col]) for col in to_replace.keys() if col in value.keys() and col in self } return self._replace_columnwise(mapping, inplace, regex) # {'A': NA} -> 0 elif not is_list_like(value): # Operate column-wise if self.ndim == 1: raise ValueError( "Series.replace cannot use dict-like to_replace " "and non-None value" ) mapping = { col: (to_rep, value) for col, to_rep in to_replace.items() } return self._replace_columnwise(mapping, inplace, regex) else: raise TypeError("value argument must be scalar, dict, or Series") elif is_list_like(to_replace): if not is_list_like(value): # e.g. to_replace = [NA, ''] and value is 0, # so we replace NA with 0 and then replace '' with 0 value = [value] * len(to_replace) # e.g. we have to_replace = [NA, ''] and value = [0, 'missing'] if len(to_replace) != len(value): raise ValueError( f"Replacement lists must match in length. 
" f"Expecting {len(to_replace)} got {len(value)} " ) new_data = self._mgr.replace_list( src_list=to_replace, dest_list=value, inplace=inplace, regex=regex, ) elif to_replace is None: if not ( is_re_compilable(regex) or is_list_like(regex) or is_dict_like(regex) ): raise TypeError( f"'regex' must be a string or a compiled regular expression " f"or a list or dict of strings or regular expressions, " f"you passed a {repr(type(regex).__name__)}" ) return self.replace( regex, value, inplace=inplace, limit=limit, regex=True ) else: # dest iterable dict-like if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1} # Operate column-wise if self.ndim == 1: raise ValueError( "Series.replace cannot use dict-value and " "non-None to_replace" ) mapping = {col: (to_replace, val) for col, val in value.items()} return self._replace_columnwise(mapping, inplace, regex) elif not is_list_like(value): # NA -> 0 regex = should_use_regex(regex, to_replace) if regex: new_data = self._mgr.replace_regex( to_replace=to_replace, value=value, inplace=inplace, ) else: new_data = self._mgr.replace( to_replace=to_replace, value=value, inplace=inplace ) else: raise TypeError( f'Invalid "to_replace" type: {repr(type(to_replace).__name__)}' ) result = self._constructor(new_data) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="replace") def interpolate( self: NDFrameT, method: str = "linear", *, axis: Axis = 0, limit: int | None = None, inplace: bool_t = False, limit_direction: str | None = None, limit_area: str | None = None, downcast: str | None = None, **kwargs, ) -> NDFrameT | None: """ Fill NaN values using an interpolation method. Please note that only ``method='linear'`` is supported for DataFrame/Series with a MultiIndex. Parameters ---------- method : str, default 'linear' Interpolation technique to use. One of: * 'linear': Ignore the index and treat the values as equally spaced. This is the only method supported on MultiIndexes. * 'time': Works on daily and higher resolution data to interpolate given length of interval. * 'index', 'values': use the actual numerical values of the index. * 'pad': Fill in NaNs using existing values. * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'barycentric', 'polynomial': Passed to `scipy.interpolate.interp1d`, whereas 'spline' is passed to `scipy.interpolate.UnivariateSpline`. These methods use the numerical values of the index. Both 'polynomial' and 'spline' require that you also specify an `order` (int), e.g. ``df.interpolate(method='polynomial', order=5)``. Note that, `slinear` method in Pandas refers to the Scipy first order `spline` instead of Pandas first order `spline`. * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima', 'cubicspline': Wrappers around the SciPy interpolation methods of similar names. See `Notes`. * 'from_derivatives': Refers to `scipy.interpolate.BPoly.from_derivatives` which replaces 'piecewise_polynomial' interpolation method in scipy 0.18. axis : {{0 or 'index', 1 or 'columns', None}}, default None Axis to interpolate along. For `Series` this parameter is unused and defaults to 0. limit : int, optional Maximum number of consecutive NaNs to fill. Must be greater than 0. inplace : bool, default False Update the data in place if possible. limit_direction : {{'forward', 'backward', 'both'}}, Optional Consecutive NaNs will be filled in this direction. If limit is specified: * If 'method' is 'pad' or 'ffill', 'limit_direction' must be 'forward'. 
* If 'method' is 'backfill' or 'bfill', 'limit_direction' must be 'backwards'. If 'limit' is not specified: * If 'method' is 'backfill' or 'bfill', the default is 'backward' * else the default is 'forward' .. versionchanged:: 1.1.0 raises ValueError if `limit_direction` is 'forward' or 'both' and method is 'backfill' or 'bfill'. raises ValueError if `limit_direction` is 'backward' or 'both' and method is 'pad' or 'ffill'. limit_area : {{`None`, 'inside', 'outside'}}, default None If limit is specified, consecutive NaNs will be filled with this restriction. * ``None``: No fill restriction. * 'inside': Only fill NaNs surrounded by valid values (interpolate). * 'outside': Only fill NaNs outside valid values (extrapolate). downcast : optional, 'infer' or None, defaults to None Downcast dtypes if possible. ``**kwargs`` : optional Keyword arguments to pass on to the interpolating function. Returns ------- Series or DataFrame or None Returns the same object type as the caller, interpolated at some or all ``NaN`` values or None if ``inplace=True``. See Also -------- fillna : Fill missing values using different methods. scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials (Akima interpolator). scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the Bernstein basis. scipy.interpolate.interp1d : Interpolate a 1-D function. scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh interpolator). scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic interpolation. scipy.interpolate.CubicSpline : Cubic spline data interpolator. Notes ----- The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima' methods are wrappers around the respective SciPy implementations of similar names. These use the actual numerical values of the index. For more information on their behavior, see the `SciPy documentation <https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__. Examples -------- Filling in ``NaN`` in a :class:`~pandas.Series` via linear interpolation. >>> s = pd.Series([0, 1, np.nan, 3]) >>> s 0 0.0 1 1.0 2 NaN 3 3.0 dtype: float64 >>> s.interpolate() 0 0.0 1 1.0 2 2.0 3 3.0 dtype: float64 Filling in ``NaN`` in a Series by padding, but filling at most two consecutive ``NaN`` at a time. >>> s = pd.Series([np.nan, "single_one", np.nan, ... "fill_two_more", np.nan, np.nan, np.nan, ... 4.71, np.nan]) >>> s 0 NaN 1 single_one 2 NaN 3 fill_two_more 4 NaN 5 NaN 6 NaN 7 4.71 8 NaN dtype: object >>> s.interpolate(method='pad', limit=2) 0 NaN 1 single_one 2 single_one 3 fill_two_more 4 fill_two_more 5 fill_two_more 6 NaN 7 4.71 8 4.71 dtype: object Filling in ``NaN`` in a Series via polynomial interpolation or splines: Both 'polynomial' and 'spline' methods require that you also specify an ``order`` (int). >>> s = pd.Series([0, 2, np.nan, 8]) >>> s.interpolate(method='polynomial', order=2) 0 0.000000 1 2.000000 2 4.666667 3 8.000000 dtype: float64 Fill the DataFrame forward (that is, going down) along each column using linear interpolation. Note how the last entry in column 'a' is interpolated differently, because there is no entry after it to use for interpolation. Note how the first entry in column 'b' remains ``NaN``, because there is no entry before it to use for interpolation. >>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0), ... (np.nan, 2.0, np.nan, np.nan), ... (2.0, 3.0, np.nan, 9.0), ... (np.nan, 4.0, -4.0, 16.0)], ... 
columns=list('abcd')) >>> df a b c d 0 0.0 NaN -1.0 1.0 1 NaN 2.0 NaN NaN 2 2.0 3.0 NaN 9.0 3 NaN 4.0 -4.0 16.0 >>> df.interpolate(method='linear', limit_direction='forward', axis=0) a b c d 0 0.0 NaN -1.0 1.0 1 1.0 2.0 -2.0 5.0 2 2.0 3.0 -3.0 9.0 3 2.0 4.0 -4.0 16.0 Using polynomial interpolation. >>> df['d'].interpolate(method='polynomial', order=2) 0 1.0 1 4.0 2 9.0 3 16.0 Name: d, dtype: float64 """ inplace = validate_bool_kwarg(inplace, "inplace") axis = self._get_axis_number(axis) fillna_methods = ["ffill", "bfill", "pad", "backfill"] should_transpose = axis == 1 and method not in fillna_methods obj = self.T if should_transpose else self if obj.empty: return self.copy() if method not in fillna_methods: axis = self._info_axis_number if isinstance(obj.index, MultiIndex) and method != "linear": raise ValueError( "Only `method=linear` interpolation is supported on MultiIndexes." ) # Set `limit_direction` depending on `method` if limit_direction is None: limit_direction = ( "backward" if method in ("backfill", "bfill") else "forward" ) else: if method in ("pad", "ffill") and limit_direction != "forward": raise ValueError( f"`limit_direction` must be 'forward' for method `{method}`" ) if method in ("backfill", "bfill") and limit_direction != "backward": raise ValueError( f"`limit_direction` must be 'backward' for method `{method}`" ) if obj.ndim == 2 and np.all(obj.dtypes == np.dtype("object")): raise TypeError( "Cannot interpolate with all object-dtype columns " "in the DataFrame. Try setting at least one " "column to a numeric dtype." ) # create/use the index if method == "linear": # prior default index = Index(np.arange(len(obj.index))) else: index = obj.index methods = {"index", "values", "nearest", "time"} is_numeric_or_datetime = ( is_numeric_dtype(index.dtype) or is_datetime64_any_dtype(index.dtype) or is_timedelta64_dtype(index.dtype) ) if method not in methods and not is_numeric_or_datetime: raise ValueError( "Index column must be numeric or datetime type when " f"using {method} method other than linear. " "Try setting a numeric or datetime index column before " "interpolating." ) if isna(index).any(): raise NotImplementedError( "Interpolation with NaNs in the index " "has not been implemented. Try filling " "those NaNs before interpolating." ) new_data = obj._mgr.interpolate( method=method, axis=axis, index=index, limit=limit, limit_direction=limit_direction, limit_area=limit_area, inplace=inplace, downcast=downcast, **kwargs, ) result = self._constructor(new_data) if should_transpose: result = result.T if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method="interpolate") # ---------------------------------------------------------------------- # Timeseries methods Methods def asof(self, where, subset=None): """ Return the last row(s) without any NaNs before `where`. The last row (for each element in `where`, if list) without any NaN is taken. In case of a :class:`~pandas.DataFrame`, the last row without NaN considering only the subset of columns (if not `None`) If there is no good value, NaN is returned for a Series or a Series of NaN values for a DataFrame Parameters ---------- where : date or array-like of dates Date(s) before which the last row(s) are returned. subset : str or array-like of str, default `None` For DataFrame, if not `None`, only use these columns to check for NaNs. 
Returns ------- scalar, Series, or DataFrame The return can be: * scalar : when `self` is a Series and `where` is a scalar * Series: when `self` is a Series and `where` is an array-like, or when `self` is a DataFrame and `where` is a scalar * DataFrame : when `self` is a DataFrame and `where` is an array-like Return scalar, Series, or DataFrame. See Also -------- merge_asof : Perform an asof merge. Similar to left join. Notes ----- Dates are assumed to be sorted. Raises if this is not the case. Examples -------- A Series and a scalar `where`. >>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40]) >>> s 10 1.0 20 2.0 30 NaN 40 4.0 dtype: float64 >>> s.asof(20) 2.0 For a sequence `where`, a Series is returned. The first value is NaN, because the first element of `where` is before the first index value. >>> s.asof([5, 20]) 5 NaN 20 2.0 dtype: float64 Missing values are not considered. The following is ``2.0``, not NaN, even though NaN is at the index location for ``30``. >>> s.asof(30) 2.0 Take all columns into consideration >>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50], ... 'b': [None, None, None, None, 500]}, ... index=pd.DatetimeIndex(['2018-02-27 09:01:00', ... '2018-02-27 09:02:00', ... '2018-02-27 09:03:00', ... '2018-02-27 09:04:00', ... '2018-02-27 09:05:00'])) >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', ... '2018-02-27 09:04:30'])) a b 2018-02-27 09:03:30 NaN NaN 2018-02-27 09:04:30 NaN NaN Take a single column into consideration >>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30', ... '2018-02-27 09:04:30']), ... subset=['a']) a b 2018-02-27 09:03:30 30 NaN 2018-02-27 09:04:30 40 NaN """ if isinstance(where, str): where = Timestamp(where) if not self.index.is_monotonic_increasing: raise ValueError("asof requires a sorted index") is_series = isinstance(self, ABCSeries) if is_series: if subset is not None: raise ValueError("subset is not valid for Series") else: if subset is None: subset = self.columns if not is_list_like(subset): subset = [subset] is_list = is_list_like(where) if not is_list: start = self.index[0] if isinstance(self.index, PeriodIndex): where = Period(where, freq=self.index.freq) if where < start: if not is_series: return self._constructor_sliced( index=self.columns, name=where, dtype=np.float64 ) return np.nan # It's always much faster to use a *while* loop here for # Series than pre-computing all the NAs. However a # *while* loop is extremely expensive for DataFrame # so we later pre-compute all the NAs and use the same # code path whether *where* is a scalar or list. # See PR: https://github.com/pandas-dev/pandas/pull/14476 if is_series: loc = self.index.searchsorted(where, side="right") if loc > 0: loc -= 1 values = self._values while loc > 0 and isna(values[loc]): loc -= 1 return values[loc] if not isinstance(where, Index): where = Index(where) if is_list else Index([where]) nulls = self.isna() if is_series else self[subset].isna().any(axis=1) if nulls.all(): if is_series: self = cast("Series", self) return self._constructor(np.nan, index=where, name=self.name) elif is_list: self = cast("DataFrame", self) return self._constructor(np.nan, index=where, columns=self.columns) else: self = cast("DataFrame", self) return self._constructor_sliced( np.nan, index=self.columns, name=where[0] ) locs = self.index.asof_locs(where, ~(nulls._values)) # mask the missing missing = locs == -1 data = self.take(locs) data.index = where if missing.any(): # GH#16063 only do this setting when necessary, otherwise # we'd cast e.g. 
bools to floats data.loc[missing] = np.nan return data if is_list else data.iloc[-1] # ---------------------------------------------------------------------- # Action Methods def isna(self: NDFrameT) -> NDFrameT: """ Detect missing values. Return a boolean same-sized object indicating if the values are NA. NA values, such as None or :attr:`numpy.NaN`, gets mapped to True values. Everything else gets mapped to False values. Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). Returns ------- {klass} Mask of bool values for each element in {klass} that indicates whether an element is an NA value. See Also -------- {klass}.isnull : Alias of isna. {klass}.notna : Boolean inverse of isna. {klass}.dropna : Omit axes labels with missing values. isna : Top-level isna. Examples -------- Show which entries in a DataFrame are NA. >>> df = pd.DataFrame(dict(age=[5, 6, np.NaN], ... born=[pd.NaT, pd.Timestamp('1939-05-27'), ... pd.Timestamp('1940-04-25')], ... name=['Alfred', 'Batman', ''], ... toy=[None, 'Batmobile', 'Joker'])) >>> df age born name toy 0 5.0 NaT Alfred None 1 6.0 1939-05-27 Batman Batmobile 2 NaN 1940-04-25 Joker >>> df.isna() age born name toy 0 False True False True 1 False False False False 2 True False False False Show which entries in a Series are NA. >>> ser = pd.Series([5, 6, np.NaN]) >>> ser 0 5.0 1 6.0 2 NaN dtype: float64 >>> ser.isna() 0 False 1 False 2 True dtype: bool """ return isna(self).__finalize__(self, method="isna") def isnull(self: NDFrameT) -> NDFrameT: return isna(self).__finalize__(self, method="isnull") def notna(self: NDFrameT) -> NDFrameT: """ Detect existing (non-missing) values. Return a boolean same-sized object indicating if the values are not NA. Non-missing values get mapped to True. Characters such as empty strings ``''`` or :attr:`numpy.inf` are not considered NA values (unless you set ``pandas.options.mode.use_inf_as_na = True``). NA values, such as None or :attr:`numpy.NaN`, get mapped to False values. Returns ------- {klass} Mask of bool values for each element in {klass} that indicates whether an element is not an NA value. See Also -------- {klass}.notnull : Alias of notna. {klass}.isna : Boolean inverse of notna. {klass}.dropna : Omit axes labels with missing values. notna : Top-level notna. Examples -------- Show which entries in a DataFrame are not NA. >>> df = pd.DataFrame(dict(age=[5, 6, np.NaN], ... born=[pd.NaT, pd.Timestamp('1939-05-27'), ... pd.Timestamp('1940-04-25')], ... name=['Alfred', 'Batman', ''], ... toy=[None, 'Batmobile', 'Joker'])) >>> df age born name toy 0 5.0 NaT Alfred None 1 6.0 1939-05-27 Batman Batmobile 2 NaN 1940-04-25 Joker >>> df.notna() age born name toy 0 True False True False 1 True True True True 2 False True True True Show which entries in a Series are not NA. 
>>> ser = pd.Series([5, 6, np.NaN]) >>> ser 0 5.0 1 6.0 2 NaN dtype: float64 >>> ser.notna() 0 True 1 True 2 False dtype: bool """ return notna(self).__finalize__(self, method="notna") def notnull(self: NDFrameT) -> NDFrameT: return notna(self).__finalize__(self, method="notnull") def _clip_with_scalar(self, lower, upper, inplace: bool_t = False): if (lower is not None and np.any(isna(lower))) or ( upper is not None and np.any(isna(upper)) ): raise ValueError("Cannot use an NA value as a clip threshold") result = self mask = isna(self._values) with np.errstate(all="ignore"): if upper is not None: subset = self <= upper result = result.where(subset, upper, axis=None, inplace=False) if lower is not None: subset = self >= lower result = result.where(subset, lower, axis=None, inplace=False) if np.any(mask): result[mask] = np.nan if inplace: return self._update_inplace(result) else: return result def _clip_with_one_bound(self, threshold, method, axis, inplace): if axis is not None: axis = self._get_axis_number(axis) # method is self.le for upper bound and self.ge for lower bound if is_scalar(threshold) and is_number(threshold): if method.__name__ == "le": return self._clip_with_scalar(None, threshold, inplace=inplace) return self._clip_with_scalar(threshold, None, inplace=inplace) # GH #15390 # In order for where method to work, the threshold must # be transformed to NDFrame from other array like structure. if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold): if isinstance(self, ABCSeries): threshold = self._constructor(threshold, index=self.index) else: threshold = align_method_FRAME(self, threshold, axis, flex=None)[1] # GH 40420 # Treat missing thresholds as no bounds, not clipping the values if is_list_like(threshold): fill_value = np.inf if method.__name__ == "le" else -np.inf threshold_inf = threshold.fillna(fill_value) else: threshold_inf = threshold subset = method(threshold_inf, axis=axis) | isna(self) # GH 40420 return self.where(subset, threshold, axis=axis, inplace=inplace) def clip( self: NDFrameT, lower=None, upper=None, *, axis: Axis | None = None, inplace: bool_t = False, **kwargs, ) -> NDFrameT | None: """ Trim values at input threshold(s). Assigns values outside boundary to boundary values. Thresholds can be singular values or array like, and in the latter case the clipping is performed element-wise in the specified axis. Parameters ---------- lower : float or array-like, default None Minimum threshold value. All values below this threshold will be set to it. A missing threshold (e.g `NA`) will not clip the value. upper : float or array-like, default None Maximum threshold value. All values above this threshold will be set to it. A missing threshold (e.g `NA`) will not clip the value. axis : {{0 or 'index', 1 or 'columns', None}}, default None Align object with lower and upper along the given axis. For `Series` this parameter is unused and defaults to `None`. inplace : bool, default False Whether to perform the operation in place on the data. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with numpy. Returns ------- Series or DataFrame or None Same type as calling object with the values outside the clip boundaries replaced or None if ``inplace=True``. See Also -------- Series.clip : Trim values at input threshold in series. DataFrame.clip : Trim values at input threshold in dataframe. numpy.clip : Clip (limit) the values in an array. 
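        Notes
        -----
        Entries that are missing before clipping stay missing in the
        result; the thresholds are never used to fill NA values.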
Examples -------- >>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]} >>> df = pd.DataFrame(data) >>> df col_0 col_1 0 9 -2 1 -3 -7 2 0 6 3 -1 8 4 5 -5 Clips per column using lower and upper thresholds: >>> df.clip(-4, 6) col_0 col_1 0 6 -2 1 -3 -4 2 0 6 3 -1 6 4 5 -4 Clips using specific lower and upper thresholds per column element: >>> t = pd.Series([2, -4, -1, 6, 3]) >>> t 0 2 1 -4 2 -1 3 6 4 3 dtype: int64 >>> df.clip(t, t + 4, axis=0) col_0 col_1 0 6 2 1 -3 -4 2 0 3 3 6 8 4 5 3 Clips using specific lower threshold per column element, with missing values: >>> t = pd.Series([2, -4, np.NaN, 6, 3]) >>> t 0 2.0 1 -4.0 2 NaN 3 6.0 4 3.0 dtype: float64 >>> df.clip(t, axis=0) col_0 col_1 0 9 2 1 -3 -4 2 0 6 3 6 8 4 5 3 """ inplace = validate_bool_kwarg(inplace, "inplace") axis = nv.validate_clip_with_axis(axis, (), kwargs) if axis is not None: axis = self._get_axis_number(axis) # GH 17276 # numpy doesn't like NaN as a clip value # so ignore # GH 19992 # numpy doesn't drop a list-like bound containing NaN isna_lower = isna(lower) if not is_list_like(lower): if np.any(isna_lower): lower = None elif np.all(isna_lower): lower = None isna_upper = isna(upper) if not is_list_like(upper): if np.any(isna_upper): upper = None elif np.all(isna_upper): upper = None # GH 2747 (arguments were reversed) if ( lower is not None and upper is not None and is_scalar(lower) and is_scalar(upper) ): lower, upper = min(lower, upper), max(lower, upper) # fast-path for scalars if (lower is None or (is_scalar(lower) and is_number(lower))) and ( upper is None or (is_scalar(upper) and is_number(upper)) ): return self._clip_with_scalar(lower, upper, inplace=inplace) result = self if lower is not None: result = result._clip_with_one_bound( lower, method=self.ge, axis=axis, inplace=inplace ) if upper is not None: if inplace: result = self result = result._clip_with_one_bound( upper, method=self.le, axis=axis, inplace=inplace ) return result def asfreq( self: NDFrameT, freq: Frequency, method: FillnaOptions | None = None, how: str | None = None, normalize: bool_t = False, fill_value: Hashable = None, ) -> NDFrameT: """ Convert time series to specified frequency. Returns the original data conformed to a new index with the specified frequency. If the index of this {klass} is a :class:`~pandas.PeriodIndex`, the new index is the result of transforming the original index with :meth:`PeriodIndex.asfreq <pandas.PeriodIndex.asfreq>` (so the original index will map one-to-one to the new index). Otherwise, the new index will be equivalent to ``pd.date_range(start, end, freq=freq)`` where ``start`` and ``end`` are, respectively, the first and last entries in the original index (see :func:`pandas.date_range`). The values corresponding to any timesteps in the new index which were not present in the original index will be null (``NaN``), unless a method for filling such unknowns is provided (see the ``method`` parameter below). The :meth:`resample` method is more appropriate if an operation on each group of timesteps (such as an aggregate) is necessary to represent the data at the new frequency. Parameters ---------- freq : DateOffset or str Frequency DateOffset or string. method : {{'backfill'/'bfill', 'pad'/'ffill'}}, default None Method to use for filling holes in reindexed Series (note this does not fill NaNs that already were present): * 'pad' / 'ffill': propagate last valid observation forward to next valid * 'backfill' / 'bfill': use NEXT valid observation to fill. 
how : {{'start', 'end'}}, default end For PeriodIndex only (see PeriodIndex.asfreq). normalize : bool, default False Whether to reset output index to midnight. fill_value : scalar, optional Value to use for missing values, applied during upsampling (note this does not fill NaNs that already were present). Returns ------- {klass} {klass} object reindexed to the specified frequency. See Also -------- reindex : Conform DataFrame to new index with optional filling logic. Notes ----- To learn more about the frequency strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__. Examples -------- Start by creating a series with 4 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=4, freq='T') >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index) >>> df = pd.DataFrame({{'s': series}}) >>> df s 2000-01-01 00:00:00 0.0 2000-01-01 00:01:00 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:03:00 3.0 Upsample the series into 30 second bins. >>> df.asfreq(freq='30S') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 NaN 2000-01-01 00:03:00 3.0 Upsample again, providing a ``fill value``. >>> df.asfreq(freq='30S', fill_value=9.0) s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 9.0 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 9.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 9.0 2000-01-01 00:03:00 3.0 Upsample again, providing a ``method``. >>> df.asfreq(freq='30S', method='bfill') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 2.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 3.0 2000-01-01 00:03:00 3.0 """ from pandas.core.resample import asfreq return asfreq( self, freq, method=method, how=how, normalize=normalize, fill_value=fill_value, ) def at_time( self: NDFrameT, time, asof: bool_t = False, axis: Axis | None = None ) -> NDFrameT: """ Select values at particular time of day (e.g., 9:30AM). Parameters ---------- time : datetime.time or str The values to select. axis : {0 or 'index', 1 or 'columns'}, default 0 For `Series` this parameter is unused and defaults to 0. Returns ------- Series or DataFrame Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- between_time : Select values between particular times of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_at_time : Get just the index locations for values at particular time of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='12H') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-09 12:00:00 2 2018-04-10 00:00:00 3 2018-04-10 12:00:00 4 >>> ts.at_time('12:00') A 2018-04-09 12:00:00 2 2018-04-10 12:00:00 4 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) if not isinstance(index, DatetimeIndex): raise TypeError("Index must be DatetimeIndex") indexer = index.indexer_at_time(time, asof=asof) return self._take_with_is_copy(indexer, axis=axis) def between_time( self: NDFrameT, start_time, end_time, inclusive: IntervalClosedType = "both", axis: Axis | None = None, ) -> NDFrameT: """ Select values between particular times of the day (e.g., 9:00-9:30 AM). 
By setting ``start_time`` to be later than ``end_time``, you can get the times that are *not* between the two times. Parameters ---------- start_time : datetime.time or str Initial time as a time filter limit. end_time : datetime.time or str End time as a time filter limit. inclusive : {"both", "neither", "left", "right"}, default "both" Include boundaries; whether to set each bound as closed or open. axis : {0 or 'index', 1 or 'columns'}, default 0 Determine range time on index or columns value. For `Series` this parameter is unused and defaults to 0. Returns ------- Series or DataFrame Data from the original object filtered to the specified dates range. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- at_time : Select values at a particular time of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_between_time : Get just the index locations for values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 2018-04-12 01:00:00 4 >>> ts.between_time('0:15', '0:45') A 2018-04-10 00:20:00 2 2018-04-11 00:40:00 3 You get the times that are *not* between two times by setting ``start_time`` later than ``end_time``: >>> ts.between_time('0:45', '0:15') A 2018-04-09 00:00:00 1 2018-04-12 01:00:00 4 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) if not isinstance(index, DatetimeIndex): raise TypeError("Index must be DatetimeIndex") left_inclusive, right_inclusive = validate_inclusive(inclusive) indexer = index.indexer_between_time( start_time, end_time, include_start=left_inclusive, include_end=right_inclusive, ) return self._take_with_is_copy(indexer, axis=axis) def resample( self, rule, axis: Axis = 0, closed: str | None = None, label: str | None = None, convention: str = "start", kind: str | None = None, on: Level = None, level: Level = None, origin: str | TimestampConvertibleTypes = "start_day", offset: TimedeltaConvertibleTypes | None = None, group_keys: bool_t = False, ) -> Resampler: """ Resample time-series data. Convenience method for frequency conversion and resampling of time series. The object must have a datetime-like index (`DatetimeIndex`, `PeriodIndex`, or `TimedeltaIndex`), or the caller must pass the label of a datetime-like series/index to the ``on``/``level`` keyword parameter. Parameters ---------- rule : DateOffset, Timedelta or str The offset string or object representing target conversion. axis : {{0 or 'index', 1 or 'columns'}}, default 0 Which axis to use for up- or down-sampling. For `Series` this parameter is unused and defaults to 0. Must be `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`. closed : {{'right', 'left'}}, default None Which side of bin interval is closed. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. label : {{'right', 'left'}}, default None Which bin edge label to label bucket with. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. convention : {{'start', 'end', 's', 'e'}}, default 'start' For `PeriodIndex` only, controls whether to use the start or end of `rule`. 
kind : {{'timestamp', 'period'}}, optional, default None Pass 'timestamp' to convert the resulting index to a `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`. By default the input representation is retained. on : str, optional For a DataFrame, column to use instead of index for resampling. Column must be datetime-like. level : str or int, optional For a MultiIndex, level (name or number) to use for resampling. `level` must be datetime-like. origin : Timestamp or str, default 'start_day' The timestamp on which to adjust the grouping. The timezone of origin must match the timezone of the index. If string, must be one of the following: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries .. versionadded:: 1.1.0 - 'end': `origin` is the last value of the timeseries - 'end_day': `origin` is the ceiling midnight of the last day .. versionadded:: 1.3.0 offset : Timedelta or str, default is None An offset timedelta added to the origin. .. versionadded:: 1.1.0 group_keys : bool, default False Whether to include the group keys in the result index when using ``.apply()`` on the resampled object. .. versionadded:: 1.5.0 Not specifying ``group_keys`` will retain values-dependent behavior from pandas 1.4 and earlier (see :ref:`pandas 1.5.0 Release notes <whatsnew_150.enhancements.resample_group_keys>` for examples). .. versionchanged:: 2.0.0 ``group_keys`` now defaults to ``False``. Returns ------- pandas.core.Resampler :class:`~pandas.core.Resampler` object. See Also -------- Series.resample : Resample a Series. DataFrame.resample : Resample a DataFrame. groupby : Group {klass} by mapping, function, label, or list of labels. asfreq : Reindex a {klass} with the given frequency without grouping. Notes ----- See the `user guide <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`__ for more. To learn more about the offset strings, please see `this link <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__. Examples -------- Start by creating a series with 9 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=9, freq='T') >>> series = pd.Series(range(9), index=index) >>> series 2000-01-01 00:00:00 0 2000-01-01 00:01:00 1 2000-01-01 00:02:00 2 2000-01-01 00:03:00 3 2000-01-01 00:04:00 4 2000-01-01 00:05:00 5 2000-01-01 00:06:00 6 2000-01-01 00:07:00 7 2000-01-01 00:08:00 8 Freq: T, dtype: int64 Downsample the series into 3 minute bins and sum the values of the timestamps falling into a bin. >>> series.resample('3T').sum() 2000-01-01 00:00:00 3 2000-01-01 00:03:00 12 2000-01-01 00:06:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but label each bin using the right edge instead of the left. Please note that the value in the bucket used as the label is not included in the bucket, which it labels. For example, in the original series the bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed value in the resampled bucket with the label ``2000-01-01 00:03:00`` does not include 3 (if it did, the summed value would be 6, not 3). To include this value close the right side of the bin interval as illustrated in the example below this one. 
>>> series.resample('3T', label='right').sum() 2000-01-01 00:03:00 3 2000-01-01 00:06:00 12 2000-01-01 00:09:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but close the right side of the bin interval. >>> series.resample('3T', label='right', closed='right').sum() 2000-01-01 00:00:00 0 2000-01-01 00:03:00 6 2000-01-01 00:06:00 15 2000-01-01 00:09:00 15 Freq: 3T, dtype: int64 Upsample the series into 30 second bins. >>> series.resample('30S').asfreq()[0:5] # Select first 5 rows 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 1.0 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 Freq: 30S, dtype: float64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``ffill`` method. >>> series.resample('30S').ffill()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 0 2000-01-01 00:01:00 1 2000-01-01 00:01:30 1 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``bfill`` method. >>> series.resample('30S').bfill()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 1 2000-01-01 00:01:00 1 2000-01-01 00:01:30 2 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Pass a custom function via ``apply`` >>> def custom_resampler(arraylike): ... return np.sum(arraylike) + 5 ... >>> series.resample('3T').apply(custom_resampler) 2000-01-01 00:00:00 8 2000-01-01 00:03:00 17 2000-01-01 00:06:00 26 Freq: 3T, dtype: int64 For a Series with a PeriodIndex, the keyword `convention` can be used to control whether to use the start or end of `rule`. Resample a year by quarter using 'start' `convention`. Values are assigned to the first quarter of the period. >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01', ... freq='A', ... periods=2)) >>> s 2012 1 2013 2 Freq: A-DEC, dtype: int64 >>> s.resample('Q', convention='start').asfreq() 2012Q1 1.0 2012Q2 NaN 2012Q3 NaN 2012Q4 NaN 2013Q1 2.0 2013Q2 NaN 2013Q3 NaN 2013Q4 NaN Freq: Q-DEC, dtype: float64 Resample quarters by month using 'end' `convention`. Values are assigned to the last month of the period. >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01', ... freq='Q', ... periods=4)) >>> q 2018Q1 1 2018Q2 2 2018Q3 3 2018Q4 4 Freq: Q-DEC, dtype: int64 >>> q.resample('M', convention='end').asfreq() 2018-03 1.0 2018-04 NaN 2018-05 NaN 2018-06 2.0 2018-07 NaN 2018-08 NaN 2018-09 3.0 2018-10 NaN 2018-11 NaN 2018-12 4.0 Freq: M, dtype: float64 For DataFrame objects, the keyword `on` can be used to specify the column instead of the index for resampling. >>> d = {{'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}} >>> df = pd.DataFrame(d) >>> df['week_starting'] = pd.date_range('01/01/2018', ... periods=8, ... freq='W') >>> df price volume week_starting 0 10 50 2018-01-07 1 11 60 2018-01-14 2 9 40 2018-01-21 3 13 100 2018-01-28 4 14 50 2018-02-04 5 18 100 2018-02-11 6 17 40 2018-02-18 7 19 50 2018-02-25 >>> df.resample('M', on='week_starting').mean() price volume week_starting 2018-01-31 10.75 62.5 2018-02-28 17.00 60.0 For a DataFrame with MultiIndex, the keyword `level` can be used to specify on which level the resampling needs to take place. >>> days = pd.date_range('1/1/2000', periods=4, freq='D') >>> d2 = {{'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}} >>> df2 = pd.DataFrame( ... d2, ... index=pd.MultiIndex.from_product( ... [days, ['morning', 'afternoon']] ... ) ... 
) >>> df2 price volume 2000-01-01 morning 10 50 afternoon 11 60 2000-01-02 morning 9 40 afternoon 13 100 2000-01-03 morning 14 50 afternoon 18 100 2000-01-04 morning 17 40 afternoon 19 50 >>> df2.resample('D', level=0).sum() price volume 2000-01-01 21 110 2000-01-02 22 140 2000-01-03 32 150 2000-01-04 36 90 If you want to adjust the start of the bins based on a fixed timestamp: >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00' >>> rng = pd.date_range(start, end, freq='7min') >>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng) >>> ts 2000-10-01 23:30:00 0 2000-10-01 23:37:00 3 2000-10-01 23:44:00 6 2000-10-01 23:51:00 9 2000-10-01 23:58:00 12 2000-10-02 00:05:00 15 2000-10-02 00:12:00 18 2000-10-02 00:19:00 21 2000-10-02 00:26:00 24 Freq: 7T, dtype: int64 >>> ts.resample('17min').sum() 2000-10-01 23:14:00 0 2000-10-01 23:31:00 9 2000-10-01 23:48:00 21 2000-10-02 00:05:00 54 2000-10-02 00:22:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', origin='epoch').sum() 2000-10-01 23:18:00 0 2000-10-01 23:35:00 18 2000-10-01 23:52:00 27 2000-10-02 00:09:00 39 2000-10-02 00:26:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', origin='2000-01-01').sum() 2000-10-01 23:24:00 3 2000-10-01 23:41:00 15 2000-10-01 23:58:00 45 2000-10-02 00:15:00 45 Freq: 17T, dtype: int64 If you want to adjust the start of the bins with an `offset` Timedelta, the two following lines are equivalent: >>> ts.resample('17min', origin='start').sum() 2000-10-01 23:30:00 9 2000-10-01 23:47:00 21 2000-10-02 00:04:00 54 2000-10-02 00:21:00 24 Freq: 17T, dtype: int64 >>> ts.resample('17min', offset='23h30min').sum() 2000-10-01 23:30:00 9 2000-10-01 23:47:00 21 2000-10-02 00:04:00 54 2000-10-02 00:21:00 24 Freq: 17T, dtype: int64 If you want to take the largest Timestamp as the end of the bins: >>> ts.resample('17min', origin='end').sum() 2000-10-01 23:35:00 0 2000-10-01 23:52:00 18 2000-10-02 00:09:00 27 2000-10-02 00:26:00 63 Freq: 17T, dtype: int64 In contrast with the `start_day`, you can use `end_day` to take the ceiling midnight of the largest Timestamp as the end of the bins and drop the bins not containing data: >>> ts.resample('17min', origin='end_day').sum() 2000-10-01 23:38:00 3 2000-10-01 23:55:00 15 2000-10-02 00:12:00 45 2000-10-02 00:29:00 45 Freq: 17T, dtype: int64 """ from pandas.core.resample import get_resampler axis = self._get_axis_number(axis) return get_resampler( cast("Series | DataFrame", self), freq=rule, label=label, closed=closed, axis=axis, kind=kind, convention=convention, key=on, level=level, origin=origin, offset=offset, group_keys=group_keys, ) def first(self: NDFrameT, offset) -> NDFrameT: """ Select initial periods of time series data based on a date offset. When having a DataFrame with dates as index, this function can select the first few rows based on a date offset. Parameters ---------- offset : str, DateOffset or dateutil.relativedelta The offset length of the data that will be selected. For instance, '1M' will display all the rows having their index within the first month. Returns ------- Series or DataFrame A subset of the caller. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- last : Select final periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. 
Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the first 3 days: >>> ts.first('3D') A 2018-04-09 1 2018-04-11 2 Notice the data for 3 first calendar days were returned, not the first 3 days observed in the dataset, and therefore data for 2018-04-13 was not returned. """ if not isinstance(self.index, DatetimeIndex): raise TypeError("'first' only supports a DatetimeIndex index") if len(self.index) == 0: return self.copy(deep=False) offset = to_offset(offset) if not isinstance(offset, Tick) and offset.is_on_offset(self.index[0]): # GH#29623 if first value is end of period, remove offset with n = 1 # before adding the real offset end_date = end = self.index[0] - offset.base + offset else: end_date = end = self.index[0] + offset # Tick-like, e.g. 3 weeks if isinstance(offset, Tick) and end_date in self.index: end = self.index.searchsorted(end_date, side="left") return self.iloc[:end] return self.loc[:end] def last(self: NDFrameT, offset) -> NDFrameT: """ Select final periods of time series data based on a date offset. For a DataFrame with a sorted DatetimeIndex, this function selects the last few rows based on a date offset. Parameters ---------- offset : str, DateOffset, dateutil.relativedelta The offset length of the data that will be selected. For instance, '3D' will display all the rows having their index within the last 3 days. Returns ------- Series or DataFrame A subset of the caller. Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- first : Select initial periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the last 3 days: >>> ts.last('3D') A 2018-04-13 3 2018-04-15 4 Notice the data for 3 last calendar days were returned, not the last 3 observed days in the dataset, and therefore data for 2018-04-11 was not returned. """ if not isinstance(self.index, DatetimeIndex): raise TypeError("'last' only supports a DatetimeIndex index") if len(self.index) == 0: return self.copy(deep=False) offset = to_offset(offset) start_date = self.index[-1] - offset start = self.index.searchsorted(start_date, side="right") return self.iloc[start:] def rank( self: NDFrameT, axis: Axis = 0, method: str = "average", numeric_only: bool_t = False, na_option: str = "keep", ascending: bool_t = True, pct: bool_t = False, ) -> NDFrameT: """ Compute numerical data ranks (1 through n) along axis. By default, equal values are assigned a rank that is the average of the ranks of those values. Parameters ---------- axis : {0 or 'index', 1 or 'columns'}, default 0 Index to direct ranking. For `Series` this parameter is unused and defaults to 0. method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' How to rank the group of records that have the same value (i.e. ties): * average: average rank of the group * min: lowest rank in the group * max: highest rank in the group * first: ranks assigned in order they appear in the array * dense: like 'min', but rank always increases by 1 between groups. numeric_only : bool, default False For DataFrame objects, rank only numeric columns if set to True. .. 
versionchanged:: 2.0.0 The default value of ``numeric_only`` is now ``False``. na_option : {'keep', 'top', 'bottom'}, default 'keep' How to rank NaN values: * keep: assign NaN rank to NaN values * top: assign lowest rank to NaN values * bottom: assign highest rank to NaN values ascending : bool, default True Whether or not the elements should be ranked in ascending order. pct : bool, default False Whether or not to display the returned rankings in percentile form. Returns ------- same type as caller Return a Series or DataFrame with data ranks as values. See Also -------- core.groupby.DataFrameGroupBy.rank : Rank of values within each group. core.groupby.SeriesGroupBy.rank : Rank of values within each group. Examples -------- >>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog', ... 'spider', 'snake'], ... 'Number_legs': [4, 2, 4, 8, np.nan]}) >>> df Animal Number_legs 0 cat 4.0 1 penguin 2.0 2 dog 4.0 3 spider 8.0 4 snake NaN Ties are assigned the mean of the ranks (by default) for the group. >>> s = pd.Series(range(5), index=list("abcde")) >>> s["d"] = s["b"] >>> s.rank() a 1.0 b 2.5 c 4.0 d 2.5 e 5.0 dtype: float64 The following example shows how the method behaves with the above parameters: * default_rank: this is the default behaviour obtained without using any parameter. * max_rank: setting ``method = 'max'`` the records that have the same values are ranked using the highest rank (e.g.: since 'cat' and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.) * NA_bottom: choosing ``na_option = 'bottom'``, if there are records with NaN values they are placed at the bottom of the ranking. * pct_rank: when setting ``pct = True``, the ranking is expressed as percentile rank. >>> df['default_rank'] = df['Number_legs'].rank() >>> df['max_rank'] = df['Number_legs'].rank(method='max') >>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom') >>> df['pct_rank'] = df['Number_legs'].rank(pct=True) >>> df Animal Number_legs default_rank max_rank NA_bottom pct_rank 0 cat 4.0 2.5 3.0 2.5 0.625 1 penguin 2.0 1.0 1.0 1.0 0.250 2 dog 4.0 2.5 3.0 2.5 0.625 3 spider 8.0 4.0 4.0 4.0 1.000 4 snake NaN NaN NaN 5.0 NaN """ axis_int = self._get_axis_number(axis) if na_option not in {"keep", "top", "bottom"}: msg = "na_option must be one of 'keep', 'top', or 'bottom'" raise ValueError(msg) def ranker(data): if data.ndim == 2: # i.e. DataFrame, we cast to ndarray values = data.values else: # i.e. Series, can dispatch to EA values = data._values if isinstance(values, ExtensionArray): ranks = values._rank( axis=axis_int, method=method, ascending=ascending, na_option=na_option, pct=pct, ) else: ranks = algos.rank( values, axis=axis_int, method=method, ascending=ascending, na_option=na_option, pct=pct, ) ranks_obj = self._constructor(ranks, **data._construct_axes_dict()) return ranks_obj.__finalize__(self, method="rank") if numeric_only: if self.ndim == 1 and not is_numeric_dtype(self.dtype): # GH#47500 raise TypeError( "Series.rank does not allow numeric_only=True with " "non-numeric dtype." 
) data = self._get_numeric_data() else: data = self return ranker(data) def compare( self, other, align_axis: Axis = 1, keep_shape: bool_t = False, keep_equal: bool_t = False, result_names: Suffixes = ("self", "other"), ): if type(self) is not type(other): cls_self, cls_other = type(self).__name__, type(other).__name__ raise TypeError( f"can only compare '{cls_self}' (not '{cls_other}') with '{cls_self}'" ) mask = ~((self == other) | (self.isna() & other.isna())) mask.fillna(True, inplace=True) if not keep_equal: self = self.where(mask) other = other.where(mask) if not keep_shape: if isinstance(self, ABCDataFrame): cmask = mask.any() rmask = mask.any(axis=1) self = self.loc[rmask, cmask] other = other.loc[rmask, cmask] else: self = self[mask] other = other[mask] if not isinstance(result_names, tuple): raise TypeError( f"Passing 'result_names' as a {type(result_names)} is not " "supported. Provide 'result_names' as a tuple instead." ) if align_axis in (1, "columns"): # This is needed for Series axis = 1 else: axis = self._get_axis_number(align_axis) diff = concat([self, other], axis=axis, keys=result_names) if axis >= self.ndim: # No need to reorganize data if stacking on new axis # This currently applies for stacking two Series on columns return diff ax = diff._get_axis(axis) ax_names = np.array(ax.names) # set index names to positions to avoid confusion ax.names = np.arange(len(ax_names)) # bring self-other to inner level order = list(range(1, ax.nlevels)) + [0] if isinstance(diff, ABCDataFrame): diff = diff.reorder_levels(order, axis=axis) else: diff = diff.reorder_levels(order) # restore the index names in order diff._get_axis(axis=axis).names = ax_names[order] # reorder axis to keep things organized indices = ( np.arange(diff.shape[axis]).reshape([2, diff.shape[axis] // 2]).T.flatten() ) diff = diff.take(indices, axis=axis) return diff def align( self: NDFrameT, other: NDFrameT, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool_t | None = None, fill_value: Hashable = None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> NDFrameT: """ Align two objects on their axes with the specified join method. Join method is specified for each axis Index. Parameters ---------- other : DataFrame or Series join : {{'outer', 'inner', 'left', 'right'}}, default 'outer' axis : allowed axis of the other object, default None Align on index (0), columns (1), or both (None). level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. copy : bool, default True Always returns new objects. If copy=False and no reindexing is required then original objects are returned. fill_value : scalar, default np.NaN Value to use for missing values. Defaults to NaN, but can be any "compatible" value. method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None Method to use for filling holes in reindexed Series: - pad / ffill: propagate last valid observation forward to next valid. - backfill / bfill: use NEXT valid observation to fill gap. limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. 
fill_axis : {axes_single_arg}, default 0 Filling axis, method and limit. broadcast_axis : {axes_single_arg}, default None Broadcast values along this axis, if aligning two objects of different dimensions. Returns ------- tuple of ({klass}, type of other) Aligned objects. Examples -------- >>> df = pd.DataFrame( ... [[1, 2, 3, 4], [6, 7, 8, 9]], columns=["D", "B", "E", "A"], index=[1, 2] ... ) >>> other = pd.DataFrame( ... [[10, 20, 30, 40], [60, 70, 80, 90], [600, 700, 800, 900]], ... columns=["A", "B", "C", "D"], ... index=[2, 3, 4], ... ) >>> df D B E A 1 1 2 3 4 2 6 7 8 9 >>> other A B C D 2 10 20 30 40 3 60 70 80 90 4 600 700 800 900 Align on columns: >>> left, right = df.align(other, join="outer", axis=1) >>> left A B C D E 1 4 2 NaN 1 3 2 9 7 NaN 6 8 >>> right A B C D E 2 10 20 30 40 NaN 3 60 70 80 90 NaN 4 600 700 800 900 NaN We can also align on the index: >>> left, right = df.align(other, join="outer", axis=0) >>> left D B E A 1 1.0 2.0 3.0 4.0 2 6.0 7.0 8.0 9.0 3 NaN NaN NaN NaN 4 NaN NaN NaN NaN >>> right A B C D 1 NaN NaN NaN NaN 2 10.0 20.0 30.0 40.0 3 60.0 70.0 80.0 90.0 4 600.0 700.0 800.0 900.0 Finally, the default `axis=None` will align on both index and columns: >>> left, right = df.align(other, join="outer", axis=None) >>> left A B C D E 1 4.0 2.0 NaN 1.0 3.0 2 9.0 7.0 NaN 6.0 8.0 3 NaN NaN NaN NaN NaN 4 NaN NaN NaN NaN NaN >>> right A B C D E 1 NaN NaN NaN NaN NaN 2 10.0 20.0 30.0 40.0 NaN 3 60.0 70.0 80.0 90.0 NaN 4 600.0 700.0 800.0 900.0 NaN """ method = clean_fill_method(method) if broadcast_axis == 1 and self.ndim != other.ndim: if isinstance(self, ABCSeries): # this means other is a DataFrame, and we need to broadcast # self cons = self._constructor_expanddim df = cons( {c: self for c in other.columns}, **other._construct_axes_dict() ) return df._align_frame( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) elif isinstance(other, ABCSeries): # this means self is a DataFrame, and we need to broadcast # other cons = other._constructor_expanddim df = cons( {c: other for c in self.columns}, **self._construct_axes_dict() ) return self._align_frame( df, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) if axis is not None: axis = self._get_axis_number(axis) if isinstance(other, ABCDataFrame): return self._align_frame( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) elif isinstance(other, ABCSeries): return self._align_series( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, ) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def _align_frame( self, other, join: AlignJoin = "outer", axis: Axis | None = None, level=None, copy: bool_t | None = None, fill_value=None, method=None, limit=None, fill_axis: Axis = 0, ): # defaults join_index, join_columns = None, None ilidx, iridx = None, None clidx, cridx = None, None is_series = isinstance(self, ABCSeries) if (axis is None or axis == 0) and not self.index.equals(other.index): join_index, ilidx, iridx = self.index.join( other.index, how=join, level=level, return_indexers=True ) if ( (axis is None or axis == 1) and not is_series and not self.columns.equals(other.columns) ): join_columns, clidx, cridx = self.columns.join( other.columns, how=join, level=level, return_indexers=True ) if is_series: reindexers 
= {0: [join_index, ilidx]} else: reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]} left = self._reindex_with_indexers( reindexers, copy=copy, fill_value=fill_value, allow_dups=True ) # other must be always DataFrame right = other._reindex_with_indexers( {0: [join_index, iridx], 1: [join_columns, cridx]}, copy=copy, fill_value=fill_value, allow_dups=True, ) if method is not None: _left = left.fillna(method=method, axis=fill_axis, limit=limit) assert _left is not None # needed for mypy left = _left right = right.fillna(method=method, axis=fill_axis, limit=limit) # if DatetimeIndex have different tz, convert to UTC left, right = _align_as_utc(left, right, join_index) return ( left.__finalize__(self), right.__finalize__(other), ) def _align_series( self, other, join: AlignJoin = "outer", axis: Axis | None = None, level=None, copy: bool_t | None = None, fill_value=None, method=None, limit=None, fill_axis: Axis = 0, ): is_series = isinstance(self, ABCSeries) if copy and using_copy_on_write(): copy = False if (not is_series and axis is None) or axis not in [None, 0, 1]: raise ValueError("Must specify axis=0 or 1") if is_series and axis == 1: raise ValueError("cannot align series to a series other than axis 0") # series/series compat, other must always be a Series if not axis: # equal if self.index.equals(other.index): join_index, lidx, ridx = None, None, None else: join_index, lidx, ridx = self.index.join( other.index, how=join, level=level, return_indexers=True ) if is_series: left = self._reindex_indexer(join_index, lidx, copy) elif lidx is None or join_index is None: left = self.copy(deep=copy) else: left = self._constructor( self._mgr.reindex_indexer(join_index, lidx, axis=1, copy=copy) ) right = other._reindex_indexer(join_index, ridx, copy) else: # one has > 1 ndim fdata = self._mgr join_index = self.axes[1] lidx, ridx = None, None if not join_index.equals(other.index): join_index, lidx, ridx = join_index.join( other.index, how=join, level=level, return_indexers=True ) if lidx is not None: bm_axis = self._get_block_manager_axis(1) fdata = fdata.reindex_indexer(join_index, lidx, axis=bm_axis) if copy and fdata is self._mgr: fdata = fdata.copy() left = self._constructor(fdata) if ridx is None: right = other.copy(deep=copy) else: right = other.reindex(join_index, level=level) # fill fill_na = notna(fill_value) or (method is not None) if fill_na: left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis) right = right.fillna(fill_value, method=method, limit=limit) # if DatetimeIndex have different tz, convert to UTC if is_series or (not is_series and axis == 0): left, right = _align_as_utc(left, right, join_index) return ( left.__finalize__(self), right.__finalize__(other), ) def _where( self, cond, other=lib.no_default, inplace: bool_t = False, axis: Axis | None = None, level=None, ): """ Equivalent to public method `where`, except that `other` is not applied as a function even if callable. Used in __setitem__. 
""" inplace = validate_bool_kwarg(inplace, "inplace") if axis is not None: axis = self._get_axis_number(axis) # align the cond to same shape as myself cond = common.apply_if_callable(cond, self) if isinstance(cond, NDFrame): # CoW: Make sure reference is not kept alive cond = cond.align(self, join="right", broadcast_axis=1, copy=False)[0] else: if not hasattr(cond, "shape"): cond = np.asanyarray(cond) if cond.shape != self.shape: raise ValueError("Array conditional must be same shape as self") cond = self._constructor(cond, **self._construct_axes_dict(), copy=False) # make sure we are boolean fill_value = bool(inplace) cond = cond.fillna(fill_value) msg = "Boolean array expected for the condition, not {dtype}" if not cond.empty: if not isinstance(cond, ABCDataFrame): # This is a single-dimensional object. if not is_bool_dtype(cond): raise ValueError(msg.format(dtype=cond.dtype)) else: for _dt in cond.dtypes: if not is_bool_dtype(_dt): raise ValueError(msg.format(dtype=_dt)) else: # GH#21947 we have an empty DataFrame/Series, could be object-dtype cond = cond.astype(bool) cond = -cond if inplace else cond cond = cond.reindex(self._info_axis, axis=self._info_axis_number, copy=False) # try to align with other if isinstance(other, NDFrame): # align with me if other.ndim <= self.ndim: # CoW: Make sure reference is not kept alive other = self.align( other, join="left", axis=axis, level=level, fill_value=None, copy=False, )[1] # if we are NOT aligned, raise as we cannot where index if axis is None and not other._indexed_same(self): raise InvalidIndexError if other.ndim < self.ndim: # TODO(EA2D): avoid object-dtype cast in EA case GH#38729 other = other._values if axis == 0: other = np.reshape(other, (-1, 1)) elif axis == 1: other = np.reshape(other, (1, -1)) other = np.broadcast_to(other, self.shape) # slice me out of the other else: raise NotImplementedError( "cannot align with a higher dimensional NDFrame" ) elif not isinstance(other, (MultiIndex, NDFrame)): # mainly just catching Index here other = extract_array(other, extract_numpy=True) if isinstance(other, (np.ndarray, ExtensionArray)): if other.shape != self.shape: if self.ndim != 1: # In the ndim == 1 case we may have # other length 1, which we treat as scalar (GH#2745, GH#4192) # or len(other) == icond.sum(), which we treat like # __setitem__ (GH#3235) raise ValueError( "other must be the same shape as self when an ndarray" ) # we are the same shape, so create an actual object for alignment else: other = self._constructor( other, **self._construct_axes_dict(), copy=False ) if axis is None: axis = 0 if self.ndim == getattr(other, "ndim", 0): align = True else: align = self._get_axis_number(axis) == 1 if inplace: # we may have different type blocks come out of putmask, so # reconstruct the block manager self._check_inplace_setting(other) new_data = self._mgr.putmask(mask=cond, new=other, align=align) result = self._constructor(new_data) return self._update_inplace(result) else: new_data = self._mgr.where( other=other, cond=cond, align=align, ) result = self._constructor(new_data) return result.__finalize__(self) def where( self: NDFrameT, cond, other=..., *, inplace: Literal[False] = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT: ... def where( self, cond, other=..., *, inplace: Literal[True], axis: Axis | None = ..., level: Level = ..., ) -> None: ... def where( self: NDFrameT, cond, other=..., *, inplace: bool_t = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT | None: ... 
klass=_shared_doc_kwargs["klass"], cond="True", cond_rev="False", name="where", name_other="mask", ) def where( self: NDFrameT, cond, other=np.nan, *, inplace: bool_t = False, axis: Axis | None = None, level: Level = None, ) -> NDFrameT | None: """ Replace values where the condition is {cond_rev}. Parameters ---------- cond : bool {klass}, array-like, or callable Where `cond` is {cond}, keep the original value. Where {cond_rev}, replace with corresponding value from `other`. If `cond` is callable, it is computed on the {klass} and should return boolean {klass} or array. The callable must not change input {klass} (though pandas doesn't check it). other : scalar, {klass}, or callable Entries where `cond` is {cond_rev} are replaced with corresponding value from `other`. If other is callable, it is computed on the {klass} and should return scalar or {klass}. The callable must not change input {klass} (though pandas doesn't check it). If not specified, entries will be filled with the corresponding NULL value (``np.nan`` for numpy dtypes, ``pd.NA`` for extension dtypes). inplace : bool, default False Whether to perform the operation in place on the data. axis : int, default None Alignment axis if needed. For `Series` this parameter is unused and defaults to 0. level : int, default None Alignment level if needed. Returns ------- Same type as caller or None if ``inplace=True``. See Also -------- :func:`DataFrame.{name_other}` : Return an object of same shape as self. Notes ----- The {name} method is an application of the if-then idiom. For each element in the calling DataFrame, if ``cond`` is ``{cond}`` the element is used; otherwise the corresponding element from the DataFrame ``other`` is used. If the axis of ``other`` does not align with axis of ``cond`` {klass}, the misaligned index positions will be filled with {cond_rev}. The signature for :func:`DataFrame.where` differs from :func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to ``np.where(m, df1, df2)``. For further details and examples see the ``{name}`` documentation in :ref:`indexing <indexing.where_mask>`. The dtype of the object takes precedence. The fill value is casted to the object's dtype, if this can be done losslessly. Examples -------- >>> s = pd.Series(range(5)) >>> s.where(s > 0) 0 NaN 1 1.0 2 2.0 3 3.0 4 4.0 dtype: float64 >>> s.mask(s > 0) 0 0.0 1 NaN 2 NaN 3 NaN 4 NaN dtype: float64 >>> s = pd.Series(range(5)) >>> t = pd.Series([True, False]) >>> s.where(t, 99) 0 0 1 99 2 99 3 99 4 99 dtype: int64 >>> s.mask(t, 99) 0 99 1 1 2 99 3 99 4 99 dtype: int64 >>> s.where(s > 1, 10) 0 10 1 10 2 2 3 3 4 4 dtype: int64 >>> s.mask(s > 1, 10) 0 0 1 1 2 10 3 10 4 10 dtype: int64 >>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B']) >>> df A B 0 0 1 1 2 3 2 4 5 3 6 7 4 8 9 >>> m = df % 3 == 0 >>> df.where(m, -df) A B 0 0 -1 1 -2 3 2 -4 -5 3 6 -7 4 -8 9 >>> df.where(m, -df) == np.where(m, df, -df) A B 0 True True 1 True True 2 True True 3 True True 4 True True >>> df.where(m, -df) == df.mask(~m, -df) A B 0 True True 1 True True 2 True True 3 True True 4 True True """ other = common.apply_if_callable(other, self) return self._where(cond, other, inplace, axis, level) def mask( self: NDFrameT, cond, other=..., *, inplace: Literal[False] = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT: ... def mask( self, cond, other=..., *, inplace: Literal[True], axis: Axis | None = ..., level: Level = ..., ) -> None: ... 
def mask( self: NDFrameT, cond, other=..., *, inplace: bool_t = ..., axis: Axis | None = ..., level: Level = ..., ) -> NDFrameT | None: ... where, klass=_shared_doc_kwargs["klass"], cond="False", cond_rev="True", name="mask", name_other="where", ) def mask( self: NDFrameT, cond, other=lib.no_default, *, inplace: bool_t = False, axis: Axis | None = None, level: Level = None, ) -> NDFrameT | None: inplace = validate_bool_kwarg(inplace, "inplace") cond = common.apply_if_callable(cond, self) # see gh-21891 if not hasattr(cond, "__invert__"): cond = np.array(cond) return self.where( ~cond, other=other, inplace=inplace, axis=axis, level=level, ) def shift( self: NDFrameT, periods: int = 1, freq=None, axis: Axis = 0, fill_value: Hashable = None, ) -> NDFrameT: """ Shift index by desired number of periods with an optional time `freq`. When `freq` is not passed, shift the index without realigning the data. If `freq` is passed (in this case, the index must be date or datetime, or it will raise a `NotImplementedError`), the index will be increased using the periods and the `freq`. `freq` can be inferred when specified as "infer" as long as either freq or inferred_freq attribute is set in the index. Parameters ---------- periods : int Number of periods to shift. Can be positive or negative. freq : DateOffset, tseries.offsets, timedelta, or str, optional Offset to use from the tseries module or time rule (e.g. 'EOM'). If `freq` is specified then the index values are shifted but the data is not realigned. That is, use `freq` if you would like to extend the index when shifting and preserve the original data. If `freq` is specified as "infer" then it will be inferred from the freq or inferred_freq attributes of the index. If neither of those attributes exist, a ValueError is thrown. axis : {{0 or 'index', 1 or 'columns', None}}, default None Shift direction. For `Series` this parameter is unused and defaults to 0. fill_value : object, optional The scalar value to use for newly introduced missing values. the default depends on the dtype of `self`. For numeric data, ``np.nan`` is used. For datetime, timedelta, or period data, etc. :attr:`NaT` is used. For extension dtypes, ``self.dtype.na_value`` is used. .. versionchanged:: 1.1.0 Returns ------- {klass} Copy of input object, shifted. See Also -------- Index.shift : Shift values of Index. DatetimeIndex.shift : Shift values of DatetimeIndex. PeriodIndex.shift : Shift values of PeriodIndex. Examples -------- >>> df = pd.DataFrame({{"Col1": [10, 20, 15, 30, 45], ... "Col2": [13, 23, 18, 33, 48], ... "Col3": [17, 27, 22, 37, 52]}}, ... 
index=pd.date_range("2020-01-01", "2020-01-05")) >>> df Col1 Col2 Col3 2020-01-01 10 13 17 2020-01-02 20 23 27 2020-01-03 15 18 22 2020-01-04 30 33 37 2020-01-05 45 48 52 >>> df.shift(periods=3) Col1 Col2 Col3 2020-01-01 NaN NaN NaN 2020-01-02 NaN NaN NaN 2020-01-03 NaN NaN NaN 2020-01-04 10.0 13.0 17.0 2020-01-05 20.0 23.0 27.0 >>> df.shift(periods=1, axis="columns") Col1 Col2 Col3 2020-01-01 NaN 10 13 2020-01-02 NaN 20 23 2020-01-03 NaN 15 18 2020-01-04 NaN 30 33 2020-01-05 NaN 45 48 >>> df.shift(periods=3, fill_value=0) Col1 Col2 Col3 2020-01-01 0 0 0 2020-01-02 0 0 0 2020-01-03 0 0 0 2020-01-04 10 13 17 2020-01-05 20 23 27 >>> df.shift(periods=3, freq="D") Col1 Col2 Col3 2020-01-04 10 13 17 2020-01-05 20 23 27 2020-01-06 15 18 22 2020-01-07 30 33 37 2020-01-08 45 48 52 >>> df.shift(periods=3, freq="infer") Col1 Col2 Col3 2020-01-04 10 13 17 2020-01-05 20 23 27 2020-01-06 15 18 22 2020-01-07 30 33 37 2020-01-08 45 48 52 """ if periods == 0: return self.copy(deep=None) if freq is None: # when freq is None, data is shifted, index is not axis = self._get_axis_number(axis) new_data = self._mgr.shift( periods=periods, axis=axis, fill_value=fill_value ) return self._constructor(new_data).__finalize__(self, method="shift") # when freq is given, index is shifted, data is not index = self._get_axis(axis) if freq == "infer": freq = getattr(index, "freq", None) if freq is None: freq = getattr(index, "inferred_freq", None) if freq is None: msg = "Freq was not set in the index hence cannot be inferred" raise ValueError(msg) elif isinstance(freq, str): freq = to_offset(freq) if isinstance(index, PeriodIndex): orig_freq = to_offset(index.freq) if freq != orig_freq: assert orig_freq is not None # for mypy raise ValueError( f"Given freq {freq.rule_code} does not match " f"PeriodIndex freq {orig_freq.rule_code}" ) new_ax = index.shift(periods) else: new_ax = index.shift(periods, freq) result = self.set_axis(new_ax, axis=axis) return result.__finalize__(self, method="shift") def truncate( self: NDFrameT, before=None, after=None, axis: Axis | None = None, copy: bool_t | None = None, ) -> NDFrameT: """ Truncate a Series or DataFrame before and after some index value. This is a useful shorthand for boolean indexing based on index values above or below certain thresholds. Parameters ---------- before : date, str, int Truncate all rows before this index value. after : date, str, int Truncate all rows after this index value. axis : {0 or 'index', 1 or 'columns'}, optional Axis to truncate. Truncates the index (rows) by default. For `Series` this parameter is unused and defaults to 0. copy : bool, default is True, Return a copy of the truncated section. Returns ------- type of caller The truncated Series or DataFrame. See Also -------- DataFrame.loc : Select a subset of a DataFrame by label. DataFrame.iloc : Select a subset of a DataFrame by position. Notes ----- If the index being truncated contains only datetime values, `before` and `after` may be specified as strings instead of Timestamps. Examples -------- >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'], ... 'B': ['f', 'g', 'h', 'i', 'j'], ... 'C': ['k', 'l', 'm', 'n', 'o']}, ... index=[1, 2, 3, 4, 5]) >>> df A B C 1 a f k 2 b g l 3 c h m 4 d i n 5 e j o >>> df.truncate(before=2, after=4) A B C 2 b g l 3 c h m 4 d i n The columns of a DataFrame can be truncated. >>> df.truncate(before="A", after="B", axis="columns") A B 1 a f 2 b g 3 c h 4 d i 5 e j For Series, only rows can be truncated. 
>>> df['A'].truncate(before=2, after=4) 2 b 3 c 4 d Name: A, dtype: object The index values in ``truncate`` can be datetimes or string dates. >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s') >>> df = pd.DataFrame(index=dates, data={'A': 1}) >>> df.tail() A 2016-01-31 23:59:56 1 2016-01-31 23:59:57 1 2016-01-31 23:59:58 1 2016-01-31 23:59:59 1 2016-02-01 00:00:00 1 >>> df.truncate(before=pd.Timestamp('2016-01-05'), ... after=pd.Timestamp('2016-01-10')).tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Because the index is a DatetimeIndex containing only dates, we can specify `before` and `after` as strings. They will be coerced to Timestamps before truncation. >>> df.truncate('2016-01-05', '2016-01-10').tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Note that ``truncate`` assumes a 0 value for any unspecified time component (midnight). This differs from partial string slicing, which returns any partially matching dates. >>> df.loc['2016-01-05':'2016-01-10', :].tail() A 2016-01-10 23:59:55 1 2016-01-10 23:59:56 1 2016-01-10 23:59:57 1 2016-01-10 23:59:58 1 2016-01-10 23:59:59 1 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) ax = self._get_axis(axis) # GH 17935 # Check that index is sorted if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing: raise ValueError("truncate requires a sorted index") # if we have a date index, convert to dates, otherwise # treat like a slice if ax._is_all_dates: from pandas.core.tools.datetimes import to_datetime before = to_datetime(before) after = to_datetime(after) if before is not None and after is not None and before > after: raise ValueError(f"Truncate: {after} must be after {before}") if len(ax) > 1 and ax.is_monotonic_decreasing and ax.nunique() > 1: before, after = after, before slicer = [slice(None, None)] * self._AXIS_LEN slicer[axis] = slice(before, after) result = self.loc[tuple(slicer)] if isinstance(ax, MultiIndex): setattr(result, self._get_axis_name(axis), ax.truncate(before, after)) result = result.copy(deep=copy and not using_copy_on_write()) return result def tz_convert( self: NDFrameT, tz, axis: Axis = 0, level=None, copy: bool_t | None = None ) -> NDFrameT: """ Convert tz-aware axis to target time zone. Parameters ---------- tz : str or tzinfo object or None Target time zone. Passing ``None`` will convert to UTC and remove the timezone information. axis : {{0 or 'index', 1 or 'columns'}}, default 0 The axis to convert level : int, str, default None If axis is a MultiIndex, convert a specific level. Otherwise must be None. copy : bool, default True Also make a copy of the underlying data. Returns ------- {klass} Object with time zone converted axis. Raises ------ TypeError If the axis is tz-naive. Examples -------- Change to another time zone: >>> s = pd.Series( ... [1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00']), ... ) >>> s.tz_convert('Asia/Shanghai') 2018-09-15 07:30:00+08:00 1 dtype: int64 Pass None to convert to UTC and get a tz-naive index: >>> s = pd.Series([1], ... 
index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00'])) >>> s.tz_convert(None) 2018-09-14 23:30:00 1 dtype: int64 """ axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_convert(ax, tz): if not hasattr(ax, "tz_convert"): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError( f"{ax_name} is not a valid DatetimeIndex or PeriodIndex" ) ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_convert(tz) return ax # if a level is given it must be a MultiIndex level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_convert(ax.levels[level], tz) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError(f"The level {level} is not valid") ax = _tz_convert(ax, tz) result = self.copy(deep=copy and not using_copy_on_write()) result = result.set_axis(ax, axis=axis, copy=False) return result.__finalize__(self, method="tz_convert") def tz_localize( self: NDFrameT, tz, axis: Axis = 0, level=None, copy: bool_t | None = None, ambiguous: TimeAmbiguous = "raise", nonexistent: TimeNonexistent = "raise", ) -> NDFrameT: """ Localize tz-naive index of a Series or DataFrame to target time zone. This operation localizes the Index. To localize the values in a timezone-naive Series, use :meth:`Series.dt.tz_localize`. Parameters ---------- tz : str or tzinfo or None Time zone to localize. Passing ``None`` will remove the time zone information and preserve local time. axis : {{0 or 'index', 1 or 'columns'}}, default 0 The axis to localize level : int, str, default None If axis ia a MultiIndex, localize a specific level. Otherwise must be None. copy : bool, default True Also make a copy of the underlying data. ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' When clocks moved backward due to DST, ambiguous times may arise. For example in Central European Time (UTC+01), when going from 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter dictates how ambiguous times should be handled. - 'infer' will attempt to infer fall dst-transition hours based on order - bool-ndarray where True signifies a DST time, False designates a non-DST time (note that this flag is only applicable for ambiguous times) - 'NaT' will return NaT where there are ambiguous times - 'raise' will raise an AmbiguousTimeError if there are ambiguous times. nonexistent : str, default 'raise' A nonexistent time does not exist in a particular timezone where clocks moved forward due to DST. Valid values are: - 'shift_forward' will shift the nonexistent time forward to the closest existing time - 'shift_backward' will shift the nonexistent time backward to the closest existing time - 'NaT' will return NaT where there are nonexistent times - timedelta objects will shift nonexistent times by the timedelta - 'raise' will raise an NonExistentTimeError if there are nonexistent times. Returns ------- {klass} Same type as the input. Raises ------ TypeError If the TimeSeries is tz-aware and tz is not None. Examples -------- Localize local times: >>> s = pd.Series( ... [1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00']), ... ) >>> s.tz_localize('CET') 2018-09-15 01:30:00+02:00 1 dtype: int64 Pass None to convert to tz-naive index and preserve local time: >>> s = pd.Series([1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00'])) >>> s.tz_localize(None) 2018-09-15 01:30:00 1 dtype: int64 Be careful with DST changes. 
When there is sequential data, pandas can infer the DST time: >>> s = pd.Series(range(7), ... index=pd.DatetimeIndex(['2018-10-28 01:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 03:00:00', ... '2018-10-28 03:30:00'])) >>> s.tz_localize('CET', ambiguous='infer') 2018-10-28 01:30:00+02:00 0 2018-10-28 02:00:00+02:00 1 2018-10-28 02:30:00+02:00 2 2018-10-28 02:00:00+01:00 3 2018-10-28 02:30:00+01:00 4 2018-10-28 03:00:00+01:00 5 2018-10-28 03:30:00+01:00 6 dtype: int64 In some cases, inferring the DST is impossible. In such cases, you can pass an ndarray to the ambiguous parameter to set the DST explicitly >>> s = pd.Series(range(3), ... index=pd.DatetimeIndex(['2018-10-28 01:20:00', ... '2018-10-28 02:36:00', ... '2018-10-28 03:46:00'])) >>> s.tz_localize('CET', ambiguous=np.array([True, True, False])) 2018-10-28 01:20:00+02:00 0 2018-10-28 02:36:00+02:00 1 2018-10-28 03:46:00+01:00 2 dtype: int64 If the DST transition causes nonexistent times, you can shift these dates forward or backward with a timedelta object or `'shift_forward'` or `'shift_backward'`. >>> s = pd.Series(range(2), ... index=pd.DatetimeIndex(['2015-03-29 02:30:00', ... '2015-03-29 03:30:00'])) >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward') 2015-03-29 03:00:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward') 2015-03-29 01:59:59.999999999+01:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H')) 2015-03-29 03:30:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 """ nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward") if nonexistent not in nonexistent_options and not isinstance( nonexistent, dt.timedelta ): raise ValueError( "The nonexistent argument must be one of 'raise', " "'NaT', 'shift_forward', 'shift_backward' or " "a timedelta object" ) axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_localize(ax, tz, ambiguous, nonexistent): if not hasattr(ax, "tz_localize"): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError( f"{ax_name} is not a valid DatetimeIndex or PeriodIndex" ) ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent) return ax # if a level is given it must be a MultiIndex level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError(f"The level {level} is not valid") ax = _tz_localize(ax, tz, ambiguous, nonexistent) result = self.copy(deep=copy and not using_copy_on_write()) result = result.set_axis(ax, axis=axis, copy=False) return result.__finalize__(self, method="tz_localize") # ---------------------------------------------------------------------- # Numeric Methods def describe( self: NDFrameT, percentiles=None, include=None, exclude=None, ) -> NDFrameT: """ Generate descriptive statistics. Descriptive statistics include those that summarize the central tendency, dispersion and shape of a dataset's distribution, excluding ``NaN`` values. Analyzes both numeric and object series, as well as ``DataFrame`` column sets of mixed data types. The output will vary depending on what is provided. Refer to the notes below for more detail. 
Parameters ---------- percentiles : list-like of numbers, optional The percentiles to include in the output. All should fall between 0 and 1. The default is ``[.25, .5, .75]``, which returns the 25th, 50th, and 75th percentiles. include : 'all', list-like of dtypes or None (default), optional A white list of data types to include in the result. Ignored for ``Series``. Here are the options: - 'all' : All columns of the input will be included in the output. - A list-like of dtypes : Limits the results to the provided data types. To limit the result to numeric types submit ``numpy.number``. To limit it instead to object columns submit the ``numpy.object`` data type. Strings can also be used in the style of ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To select pandas categorical columns, use ``'category'`` - None (default) : The result will include all numeric columns. exclude : list-like of dtypes or None (default), optional, A black list of data types to omit from the result. Ignored for ``Series``. Here are the options: - A list-like of dtypes : Excludes the provided data types from the result. To exclude numeric types submit ``numpy.number``. To exclude object columns submit the data type ``numpy.object``. Strings can also be used in the style of ``select_dtypes`` (e.g. ``df.describe(exclude=['O'])``). To exclude pandas categorical columns, use ``'category'`` - None (default) : The result will exclude nothing. Returns ------- Series or DataFrame Summary statistics of the Series or Dataframe provided. See Also -------- DataFrame.count: Count number of non-NA/null observations. DataFrame.max: Maximum of the values in the object. DataFrame.min: Minimum of the values in the object. DataFrame.mean: Mean of the values. DataFrame.std: Standard deviation of the observations. DataFrame.select_dtypes: Subset of a DataFrame including/excluding columns based on their dtype. Notes ----- For numeric data, the result's index will include ``count``, ``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and upper percentiles. By default the lower percentile is ``25`` and the upper percentile is ``75``. The ``50`` percentile is the same as the median. For object data (e.g. strings or timestamps), the result's index will include ``count``, ``unique``, ``top``, and ``freq``. The ``top`` is the most common value. The ``freq`` is the most common value's frequency. Timestamps also include the ``first`` and ``last`` items. If multiple object values have the highest count, then the ``count`` and ``top`` results will be arbitrarily chosen from among those with the highest count. For mixed data types provided via a ``DataFrame``, the default is to return only an analysis of numeric columns. If the dataframe consists only of object and categorical data without any numeric columns, the default is to return an analysis of both the object and categorical columns. If ``include='all'`` is provided as an option, the result will include a union of attributes of each type. The `include` and `exclude` parameters can be used to limit which columns in a ``DataFrame`` are analyzed for the output. The parameters are ignored when analyzing a ``Series``. Examples -------- Describing a numeric ``Series``. >>> s = pd.Series([1, 2, 3]) >>> s.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 dtype: float64 Describing a categorical ``Series``. >>> s = pd.Series(['a', 'a', 'b', 'c']) >>> s.describe() count 4 unique 3 top a freq 2 dtype: object Describing a timestamp ``Series``. 
>>> s = pd.Series([ ... np.datetime64("2000-01-01"), ... np.datetime64("2010-01-01"), ... np.datetime64("2010-01-01") ... ]) >>> s.describe() count 3 mean 2006-09-01 08:00:00 min 2000-01-01 00:00:00 25% 2004-12-31 12:00:00 50% 2010-01-01 00:00:00 75% 2010-01-01 00:00:00 max 2010-01-01 00:00:00 dtype: object Describing a ``DataFrame``. By default only numeric fields are returned. >>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']), ... 'numeric': [1, 2, 3], ... 'object': ['a', 'b', 'c'] ... }) >>> df.describe() numeric count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Describing all columns of a ``DataFrame`` regardless of data type. >>> df.describe(include='all') # doctest: +SKIP categorical numeric object count 3 3.0 3 unique 3 NaN 3 top f NaN a freq 1 NaN 1 mean NaN 2.0 NaN std NaN 1.0 NaN min NaN 1.0 NaN 25% NaN 1.5 NaN 50% NaN 2.0 NaN 75% NaN 2.5 NaN max NaN 3.0 NaN Describing a column from a ``DataFrame`` by accessing it as an attribute. >>> df.numeric.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Name: numeric, dtype: float64 Including only numeric columns in a ``DataFrame`` description. >>> df.describe(include=[np.number]) numeric count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.5 50% 2.0 75% 2.5 max 3.0 Including only string columns in a ``DataFrame`` description. >>> df.describe(include=[object]) # doctest: +SKIP object count 3 unique 3 top a freq 1 Including only categorical columns from a ``DataFrame`` description. >>> df.describe(include=['category']) categorical count 3 unique 3 top d freq 1 Excluding numeric columns from a ``DataFrame`` description. >>> df.describe(exclude=[np.number]) # doctest: +SKIP categorical object count 3 3 unique 3 3 top f a freq 1 1 Excluding object columns from a ``DataFrame`` description. >>> df.describe(exclude=[object]) # doctest: +SKIP categorical numeric count 3 3.0 unique 3 NaN top f NaN freq 1 NaN mean NaN 2.0 std NaN 1.0 min NaN 1.0 25% NaN 1.5 50% NaN 2.0 75% NaN 2.5 max NaN 3.0 """ return describe_ndframe( obj=self, include=include, exclude=exclude, percentiles=percentiles, ) def pct_change( self: NDFrameT, periods: int = 1, fill_method: Literal["backfill", "bfill", "pad", "ffill"] | None = "pad", limit=None, freq=None, **kwargs, ) -> NDFrameT: """ Percentage change between the current and a prior element. Computes the percentage change from the immediately previous row by default. This is useful in comparing the percentage of change in a time series of elements. Parameters ---------- periods : int, default 1 Periods to shift for forming percent change. fill_method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad' How to handle NAs **before** computing percent changes. limit : int, default None The number of consecutive NAs to fill before stopping. freq : DateOffset, timedelta, or str, optional Increment to use from time series API (e.g. 'M' or BDay()). **kwargs Additional keyword arguments are passed into `DataFrame.shift` or `Series.shift`. Returns ------- Series or DataFrame The same type as the calling object. See Also -------- Series.diff : Compute the difference of two elements in a Series. DataFrame.diff : Compute the difference of two elements in a DataFrame. Series.shift : Shift the index by some number of periods. DataFrame.shift : Shift the index by some number of periods. 
Examples -------- **Series** >>> s = pd.Series([90, 91, 85]) >>> s 0 90 1 91 2 85 dtype: int64 >>> s.pct_change() 0 NaN 1 0.011111 2 -0.065934 dtype: float64 >>> s.pct_change(periods=2) 0 NaN 1 NaN 2 -0.055556 dtype: float64 See the percentage change in a Series where filling NAs with last valid observation forward to next valid. >>> s = pd.Series([90, 91, None, 85]) >>> s 0 90.0 1 91.0 2 NaN 3 85.0 dtype: float64 >>> s.pct_change(fill_method='ffill') 0 NaN 1 0.011111 2 0.000000 3 -0.065934 dtype: float64 **DataFrame** Percentage change in French franc, Deutsche Mark, and Italian lira from 1980-01-01 to 1980-03-01. >>> df = pd.DataFrame({ ... 'FR': [4.0405, 4.0963, 4.3149], ... 'GR': [1.7246, 1.7482, 1.8519], ... 'IT': [804.74, 810.01, 860.13]}, ... index=['1980-01-01', '1980-02-01', '1980-03-01']) >>> df FR GR IT 1980-01-01 4.0405 1.7246 804.74 1980-02-01 4.0963 1.7482 810.01 1980-03-01 4.3149 1.8519 860.13 >>> df.pct_change() FR GR IT 1980-01-01 NaN NaN NaN 1980-02-01 0.013810 0.013684 0.006549 1980-03-01 0.053365 0.059318 0.061876 Percentage of change in GOOG and APPL stock volume. Shows computing the percentage change between columns. >>> df = pd.DataFrame({ ... '2016': [1769950, 30586265], ... '2015': [1500923, 40912316], ... '2014': [1371819, 41403351]}, ... index=['GOOG', 'APPL']) >>> df 2016 2015 2014 GOOG 1769950 1500923 1371819 APPL 30586265 40912316 41403351 >>> df.pct_change(axis='columns', periods=-1) 2016 2015 2014 GOOG 0.179241 0.094112 NaN APPL -0.252395 -0.011860 NaN """ axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name)) if fill_method is None: data = self else: _data = self.fillna(method=fill_method, axis=axis, limit=limit) assert _data is not None # needed for mypy data = _data shifted = data.shift(periods=periods, freq=freq, axis=axis, **kwargs) # Unsupported left operand type for / ("NDFrameT") rs = data / shifted - 1 # type: ignore[operator] if freq is not None: # Shift method is implemented differently when freq is not None # We want to restore the original index rs = rs.loc[~rs.index.duplicated()] rs = rs.reindex_like(data) return rs.__finalize__(self, method="pct_change") def _logical_func( self, name: str, func, axis: Axis = 0, bool_only: bool_t = False, skipna: bool_t = True, **kwargs, ) -> Series | bool_t: nv.validate_logical_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if self.ndim > 1 and axis is None: # Reduce along one dimension then the other, to simplify DataFrame._reduce res = self._logical_func( name, func, axis=0, bool_only=bool_only, skipna=skipna, **kwargs ) return res._logical_func(name, func, skipna=skipna, **kwargs) if ( self.ndim > 1 and axis == 1 and len(self._mgr.arrays) > 1 # TODO(EA2D): special-case not needed and all(x.ndim == 2 for x in self._mgr.arrays) and not kwargs ): # Fastpath avoiding potentially expensive transpose obj = self if bool_only: obj = self._get_bool_data() return obj._reduce_axis1(name, func, skipna=skipna) return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=bool_only, filter_type="bool", ) def any( self, axis: Axis = 0, bool_only: bool_t = False, skipna: bool_t = True, **kwargs, ) -> DataFrame | Series | bool_t: return self._logical_func( "any", nanops.nanany, axis, bool_only, skipna, **kwargs ) def all( self, axis: Axis = 0, bool_only: bool_t = False, skipna: bool_t = True, **kwargs, ) -> Series | bool_t: return self._logical_func( "all", nanops.nanall, axis, bool_only, skipna, **kwargs ) def _accum_func( self, name: str, func, axis: 
Axis | None = None, skipna: bool_t = True, *args, **kwargs, ): skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name) if axis is None: axis = self._stat_axis_number else: axis = self._get_axis_number(axis) if axis == 1: return self.T._accum_func( name, func, axis=0, skipna=skipna, *args, **kwargs # noqa: B026 ).T def block_accum_func(blk_values): values = blk_values.T if hasattr(blk_values, "T") else blk_values result: np.ndarray | ExtensionArray if isinstance(values, ExtensionArray): result = values._accumulate(name, skipna=skipna, **kwargs) else: result = nanops.na_accum_func(values, func, skipna=skipna) result = result.T if hasattr(result, "T") else result return result result = self._mgr.apply(block_accum_func) return self._constructor(result).__finalize__(self, method=name) def cummax(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func( "cummax", np.maximum.accumulate, axis, skipna, *args, **kwargs ) def cummin(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func( "cummin", np.minimum.accumulate, axis, skipna, *args, **kwargs ) def cumsum(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func("cumsum", np.cumsum, axis, skipna, *args, **kwargs) def cumprod(self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs): return self._accum_func("cumprod", np.cumprod, axis, skipna, *args, **kwargs) def _stat_function_ddof( self, name: str, func, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: nv.validate_stat_ddof_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if axis is None: axis = self._stat_axis_number return self._reduce( func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof ) def sem( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function_ddof( "sem", nanops.nansem, axis, skipna, ddof, numeric_only, **kwargs ) def var( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function_ddof( "var", nanops.nanvar, axis, skipna, ddof, numeric_only, **kwargs ) def std( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function_ddof( "std", nanops.nanstd, axis, skipna, ddof, numeric_only, **kwargs ) def _stat_function( self, name: str, func, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): if name == "median": nv.validate_median((), kwargs) else: nv.validate_stat_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only ) def min( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return self._stat_function( "min", nanops.nanmin, axis, skipna, numeric_only, **kwargs, ) def max( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return self._stat_function( "max", nanops.nanmax, axis, skipna, numeric_only, **kwargs, ) def mean( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( 
"mean", nanops.nanmean, axis, skipna, numeric_only, **kwargs ) def median( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( "median", nanops.nanmedian, axis, skipna, numeric_only, **kwargs ) def skew( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( "skew", nanops.nanskew, axis, skipna, numeric_only, **kwargs ) def kurt( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ) -> Series | float: return self._stat_function( "kurt", nanops.nankurt, axis, skipna, numeric_only, **kwargs ) kurtosis = kurt def _min_count_stat_function( self, name: str, func, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): if name == "sum": nv.validate_sum((), kwargs) elif name == "prod": nv.validate_prod((), kwargs) else: nv.validate_stat_func((), kwargs, fname=name) validate_bool_kwarg(skipna, "skipna", none_allowed=False) if axis is None: axis = self._stat_axis_number return self._reduce( func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count, ) def sum( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return self._min_count_stat_function( "sum", nanops.nansum, axis, skipna, numeric_only, min_count, **kwargs ) def prod( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return self._min_count_stat_function( "prod", nanops.nanprod, axis, skipna, numeric_only, min_count, **kwargs, ) product = prod def _add_numeric_operations(cls) -> None: """ Add the operations to the cls; evaluate the doc strings again """ axis_descr, name1, name2 = _doc_params(cls) _bool_doc, desc=_any_desc, name1=name1, name2=name2, axis_descr=axis_descr, see_also=_any_see_also, examples=_any_examples, empty_value=False, ) def any( self, *, axis: Axis = 0, bool_only=None, skipna: bool_t = True, **kwargs, ): return NDFrame.any( self, axis=axis, bool_only=bool_only, skipna=skipna, **kwargs, ) setattr(cls, "any", any) _bool_doc, desc=_all_desc, name1=name1, name2=name2, axis_descr=axis_descr, see_also=_all_see_also, examples=_all_examples, empty_value=True, ) def all( self, axis: Axis = 0, bool_only=None, skipna: bool_t = True, **kwargs, ): return NDFrame.all(self, axis, bool_only, skipna, **kwargs) setattr(cls, "all", all) _num_ddof_doc, desc="Return unbiased standard error of the mean over requested " "axis.\n\nNormalized by N-1 by default. This can be changed " "using the ddof argument", name1=name1, name2=name2, axis_descr=axis_descr, notes="", examples="", ) def sem( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ): return NDFrame.sem(self, axis, skipna, ddof, numeric_only, **kwargs) setattr(cls, "sem", sem) _num_ddof_doc, desc="Return unbiased variance over requested axis.\n\nNormalized by " "N-1 by default. This can be changed using the ddof argument.", name1=name1, name2=name2, axis_descr=axis_descr, notes="", examples=_var_examples, ) def var( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ): return NDFrame.var(self, axis, skipna, ddof, numeric_only, **kwargs) setattr(cls, "var", var) _num_ddof_doc, desc="Return sample standard deviation over requested axis." 
"\n\nNormalized by N-1 by default. This can be changed using the " "ddof argument.", name1=name1, name2=name2, axis_descr=axis_descr, notes=_std_notes, examples=_std_examples, ) def std( self, axis: Axis | None = None, skipna: bool_t = True, ddof: int = 1, numeric_only: bool_t = False, **kwargs, ): return NDFrame.std(self, axis, skipna, ddof, numeric_only, **kwargs) setattr(cls, "std", std) _cnum_doc, desc="minimum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="min", examples=_cummin_examples, ) def cummin( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cummin(self, axis, skipna, *args, **kwargs) setattr(cls, "cummin", cummin) _cnum_doc, desc="maximum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="max", examples=_cummax_examples, ) def cummax( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cummax(self, axis, skipna, *args, **kwargs) setattr(cls, "cummax", cummax) _cnum_doc, desc="sum", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="sum", examples=_cumsum_examples, ) def cumsum( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cumsum(self, axis, skipna, *args, **kwargs) setattr(cls, "cumsum", cumsum) _cnum_doc, desc="product", name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name="prod", examples=_cumprod_examples, ) def cumprod( self, axis: Axis | None = None, skipna: bool_t = True, *args, **kwargs ): return NDFrame.cumprod(self, axis, skipna, *args, **kwargs) setattr(cls, "cumprod", cumprod) # error: Untyped decorator makes function "sum" untyped _num_doc, desc="Return the sum of the values over the requested axis.\n\n" "This is equivalent to the method ``numpy.sum``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count=_min_count_stub, see_also=_stat_func_see_also, examples=_sum_examples, ) def sum( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return NDFrame.sum(self, axis, skipna, numeric_only, min_count, **kwargs) setattr(cls, "sum", sum) _num_doc, desc="Return the product of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count=_min_count_stub, see_also=_stat_func_see_also, examples=_prod_examples, ) def prod( self, axis: Axis | None = None, skipna: bool_t = True, numeric_only: bool_t = False, min_count: int = 0, **kwargs, ): return NDFrame.prod(self, axis, skipna, numeric_only, min_count, **kwargs) setattr(cls, "prod", prod) cls.product = prod _num_doc, desc="Return the mean of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def mean( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.mean(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "mean", mean) _num_doc, desc="Return unbiased skew over requested axis.\n\nNormalized by N-1.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def skew( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.skew(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "skew", skew) _num_doc, desc="Return unbiased kurtosis over requested axis.\n\n" "Kurtosis obtained using Fisher's definition of\n" "kurtosis (kurtosis of normal == 0.0). 
Normalized " "by N-1.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def kurt( self, axis: Axis | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.kurt(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "kurt", kurt) cls.kurtosis = kurt _num_doc, desc="Return the median of the values over the requested axis.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also="", examples="", ) def median( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.median(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "median", median) _num_doc, desc="Return the maximum of the values over the requested axis.\n\n" "If you want the *index* of the maximum, use ``idxmax``. This is " "the equivalent of the ``numpy.ndarray`` method ``argmax``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also=_stat_func_see_also, examples=_max_examples, ) def max( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.max(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "max", max) _num_doc, desc="Return the minimum of the values over the requested axis.\n\n" "If you want the *index* of the minimum, use ``idxmin``. This is " "the equivalent of the ``numpy.ndarray`` method ``argmin``.", name1=name1, name2=name2, axis_descr=axis_descr, min_count="", see_also=_stat_func_see_also, examples=_min_examples, ) def min( self, axis: AxisInt | None = 0, skipna: bool_t = True, numeric_only: bool_t = False, **kwargs, ): return NDFrame.min(self, axis, skipna, numeric_only, **kwargs) setattr(cls, "min", min) def rolling( self, window: int | dt.timedelta | str | BaseOffset | BaseIndexer, min_periods: int | None = None, center: bool_t = False, win_type: str | None = None, on: str | None = None, axis: Axis = 0, closed: str | None = None, step: int | None = None, method: str = "single", ) -> Window | Rolling: axis = self._get_axis_number(axis) if win_type is not None: return Window( self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, axis=axis, closed=closed, step=step, method=method, ) return Rolling( self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, axis=axis, closed=closed, step=step, method=method, ) def expanding( self, min_periods: int = 1, axis: Axis = 0, method: str = "single", ) -> Expanding: axis = self._get_axis_number(axis) return Expanding(self, min_periods=min_periods, axis=axis, method=method) def ewm( self, com: float | None = None, span: float | None = None, halflife: float | TimedeltaConvertibleTypes | None = None, alpha: float | None = None, min_periods: int | None = 0, adjust: bool_t = True, ignore_na: bool_t = False, axis: Axis = 0, times: np.ndarray | DataFrame | Series | None = None, method: str = "single", ) -> ExponentialMovingWindow: axis = self._get_axis_number(axis) return ExponentialMovingWindow( self, com=com, span=span, halflife=halflife, alpha=alpha, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, axis=axis, times=times, method=method, ) # ---------------------------------------------------------------------- # Arithmetic Methods def _inplace_method(self, other, op): """ Wrap arithmetic method to operate inplace. 
""" result = op(self, other) if ( self.ndim == 1 and result._indexed_same(self) and is_dtype_equal(result.dtype, self.dtype) ): # GH#36498 this inplace op can _actually_ be inplace. # Item "ArrayManager" of "Union[ArrayManager, SingleArrayManager, # BlockManager, SingleBlockManager]" has no attribute "setitem_inplace" self._mgr.setitem_inplace( # type: ignore[union-attr] slice(None), result._values ) return self # Delete cacher self._reset_cacher() # this makes sure that we are aligned like the input # we are updating inplace so we want to ignore is_copy self._update_inplace( result.reindex_like(self, copy=False), verify_is_copy=False ) return self def __iadd__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for + ("Type[NDFrame]") return self._inplace_method(other, type(self).__add__) # type: ignore[operator] def __isub__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for - ("Type[NDFrame]") return self._inplace_method(other, type(self).__sub__) # type: ignore[operator] def __imul__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for * ("Type[NDFrame]") return self._inplace_method(other, type(self).__mul__) # type: ignore[operator] def __itruediv__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for / ("Type[NDFrame]") return self._inplace_method( other, type(self).__truediv__ # type: ignore[operator] ) def __ifloordiv__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for // ("Type[NDFrame]") return self._inplace_method( other, type(self).__floordiv__ # type: ignore[operator] ) def __imod__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for % ("Type[NDFrame]") return self._inplace_method(other, type(self).__mod__) # type: ignore[operator] def __ipow__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for ** ("Type[NDFrame]") return self._inplace_method(other, type(self).__pow__) # type: ignore[operator] def __iand__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for & ("Type[NDFrame]") return self._inplace_method(other, type(self).__and__) # type: ignore[operator] def __ior__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for | ("Type[NDFrame]") return self._inplace_method(other, type(self).__or__) # type: ignore[operator] def __ixor__(self: NDFrameT, other) -> NDFrameT: # error: Unsupported left operand type for ^ ("Type[NDFrame]") return self._inplace_method(other, type(self).__xor__) # type: ignore[operator] # ---------------------------------------------------------------------- # Misc methods def _find_valid_index(self, *, how: str) -> Hashable | None: """ Retrieves the index of the first valid value. Parameters ---------- how : {'first', 'last'} Use this parameter to change between the first or last valid index. Returns ------- idx_first_valid : type of index """ idxpos = find_valid_index(self._values, how=how, is_valid=~isna(self._values)) if idxpos is None: return None return self.index[idxpos] def first_valid_index(self) -> Hashable | None: """ Return index for {position} non-NA value or None, if no non-NA value is found. Returns ------- type of index Notes ----- If all elements are non-NA/null, returns None. Also returns None for empty {klass}. 
""" return self._find_valid_index(how="first") def last_valid_index(self) -> Hashable | None: return self._find_valid_index(how="last") def to_json( path_or_buf: FilePath | WriteBuffer[str] | WriteBuffer[bytes] | None, obj: NDFrame, orient: str | None = None, date_format: str = "epoch", double_precision: int = 10, force_ascii: bool = True, date_unit: str = "ms", default_handler: Callable[[Any], JSONSerializable] | None = None, lines: bool = False, compression: CompressionOptions = "infer", index: bool = True, indent: int = 0, storage_options: StorageOptions = None, mode: Literal["a", "w"] = "w", ) -> str | None: if not index and orient not in ["split", "table"]: raise ValueError( "'index=False' is only valid when 'orient' is 'split' or 'table'" ) if lines and orient != "records": raise ValueError("'lines' keyword only valid when 'orient' is records") if mode not in ["a", "w"]: msg = ( f"mode={mode} is not a valid option." "Only 'w' and 'a' are currently supported." ) raise ValueError(msg) if mode == "a" and (not lines or orient != "records"): msg = ( "mode='a' (append) is only supported when" "lines is True and orient is 'records'" ) raise ValueError(msg) if orient == "table" and isinstance(obj, Series): obj = obj.to_frame(name=obj.name or "values") writer: type[Writer] if orient == "table" and isinstance(obj, DataFrame): writer = JSONTableWriter elif isinstance(obj, Series): writer = SeriesWriter elif isinstance(obj, DataFrame): writer = FrameWriter else: raise NotImplementedError("'obj' should be a Series or a DataFrame") s = writer( obj, orient=orient, date_format=date_format, double_precision=double_precision, ensure_ascii=force_ascii, date_unit=date_unit, default_handler=default_handler, index=index, indent=indent, ).write() if lines: s = convert_to_line_delimits(s) if path_or_buf is not None: # apply compression and byte/text conversion with get_handle( path_or_buf, mode, compression=compression, storage_options=storage_options ) as handles: handles.handle.write(s) else: return s return None
null
173,502
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from collections import abc from io import StringIO from itertools import islice from types import TracebackType from typing import ( TYPE_CHECKING, Any, Callable, Generic, Literal, Mapping, TypeVar, overload, ) import numpy as np from pandas._libs import lib from pandas._libs.json import ( dumps, loads, ) from pandas._libs.tslibs import iNaT from pandas._typing import ( CompressionOptions, DtypeArg, DtypeBackend, FilePath, IndexLabel, JSONEngine, JSONSerializable, ReadBuffer, StorageOptions, WriteBuffer, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import AbstractMethodError from pandas.util._decorators import doc from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( ensure_str, is_period_dtype, ) from pandas.core.dtypes.generic import ABCIndex from pandas import ( ArrowDtype, DataFrame, MultiIndex, Series, isna, notna, to_datetime, ) from pandas.core.reshape.concat import concat from pandas.core.shared_docs import _shared_docs from pandas.io.common import ( IOHandles, dedup_names, extension_to_compression, file_exists, get_handle, is_fsspec_url, is_potential_multi_index, is_url, stringify_path, ) from pandas.io.json._normalize import convert_to_line_delimits from pandas.io.json._table_schema import ( build_table_schema, parse_table_schema, ) from pandas.io.parsers.readers import validate_integer class JsonReader(abc.Iterator, Generic[FrameSeriesStrT]): def __init__( self, filepath_or_buffer, orient, typ: FrameSeriesStrT, dtype, convert_axes, convert_dates, keep_default_dates: bool, precise_float: bool, date_unit, encoding, lines: bool, chunksize: int | None, compression: CompressionOptions, nrows: int | None, storage_options: StorageOptions = None, encoding_errors: str | None = "strict", dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, engine: JSONEngine = "ujson", ) -> None: def _preprocess_data(self, data): def _get_data_from_filepath(self, filepath_or_buffer): def _combine_lines(self, lines) -> str: def read(self: JsonReader[Literal["frame"]]) -> DataFrame: def read(self: JsonReader[Literal["series"]]) -> Series: def read(self: JsonReader[Literal["frame", "series"]]) -> DataFrame | Series: def read(self) -> DataFrame | Series: def _get_object_parser(self, json) -> DataFrame | Series: def close(self) -> None: def __iter__(self: JsonReader[FrameSeriesStrT]) -> JsonReader[FrameSeriesStrT]: def __next__(self: JsonReader[Literal["frame"]]) -> DataFrame: def __next__(self: JsonReader[Literal["series"]]) -> Series: def __next__(self: JsonReader[Literal["frame", "series"]]) -> DataFrame | Series: def __next__(self) -> DataFrame | Series: def __enter__(self) -> JsonReader[FrameSeriesStrT]: def __exit__( self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None, ) -> None: Literal: _SpecialForm = ... DtypeArg = Union[Dtype, Dict[Hashable, Dtype]] class ReadBuffer(BaseBuffer, Protocol[AnyStr_co]): def read(self, __n: int = ...) 
-> AnyStr_co: FilePath = Union[str, "PathLike[str]"] StorageOptions = Optional[Dict[str, Any]] CompressionOptions = Optional[ Union[Literal["infer", "gzip", "bz2", "zip", "xz", "zstd", "tar"], CompressionDict] ] JSONEngine = Literal["ujson", "pyarrow"] DtypeBackend = Literal["pyarrow", "numpy_nullable"] def read_json( path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes], *, orient: str | None = ..., typ: Literal["frame"] = ..., dtype: DtypeArg | None = ..., convert_axes=..., convert_dates: bool | list[str] = ..., keep_default_dates: bool = ..., precise_float: bool = ..., date_unit: str | None = ..., encoding: str | None = ..., encoding_errors: str | None = ..., lines: bool = ..., chunksize: int, compression: CompressionOptions = ..., nrows: int | None = ..., storage_options: StorageOptions = ..., dtype_backend: DtypeBackend | lib.NoDefault = ..., engine: JSONEngine = ..., ) -> JsonReader[Literal["frame"]]: ...
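A minimal sketch of the overload above: with ``lines=True`` and an integer ``chunksize``, ``read_json`` returns a ``JsonReader`` rather than a ``DataFrame``, and the reader is both an iterator and a context manager. The input string here is illustrative.

import pandas as pd
from io import StringIO

jsonl = '{"a": 1}\n{"a": 2}\n{"a": 3}\n'

# Each iteration yields a DataFrame of at most `chunksize` rows, with a
# running index maintained by JsonReader.__next__.
with pd.read_json(StringIO(jsonl), lines=True, chunksize=2) as reader:
    for chunk in reader:
        print(chunk)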
null
173,503
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from collections import abc from io import StringIO from itertools import islice from types import TracebackType from typing import ( TYPE_CHECKING, Any, Callable, Generic, Literal, Mapping, TypeVar, overload, ) import numpy as np from pandas._libs import lib from pandas._libs.json import ( dumps, loads, ) from pandas._libs.tslibs import iNaT from pandas._typing import ( CompressionOptions, DtypeArg, DtypeBackend, FilePath, IndexLabel, JSONEngine, JSONSerializable, ReadBuffer, StorageOptions, WriteBuffer, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import AbstractMethodError from pandas.util._decorators import doc from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( ensure_str, is_period_dtype, ) from pandas.core.dtypes.generic import ABCIndex from pandas import ( ArrowDtype, DataFrame, MultiIndex, Series, isna, notna, to_datetime, ) from pandas.core.reshape.concat import concat from pandas.core.shared_docs import _shared_docs from pandas.io.common import ( IOHandles, dedup_names, extension_to_compression, file_exists, get_handle, is_fsspec_url, is_potential_multi_index, is_url, stringify_path, ) from pandas.io.json._normalize import convert_to_line_delimits from pandas.io.json._table_schema import ( build_table_schema, parse_table_schema, ) from pandas.io.parsers.readers import validate_integer class JsonReader(abc.Iterator, Generic[FrameSeriesStrT]): """ JsonReader provides an interface for reading in a JSON file. If initialized with ``lines=True`` and ``chunksize``, can be iterated over ``chunksize`` lines at a time. Otherwise, calling ``read`` reads in the whole document. """ def __init__( self, filepath_or_buffer, orient, typ: FrameSeriesStrT, dtype, convert_axes, convert_dates, keep_default_dates: bool, precise_float: bool, date_unit, encoding, lines: bool, chunksize: int | None, compression: CompressionOptions, nrows: int | None, storage_options: StorageOptions = None, encoding_errors: str | None = "strict", dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, engine: JSONEngine = "ujson", ) -> None: self.orient = orient self.typ = typ self.dtype = dtype self.convert_axes = convert_axes self.convert_dates = convert_dates self.keep_default_dates = keep_default_dates self.precise_float = precise_float self.date_unit = date_unit self.encoding = encoding self.engine = engine self.compression = compression self.storage_options = storage_options self.lines = lines self.chunksize = chunksize self.nrows_seen = 0 self.nrows = nrows self.encoding_errors = encoding_errors self.handles: IOHandles[str] | None = None self.dtype_backend = dtype_backend if self.engine not in {"pyarrow", "ujson"}: raise ValueError( f"The engine type {self.engine} is currently not supported." 
) if self.chunksize is not None: self.chunksize = validate_integer("chunksize", self.chunksize, 1) if not self.lines: raise ValueError("chunksize can only be passed if lines=True") if self.engine == "pyarrow": raise ValueError( "currently pyarrow engine doesn't support chunksize parameter" ) if self.nrows is not None: self.nrows = validate_integer("nrows", self.nrows, 0) if not self.lines: raise ValueError("nrows can only be passed if lines=True") if self.engine == "pyarrow": if not self.lines: raise ValueError( "currently pyarrow engine only supports " "the line-delimited JSON format" ) self.data = filepath_or_buffer elif self.engine == "ujson": data = self._get_data_from_filepath(filepath_or_buffer) self.data = self._preprocess_data(data) def _preprocess_data(self, data): """ At this point, the data either has a `read` attribute (e.g. a file object or a StringIO) or is a string that is a JSON document. If self.chunksize, we prepare the data for the `__next__` method. Otherwise, we read it into memory for the `read` method. """ if hasattr(data, "read") and not (self.chunksize or self.nrows): with self: data = data.read() if not hasattr(data, "read") and (self.chunksize or self.nrows): data = StringIO(data) return data def _get_data_from_filepath(self, filepath_or_buffer): """ The function read_json accepts three input types: 1. filepath (string-like) 2. file-like object (e.g. open file object, StringIO) 3. JSON string This method turns (1) into (2) to simplify the rest of the processing. It returns input types (2) and (3) unchanged. It raises FileNotFoundError if the input is a string ending in one of .json, .json.gz, .json.bz2, etc. but no such file exists. """ # if it is a string but the file does not exist, it might be a JSON string filepath_or_buffer = stringify_path(filepath_or_buffer) if ( not isinstance(filepath_or_buffer, str) or is_url(filepath_or_buffer) or is_fsspec_url(filepath_or_buffer) or file_exists(filepath_or_buffer) ): self.handles = get_handle( filepath_or_buffer, "r", encoding=self.encoding, compression=self.compression, storage_options=self.storage_options, errors=self.encoding_errors, ) filepath_or_buffer = self.handles.handle elif ( isinstance(filepath_or_buffer, str) and filepath_or_buffer.lower().endswith( (".json",) + tuple(f".json{c}" for c in extension_to_compression) ) and not file_exists(filepath_or_buffer) ): raise FileNotFoundError(f"File {filepath_or_buffer} does not exist") return filepath_or_buffer def _combine_lines(self, lines) -> str: """ Combines a list of JSON objects into one JSON object. """ return ( f'[{",".join([line for line in (line.strip() for line in lines) if line])}]' ) def read(self: JsonReader[Literal["frame"]]) -> DataFrame: ... def read(self: JsonReader[Literal["series"]]) -> Series: ... def read(self: JsonReader[Literal["frame", "series"]]) -> DataFrame | Series: ... def read(self) -> DataFrame | Series: """ Read the whole JSON input into a pandas object. 
""" obj: DataFrame | Series with self: if self.engine == "pyarrow": pyarrow_json = import_optional_dependency("pyarrow.json") pa_table = pyarrow_json.read_json(self.data) mapping: type[ArrowDtype] | None | Callable if self.dtype_backend == "pyarrow": mapping = ArrowDtype elif self.dtype_backend == "numpy_nullable": from pandas.io._util import _arrow_dtype_mapping mapping = _arrow_dtype_mapping().get else: mapping = None return pa_table.to_pandas(types_mapper=mapping) elif self.engine == "ujson": if self.lines: if self.chunksize: obj = concat(self) elif self.nrows: lines = list(islice(self.data, self.nrows)) lines_json = self._combine_lines(lines) obj = self._get_object_parser(lines_json) else: data = ensure_str(self.data) data_lines = data.split("\n") obj = self._get_object_parser(self._combine_lines(data_lines)) else: obj = self._get_object_parser(self.data) if self.dtype_backend is not lib.no_default: return obj.convert_dtypes( infer_objects=False, dtype_backend=self.dtype_backend ) else: return obj def _get_object_parser(self, json) -> DataFrame | Series: """ Parses a json document into a pandas object. """ typ = self.typ dtype = self.dtype kwargs = { "orient": self.orient, "dtype": self.dtype, "convert_axes": self.convert_axes, "convert_dates": self.convert_dates, "keep_default_dates": self.keep_default_dates, "precise_float": self.precise_float, "date_unit": self.date_unit, "dtype_backend": self.dtype_backend, } obj = None if typ == "frame": obj = FrameParser(json, **kwargs).parse() if typ == "series" or obj is None: if not isinstance(dtype, bool): kwargs["dtype"] = dtype obj = SeriesParser(json, **kwargs).parse() return obj def close(self) -> None: """ If we opened a stream earlier, in _get_data_from_filepath, we should close it. If an open stream or file was passed, we leave it open. """ if self.handles is not None: self.handles.close() def __iter__(self: JsonReader[FrameSeriesStrT]) -> JsonReader[FrameSeriesStrT]: return self def __next__(self: JsonReader[Literal["frame"]]) -> DataFrame: ... def __next__(self: JsonReader[Literal["series"]]) -> Series: ... def __next__(self: JsonReader[Literal["frame", "series"]]) -> DataFrame | Series: ... def __next__(self) -> DataFrame | Series: if self.nrows and self.nrows_seen >= self.nrows: self.close() raise StopIteration lines = list(islice(self.data, self.chunksize)) if not lines: self.close() raise StopIteration try: lines_json = self._combine_lines(lines) obj = self._get_object_parser(lines_json) # Make sure that the returned objects have the right index. obj.index = range(self.nrows_seen, self.nrows_seen + len(obj)) self.nrows_seen += len(obj) except Exception as ex: self.close() raise ex if self.dtype_backend is not lib.no_default: return obj.convert_dtypes( infer_objects=False, dtype_backend=self.dtype_backend ) else: return obj def __enter__(self) -> JsonReader[FrameSeriesStrT]: return self def __exit__( self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None, ) -> None: self.close() Literal: _SpecialForm = ... DtypeArg = Union[Dtype, Dict[Hashable, Dtype]] class ReadBuffer(BaseBuffer, Protocol[AnyStr_co]): def read(self, __n: int = ...) -> AnyStr_co: # for BytesIOWrapper, gzip.GzipFile, bz2.BZ2File ... 
FilePath = Union[str, "PathLike[str]"] StorageOptions = Optional[Dict[str, Any]] CompressionOptions = Optional[ Union[Literal["infer", "gzip", "bz2", "zip", "xz", "zstd", "tar"], CompressionDict] ] JSONEngine = Literal["ujson", "pyarrow"] DtypeBackend = Literal["pyarrow", "numpy_nullable"] def read_json( path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes], *, orient: str | None = ..., typ: Literal["series"], dtype: DtypeArg | None = ..., convert_axes=..., convert_dates: bool | list[str] = ..., keep_default_dates: bool = ..., precise_float: bool = ..., date_unit: str | None = ..., encoding: str | None = ..., encoding_errors: str | None = ..., lines: bool = ..., chunksize: int, compression: CompressionOptions = ..., nrows: int | None = ..., storage_options: StorageOptions = ..., dtype_backend: DtypeBackend | lib.NoDefault = ..., engine: JSONEngine = ..., ) -> JsonReader[Literal["series"]]: ...
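A small sketch of the ``nrows`` path validated in ``JsonReader.__init__`` above, with an illustrative payload: only the first ``nrows`` line-delimited records are parsed, and ``nrows`` without ``lines=True`` raises ``ValueError``.

import pandas as pd
from io import StringIO

jsonl = '{"a": 1}\n{"a": 2}\n{"a": 3}\n'

# Only the first two lines are read into the resulting frame.
head = pd.read_json(StringIO(jsonl), lines=True, nrows=2)
print(head)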
null
173,504
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from collections import abc from io import StringIO from itertools import islice from types import TracebackType from typing import ( TYPE_CHECKING, Any, Callable, Generic, Literal, Mapping, TypeVar, overload, ) import numpy as np from pandas._libs import lib from pandas._libs.json import ( dumps, loads, ) from pandas._libs.tslibs import iNaT from pandas._typing import ( CompressionOptions, DtypeArg, DtypeBackend, FilePath, IndexLabel, JSONEngine, JSONSerializable, ReadBuffer, StorageOptions, WriteBuffer, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import AbstractMethodError from pandas.util._decorators import doc from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( ensure_str, is_period_dtype, ) from pandas.core.dtypes.generic import ABCIndex from pandas import ( ArrowDtype, DataFrame, MultiIndex, Series, isna, notna, to_datetime, ) from pandas.core.reshape.concat import concat from pandas.core.shared_docs import _shared_docs from pandas.io.common import ( IOHandles, dedup_names, extension_to_compression, file_exists, get_handle, is_fsspec_url, is_potential_multi_index, is_url, stringify_path, ) from pandas.io.json._normalize import convert_to_line_delimits from pandas.io.json._table_schema import ( build_table_schema, parse_table_schema, ) from pandas.io.parsers.readers import validate_integer Literal: _SpecialForm = ... DtypeArg = Union[Dtype, Dict[Hashable, Dtype]] class ReadBuffer(BaseBuffer, Protocol[AnyStr_co]): def read(self, __n: int = ...) -> AnyStr_co: # for BytesIOWrapper, gzip.GzipFile, bz2.BZ2File ... FilePath = Union[str, "PathLike[str]"] StorageOptions = Optional[Dict[str, Any]] CompressionOptions = Optional[ Union[Literal["infer", "gzip", "bz2", "zip", "xz", "zstd", "tar"], CompressionDict] ] JSONEngine = Literal["ujson", "pyarrow"] DtypeBackend = Literal["pyarrow", "numpy_nullable"] def read_json( path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes], *, orient: str | None = ..., typ: Literal["series"], dtype: DtypeArg | None = ..., convert_axes=..., convert_dates: bool | list[str] = ..., keep_default_dates: bool = ..., precise_float: bool = ..., date_unit: str | None = ..., encoding: str | None = ..., encoding_errors: str | None = ..., lines: bool = ..., chunksize: None = ..., compression: CompressionOptions = ..., nrows: int | None = ..., storage_options: StorageOptions = ..., dtype_backend: DtypeBackend | lib.NoDefault = ..., engine: JSONEngine = ..., ) -> Series: ...
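Per this overload (``typ="series"`` with the default ``chunksize=None``), parsing is eager and yields a ``Series``; a minimal sketch with an illustrative payload:

import pandas as pd
from io import StringIO

# Default orient for typ="series" is "index": the dict keys become the
# Series index.
ser = pd.read_json(StringIO('{"x": 1, "y": 2}'), typ="series")
print(ser)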
null
173,505
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from collections import abc from io import StringIO from itertools import islice from types import TracebackType from typing import ( TYPE_CHECKING, Any, Callable, Generic, Literal, Mapping, TypeVar, overload, ) import numpy as np from pandas._libs import lib from pandas._libs.json import ( dumps, loads, ) from pandas._libs.tslibs import iNaT from pandas._typing import ( CompressionOptions, DtypeArg, DtypeBackend, FilePath, IndexLabel, JSONEngine, JSONSerializable, ReadBuffer, StorageOptions, WriteBuffer, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import AbstractMethodError from pandas.util._decorators import doc from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( ensure_str, is_period_dtype, ) from pandas.core.dtypes.generic import ABCIndex from pandas import ( ArrowDtype, DataFrame, MultiIndex, Series, isna, notna, to_datetime, ) from pandas.core.reshape.concat import concat from pandas.core.shared_docs import _shared_docs from pandas.io.common import ( IOHandles, dedup_names, extension_to_compression, file_exists, get_handle, is_fsspec_url, is_potential_multi_index, is_url, stringify_path, ) from pandas.io.json._normalize import convert_to_line_delimits from pandas.io.json._table_schema import ( build_table_schema, parse_table_schema, ) from pandas.io.parsers.readers import validate_integer Literal: _SpecialForm = ... DtypeArg = Union[Dtype, Dict[Hashable, Dtype]] class ReadBuffer(BaseBuffer, Protocol[AnyStr_co]): def read(self, __n: int = ...) -> AnyStr_co: # for BytesIOWrapper, gzip.GzipFile, bz2.BZ2File ... FilePath = Union[str, "PathLike[str]"] StorageOptions = Optional[Dict[str, Any]] CompressionOptions = Optional[ Union[Literal["infer", "gzip", "bz2", "zip", "xz", "zstd", "tar"], CompressionDict] ] JSONEngine = Literal["ujson", "pyarrow"] DtypeBackend = Literal["pyarrow", "numpy_nullable"] def read_json( path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes], *, orient: str | None = ..., typ: Literal["frame"] = ..., dtype: DtypeArg | None = ..., convert_axes=..., convert_dates: bool | list[str] = ..., keep_default_dates: bool = ..., precise_float: bool = ..., date_unit: str | None = ..., encoding: str | None = ..., encoding_errors: str | None = ..., lines: bool = ..., chunksize: None = ..., compression: CompressionOptions = ..., nrows: int | None = ..., storage_options: StorageOptions = ..., dtype_backend: DtypeBackend | lib.NoDefault = ..., engine: JSONEngine = ..., ) -> DataFrame: ...
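The matching frame overload: with no ``chunksize`` and the default ``typ="frame"``, a ``DataFrame`` is returned directly. A minimal sketch using the default ``orient="columns"``:

import pandas as pd
from io import StringIO

# {column -> {index -> value}} is the default "columns" orient for frames.
df = pd.read_json(StringIO('{"col": {"r1": 1, "r2": 2}}'))
print(df)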
null
173,506
from __future__ import annotations from abc import ( ABC, abstractmethod, ) from collections import abc from io import StringIO from itertools import islice from types import TracebackType from typing import ( TYPE_CHECKING, Any, Callable, Generic, Literal, Mapping, TypeVar, overload, ) import numpy as np from pandas._libs import lib from pandas._libs.json import ( dumps, loads, ) from pandas._libs.tslibs import iNaT from pandas._typing import ( CompressionOptions, DtypeArg, DtypeBackend, FilePath, IndexLabel, JSONEngine, JSONSerializable, ReadBuffer, StorageOptions, WriteBuffer, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import AbstractMethodError from pandas.util._decorators import doc from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( ensure_str, is_period_dtype, ) from pandas.core.dtypes.generic import ABCIndex from pandas import ( ArrowDtype, DataFrame, MultiIndex, Series, isna, notna, to_datetime, ) from pandas.core.reshape.concat import concat from pandas.core.shared_docs import _shared_docs from pandas.io.common import ( IOHandles, dedup_names, extension_to_compression, file_exists, get_handle, is_fsspec_url, is_potential_multi_index, is_url, stringify_path, ) from pandas.io.json._normalize import convert_to_line_delimits from pandas.io.json._table_schema import ( build_table_schema, parse_table_schema, ) from pandas.io.parsers.readers import validate_integer class JsonReader(abc.Iterator, Generic[FrameSeriesStrT]): """ JsonReader provides an interface for reading in a JSON file. If initialized with ``lines=True`` and ``chunksize``, can be iterated over ``chunksize`` lines at a time. Otherwise, calling ``read`` reads in the whole document. """ def __init__( self, filepath_or_buffer, orient, typ: FrameSeriesStrT, dtype, convert_axes, convert_dates, keep_default_dates: bool, precise_float: bool, date_unit, encoding, lines: bool, chunksize: int | None, compression: CompressionOptions, nrows: int | None, storage_options: StorageOptions = None, encoding_errors: str | None = "strict", dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, engine: JSONEngine = "ujson", ) -> None: self.orient = orient self.typ = typ self.dtype = dtype self.convert_axes = convert_axes self.convert_dates = convert_dates self.keep_default_dates = keep_default_dates self.precise_float = precise_float self.date_unit = date_unit self.encoding = encoding self.engine = engine self.compression = compression self.storage_options = storage_options self.lines = lines self.chunksize = chunksize self.nrows_seen = 0 self.nrows = nrows self.encoding_errors = encoding_errors self.handles: IOHandles[str] | None = None self.dtype_backend = dtype_backend if self.engine not in {"pyarrow", "ujson"}: raise ValueError( f"The engine type {self.engine} is currently not supported." 
) if self.chunksize is not None: self.chunksize = validate_integer("chunksize", self.chunksize, 1) if not self.lines: raise ValueError("chunksize can only be passed if lines=True") if self.engine == "pyarrow": raise ValueError( "currently pyarrow engine doesn't support chunksize parameter" ) if self.nrows is not None: self.nrows = validate_integer("nrows", self.nrows, 0) if not self.lines: raise ValueError("nrows can only be passed if lines=True") if self.engine == "pyarrow": if not self.lines: raise ValueError( "currently pyarrow engine only supports " "the line-delimited JSON format" ) self.data = filepath_or_buffer elif self.engine == "ujson": data = self._get_data_from_filepath(filepath_or_buffer) self.data = self._preprocess_data(data) def _preprocess_data(self, data): """ At this point, the data either has a `read` attribute (e.g. a file object or a StringIO) or is a string that is a JSON document. If self.chunksize, we prepare the data for the `__next__` method. Otherwise, we read it into memory for the `read` method. """ if hasattr(data, "read") and not (self.chunksize or self.nrows): with self: data = data.read() if not hasattr(data, "read") and (self.chunksize or self.nrows): data = StringIO(data) return data def _get_data_from_filepath(self, filepath_or_buffer): """ The function read_json accepts three input types: 1. filepath (string-like) 2. file-like object (e.g. open file object, StringIO) 3. JSON string This method turns (1) into (2) to simplify the rest of the processing. It returns input types (2) and (3) unchanged. It raises FileNotFoundError if the input is a string ending in one of .json, .json.gz, .json.bz2, etc. but no such file exists. """ # if it is a string but the file does not exist, it might be a JSON string filepath_or_buffer = stringify_path(filepath_or_buffer) if ( not isinstance(filepath_or_buffer, str) or is_url(filepath_or_buffer) or is_fsspec_url(filepath_or_buffer) or file_exists(filepath_or_buffer) ): self.handles = get_handle( filepath_or_buffer, "r", encoding=self.encoding, compression=self.compression, storage_options=self.storage_options, errors=self.encoding_errors, ) filepath_or_buffer = self.handles.handle elif ( isinstance(filepath_or_buffer, str) and filepath_or_buffer.lower().endswith( (".json",) + tuple(f".json{c}" for c in extension_to_compression) ) and not file_exists(filepath_or_buffer) ): raise FileNotFoundError(f"File {filepath_or_buffer} does not exist") return filepath_or_buffer def _combine_lines(self, lines) -> str: """ Combines a list of JSON objects into one JSON object. """ return ( f'[{",".join([line for line in (line.strip() for line in lines) if line])}]' ) def read(self: JsonReader[Literal["frame"]]) -> DataFrame: ... def read(self: JsonReader[Literal["series"]]) -> Series: ... def read(self: JsonReader[Literal["frame", "series"]]) -> DataFrame | Series: ... def read(self) -> DataFrame | Series: """ Read the whole JSON input into a pandas object. 
""" obj: DataFrame | Series with self: if self.engine == "pyarrow": pyarrow_json = import_optional_dependency("pyarrow.json") pa_table = pyarrow_json.read_json(self.data) mapping: type[ArrowDtype] | None | Callable if self.dtype_backend == "pyarrow": mapping = ArrowDtype elif self.dtype_backend == "numpy_nullable": from pandas.io._util import _arrow_dtype_mapping mapping = _arrow_dtype_mapping().get else: mapping = None return pa_table.to_pandas(types_mapper=mapping) elif self.engine == "ujson": if self.lines: if self.chunksize: obj = concat(self) elif self.nrows: lines = list(islice(self.data, self.nrows)) lines_json = self._combine_lines(lines) obj = self._get_object_parser(lines_json) else: data = ensure_str(self.data) data_lines = data.split("\n") obj = self._get_object_parser(self._combine_lines(data_lines)) else: obj = self._get_object_parser(self.data) if self.dtype_backend is not lib.no_default: return obj.convert_dtypes( infer_objects=False, dtype_backend=self.dtype_backend ) else: return obj def _get_object_parser(self, json) -> DataFrame | Series: """ Parses a json document into a pandas object. """ typ = self.typ dtype = self.dtype kwargs = { "orient": self.orient, "dtype": self.dtype, "convert_axes": self.convert_axes, "convert_dates": self.convert_dates, "keep_default_dates": self.keep_default_dates, "precise_float": self.precise_float, "date_unit": self.date_unit, "dtype_backend": self.dtype_backend, } obj = None if typ == "frame": obj = FrameParser(json, **kwargs).parse() if typ == "series" or obj is None: if not isinstance(dtype, bool): kwargs["dtype"] = dtype obj = SeriesParser(json, **kwargs).parse() return obj def close(self) -> None: """ If we opened a stream earlier, in _get_data_from_filepath, we should close it. If an open stream or file was passed, we leave it open. """ if self.handles is not None: self.handles.close() def __iter__(self: JsonReader[FrameSeriesStrT]) -> JsonReader[FrameSeriesStrT]: return self def __next__(self: JsonReader[Literal["frame"]]) -> DataFrame: ... def __next__(self: JsonReader[Literal["series"]]) -> Series: ... def __next__(self: JsonReader[Literal["frame", "series"]]) -> DataFrame | Series: ... def __next__(self) -> DataFrame | Series: if self.nrows and self.nrows_seen >= self.nrows: self.close() raise StopIteration lines = list(islice(self.data, self.chunksize)) if not lines: self.close() raise StopIteration try: lines_json = self._combine_lines(lines) obj = self._get_object_parser(lines_json) # Make sure that the returned objects have the right index. obj.index = range(self.nrows_seen, self.nrows_seen + len(obj)) self.nrows_seen += len(obj) except Exception as ex: self.close() raise ex if self.dtype_backend is not lib.no_default: return obj.convert_dtypes( infer_objects=False, dtype_backend=self.dtype_backend ) else: return obj def __enter__(self) -> JsonReader[FrameSeriesStrT]: return self def __exit__( self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None, ) -> None: self.close() Literal: _SpecialForm = ... DtypeArg = Union[Dtype, Dict[Hashable, Dtype]] class ReadBuffer(BaseBuffer, Protocol[AnyStr_co]): def read(self, __n: int = ...) -> AnyStr_co: # for BytesIOWrapper, gzip.GzipFile, bz2.BZ2File ... 
FilePath = Union[str, "PathLike[str]"] StorageOptions = Optional[Dict[str, Any]] CompressionOptions = Optional[ Union[Literal["infer", "gzip", "bz2", "zip", "xz", "zstd", "tar"], CompressionDict] ] JSONEngine = Literal["ujson", "pyarrow"] DtypeBackend = Literal["pyarrow", "numpy_nullable"] def check_dtype_backend(dtype_backend) -> None: if dtype_backend is not lib.no_default: if dtype_backend not in ["numpy_nullable", "pyarrow"]: raise ValueError( f"dtype_backend {dtype_backend} is invalid, only 'numpy_nullable' and " f"'pyarrow' are allowed.", ) The provided code snippet includes necessary dependencies for implementing the `read_json` function. Write a Python function `def read_json( path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes], *, orient: str | None = None, typ: Literal["frame", "series"] = "frame", dtype: DtypeArg | None = None, convert_axes=None, convert_dates: bool | list[str] = True, keep_default_dates: bool = True, precise_float: bool = False, date_unit: str | None = None, encoding: str | None = None, encoding_errors: str | None = "strict", lines: bool = False, chunksize: int | None = None, compression: CompressionOptions = "infer", nrows: int | None = None, storage_options: StorageOptions = None, dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, engine: JSONEngine = "ujson", ) -> DataFrame | Series | JsonReader` to solve the following problem: Convert a JSON string to pandas object. Parameters ---------- path_or_buf : a valid JSON str, path object or file-like object Any valid string path is acceptable. The string could be a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is expected. A local file could be: ``file://localhost/path/to/table.json``. If you want to pass in a path object, pandas accepts any ``os.PathLike``. By file-like object, we refer to objects with a ``read()`` method, such as a file handle (e.g. via builtin ``open`` function) or ``StringIO``. orient : str, optional Indication of expected JSON string format. Compatible JSON strings can be produced by ``to_json()`` with a corresponding orient value. The set of possible orients is: - ``'split'`` : dict like ``{{index -> [index], columns -> [columns], data -> [values]}}`` - ``'records'`` : list like ``[{{column -> value}}, ... , {{column -> value}}]`` - ``'index'`` : dict like ``{{index -> {{column -> value}}}}`` - ``'columns'`` : dict like ``{{column -> {{index -> value}}}}`` - ``'values'`` : just the values array The allowed and default values depend on the value of the `typ` parameter. * when ``typ == 'series'``, - allowed orients are ``{{'split','records','index'}}`` - default is ``'index'`` - The Series index must be unique for orient ``'index'``. * when ``typ == 'frame'``, - allowed orients are ``{{'split','records','index', 'columns','values', 'table'}}`` - default is ``'columns'`` - The DataFrame index must be unique for orients ``'index'`` and ``'columns'``. - The DataFrame columns must be unique for orients ``'index'``, ``'columns'``, and ``'records'``. typ : {{'frame', 'series'}}, default 'frame' The type of object to recover. dtype : bool or dict, default None If True, infer dtypes; if a dict of column to dtype, then use those; if False, then don't infer dtypes at all, applies only to the data. For all ``orient`` values except ``'table'``, default is True. convert_axes : bool, default None Try to convert the axes to the proper dtypes. For all ``orient`` values except ``'table'``, default is True. 
convert_dates : bool or list of str, default True If True then default datelike columns may be converted (depending on keep_default_dates). If False, no dates will be converted. If a list of column names, then those columns will be converted and default datelike columns may also be converted (depending on keep_default_dates). keep_default_dates : bool, default True If parsing dates (convert_dates is not False), then try to parse the default datelike columns. A column label is datelike if * it ends with ``'_at'``, * it ends with ``'_time'``, * it begins with ``'timestamp'``, * it is ``'modified'``, or * it is ``'date'``. precise_float : bool, default False Set to enable usage of higher precision (strtod) function when decoding string to double values. Default (False) is to use fast but less precise builtin functionality. date_unit : str, default None The timestamp unit to detect if converting dates. The default behaviour is to try and detect the correct precision, but if this is not desired then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds, milliseconds, microseconds or nanoseconds respectively. encoding : str, default is 'utf-8' The encoding to use to decode py3 bytes. encoding_errors : str, optional, default "strict" How encoding errors are treated. `List of possible values <https://docs.python.org/3/library/codecs.html#error-handlers>`_ . .. versionadded:: 1.3.0 lines : bool, default False Read the file as a json object per line. chunksize : int, optional Return JsonReader object for iteration. See the `line-delimited json docs <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#line-delimited-json>`_ for more information on ``chunksize``. This can only be passed if `lines=True`. If this is None, the file will be read into memory all at once. .. versionchanged:: 1.2 ``JsonReader`` is a context manager. {decompression_options} .. versionchanged:: 1.4.0 Zstandard support. nrows : int, optional The number of lines from the line-delimited JSON file that has to be read. This can only be passed if `lines=True`. If this is None, all the rows will be returned. .. versionadded:: 1.1 {storage_options} .. versionadded:: 1.2.0 dtype_backend : {{"numpy_nullable", "pyarrow"}}, defaults to NumPy backed DataFrames Which dtype_backend to use, e.g. whether a DataFrame should have NumPy arrays; nullable dtypes are used for all dtypes that have a nullable implementation when "numpy_nullable" is set, and pyarrow is used for all dtypes if "pyarrow" is set. The dtype_backends are still experimental. .. versionadded:: 2.0 engine : {{"ujson", "pyarrow"}}, default "ujson" Parser engine to use. The ``"pyarrow"`` engine is only available when ``lines=True``. .. versionadded:: 2.0 Returns ------- Series or DataFrame The type returned depends on the value of `typ`. See Also -------- DataFrame.to_json : Convert a DataFrame to a JSON string. Series.to_json : Convert a Series to a JSON string. json_normalize : Normalize semi-structured JSON data into a flat table. Notes ----- Specific to ``orient='table'``, if a :class:`DataFrame` with a literal :class:`Index` name of `index` gets written with :func:`to_json`, the subsequent read operation will incorrectly set the :class:`Index` name to ``None``. This is because `index` is also used by :func:`DataFrame.to_json` to denote a missing :class:`Index` name, and the subsequent :func:`read_json` operation cannot distinguish between the two. The same limitation is encountered with a :class:`MultiIndex` and any names beginning with ``'level_'``.
Examples -------- >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']], ... index=['row 1', 'row 2'], ... columns=['col 1', 'col 2']) Encoding/decoding a Dataframe using ``'split'`` formatted JSON: >>> df.to_json(orient='split') '\ {{\ "columns":["col 1","col 2"],\ "index":["row 1","row 2"],\ "data":[["a","b"],["c","d"]]\ }}\ ' >>> pd.read_json(_, orient='split') col 1 col 2 row 1 a b row 2 c d Encoding/decoding a Dataframe using ``'index'`` formatted JSON: >>> df.to_json(orient='index') '{{"row 1":{{"col 1":"a","col 2":"b"}},"row 2":{{"col 1":"c","col 2":"d"}}}}' >>> pd.read_json(_, orient='index') col 1 col 2 row 1 a b row 2 c d Encoding/decoding a Dataframe using ``'records'`` formatted JSON. Note that index labels are not preserved with this encoding. >>> df.to_json(orient='records') '[{{"col 1":"a","col 2":"b"}},{{"col 1":"c","col 2":"d"}}]' >>> pd.read_json(_, orient='records') col 1 col 2 0 a b 1 c d Encoding with Table Schema >>> df.to_json(orient='table') '\ {{"schema":{{"fields":[\ {{"name":"index","type":"string"}},\ {{"name":"col 1","type":"string"}},\ {{"name":"col 2","type":"string"}}],\ "primaryKey":["index"],\ "pandas_version":"1.4.0"}},\ "data":[\ {{"index":"row 1","col 1":"a","col 2":"b"}},\ {{"index":"row 2","col 1":"c","col 2":"d"}}]\ }}\ ' Here is the function: def read_json( path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes], *, orient: str | None = None, typ: Literal["frame", "series"] = "frame", dtype: DtypeArg | None = None, convert_axes=None, convert_dates: bool | list[str] = True, keep_default_dates: bool = True, precise_float: bool = False, date_unit: str | None = None, encoding: str | None = None, encoding_errors: str | None = "strict", lines: bool = False, chunksize: int | None = None, compression: CompressionOptions = "infer", nrows: int | None = None, storage_options: StorageOptions = None, dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, engine: JSONEngine = "ujson", ) -> DataFrame | Series | JsonReader: """ Convert a JSON string to pandas object. Parameters ---------- path_or_buf : a valid JSON str, path object or file-like object Any valid string path is acceptable. The string could be a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is expected. A local file could be: ``file://localhost/path/to/table.json``. If you want to pass in a path object, pandas accepts any ``os.PathLike``. By file-like object, we refer to objects with a ``read()`` method, such as a file handle (e.g. via builtin ``open`` function) or ``StringIO``. orient : str, optional Indication of expected JSON string format. Compatible JSON strings can be produced by ``to_json()`` with a corresponding orient value. The set of possible orients is: - ``'split'`` : dict like ``{{index -> [index], columns -> [columns], data -> [values]}}`` - ``'records'`` : list like ``[{{column -> value}}, ... , {{column -> value}}]`` - ``'index'`` : dict like ``{{index -> {{column -> value}}}}`` - ``'columns'`` : dict like ``{{column -> {{index -> value}}}}`` - ``'values'`` : just the values array The allowed and default values depend on the value of the `typ` parameter. * when ``typ == 'series'``, - allowed orients are ``{{'split','records','index'}}`` - default is ``'index'`` - The Series index must be unique for orient ``'index'``. * when ``typ == 'frame'``, - allowed orients are ``{{'split','records','index', 'columns','values', 'table'}}`` - default is ``'columns'`` - The DataFrame index must be unique for orients ``'index'`` and ``'columns'``. 
- The DataFrame columns must be unique for orients ``'index'``, ``'columns'``, and ``'records'``. typ : {{'frame', 'series'}}, default 'frame' The type of object to recover. dtype : bool or dict, default None If True, infer dtypes; if a dict of column to dtype, then use those; if False, then don't infer dtypes at all; applies only to the data. For all ``orient`` values except ``'table'``, default is True. convert_axes : bool, default None Try to convert the axes to the proper dtypes. For all ``orient`` values except ``'table'``, default is True. convert_dates : bool or list of str, default True If True then default datelike columns may be converted (depending on keep_default_dates). If False, no dates will be converted. If a list of column names, then those columns will be converted and default datelike columns may also be converted (depending on keep_default_dates). keep_default_dates : bool, default True If parsing dates (convert_dates is not False), then try to parse the default datelike columns. A column label is datelike if * it ends with ``'_at'``, * it ends with ``'_time'``, * it begins with ``'timestamp'``, * it is ``'modified'``, or * it is ``'date'``. precise_float : bool, default False Set to enable usage of higher precision (strtod) function when decoding string to double values. Default (False) is to use fast but less precise builtin functionality. date_unit : str, default None The timestamp unit to detect if converting dates. The default behaviour is to try and detect the correct precision, but if this is not desired then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds, milliseconds, microseconds or nanoseconds respectively. encoding : str, default is 'utf-8' The encoding to use to decode py3 bytes. encoding_errors : str, optional, default "strict" How encoding errors are treated. `List of possible values <https://docs.python.org/3/library/codecs.html#error-handlers>`_ . .. versionadded:: 1.3.0 lines : bool, default False Read the file as a json object per line. chunksize : int, optional Return JsonReader object for iteration. See the `line-delimited json docs <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#line-delimited-json>`_ for more information on ``chunksize``. This can only be passed if `lines=True`. If this is None, the file will be read into memory all at once. .. versionchanged:: 1.2 ``JsonReader`` is a context manager. {decompression_options} .. versionchanged:: 1.4.0 Zstandard support. nrows : int, optional The number of lines from the line-delimited JSON file that has to be read. This can only be passed if `lines=True`. If this is None, all the rows will be returned. .. versionadded:: 1.1 {storage_options} .. versionadded:: 1.2.0 dtype_backend : {{"numpy_nullable", "pyarrow"}}, defaults to NumPy backed DataFrames Which dtype_backend to use, e.g. whether a DataFrame should have NumPy arrays; nullable dtypes are used for all dtypes that have a nullable implementation when "numpy_nullable" is set, and pyarrow is used for all dtypes if "pyarrow" is set. The dtype_backends are still experimental. .. versionadded:: 2.0 engine : {{"ujson", "pyarrow"}}, default "ujson" Parser engine to use. The ``"pyarrow"`` engine is only available when ``lines=True``. .. versionadded:: 2.0 Returns ------- Series or DataFrame The type returned depends on the value of `typ`. See Also -------- DataFrame.to_json : Convert a DataFrame to a JSON string. Series.to_json : Convert a Series to a JSON string.
json_normalize : Normalize semi-structured JSON data into a flat table. Notes ----- Specific to ``orient='table'``, if a :class:`DataFrame` with a literal :class:`Index` name of `index` gets written with :func:`to_json`, the subsequent read operation will incorrectly set the :class:`Index` name to ``None``. This is because `index` is also used by :func:`DataFrame.to_json` to denote a missing :class:`Index` name, and the subsequent :func:`read_json` operation cannot distinguish between the two. The same limitation is encountered with a :class:`MultiIndex` and any names beginning with ``'level_'``. Examples -------- >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']], ... index=['row 1', 'row 2'], ... columns=['col 1', 'col 2']) Encoding/decoding a Dataframe using ``'split'`` formatted JSON: >>> df.to_json(orient='split') '\ {{\ "columns":["col 1","col 2"],\ "index":["row 1","row 2"],\ "data":[["a","b"],["c","d"]]\ }}\ ' >>> pd.read_json(_, orient='split') col 1 col 2 row 1 a b row 2 c d Encoding/decoding a Dataframe using ``'index'`` formatted JSON: >>> df.to_json(orient='index') '{{"row 1":{{"col 1":"a","col 2":"b"}},"row 2":{{"col 1":"c","col 2":"d"}}}}' >>> pd.read_json(_, orient='index') col 1 col 2 row 1 a b row 2 c d Encoding/decoding a Dataframe using ``'records'`` formatted JSON. Note that index labels are not preserved with this encoding. >>> df.to_json(orient='records') '[{{"col 1":"a","col 2":"b"}},{{"col 1":"c","col 2":"d"}}]' >>> pd.read_json(_, orient='records') col 1 col 2 0 a b 1 c d Encoding with Table Schema >>> df.to_json(orient='table') '\ {{"schema":{{"fields":[\ {{"name":"index","type":"string"}},\ {{"name":"col 1","type":"string"}},\ {{"name":"col 2","type":"string"}}],\ "primaryKey":["index"],\ "pandas_version":"1.4.0"}},\ "data":[\ {{"index":"row 1","col 1":"a","col 2":"b"}},\ {{"index":"row 2","col 1":"c","col 2":"d"}}]\ }}\ ' """ if orient == "table" and dtype: raise ValueError("cannot pass both dtype and orient='table'") if orient == "table" and convert_axes: raise ValueError("cannot pass both convert_axes and orient='table'") check_dtype_backend(dtype_backend) if dtype is None and orient != "table": # error: Incompatible types in assignment (expression has type "bool", variable # has type "Union[ExtensionDtype, str, dtype[Any], Type[str], Type[float], # Type[int], Type[complex], Type[bool], Type[object], Dict[Hashable, # Union[ExtensionDtype, Union[str, dtype[Any]], Type[str], Type[float], # Type[int], Type[complex], Type[bool], Type[object]]], None]") dtype = True # type: ignore[assignment] if convert_axes is None and orient != "table": convert_axes = True json_reader = JsonReader( path_or_buf, orient=orient, typ=typ, dtype=dtype, convert_axes=convert_axes, convert_dates=convert_dates, keep_default_dates=keep_default_dates, precise_float=precise_float, date_unit=date_unit, encoding=encoding, lines=lines, chunksize=chunksize, compression=compression, nrows=nrows, storage_options=storage_options, encoding_errors=encoding_errors, dtype_backend=dtype_backend, engine=engine, ) if chunksize: return json_reader else: return json_reader.read()
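A short sketch of the guard clauses at the top of the implementation above: ``dtype`` and ``convert_axes`` are rejected for ``orient="table"``, since the Table Schema payload already carries that information.

import pandas as pd
from io import StringIO

df = pd.DataFrame({"a": [1, 2]})
payload = df.to_json(orient="table")

# Round-trips cleanly: the schema in the payload supplies the dtypes.
restored = pd.read_json(StringIO(payload), orient="table")

# Combining orient="table" with dtype (or convert_axes) raises.
try:
    pd.read_json(StringIO(payload), orient="table", dtype={"a": "float64"})
except ValueError as err:
    print(err)  # cannot pass both dtype and orient='table'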
Convert a JSON string to pandas object. Parameters ---------- path_or_buf : a valid JSON str, path object or file-like object Any valid string path is acceptable. The string could be a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is expected. A local file could be: ``file://localhost/path/to/table.json``. If you want to pass in a path object, pandas accepts any ``os.PathLike``. By file-like object, we refer to objects with a ``read()`` method, such as a file handle (e.g. via builtin ``open`` function) or ``StringIO``. orient : str, optional Indication of expected JSON string format. Compatible JSON strings can be produced by ``to_json()`` with a corresponding orient value. The set of possible orients is: - ``'split'`` : dict like ``{{index -> [index], columns -> [columns], data -> [values]}}`` - ``'records'`` : list like ``[{{column -> value}}, ... , {{column -> value}}]`` - ``'index'`` : dict like ``{{index -> {{column -> value}}}}`` - ``'columns'`` : dict like ``{{column -> {{index -> value}}}}`` - ``'values'`` : just the values array The allowed and default values depend on the value of the `typ` parameter. * when ``typ == 'series'``, - allowed orients are ``{{'split','records','index'}}`` - default is ``'index'`` - The Series index must be unique for orient ``'index'``. * when ``typ == 'frame'``, - allowed orients are ``{{'split','records','index', 'columns','values', 'table'}}`` - default is ``'columns'`` - The DataFrame index must be unique for orients ``'index'`` and ``'columns'``. - The DataFrame columns must be unique for orients ``'index'``, ``'columns'``, and ``'records'``. typ : {{'frame', 'series'}}, default 'frame' The type of object to recover. dtype : bool or dict, default None If True, infer dtypes; if a dict of column to dtype, then use those; if False, then don't infer dtypes at all, applies only to the data. For all ``orient`` values except ``'table'``, default is True. convert_axes : bool, default None Try to convert the axes to the proper dtypes. For all ``orient`` values except ``'table'``, default is True. convert_dates : bool or list of str, default True If True then default datelike columns may be converted (depending on keep_default_dates). If False, no dates will be converted. If a list of column names, then those columns will be converted and default datelike columns may also be converted (depending on keep_default_dates). keep_default_dates : bool, default True If parsing dates (convert_dates is not False), then try to parse the default datelike columns. A column label is datelike if * it ends with ``'_at'``, * it ends with ``'_time'``, * it begins with ``'timestamp'``, * it is ``'modified'``, or * it is ``'date'``. precise_float : bool, default False Set to enable usage of higher precision (strtod) function when decoding string to double values. Default (False) is to use fast but less precise builtin functionality. date_unit : str, default None The timestamp unit to detect if converting dates. The default behaviour is to try and detect the correct precision, but if this is not desired then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds, milliseconds, microseconds or nanoseconds respectively. encoding : str, default is 'utf-8' The encoding to use to decode py3 bytes. encoding_errors : str, optional, default "strict" How encoding errors are treated. `List of possible values <https://docs.python.org/3/library/codecs.html#error-handlers>`_ . .. 
versionadded:: 1.3.0 lines : bool, default False Read the file as a json object per line. chunksize : int, optional Return JsonReader object for iteration. See the `line-delimited json docs <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#line-delimited-json>`_ for more information on ``chunksize``. This can only be passed if `lines=True`. If this is None, the file will be read into memory all at once. .. versionchanged:: 1.2 ``JsonReader`` is a context manager. {decompression_options} .. versionchanged:: 1.4.0 Zstandard support. nrows : int, optional The number of lines from the line-delimited JSON file that has to be read. This can only be passed if `lines=True`. If this is None, all the rows will be returned. .. versionadded:: 1.1 {storage_options} .. versionadded:: 1.2.0 dtype_backend : {{"numpy_nullable", "pyarrow"}}, defaults to NumPy backed DataFrames Which dtype_backend to use, e.g. whether a DataFrame should have NumPy arrays; nullable dtypes are used for all dtypes that have a nullable implementation when "numpy_nullable" is set, and pyarrow is used for all dtypes if "pyarrow" is set. The dtype_backends are still experimental. .. versionadded:: 2.0 engine : {{"ujson", "pyarrow"}}, default "ujson" Parser engine to use. The ``"pyarrow"`` engine is only available when ``lines=True``. .. versionadded:: 2.0 Returns ------- Series or DataFrame The type returned depends on the value of `typ`. See Also -------- DataFrame.to_json : Convert a DataFrame to a JSON string. Series.to_json : Convert a Series to a JSON string. json_normalize : Normalize semi-structured JSON data into a flat table. Notes ----- Specific to ``orient='table'``, if a :class:`DataFrame` with a literal :class:`Index` name of `index` gets written with :func:`to_json`, the subsequent read operation will incorrectly set the :class:`Index` name to ``None``. This is because `index` is also used by :func:`DataFrame.to_json` to denote a missing :class:`Index` name, and the subsequent :func:`read_json` operation cannot distinguish between the two. The same limitation is encountered with a :class:`MultiIndex` and any names beginning with ``'level_'``. Examples -------- >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']], ... index=['row 1', 'row 2'], ... columns=['col 1', 'col 2']) Encoding/decoding a DataFrame using ``'split'`` formatted JSON: >>> df.to_json(orient='split') '\ {{\ "columns":["col 1","col 2"],\ "index":["row 1","row 2"],\ "data":[["a","b"],["c","d"]]\ }}\ ' >>> pd.read_json(_, orient='split') col 1 col 2 row 1 a b row 2 c d Encoding/decoding a DataFrame using ``'index'`` formatted JSON: >>> df.to_json(orient='index') '{{"row 1":{{"col 1":"a","col 2":"b"}},"row 2":{{"col 1":"c","col 2":"d"}}}}' >>> pd.read_json(_, orient='index') col 1 col 2 row 1 a b row 2 c d Encoding/decoding a DataFrame using ``'records'`` formatted JSON. Note that index labels are not preserved with this encoding. >>> df.to_json(orient='records') '[{{"col 1":"a","col 2":"b"}},{{"col 1":"c","col 2":"d"}}]' >>> pd.read_json(_, orient='records') col 1 col 2 0 a b 1 c d Encoding with Table Schema >>> df.to_json(orient='table') '\ {{"schema":{{"fields":[\ {{"name":"index","type":"string"}},\ {{"name":"col 1","type":"string"}},\ {{"name":"col 2","type":"string"}}],\ "primaryKey":["index"],\ "pandas_version":"1.4.0"}},\ "data":[\ {{"index":"row 1","col 1":"a","col 2":"b"}},\ {{"index":"row 2","col 1":"c","col 2":"d"}}]\ }}\ '
173,507
from __future__ import annotations from collections import ( abc, defaultdict, ) import copy import sys from typing import ( Any, DefaultDict, Iterable, ) import numpy as np from pandas._libs.writers import convert_json_to_lines from pandas._typing import ( IgnoreRaise, Scalar, ) import pandas as pd from pandas import DataFrame def nested_to_record( ds, prefix: str = "", sep: str = ".", level: int = 0, max_level: int | None = None, ): """ A simplified json_normalize Converts a nested dict into a flat dict ("record"), unlike json_normalize, it does not attempt to extract a subset of the data. Parameters ---------- ds : dict or list of dicts prefix: the prefix, optional, default: "" sep : str, default '.' Nested records will generate names separated by sep, e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar level: int, optional, default: 0 The number of levels in the json string. max_level: int, optional, default: None The max depth to normalize. Returns ------- d - dict or list of dicts, matching `ds` Examples -------- >>> nested_to_record( ... dict(flat1=1, dict1=dict(c=1, d=2), nested=dict(e=dict(c=1, d=2), d=2)) ... ) {\ 'flat1': 1, \ 'dict1.c': 1, \ 'dict1.d': 2, \ 'nested.e.c': 1, \ 'nested.e.d': 2, \ 'nested.d': 2\ } """ singleton = False if isinstance(ds, dict): ds = [ds] singleton = True new_ds = [] for d in ds: new_d = copy.deepcopy(d) for k, v in d.items(): # each key gets renamed with prefix if not isinstance(k, str): k = str(k) if level == 0: newkey = k else: newkey = prefix + sep + k # flatten if type is dict and # current dict level < maximum level provided and # only dicts get recurse-flattened # only at level>1 do we rename the rest of the keys if not isinstance(v, dict) or ( max_level is not None and level >= max_level ): if level != 0: # so we skip copying for top level, common case v = new_d.pop(k) new_d[newkey] = v continue v = new_d.pop(k) new_d.update(nested_to_record(v, newkey, sep, level + 1, max_level)) new_ds.append(new_d) if singleton: return new_ds[0] return new_ds def _simple_json_normalize( ds: dict | list[dict], sep: str = ".", ) -> dict | list[dict] | Any: """ An optimized basic json_normalize Converts a nested dict into a flat dict ("record"), unlike json_normalize and nested_to_record it doesn't do anything clever. But for the most basic use cases it enhances performance. E.g. pd.json_normalize(data) Parameters ---------- ds : dict or list of dicts sep : str, default '.' Nested records will generate names separated by sep, e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar Returns ------- frame : DataFrame d - dict or list of dicts, matching `normalised_json_object` Examples -------- >>> _simple_json_normalize( ... { ... "flat1": 1, ... "dict1": {"c": 1, "d": 2}, ... "nested": {"e": {"c": 1, "d": 2}, "d": 2}, ... } ... ) {\ 'flat1': 1, \ 'dict1.c': 1, \ 'dict1.d': 2, \ 'nested.e.c': 1, \ 'nested.e.d': 2, \ 'nested.d': 2\ } """ normalised_json_object = {} # expect a dictionary, as most jsons are. However, lists are perfectly valid if isinstance(ds, dict): normalised_json_object = _normalise_json_ordered(data=ds, separator=sep) elif isinstance(ds, list): normalised_json_list = [_simple_json_normalize(row, sep=sep) for row in ds] return normalised_json_list return normalised_json_object class defaultdict(Dict[_KT, _VT], Generic[_KT, _VT]): default_factory: Callable[[], _VT] def __init__(self, **kwargs: _VT) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]]) -> None: ...
def __init__(self, default_factory: Optional[Callable[[], _VT]], **kwargs: _VT) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], map: Mapping[_KT, _VT]) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], iterable: Iterable[Tuple[_KT, _VT]]) -> None: ... def __init__( self, default_factory: Optional[Callable[[], _VT]], iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT ) -> None: ... def __missing__(self, key: _KT) -> _VT: ... def copy(self: _S) -> _S: ... Any = object() DefaultDict = _Alias() class Iterable(Protocol[_T_co]): def __iter__(self) -> Iterator[_T_co]: ... Scalar = Union[PythonScalar, PandasScalar, np.datetime64, np.timedelta64, datetime] IgnoreRaise = Literal["ignore", "raise"] The provided code snippet includes necessary dependencies for implementing the `json_normalize` function. Write a Python function `def json_normalize( data: dict | list[dict], record_path: str | list | None = None, meta: str | list[str | list[str]] | None = None, meta_prefix: str | None = None, record_prefix: str | None = None, errors: IgnoreRaise = "raise", sep: str = ".", max_level: int | None = None, ) -> DataFrame` to solve the following problem: Normalize semi-structured JSON data into a flat table. Parameters ---------- data : dict or list of dicts Unserialized JSON objects. record_path : str or list of str, default None Path in each object to list of records. If not passed, data will be assumed to be an array of records. meta : list of paths (str or list of str), default None Fields to use as metadata for each record in resulting table. meta_prefix : str, default None If True, prefix records with dotted (?) path, e.g. foo.bar.field if meta is ['foo', 'bar']. record_prefix : str, default None If True, prefix records with dotted (?) path, e.g. foo.bar.field if path to records is ['foo', 'bar']. errors : {'raise', 'ignore'}, default 'raise' Configures error handling. * 'ignore' : will ignore KeyError if keys listed in meta are not always present. * 'raise' : will raise KeyError if keys listed in meta are not always present. sep : str, default '.' Nested records will generate names separated by sep. e.g., for sep='.', {'foo': {'bar': 0}} -> foo.bar. max_level : int, default None Max number of levels(depth of dict) to normalize. if None, normalizes all levels. Returns ------- frame : DataFrame Normalize semi-structured JSON data into a flat table. Examples -------- >>> data = [ ... {"id": 1, "name": {"first": "Coleen", "last": "Volk"}}, ... {"name": {"given": "Mark", "family": "Regner"}}, ... {"id": 2, "name": "Faye Raker"}, ... ] >>> pd.json_normalize(data) id name.first name.last name.given name.family name 0 1.0 Coleen Volk NaN NaN NaN 1 NaN NaN NaN Mark Regner NaN 2 2.0 NaN NaN NaN NaN Faye Raker >>> data = [ ... { ... "id": 1, ... "name": "Cole Volk", ... "fitness": {"height": 130, "weight": 60}, ... }, ... {"name": "Mark Reg", "fitness": {"height": 130, "weight": 60}}, ... { ... "id": 2, ... "name": "Faye Raker", ... "fitness": {"height": 130, "weight": 60}, ... }, ... ] >>> pd.json_normalize(data, max_level=0) id name fitness 0 1.0 Cole Volk {'height': 130, 'weight': 60} 1 NaN Mark Reg {'height': 130, 'weight': 60} 2 2.0 Faye Raker {'height': 130, 'weight': 60} Normalizes nested data up to level 1. >>> data = [ ... { ... "id": 1, ... "name": "Cole Volk", ... "fitness": {"height": 130, "weight": 60}, ... }, ... 
{"name": "Mark Reg", "fitness": {"height": 130, "weight": 60}}, ... { ... "id": 2, ... "name": "Faye Raker", ... "fitness": {"height": 130, "weight": 60}, ... }, ... ] >>> pd.json_normalize(data, max_level=1) id name fitness.height fitness.weight 0 1.0 Cole Volk 130 60 1 NaN Mark Reg 130 60 2 2.0 Faye Raker 130 60 >>> data = [ ... { ... "state": "Florida", ... "shortname": "FL", ... "info": {"governor": "Rick Scott"}, ... "counties": [ ... {"name": "Dade", "population": 12345}, ... {"name": "Broward", "population": 40000}, ... {"name": "Palm Beach", "population": 60000}, ... ], ... }, ... { ... "state": "Ohio", ... "shortname": "OH", ... "info": {"governor": "John Kasich"}, ... "counties": [ ... {"name": "Summit", "population": 1234}, ... {"name": "Cuyahoga", "population": 1337}, ... ], ... }, ... ] >>> result = pd.json_normalize( ... data, "counties", ["state", "shortname", ["info", "governor"]] ... ) >>> result name population state shortname info.governor 0 Dade 12345 Florida FL Rick Scott 1 Broward 40000 Florida FL Rick Scott 2 Palm Beach 60000 Florida FL Rick Scott 3 Summit 1234 Ohio OH John Kasich 4 Cuyahoga 1337 Ohio OH John Kasich >>> data = {"A": [1, 2]} >>> pd.json_normalize(data, "A", record_prefix="Prefix.") Prefix.0 0 1 1 2 Returns normalized data with columns prefixed with the given string. Here is the function: def json_normalize( data: dict | list[dict], record_path: str | list | None = None, meta: str | list[str | list[str]] | None = None, meta_prefix: str | None = None, record_prefix: str | None = None, errors: IgnoreRaise = "raise", sep: str = ".", max_level: int | None = None, ) -> DataFrame: """ Normalize semi-structured JSON data into a flat table. Parameters ---------- data : dict or list of dicts Unserialized JSON objects. record_path : str or list of str, default None Path in each object to list of records. If not passed, data will be assumed to be an array of records. meta : list of paths (str or list of str), default None Fields to use as metadata for each record in resulting table. meta_prefix : str, default None If True, prefix records with dotted (?) path, e.g. foo.bar.field if meta is ['foo', 'bar']. record_prefix : str, default None If True, prefix records with dotted (?) path, e.g. foo.bar.field if path to records is ['foo', 'bar']. errors : {'raise', 'ignore'}, default 'raise' Configures error handling. * 'ignore' : will ignore KeyError if keys listed in meta are not always present. * 'raise' : will raise KeyError if keys listed in meta are not always present. sep : str, default '.' Nested records will generate names separated by sep. e.g., for sep='.', {'foo': {'bar': 0}} -> foo.bar. max_level : int, default None Max number of levels(depth of dict) to normalize. if None, normalizes all levels. Returns ------- frame : DataFrame Normalize semi-structured JSON data into a flat table. Examples -------- >>> data = [ ... {"id": 1, "name": {"first": "Coleen", "last": "Volk"}}, ... {"name": {"given": "Mark", "family": "Regner"}}, ... {"id": 2, "name": "Faye Raker"}, ... ] >>> pd.json_normalize(data) id name.first name.last name.given name.family name 0 1.0 Coleen Volk NaN NaN NaN 1 NaN NaN NaN Mark Regner NaN 2 2.0 NaN NaN NaN NaN Faye Raker >>> data = [ ... { ... "id": 1, ... "name": "Cole Volk", ... "fitness": {"height": 130, "weight": 60}, ... }, ... {"name": "Mark Reg", "fitness": {"height": 130, "weight": 60}}, ... { ... "id": 2, ... "name": "Faye Raker", ... "fitness": {"height": 130, "weight": 60}, ... }, ... 
] >>> pd.json_normalize(data, max_level=0) id name fitness 0 1.0 Cole Volk {'height': 130, 'weight': 60} 1 NaN Mark Reg {'height': 130, 'weight': 60} 2 2.0 Faye Raker {'height': 130, 'weight': 60} Normalizes nested data up to level 1. >>> data = [ ... { ... "id": 1, ... "name": "Cole Volk", ... "fitness": {"height": 130, "weight": 60}, ... }, ... {"name": "Mark Reg", "fitness": {"height": 130, "weight": 60}}, ... { ... "id": 2, ... "name": "Faye Raker", ... "fitness": {"height": 130, "weight": 60}, ... }, ... ] >>> pd.json_normalize(data, max_level=1) id name fitness.height fitness.weight 0 1.0 Cole Volk 130 60 1 NaN Mark Reg 130 60 2 2.0 Faye Raker 130 60 >>> data = [ ... { ... "state": "Florida", ... "shortname": "FL", ... "info": {"governor": "Rick Scott"}, ... "counties": [ ... {"name": "Dade", "population": 12345}, ... {"name": "Broward", "population": 40000}, ... {"name": "Palm Beach", "population": 60000}, ... ], ... }, ... { ... "state": "Ohio", ... "shortname": "OH", ... "info": {"governor": "John Kasich"}, ... "counties": [ ... {"name": "Summit", "population": 1234}, ... {"name": "Cuyahoga", "population": 1337}, ... ], ... }, ... ] >>> result = pd.json_normalize( ... data, "counties", ["state", "shortname", ["info", "governor"]] ... ) >>> result name population state shortname info.governor 0 Dade 12345 Florida FL Rick Scott 1 Broward 40000 Florida FL Rick Scott 2 Palm Beach 60000 Florida FL Rick Scott 3 Summit 1234 Ohio OH John Kasich 4 Cuyahoga 1337 Ohio OH John Kasich >>> data = {"A": [1, 2]} >>> pd.json_normalize(data, "A", record_prefix="Prefix.") Prefix.0 0 1 1 2 Returns normalized data with columns prefixed with the given string. """ def _pull_field( js: dict[str, Any], spec: list | str, extract_record: bool = False ) -> Scalar | Iterable: """Internal function to pull field""" result = js try: if isinstance(spec, list): for field in spec: if result is None: raise KeyError(field) result = result[field] else: result = result[spec] except KeyError as e: if extract_record: raise KeyError( f"Key {e} not found. If specifying a record_path, all elements of " f"data should have the path." ) from e if errors == "ignore": return np.nan else: raise KeyError( f"Key {e} not found. To replace missing values of {e} with " f"np.nan, pass in errors='ignore'" ) from e return result def _pull_records(js: dict[str, Any], spec: list | str) -> list: """ Internal function to pull field for records, and similar to _pull_field, but require to return list. And will raise error if has non iterable value. """ result = _pull_field(js, spec, extract_record=True) # GH 31507 GH 30145, GH 26284 if result is not list, raise TypeError if not # null, otherwise return an empty list if not isinstance(result, list): if pd.isnull(result): result = [] else: raise TypeError( f"{js} has non list value {result} for path {spec}. " "Must be list or null." 
) return result if isinstance(data, list) and not data: return DataFrame() elif isinstance(data, dict): # A bit of a hackjob data = [data] elif isinstance(data, abc.Iterable) and not isinstance(data, str): # GH35923 Fix pd.json_normalize to not skip the first element of a # generator input data = list(data) else: raise NotImplementedError # check to see if a simple recursive function is possible to # improve performance (see #15621) but only for cases such # as pd.Dataframe(data) or pd.Dataframe(data, sep) if ( record_path is None and meta is None and meta_prefix is None and record_prefix is None and max_level is None ): return DataFrame(_simple_json_normalize(data, sep=sep)) if record_path is None: if any([isinstance(x, dict) for x in y.values()] for y in data): # naive normalization, this is idempotent for flat records # and potentially will inflate the data considerably for # deeply nested structures: # {VeryLong: { b: 1,c:2}} -> {VeryLong.b:1 ,VeryLong.c:@} # # TODO: handle record value which are lists, at least error # reasonably data = nested_to_record(data, sep=sep, max_level=max_level) return DataFrame(data) elif not isinstance(record_path, list): record_path = [record_path] if meta is None: meta = [] elif not isinstance(meta, list): meta = [meta] _meta = [m if isinstance(m, list) else [m] for m in meta] # Disastrously inefficient for now records: list = [] lengths = [] meta_vals: DefaultDict = defaultdict(list) meta_keys = [sep.join(val) for val in _meta] def _recursive_extract(data, path, seen_meta, level: int = 0) -> None: if isinstance(data, dict): data = [data] if len(path) > 1: for obj in data: for val, key in zip(_meta, meta_keys): if level + 1 == len(val): seen_meta[key] = _pull_field(obj, val[-1]) _recursive_extract(obj[path[0]], path[1:], seen_meta, level=level + 1) else: for obj in data: recs = _pull_records(obj, path[0]) recs = [ nested_to_record(r, sep=sep, max_level=max_level) if isinstance(r, dict) else r for r in recs ] # For repeating the metadata later lengths.append(len(recs)) for val, key in zip(_meta, meta_keys): if level + 1 > len(val): meta_val = seen_meta[key] else: meta_val = _pull_field(obj, val[level:]) meta_vals[key].append(meta_val) records.extend(recs) _recursive_extract(data, record_path, {}, level=0) result = DataFrame(records) if record_prefix is not None: result = result.rename(columns=lambda x: f"{record_prefix}{x}") # Data types, a problem for k, v in meta_vals.items(): if meta_prefix is not None: k = meta_prefix + k if k in result: raise ValueError( f"Conflicting metadata name {k}, need distinguishing prefix " ) result[k] = np.array(v, dtype=object).repeat(lengths) return result
Normalize semi-structured JSON data into a flat table. Parameters ---------- data : dict or list of dicts Unserialized JSON objects. record_path : str or list of str, default None Path in each object to list of records. If not passed, data will be assumed to be an array of records. meta : list of paths (str or list of str), default None Fields to use as metadata for each record in resulting table. meta_prefix : str, default None If True, prefix records with dotted (?) path, e.g. foo.bar.field if meta is ['foo', 'bar']. record_prefix : str, default None If True, prefix records with dotted (?) path, e.g. foo.bar.field if path to records is ['foo', 'bar']. errors : {'raise', 'ignore'}, default 'raise' Configures error handling. * 'ignore' : will ignore KeyError if keys listed in meta are not always present. * 'raise' : will raise KeyError if keys listed in meta are not always present. sep : str, default '.' Nested records will generate names separated by sep. e.g., for sep='.', {'foo': {'bar': 0}} -> foo.bar. max_level : int, default None Max number of levels(depth of dict) to normalize. if None, normalizes all levels. Returns ------- frame : DataFrame Normalize semi-structured JSON data into a flat table. Examples -------- >>> data = [ ... {"id": 1, "name": {"first": "Coleen", "last": "Volk"}}, ... {"name": {"given": "Mark", "family": "Regner"}}, ... {"id": 2, "name": "Faye Raker"}, ... ] >>> pd.json_normalize(data) id name.first name.last name.given name.family name 0 1.0 Coleen Volk NaN NaN NaN 1 NaN NaN NaN Mark Regner NaN 2 2.0 NaN NaN NaN NaN Faye Raker >>> data = [ ... { ... "id": 1, ... "name": "Cole Volk", ... "fitness": {"height": 130, "weight": 60}, ... }, ... {"name": "Mark Reg", "fitness": {"height": 130, "weight": 60}}, ... { ... "id": 2, ... "name": "Faye Raker", ... "fitness": {"height": 130, "weight": 60}, ... }, ... ] >>> pd.json_normalize(data, max_level=0) id name fitness 0 1.0 Cole Volk {'height': 130, 'weight': 60} 1 NaN Mark Reg {'height': 130, 'weight': 60} 2 2.0 Faye Raker {'height': 130, 'weight': 60} Normalizes nested data up to level 1. >>> data = [ ... { ... "id": 1, ... "name": "Cole Volk", ... "fitness": {"height": 130, "weight": 60}, ... }, ... {"name": "Mark Reg", "fitness": {"height": 130, "weight": 60}}, ... { ... "id": 2, ... "name": "Faye Raker", ... "fitness": {"height": 130, "weight": 60}, ... }, ... ] >>> pd.json_normalize(data, max_level=1) id name fitness.height fitness.weight 0 1.0 Cole Volk 130 60 1 NaN Mark Reg 130 60 2 2.0 Faye Raker 130 60 >>> data = [ ... { ... "state": "Florida", ... "shortname": "FL", ... "info": {"governor": "Rick Scott"}, ... "counties": [ ... {"name": "Dade", "population": 12345}, ... {"name": "Broward", "population": 40000}, ... {"name": "Palm Beach", "population": 60000}, ... ], ... }, ... { ... "state": "Ohio", ... "shortname": "OH", ... "info": {"governor": "John Kasich"}, ... "counties": [ ... {"name": "Summit", "population": 1234}, ... {"name": "Cuyahoga", "population": 1337}, ... ], ... }, ... ] >>> result = pd.json_normalize( ... data, "counties", ["state", "shortname", ["info", "governor"]] ... ) >>> result name population state shortname info.governor 0 Dade 12345 Florida FL Rick Scott 1 Broward 40000 Florida FL Rick Scott 2 Palm Beach 60000 Florida FL Rick Scott 3 Summit 1234 Ohio OH John Kasich 4 Cuyahoga 1337 Ohio OH John Kasich >>> data = {"A": [1, 2]} >>> pd.json_normalize(data, "A", record_prefix="Prefix.") Prefix.0 0 1 1 2 Returns normalized data with columns prefixed with the given string.
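The docstring above already carries worked examples; for completeness, here is a minimal self-contained sketch of the most common call shape (`record_path` plus `meta`). It assumes only that pandas is importable as `pd`; the data and names are lifted from the docstring's own example, so the printed frame matches the documented output.

```python
import pandas as pd

data = [
    {
        "state": "Florida",
        "shortname": "FL",
        "info": {"governor": "Rick Scott"},
        "counties": [
            {"name": "Dade", "population": 12345},
            {"name": "Broward", "population": 40000},
        ],
    }
]

# Flatten the nested "counties" records; carry the state-level fields
# along as per-row metadata, joining nested meta paths with sep=".".
result = pd.json_normalize(
    data,
    record_path="counties",
    meta=["state", ["info", "governor"]],
    sep=".",
)
print(result)
#       name  population    state info.governor
# 0     Dade       12345  Florida    Rick Scott
# 1  Broward       40000  Florida    Rick Scott
```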
173,508
from __future__ import annotations from contextlib import suppress import copy from datetime import ( date, tzinfo, ) import itertools import os import re from textwrap import dedent from types import TracebackType from typing import ( TYPE_CHECKING, Any, Callable, Final, Hashable, Iterator, Literal, Sequence, cast, overload, ) import warnings import numpy as np from pandas._config import ( config, get_option, ) from pandas._libs import ( lib, writers as libwriters, ) from pandas._libs.tslibs import timezones from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeArg, FilePath, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.compat.pickle_compat import patch_pickle from pandas.errors import ( AttributeConflictWarning, ClosedFileError, IncompatibilityWarning, PerformanceWarning, PossibleDataLossError, ) from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_bool_dtype, is_categorical_dtype, is_complex_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_extension_array_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_string_dtype, is_timedelta64_dtype, needs_i8_conversion, ) from pandas.core.dtypes.missing import array_equivalent from pandas import ( DataFrame, DatetimeIndex, Index, MultiIndex, PeriodIndex, RangeIndex, Series, TimedeltaIndex, concat, isna, ) from pandas.core.arrays import ( Categorical, DatetimeArray, PeriodArray, ) import pandas.core.common as com from pandas.core.computation.pytables import ( PyTablesExpr, maybe_expression, ) from pandas.core.construction import extract_array from pandas.core.indexes.api import ensure_index from pandas.core.internals import ( ArrayManager, BlockManager, ) from pandas.io.common import stringify_path from pandas.io.formats.printing import ( adjoin, pprint_thing, ) _default_encoding = "UTF-8" def _ensure_encoding(encoding: str | None) -> str: # set the encoding if we need if encoding is None: encoding = _default_encoding return encoding
null
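The docstring field for this entry is null, so a short illustrative sketch may help. The helper's behavior is taken verbatim from the end of the prompt above: fall back to the module-level UTF-8 default when no encoding is supplied, otherwise pass the value through.

```python
from __future__ import annotations

_default_encoding = "UTF-8"

def _ensure_encoding(encoding: str | None) -> str:
    # None falls back to the module-level default encoding.
    if encoding is None:
        encoding = _default_encoding
    return encoding

assert _ensure_encoding(None) == "UTF-8"          # default applied
assert _ensure_encoding("latin-1") == "latin-1"   # explicit value kept
```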
173,509
from __future__ import annotations from contextlib import suppress import copy from datetime import ( date, tzinfo, ) import itertools import os import re from textwrap import dedent from types import TracebackType from typing import ( TYPE_CHECKING, Any, Callable, Final, Hashable, Iterator, Literal, Sequence, cast, overload, ) import warnings import numpy as np from pandas._config import ( config, get_option, ) from pandas._libs import ( lib, writers as libwriters, ) from pandas._libs.tslibs import timezones from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeArg, FilePath, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.compat.pickle_compat import patch_pickle from pandas.errors import ( AttributeConflictWarning, ClosedFileError, IncompatibilityWarning, PerformanceWarning, PossibleDataLossError, ) from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_bool_dtype, is_categorical_dtype, is_complex_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_extension_array_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_string_dtype, is_timedelta64_dtype, needs_i8_conversion, ) from pandas.core.dtypes.missing import array_equivalent from pandas import ( DataFrame, DatetimeIndex, Index, MultiIndex, PeriodIndex, RangeIndex, Series, TimedeltaIndex, concat, isna, ) from pandas.core.arrays import ( Categorical, DatetimeArray, PeriodArray, ) import pandas.core.common as com from pandas.core.computation.pytables import ( PyTablesExpr, maybe_expression, ) from pandas.core.construction import extract_array from pandas.core.indexes.api import ensure_index from pandas.core.internals import ( ArrayManager, BlockManager, ) from pandas.io.common import stringify_path from pandas.io.formats.printing import ( adjoin, pprint_thing, ) The provided code snippet includes necessary dependencies for implementing the `_ensure_str` function. Write a Python function `def _ensure_str(name)` to solve the following problem: Ensure that an index / column name is a str (python 3); otherwise they may be np.string dtype. Non-string dtypes are passed through unchanged. https://github.com/pandas-dev/pandas/issues/13492 Here is the function: def _ensure_str(name): """ Ensure that an index / column name is a str (python 3); otherwise they may be np.string dtype. Non-string dtypes are passed through unchanged. https://github.com/pandas-dev/pandas/issues/13492 """ if isinstance(name, str): name = str(name) return name
Ensure that an index / column name is a str (Python 3); otherwise it may be of np.string dtype. Non-string dtypes are passed through unchanged. https://github.com/pandas-dev/pandas/issues/13492
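A minimal sketch showing why the no-op-looking body matters: `np.str_` is a subclass of `str`, so `isinstance(name, str)` is True for it, and calling `str()` normalizes it to a plain built-in `str`. The numpy import is only for the demonstration.

```python
import numpy as np

def _ensure_str(name):
    # str(name) normalizes str subclasses (e.g. np.str_) to plain str;
    # anything that is not a str instance passes through unchanged.
    if isinstance(name, str):
        name = str(name)
    return name

assert type(_ensure_str(np.str_("col"))) is str  # subclass -> exact str
assert _ensure_str(42) == 42                     # non-strings untouched
```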
173,510
from __future__ import annotations from contextlib import suppress import copy from datetime import ( date, tzinfo, ) import itertools import os import re from textwrap import dedent from types import TracebackType from typing import ( TYPE_CHECKING, Any, Callable, Final, Hashable, Iterator, Literal, Sequence, cast, overload, ) import warnings import numpy as np from pandas._config import ( config, get_option, ) from pandas._libs import ( lib, writers as libwriters, ) from pandas._libs.tslibs import timezones from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeArg, FilePath, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.compat.pickle_compat import patch_pickle from pandas.errors import ( AttributeConflictWarning, ClosedFileError, IncompatibilityWarning, PerformanceWarning, PossibleDataLossError, ) from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_bool_dtype, is_categorical_dtype, is_complex_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_extension_array_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_string_dtype, is_timedelta64_dtype, needs_i8_conversion, ) from pandas.core.dtypes.missing import array_equivalent from pandas import ( DataFrame, DatetimeIndex, Index, MultiIndex, PeriodIndex, RangeIndex, Series, TimedeltaIndex, concat, isna, ) from pandas.core.arrays import ( Categorical, DatetimeArray, PeriodArray, ) import pandas.core.common as com from pandas.core.computation.pytables import ( PyTablesExpr, maybe_expression, ) from pandas.core.construction import extract_array from pandas.core.indexes.api import ensure_index from pandas.core.internals import ( ArrayManager, BlockManager, ) from pandas.io.common import stringify_path from pandas.io.formats.printing import ( adjoin, pprint_thing, ) class HDFStore: """ Dict-like IO interface for storing pandas objects in PyTables. Either Fixed or Table format. .. warning:: Pandas uses PyTables for reading and writing HDF5 files, which allows serializing object-dtype data with pickle when using the "fixed" format. Loading pickled data received from untrusted sources can be unsafe. See: https://docs.python.org/3/library/pickle.html for more. Parameters ---------- path : str File path to HDF5 file. mode : {'a', 'w', 'r', 'r+'}, default 'a' ``'r'`` Read-only; no data can be modified. ``'w'`` Write; a new file is created (an existing file with the same name would be deleted). ``'a'`` Append; an existing file is opened for reading and writing, and if the file does not exist it is created. ``'r+'`` It is similar to ``'a'``, but the file must already exist. complevel : int, 0-9, default None Specifies a compression level for data. A value of 0 or None disables compression. complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib' Specifies the compression library to be used. As of v0.20.2 these additional compressors for Blosc are supported (default if no compressor specified: 'blosc:blosclz'): {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy', 'blosc:zlib', 'blosc:zstd'}. Specifying a compression library which is not available issues a ValueError. fletcher32 : bool, default False If applying compression use the fletcher32 checksum. **kwargs These parameters will be passed to the PyTables open_file method. 
Examples -------- >>> bar = pd.DataFrame(np.random.randn(10, 4)) >>> store = pd.HDFStore('test.h5') >>> store['foo'] = bar # write to HDF5 >>> bar = store['foo'] # retrieve >>> store.close() **Create or load HDF5 file in-memory** When passing the `driver` option to the PyTables open_file method through **kwargs, the HDF5 file is loaded or created in-memory and will only be written when closed: >>> bar = pd.DataFrame(np.random.randn(10, 4)) >>> store = pd.HDFStore('test.h5', driver='H5FD_CORE') >>> store['foo'] = bar >>> store.close() # only now, data is written to disk """ _handle: File | None _mode: str def __init__( self, path, mode: str = "a", complevel: int | None = None, complib=None, fletcher32: bool = False, **kwargs, ) -> None: if "format" in kwargs: raise ValueError("format is not a defined argument for HDFStore") tables = import_optional_dependency("tables") if complib is not None and complib not in tables.filters.all_complibs: raise ValueError( f"complib only supports {tables.filters.all_complibs} compression." ) if complib is None and complevel is not None: complib = tables.filters.default_complib self._path = stringify_path(path) if mode is None: mode = "a" self._mode = mode self._handle = None self._complevel = complevel if complevel else 0 self._complib = complib self._fletcher32 = fletcher32 self._filters = None self.open(mode=mode, **kwargs) def __fspath__(self) -> str: return self._path def root(self): """return the root node""" self._check_if_open() assert self._handle is not None # for mypy return self._handle.root def filename(self) -> str: return self._path def __getitem__(self, key: str): return self.get(key) def __setitem__(self, key: str, value) -> None: self.put(key, value) def __delitem__(self, key: str) -> None: return self.remove(key) def __getattr__(self, name: str): """allow attribute access to get stores""" try: return self.get(name) except (KeyError, ClosedFileError): pass raise AttributeError( f"'{type(self).__name__}' object has no attribute '{name}'" ) def __contains__(self, key: str) -> bool: """ check for existence of this key can match the exact pathname or the pathnm w/o the leading '/' """ node = self.get_node(key) if node is not None: name = node._v_pathname if key in (name, name[1:]): return True return False def __len__(self) -> int: return len(self.groups()) def __repr__(self) -> str: pstr = pprint_thing(self._path) return f"{type(self)}\nFile path: {pstr}\n" def __enter__(self) -> HDFStore: return self def __exit__( self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None, ) -> None: self.close() def keys(self, include: str = "pandas") -> list[str]: """ Return a list of keys corresponding to objects stored in HDFStore. Parameters ---------- include : str, default 'pandas' When kind equals 'pandas' return pandas objects. When kind equals 'native' return native HDF5 Table objects. .. versionadded:: 1.1.0 Returns ------- list List of ABSOLUTE path-names (e.g. have the leading '/'). 
Raises ------ raises ValueError if kind has an illegal value """ if include == "pandas": return [n._v_pathname for n in self.groups()] elif include == "native": assert self._handle is not None # mypy return [ n._v_pathname for n in self._handle.walk_nodes("/", classname="Table") ] raise ValueError( f"`include` should be either 'pandas' or 'native' but is '{include}'" ) def __iter__(self) -> Iterator[str]: return iter(self.keys()) def items(self) -> Iterator[tuple[str, list]]: """ iterate on key->group """ for g in self.groups(): yield g._v_pathname, g def open(self, mode: str = "a", **kwargs) -> None: """ Open the file in the specified mode Parameters ---------- mode : {'a', 'w', 'r', 'r+'}, default 'a' See HDFStore docstring or tables.open_file for info about modes **kwargs These parameters will be passed to the PyTables open_file method. """ tables = _tables() if self._mode != mode: # if we are changing a write mode to read, ok if self._mode in ["a", "w"] and mode in ["r", "r+"]: pass elif mode in ["w"]: # this would truncate, raise here if self.is_open: raise PossibleDataLossError( f"Re-opening the file [{self._path}] with mode [{self._mode}] " "will delete the current file!" ) self._mode = mode # close and reopen the handle if self.is_open: self.close() if self._complevel and self._complevel > 0: self._filters = _tables().Filters( self._complevel, self._complib, fletcher32=self._fletcher32 ) if _table_file_open_policy_is_strict and self.is_open: msg = ( "Cannot open HDF5 file, which is already opened, " "even in read-only mode." ) raise ValueError(msg) self._handle = tables.open_file(self._path, self._mode, **kwargs) def close(self) -> None: """ Close the PyTables file handle """ if self._handle is not None: self._handle.close() self._handle = None def is_open(self) -> bool: """ return a boolean indicating whether the file is open """ if self._handle is None: return False return bool(self._handle.isopen) def flush(self, fsync: bool = False) -> None: """ Force all buffered modifications to be written to disk. Parameters ---------- fsync : bool (default False) call ``os.fsync()`` on the file handle to force writing to disk. Notes ----- Without ``fsync=True``, flushing may not guarantee that the OS writes to disk. With fsync, the operation will block until the OS claims the file has been written; however, other caching layers may still interfere. """ if self._handle is not None: self._handle.flush() if fsync: with suppress(OSError): os.fsync(self._handle.fileno()) def get(self, key: str): """ Retrieve pandas object stored in file. Parameters ---------- key : str Returns ------- object Same type as object stored in file. """ with patch_pickle(): # GH#31167 Without this patch, pickle doesn't know how to unpickle # old DateOffset objects now that they are cdef classes. group = self.get_node(key) if group is None: raise KeyError(f"No object named {key} in the file") return self._read_group(group) def select( self, key: str, where=None, start=None, stop=None, columns=None, iterator: bool = False, chunksize=None, auto_close: bool = False, ): """ Retrieve pandas object stored in file, optionally based on where criteria. .. warning:: Pandas uses PyTables for reading and writing HDF5 files, which allows serializing object-dtype data with pickle when using the "fixed" format. Loading pickled data received from untrusted sources can be unsafe. See: https://docs.python.org/3/library/pickle.html for more. Parameters ---------- key : str Object being retrieved from file. 
where : list or None List of Term (or convertible) objects, optional. start : int or None Row number to start selection. stop : int, default None Row number to stop selection. columns : list or None A list of columns that if not None, will limit the return columns. iterator : bool or False Returns an iterator. chunksize : int or None Number or rows to include in iteration, return an iterator. auto_close : bool or False Should automatically close the store when finished. Returns ------- object Retrieved object from file. """ group = self.get_node(key) if group is None: raise KeyError(f"No object named {key} in the file") # create the storer and axes where = _ensure_term(where, scope_level=1) s = self._create_storer(group) s.infer_axes() # function to call on iteration def func(_start, _stop, _where): return s.read(start=_start, stop=_stop, where=_where, columns=columns) # create the iterator it = TableIterator( self, s, func, where=where, nrows=s.nrows, start=start, stop=stop, iterator=iterator, chunksize=chunksize, auto_close=auto_close, ) return it.get_result() def select_as_coordinates( self, key: str, where=None, start: int | None = None, stop: int | None = None, ): """ return the selection as an Index .. warning:: Pandas uses PyTables for reading and writing HDF5 files, which allows serializing object-dtype data with pickle when using the "fixed" format. Loading pickled data received from untrusted sources can be unsafe. See: https://docs.python.org/3/library/pickle.html for more. Parameters ---------- key : str where : list of Term (or convertible) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection """ where = _ensure_term(where, scope_level=1) tbl = self.get_storer(key) if not isinstance(tbl, Table): raise TypeError("can only read_coordinates with a table") return tbl.read_coordinates(where=where, start=start, stop=stop) def select_column( self, key: str, column: str, start: int | None = None, stop: int | None = None, ): """ return a single column from the table. This is generally only useful to select an indexable .. warning:: Pandas uses PyTables for reading and writing HDF5 files, which allows serializing object-dtype data with pickle when using the "fixed" format. Loading pickled data received from untrusted sources can be unsafe. See: https://docs.python.org/3/library/pickle.html for more. Parameters ---------- key : str column : str The column of interest. start : int or None, default None stop : int or None, default None Raises ------ raises KeyError if the column is not found (or key is not a valid store) raises ValueError if the column can not be extracted individually (it is part of a data block) """ tbl = self.get_storer(key) if not isinstance(tbl, Table): raise TypeError("can only read_column with a table") return tbl.read_column(column=column, start=start, stop=stop) def select_as_multiple( self, keys, where=None, selector=None, columns=None, start=None, stop=None, iterator: bool = False, chunksize=None, auto_close: bool = False, ): """ Retrieve pandas objects from multiple tables. .. warning:: Pandas uses PyTables for reading and writing HDF5 files, which allows serializing object-dtype data with pickle when using the "fixed" format. Loading pickled data received from untrusted sources can be unsafe. See: https://docs.python.org/3/library/pickle.html for more. 
Parameters ---------- keys : a list of the tables selector : the table to apply the where criteria (defaults to keys[0] if not supplied) columns : the columns I want back start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection iterator : bool, return an iterator, default False chunksize : nrows to include in iteration, return an iterator auto_close : bool, default False Should automatically close the store when finished. Raises ------ raises KeyError if keys or selector is not found or keys is empty raises TypeError if keys is not a list or tuple raises ValueError if the tables are not ALL THE SAME DIMENSIONS """ # default to single select where = _ensure_term(where, scope_level=1) if isinstance(keys, (list, tuple)) and len(keys) == 1: keys = keys[0] if isinstance(keys, str): return self.select( key=keys, where=where, columns=columns, start=start, stop=stop, iterator=iterator, chunksize=chunksize, auto_close=auto_close, ) if not isinstance(keys, (list, tuple)): raise TypeError("keys must be a list/tuple") if not len(keys): raise ValueError("keys must have a non-zero length") if selector is None: selector = keys[0] # collect the tables tbls = [self.get_storer(k) for k in keys] s = self.get_storer(selector) # validate rows nrows = None for t, k in itertools.chain([(s, selector)], zip(tbls, keys)): if t is None: raise KeyError(f"Invalid table [{k}]") if not t.is_table: raise TypeError( f"object [{t.pathname}] is not a table, and cannot be used in all " "select as multiple" ) if nrows is None: nrows = t.nrows elif t.nrows != nrows: raise ValueError("all tables must have exactly the same nrows!") # The isinstance checks here are redundant with the check above, # but necessary for mypy; see GH#29757 _tbls = [x for x in tbls if isinstance(x, Table)] # axis is the concentration axes axis = {t.non_index_axes[0][0] for t in _tbls}.pop() def func(_start, _stop, _where): # retrieve the objs, _where is always passed as a set of # coordinates here objs = [ t.read(where=_where, columns=columns, start=_start, stop=_stop) for t in tbls ] # concat and return return concat(objs, axis=axis, verify_integrity=False)._consolidate() # create the iterator it = TableIterator( self, s, func, where=where, nrows=nrows, start=start, stop=stop, iterator=iterator, chunksize=chunksize, auto_close=auto_close, ) return it.get_result(coordinates=True) def put( self, key: str, value: DataFrame | Series, format=None, index: bool = True, append: bool = False, complib=None, complevel: int | None = None, min_itemsize: int | dict[str, int] | None = None, nan_rep=None, data_columns: Literal[True] | list[str] | None = None, encoding=None, errors: str = "strict", track_times: bool = True, dropna: bool = False, ) -> None: """ Store object in HDFStore. Parameters ---------- key : str value : {Series, DataFrame} format : 'fixed(f)|table(t)', default is 'fixed' Format to use when storing object in HDFStore. Value can be one of: ``'fixed'`` Fixed format. Fast writing/reading. Not-appendable, nor searchable. ``'table'`` Table format. Write as a PyTables Table structure which may perform worse but allow more flexible operations like searching / selecting subsets of the data. index : bool, default True Write DataFrame index as a column. append : bool, default False This will force Table format, append the input data to the existing. data_columns : list of columns or True, default None List of columns to create as data columns, or True to use all columns. 
See `here <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__. encoding : str, default None Provide an encoding for strings. track_times : bool, default True Parameter is propagated to 'create_table' method of 'PyTables'. If set to False it enables to have the same h5 files (same hashes) independent on creation time. dropna : bool, default False, optional Remove missing values. .. versionadded:: 1.1.0 """ if format is None: format = get_option("io.hdf.default_format") or "fixed" format = self._validate_format(format) self._write_to_group( key, value, format=format, index=index, append=append, complib=complib, complevel=complevel, min_itemsize=min_itemsize, nan_rep=nan_rep, data_columns=data_columns, encoding=encoding, errors=errors, track_times=track_times, dropna=dropna, ) def remove(self, key: str, where=None, start=None, stop=None) -> None: """ Remove pandas object partially by specifying the where condition Parameters ---------- key : str Node to remove or delete rows from where : list of Term (or convertible) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection Returns ------- number of rows removed (or None if not a Table) Raises ------ raises KeyError if key is not a valid store """ where = _ensure_term(where, scope_level=1) try: s = self.get_storer(key) except KeyError: # the key is not a valid store, re-raising KeyError raise except AssertionError: # surface any assertion errors for e.g. debugging raise except Exception as err: # In tests we get here with ClosedFileError, TypeError, and # _table_mod.NoSuchNodeError. TODO: Catch only these? if where is not None: raise ValueError( "trying to remove a node with a non-None where clause!" ) from err # we are actually trying to remove a node (with children) node = self.get_node(key) if node is not None: node._f_remove(recursive=True) return None # remove the node if com.all_none(where, start, stop): s.group._f_remove(recursive=True) # delete from the table else: if not s.is_table: raise ValueError( "can only remove with where on objects written as tables" ) return s.delete(where=where, start=start, stop=stop) def append( self, key: str, value: DataFrame | Series, format=None, axes=None, index: bool | list[str] = True, append: bool = True, complib=None, complevel: int | None = None, columns=None, min_itemsize: int | dict[str, int] | None = None, nan_rep=None, chunksize=None, expectedrows=None, dropna: bool | None = None, data_columns: Literal[True] | list[str] | None = None, encoding=None, errors: str = "strict", ) -> None: """ Append to Table in file. Node must already exist and be Table format. Parameters ---------- key : str value : {Series, DataFrame} format : 'table' is the default Format to use when storing object in HDFStore. Value can be one of: ``'table'`` Table format. Write as a PyTables Table structure which may perform worse but allow more flexible operations like searching / selecting subsets of the data. index : bool, default True Write DataFrame index as a column. append : bool, default True Append the input data to the existing. data_columns : list of columns, or True, default None List of columns to create as indexed data columns for on-disk queries, or True to use all columns. By default only the axes of the object are indexed. See `here <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__. 
min_itemsize : dict of columns that specify minimum str sizes nan_rep : str to use as str nan representation chunksize : size to chunk the writing expectedrows : expected TOTAL row size of this table encoding : default None, provide an encoding for str dropna : bool, default False, optional Do not write an ALL nan row to the store settable by the option 'io.hdf.dropna_table'. Notes ----- Does *not* check if data being appended overlaps with existing data in the table, so be careful """ if columns is not None: raise TypeError( "columns is not a supported keyword in append, try data_columns" ) if dropna is None: dropna = get_option("io.hdf.dropna_table") if format is None: format = get_option("io.hdf.default_format") or "table" format = self._validate_format(format) self._write_to_group( key, value, format=format, axes=axes, index=index, append=append, complib=complib, complevel=complevel, min_itemsize=min_itemsize, nan_rep=nan_rep, chunksize=chunksize, expectedrows=expectedrows, dropna=dropna, data_columns=data_columns, encoding=encoding, errors=errors, ) def append_to_multiple( self, d: dict, value, selector, data_columns=None, axes=None, dropna: bool = False, **kwargs, ) -> None: """ Append to multiple tables Parameters ---------- d : a dict of table_name to table_columns, None is acceptable as the values of one node (this will get all the remaining columns) value : a pandas object selector : a string that designates the indexable table; all of its columns will be designed as data_columns, unless data_columns is passed, in which case these are used data_columns : list of columns to create as data columns, or True to use all columns dropna : if evaluates to True, drop rows from all tables if any single row in each table has all NaN. Default False. Notes ----- axes parameter is currently not accepted """ if axes is not None: raise TypeError( "axes is currently not accepted as a parameter to append_to_multiple; " "you can create the tables independently instead" ) if not isinstance(d, dict): raise ValueError( "append_to_multiple must have a dictionary specified as the " "way to split the value" ) if selector not in d: raise ValueError( "append_to_multiple requires a selector that is in passed dict" ) # figure out the splitting axis (the non_index_axis) axis = list(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))[0] # figure out how to split the value remain_key = None remain_values: list = [] for k, v in d.items(): if v is None: if remain_key is not None: raise ValueError( "append_to_multiple can only have one value in d that is None" ) remain_key = k else: remain_values.extend(v) if remain_key is not None: ordered = value.axes[axis] ordd = ordered.difference(Index(remain_values)) ordd = sorted(ordered.get_indexer(ordd)) d[remain_key] = ordered.take(ordd) # data_columns if data_columns is None: data_columns = d[selector] # ensure rows are synchronized across the tables if dropna: idxs = (value[cols].dropna(how="all").index for cols in d.values()) valid_index = next(idxs) for index in idxs: valid_index = valid_index.intersection(index) value = value.loc[valid_index] min_itemsize = kwargs.pop("min_itemsize", None) # append for k, v in d.items(): dc = data_columns if k == selector else None # compute the val val = value.reindex(v, axis=axis) filtered = ( {key: value for (key, value) in min_itemsize.items() if key in v} if min_itemsize is not None else None ) self.append(k, val, data_columns=dc, min_itemsize=filtered, **kwargs) def create_table_index( self, key: str, columns=None, optlevel: 
int | None = None, kind: str | None = None, ) -> None: """ Create a pytables index on the table. Parameters ---------- key : str columns : None, bool, or listlike[str] Indicate which columns to create an index on. * False : Do not create any indexes. * True : Create indexes on all columns. * None : Create indexes on all columns. * listlike : Create indexes on the given columns. optlevel : int or None, default None Optimization level, if None, pytables defaults to 6. kind : str or None, default None Kind of index, if None, pytables defaults to "medium". Raises ------ TypeError: raises if the node is not a table """ # version requirements _tables() s = self.get_storer(key) if s is None: return if not isinstance(s, Table): raise TypeError("cannot create table index on a Fixed format store") s.create_index(columns=columns, optlevel=optlevel, kind=kind) def groups(self) -> list: """ Return a list of all the top-level nodes. Each node returned is not a pandas storage object. Returns ------- list List of objects. """ _tables() self._check_if_open() assert self._handle is not None # for mypy assert _table_mod is not None # for mypy return [ g for g in self._handle.walk_groups() if ( not isinstance(g, _table_mod.link.Link) and ( getattr(g._v_attrs, "pandas_type", None) or getattr(g, "table", None) or (isinstance(g, _table_mod.table.Table) and g._v_name != "table") ) ) ] def walk(self, where: str = "/") -> Iterator[tuple[str, list[str], list[str]]]: """ Walk the pytables group hierarchy for pandas objects. This generator will yield the group path, subgroups and pandas object names for each group. Any non-pandas PyTables objects that are not a group will be ignored. The `where` group itself is listed first (preorder), then each of its child groups (following an alphanumerical order) is also traversed, following the same procedure. Parameters ---------- where : str, default "/" Group where to start walking. Yields ------ path : str Full path to a group (without trailing '/'). groups : list Names (strings) of the groups contained in `path`. leaves : list Names (strings) of the pandas objects contained in `path`. 
""" _tables() self._check_if_open() assert self._handle is not None # for mypy assert _table_mod is not None # for mypy for g in self._handle.walk_groups(where): if getattr(g._v_attrs, "pandas_type", None) is not None: continue groups = [] leaves = [] for child in g._v_children.values(): pandas_type = getattr(child._v_attrs, "pandas_type", None) if pandas_type is None: if isinstance(child, _table_mod.group.Group): groups.append(child._v_name) else: leaves.append(child._v_name) yield (g._v_pathname.rstrip("/"), groups, leaves) def get_node(self, key: str) -> Node | None: """return the node with the key or None if it does not exist""" self._check_if_open() if not key.startswith("/"): key = "/" + key assert self._handle is not None assert _table_mod is not None # for mypy try: node = self._handle.get_node(self.root, key) except _table_mod.exceptions.NoSuchNodeError: return None assert isinstance(node, _table_mod.Node), type(node) return node def get_storer(self, key: str) -> GenericFixed | Table: """return the storer object for a key, raise if not in the file""" group = self.get_node(key) if group is None: raise KeyError(f"No object named {key} in the file") s = self._create_storer(group) s.infer_axes() return s def copy( self, file, mode: str = "w", propindexes: bool = True, keys=None, complib=None, complevel: int | None = None, fletcher32: bool = False, overwrite: bool = True, ) -> HDFStore: """ Copy the existing store to a new file, updating in place. Parameters ---------- propindexes : bool, default True Restore indexes in copied file. keys : list, optional List of keys to include in the copy (defaults to all). overwrite : bool, default True Whether to overwrite (remove and replace) existing nodes in the new store. mode, complib, complevel, fletcher32 same as in HDFStore.__init__ Returns ------- open file handle of the new store """ new_store = HDFStore( file, mode=mode, complib=complib, complevel=complevel, fletcher32=fletcher32 ) if keys is None: keys = list(self.keys()) if not isinstance(keys, (tuple, list)): keys = [keys] for k in keys: s = self.get_storer(k) if s is not None: if k in new_store: if overwrite: new_store.remove(k) data = self.select(k) if isinstance(s, Table): index: bool | list[str] = False if propindexes: index = [a.name for a in s.axes if a.is_indexed] new_store.append( k, data, index=index, data_columns=getattr(s, "data_columns", None), encoding=s.encoding, ) else: new_store.put(k, data, encoding=s.encoding) return new_store def info(self) -> str: """ Print detailed information on the store. Returns ------- str """ path = pprint_thing(self._path) output = f"{type(self)}\nFile path: {path}\n" if self.is_open: lkeys = sorted(self.keys()) if len(lkeys): keys = [] values = [] for k in lkeys: try: s = self.get_storer(k) if s is not None: keys.append(pprint_thing(s.pathname or k)) values.append(pprint_thing(s or "invalid_HDFStore node")) except AssertionError: # surface any assertion errors for e.g. 
debugging raise except Exception as detail: keys.append(k) dstr = pprint_thing(detail) values.append(f"[invalid_HDFStore node: {dstr}]") output += adjoin(12, keys, values) else: output += "Empty" else: output += "File is CLOSED" return output # ------------------------------------------------------------------------ # private methods def _check_if_open(self): if not self.is_open: raise ClosedFileError(f"{self._path} file is not open!") def _validate_format(self, format: str) -> str: """validate / deprecate formats""" # validate try: format = _FORMAT_MAP[format.lower()] except KeyError as err: raise TypeError(f"invalid HDFStore format specified [{format}]") from err return format def _create_storer( self, group, format=None, value: DataFrame | Series | None = None, encoding: str = "UTF-8", errors: str = "strict", ) -> GenericFixed | Table: """return a suitable class to operate""" cls: type[GenericFixed] | type[Table] if value is not None and not isinstance(value, (Series, DataFrame)): raise TypeError("value must be None, Series, or DataFrame") pt = _ensure_decoded(getattr(group._v_attrs, "pandas_type", None)) tt = _ensure_decoded(getattr(group._v_attrs, "table_type", None)) # infer the pt from the passed value if pt is None: if value is None: _tables() assert _table_mod is not None # for mypy if getattr(group, "table", None) or isinstance( group, _table_mod.table.Table ): pt = "frame_table" tt = "generic_table" else: raise TypeError( "cannot create a storer if the object is not existing " "nor a value are passed" ) else: if isinstance(value, Series): pt = "series" else: pt = "frame" # we are actually a table if format == "table": pt += "_table" # a storer node if "table" not in pt: _STORER_MAP = {"series": SeriesFixed, "frame": FrameFixed} try: cls = _STORER_MAP[pt] except KeyError as err: raise TypeError( f"cannot properly create the storer for: [_STORER_MAP] [group->" f"{group},value->{type(value)},format->{format}" ) from err return cls(self, group, encoding=encoding, errors=errors) # existing node (and must be a table) if tt is None: # if we are a writer, determine the tt if value is not None: if pt == "series_table": index = getattr(value, "index", None) if index is not None: if index.nlevels == 1: tt = "appendable_series" elif index.nlevels > 1: tt = "appendable_multiseries" elif pt == "frame_table": index = getattr(value, "index", None) if index is not None: if index.nlevels == 1: tt = "appendable_frame" elif index.nlevels > 1: tt = "appendable_multiframe" _TABLE_MAP = { "generic_table": GenericTable, "appendable_series": AppendableSeriesTable, "appendable_multiseries": AppendableMultiSeriesTable, "appendable_frame": AppendableFrameTable, "appendable_multiframe": AppendableMultiFrameTable, "worm": WORMTable, } try: cls = _TABLE_MAP[tt] except KeyError as err: raise TypeError( f"cannot properly create the storer for: [_TABLE_MAP] [group->" f"{group},value->{type(value)},format->{format}" ) from err return cls(self, group, encoding=encoding, errors=errors) def _write_to_group( self, key: str, value: DataFrame | Series, format, axes=None, index: bool | list[str] = True, append: bool = False, complib=None, complevel: int | None = None, fletcher32=None, min_itemsize: int | dict[str, int] | None = None, chunksize=None, expectedrows=None, dropna: bool = False, nan_rep=None, data_columns=None, encoding=None, errors: str = "strict", track_times: bool = True, ) -> None: # we don't want to store a table node at all if our object is 0-len # as there are not dtypes if getattr(value, "empty", 
None) and (format == "table" or append): return group = self._identify_group(key, append) s = self._create_storer(group, format, value, encoding=encoding, errors=errors) if append: # raise if we are trying to append to a Fixed format, # or a table that exists (and we are putting) if not s.is_table or (s.is_table and format == "fixed" and s.is_exists): raise ValueError("Can only append to Tables") if not s.is_exists: s.set_object_info() else: s.set_object_info() if not s.is_table and complib: raise ValueError("Compression not supported on Fixed format stores") # write the object s.write( obj=value, axes=axes, append=append, complib=complib, complevel=complevel, fletcher32=fletcher32, min_itemsize=min_itemsize, chunksize=chunksize, expectedrows=expectedrows, dropna=dropna, nan_rep=nan_rep, data_columns=data_columns, track_times=track_times, ) if isinstance(s, Table) and index: s.create_index(columns=index) def _read_group(self, group: Node): s = self._create_storer(group) s.infer_axes() return s.read() def _identify_group(self, key: str, append: bool) -> Node: """Identify HDF5 group based on key, delete/create group if needed.""" group = self.get_node(key) # we make this assertion for mypy; the get_node call will already # have raised if this is incorrect assert self._handle is not None # remove the node if we are not appending if group is not None and not append: self._handle.remove_node(group, recursive=True) group = None if group is None: group = self._create_nodes_and_group(key) return group def _create_nodes_and_group(self, key: str) -> Node: """Create nodes from key and return group name.""" # assertion for mypy assert self._handle is not None paths = key.split("/") # recursively create the groups path = "/" for p in paths: if not len(p): continue new_path = path if not path.endswith("/"): new_path += "/" new_path += p group = self.get_node(new_path) if group is None: group = self._handle.create_group(path, p) path = new_path return group Literal: _SpecialForm = ... FilePath = Union[str, "PathLike[str]"] def stringify_path(filepath_or_buffer: FilePath, convert_file_like: bool = ...) -> str: ... def stringify_path( filepath_or_buffer: BaseBufferT, convert_file_like: bool = ... ) -> BaseBufferT: ... def stringify_path( filepath_or_buffer: FilePath | BaseBufferT, convert_file_like: bool = False, ) -> str | BaseBufferT: """ Attempt to convert a path-like object to a string. Parameters ---------- filepath_or_buffer : object to be converted Returns ------- str_filepath_or_buffer : maybe a string version of the object Notes ----- Objects supporting the fspath protocol (python 3.6+) are coerced according to its __fspath__ method. Any other object is passed through unchanged, which includes bytes, strings, buffers, or anything else that's not even path-like. """ if not convert_file_like and is_file_like(filepath_or_buffer): # GH 38125: some fsspec objects implement os.PathLike but have already opened a # file. This prevents opening the file a second time. infer_compression calls # this function with convert_file_like=True to infer the compression. return cast(BaseBufferT, filepath_or_buffer) if isinstance(filepath_or_buffer, os.PathLike): filepath_or_buffer = filepath_or_buffer.__fspath__() return _expand_user(filepath_or_buffer) ) The provided code snippet includes necessary dependencies for implementing the `to_hdf` function. 
Write a Python function `def to_hdf( path_or_buf: FilePath | HDFStore, key: str, value: DataFrame | Series, mode: str = "a", complevel: int | None = None, complib: str | None = None, append: bool = False, format: str | None = None, index: bool = True, min_itemsize: int | dict[str, int] | None = None, nan_rep=None, dropna: bool | None = None, data_columns: Literal[True] | list[str] | None = None, errors: str = "strict", encoding: str = "UTF-8", ) -> None` to solve the following problem: store this object, close it if we opened it Here is the function: def to_hdf( path_or_buf: FilePath | HDFStore, key: str, value: DataFrame | Series, mode: str = "a", complevel: int | None = None, complib: str | None = None, append: bool = False, format: str | None = None, index: bool = True, min_itemsize: int | dict[str, int] | None = None, nan_rep=None, dropna: bool | None = None, data_columns: Literal[True] | list[str] | None = None, errors: str = "strict", encoding: str = "UTF-8", ) -> None: """store this object, close it if we opened it""" if append: f = lambda store: store.append( key, value, format=format, index=index, min_itemsize=min_itemsize, nan_rep=nan_rep, dropna=dropna, data_columns=data_columns, errors=errors, encoding=encoding, ) else: # NB: dropna is not passed to `put` f = lambda store: store.put( key, value, format=format, index=index, min_itemsize=min_itemsize, nan_rep=nan_rep, data_columns=data_columns, errors=errors, encoding=encoding, dropna=dropna, ) path_or_buf = stringify_path(path_or_buf) if isinstance(path_or_buf, str): with HDFStore( path_or_buf, mode=mode, complevel=complevel, complib=complib ) as store: f(store) else: f(path_or_buf)
Store this object in the HDFStore, closing the store only if this function opened it.
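A minimal usage sketch, assuming PyTables (the "tables" package) is installed and writing to a hypothetical `example.h5` path. It shows the two accepted `path_or_buf` forms described above: a plain path, for which an HDFStore is opened and closed internally, and an already-open HDFStore, which remains the caller's responsibility.

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]})

# Path form: an HDFStore is created, written, and closed inside to_hdf.
df.to_hdf("example.h5", key="df", mode="w", format="table")

# Store form: the handle is ours, so it stays open until the with-block ends.
with pd.HDFStore("example.h5", mode="a") as store:
    store.append("df", pd.DataFrame({"a": [4, 5]}))  # table format appends
    print(store.select("df"))
```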
173,511
from __future__ import annotations from contextlib import suppress import copy from datetime import ( date, tzinfo, ) import itertools import os import re from textwrap import dedent from types import TracebackType from typing import ( TYPE_CHECKING, Any, Callable, Final, Hashable, Iterator, Literal, Sequence, cast, overload, ) import warnings import numpy as np from pandas._config import ( config, get_option, ) from pandas._libs import ( lib, writers as libwriters, ) from pandas._libs.tslibs import timezones from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeArg, FilePath, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.compat.pickle_compat import patch_pickle from pandas.errors import ( AttributeConflictWarning, ClosedFileError, IncompatibilityWarning, PerformanceWarning, PossibleDataLossError, ) from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_bool_dtype, is_categorical_dtype, is_complex_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_extension_array_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_string_dtype, is_timedelta64_dtype, needs_i8_conversion, ) from pandas.core.dtypes.missing import array_equivalent from pandas import ( DataFrame, DatetimeIndex, Index, MultiIndex, PeriodIndex, RangeIndex, Series, TimedeltaIndex, concat, isna, ) from pandas.core.arrays import ( Categorical, DatetimeArray, PeriodArray, ) import pandas.core.common as com from pandas.core.computation.pytables import ( PyTablesExpr, maybe_expression, ) from pandas.core.construction import extract_array from pandas.core.indexes.api import ensure_index from pandas.core.internals import ( ArrayManager, BlockManager, ) from pandas.io.common import stringify_path from pandas.io.formats.printing import ( adjoin, pprint_thing, ) def _ensure_term(where, scope_level: int): """ Ensure that the where is a Term or a list of Term. This makes sure that we are capturing the scope of variables that are passed create the terms here with a frame_level=2 (we are 2 levels down) """ # only consider list/tuple here as an ndarray is automatically a coordinate # list level = scope_level + 1 if isinstance(where, (list, tuple)): where = [ Term(term, scope_level=level + 1) if maybe_expression(term) else term for term in where if term is not None ] elif maybe_expression(where): where = Term(where, scope_level=level) return where if where is None or len(where) else None def _is_metadata_of(group: Node, parent_group: Node) -> bool: """Check if a given group is a metadata group for a given parent_group.""" if group._v_depth <= parent_group._v_depth: return False current = group while current._v_depth > 1: parent = current._v_parent if parent == parent_group and current._v_name == "meta": return True current = current._v_parent return False class HDFStore: """ Dict-like IO interface for storing pandas objects in PyTables. Either Fixed or Table format. .. warning:: Pandas uses PyTables for reading and writing HDF5 files, which allows serializing object-dtype data with pickle when using the "fixed" format. Loading pickled data received from untrusted sources can be unsafe. See: https://docs.python.org/3/library/pickle.html for more. Parameters ---------- path : str File path to HDF5 file. mode : {'a', 'w', 'r', 'r+'}, default 'a' ``'r'`` Read-only; no data can be modified. 
``'w'`` Write; a new file is created (an existing file with the same name would be deleted). ``'a'`` Append; an existing file is opened for reading and writing, and if the file does not exist it is created. ``'r+'`` It is similar to ``'a'``, but the file must already exist. complevel : int, 0-9, default None Specifies a compression level for data. A value of 0 or None disables compression. complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib' Specifies the compression library to be used. As of v0.20.2 these additional compressors for Blosc are supported (default if no compressor specified: 'blosc:blosclz'): {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy', 'blosc:zlib', 'blosc:zstd'}. Specifying a compression library which is not available issues a ValueError. fletcher32 : bool, default False If applying compression use the fletcher32 checksum. **kwargs These parameters will be passed to the PyTables open_file method. Examples -------- >>> bar = pd.DataFrame(np.random.randn(10, 4)) >>> store = pd.HDFStore('test.h5') >>> store['foo'] = bar # write to HDF5 >>> bar = store['foo'] # retrieve >>> store.close() **Create or load HDF5 file in-memory** When passing the `driver` option to the PyTables open_file method through **kwargs, the HDF5 file is loaded or created in-memory and will only be written when closed: >>> bar = pd.DataFrame(np.random.randn(10, 4)) >>> store = pd.HDFStore('test.h5', driver='H5FD_CORE') >>> store['foo'] = bar >>> store.close() # only now, data is written to disk """ _handle: File | None _mode: str def __init__( self, path, mode: str = "a", complevel: int | None = None, complib=None, fletcher32: bool = False, **kwargs, ) -> None: if "format" in kwargs: raise ValueError("format is not a defined argument for HDFStore") tables = import_optional_dependency("tables") if complib is not None and complib not in tables.filters.all_complibs: raise ValueError( f"complib only supports {tables.filters.all_complibs} compression." 
) if complib is None and complevel is not None: complib = tables.filters.default_complib self._path = stringify_path(path) if mode is None: mode = "a" self._mode = mode self._handle = None self._complevel = complevel if complevel else 0 self._complib = complib self._fletcher32 = fletcher32 self._filters = None self.open(mode=mode, **kwargs) def __fspath__(self) -> str: return self._path def root(self): """return the root node""" self._check_if_open() assert self._handle is not None # for mypy return self._handle.root def filename(self) -> str: return self._path def __getitem__(self, key: str): return self.get(key) def __setitem__(self, key: str, value) -> None: self.put(key, value) def __delitem__(self, key: str) -> None: return self.remove(key) def __getattr__(self, name: str): """allow attribute access to get stores""" try: return self.get(name) except (KeyError, ClosedFileError): pass raise AttributeError( f"'{type(self).__name__}' object has no attribute '{name}'" ) def __contains__(self, key: str) -> bool: """ check for existence of this key can match the exact pathname or the pathnm w/o the leading '/' """ node = self.get_node(key) if node is not None: name = node._v_pathname if key in (name, name[1:]): return True return False def __len__(self) -> int: return len(self.groups()) def __repr__(self) -> str: pstr = pprint_thing(self._path) return f"{type(self)}\nFile path: {pstr}\n" def __enter__(self) -> HDFStore: return self def __exit__( self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None, ) -> None: self.close() def keys(self, include: str = "pandas") -> list[str]: """ Return a list of keys corresponding to objects stored in HDFStore. Parameters ---------- include : str, default 'pandas' When kind equals 'pandas' return pandas objects. When kind equals 'native' return native HDF5 Table objects. .. versionadded:: 1.1.0 Returns ------- list List of ABSOLUTE path-names (e.g. have the leading '/'). Raises ------ raises ValueError if kind has an illegal value """ if include == "pandas": return [n._v_pathname for n in self.groups()] elif include == "native": assert self._handle is not None # mypy return [ n._v_pathname for n in self._handle.walk_nodes("/", classname="Table") ] raise ValueError( f"`include` should be either 'pandas' or 'native' but is '{include}'" ) def __iter__(self) -> Iterator[str]: return iter(self.keys()) def items(self) -> Iterator[tuple[str, list]]: """ iterate on key->group """ for g in self.groups(): yield g._v_pathname, g def open(self, mode: str = "a", **kwargs) -> None: """ Open the file in the specified mode Parameters ---------- mode : {'a', 'w', 'r', 'r+'}, default 'a' See HDFStore docstring or tables.open_file for info about modes **kwargs These parameters will be passed to the PyTables open_file method. """ tables = _tables() if self._mode != mode: # if we are changing a write mode to read, ok if self._mode in ["a", "w"] and mode in ["r", "r+"]: pass elif mode in ["w"]: # this would truncate, raise here if self.is_open: raise PossibleDataLossError( f"Re-opening the file [{self._path}] with mode [{self._mode}] " "will delete the current file!" ) self._mode = mode # close and reopen the handle if self.is_open: self.close() if self._complevel and self._complevel > 0: self._filters = _tables().Filters( self._complevel, self._complib, fletcher32=self._fletcher32 ) if _table_file_open_policy_is_strict and self.is_open: msg = ( "Cannot open HDF5 file, which is already opened, " "even in read-only mode." 
) raise ValueError(msg) self._handle = tables.open_file(self._path, self._mode, **kwargs) def close(self) -> None: """ Close the PyTables file handle """ if self._handle is not None: self._handle.close() self._handle = None def is_open(self) -> bool: """ return a boolean indicating whether the file is open """ if self._handle is None: return False return bool(self._handle.isopen) def flush(self, fsync: bool = False) -> None: """ Force all buffered modifications to be written to disk. Parameters ---------- fsync : bool (default False) call ``os.fsync()`` on the file handle to force writing to disk. Notes ----- Without ``fsync=True``, flushing may not guarantee that the OS writes to disk. With fsync, the operation will block until the OS claims the file has been written; however, other caching layers may still interfere. """ if self._handle is not None: self._handle.flush() if fsync: with suppress(OSError): os.fsync(self._handle.fileno()) def get(self, key: str): """ Retrieve pandas object stored in file. Parameters ---------- key : str Returns ------- object Same type as object stored in file. """ with patch_pickle(): # GH#31167 Without this patch, pickle doesn't know how to unpickle # old DateOffset objects now that they are cdef classes. group = self.get_node(key) if group is None: raise KeyError(f"No object named {key} in the file") return self._read_group(group) def select( self, key: str, where=None, start=None, stop=None, columns=None, iterator: bool = False, chunksize=None, auto_close: bool = False, ): """ Retrieve pandas object stored in file, optionally based on where criteria. .. warning:: Pandas uses PyTables for reading and writing HDF5 files, which allows serializing object-dtype data with pickle when using the "fixed" format. Loading pickled data received from untrusted sources can be unsafe. See: https://docs.python.org/3/library/pickle.html for more. Parameters ---------- key : str Object being retrieved from file. where : list or None List of Term (or convertible) objects, optional. start : int or None Row number to start selection. stop : int, default None Row number to stop selection. columns : list or None A list of columns that if not None, will limit the return columns. iterator : bool or False Returns an iterator. chunksize : int or None Number or rows to include in iteration, return an iterator. auto_close : bool or False Should automatically close the store when finished. Returns ------- object Retrieved object from file. """ group = self.get_node(key) if group is None: raise KeyError(f"No object named {key} in the file") # create the storer and axes where = _ensure_term(where, scope_level=1) s = self._create_storer(group) s.infer_axes() # function to call on iteration def func(_start, _stop, _where): return s.read(start=_start, stop=_stop, where=_where, columns=columns) # create the iterator it = TableIterator( self, s, func, where=where, nrows=s.nrows, start=start, stop=stop, iterator=iterator, chunksize=chunksize, auto_close=auto_close, ) return it.get_result() def select_as_coordinates( self, key: str, where=None, start: int | None = None, stop: int | None = None, ): """ return the selection as an Index .. warning:: Pandas uses PyTables for reading and writing HDF5 files, which allows serializing object-dtype data with pickle when using the "fixed" format. Loading pickled data received from untrusted sources can be unsafe. See: https://docs.python.org/3/library/pickle.html for more. 
Parameters ---------- key : str where : list of Term (or convertible) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection """ where = _ensure_term(where, scope_level=1) tbl = self.get_storer(key) if not isinstance(tbl, Table): raise TypeError("can only read_coordinates with a table") return tbl.read_coordinates(where=where, start=start, stop=stop) def select_column( self, key: str, column: str, start: int | None = None, stop: int | None = None, ): """ return a single column from the table. This is generally only useful to select an indexable .. warning:: Pandas uses PyTables for reading and writing HDF5 files, which allows serializing object-dtype data with pickle when using the "fixed" format. Loading pickled data received from untrusted sources can be unsafe. See: https://docs.python.org/3/library/pickle.html for more. Parameters ---------- key : str column : str The column of interest. start : int or None, default None stop : int or None, default None Raises ------ raises KeyError if the column is not found (or key is not a valid store) raises ValueError if the column can not be extracted individually (it is part of a data block) """ tbl = self.get_storer(key) if not isinstance(tbl, Table): raise TypeError("can only read_column with a table") return tbl.read_column(column=column, start=start, stop=stop) def select_as_multiple( self, keys, where=None, selector=None, columns=None, start=None, stop=None, iterator: bool = False, chunksize=None, auto_close: bool = False, ): """ Retrieve pandas objects from multiple tables. .. warning:: Pandas uses PyTables for reading and writing HDF5 files, which allows serializing object-dtype data with pickle when using the "fixed" format. Loading pickled data received from untrusted sources can be unsafe. See: https://docs.python.org/3/library/pickle.html for more. Parameters ---------- keys : a list of the tables selector : the table to apply the where criteria (defaults to keys[0] if not supplied) columns : the columns I want back start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection iterator : bool, return an iterator, default False chunksize : nrows to include in iteration, return an iterator auto_close : bool, default False Should automatically close the store when finished. 
Raises ------ raises KeyError if keys or selector is not found or keys is empty raises TypeError if keys is not a list or tuple raises ValueError if the tables are not ALL THE SAME DIMENSIONS """ # default to single select where = _ensure_term(where, scope_level=1) if isinstance(keys, (list, tuple)) and len(keys) == 1: keys = keys[0] if isinstance(keys, str): return self.select( key=keys, where=where, columns=columns, start=start, stop=stop, iterator=iterator, chunksize=chunksize, auto_close=auto_close, ) if not isinstance(keys, (list, tuple)): raise TypeError("keys must be a list/tuple") if not len(keys): raise ValueError("keys must have a non-zero length") if selector is None: selector = keys[0] # collect the tables tbls = [self.get_storer(k) for k in keys] s = self.get_storer(selector) # validate rows nrows = None for t, k in itertools.chain([(s, selector)], zip(tbls, keys)): if t is None: raise KeyError(f"Invalid table [{k}]") if not t.is_table: raise TypeError( f"object [{t.pathname}] is not a table, and cannot be used in all " "select as multiple" ) if nrows is None: nrows = t.nrows elif t.nrows != nrows: raise ValueError("all tables must have exactly the same nrows!") # The isinstance checks here are redundant with the check above, # but necessary for mypy; see GH#29757 _tbls = [x for x in tbls if isinstance(x, Table)] # axis is the concentration axes axis = {t.non_index_axes[0][0] for t in _tbls}.pop() def func(_start, _stop, _where): # retrieve the objs, _where is always passed as a set of # coordinates here objs = [ t.read(where=_where, columns=columns, start=_start, stop=_stop) for t in tbls ] # concat and return return concat(objs, axis=axis, verify_integrity=False)._consolidate() # create the iterator it = TableIterator( self, s, func, where=where, nrows=nrows, start=start, stop=stop, iterator=iterator, chunksize=chunksize, auto_close=auto_close, ) return it.get_result(coordinates=True) def put( self, key: str, value: DataFrame | Series, format=None, index: bool = True, append: bool = False, complib=None, complevel: int | None = None, min_itemsize: int | dict[str, int] | None = None, nan_rep=None, data_columns: Literal[True] | list[str] | None = None, encoding=None, errors: str = "strict", track_times: bool = True, dropna: bool = False, ) -> None: """ Store object in HDFStore. Parameters ---------- key : str value : {Series, DataFrame} format : 'fixed(f)|table(t)', default is 'fixed' Format to use when storing object in HDFStore. Value can be one of: ``'fixed'`` Fixed format. Fast writing/reading. Not-appendable, nor searchable. ``'table'`` Table format. Write as a PyTables Table structure which may perform worse but allow more flexible operations like searching / selecting subsets of the data. index : bool, default True Write DataFrame index as a column. append : bool, default False This will force Table format, append the input data to the existing. data_columns : list of columns or True, default None List of columns to create as data columns, or True to use all columns. See `here <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__. encoding : str, default None Provide an encoding for strings. track_times : bool, default True Parameter is propagated to 'create_table' method of 'PyTables'. If set to False it enables to have the same h5 files (same hashes) independent on creation time. dropna : bool, default False, optional Remove missing values. .. 
versionadded:: 1.1.0 """ if format is None: format = get_option("io.hdf.default_format") or "fixed" format = self._validate_format(format) self._write_to_group( key, value, format=format, index=index, append=append, complib=complib, complevel=complevel, min_itemsize=min_itemsize, nan_rep=nan_rep, data_columns=data_columns, encoding=encoding, errors=errors, track_times=track_times, dropna=dropna, ) def remove(self, key: str, where=None, start=None, stop=None) -> None: """ Remove pandas object partially by specifying the where condition Parameters ---------- key : str Node to remove or delete rows from where : list of Term (or convertible) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection Returns ------- number of rows removed (or None if not a Table) Raises ------ raises KeyError if key is not a valid store """ where = _ensure_term(where, scope_level=1) try: s = self.get_storer(key) except KeyError: # the key is not a valid store, re-raising KeyError raise except AssertionError: # surface any assertion errors for e.g. debugging raise except Exception as err: # In tests we get here with ClosedFileError, TypeError, and # _table_mod.NoSuchNodeError. TODO: Catch only these? if where is not None: raise ValueError( "trying to remove a node with a non-None where clause!" ) from err # we are actually trying to remove a node (with children) node = self.get_node(key) if node is not None: node._f_remove(recursive=True) return None # remove the node if com.all_none(where, start, stop): s.group._f_remove(recursive=True) # delete from the table else: if not s.is_table: raise ValueError( "can only remove with where on objects written as tables" ) return s.delete(where=where, start=start, stop=stop) def append( self, key: str, value: DataFrame | Series, format=None, axes=None, index: bool | list[str] = True, append: bool = True, complib=None, complevel: int | None = None, columns=None, min_itemsize: int | dict[str, int] | None = None, nan_rep=None, chunksize=None, expectedrows=None, dropna: bool | None = None, data_columns: Literal[True] | list[str] | None = None, encoding=None, errors: str = "strict", ) -> None: """ Append to Table in file. Node must already exist and be Table format. Parameters ---------- key : str value : {Series, DataFrame} format : 'table' is the default Format to use when storing object in HDFStore. Value can be one of: ``'table'`` Table format. Write as a PyTables Table structure which may perform worse but allow more flexible operations like searching / selecting subsets of the data. index : bool, default True Write DataFrame index as a column. append : bool, default True Append the input data to the existing. data_columns : list of columns, or True, default None List of columns to create as indexed data columns for on-disk queries, or True to use all columns. By default only the axes of the object are indexed. See `here <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__. min_itemsize : dict of columns that specify minimum str sizes nan_rep : str to use as str nan representation chunksize : size to chunk the writing expectedrows : expected TOTAL row size of this table encoding : default None, provide an encoding for str dropna : bool, default False, optional Do not write an ALL nan row to the store settable by the option 'io.hdf.dropna_table'. 
Notes ----- Does *not* check if data being appended overlaps with existing data in the table, so be careful """ if columns is not None: raise TypeError( "columns is not a supported keyword in append, try data_columns" ) if dropna is None: dropna = get_option("io.hdf.dropna_table") if format is None: format = get_option("io.hdf.default_format") or "table" format = self._validate_format(format) self._write_to_group( key, value, format=format, axes=axes, index=index, append=append, complib=complib, complevel=complevel, min_itemsize=min_itemsize, nan_rep=nan_rep, chunksize=chunksize, expectedrows=expectedrows, dropna=dropna, data_columns=data_columns, encoding=encoding, errors=errors, ) def append_to_multiple( self, d: dict, value, selector, data_columns=None, axes=None, dropna: bool = False, **kwargs, ) -> None: """ Append to multiple tables Parameters ---------- d : a dict of table_name to table_columns, None is acceptable as the values of one node (this will get all the remaining columns) value : a pandas object selector : a string that designates the indexable table; all of its columns will be designed as data_columns, unless data_columns is passed, in which case these are used data_columns : list of columns to create as data columns, or True to use all columns dropna : if evaluates to True, drop rows from all tables if any single row in each table has all NaN. Default False. Notes ----- axes parameter is currently not accepted """ if axes is not None: raise TypeError( "axes is currently not accepted as a parameter to append_to_multiple; " "you can create the tables independently instead" ) if not isinstance(d, dict): raise ValueError( "append_to_multiple must have a dictionary specified as the " "way to split the value" ) if selector not in d: raise ValueError( "append_to_multiple requires a selector that is in passed dict" ) # figure out the splitting axis (the non_index_axis) axis = list(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))[0] # figure out how to split the value remain_key = None remain_values: list = [] for k, v in d.items(): if v is None: if remain_key is not None: raise ValueError( "append_to_multiple can only have one value in d that is None" ) remain_key = k else: remain_values.extend(v) if remain_key is not None: ordered = value.axes[axis] ordd = ordered.difference(Index(remain_values)) ordd = sorted(ordered.get_indexer(ordd)) d[remain_key] = ordered.take(ordd) # data_columns if data_columns is None: data_columns = d[selector] # ensure rows are synchronized across the tables if dropna: idxs = (value[cols].dropna(how="all").index for cols in d.values()) valid_index = next(idxs) for index in idxs: valid_index = valid_index.intersection(index) value = value.loc[valid_index] min_itemsize = kwargs.pop("min_itemsize", None) # append for k, v in d.items(): dc = data_columns if k == selector else None # compute the val val = value.reindex(v, axis=axis) filtered = ( {key: value for (key, value) in min_itemsize.items() if key in v} if min_itemsize is not None else None ) self.append(k, val, data_columns=dc, min_itemsize=filtered, **kwargs) def create_table_index( self, key: str, columns=None, optlevel: int | None = None, kind: str | None = None, ) -> None: """ Create a pytables index on the table. Parameters ---------- key : str columns : None, bool, or listlike[str] Indicate which columns to create an index on. * False : Do not create any indexes. * True : Create indexes on all columns. * None : Create indexes on all columns. 
* listlike : Create indexes on the given columns. optlevel : int or None, default None Optimization level, if None, pytables defaults to 6. kind : str or None, default None Kind of index, if None, pytables defaults to "medium". Raises ------ TypeError: raises if the node is not a table """ # version requirements _tables() s = self.get_storer(key) if s is None: return if not isinstance(s, Table): raise TypeError("cannot create table index on a Fixed format store") s.create_index(columns=columns, optlevel=optlevel, kind=kind) def groups(self) -> list: """ Return a list of all the top-level nodes. Each node returned is not a pandas storage object. Returns ------- list List of objects. """ _tables() self._check_if_open() assert self._handle is not None # for mypy assert _table_mod is not None # for mypy return [ g for g in self._handle.walk_groups() if ( not isinstance(g, _table_mod.link.Link) and ( getattr(g._v_attrs, "pandas_type", None) or getattr(g, "table", None) or (isinstance(g, _table_mod.table.Table) and g._v_name != "table") ) ) ] def walk(self, where: str = "/") -> Iterator[tuple[str, list[str], list[str]]]: """ Walk the pytables group hierarchy for pandas objects. This generator will yield the group path, subgroups and pandas object names for each group. Any non-pandas PyTables objects that are not a group will be ignored. The `where` group itself is listed first (preorder), then each of its child groups (following an alphanumerical order) is also traversed, following the same procedure. Parameters ---------- where : str, default "/" Group where to start walking. Yields ------ path : str Full path to a group (without trailing '/'). groups : list Names (strings) of the groups contained in `path`. leaves : list Names (strings) of the pandas objects contained in `path`. """ _tables() self._check_if_open() assert self._handle is not None # for mypy assert _table_mod is not None # for mypy for g in self._handle.walk_groups(where): if getattr(g._v_attrs, "pandas_type", None) is not None: continue groups = [] leaves = [] for child in g._v_children.values(): pandas_type = getattr(child._v_attrs, "pandas_type", None) if pandas_type is None: if isinstance(child, _table_mod.group.Group): groups.append(child._v_name) else: leaves.append(child._v_name) yield (g._v_pathname.rstrip("/"), groups, leaves) def get_node(self, key: str) -> Node | None: """return the node with the key or None if it does not exist""" self._check_if_open() if not key.startswith("/"): key = "/" + key assert self._handle is not None assert _table_mod is not None # for mypy try: node = self._handle.get_node(self.root, key) except _table_mod.exceptions.NoSuchNodeError: return None assert isinstance(node, _table_mod.Node), type(node) return node def get_storer(self, key: str) -> GenericFixed | Table: """return the storer object for a key, raise if not in the file""" group = self.get_node(key) if group is None: raise KeyError(f"No object named {key} in the file") s = self._create_storer(group) s.infer_axes() return s def copy( self, file, mode: str = "w", propindexes: bool = True, keys=None, complib=None, complevel: int | None = None, fletcher32: bool = False, overwrite: bool = True, ) -> HDFStore: """ Copy the existing store to a new file, updating in place. Parameters ---------- propindexes : bool, default True Restore indexes in copied file. keys : list, optional List of keys to include in the copy (defaults to all). overwrite : bool, default True Whether to overwrite (remove and replace) existing nodes in the new store. 
mode, complib, complevel, fletcher32 same as in HDFStore.__init__ Returns ------- open file handle of the new store """ new_store = HDFStore( file, mode=mode, complib=complib, complevel=complevel, fletcher32=fletcher32 ) if keys is None: keys = list(self.keys()) if not isinstance(keys, (tuple, list)): keys = [keys] for k in keys: s = self.get_storer(k) if s is not None: if k in new_store: if overwrite: new_store.remove(k) data = self.select(k) if isinstance(s, Table): index: bool | list[str] = False if propindexes: index = [a.name for a in s.axes if a.is_indexed] new_store.append( k, data, index=index, data_columns=getattr(s, "data_columns", None), encoding=s.encoding, ) else: new_store.put(k, data, encoding=s.encoding) return new_store def info(self) -> str: """ Print detailed information on the store. Returns ------- str """ path = pprint_thing(self._path) output = f"{type(self)}\nFile path: {path}\n" if self.is_open: lkeys = sorted(self.keys()) if len(lkeys): keys = [] values = [] for k in lkeys: try: s = self.get_storer(k) if s is not None: keys.append(pprint_thing(s.pathname or k)) values.append(pprint_thing(s or "invalid_HDFStore node")) except AssertionError: # surface any assertion errors for e.g. debugging raise except Exception as detail: keys.append(k) dstr = pprint_thing(detail) values.append(f"[invalid_HDFStore node: {dstr}]") output += adjoin(12, keys, values) else: output += "Empty" else: output += "File is CLOSED" return output # ------------------------------------------------------------------------ # private methods def _check_if_open(self): if not self.is_open: raise ClosedFileError(f"{self._path} file is not open!") def _validate_format(self, format: str) -> str: """validate / deprecate formats""" # validate try: format = _FORMAT_MAP[format.lower()] except KeyError as err: raise TypeError(f"invalid HDFStore format specified [{format}]") from err return format def _create_storer( self, group, format=None, value: DataFrame | Series | None = None, encoding: str = "UTF-8", errors: str = "strict", ) -> GenericFixed | Table: """return a suitable class to operate""" cls: type[GenericFixed] | type[Table] if value is not None and not isinstance(value, (Series, DataFrame)): raise TypeError("value must be None, Series, or DataFrame") pt = _ensure_decoded(getattr(group._v_attrs, "pandas_type", None)) tt = _ensure_decoded(getattr(group._v_attrs, "table_type", None)) # infer the pt from the passed value if pt is None: if value is None: _tables() assert _table_mod is not None # for mypy if getattr(group, "table", None) or isinstance( group, _table_mod.table.Table ): pt = "frame_table" tt = "generic_table" else: raise TypeError( "cannot create a storer if the object is not existing " "nor a value are passed" ) else: if isinstance(value, Series): pt = "series" else: pt = "frame" # we are actually a table if format == "table": pt += "_table" # a storer node if "table" not in pt: _STORER_MAP = {"series": SeriesFixed, "frame": FrameFixed} try: cls = _STORER_MAP[pt] except KeyError as err: raise TypeError( f"cannot properly create the storer for: [_STORER_MAP] [group->" f"{group},value->{type(value)},format->{format}" ) from err return cls(self, group, encoding=encoding, errors=errors) # existing node (and must be a table) if tt is None: # if we are a writer, determine the tt if value is not None: if pt == "series_table": index = getattr(value, "index", None) if index is not None: if index.nlevels == 1: tt = "appendable_series" elif index.nlevels > 1: tt = "appendable_multiseries" elif pt 
== "frame_table": index = getattr(value, "index", None) if index is not None: if index.nlevels == 1: tt = "appendable_frame" elif index.nlevels > 1: tt = "appendable_multiframe" _TABLE_MAP = { "generic_table": GenericTable, "appendable_series": AppendableSeriesTable, "appendable_multiseries": AppendableMultiSeriesTable, "appendable_frame": AppendableFrameTable, "appendable_multiframe": AppendableMultiFrameTable, "worm": WORMTable, } try: cls = _TABLE_MAP[tt] except KeyError as err: raise TypeError( f"cannot properly create the storer for: [_TABLE_MAP] [group->" f"{group},value->{type(value)},format->{format}" ) from err return cls(self, group, encoding=encoding, errors=errors) def _write_to_group( self, key: str, value: DataFrame | Series, format, axes=None, index: bool | list[str] = True, append: bool = False, complib=None, complevel: int | None = None, fletcher32=None, min_itemsize: int | dict[str, int] | None = None, chunksize=None, expectedrows=None, dropna: bool = False, nan_rep=None, data_columns=None, encoding=None, errors: str = "strict", track_times: bool = True, ) -> None: # we don't want to store a table node at all if our object is 0-len # as there are not dtypes if getattr(value, "empty", None) and (format == "table" or append): return group = self._identify_group(key, append) s = self._create_storer(group, format, value, encoding=encoding, errors=errors) if append: # raise if we are trying to append to a Fixed format, # or a table that exists (and we are putting) if not s.is_table or (s.is_table and format == "fixed" and s.is_exists): raise ValueError("Can only append to Tables") if not s.is_exists: s.set_object_info() else: s.set_object_info() if not s.is_table and complib: raise ValueError("Compression not supported on Fixed format stores") # write the object s.write( obj=value, axes=axes, append=append, complib=complib, complevel=complevel, fletcher32=fletcher32, min_itemsize=min_itemsize, chunksize=chunksize, expectedrows=expectedrows, dropna=dropna, nan_rep=nan_rep, data_columns=data_columns, track_times=track_times, ) if isinstance(s, Table) and index: s.create_index(columns=index) def _read_group(self, group: Node): s = self._create_storer(group) s.infer_axes() return s.read() def _identify_group(self, key: str, append: bool) -> Node: """Identify HDF5 group based on key, delete/create group if needed.""" group = self.get_node(key) # we make this assertion for mypy; the get_node call will already # have raised if this is incorrect assert self._handle is not None # remove the node if we are not appending if group is not None and not append: self._handle.remove_node(group, recursive=True) group = None if group is None: group = self._create_nodes_and_group(key) return group def _create_nodes_and_group(self, key: str) -> Node: """Create nodes from key and return group name.""" # assertion for mypy assert self._handle is not None paths = key.split("/") # recursively create the groups path = "/" for p in paths: if not len(p): continue new_path = path if not path.endswith("/"): new_path += "/" new_path += p group = self.get_node(new_path) if group is None: group = self._handle.create_group(path, p) path = new_path return group FilePath = Union[str, "PathLike[str]"] def stringify_path(filepath_or_buffer: FilePath, convert_file_like: bool = ...) -> str: ... def stringify_path( filepath_or_buffer: BaseBufferT, convert_file_like: bool = ... ) -> BaseBufferT: ... 
def stringify_path( filepath_or_buffer: FilePath | BaseBufferT, convert_file_like: bool = False, ) -> str | BaseBufferT: """ Attempt to convert a path-like object to a string. Parameters ---------- filepath_or_buffer : object to be converted Returns ------- str_filepath_or_buffer : maybe a string version of the object Notes ----- Objects supporting the fspath protocol (python 3.6+) are coerced according to its __fspath__ method. Any other object is passed through unchanged, which includes bytes, strings, buffers, or anything else that's not even path-like. """ if not convert_file_like and is_file_like(filepath_or_buffer): # GH 38125: some fsspec objects implement os.PathLike but have already opened a # file. This prevents opening the file a second time. infer_compression calls # this function with convert_file_like=True to infer the compression. return cast(BaseBufferT, filepath_or_buffer) if isinstance(filepath_or_buffer, os.PathLike): filepath_or_buffer = filepath_or_buffer.__fspath__() return _expand_user(filepath_or_buffer) ) The provided code snippet includes necessary dependencies for implementing the `read_hdf` function. Write a Python function `def read_hdf( path_or_buf: FilePath | HDFStore, key=None, mode: str = "r", errors: str = "strict", where: str | list | None = None, start: int | None = None, stop: int | None = None, columns: list[str] | None = None, iterator: bool = False, chunksize: int | None = None, **kwargs, )` to solve the following problem: Read from the store, close it if we opened it. Retrieve pandas object stored in file, optionally based on where criteria. .. warning:: Pandas uses PyTables for reading and writing HDF5 files, which allows serializing object-dtype data with pickle when using the "fixed" format. Loading pickled data received from untrusted sources can be unsafe. See: https://docs.python.org/3/library/pickle.html for more. Parameters ---------- path_or_buf : str, path object, pandas.HDFStore Any valid string path is acceptable. Only supports the local file system, remote URLs and file-like objects are not supported. If you want to pass in a path object, pandas accepts any ``os.PathLike``. Alternatively, pandas accepts an open :class:`pandas.HDFStore` object. key : object, optional The group identifier in the store. Can be omitted if the HDF file contains a single pandas object. mode : {'r', 'r+', 'a'}, default 'r' Mode to use when opening the file. Ignored if path_or_buf is a :class:`pandas.HDFStore`. Default is 'r'. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. where : list, optional A list of Term (or convertible) objects. start : int, optional Row number to start selection. stop : int, optional Row number to stop selection. columns : list, optional A list of columns names to return. iterator : bool, optional Return an iterator object. chunksize : int, optional Number of rows to include in an iteration when using an iterator. **kwargs Additional keyword arguments passed to HDFStore. Returns ------- object The selected object. Return type depends on the object stored. See Also -------- DataFrame.to_hdf : Write a HDF file from a DataFrame. HDFStore : Low-level access to HDF files. 
Examples -------- >>> df = pd.DataFrame([[1, 1.0, 'a']], columns=['x', 'y', 'z']) # doctest: +SKIP >>> df.to_hdf('./store.h5', 'data') # doctest: +SKIP >>> reread = pd.read_hdf('./store.h5') # doctest: +SKIP Here is the function: def read_hdf( path_or_buf: FilePath | HDFStore, key=None, mode: str = "r", errors: str = "strict", where: str | list | None = None, start: int | None = None, stop: int | None = None, columns: list[str] | None = None, iterator: bool = False, chunksize: int | None = None, **kwargs, ): """ Read from the store, close it if we opened it. Retrieve pandas object stored in file, optionally based on where criteria. .. warning:: Pandas uses PyTables for reading and writing HDF5 files, which allows serializing object-dtype data with pickle when using the "fixed" format. Loading pickled data received from untrusted sources can be unsafe. See: https://docs.python.org/3/library/pickle.html for more. Parameters ---------- path_or_buf : str, path object, pandas.HDFStore Any valid string path is acceptable. Only supports the local file system, remote URLs and file-like objects are not supported. If you want to pass in a path object, pandas accepts any ``os.PathLike``. Alternatively, pandas accepts an open :class:`pandas.HDFStore` object. key : object, optional The group identifier in the store. Can be omitted if the HDF file contains a single pandas object. mode : {'r', 'r+', 'a'}, default 'r' Mode to use when opening the file. Ignored if path_or_buf is a :class:`pandas.HDFStore`. Default is 'r'. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. where : list, optional A list of Term (or convertible) objects. start : int, optional Row number to start selection. stop : int, optional Row number to stop selection. columns : list, optional A list of columns names to return. iterator : bool, optional Return an iterator object. chunksize : int, optional Number of rows to include in an iteration when using an iterator. **kwargs Additional keyword arguments passed to HDFStore. Returns ------- object The selected object. Return type depends on the object stored. See Also -------- DataFrame.to_hdf : Write a HDF file from a DataFrame. HDFStore : Low-level access to HDF files. Examples -------- >>> df = pd.DataFrame([[1, 1.0, 'a']], columns=['x', 'y', 'z']) # doctest: +SKIP >>> df.to_hdf('./store.h5', 'data') # doctest: +SKIP >>> reread = pd.read_hdf('./store.h5') # doctest: +SKIP """ if mode not in ["r", "r+", "a"]: raise ValueError( f"mode {mode} is not allowed while performing a read. " f"Allowed modes are r, r+ and a." ) # grab the scope if where is not None: where = _ensure_term(where, scope_level=1) if isinstance(path_or_buf, HDFStore): if not path_or_buf.is_open: raise OSError("The HDFStore must be open for reading.") store = path_or_buf auto_close = False else: path_or_buf = stringify_path(path_or_buf) if not isinstance(path_or_buf, str): raise NotImplementedError( "Support for generic buffers has not been implemented." 
            )

        try:
            exists = os.path.exists(path_or_buf)
        # if filepath is too long
        except (TypeError, ValueError):
            exists = False

        if not exists:
            raise FileNotFoundError(f"File {path_or_buf} does not exist")

        store = HDFStore(path_or_buf, mode=mode, errors=errors, **kwargs)
        # can't auto open/close if we are using an iterator
        # so delegate to the iterator
        auto_close = True

    try:
        if key is None:
            groups = store.groups()
            if len(groups) == 0:
                raise ValueError(
                    "Dataset(s) incompatible with Pandas data types, "
                    "not table, or no datasets found in HDF5 file."
                )
            candidate_only_group = groups[0]

            # For the HDF file to have only one dataset, all other groups
            # should then be metadata groups for that candidate group. (This
            # assumes that the groups() method enumerates parent groups
            # before their children.)
            for group_to_check in groups[1:]:
                if not _is_metadata_of(group_to_check, candidate_only_group):
                    raise ValueError(
                        "key must be provided when HDF5 "
                        "file contains multiple datasets."
                    )
            key = candidate_only_group._v_pathname
        return store.select(
            key,
            where=where,
            start=start,
            stop=stop,
            columns=columns,
            iterator=iterator,
            chunksize=chunksize,
            auto_close=auto_close,
        )
    except (ValueError, TypeError, KeyError):
        if not isinstance(path_or_buf, HDFStore):
            # if there is an error, close the store if we opened it.
            with suppress(AttributeError):
                store.close()

        raise
Read from the store, close it if we opened it.

Retrieve pandas object stored in file, optionally based on where
criteria.

.. warning::

   Pandas uses PyTables for reading and writing HDF5 files, which allows
   serializing object-dtype data with pickle when using the "fixed" format.
   Loading pickled data received from untrusted sources can be unsafe.

   See: https://docs.python.org/3/library/pickle.html for more.

Parameters
----------
path_or_buf : str, path object, pandas.HDFStore
    Any valid string path is acceptable. Only supports the local file system,
    remote URLs and file-like objects are not supported.

    If you want to pass in a path object, pandas accepts any
    ``os.PathLike``.

    Alternatively, pandas accepts an open :class:`pandas.HDFStore` object.
key : object, optional
    The group identifier in the store. Can be omitted if the HDF file
    contains a single pandas object.
mode : {'r', 'r+', 'a'}, default 'r'
    Mode to use when opening the file. Ignored if path_or_buf is a
    :class:`pandas.HDFStore`. Default is 'r'.
errors : str, default 'strict'
    Specifies how encoding and decoding errors are to be handled.
    See the errors argument for :func:`open` for a full list
    of options.
where : list, optional
    A list of Term (or convertible) objects.
start : int, optional
    Row number to start selection.
stop : int, optional
    Row number to stop selection.
columns : list, optional
    A list of columns names to return.
iterator : bool, optional
    Return an iterator object.
chunksize : int, optional
    Number of rows to include in an iteration when using an iterator.
**kwargs
    Additional keyword arguments passed to HDFStore.

Returns
-------
object
    The selected object. Return type depends on the object stored.

See Also
--------
DataFrame.to_hdf : Write a HDF file from a DataFrame.
HDFStore : Low-level access to HDF files.

Examples
--------
>>> df = pd.DataFrame([[1, 1.0, 'a']], columns=['x', 'y', 'z'])  # doctest: +SKIP
>>> df.to_hdf('./store.h5', 'data')  # doctest: +SKIP
>>> reread = pd.read_hdf('./store.h5')  # doctest: +SKIP
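A minimal, self-contained round trip exercising the function above (a sketch: it assumes PyTables is installed, and the file name demo.h5 and the column names are made up):

import numpy as np
import pandas as pd

# Write in "table" format with an indexed data column so the where=
# clause below filters rows on disk instead of after loading.
df = pd.DataFrame({"x": np.arange(10), "y": np.arange(10) * 2.0})
df.to_hdf("demo.h5", key="data", format="table", data_columns=["x"])

# read_hdf opens the store, delegates to HDFStore.select, and closes
# the store when done (auto_close=True in the non-HDFStore branch).
subset = pd.read_hdf("demo.h5", key="data", where="x > 6")
print(subset)  # rows where x is 7, 8, 9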
173,512
from __future__ import annotations from contextlib import suppress import copy from datetime import ( date, tzinfo, ) import itertools import os import re from textwrap import dedent from types import TracebackType from typing import ( TYPE_CHECKING, Any, Callable, Final, Hashable, Iterator, Literal, Sequence, cast, overload, ) import warnings import numpy as np from pandas._config import ( config, get_option, ) from pandas._libs import ( lib, writers as libwriters, ) from pandas._libs.tslibs import timezones from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeArg, FilePath, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.compat.pickle_compat import patch_pickle from pandas.errors import ( AttributeConflictWarning, ClosedFileError, IncompatibilityWarning, PerformanceWarning, PossibleDataLossError, ) from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_bool_dtype, is_categorical_dtype, is_complex_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_extension_array_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_string_dtype, is_timedelta64_dtype, needs_i8_conversion, ) from pandas.core.dtypes.missing import array_equivalent from pandas import ( DataFrame, DatetimeIndex, Index, MultiIndex, PeriodIndex, RangeIndex, Series, TimedeltaIndex, concat, isna, ) from pandas.core.arrays import ( Categorical, DatetimeArray, PeriodArray, ) import pandas.core.common as com from pandas.core.computation.pytables import ( PyTablesExpr, maybe_expression, ) from pandas.core.construction import extract_array from pandas.core.indexes.api import ensure_index from pandas.core.internals import ( ArrayManager, BlockManager, ) from pandas.io.common import stringify_path from pandas.io.formats.printing import ( adjoin, pprint_thing, ) AxisInt = int def _reindex_axis( obj: DataFrame, axis: AxisInt, labels: Index, other=None ) -> DataFrame: ax = obj._get_axis(axis) labels = ensure_index(labels) # try not to reindex even if other is provided # if it equals our current index if other is not None: other = ensure_index(other) if (other is None or labels.equals(other)) and labels.equals(ax): return obj labels = ensure_index(labels.unique()) if other is not None: labels = ensure_index(other.unique()).intersection(labels, sort=False) if not labels.equals(ax): slicer: list[slice | Index] = [slice(None, None)] * obj.ndim slicer[axis] = labels obj = obj.loc[tuple(slicer)] return obj
null
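_reindex_axis is internal, but its effect is easy to mirror with public indexing; a rough sketch (the frame and labels here are hypothetical):

import pandas as pd

# Slice the frame down to `labels` along the chosen axis only when the
# axis does not already equal them, as _reindex_axis does.
df = pd.DataFrame({"a": [1], "b": [2], "c": [3]})
labels = pd.Index(["c", "a"])
if not labels.equals(df.columns):
    df = df.loc[:, labels]
print(df)  # columns reordered to ['c', 'a']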
173,513
from __future__ import annotations from contextlib import suppress import copy from datetime import ( date, tzinfo, ) import itertools import os import re from textwrap import dedent from types import TracebackType from typing import ( TYPE_CHECKING, Any, Callable, Final, Hashable, Iterator, Literal, Sequence, cast, overload, ) import warnings import numpy as np from pandas._config import ( config, get_option, ) from pandas._libs import ( lib, writers as libwriters, ) from pandas._libs.tslibs import timezones from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeArg, FilePath, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.compat.pickle_compat import patch_pickle from pandas.errors import ( AttributeConflictWarning, ClosedFileError, IncompatibilityWarning, PerformanceWarning, PossibleDataLossError, ) from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_bool_dtype, is_categorical_dtype, is_complex_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_extension_array_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_string_dtype, is_timedelta64_dtype, needs_i8_conversion, ) from pandas.core.dtypes.missing import array_equivalent from pandas import ( DataFrame, DatetimeIndex, Index, MultiIndex, PeriodIndex, RangeIndex, Series, TimedeltaIndex, concat, isna, ) from pandas.core.arrays import ( Categorical, DatetimeArray, PeriodArray, ) import pandas.core.common as com from pandas.core.computation.pytables import ( PyTablesExpr, maybe_expression, ) from pandas.core.construction import extract_array from pandas.core.indexes.api import ensure_index from pandas.core.internals import ( ArrayManager, BlockManager, ) from pandas.io.common import stringify_path from pandas.io.formats.printing import ( adjoin, pprint_thing, ) class tzinfo: def tzname(self, dt: Optional[datetime]) -> Optional[str]: ... def utcoffset(self, dt: Optional[datetime]) -> Optional[timedelta]: ... def dst(self, dt: Optional[datetime]) -> Optional[timedelta]: ... def fromutc(self, dt: datetime) -> datetime: ... The provided code snippet includes necessary dependencies for implementing the `_get_tz` function. Write a Python function `def _get_tz(tz: tzinfo) -> str | tzinfo` to solve the following problem: for a tz-aware type, return an encoded zone Here is the function: def _get_tz(tz: tzinfo) -> str | tzinfo: """for a tz-aware type, return an encoded zone""" zone = timezones.get_timezone(tz) return zone
for a tz-aware type, return an encoded zone
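Sketch of what the wrapped timezones.get_timezone call yields, using pytz as the tz provider (an assumption; the helper itself is provider-agnostic):

import pytz
from pandas._libs.tslibs import timezones

# For a named zone, get_timezone returns the zone string, which is the
# form stored in the HDF5 node attributes and re-hydrated on read.
tz = pytz.timezone("US/Eastern")
print(timezones.get_timezone(tz))  # 'US/Eastern'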
173,514
from __future__ import annotations from contextlib import suppress import copy from datetime import ( date, tzinfo, ) import itertools import os import re from textwrap import dedent from types import TracebackType from typing import ( TYPE_CHECKING, Any, Callable, Final, Hashable, Iterator, Literal, Sequence, cast, overload, ) import warnings import numpy as np from pandas._config import ( config, get_option, ) from pandas._libs import ( lib, writers as libwriters, ) from pandas._libs.tslibs import timezones from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeArg, FilePath, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.compat.pickle_compat import patch_pickle from pandas.errors import ( AttributeConflictWarning, ClosedFileError, IncompatibilityWarning, PerformanceWarning, PossibleDataLossError, ) from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_bool_dtype, is_categorical_dtype, is_complex_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_extension_array_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_string_dtype, is_timedelta64_dtype, needs_i8_conversion, ) from pandas.core.dtypes.missing import array_equivalent from pandas import ( DataFrame, DatetimeIndex, Index, MultiIndex, PeriodIndex, RangeIndex, Series, TimedeltaIndex, concat, isna, ) from pandas.core.arrays import ( Categorical, DatetimeArray, PeriodArray, ) import pandas.core.common as com from pandas.core.computation.pytables import ( PyTablesExpr, maybe_expression, ) from pandas.core.construction import extract_array from pandas.core.indexes.api import ensure_index from pandas.core.internals import ( ArrayManager, BlockManager, ) from pandas.io.common import stringify_path from pandas.io.formats.printing import ( adjoin, pprint_thing, ) class tzinfo: def tzname(self, dt: Optional[datetime]) -> Optional[str]: ... def utcoffset(self, dt: Optional[datetime]) -> Optional[timedelta]: ... def dst(self, dt: Optional[datetime]) -> Optional[timedelta]: ... def fromutc(self, dt: datetime) -> datetime: ... def _set_tz( values: np.ndarray | Index, tz: str | tzinfo, coerce: bool = False ) -> DatetimeIndex: ...
null
173,515
from __future__ import annotations from contextlib import suppress import copy from datetime import ( date, tzinfo, ) import itertools import os import re from textwrap import dedent from types import TracebackType from typing import ( TYPE_CHECKING, Any, Callable, Final, Hashable, Iterator, Literal, Sequence, cast, overload, ) import warnings import numpy as np from pandas._config import ( config, get_option, ) from pandas._libs import ( lib, writers as libwriters, ) from pandas._libs.tslibs import timezones from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeArg, FilePath, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.compat.pickle_compat import patch_pickle from pandas.errors import ( AttributeConflictWarning, ClosedFileError, IncompatibilityWarning, PerformanceWarning, PossibleDataLossError, ) from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_bool_dtype, is_categorical_dtype, is_complex_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_extension_array_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_string_dtype, is_timedelta64_dtype, needs_i8_conversion, ) from pandas.core.dtypes.missing import array_equivalent from pandas import ( DataFrame, DatetimeIndex, Index, MultiIndex, PeriodIndex, RangeIndex, Series, TimedeltaIndex, concat, isna, ) from pandas.core.arrays import ( Categorical, DatetimeArray, PeriodArray, ) import pandas.core.common as com from pandas.core.computation.pytables import ( PyTablesExpr, maybe_expression, ) from pandas.core.construction import extract_array from pandas.core.indexes.api import ensure_index from pandas.core.internals import ( ArrayManager, BlockManager, ) from pandas.io.common import stringify_path from pandas.io.formats.printing import ( adjoin, pprint_thing, ) def _set_tz(values: np.ndarray | Index, tz: None, coerce: bool = False) -> np.ndarray: ...
null
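The two rows above are typing stubs for _set_tz; in source form they would carry typing.overload decorators. A sketch of that pattern (not the verbatim pandas module, and the imports are assumptions):

from __future__ import annotations

from datetime import tzinfo
from typing import overload

import numpy as np
from pandas import DatetimeIndex, Index

@overload
def _set_tz(
    values: np.ndarray | Index, tz: str | tzinfo, coerce: bool = False
) -> DatetimeIndex:
    ...

@overload
def _set_tz(values: np.ndarray | Index, tz: None, coerce: bool = False) -> np.ndarray:
    ...

def _set_tz(values, tz, coerce: bool = False):
    # Runtime implementation elided here; the full version follows in
    # the next row of this dump.
    ...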
173,516
from __future__ import annotations from contextlib import suppress import copy from datetime import ( date, tzinfo, ) import itertools import os import re from textwrap import dedent from types import TracebackType from typing import ( TYPE_CHECKING, Any, Callable, Final, Hashable, Iterator, Literal, Sequence, cast, overload, ) import warnings import numpy as np from pandas._config import ( config, get_option, ) from pandas._libs import ( lib, writers as libwriters, ) from pandas._libs.tslibs import timezones from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeArg, FilePath, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.compat.pickle_compat import patch_pickle from pandas.errors import ( AttributeConflictWarning, ClosedFileError, IncompatibilityWarning, PerformanceWarning, PossibleDataLossError, ) from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_bool_dtype, is_categorical_dtype, is_complex_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_extension_array_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_string_dtype, is_timedelta64_dtype, needs_i8_conversion, ) from pandas.core.dtypes.missing import array_equivalent from pandas import ( DataFrame, DatetimeIndex, Index, MultiIndex, PeriodIndex, RangeIndex, Series, TimedeltaIndex, concat, isna, ) from pandas.core.arrays import ( Categorical, DatetimeArray, PeriodArray, ) import pandas.core.common as com from pandas.core.computation.pytables import ( PyTablesExpr, maybe_expression, ) from pandas.core.construction import extract_array from pandas.core.indexes.api import ensure_index from pandas.core.internals import ( ArrayManager, BlockManager, ) from pandas.io.common import stringify_path from pandas.io.formats.printing import ( adjoin, pprint_thing, ) def _ensure_decoded(s): """if we have bytes, decode them to unicode""" if isinstance(s, np.bytes_): s = s.decode("UTF-8") return s class tzinfo: def tzname(self, dt: Optional[datetime]) -> Optional[str]: ... def utcoffset(self, dt: Optional[datetime]) -> Optional[timedelta]: ... def dst(self, dt: Optional[datetime]) -> Optional[timedelta]: ... def fromutc(self, dt: datetime) -> datetime: ... The provided code snippet includes necessary dependencies for implementing the `_set_tz` function. Write a Python function `def _set_tz( values: np.ndarray | Index, tz: str | tzinfo | None, coerce: bool = False ) -> np.ndarray | DatetimeIndex` to solve the following problem: coerce the values to a DatetimeIndex if tz is set preserve the input shape if possible Parameters ---------- values : ndarray or Index tz : str or tzinfo coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray Here is the function: def _set_tz( values: np.ndarray | Index, tz: str | tzinfo | None, coerce: bool = False ) -> np.ndarray | DatetimeIndex: """ coerce the values to a DatetimeIndex if tz is set preserve the input shape if possible Parameters ---------- values : ndarray or Index tz : str or tzinfo coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray """ if isinstance(values, DatetimeIndex): # If values is tzaware, the tz gets dropped in the values.ravel() # call below (which returns an ndarray). So we are only non-lossy # if `tz` matches `values.tz`. 
        assert values.tz is None or values.tz == tz

    if tz is not None:
        if isinstance(values, DatetimeIndex):
            name = values.name
            values = values.asi8
        else:
            name = None
            values = values.ravel()

        tz = _ensure_decoded(tz)
        values = DatetimeIndex(values, name=name)
        values = values.tz_localize("UTC").tz_convert(tz)
    elif coerce:
        values = np.asarray(values, dtype="M8[ns]")

    # error: Incompatible return value type (got "Union[ndarray, Index]",
    # expected "Union[ndarray, DatetimeIndex]")
    return values  # type: ignore[return-value]
coerce the values to a DatetimeIndex if tz is set
preserve the input shape if possible

Parameters
----------
values : ndarray or Index
tz : str or tzinfo
coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray
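The tz branch above is the standard localize-then-convert idiom; the same round trip through public API (the timestamp and zone are arbitrary):

import numpy as np
import pandas as pd

# HDF5 stores the datetimes as naive UTC values; restoring them means
# localizing to UTC and then converting to the recorded zone.
raw = np.array(["2021-01-01T12:00"], dtype="M8[ns]")
idx = pd.DatetimeIndex(raw).tz_localize("UTC").tz_convert("US/Eastern")
print(idx[0])  # 2021-01-01 07:00:00-05:00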
173,517
from __future__ import annotations from contextlib import suppress import copy from datetime import ( date, tzinfo, ) import itertools import os import re from textwrap import dedent from types import TracebackType from typing import ( TYPE_CHECKING, Any, Callable, Final, Hashable, Iterator, Literal, Sequence, cast, overload, ) import warnings import numpy as np from pandas._config import ( config, get_option, ) from pandas._libs import ( lib, writers as libwriters, ) from pandas._libs.tslibs import timezones from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeArg, FilePath, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.compat.pickle_compat import patch_pickle from pandas.errors import ( AttributeConflictWarning, ClosedFileError, IncompatibilityWarning, PerformanceWarning, PossibleDataLossError, ) from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_bool_dtype, is_categorical_dtype, is_complex_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_extension_array_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_string_dtype, is_timedelta64_dtype, needs_i8_conversion, ) from pandas.core.dtypes.missing import array_equivalent from pandas import ( DataFrame, DatetimeIndex, Index, MultiIndex, PeriodIndex, RangeIndex, Series, TimedeltaIndex, concat, isna, ) from pandas.core.arrays import ( Categorical, DatetimeArray, PeriodArray, ) import pandas.core.common as com from pandas.core.computation.pytables import ( PyTablesExpr, maybe_expression, ) from pandas.core.construction import extract_array from pandas.core.indexes.api import ensure_index from pandas.core.internals import ( ArrayManager, BlockManager, ) from pandas.io.common import stringify_path from pandas.io.formats.printing import ( adjoin, pprint_thing, ) def _tables(): class IndexCol: def __init__( self, name: str, values=None, kind=None, typ=None, cname: str | None = None, axis=None, pos=None, freq=None, tz=None, index_name=None, ordered=None, table=None, meta=None, metadata=None, ) -> None: def itemsize(self) -> int: def kind_attr(self) -> str: def set_pos(self, pos: int) -> None: def __repr__(self) -> str: def __eq__(self, other: Any) -> bool: def __ne__(self, other) -> bool: def is_indexed(self) -> bool: def convert( self, values: np.ndarray, nan_rep, encoding: str, errors: str ) -> tuple[np.ndarray, np.ndarray] | tuple[Index, Index]: def take_data(self): def attrs(self): def description(self): def col(self): def cvalues(self): def __iter__(self) -> Iterator: def maybe_set_size(self, min_itemsize=None) -> None: def validate_names(self) -> None: def validate_and_set(self, handler: AppendableTable, append: bool) -> None: def validate_col(self, itemsize=None): def validate_attr(self, append: bool) -> None: def update_info(self, info) -> None: def set_info(self, info) -> None: def set_attr(self) -> None: def validate_metadata(self, handler: AppendableTable) -> None: def write_metadata(self, handler: AppendableTable) -> None: class DataIndexableCol(DataCol): def validate_names(self) -> None: def get_atom_string(cls, shape, itemsize): def get_atom_data(cls, shape, kind: str) -> Col: def get_atom_datetime64(cls, shape): def get_atom_timedelta64(cls, shape): def _convert_string_array(data: np.ndarray, encoding: str, errors: str) -> np.ndarray: def _dtype_to_kind(dtype_str: str) -> str: def _get_data_and_dtype_name(data: ArrayLike): def is_integer_dtype(arr_or_dtype) 
-> bool:
def needs_i8_conversion(arr_or_dtype) -> bool:
def is_bool_dtype(arr_or_dtype) -> bool:

def _convert_index(name: str, index: Index, encoding: str, errors: str) -> IndexCol:
    assert isinstance(name, str)

    index_name = index.name
    # error: Argument 1 to "_get_data_and_dtype_name" has incompatible type "Index";
    # expected "Union[ExtensionArray, ndarray]"
    converted, dtype_name = _get_data_and_dtype_name(index)  # type: ignore[arg-type]
    kind = _dtype_to_kind(dtype_name)
    atom = DataIndexableCol._get_atom(converted)

    if (
        (isinstance(index.dtype, np.dtype) and is_integer_dtype(index))
        or needs_i8_conversion(index.dtype)
        or is_bool_dtype(index.dtype)
    ):
        # Includes Index, RangeIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex,
        # in which case "kind" is "integer", "integer", "datetime64",
        # "timedelta64", and "integer", respectively.
        return IndexCol(
            name,
            values=converted,
            kind=kind,
            typ=atom,
            freq=getattr(index, "freq", None),
            tz=getattr(index, "tz", None),
            index_name=index_name,
        )

    if isinstance(index, MultiIndex):
        raise TypeError("MultiIndex not supported here!")

    inferred_type = lib.infer_dtype(index, skipna=False)
    # we won't get inferred_type of "datetime64" or "timedelta64" as these
    # would go through the DatetimeIndex/TimedeltaIndex paths above

    values = np.asarray(index)

    if inferred_type == "date":
        converted = np.asarray([v.toordinal() for v in values], dtype=np.int32)
        return IndexCol(
            name, converted, "date", _tables().Time32Col(), index_name=index_name
        )
    elif inferred_type == "string":
        converted = _convert_string_array(values, encoding, errors)
        itemsize = converted.dtype.itemsize
        return IndexCol(
            name,
            converted,
            "string",
            _tables().StringCol(itemsize),
            index_name=index_name,
        )
    elif inferred_type in ["integer", "floating"]:
        return IndexCol(
            name, values=converted, kind=kind, typ=atom, index_name=index_name
        )
    else:
        assert isinstance(converted, np.ndarray) and converted.dtype == object
        assert kind == "object", kind
        atom = _tables().ObjectAtom()
        return IndexCol(name, converted, kind, atom, index_name=index_name)
null
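To make the "date" branch of _convert_index above concrete, here is a minimal, self-contained round-trip sketch: dates are stored as int32 ordinals via toordinal() and recovered with fromordinal(). The sample values are hypothetical, and the real path additionally wraps the array in a PyTables Time32Col, which this sketch omits.

from datetime import date

import numpy as np

values = np.asarray([date(2020, 1, 1), date(2020, 6, 15)], dtype=object)
converted = np.asarray([v.toordinal() for v in values], dtype=np.int32)  # stored form
restored = np.asarray([date.fromordinal(int(v)) for v in converted], dtype=object)
assert (restored == values).all()  # lossless round trip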
173,518
from __future__ import annotations from contextlib import suppress import copy from datetime import ( date, tzinfo, ) import itertools import os import re from textwrap import dedent from types import TracebackType from typing import ( TYPE_CHECKING, Any, Callable, Final, Hashable, Iterator, Literal, Sequence, cast, overload, ) import warnings import numpy as np from pandas._config import ( config, get_option, ) from pandas._libs import ( lib, writers as libwriters, ) from pandas._libs.tslibs import timezones from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeArg, FilePath, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.compat.pickle_compat import patch_pickle from pandas.errors import ( AttributeConflictWarning, ClosedFileError, IncompatibilityWarning, PerformanceWarning, PossibleDataLossError, ) from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_bool_dtype, is_categorical_dtype, is_complex_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_extension_array_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_string_dtype, is_timedelta64_dtype, needs_i8_conversion, ) from pandas.core.dtypes.missing import array_equivalent from pandas import ( DataFrame, DatetimeIndex, Index, MultiIndex, PeriodIndex, RangeIndex, Series, TimedeltaIndex, concat, isna, ) from pandas.core.arrays import ( Categorical, DatetimeArray, PeriodArray, ) import pandas.core.common as com from pandas.core.computation.pytables import ( PyTablesExpr, maybe_expression, ) from pandas.core.construction import extract_array from pandas.core.indexes.api import ensure_index from pandas.core.internals import ( ArrayManager, BlockManager, ) from pandas.io.common import stringify_path from pandas.io.formats.printing import ( adjoin, pprint_thing, ) def _unconvert_string_array( data: np.ndarray, nan_rep, encoding: str, errors: str ) -> np.ndarray: class date: def __new__(cls: Type[_S], year: int, month: int, day: int) -> _S: def fromtimestamp(cls: Type[_S], __timestamp: float) -> _S: def today(cls: Type[_S]) -> _S: def fromordinal(cls: Type[_S], n: int) -> _S: def fromisoformat(cls: Type[_S], date_string: str) -> _S: def fromisocalendar(cls: Type[_S], year: int, week: int, day: int) -> _S: def year(self) -> int: def month(self) -> int: def day(self) -> int: def ctime(self) -> str: def strftime(self, fmt: _Text) -> str: def __format__(self, fmt: str) -> str: def __format__(self, fmt: AnyStr) -> AnyStr: def isoformat(self) -> str: def timetuple(self) -> struct_time: def toordinal(self) -> int: def replace(self, year: int = ..., month: int = ..., day: int = ...) 
-> date:
def __le__(self, other: date) -> bool:
def __lt__(self, other: date) -> bool:
def __ge__(self, other: date) -> bool:
def __gt__(self, other: date) -> bool:
def __add__(self: _S, other: timedelta) -> _S:
def __radd__(self: _S, other: timedelta) -> _S:
def __add__(self, other: timedelta) -> date:
def __radd__(self, other: timedelta) -> date:
def __sub__(self, other: timedelta) -> date:
def __sub__(self, other: date) -> timedelta:
def __hash__(self) -> int:
def weekday(self) -> int:
def isoweekday(self) -> int:
def isocalendar(self) -> Tuple[int, int, int]:

def _unconvert_index(data, kind: str, encoding: str, errors: str) -> np.ndarray | Index:
    index: Index | np.ndarray
    if kind == "datetime64":
        index = DatetimeIndex(data)
    elif kind == "timedelta64":
        index = TimedeltaIndex(data)
    elif kind == "date":
        try:
            index = np.asarray([date.fromordinal(v) for v in data], dtype=object)
        except ValueError:
            index = np.asarray([date.fromtimestamp(v) for v in data], dtype=object)
    elif kind in ("integer", "float", "bool"):
        index = np.asarray(data)
    elif kind == "string":  # fixed: `kind in ("string")` was a substring test
        index = _unconvert_string_array(
            data, nan_rep=None, encoding=encoding, errors=errors
        )
    elif kind == "object":
        index = np.asarray(data[0])
    else:  # pragma: no cover
        raise ValueError(f"unrecognized index type {kind}")
    return index
null
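A hedged sketch of the "datetime64" and "timedelta64" branches of _unconvert_index above, using only public pandas types; the int64 nanosecond payload below is hypothetical stored data standing in for what HDF5 would hand back.

import numpy as np
import pandas as pd

stored = np.array([0, 86_400_000_000_000], dtype="i8")  # nanoseconds since the epoch
print(pd.DatetimeIndex(stored.view("M8[ns]")))   # ['1970-01-01', '1970-01-02']
print(pd.TimedeltaIndex(stored.view("m8[ns]")))  # ['0 days', '1 days']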
173,519
from __future__ import annotations from contextlib import suppress import copy from datetime import ( date, tzinfo, ) import itertools import os import re from textwrap import dedent from types import TracebackType from typing import ( TYPE_CHECKING, Any, Callable, Final, Hashable, Iterator, Literal, Sequence, cast, overload, ) import warnings import numpy as np from pandas._config import ( config, get_option, ) from pandas._libs import ( lib, writers as libwriters, ) from pandas._libs.tslibs import timezones from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeArg, FilePath, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.compat.pickle_compat import patch_pickle from pandas.errors import ( AttributeConflictWarning, ClosedFileError, IncompatibilityWarning, PerformanceWarning, PossibleDataLossError, ) from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_bool_dtype, is_categorical_dtype, is_complex_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_extension_array_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_string_dtype, is_timedelta64_dtype, needs_i8_conversion, ) from pandas.core.dtypes.missing import array_equivalent from pandas import ( DataFrame, DatetimeIndex, Index, MultiIndex, PeriodIndex, RangeIndex, Series, TimedeltaIndex, concat, isna, ) from pandas.core.arrays import ( Categorical, DatetimeArray, PeriodArray, ) import pandas.core.common as com from pandas.core.computation.pytables import ( PyTablesExpr, maybe_expression, ) from pandas.core.construction import extract_array from pandas.core.indexes.api import ensure_index from pandas.core.internals import ( ArrayManager, BlockManager, ) from pandas.io.common import stringify_path from pandas.io.formats.printing import ( adjoin, pprint_thing, ) def _convert_string_array(data: np.ndarray, encoding: str, errors: str) -> np.ndarray: """ Take a string-like that is object dtype and coerce to a fixed size string type. Parameters ---------- data : np.ndarray[object] encoding : str errors : str Handler for encoding errors. Returns ------- np.ndarray[fixed-length-string] """ # encode if needed if len(data): data = ( Series(data.ravel(), copy=False) .str.encode(encoding, errors) ._values.reshape(data.shape) ) # create the sized dtype ensured = ensure_object(data.ravel()) itemsize = max(1, libwriters.max_len_string_array(ensured)) data = np.asarray(data, dtype=f"S{itemsize}") return data def cast(typ: Type[_T], val: Any) -> _T: ... def cast(typ: str, val: Any) -> Any: ... def cast(typ: object, val: Any) -> Any: ... 
ArrayLike = Union["ExtensionArray", np.ndarray] def _maybe_convert_for_string_atom( name: str, bvalues: ArrayLike, existing_col, min_itemsize, nan_rep, encoding, errors, columns: list[str], ): if bvalues.dtype != object: return bvalues bvalues = cast(np.ndarray, bvalues) dtype_name = bvalues.dtype.name inferred_type = lib.infer_dtype(bvalues, skipna=False) if inferred_type == "date": raise TypeError("[date] is not implemented as a table column") if inferred_type == "datetime": # after GH#8260 # this only would be hit for a multi-timezone dtype which is an error raise TypeError( "too many timezones in this block, create separate data columns" ) if not (inferred_type == "string" or dtype_name == "object"): return bvalues mask = isna(bvalues) data = bvalues.copy() data[mask] = nan_rep # see if we have a valid string type inferred_type = lib.infer_dtype(data, skipna=False) if inferred_type != "string": # we cannot serialize this data, so report an exception on a column # by column basis # expected behaviour: # search block for a non-string object column by column for i in range(data.shape[0]): col = data[i] inferred_type = lib.infer_dtype(col, skipna=False) if inferred_type != "string": error_column_label = columns[i] if len(columns) > i else f"No.{i}" raise TypeError( f"Cannot serialize the column [{error_column_label}]\n" f"because its data contents are not [string] but " f"[{inferred_type}] object dtype" ) # itemsize is the maximum length of a string (along any dimension) data_converted = _convert_string_array(data, encoding, errors).reshape(data.shape) itemsize = data_converted.itemsize # specified min_itemsize? if isinstance(min_itemsize, dict): min_itemsize = int(min_itemsize.get(name) or min_itemsize.get("values") or 0) itemsize = max(min_itemsize or 0, itemsize) # check for column in the values conflicts if existing_col is not None: eci = existing_col.validate_col(itemsize) if eci is not None and eci > itemsize: itemsize = eci data_converted = data_converted.astype(f"|S{itemsize}", copy=False) return data_converted
null
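A minimal sketch of the sizing rule in _maybe_convert_for_string_atom above: missing values are replaced by nan_rep, and the fixed string width is the larger of the data's natural encoded width and any user-specified minimum. The nan_rep and min_itemsize values here are hypothetical.

import numpy as np

data = np.array(["a", "bb", None], dtype=object)
nan_rep = "nan"                                   # hypothetical missing-value marker
mask = np.array([v is None for v in data])
data[mask] = nan_rep
encoded = np.array([v.encode("utf-8") for v in data], dtype=object)
natural = max(len(v) for v in encoded)            # longest encoded string
min_itemsize = 8                                  # hypothetical user minimum
itemsize = max(min_itemsize, natural)
print(np.asarray(encoded, dtype=f"S{itemsize}").dtype)  # dtype('S8')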
173,520
from __future__ import annotations from contextlib import suppress import copy from datetime import ( date, tzinfo, ) import itertools import os import re from textwrap import dedent from types import TracebackType from typing import ( TYPE_CHECKING, Any, Callable, Final, Hashable, Iterator, Literal, Sequence, cast, overload, ) import warnings import numpy as np from pandas._config import ( config, get_option, ) from pandas._libs import ( lib, writers as libwriters, ) from pandas._libs.tslibs import timezones from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeArg, FilePath, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.compat.pickle_compat import patch_pickle from pandas.errors import ( AttributeConflictWarning, ClosedFileError, IncompatibilityWarning, PerformanceWarning, PossibleDataLossError, ) from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_bool_dtype, is_categorical_dtype, is_complex_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_extension_array_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_string_dtype, is_timedelta64_dtype, needs_i8_conversion, ) from pandas.core.dtypes.missing import array_equivalent from pandas import ( DataFrame, DatetimeIndex, Index, MultiIndex, PeriodIndex, RangeIndex, Series, TimedeltaIndex, concat, isna, ) from pandas.core.arrays import ( Categorical, DatetimeArray, PeriodArray, ) import pandas.core.common as com from pandas.core.computation.pytables import ( PyTablesExpr, maybe_expression, ) from pandas.core.construction import extract_array from pandas.core.indexes.api import ensure_index from pandas.core.internals import ( ArrayManager, BlockManager, ) from pandas.io.common import stringify_path from pandas.io.formats.printing import ( adjoin, pprint_thing, ) def _get_converter(kind: str, encoding: str, errors: str): if kind == "datetime64": return lambda x: np.asarray(x, dtype="M8[ns]") elif kind == "string": return lambda x: _unconvert_string_array( x, nan_rep=None, encoding=encoding, errors=errors ) else: # pragma: no cover raise ValueError(f"invalid kind {kind}") def _need_convert(kind: str) -> bool: if kind in ("datetime64", "string"): return True return False def _maybe_convert(values: np.ndarray, val_kind: str, encoding: str, errors: str): assert isinstance(val_kind, str), type(val_kind) if _need_convert(val_kind): conv = _get_converter(val_kind, encoding, errors) values = conv(values) return values
null
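An illustration of the "datetime64" converter returned by _get_converter above: np.asarray with an M8[ns] target dtype converts stored int64 nanosecond counts into timestamps (the "string" kind is skipped here because it needs the internal _unconvert_string_array helper). The input array is hypothetical.

import numpy as np

conv = lambda x: np.asarray(x, dtype="M8[ns]")  # the "datetime64" converter
print(conv(np.array([0, 1_000_000_000], dtype="i8")))
# ['1970-01-01T00:00:00.000000000' '1970-01-01T00:00:01.000000000']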
173,521
from __future__ import annotations from contextlib import suppress import copy from datetime import ( date, tzinfo, ) import itertools import os import re from textwrap import dedent from types import TracebackType from typing import ( TYPE_CHECKING, Any, Callable, Final, Hashable, Iterator, Literal, Sequence, cast, overload, ) import warnings import numpy as np from pandas._config import ( config, get_option, ) from pandas._libs import ( lib, writers as libwriters, ) from pandas._libs.tslibs import timezones from pandas._typing import ( AnyArrayLike, ArrayLike, AxisInt, DtypeArg, FilePath, Shape, npt, ) from pandas.compat._optional import import_optional_dependency from pandas.compat.pickle_compat import patch_pickle from pandas.errors import ( AttributeConflictWarning, ClosedFileError, IncompatibilityWarning, PerformanceWarning, PossibleDataLossError, ) from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_bool_dtype, is_categorical_dtype, is_complex_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_extension_array_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_string_dtype, is_timedelta64_dtype, needs_i8_conversion, ) from pandas.core.dtypes.missing import array_equivalent from pandas import ( DataFrame, DatetimeIndex, Index, MultiIndex, PeriodIndex, RangeIndex, Series, TimedeltaIndex, concat, isna, ) from pandas.core.arrays import ( Categorical, DatetimeArray, PeriodArray, ) import pandas.core.common as com from pandas.core.computation.pytables import ( PyTablesExpr, maybe_expression, ) from pandas.core.construction import extract_array from pandas.core.indexes.api import ensure_index from pandas.core.internals import ( ArrayManager, BlockManager, ) from pandas.io.common import stringify_path from pandas.io.formats.printing import ( adjoin, pprint_thing, ) class Sequence(_Collection[_T_co], Reversible[_T_co], Generic[_T_co]): def __getitem__(self, i: int) -> _T_co: ... def __getitem__(self, s: slice) -> Sequence[_T_co]: ... # Mixin methods def index(self, value: Any, start: int = ..., stop: int = ...) -> int: ... def count(self, value: Any) -> int: ... def __contains__(self, x: object) -> bool: ... def __iter__(self) -> Iterator[_T_co]: ... def __reversed__(self) -> Iterator[_T_co]: ... The provided code snippet includes necessary dependencies for implementing the `_maybe_adjust_name` function. Write a Python function `def _maybe_adjust_name(name: str, version: Sequence[int]) -> str` to solve the following problem: Prior to 0.10.1, we named values blocks like: values_block_0 an the name values_0, adjust the given name if necessary. Parameters ---------- name : str version : Tuple[int, int, int] Returns ------- str Here is the function: def _maybe_adjust_name(name: str, version: Sequence[int]) -> str: """ Prior to 0.10.1, we named values blocks like: values_block_0 an the name values_0, adjust the given name if necessary. Parameters ---------- name : str version : Tuple[int, int, int] Returns ------- str """ if isinstance(version, str) or len(version) < 3: raise ValueError("Version is incorrect, expected sequence of 3 integers.") if version[0] == 0 and version[1] <= 10 and version[2] == 0: m = re.search(r"values_block_(\d+)", name) if m: grp = m.groups()[0] name = f"values_{grp}" return name
Prior to 0.10.1, we named values blocks like ``values_block_0``; these map to the name ``values_0``. Adjust the given name if necessary.

Parameters
----------
name : str
version : Tuple[int, int, int]

Returns
-------
str
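A worked example of the renaming rule, with the _maybe_adjust_name function above in scope. Note the rewrite fires only when version[0] == 0, version[1] <= 10 and version[2] == 0; a string or too-short version raises ValueError.

print(_maybe_adjust_name("values_block_2", (0, 10, 0)))  # values_2
print(_maybe_adjust_name("values_block_2", (0, 11, 0)))  # values_block_2 (no rewrite)
print(_maybe_adjust_name("other", (0, 10, 0)))           # other (pattern not matched)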
173,522
from __future__ import annotations from abc import ( ABC, abstractmethod, ) import codecs from collections import defaultdict import dataclasses import functools import gzip from io import ( BufferedIOBase, BytesIO, RawIOBase, StringIO, TextIOBase, TextIOWrapper, ) import mmap import os from pathlib import Path import re import tarfile from typing import ( IO, Any, AnyStr, DefaultDict, Generic, Hashable, Literal, Mapping, Sequence, TypeVar, cast, overload, ) from urllib.parse import ( urljoin, urlparse as parse_url, uses_netloc, uses_params, uses_relative, ) import warnings import zipfile from pandas._typing import ( BaseBuffer, CompressionDict, CompressionOptions, FilePath, ReadBuffer, ReadCsvBuffer, StorageOptions, WriteBuffer, ) from pandas.compat import get_lzma_file from pandas.compat._optional import import_optional_dependency from pandas.compat.compressors import BZ2File as _BZ2File from pandas.util._decorators import doc from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_bool, is_file_like, is_integer, is_list_like, ) from pandas.core.indexes.api import MultiIndex from pandas.core.shared_docs import _shared_docs The provided code snippet includes necessary dependencies for implementing the `file_path_to_url` function. Write a Python function `def file_path_to_url(path: str) -> str` to solve the following problem: converts an absolute native path to a FILE URL. Parameters ---------- path : a path in native format Returns ------- a valid FILE URL Here is the function: def file_path_to_url(path: str) -> str: """ converts an absolute native path to a FILE URL. Parameters ---------- path : a path in native format Returns ------- a valid FILE URL """ # lazify expensive import (~30ms) from urllib.request import pathname2url return urljoin("file:", pathname2url(path))
Converts an absolute native path to a FILE URL.

Parameters
----------
path : a path in native format

Returns
-------
a valid FILE URL
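A usage sketch of the stdlib round trip the function wraps; the path is hypothetical and the output shown is for a POSIX system (Windows paths are escaped differently by pathname2url).

from urllib.parse import urljoin
from urllib.request import pathname2url

path = "/tmp/data.csv"  # hypothetical absolute native path
print(urljoin("file:", pathname2url(path)))  # file:///tmp/data.csv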
173,523
from __future__ import annotations from abc import ( ABC, abstractmethod, ) import codecs from collections import defaultdict import dataclasses import functools import gzip from io import ( BufferedIOBase, BytesIO, RawIOBase, StringIO, TextIOBase, TextIOWrapper, ) import mmap import os from pathlib import Path import re import tarfile from typing import ( IO, Any, AnyStr, DefaultDict, Generic, Hashable, Literal, Mapping, Sequence, TypeVar, cast, overload, ) from urllib.parse import ( urljoin, urlparse as parse_url, uses_netloc, uses_params, uses_relative, ) import warnings import zipfile from pandas._typing import ( BaseBuffer, CompressionDict, CompressionOptions, FilePath, ReadBuffer, ReadCsvBuffer, StorageOptions, WriteBuffer, ) from pandas.compat import get_lzma_file from pandas.compat._optional import import_optional_dependency from pandas.compat.compressors import BZ2File as _BZ2File from pandas.util._decorators import doc from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_bool, is_file_like, is_integer, is_list_like, ) from pandas.core.indexes.api import MultiIndex from pandas.core.shared_docs import _shared_docs class Hashable(Protocol, metaclass=ABCMeta): # TODO: This is special, in that a subclass of a hashable class may not be hashable # (for example, list vs. object). It's not obvious how to represent this. This class # is currently mostly useless for static checking. def __hash__(self) -> int: ... class Sequence(_Collection[_T_co], Reversible[_T_co], Generic[_T_co]): def __getitem__(self, i: int) -> _T_co: ... def __getitem__(self, s: slice) -> Sequence[_T_co]: ... # Mixin methods def index(self, value: Any, start: int = ..., stop: int = ...) -> int: ... def count(self, value: Any) -> int: ... def __contains__(self, x: object) -> bool: ... def __iter__(self) -> Iterator[_T_co]: ... def __reversed__(self) -> Iterator[_T_co]: ... The provided code snippet includes necessary dependencies for implementing the `is_potential_multi_index` function. Write a Python function `def is_potential_multi_index( columns: Sequence[Hashable] | MultiIndex, index_col: bool | Sequence[int] | None = None, ) -> bool` to solve the following problem: Check whether or not the `columns` parameter could be converted into a MultiIndex. Parameters ---------- columns : array-like Object which may or may not be convertible into a MultiIndex index_col : None, bool or list, optional Column or columns to use as the (possibly hierarchical) index Returns ------- bool : Whether or not columns could become a MultiIndex Here is the function: def is_potential_multi_index( columns: Sequence[Hashable] | MultiIndex, index_col: bool | Sequence[int] | None = None, ) -> bool: """ Check whether or not the `columns` parameter could be converted into a MultiIndex. Parameters ---------- columns : array-like Object which may or may not be convertible into a MultiIndex index_col : None, bool or list, optional Column or columns to use as the (possibly hierarchical) index Returns ------- bool : Whether or not columns could become a MultiIndex """ if index_col is None or isinstance(index_col, bool): index_col = [] return bool( len(columns) and not isinstance(columns, MultiIndex) and all(isinstance(c, tuple) for c in columns if c not in list(index_col)) )
Check whether or not the `columns` parameter could be converted into a MultiIndex.

Parameters
----------
columns : array-like
    Object which may or may not be convertible into a MultiIndex
index_col : None, bool or list, optional
    Column or columns to use as the (possibly hierarchical) index

Returns
-------
bool : Whether or not columns could become a MultiIndex
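A usage sketch, with the is_potential_multi_index function above in scope: all-tuple labels signal a potential MultiIndex, and labels listed in index_col are excluded from the check.

print(is_potential_multi_index([("a", "x"), ("a", "y")]))            # True
print(is_potential_multi_index(["a", ("a", "y")]))                   # False: mixed labels
print(is_potential_multi_index(["a", ("a", "y")], index_col=["a"]))  # True: "a" is claimed as index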
173,524
from __future__ import annotations from abc import ( ABC, abstractmethod, ) import codecs from collections import defaultdict import dataclasses import functools import gzip from io import ( BufferedIOBase, BytesIO, RawIOBase, StringIO, TextIOBase, TextIOWrapper, ) import mmap import os from pathlib import Path import re import tarfile from typing import ( IO, Any, AnyStr, DefaultDict, Generic, Hashable, Literal, Mapping, Sequence, TypeVar, cast, overload, ) from urllib.parse import ( urljoin, urlparse as parse_url, uses_netloc, uses_params, uses_relative, ) import warnings import zipfile from pandas._typing import ( BaseBuffer, CompressionDict, CompressionOptions, FilePath, ReadBuffer, ReadCsvBuffer, StorageOptions, WriteBuffer, ) from pandas.compat import get_lzma_file from pandas.compat._optional import import_optional_dependency from pandas.compat.compressors import BZ2File as _BZ2File from pandas.util._decorators import doc from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( is_bool, is_file_like, is_integer, is_list_like, ) from pandas.core.indexes.api import MultiIndex from pandas.core.shared_docs import _shared_docs class defaultdict(Dict[_KT, _VT], Generic[_KT, _VT]): default_factory: Callable[[], _VT] def __init__(self, **kwargs: _VT) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]]) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], **kwargs: _VT) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], map: Mapping[_KT, _VT]) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ... def __init__(self, default_factory: Optional[Callable[[], _VT]], iterable: Iterable[Tuple[_KT, _VT]]) -> None: ... def __init__( self, default_factory: Optional[Callable[[], _VT]], iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT ) -> None: ... def __missing__(self, key: _KT) -> _VT: ... def copy(self: _S) -> _S: ... DefaultDict = _Alias() class Hashable(Protocol, metaclass=ABCMeta): # TODO: This is special, in that a subclass of a hashable class may not be hashable # (for example, list vs. object). It's not obvious how to represent this. This class # is currently mostly useless for static checking. def __hash__(self) -> int: ... class Sequence(_Collection[_T_co], Reversible[_T_co], Generic[_T_co]): def __getitem__(self, i: int) -> _T_co: ... def __getitem__(self, s: slice) -> Sequence[_T_co]: ... # Mixin methods def index(self, value: Any, start: int = ..., stop: int = ...) -> int: ... def count(self, value: Any) -> int: ... def __contains__(self, x: object) -> bool: ... def __iter__(self) -> Iterator[_T_co]: ... def __reversed__(self) -> Iterator[_T_co]: ... The provided code snippet includes necessary dependencies for implementing the `dedup_names` function. Write a Python function `def dedup_names( names: Sequence[Hashable], is_potential_multiindex: bool ) -> Sequence[Hashable]` to solve the following problem: Rename column names if duplicates exist. Currently the renaming is done by appending a period and an autonumeric, but a custom pattern may be supported in the future. Examples -------- >>> dedup_names(["x", "y", "x", "x"], is_potential_multiindex=False) ['x', 'y', 'x.1', 'x.2'] Here is the function: def dedup_names( names: Sequence[Hashable], is_potential_multiindex: bool ) -> Sequence[Hashable]: """ Rename column names if duplicates exist. 
Currently the renaming is done by appending a period and an autonumeric, but a custom pattern may be supported in the future. Examples -------- >>> dedup_names(["x", "y", "x", "x"], is_potential_multiindex=False) ['x', 'y', 'x.1', 'x.2'] """ names = list(names) # so we can index counts: DefaultDict[Hashable, int] = defaultdict(int) for i, col in enumerate(names): cur_count = counts[col] while cur_count > 0: counts[col] = cur_count + 1 if is_potential_multiindex: # for mypy assert isinstance(col, tuple) col = col[:-1] + (f"{col[-1]}.{cur_count}",) else: col = f"{col}.{cur_count}" cur_count = counts[col] names[i] = col counts[col] = cur_count + 1 return names
Rename column names if duplicates exist.

Currently the renaming is done by appending a period and an
auto-incrementing number, but a custom pattern may be supported in the
future.

Examples
--------
>>> dedup_names(["x", "y", "x", "x"], is_potential_multiindex=False)
['x', 'y', 'x.1', 'x.2']
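Complementing the flat-label example in the docstring, a sketch of the MultiIndex path with the dedup_names function above in scope: the numeric suffix is appended to the last element of each duplicated tuple label.

print(dedup_names([("a", "x"), ("a", "x")], is_potential_multiindex=True))
# [('a', 'x'), ('a', 'x.1')]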
173,525
from __future__ import annotations import io import os from typing import ( Any, Literal, ) import warnings from warnings import catch_warnings from pandas._libs import lib from pandas._typing import ( DtypeBackend, FilePath, ReadBuffer, StorageOptions, WriteBuffer, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import AbstractMethodError from pandas.util._decorators import doc from pandas.util._exceptions import find_stack_level from pandas.util._validators import check_dtype_backend import pandas as pd from pandas import ( DataFrame, get_option, ) from pandas.core.shared_docs import _shared_docs from pandas.util.version import Version from pandas.io.common import ( IOHandles, get_handle, is_fsspec_url, is_url, stringify_path, ) Any = object() class ReadBuffer(BaseBuffer, Protocol[AnyStr_co]): def read(self, __n: int = ...) -> AnyStr_co: # for BytesIOWrapper, gzip.GzipFile, bz2.BZ2File ... class WriteBuffer(BaseBuffer, Protocol[AnyStr_contra]): def write(self, __b: AnyStr_contra) -> Any: # for gzip.GzipFile, bz2.BZ2File ... def flush(self) -> Any: # for gzip.GzipFile, bz2.BZ2File ... FilePath = Union[str, "PathLike[str]"] StorageOptions = Optional[Dict[str, Any]] def import_optional_dependency( name: str, extra: str = "", errors: str = "raise", min_version: str | None = None, ): """ Import an optional dependency. By default, if a dependency is missing an ImportError with a nice message will be raised. If a dependency is present, but too old, we raise. Parameters ---------- name : str The module name. extra : str Additional text to include in the ImportError message. errors : str {'raise', 'warn', 'ignore'} What to do when a dependency is not found or its version is too old. * raise : Raise an ImportError * warn : Only applicable when a module's version is to old. Warns that the version is too old and returns None * ignore: If the module is not installed, return None, otherwise, return the module, even if the version is too old. It's expected that users validate the version locally when using ``errors="ignore"`` (see. ``io/html.py``) min_version : str, default None Specify a minimum version that is different from the global pandas minimum version required. Returns ------- maybe_module : Optional[ModuleType] The imported module, when found and the version is correct. None is returned when the package is not found and `errors` is False, or when the package's version is too old and `errors` is ``'warn'``. """ assert errors in {"warn", "raise", "ignore"} package_name = INSTALL_MAPPING.get(name) install_name = package_name if package_name is not None else name msg = ( f"Missing optional dependency '{install_name}'. {extra} " f"Use pip or conda to install {install_name}." ) try: module = importlib.import_module(name) except ImportError: if errors == "raise": raise ImportError(msg) return None # Handle submodules: if we have submodule, grab parent module from sys.modules parent = name.split(".")[0] if parent != name: install_name = parent module_to_get = sys.modules[install_name] else: module_to_get = module minimum_version = min_version if min_version is not None else VERSIONS.get(parent) if minimum_version: version = get_version(module_to_get) if version and Version(version) < Version(minimum_version): msg = ( f"Pandas requires version '{minimum_version}' or newer of '{parent}' " f"(version '{version}' currently installed)." 
) if errors == "warn": warnings.warn( msg, UserWarning, stacklevel=find_stack_level(), ) return None elif errors == "raise": raise ImportError(msg) return module class IOHandles(Generic[AnyStr]): """ Return value of io/common.py:get_handle Can be used as a context manager. This is used to easily close created buffers and to handle corner cases when TextIOWrapper is inserted. handle: The file handle to be used. created_handles: All file handles that are created by get_handle is_wrapped: Whether a TextIOWrapper needs to be detached. """ # handle might not implement the IO-interface handle: IO[AnyStr] compression: CompressionDict created_handles: list[IO[bytes] | IO[str]] = dataclasses.field(default_factory=list) is_wrapped: bool = False def close(self) -> None: """ Close all created buffers. Note: If a TextIOWrapper was inserted, it is flushed and detached to avoid closing the potentially user-created buffer. """ if self.is_wrapped: assert isinstance(self.handle, TextIOWrapper) self.handle.flush() self.handle.detach() self.created_handles.remove(self.handle) for handle in self.created_handles: handle.close() self.created_handles = [] self.is_wrapped = False def __enter__(self) -> IOHandles[AnyStr]: return self def __exit__(self, *args: Any) -> None: self.close() def is_url(url: object) -> bool: """ Check to see if a URL has a valid protocol. Parameters ---------- url : str or unicode Returns ------- isurl : bool If `url` has a valid protocol return True otherwise False. """ if not isinstance(url, str): return False return parse_url(url).scheme in _VALID_URLS def stringify_path(filepath_or_buffer: FilePath, convert_file_like: bool = ...) -> str: ... def stringify_path( filepath_or_buffer: BaseBufferT, convert_file_like: bool = ... ) -> BaseBufferT: ... def stringify_path( filepath_or_buffer: FilePath | BaseBufferT, convert_file_like: bool = False, ) -> str | BaseBufferT: """ Attempt to convert a path-like object to a string. Parameters ---------- filepath_or_buffer : object to be converted Returns ------- str_filepath_or_buffer : maybe a string version of the object Notes ----- Objects supporting the fspath protocol (python 3.6+) are coerced according to its __fspath__ method. Any other object is passed through unchanged, which includes bytes, strings, buffers, or anything else that's not even path-like. """ if not convert_file_like and is_file_like(filepath_or_buffer): # GH 38125: some fsspec objects implement os.PathLike but have already opened a # file. This prevents opening the file a second time. infer_compression calls # this function with convert_file_like=True to infer the compression. return cast(BaseBufferT, filepath_or_buffer) if isinstance(filepath_or_buffer, os.PathLike): filepath_or_buffer = filepath_or_buffer.__fspath__() return _expand_user(filepath_or_buffer) def is_fsspec_url(url: FilePath | BaseBuffer) -> bool: """ Returns true if the given URL looks like something fsspec can handle """ return ( isinstance(url, str) and bool(_RFC_3986_PATTERN.match(url)) and not url.startswith(("http://", "https://")) ) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "filepath_or_buffer", ) def get_handle( path_or_buf: FilePath | BaseBuffer, mode: str, *, encoding: str | None = ..., compression: CompressionOptions = ..., memory_map: bool = ..., is_text: Literal[False], errors: str | None = ..., storage_options: StorageOptions = ..., ) -> IOHandles[bytes]: ... 
def get_handle( path_or_buf: FilePath | BaseBuffer, mode: str, *, encoding: str | None = ..., compression: CompressionOptions = ..., memory_map: bool = ..., is_text: Literal[True] = ..., errors: str | None = ..., storage_options: StorageOptions = ..., ) -> IOHandles[str]: ... def get_handle( path_or_buf: FilePath | BaseBuffer, mode: str, *, encoding: str | None = ..., compression: CompressionOptions = ..., memory_map: bool = ..., is_text: bool = ..., errors: str | None = ..., storage_options: StorageOptions = ..., ) -> IOHandles[str] | IOHandles[bytes]: ... def get_handle( path_or_buf: FilePath | BaseBuffer, mode: str, *, encoding: str | None = None, compression: CompressionOptions = None, memory_map: bool = False, is_text: bool = True, errors: str | None = None, storage_options: StorageOptions = None, ) -> IOHandles[str] | IOHandles[bytes]: """ Get file handle for given path/buffer and mode. Parameters ---------- path_or_buf : str or file handle File path or object. mode : str Mode to open path_or_buf with. encoding : str or None Encoding to use. {compression_options} .. versionchanged:: 1.0.0 May now be a dict with key 'method' as compression mode and other keys as compression options if compression mode is 'zip'. .. versionchanged:: 1.1.0 Passing compression options as keys in dict is now supported for compression modes 'gzip', 'bz2', 'zstd' and 'zip'. .. versionchanged:: 1.4.0 Zstandard support. memory_map : bool, default False See parsers._parser_params for more information. Only used by read_csv. is_text : bool, default True Whether the type of the content passed to the file/buffer is string or bytes. This is not the same as `"b" not in mode`. If a string content is passed to a binary file/buffer, a wrapper is inserted. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. storage_options: StorageOptions = None Passed to _get_filepath_or_buffer .. versionchanged:: 1.2.0 Returns the dataclass IOHandles """ # Windows does not default to utf-8. Set to utf-8 for a consistent behavior encoding = encoding or "utf-8" errors = errors or "strict" # read_csv does not know whether the buffer is opened in binary/text mode if _is_binary_mode(path_or_buf, mode) and "b" not in mode: mode += "b" # validate encoding and errors codecs.lookup(encoding) if isinstance(errors, str): codecs.lookup_error(errors) # open URLs ioargs = _get_filepath_or_buffer( path_or_buf, encoding=encoding, compression=compression, mode=mode, storage_options=storage_options, ) handle = ioargs.filepath_or_buffer handles: list[BaseBuffer] # memory mapping needs to be the first step # only used for read_csv handle, memory_map, handles = _maybe_memory_map(handle, memory_map) is_path = isinstance(handle, str) compression_args = dict(ioargs.compression) compression = compression_args.pop("method") # Only for write methods if "r" not in mode and is_path: check_parent_directory(str(handle)) if compression: if compression != "zstd": # compression libraries do not like an explicit text-mode ioargs.mode = ioargs.mode.replace("t", "") elif compression == "zstd" and "b" not in ioargs.mode: # python-zstandard defaults to text mode, but we always expect # compression libraries to use binary mode. 
ioargs.mode += "b" # GZ Compression if compression == "gzip": if isinstance(handle, str): # error: Incompatible types in assignment (expression has type # "GzipFile", variable has type "Union[str, BaseBuffer]") handle = gzip.GzipFile( # type: ignore[assignment] filename=handle, mode=ioargs.mode, **compression_args, ) else: handle = gzip.GzipFile( # No overload variant of "GzipFile" matches argument types # "Union[str, BaseBuffer]", "str", "Dict[str, Any]" fileobj=handle, # type: ignore[call-overload] mode=ioargs.mode, **compression_args, ) # BZ Compression elif compression == "bz2": # Overload of "BZ2File" to handle pickle protocol 5 # "Union[str, BaseBuffer]", "str", "Dict[str, Any]" handle = _BZ2File( # type: ignore[call-overload] handle, mode=ioargs.mode, **compression_args, ) # ZIP Compression elif compression == "zip": # error: Argument 1 to "_BytesZipFile" has incompatible type # "Union[str, BaseBuffer]"; expected "Union[Union[str, PathLike[str]], # ReadBuffer[bytes], WriteBuffer[bytes]]" handle = _BytesZipFile( handle, ioargs.mode, **compression_args # type: ignore[arg-type] ) if handle.buffer.mode == "r": handles.append(handle) zip_names = handle.buffer.namelist() if len(zip_names) == 1: handle = handle.buffer.open(zip_names.pop()) elif not zip_names: raise ValueError(f"Zero files found in ZIP file {path_or_buf}") else: raise ValueError( "Multiple files found in ZIP file. " f"Only one file per ZIP: {zip_names}" ) # TAR Encoding elif compression == "tar": compression_args.setdefault("mode", ioargs.mode) if isinstance(handle, str): handle = _BytesTarFile(name=handle, **compression_args) else: # error: Argument "fileobj" to "_BytesTarFile" has incompatible # type "BaseBuffer"; expected "Union[ReadBuffer[bytes], # WriteBuffer[bytes], None]" handle = _BytesTarFile( fileobj=handle, **compression_args # type: ignore[arg-type] ) assert isinstance(handle, _BytesTarFile) if "r" in handle.buffer.mode: handles.append(handle) files = handle.buffer.getnames() if len(files) == 1: file = handle.buffer.extractfile(files[0]) assert file is not None handle = file elif not files: raise ValueError(f"Zero files found in TAR archive {path_or_buf}") else: raise ValueError( "Multiple files found in TAR archive. " f"Only one file per TAR archive: {files}" ) # XZ Compression elif compression == "xz": # error: Argument 1 to "LZMAFile" has incompatible type "Union[str, # BaseBuffer]"; expected "Optional[Union[Union[str, bytes, PathLike[str], # PathLike[bytes]], IO[bytes]]]" handle = get_lzma_file()(handle, ioargs.mode) # type: ignore[arg-type] # Zstd Compression elif compression == "zstd": zstd = import_optional_dependency("zstandard") if "r" in ioargs.mode: open_args = {"dctx": zstd.ZstdDecompressor(**compression_args)} else: open_args = {"cctx": zstd.ZstdCompressor(**compression_args)} handle = zstd.open( handle, mode=ioargs.mode, **open_args, ) # Unrecognized Compression else: msg = f"Unrecognized compression type: {compression}" raise ValueError(msg) assert not isinstance(handle, str) handles.append(handle) elif isinstance(handle, str): # Check whether the filename is to be opened in binary mode. # Binary mode does not support 'encoding' and 'newline'. 
if ioargs.encoding and "b" not in ioargs.mode: # Encoding handle = open( handle, ioargs.mode, encoding=ioargs.encoding, errors=errors, newline="", ) else: # Binary mode handle = open(handle, ioargs.mode) handles.append(handle) # Convert BytesIO or file objects passed with an encoding is_wrapped = False if not is_text and ioargs.mode == "rb" and isinstance(handle, TextIOBase): # not added to handles as it does not open/buffer resources handle = _BytesIOWrapper( handle, encoding=ioargs.encoding, ) elif is_text and ( compression or memory_map or _is_binary_mode(handle, ioargs.mode) ): if ( not hasattr(handle, "readable") or not hasattr(handle, "writable") or not hasattr(handle, "seekable") ): handle = _IOWrapper(handle) # error: Argument 1 to "TextIOWrapper" has incompatible type # "_IOWrapper"; expected "IO[bytes]" handle = TextIOWrapper( handle, # type: ignore[arg-type] encoding=ioargs.encoding, errors=errors, newline="", ) handles.append(handle) # only marked as wrapped when the caller provided a handle is_wrapped = not ( isinstance(ioargs.filepath_or_buffer, str) or ioargs.should_close ) if "r" in ioargs.mode and not hasattr(handle, "read"): raise TypeError( "Expected file path name or file-like object, " f"got {type(ioargs.filepath_or_buffer)} type" ) handles.reverse() # close the most recently added buffer first if ioargs.should_close: assert not isinstance(ioargs.filepath_or_buffer, str) handles.append(ioargs.filepath_or_buffer) return IOHandles( # error: Argument "handle" to "IOHandles" has incompatible type # "Union[TextIOWrapper, GzipFile, BaseBuffer, typing.IO[bytes], # typing.IO[Any]]"; expected "pandas._typing.IO[Any]" handle=handle, # type: ignore[arg-type] # error: Argument "created_handles" to "IOHandles" has incompatible type # "List[BaseBuffer]"; expected "List[Union[IO[bytes], IO[str]]]" created_handles=handles, # type: ignore[arg-type] is_wrapped=is_wrapped, compression=ioargs.compression, ) The provided code snippet includes necessary dependencies for implementing the `_get_path_or_handle` function. Write a Python function `def _get_path_or_handle( path: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes], fs: Any, storage_options: StorageOptions = None, mode: str = "rb", is_dir: bool = False, ) -> tuple[ FilePath | ReadBuffer[bytes] | WriteBuffer[bytes], IOHandles[bytes] | None, Any ]` to solve the following problem: File handling for PyArrow. 
Here is the function: def _get_path_or_handle( path: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes], fs: Any, storage_options: StorageOptions = None, mode: str = "rb", is_dir: bool = False, ) -> tuple[ FilePath | ReadBuffer[bytes] | WriteBuffer[bytes], IOHandles[bytes] | None, Any ]: """File handling for PyArrow.""" path_or_handle = stringify_path(path) if is_fsspec_url(path_or_handle) and fs is None: fsspec = import_optional_dependency("fsspec") fs, path_or_handle = fsspec.core.url_to_fs( path_or_handle, **(storage_options or {}) ) elif storage_options and (not is_url(path_or_handle) or mode != "rb"): # can't write to a remote url # without making use of fsspec at the moment raise ValueError("storage_options passed with buffer, or non-supported URL") handles = None if ( not fs and not is_dir and isinstance(path_or_handle, str) and not os.path.isdir(path_or_handle) ): # use get_handle only when we are very certain that it is not a directory # fsspec resources can also point to directories # this branch is used for example when reading from non-fsspec URLs handles = get_handle( path_or_handle, mode, is_text=False, storage_options=storage_options ) fs = None path_or_handle = handles.handle return path_or_handle, handles, fs
File handling for PyArrow.
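A hedged sketch of the non-directory branch of _get_path_or_handle: a buffer (or plain local path) ends up in get_handle opened in binary mode, and the resulting handle is what a parquet engine writes to. The bytes written below are a placeholder payload, not a valid parquet file.

import io

from pandas.io.common import get_handle

buf = io.BytesIO()
with get_handle(buf, "wb", is_text=False) as handles:
    handles.handle.write(b"PAR1")  # stand-in for engine output
print(buf.getvalue())  # b'PAR1'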
173,526
from __future__ import annotations import io import os from typing import ( Any, Literal, ) import warnings from warnings import catch_warnings from pandas._libs import lib from pandas._typing import ( DtypeBackend, FilePath, ReadBuffer, StorageOptions, WriteBuffer, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import AbstractMethodError from pandas.util._decorators import doc from pandas.util._exceptions import find_stack_level from pandas.util._validators import check_dtype_backend import pandas as pd from pandas import ( DataFrame, get_option, ) from pandas.core.shared_docs import _shared_docs from pandas.util.version import Version from pandas.io.common import ( IOHandles, get_handle, is_fsspec_url, is_url, stringify_path, ) def get_engine(engine: str) -> BaseImpl: """return our implementation""" if engine == "auto": engine = get_option("io.parquet.engine") if engine == "auto": # try engines in this order engine_classes = [PyArrowImpl, FastParquetImpl] error_msgs = "" for engine_class in engine_classes: try: return engine_class() except ImportError as err: error_msgs += "\n - " + str(err) raise ImportError( "Unable to find a usable engine; " "tried using: 'pyarrow', 'fastparquet'.\n" "A suitable version of " "pyarrow or fastparquet is required for parquet " "support.\n" "Trying to import the above resulted in these errors:" f"{error_msgs}" ) if engine == "pyarrow": return PyArrowImpl() elif engine == "fastparquet": return FastParquetImpl() raise ValueError("engine must be one of 'pyarrow', 'fastparquet'") class WriteBuffer(BaseBuffer, Protocol[AnyStr_contra]): def write(self, __b: AnyStr_contra) -> Any: # for gzip.GzipFile, bz2.BZ2File ... def flush(self) -> Any: # for gzip.GzipFile, bz2.BZ2File ... FilePath = Union[str, "PathLike[str]"] StorageOptions = Optional[Dict[str, Any]] The provided code snippet includes necessary dependencies for implementing the `to_parquet` function. Write a Python function `def to_parquet( df: DataFrame, path: FilePath | WriteBuffer[bytes] | None = None, engine: str = "auto", compression: str | None = "snappy", index: bool | None = None, storage_options: StorageOptions = None, partition_cols: list[str] | None = None, **kwargs, ) -> bytes | None` to solve the following problem: Write a DataFrame to the parquet format. Parameters ---------- df : DataFrame path : str, path object, file-like object, or None, default None String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. If None, the result is returned as bytes. If a string, it will be used as Root Directory path when writing a partitioned dataset. The engine fastparquet does not accept file-like objects. .. versionchanged:: 1.2.0 engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. compression : {{'snappy', 'gzip', 'brotli', 'lz4', 'zstd', None}}, default 'snappy'. Name of the compression to use. Use ``None`` for no compression. The supported compression methods actually depend on which engine is used. For 'pyarrow', 'snappy', 'gzip', 'brotli', 'lz4', 'zstd' are all supported. For 'fastparquet', only 'gzip' and 'snappy' are supported. index : bool, default None If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. 
If ``None``, similar to ``True`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. partition_cols : str or list, optional, default None Column names by which to partition the dataset. Columns are partitioned in the order they are given. Must be None if path is not a string. {storage_options} .. versionadded:: 1.2.0 kwargs Additional keyword arguments passed to the engine Returns ------- bytes if no path argument is provided else None Here is the function: def to_parquet( df: DataFrame, path: FilePath | WriteBuffer[bytes] | None = None, engine: str = "auto", compression: str | None = "snappy", index: bool | None = None, storage_options: StorageOptions = None, partition_cols: list[str] | None = None, **kwargs, ) -> bytes | None: """ Write a DataFrame to the parquet format. Parameters ---------- df : DataFrame path : str, path object, file-like object, or None, default None String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. If None, the result is returned as bytes. If a string, it will be used as Root Directory path when writing a partitioned dataset. The engine fastparquet does not accept file-like objects. .. versionchanged:: 1.2.0 engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. compression : {{'snappy', 'gzip', 'brotli', 'lz4', 'zstd', None}}, default 'snappy'. Name of the compression to use. Use ``None`` for no compression. The supported compression methods actually depend on which engine is used. For 'pyarrow', 'snappy', 'gzip', 'brotli', 'lz4', 'zstd' are all supported. For 'fastparquet', only 'gzip' and 'snappy' are supported. index : bool, default None If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``True`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. partition_cols : str or list, optional, default None Column names by which to partition the dataset. Columns are partitioned in the order they are given. Must be None if path is not a string. {storage_options} .. versionadded:: 1.2.0 kwargs Additional keyword arguments passed to the engine Returns ------- bytes if no path argument is provided else None """ if isinstance(partition_cols, str): partition_cols = [partition_cols] impl = get_engine(engine) path_or_buf: FilePath | WriteBuffer[bytes] = io.BytesIO() if path is None else path impl.write( df, path_or_buf, compression=compression, index=index, partition_cols=partition_cols, storage_options=storage_options, **kwargs, ) if path is None: assert isinstance(path_or_buf, io.BytesIO) return path_or_buf.getvalue() else: return None
Write a DataFrame to the parquet format.

Parameters
----------
df : DataFrame
path : str, path object, file-like object, or None, default None
    String, path object (implementing ``os.PathLike[str]``), or file-like
    object implementing a binary ``write()`` function. If None, the result
    is returned as bytes. If a string, it will be used as Root Directory
    path when writing a partitioned dataset. The engine fastparquet does
    not accept file-like objects.

    .. versionchanged:: 1.2.0

engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
    Parquet library to use. If 'auto', then the option
    ``io.parquet.engine`` is used. The default ``io.parquet.engine``
    behavior is to try 'pyarrow', falling back to 'fastparquet' if
    'pyarrow' is unavailable.
compression : {{'snappy', 'gzip', 'brotli', 'lz4', 'zstd', None}}, default 'snappy'
    Name of the compression to use. Use ``None`` for no compression.
    The supported compression methods actually depend on which engine is
    used. For 'pyarrow', 'snappy', 'gzip', 'brotli', 'lz4', 'zstd' are all
    supported. For 'fastparquet', only 'gzip' and 'snappy' are supported.
index : bool, default None
    If ``True``, include the dataframe's index(es) in the file output. If
    ``False``, they will not be written to the file.
    If ``None``, similar to ``True`` the dataframe's index(es) will be
    saved. However, instead of being saved as values, the RangeIndex will
    be stored as a range in the metadata so it doesn't require much space
    and is faster. Other indexes will be included as columns in the file
    output.
partition_cols : str or list, optional, default None
    Column names by which to partition the dataset.
    Columns are partitioned in the order they are given.
    Must be None if path is not a string.
{storage_options}

    .. versionadded:: 1.2.0

kwargs
    Additional keyword arguments passed to the engine

Returns
-------
bytes if no path argument is provided else None
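A usage sketch of the path=None branch described above, assuming a parquet engine such as pyarrow is installed.

import pandas as pd

from pandas.io.parquet import to_parquet

df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
raw = to_parquet(df, path=None)  # buffered through an internal BytesIO
print(type(raw), len(raw) > 0)   # <class 'bytes'> True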
173,527
from __future__ import annotations import io import os from typing import ( Any, Literal, ) import warnings from warnings import catch_warnings from pandas._libs import lib from pandas._typing import ( DtypeBackend, FilePath, ReadBuffer, StorageOptions, WriteBuffer, ) from pandas.compat._optional import import_optional_dependency from pandas.errors import AbstractMethodError from pandas.util._decorators import doc from pandas.util._exceptions import find_stack_level from pandas.util._validators import check_dtype_backend import pandas as pd from pandas import ( DataFrame, get_option, ) from pandas.core.shared_docs import _shared_docs from pandas.util.version import Version from pandas.io.common import ( IOHandles, get_handle, is_fsspec_url, is_url, stringify_path, ) def get_engine(engine: str) -> BaseImpl: """return our implementation""" if engine == "auto": engine = get_option("io.parquet.engine") if engine == "auto": # try engines in this order engine_classes = [PyArrowImpl, FastParquetImpl] error_msgs = "" for engine_class in engine_classes: try: return engine_class() except ImportError as err: error_msgs += "\n - " + str(err) raise ImportError( "Unable to find a usable engine; " "tried using: 'pyarrow', 'fastparquet'.\n" "A suitable version of " "pyarrow or fastparquet is required for parquet " "support.\n" "Trying to import the above resulted in these errors:" f"{error_msgs}" ) if engine == "pyarrow": return PyArrowImpl() elif engine == "fastparquet": return FastParquetImpl() raise ValueError("engine must be one of 'pyarrow', 'fastparquet'") class ReadBuffer(BaseBuffer, Protocol[AnyStr_co]): def read(self, __n: int = ...) -> AnyStr_co: # for BytesIOWrapper, gzip.GzipFile, bz2.BZ2File ... FilePath = Union[str, "PathLike[str]"] StorageOptions = Optional[Dict[str, Any]] DtypeBackend = Literal["pyarrow", "numpy_nullable"] def find_stack_level() -> int: """ Find the first place in the stack that is not inside pandas (tests notwithstanding). """ import pandas as pd pkg_dir = os.path.dirname(pd.__file__) test_dir = os.path.join(pkg_dir, "tests") # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow frame = inspect.currentframe() n = 0 while frame: fname = inspect.getfile(frame) if fname.startswith(pkg_dir) and not fname.startswith(test_dir): frame = frame.f_back n += 1 else: break return n def check_dtype_backend(dtype_backend) -> None: if dtype_backend is not lib.no_default: if dtype_backend not in ["numpy_nullable", "pyarrow"]: raise ValueError( f"dtype_backend {dtype_backend} is invalid, only 'numpy_nullable' and " f"'pyarrow' are allowed.", ) The provided code snippet includes necessary dependencies for implementing the `read_parquet` function. Write a Python function `def read_parquet( path: FilePath | ReadBuffer[bytes], engine: str = "auto", columns: list[str] | None = None, storage_options: StorageOptions = None, use_nullable_dtypes: bool | lib.NoDefault = lib.no_default, dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, **kwargs, ) -> DataFrame` to solve the following problem: Load a parquet object from the file path, returning a DataFrame. Parameters ---------- path : str, path object or file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``read()`` function. The string could be a URL. Valid URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is expected. A local file could be: ``file://localhost/path/to/table.parquet``. 
A file URL can also be a path to a directory that contains multiple partitioned parquet files. Both pyarrow and fastparquet support paths to directories as well as file URLs. A directory path could be: ``file://localhost/path/to/tables`` or ``s3://bucket/partition_dir``. engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. columns : list, default=None If not None, only these columns will be read from the file. {storage_options} .. versionadded:: 1.3.0 use_nullable_dtypes : bool, default False If True, use dtypes that use ``pd.NA`` as missing value indicator for the resulting DataFrame. (only applicable for the ``pyarrow`` engine) As new dtypes are added that support ``pd.NA`` in the future, the output with this option will change to use those dtypes. Note: this is an experimental option, and behaviour (e.g. additional support dtypes) may change without notice. .. deprecated:: 2.0 dtype_backend : {{"numpy_nullable", "pyarrow"}}, defaults to NumPy backed DataFrames Which dtype_backend to use, e.g. whether a DataFrame should have NumPy arrays, nullable dtypes are used for all dtypes that have a nullable implementation when "numpy_nullable" is set, pyarrow is used for all dtypes if "pyarrow" is set. The dtype_backends are still experimential. .. versionadded:: 2.0 **kwargs Any additional kwargs are passed to the engine. Returns ------- DataFrame Here is the function: def read_parquet( path: FilePath | ReadBuffer[bytes], engine: str = "auto", columns: list[str] | None = None, storage_options: StorageOptions = None, use_nullable_dtypes: bool | lib.NoDefault = lib.no_default, dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, **kwargs, ) -> DataFrame: """ Load a parquet object from the file path, returning a DataFrame. Parameters ---------- path : str, path object or file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``read()`` function. The string could be a URL. Valid URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is expected. A local file could be: ``file://localhost/path/to/table.parquet``. A file URL can also be a path to a directory that contains multiple partitioned parquet files. Both pyarrow and fastparquet support paths to directories as well as file URLs. A directory path could be: ``file://localhost/path/to/tables`` or ``s3://bucket/partition_dir``. engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. columns : list, default=None If not None, only these columns will be read from the file. {storage_options} .. versionadded:: 1.3.0 use_nullable_dtypes : bool, default False If True, use dtypes that use ``pd.NA`` as missing value indicator for the resulting DataFrame. (only applicable for the ``pyarrow`` engine) As new dtypes are added that support ``pd.NA`` in the future, the output with this option will change to use those dtypes. Note: this is an experimental option, and behaviour (e.g. additional support dtypes) may change without notice. .. 
deprecated:: 2.0 dtype_backend : {{"numpy_nullable", "pyarrow"}}, defaults to NumPy backed DataFrames Which dtype_backend to use, e.g. whether a DataFrame should have NumPy arrays, nullable dtypes are used for all dtypes that have a nullable implementation when "numpy_nullable" is set, pyarrow is used for all dtypes if "pyarrow" is set. The dtype_backends are still experimental. .. versionadded:: 2.0 **kwargs Any additional kwargs are passed to the engine. Returns ------- DataFrame """ impl = get_engine(engine) if use_nullable_dtypes is not lib.no_default: msg = ( "The argument 'use_nullable_dtypes' is deprecated and will be removed " "in a future version." ) if use_nullable_dtypes is True: msg += ( " Use dtype_backend='numpy_nullable' instead of use_nullable_dtypes=True." ) warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) else: use_nullable_dtypes = False check_dtype_backend(dtype_backend) return impl.read( path, columns=columns, storage_options=storage_options, use_nullable_dtypes=use_nullable_dtypes, dtype_backend=dtype_backend, **kwargs, )
Load a parquet object from the file path, returning a DataFrame. Parameters ---------- path : str, path object or file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``read()`` function. The string could be a URL. Valid URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is expected. A local file could be: ``file://localhost/path/to/table.parquet``. A file URL can also be a path to a directory that contains multiple partitioned parquet files. Both pyarrow and fastparquet support paths to directories as well as file URLs. A directory path could be: ``file://localhost/path/to/tables`` or ``s3://bucket/partition_dir``. engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. columns : list, default=None If not None, only these columns will be read from the file. {storage_options} .. versionadded:: 1.3.0 use_nullable_dtypes : bool, default False If True, use dtypes that use ``pd.NA`` as missing value indicator for the resulting DataFrame. (only applicable for the ``pyarrow`` engine) As new dtypes are added that support ``pd.NA`` in the future, the output with this option will change to use those dtypes. Note: this is an experimental option, and behaviour (e.g. additional support dtypes) may change without notice. .. deprecated:: 2.0 dtype_backend : {{"numpy_nullable", "pyarrow"}}, defaults to NumPy backed DataFrames Which dtype_backend to use, e.g. whether a DataFrame should have NumPy arrays, nullable dtypes are used for all dtypes that have a nullable implementation when "numpy_nullable" is set, pyarrow is used for all dtypes if "pyarrow" is set. The dtype_backends are still experimental. .. versionadded:: 2.0 **kwargs Any additional kwargs are passed to the engine. Returns ------- DataFrame
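As a quick illustration of the signature above, here is a minimal round-trip sketch. The file name "example.parquet" is a hypothetical placeholder, and either pyarrow or fastparquet must be installed for the calls to work:

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
df.to_parquet("example.parquet")  # hypothetical path, written with the default engine

# Read everything back with the default (NumPy-backed) dtypes.
restored = pd.read_parquet("example.parquet")

# Request pandas' nullable extension dtypes instead, as documented above.
nullable = pd.read_parquet("example.parquet", dtype_backend="numpy_nullable")

# Materialize only a subset of columns.
just_a = pd.read_parquet("example.parquet", columns=["a"])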
173,528
from __future__ import annotations from io import StringIO from typing import TYPE_CHECKING import warnings from pandas._libs import lib from pandas.util._exceptions import find_stack_level from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.generic import ABCDataFrame from pandas import ( get_option, option_context, ) class StringIO(TextIOWrapper): def __init__(self, initial_value: Optional[str] = ..., newline: Optional[str] = ...) -> None: ... # StringIO does not contain a "name" field. This workaround is necessary # to allow StringIO sub-classes to add this field, as it is defined # as a read-only property on IO[]. name: Any def getvalue(self) -> str: ... def find_stack_level() -> int: """ Find the first place in the stack that is not inside pandas (tests notwithstanding). """ import pandas as pd pkg_dir = os.path.dirname(pd.__file__) test_dir = os.path.join(pkg_dir, "tests") # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow frame = inspect.currentframe() n = 0 while frame: fname = inspect.getfile(frame) if fname.startswith(pkg_dir) and not fname.startswith(test_dir): frame = frame.f_back n += 1 else: break return n def check_dtype_backend(dtype_backend) -> None: if dtype_backend is not lib.no_default: if dtype_backend not in ["numpy_nullable", "pyarrow"]: raise ValueError( f"dtype_backend {dtype_backend} is invalid, only 'numpy_nullable' and " f"'pyarrow' are allowed.", ) DtypeBackend = Literal["pyarrow", "numpy_nullable"] clipboard_get = paste The provided code snippet includes necessary dependencies for implementing the `read_clipboard` function. Write a Python function `def read_clipboard( sep: str = r"\s+", dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, **kwargs, )` to solve the following problem: r""" Read text from clipboard and pass to read_csv. Parameters ---------- sep : str, default '\s+' A string or regex delimiter. The default of '\s+' denotes one or more whitespace characters. dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy backed DataFrames Which dtype_backend to use, e.g. whether a DataFrame should have NumPy arrays, nullable dtypes are used for all dtypes that have a nullable implementation when "numpy_nullable" is set, pyarrow is used for all dtypes if "pyarrow" is set. The dtype_backends are still experimental. .. versionadded:: 2.0 **kwargs See read_csv for the full argument list. Returns ------- DataFrame A parsed DataFrame object. Here is the function: def read_clipboard( sep: str = r"\s+", dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, **kwargs, ): # pragma: no cover r""" Read text from clipboard and pass to read_csv. Parameters ---------- sep : str, default '\s+' A string or regex delimiter. The default of '\s+' denotes one or more whitespace characters. dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy backed DataFrames Which dtype_backend to use, e.g. whether a DataFrame should have NumPy arrays, nullable dtypes are used for all dtypes that have a nullable implementation when "numpy_nullable" is set, pyarrow is used for all dtypes if "pyarrow" is set. The dtype_backends are still experimental. .. versionadded:: 2.0 **kwargs See read_csv for the full argument list. Returns ------- DataFrame A parsed DataFrame object.
""" encoding = kwargs.pop("encoding", "utf-8") # only utf-8 is valid for passed value because that's what clipboard # supports if encoding is not None and encoding.lower().replace("-", "") != "utf8": raise NotImplementedError("reading from clipboard only supports utf-8 encoding") check_dtype_backend(dtype_backend) from pandas.io.clipboard import clipboard_get from pandas.io.parsers import read_csv text = clipboard_get() # Try to decode (if needed, as "text" might already be a string here). try: text = text.decode(kwargs.get("encoding") or get_option("display.encoding")) except AttributeError: pass # Excel copies into clipboard with \t separation # inspect no more then the 10 first lines, if they # all contain an equal number (>0) of tabs, infer # that this came from excel and set 'sep' accordingly lines = text[:10000].split("\n")[:-1][:10] # Need to remove leading white space, since read_csv # accepts: # a b # 0 1 2 # 1 3 4 counts = {x.lstrip(" ").count("\t") for x in lines} if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0: sep = "\t" # check the number of leading tabs in the first line # to account for index columns index_length = len(lines[0]) - len(lines[0].lstrip(" \t")) if index_length != 0: kwargs.setdefault("index_col", list(range(index_length))) # Edge case where sep is specified to be None, return to default if sep is None and kwargs.get("delim_whitespace") is None: sep = r"\s+" # Regex separator currently only works with python engine. # Default to python if separator is multi-character (regex) if len(sep) > 1 and kwargs.get("engine") is None: kwargs["engine"] = "python" elif len(sep) > 1 and kwargs.get("engine") == "c": warnings.warn( "read_clipboard with regex separator does not work properly with c engine.", stacklevel=find_stack_level(), ) return read_csv(StringIO(text), sep=sep, dtype_backend=dtype_backend, **kwargs)
r""" Read text from clipboard and pass to read_csv. Parameters ---------- sep : str, default '\s+' A string or regex delimiter. The default of '\s+' denotes one or more whitespace characters. dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy backed DataFrames Which dtype_backend to use, e.g. whether a DataFrame should have NumPy arrays, nullable dtypes are used for all dtypes that have a nullable implementation when "numpy_nullable" is set, pyarrow is used for all dtypes if "pyarrow" is set. The dtype_backends are still experimential. .. versionadded:: 2.0 **kwargs See read_csv for the full argument list. Returns ------- DataFrame A parsed DataFrame object.
173,529
from __future__ import annotations from io import StringIO from typing import TYPE_CHECKING import warnings from pandas._libs import lib from pandas.util._exceptions import find_stack_level from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.generic import ABCDataFrame from pandas import ( get_option, option_context, ) class StringIO(TextIOWrapper): def __init__(self, initial_value: Optional[str] = ..., newline: Optional[str] = ...) -> None: ... # StringIO does not contain a "name" field. This workaround is necessary # to allow StringIO sub-classes to add this field, as it is defined # as a read-only property on IO[]. name: Any def getvalue(self) -> str: ... def find_stack_level() -> int: """ Find the first place in the stack that is not inside pandas (tests notwithstanding). """ import pandas as pd pkg_dir = os.path.dirname(pd.__file__) test_dir = os.path.join(pkg_dir, "tests") # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow frame = inspect.currentframe() n = 0 while frame: fname = inspect.getfile(frame) if fname.startswith(pkg_dir) and not fname.startswith(test_dir): frame = frame.f_back n += 1 else: break return n ABCDataFrame = cast( "Type[DataFrame]", create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",)) ) clipboard_set = copy The provided code snippet includes necessary dependencies for implementing the `to_clipboard` function. Write a Python function `def to_clipboard( obj, excel: bool | None = True, sep: str | None = None, **kwargs ) -> None` to solve the following problem: Attempt to write text representation of object to the system clipboard. The clipboard can then be pasted into Excel, for example. Parameters ---------- obj : the object to write to the clipboard excel : bool, defaults to True if True, use the provided separator, writing in a csv format for allowing easy pasting into excel. if False, write a string representation of the object to the clipboard sep : optional, defaults to tab other keywords are passed to to_csv Notes ----- Requirements for your platform - Linux: xclip, or xsel (with PyQt4 modules) - Windows: - OS X: Here is the function: def to_clipboard( obj, excel: bool | None = True, sep: str | None = None, **kwargs ) -> None: # pragma: no cover """ Attempt to write text representation of object to the system clipboard. The clipboard can then be pasted into Excel, for example. Parameters ---------- obj : the object to write to the clipboard excel : bool, defaults to True if True, use the provided separator, writing in a csv format for allowing easy pasting into excel.
if False, write a string representation of the object to the clipboard sep : optional, defaults to tab other keywords are passed to to_csv Notes ----- Requirements for your platform - Linux: xclip, or xsel (with PyQt4 modules) - Windows: - OS X: """ encoding = kwargs.pop("encoding", "utf-8") # testing if an invalid encoding is passed to clipboard if encoding is not None and encoding.lower().replace("-", "") != "utf8": raise ValueError("clipboard only supports utf-8 encoding") from pandas.io.clipboard import clipboard_set if excel is None: excel = True if excel: try: if sep is None: sep = "\t" buf = StringIO() # clipboard_set (pyperclip) expects unicode obj.to_csv(buf, sep=sep, encoding="utf-8", **kwargs) text = buf.getvalue() clipboard_set(text) return except TypeError: warnings.warn( "to_clipboard in excel mode requires a single character separator.", stacklevel=find_stack_level(), ) elif sep is not None: warnings.warn( "to_clipboard with excel=False ignores the sep argument.", stacklevel=find_stack_level(), ) if isinstance(obj, ABCDataFrame): # str(df) has various unhelpful defaults, like truncation with option_context("display.max_colwidth", None): objstr = obj.to_string(**kwargs) else: objstr = str(obj) clipboard_set(objstr)
Attempt to write text representation of object to the system clipboard. The clipboard can then be pasted into Excel, for example. Parameters ---------- obj : the object to write to the clipboard excel : bool, defaults to True if True, use the provided separator, writing in a csv format for allowing easy pasting into excel. if False, write a string representation of the object to the clipboard sep : optional, defaults to tab other keywords are passed to to_csv Notes ----- Requirements for your platform - Linux: xclip, or xsel (with PyQt4 modules) - Windows: - OS X:
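A hedged sketch of the two modes described above (again assuming a clipboard backend is available):

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})

# excel=True (the default): tab-separated CSV text, ready to paste into
# a spreadsheet grid cell by cell.
df.to_clipboard(sep="\t")

# excel=False: copies the plain to_string() rendering instead; passing a
# sep here would be ignored (with a warning), so leave it unset.
df.to_clipboard(excel=False)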
173,530
from __future__ import annotations import io from types import ModuleType from typing import ( Any, Literal, ) from pandas._libs import lib from pandas._typing import ( DtypeBackend, FilePath, ReadBuffer, WriteBuffer, ) from pandas.compat._optional import import_optional_dependency from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( is_categorical_dtype, is_interval_dtype, is_period_dtype, is_unsigned_integer_dtype, ) import pandas as pd from pandas.core.frame import DataFrame from pandas.io.common import get_handle class ReadBuffer(BaseBuffer, Protocol[AnyStr_co]): def read(self, __n: int = ...) -> AnyStr_co: # for BytesIOWrapper, gzip.GzipFile, bz2.BZ2File ... FilePath = Union[str, "PathLike[str]"] DtypeBackend = Literal["pyarrow", "numpy_nullable"] def import_optional_dependency( name: str, extra: str = "", errors: str = "raise", min_version: str | None = None, ): """ Import an optional dependency. By default, if a dependency is missing an ImportError with a nice message will be raised. If a dependency is present, but too old, we raise. Parameters ---------- name : str The module name. extra : str Additional text to include in the ImportError message. errors : str {'raise', 'warn', 'ignore'} What to do when a dependency is not found or its version is too old. * raise : Raise an ImportError * warn : Only applicable when a module's version is too old. Warns that the version is too old and returns None * ignore: If the module is not installed, return None, otherwise, return the module, even if the version is too old. It's expected that users validate the version locally when using ``errors="ignore"`` (see ``io/html.py``) min_version : str, default None Specify a minimum version that is different from the global pandas minimum version required. Returns ------- maybe_module : Optional[ModuleType] The imported module, when found and the version is correct. None is returned when the package is not found and `errors` is ``'ignore'``, or when the package's version is too old and `errors` is ``'warn'``. """ assert errors in {"warn", "raise", "ignore"} package_name = INSTALL_MAPPING.get(name) install_name = package_name if package_name is not None else name msg = ( f"Missing optional dependency '{install_name}'. {extra} " f"Use pip or conda to install {install_name}." ) try: module = importlib.import_module(name) except ImportError: if errors == "raise": raise ImportError(msg) return None # Handle submodules: if we have submodule, grab parent module from sys.modules parent = name.split(".")[0] if parent != name: install_name = parent module_to_get = sys.modules[install_name] else: module_to_get = module minimum_version = min_version if min_version is not None else VERSIONS.get(parent) if minimum_version: version = get_version(module_to_get) if version and Version(version) < Version(minimum_version): msg = ( f"Pandas requires version '{minimum_version}' or newer of '{parent}' " f"(version '{version}' currently installed)." ) if errors == "warn": warnings.warn( msg, UserWarning, stacklevel=find_stack_level(), ) return None elif errors == "raise": raise ImportError(msg) return module def check_dtype_backend(dtype_backend) -> None: if dtype_backend is not lib.no_default: if dtype_backend not in ["numpy_nullable", "pyarrow"]: raise ValueError( f"dtype_backend {dtype_backend} is invalid, only 'numpy_nullable' and " f"'pyarrow' are allowed.", ) class DataFrame(NDFrame, OpsMixin): """ Two-dimensional, size-mutable, potentially heterogeneous tabular data.
Data structure also contains labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure. Parameters ---------- data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame Dict can contain Series, arrays, constants, dataclass or list-like objects. If data is a dict, column order follows insertion-order. If a dict contains Series which have an index defined, it is aligned by its index. This alignment also occurs if data is a Series or a DataFrame itself. Alignment is done on Series/DataFrame inputs. If data is a list of dicts, column order follows insertion-order. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided. columns : Index or array-like Column labels to use for resulting frame when data does not have them, defaulting to RangeIndex(0, 1, 2, ..., n). If data contains column labels, will perform column selection instead. dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer. copy : bool or None, default None Copy data from inputs. For dict data, the default of None behaves like ``copy=True``. For DataFrame or 2d ndarray input, the default of None behaves like ``copy=False``. If data is a dict containing one or more Series (possibly of different dtypes), ``copy=False`` will ensure that these inputs are not copied. .. versionchanged:: 1.3.0 See Also -------- DataFrame.from_records : Constructor from tuples, also record arrays. DataFrame.from_dict : From dicts of Series, arrays, or dicts. read_csv : Read a comma-separated values (csv) file into DataFrame. read_table : Read general delimited file into DataFrame. read_clipboard : Read text from clipboard into DataFrame. Notes ----- Please reference the :ref:`User Guide <basics.dataframe>` for more information. Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. >>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = pd.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from a dictionary including Series: >>> d = {'col1': [0, 1, 2, 3], 'col2': pd.Series([2, 3], index=[2, 3])} >>> pd.DataFrame(data=d, index=[0, 1, 2, 3]) col1 col2 0 0 NaN 1 1 NaN 2 2 2.0 3 3 3.0 Constructing DataFrame from numpy ndarray: >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ... columns=['a', 'b', 'c']) >>> df2 a b c 0 1 2 3 1 4 5 6 2 7 8 9 Constructing DataFrame from a numpy ndarray that has labeled columns: >>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], ... dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")]) >>> df3 = pd.DataFrame(data, columns=['c', 'a']) ... 
>>> df3 c a 0 3 1 1 6 4 2 9 7 Constructing DataFrame from dataclass: >>> from dataclasses import make_dataclass >>> Point = make_dataclass("Point", [("x", int), ("y", int)]) >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)]) x y 0 0 0 1 0 3 2 2 3 Constructing DataFrame from Series/DataFrame: >>> ser = pd.Series([1, 2, 3], index=["a", "b", "c"]) >>> df = pd.DataFrame(data=ser, index=["a", "c"]) >>> df 0 a 1 c 3 >>> df1 = pd.DataFrame([1, 2, 3], index=["a", "b", "c"], columns=["x"]) >>> df2 = pd.DataFrame(data=df1, index=["a", "c"]) >>> df2 x a 1 c 3 """ _internal_names_set = {"columns", "index"} | NDFrame._internal_names_set _typ = "dataframe" _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray) _accessors: set[str] = {"sparse"} _hidden_attrs: frozenset[str] = NDFrame._hidden_attrs | frozenset([]) _mgr: BlockManager | ArrayManager def _constructor(self) -> Callable[..., DataFrame]: return DataFrame _constructor_sliced: Callable[..., Series] = Series # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index: Axes | None = None, columns: Axes | None = None, dtype: Dtype | None = None, copy: bool | None = None, ) -> None: if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._mgr if not copy: # if not copying data, ensure to still return a shallow copy # to avoid the result sharing the same Manager data = data.copy(deep=False) if isinstance(data, (BlockManager, ArrayManager)): if using_copy_on_write(): data = data.copy(deep=False) # first check if a Manager is passed without any other arguments # -> use fastpath (without checking Manager type) if index is None and columns is None and dtype is None and not copy: # GH#33357 fastpath NDFrame.__init__(self, data) return manager = get_option("mode.data_manager") # GH47215 if index is not None and isinstance(index, set): raise ValueError("index cannot be a set") if columns is not None and isinstance(columns, set): raise ValueError("columns cannot be a set") if copy is None: if isinstance(data, dict): # retain pre-GH#38939 default behavior copy = True elif ( manager == "array" and isinstance(data, (np.ndarray, ExtensionArray)) and data.ndim == 2 ): # INFO(ArrayManager) by default copy the 2D input array to get # contiguous 1D arrays copy = True elif using_copy_on_write() and not isinstance( data, (Index, DataFrame, Series) ): copy = True else: copy = False if data is None: index = index if index is not None else default_index(0) columns = columns if columns is not None else default_index(0) dtype = dtype if dtype is not None else pandas_dtype(object) data = [] if isinstance(data, (BlockManager, ArrayManager)): mgr = self._init_mgr( data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy ) elif isinstance(data, dict): # GH#38939 de facto copy defaults to False only in non-dict cases mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager) elif isinstance(data, ma.MaskedArray): from numpy.ma import mrecords # masked recarray if isinstance(data, mrecords.MaskedRecords): raise TypeError( "MaskedRecords are not supported. Pass " "{name: data[name] for name in data.dtype.names} " "instead" ) # a masked array data = sanitize_masked_array(data) mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) elif isinstance(data, (np.ndarray, Series, Index, ExtensionArray)): if data.dtype.names: # i.e. 
numpy structured array data = cast(np.ndarray, data) mgr = rec_array_to_mgr( data, index, columns, dtype, copy, typ=manager, ) elif getattr(data, "name", None) is not None: # i.e. Series/Index with non-None name _copy = copy if using_copy_on_write() else True mgr = dict_to_mgr( # error: Item "ndarray" of "Union[ndarray, Series, Index]" has no # attribute "name" {data.name: data}, # type: ignore[union-attr] index, columns, dtype=dtype, typ=manager, copy=_copy, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) # For data is list-like, or Iterable (will consume into list) elif is_list_like(data): if not isinstance(data, abc.Sequence): if hasattr(data, "__array__"): # GH#44616 big perf improvement for e.g. pytorch tensor data = np.asarray(data) else: data = list(data) if len(data) > 0: if is_dataclass(data[0]): data = dataclasses_to_dicts(data) if not isinstance(data, np.ndarray) and treat_as_nested(data): # exclude ndarray as we may have cast it a few lines above if columns is not None: columns = ensure_index(columns) arrays, columns, index = nested_data_to_arrays( # error: Argument 3 to "nested_data_to_arrays" has incompatible # type "Optional[Collection[Any]]"; expected "Optional[Index]" data, columns, index, # type: ignore[arg-type] dtype, ) mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, typ=manager, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) else: mgr = dict_to_mgr( {}, index, columns if columns is not None else default_index(0), dtype=dtype, typ=manager, ) # For data is scalar else: if index is None or columns is None: raise ValueError("DataFrame constructor not properly called!") index = ensure_index(index) columns = ensure_index(columns) if not dtype: dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True) # For data is a scalar extension dtype if isinstance(dtype, ExtensionDtype): # TODO(EA2D): special case not needed with 2D EAs values = [ construct_1d_arraylike_from_scalar(data, len(index), dtype) for _ in range(len(columns)) ] mgr = arrays_to_mgr(values, columns, index, dtype=None, typ=manager) else: arr2d = construct_2d_arraylike_from_scalar( data, len(index), len(columns), dtype, copy, ) mgr = ndarray_to_mgr( arr2d, index, columns, dtype=arr2d.dtype, copy=False, typ=manager, ) # ensure correct Manager type according to settings mgr = mgr_to_mgr(mgr, typ=manager) NDFrame.__init__(self, mgr) # ---------------------------------------------------------------------- def __dataframe__( self, nan_as_null: bool = False, allow_copy: bool = True ) -> DataFrameXchg: """ Return the dataframe interchange object implementing the interchange protocol. Parameters ---------- nan_as_null : bool, default False Whether to tell the DataFrame to overwrite null values in the data with ``NaN`` (or ``NaT``). allow_copy : bool, default True Whether to allow memory copying when exporting. If set to False it would cause non-zero-copy exports to fail. Returns ------- DataFrame interchange object The object which consuming library can use to ingress the dataframe. Notes ----- Details on the interchange protocol: https://data-apis.org/dataframe-protocol/latest/index.html `nan_as_null` currently has no effect; once support for nullable extension dtypes is added, this value should be propagated to columns. 
""" from pandas.core.interchange.dataframe import PandasDataFrameXchg return PandasDataFrameXchg(self, nan_as_null, allow_copy) # ---------------------------------------------------------------------- def axes(self) -> list[Index]: """ Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns] def shape(self) -> tuple[int, int]: """ Return a tuple representing the dimensionality of the DataFrame. See Also -------- ndarray.shape : Tuple of array dimensions. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self.index), len(self.columns) def _is_homogeneous_type(self) -> bool: """ Whether all the columns in a DataFrame have the same type. Returns ------- bool See Also -------- Index._is_homogeneous_type : Whether the object has a single dtype. MultiIndex._is_homogeneous_type : Whether all the levels of a MultiIndex have the same dtype. Examples -------- >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type True >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type False Items with the same type but different sizes are considered different types. >>> DataFrame({ ... "A": np.array([1, 2], dtype=np.int32), ... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type False """ if isinstance(self._mgr, ArrayManager): return len({arr.dtype for arr in self._mgr.arrays}) == 1 if self._mgr.any_extension_types: return len({block.dtype for block in self._mgr.blocks}) == 1 else: return not self._is_mixed_type def _can_fast_transpose(self) -> bool: """ Can we transpose this DataFrame without creating any new array objects. """ if isinstance(self._mgr, ArrayManager): return False blocks = self._mgr.blocks if len(blocks) != 1: return False dtype = blocks[0].dtype # TODO(EA2D) special case would be unnecessary with 2D EAs return not is_1d_only_ea_dtype(dtype) def _values(self) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray: """ Analogue to ._values that may return a 2D ExtensionArray. """ mgr = self._mgr if isinstance(mgr, ArrayManager): if len(mgr.arrays) == 1 and not is_1d_only_ea_dtype(mgr.arrays[0].dtype): # error: Item "ExtensionArray" of "Union[ndarray, ExtensionArray]" # has no attribute "reshape" return mgr.arrays[0].reshape(-1, 1) # type: ignore[union-attr] return ensure_wrapped_if_datetimelike(self.values) blocks = mgr.blocks if len(blocks) != 1: return ensure_wrapped_if_datetimelike(self.values) arr = blocks[0].values if arr.ndim == 1: # non-2D ExtensionArray return self.values # more generally, whatever we allow in NDArrayBackedExtensionBlock arr = cast("np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray", arr) return arr.T # ---------------------------------------------------------------------- # Rendering Methods def _repr_fits_vertical_(self) -> bool: """ Check length against max_rows. """ max_rows = get_option("display.max_rows") return len(self) <= max_rows def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool: """ Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns. In case of non-interactive session, no boundaries apply. 
`ignore_width` is here so ipynb+HTML output can behave the way users expect. display.max_columns remains in effect. GH3541, GH3573 """ width, height = console.get_console_size() max_columns = get_option("display.max_columns") nb_columns = len(self.columns) # exceed max columns if (max_columns and nb_columns > max_columns) or ( (not ignore_width) and width and nb_columns > (width // 2) ): return False # used by repr_html under IPython notebook or scripts ignore terminal # dims if ignore_width or width is None or not console.in_interactive_session(): return True if get_option("display.width") is not None or console.in_ipython_frontend(): # check at least the column row for excessive width max_rows = 1 else: max_rows = get_option("display.max_rows") # when auto-detecting, so width=None and not in ipython front end # check whether repr fits horizontal by actually checking # the width of the rendered repr buf = StringIO() # only care about the stuff we'll actually print out # and to_string on entire frame may be expensive d = self if max_rows is not None: # unlimited rows # min of two, where one may be None d = d.iloc[: min(max_rows, len(d))] else: return True d.to_string(buf=buf) value = buf.getvalue() repr_width = max(len(line) for line in value.split("\n")) return repr_width < width def _info_repr(self) -> bool: """ True if the repr should show the info view. """ info_repr_option = get_option("display.large_repr") == "info" return info_repr_option and not ( self._repr_fits_horizontal_() and self._repr_fits_vertical_() ) def __repr__(self) -> str: """ Return a string representation for a particular DataFrame. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) return buf.getvalue() repr_params = fmt.get_dataframe_repr_params() return self.to_string(**repr_params) def _repr_html_(self) -> str | None: """ Return a html representation for a particular DataFrame. Mainly for IPython notebook. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) # need to escape the <class>, should be the first line. val = buf.getvalue().replace("<", r"&lt;", 1) val = val.replace(">", r"&gt;", 1) return f"<pre>{val}</pre>" if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") min_rows = get_option("display.min_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") formatter = fmt.DataFrameFormatter( self, columns=None, col_space=None, na_rep="NaN", formatters=None, float_format=None, sparsify=None, justify=None, index_names=True, header=True, index=True, bold_rows=True, escape=True, max_rows=max_rows, min_rows=min_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=".", ) return fmt.DataFrameRenderer(formatter).to_html(notebook=True) else: return None def to_string( self, buf: None = ..., columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> str: ... 
def to_string( self, buf: FilePath | WriteBuffer[str], columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> None: ... header_type="bool or sequence of str", header="Write out the column names. If a list of strings " "is given, it is assumed to be aliases for the " "column names", col_space_type="int, list or dict of int", col_space="The minimum width of each column. If a list of ints is given " "every integer corresponds to one column. If a dict is given, the key " "references the column, while the value defines the space to use.", ) def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[str] | None = None, col_space: int | list[int] | dict[Hashable, int] | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: fmt.FormattersType | None = None, float_format: fmt.FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool = False, decimal: str = ".", line_width: int | None = None, min_rows: int | None = None, max_colwidth: int | None = None, encoding: str | None = None, ) -> str | None: """ Render a DataFrame to a console-friendly tabular output. %(shared_params)s line_width : int, optional Width to wrap a line in characters. min_rows : int, optional The number of rows to display in the console in a truncated repr (when number of rows is above `max_rows`). max_colwidth : int, optional Max width to truncate each column in characters. By default, no limit. encoding : str, default "utf-8" Set character encoding. %(returns)s See Also -------- to_html : Convert DataFrame to HTML. Examples -------- >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]} >>> df = pd.DataFrame(d) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 """ from pandas import option_context with option_context("display.max_colwidth", max_colwidth): formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, min_rows=min_rows, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal, ) return fmt.DataFrameRenderer(formatter).to_string( buf=buf, encoding=encoding, line_width=line_width, ) # ---------------------------------------------------------------------- def style(self) -> Styler: """ Returns a Styler object. Contains methods for building a styled HTML representation of the DataFrame. See Also -------- io.formats.style.Styler : Helps style a DataFrame or Series according to the data with HTML and CSS. """ from pandas.io.formats.style import Styler return Styler(self) _shared_docs[ "items" ] = r""" Iterate over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series.
Yields ------ label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. Examples -------- >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.items(): ... print(f'label: {label}') ... print(f'content: {content}', sep='\n') ... label: species content: panda bear polar bear koala marsupial Name: species, dtype: object label: population content: panda 1864 polar 22000 koala 80000 Name: population, dtype: int64 """ def items(self) -> Iterable[tuple[Hashable, Series]]: if self.columns.is_unique and hasattr(self, "_item_cache"): for k in self.columns: yield k, self._get_item_cache(k) else: for i, k in enumerate(self.columns): yield k, self._ixs(i, axis=1) def iterrows(self) -> Iterable[tuple[Hashable, Series]]: """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : Series The data of the row as a Series. See Also -------- DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. """ columns = self.columns klass = self._constructor_sliced using_cow = using_copy_on_write() for k, v in zip(self.index, self.values): s = klass(v, index=columns, name=k).__finalize__(self) if using_cow and self._mgr.is_single_block: s._mgr.add_references(self._mgr) # type: ignore[arg-type] yield k, s def itertuples( self, index: bool = True, name: str | None = "Pandas" ) -> Iterable[tuple[Any, ...]]: """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "Pandas" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. 
Examples -------- >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... Pandas(Index='dog', num_legs=4, num_wings=0) Pandas(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... Pandas(num_legs=4, num_wings=0) Pandas(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2) """ arrays = [] fields = list(self.columns) if index: arrays.append(self.index) fields.insert(0, "Index") # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) if name is not None: # https://github.com/python/mypy/issues/9046 # error: namedtuple() expects a string literal as the first argument itertuple = collections.namedtuple( # type: ignore[misc] name, fields, rename=True ) return map(itertuple._make, zip(*arrays)) # fallback to regular tuples return zip(*arrays) def __len__(self) -> int: """ Returns length of info axis, but here we use the index. """ return len(self.index) def dot(self, other: Series) -> Series: ... def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame: ... def dot(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of another Series, DataFrame or a numpy array. It can also be called using ``self @ other`` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the matrix product with. Returns ------- Series or DataFrame If other is a Series, return the matrix product between self and other as a Series. If other is a DataFrame or a numpy.array, return the matrix product of self and other in a DataFrame or a np.array. See Also -------- Series.dot: Similar method for Series. Notes ----- The dimensions of DataFrame and other must be compatible in order to compute the matrix multiplication. In addition, the column names of DataFrame and the index of other must contain the same values, as they will be aligned prior to the multiplication. The dot method for Series computes the inner product, instead of the matrix product here. Examples -------- Here we multiply a DataFrame with a Series. >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) >>> s = pd.Series([1, 1, 2, 1]) >>> df.dot(s) 0 -4 1 5 dtype: int64 Here we multiply a DataFrame with another DataFrame. >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(other) 0 1 0 1 4 1 2 2 Note that the dot method gives the same result as @ >>> df @ other 0 1 0 1 4 1 2 2 The dot method also works if other is an np.array. >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(arr) 0 1 0 1 4 1 2 2 Note how shuffling of the objects does not change the result.
>>> s2 = s.reindex([1, 0, 2, 3]) >>> df.dot(s2) 0 -4 1 5 dtype: int64 """ if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) if len(common) > len(self.columns) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(columns=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right._values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: raise ValueError( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, DataFrame): return self._constructor( np.dot(lvals, rvals), index=left.index, columns=other.columns, copy=False, ) elif isinstance(other, Series): return self._constructor_sliced( np.dot(lvals, rvals), index=left.index, copy=False ) elif isinstance(rvals, (np.ndarray, Index)): result = np.dot(lvals, rvals) if result.ndim == 2: return self._constructor(result, index=left.index, copy=False) else: return self._constructor_sliced(result, index=left.index, copy=False) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other: Series) -> Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(other) def __rmatmul__(self, other) -> DataFrame: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ try: return self.T.dot(np.transpose(other)).T except ValueError as err: if "shape mismatch" not in str(err): raise # GH#21581 give exception message for original shapes msg = f"shapes {np.shape(other)} and {self.shape} not aligned" raise ValueError(msg) from err # ---------------------------------------------------------------------- # IO methods (to / from other formats) def from_dict( cls, data: dict, orient: str = "columns", dtype: Dtype | None = None, columns: Axes | None = None, ) -> DataFrame: """ Construct DataFrame from dict of array-like or dicts. Creates DataFrame object from dictionary by columns or by index allowing dtype specification. Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. orient : {'columns', 'index', 'tight'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. If 'tight', assume a dict with keys ['index', 'columns', 'data', 'index_names', 'column_names']. .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument dtype : dtype, default None Data type to force after DataFrame construction, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'`` or ``orient='tight'``. Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from structured ndarray, sequence of tuples or dicts, or DataFrame. DataFrame : DataFrame object creation using constructor. DataFrame.to_dict : Convert the DataFrame to a dictionary. 
Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data, orient='index') 0 1 2 3 row_1 3 2 1 0 row_2 a b c d When using the 'index' orientation, the column names can be specified manually: >>> pd.DataFrame.from_dict(data, orient='index', ... columns=['A', 'B', 'C', 'D']) A B C D row_1 3 2 1 0 row_2 a b c d Specify ``orient='tight'`` to create the DataFrame using a 'tight' format: >>> data = {'index': [('a', 'b'), ('a', 'c')], ... 'columns': [('x', 1), ('y', 2)], ... 'data': [[1, 3], [2, 4]], ... 'index_names': ['n1', 'n2'], ... 'column_names': ['z1', 'z2']} >>> pd.DataFrame.from_dict(data, orient='tight') z1 x y z2 1 2 n1 n2 a b 1 3 c 2 4 """ index = None orient = orient.lower() if orient == "index": if len(data) > 0: # TODO speed up Series case if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: index = list(data.keys()) # error: Incompatible types in assignment (expression has type # "List[Any]", variable has type "Dict[Any, Any]") data = list(data.values()) # type: ignore[assignment] elif orient in ("columns", "tight"): if columns is not None: raise ValueError(f"cannot use columns parameter with orient='{orient}'") else: # pragma: no cover raise ValueError( f"Expected 'index', 'columns' or 'tight' for orient parameter. " f"Got '{orient}' instead" ) if orient != "tight": return cls(data, index=index, columns=columns, dtype=dtype) else: realdata = data["data"] def create_index(indexlist, namelist): index: Index if len(namelist) > 1: index = MultiIndex.from_tuples(indexlist, names=namelist) else: index = Index(indexlist, name=namelist[0]) return index index = create_index(data["index"], data["index_names"]) columns = create_index(data["columns"], data["column_names"]) return cls(realdata, index=index, columns=columns, dtype=dtype) def to_numpy( self, dtype: npt.DTypeLike | None = None, copy: bool = False, na_value: object = lib.no_default, ) -> np.ndarray: """ Convert the DataFrame to a NumPy array. By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the results dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray`. copy : bool, default False Whether to ensure that the returned value is not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. na_value : Any, optional The value to use for missing values. The default value depends on `dtype` and the dtypes of the DataFrame columns. .. versionadded:: 1.1.0 Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogeneous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. 
, 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) """ if dtype is not None: dtype = np.dtype(dtype) result = self._mgr.as_array(dtype=dtype, copy=copy, na_value=na_value) if result.dtype is not dtype: result = np.array(result, dtype=dtype, copy=False) return result def _create_data_for_split_and_tight_to_dict( self, are_all_object_dtype_cols: bool, object_dtype_indices: list[int] ) -> list: """ Simple helper method to create data for ``to_dict(orient="split")`` and ``to_dict(orient="tight")`` to create the main output data """ if are_all_object_dtype_cols: data = [ list(map(maybe_box_native, t)) for t in self.itertuples(index=False, name=None) ] else: data = [list(t) for t in self.itertuples(index=False, name=None)] if object_dtype_indices: # If we have object_dtype_cols, apply maybe_box_native after list # comprehension for perf for row in data: for i in object_dtype_indices: row[i] = maybe_box_native(row[i]) return data def to_dict( self, orient: Literal["dict", "list", "series", "split", "tight", "index"] = ..., into: type[dict] = ..., ) -> dict: ... def to_dict(self, orient: Literal["records"], into: type[dict] = ...) -> list[dict]: ... def to_dict( self, orient: Literal[ "dict", "list", "series", "split", "tight", "records", "index" ] = "dict", into: type[dict] = dict, index: bool = True, ) -> dict | list[dict]: """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'tight' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values], 'index_names' -> [index.names], 'column_names' -> [column.names]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. index : bool, default True Whether to include the index item (and index_names item if `orient` is 'tight') in the returned dictionary. Can only be ``False`` when `orient` is 'split' or 'tight'. .. versionadded:: 2.0.0 Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. See Also -------- DataFrame.from_dict: Create a DataFrame from a dictionary. DataFrame.to_json: Convert a DataFrame to JSON format. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df.to_dict() {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}} You can specify the return orientation.
>>> df.to_dict('series') {'col1': row1 1 row2 2 Name: col1, dtype: int64, 'col2': row1 0.50 row2 0.75 Name: col2, dtype: float64} >>> df.to_dict('split') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]} >>> df.to_dict('records') [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}] >>> df.to_dict('index') {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} >>> df.to_dict('tight') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]], 'index_names': [None], 'column_names': [None]} You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}), defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})] """ from pandas.core.methods.to_dict import to_dict return to_dict(self, orient, into, index) def to_gbq( self, destination_table: str, project_id: str | None = None, chunksize: int | None = None, reauth: bool = False, if_exists: str = "fail", auth_local_webserver: bool = True, table_schema: list[dict[str, str]] | None = None, location: str | None = None, progress_bar: bool = True, credentials=None, ) -> None: """ Write a DataFrame to a Google BigQuery table. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. Parameters ---------- destination_table : str Name of table to be written, in the form ``dataset.tablename``. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. chunksize : int, optional Number of rows to be inserted in each chunk from the dataframe. Set to ``None`` to load the whole dataframe at once. reauth : bool, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. if_exists : str, default 'fail' Behavior when the destination table exists. Value can be one of: ``'fail'`` If table exists raise pandas_gbq.gbq.TableCreationError. ``'replace'`` If table exists, drop it, recreate it, and insert data. ``'append'`` If table exists, insert data. Create if does not exist. auth_local_webserver : bool, default True Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. _console flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. .. versionchanged:: 1.5.0 Default value is changed to ``True``. Google has deprecated the ``auth_local_webserver = False`` `"out of band" (copy-paste) flow <https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html?m=1#disallowed-oob>`_. table_schema : list of dicts, optional List of BigQuery table fields to which according DataFrame columns conform to, e.g. ``[{'name': 'col1', 'type': 'STRING'},...]``. 
If schema is not provided, it will be generated according to dtypes of DataFrame columns. See BigQuery API documentation on available names of a field. *New in version 0.3.1 of pandas-gbq*. location : str, optional Location where the load job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of the target dataset. *New in version 0.5.0 of pandas-gbq*. progress_bar : bool, default True Use the library `tqdm` to show the progress bar for the upload, chunk by chunk. *New in version 0.5.0 of pandas-gbq*. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. *New in version 0.8.0 of pandas-gbq*. See Also -------- pandas_gbq.to_gbq : This function in the pandas-gbq library. read_gbq : Read a DataFrame from Google BigQuery. """ from pandas.io import gbq gbq.to_gbq( self, destination_table, project_id=project_id, chunksize=chunksize, reauth=reauth, if_exists=if_exists, auth_local_webserver=auth_local_webserver, table_schema=table_schema, location=location, progress_bar=progress_bar, credentials=credentials, ) def from_records( cls, data, index=None, exclude=None, columns=None, coerce_float: bool = False, nrows: int | None = None, ) -> DataFrame: """ Convert structured or record ndarray to DataFrame. Creates a DataFrame object from a structured ndarray, sequence of tuples or dicts, or DataFrame. Parameters ---------- data : structured ndarray, sequence of tuples or dicts, or DataFrame Structured input data. index : str, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use. exclude : sequence, default None Columns or fields to exclude. columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns). coerce_float : bool, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. nrows : int, default None Number of rows to read if data is an iterator. Returns ------- DataFrame See Also -------- DataFrame.from_dict : DataFrame from dict of array-like or dicts. DataFrame : DataFrame object creation using constructor. Examples -------- Data can be provided as a structured ndarray: >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')], ... dtype=[('col_1', 'i4'), ('col_2', 'U1')]) >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of dicts: >>> data = [{'col_1': 3, 'col_2': 'a'}, ... {'col_1': 2, 'col_2': 'b'}, ... {'col_1': 1, 'col_2': 'c'}, ... 
{'col_1': 0, 'col_2': 'd'}] >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of tuples with corresponding columns: >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')] >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2']) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d """ if isinstance(data, DataFrame): if columns is not None: if is_scalar(columns): columns = [columns] data = data[columns] if index is not None: data = data.set_index(index) if exclude is not None: data = data.drop(columns=exclude) return data.copy(deep=False) result_index = None # Make a copy of the input columns so we can modify it if columns is not None: columns = ensure_index(columns) def maybe_reorder( arrays: list[ArrayLike], arr_columns: Index, columns: Index, index ) -> tuple[list[ArrayLike], Index, Index | None]: """ If our desired 'columns' do not match the data's pre-existing 'arr_columns', we re-order our arrays. This is like a pre-emptive (cheap) reindex. """ if len(arrays): length = len(arrays[0]) else: length = 0 result_index = None if len(arrays) == 0 and index is None and length == 0: result_index = default_index(0) arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, length) return arrays, arr_columns, result_index if is_iterator(data): if nrows == 0: return cls() try: first_row = next(data) except StopIteration: return cls(index=index, columns=columns) dtype = None if hasattr(first_row, "dtype") and first_row.dtype.names: dtype = first_row.dtype values = [first_row] if nrows is None: values += data else: values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) else: data = values if isinstance(data, dict): if columns is None: columns = arr_columns = ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] arr_columns_list = [] for k, v in data.items(): if k in columns: arr_columns_list.append(k) arrays.append(v) arr_columns = Index(arr_columns_list) arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = to_arrays(data, columns) arr_columns = columns else: arrays, arr_columns = to_arrays(data, columns) if coerce_float: for i, arr in enumerate(arrays): if arr.dtype == object: # error: Argument 1 to "maybe_convert_objects" has # incompatible type "Union[ExtensionArray, ndarray]"; # expected "ndarray" arrays[i] = lib.maybe_convert_objects( arr, # type: ignore[arg-type] try_float=True, ) arr_columns = ensure_index(arr_columns) if columns is None: columns = arr_columns else: arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) if exclude is None: exclude = set() else: exclude = set(exclude) if index is not None: if isinstance(index, str) or not hasattr(index, "__iter__"): i = columns.get_loc(index) exclude.add(index) if len(arrays) > 0: result_index = Index(arrays[i], name=index) else: result_index = Index([], name=index) else: try: index_data = [arrays[arr_columns.get_loc(field)] for field in index] except (KeyError, TypeError): # raised by get_loc, see GH#29258 result_index = index else: result_index = ensure_index_from_sequences(index_data, names=index) exclude.update(index) if any(exclude): arr_exclude = [x for x in exclude if x in arr_columns] to_remove = [arr_columns.get_loc(col) for col in arr_exclude] arrays = [v for i, v in enumerate(arrays) if i not in to_remove] columns = columns.drop(exclude) manager = 
get_option("mode.data_manager") mgr = arrays_to_mgr(arrays, columns, result_index, typ=manager) return cls(mgr) def to_records( self, index: bool = True, column_dtypes=None, index_dtypes=None ) -> np.recarray: """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. column_dtypes : str, type, dict, default None If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) If the DataFrame index has no label then the recarray field name is set to 'index'. If the index has a label then this is used as the field name: >>> df.index = df.index.rename("I") >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')]) As well as for the index: >>> df.to_records(index_dtypes="<S2") rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')]) >>> index_dtypes = f"<S{df.index.str.len().max()}" >>> df.to_records(index_dtypes=index_dtypes) rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) """ if index: ix_vals = [ np.asarray(self.index.get_level_values(i)) for i in range(self.index.nlevels) ] arrays = ix_vals + [ np.asarray(self.iloc[:, i]) for i in range(len(self.columns)) ] index_names = list(self.index.names) if isinstance(self.index, MultiIndex): index_names = com.fill_missing_names(index_names) elif index_names[0] is None: index_names = ["index"] names = [str(name) for name in itertools.chain(index_names, self.columns)] else: arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))] names = [str(c) for c in self.columns] index_names = [] index_len = len(index_names) formats = [] for i, v in enumerate(arrays): index_int = i # When the names and arrays are collected, we # first collect those in the DataFrame's index, # followed by those in its columns. # # Thus, the total length of the array is: # len(index_names) + len(DataFrame.columns). # # This check allows us to see whether we are # handling a name / array in the index or column. 
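            #
            # Illustrative example: for a frame indexed by one named
            # level "I" with columns ["A", "B"], i == 0 resolves the
            # index level "I", while i == 1 and i == 2 resolve columns
            # "A" and "B" respectively.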
if index_int < index_len: dtype_mapping = index_dtypes name = index_names[index_int] else: index_int -= index_len dtype_mapping = column_dtypes name = self.columns[index_int] # We have a dictionary, so we get the data type # associated with the index or column (which can # be denoted by its name in the DataFrame or its # position in DataFrame's array of indices or # columns, whichever is applicable. if is_dict_like(dtype_mapping): if name in dtype_mapping: dtype_mapping = dtype_mapping[name] elif index_int in dtype_mapping: dtype_mapping = dtype_mapping[index_int] else: dtype_mapping = None # If no mapping can be found, use the array's # dtype attribute for formatting. # # A valid dtype must either be a type or # string naming a type. if dtype_mapping is None: formats.append(v.dtype) elif isinstance(dtype_mapping, (type, np.dtype, str)): # error: Argument 1 to "append" of "list" has incompatible # type "Union[type, dtype[Any], str]"; expected "dtype[Any]" formats.append(dtype_mapping) # type: ignore[arg-type] else: element = "row" if i < index_len else "column" msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}" raise ValueError(msg) return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats}) def _from_arrays( cls, arrays, columns, index, dtype: Dtype | None = None, verify_integrity: bool = True, ) -> DataFrame: """ Create DataFrame from a list of arrays corresponding to the columns. Parameters ---------- arrays : list-like of arrays Each array in the list corresponds to one column, in order. columns : list-like, Index The column names for the resulting DataFrame. index : list-like, Index The rows labels for the resulting DataFrame. dtype : dtype, optional Optional dtype to enforce for all arrays. verify_integrity : bool, default True Validate and homogenize all input. If set to False, it is assumed that all elements of `arrays` are actual arrays how they will be stored in a block (numpy ndarray or ExtensionArray), have the same length as and are aligned with the index, and that `columns` and `index` are ensured to be an Index object. Returns ------- DataFrame """ if dtype is not None: dtype = pandas_dtype(dtype) manager = get_option("mode.data_manager") columns = ensure_index(columns) if len(columns) != len(arrays): raise ValueError("len(columns) must match len(arrays)") mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, verify_integrity=verify_integrity, typ=manager, ) return cls(mgr) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path", ) def to_stata( self, path: FilePath | WriteBuffer[bytes], *, convert_dates: dict[Hashable, str] | None = None, write_index: bool = True, byteorder: str | None = None, time_stamp: datetime.datetime | None = None, data_label: str | None = None, variable_labels: dict[Hashable, str] | None = None, version: int | None = 114, convert_strl: Sequence[Hashable] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, value_labels: dict[Hashable, dict[float, str]] | None = None, ) -> None: """ Export DataFrame object to Stata dta format. Writes the DataFrame to a Stata dataset file. "dta" files contain a Stata dataset. Parameters ---------- path : str, path object, or buffer String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. 
Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information. write_index : bool Write the index to Stata dataset. byteorder : str Can be ">", "<", "little", or "big". default is `sys.byteorder`. time_stamp : datetime A datetime to use as file creation date. Default is the current time. data_label : str, optional A label for the data set. Must be 80 characters or smaller. variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. version : {{114, 117, 118, 119, None}}, default 114 Version to use in the output dta file. Set to None to let pandas decide between 118 or 119 formats depending on the number of columns in the frame. Version 114 can be read by Stata 10 and later. Version 117 can be read by Stata 13 or later. Version 118 is supported in Stata 14 and later. Version 119 is supported in Stata 15 and later. Version 114 limits string variables to 244 characters or fewer while versions 117 and later allow strings with lengths up to 2,000,000 characters. Versions 118 and 119 support Unicode characters, and version 119 supports more than 32,767 variables. Version 119 should usually only be used when the number of variables exceeds the capacity of dta format 118. Exporting smaller datasets in format 119 may have unintended consequences, and, as of November 2020, Stata SE cannot read version 119 files. convert_strl : list, optional List of column names to convert to string columns to Stata StrL format. Only available if version is 117. Storing strings in the StrL format can produce smaller dta files if strings have more than 8 characters and values are repeated. {compression_options} .. versionadded:: 1.1.0 .. versionchanged:: 1.4.0 Zstandard support. {storage_options} .. versionadded:: 1.2.0 value_labels : dict of dicts Dictionary containing columns as keys and dictionaries of column value to labels as values. Labels for a single variable must be 32,000 characters or smaller. .. versionadded:: 1.4.0 Raises ------ NotImplementedError * If datetimes contain timezone information * Column dtype is not representable in Stata ValueError * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters See Also -------- read_stata : Import Stata data files. io.stata.StataWriter : Low-level writer for Stata data files. io.stata.StataWriter117 : Low-level writer for version 117 files. Examples -------- >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], ... 
'speed': [350, 18, 361, 15]}}) >>> df.to_stata('animals.dta') # doctest: +SKIP """ if version not in (114, 117, 118, 119, None): raise ValueError("Only formats 114, 117, 118 and 119 are supported.") if version == 114: if convert_strl is not None: raise ValueError("strl is not supported in format 114") from pandas.io.stata import StataWriter as statawriter elif version == 117: # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriter117 as statawriter, ) else: # versions 118 and 119 # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriterUTF8 as statawriter, ) kwargs: dict[str, Any] = {} if version is None or version >= 117: # strl conversion is only supported >= 117 kwargs["convert_strl"] = convert_strl if version is None or version >= 118: # Specifying the version is only supported for UTF8 (118 or 119) kwargs["version"] = version writer = statawriter( path, self, convert_dates=convert_dates, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, write_index=write_index, variable_labels=variable_labels, compression=compression, storage_options=storage_options, value_labels=value_labels, **kwargs, ) writer.write_file() def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None: """ Write a DataFrame to the binary Feather format. Parameters ---------- path : str, path object, file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. If a string or a path, it will be used as Root Directory path when writing a partitioned dataset. **kwargs : Additional keywords passed to :func:`pyarrow.feather.write_feather`. Starting with pyarrow 0.17, this includes the `compression`, `compression_level`, `chunksize` and `version` keywords. .. versionadded:: 1.1.0 Notes ----- This function writes the dataframe as a `feather file <https://arrow.apache.org/docs/python/feather.html>`_. Requires a default index. For saving the DataFrame with your custom index use a method that supports custom indices e.g. `to_parquet`. """ from pandas.io.feather_format import to_feather to_feather(self, path, **kwargs) Series.to_markdown, klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples="""Examples -------- >>> df = pd.DataFrame( ... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]} ... ) >>> print(df.to_markdown()) | | animal_1 | animal_2 | |---:|:-----------|:-----------| | 0 | elk | dog | | 1 | pig | quetzal | Output markdown with a tabulate option. 
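
        (Any table format understood by the ``tabulate`` package can be
        requested this way; extra keyword arguments are forwarded to
        ``tabulate.tabulate``.)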
>>> print(df.to_markdown(tablefmt="grid")) +----+------------+------------+ | | animal_1 | animal_2 | +====+============+============+ | 0 | elk | dog | +----+------------+------------+ | 1 | pig | quetzal | +----+------------+------------+""", ) def to_markdown( self, buf: FilePath | WriteBuffer[str] | None = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions = None, **kwargs, ) -> str | None: if "showindex" in kwargs: raise ValueError("Pass 'index' instead of 'showindex") kwargs.setdefault("headers", "keys") kwargs.setdefault("tablefmt", "pipe") kwargs.setdefault("showindex", index) tabulate = import_optional_dependency("tabulate") result = tabulate.tabulate(self, **kwargs) if buf is None: return result with get_handle(buf, mode, storage_options=storage_options) as handles: handles.handle.write(result) return None def to_parquet( self, path: None = ..., engine: str = ..., compression: str | None = ..., index: bool | None = ..., partition_cols: list[str] | None = ..., storage_options: StorageOptions = ..., **kwargs, ) -> bytes: ... def to_parquet( self, path: FilePath | WriteBuffer[bytes], engine: str = ..., compression: str | None = ..., index: bool | None = ..., partition_cols: list[str] | None = ..., storage_options: StorageOptions = ..., **kwargs, ) -> None: ... def to_parquet( self, path: FilePath | WriteBuffer[bytes] | None = None, engine: str = "auto", compression: str | None = "snappy", index: bool | None = None, partition_cols: list[str] | None = None, storage_options: StorageOptions = None, **kwargs, ) -> bytes | None: """ Write a DataFrame to the binary parquet format. This function writes the dataframe as a `parquet file <https://parquet.apache.org/>`_. You can choose different parquet backends, and have the option of compression. See :ref:`the user guide <io.parquet>` for more details. Parameters ---------- path : str, path object, file-like object, or None, default None String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. If None, the result is returned as bytes. If a string or path, it will be used as Root Directory path when writing a partitioned dataset. .. versionchanged:: 1.2.0 Previously this was "fname" engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. index : bool, default None If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``True`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. partition_cols : list, optional, default None Column names by which to partition the dataset. Columns are partitioned in the order they are given. Must be None if path is not a string. {storage_options} .. versionadded:: 1.2.0 **kwargs Additional arguments passed to the parquet library. See :ref:`pandas io <io.parquet>` for more details. Returns ------- bytes if no path argument is provided else None See Also -------- read_parquet : Read a parquet file. 
DataFrame.to_orc : Write an orc file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- This function requires either the `fastparquet <https://pypi.org/project/fastparquet>`_ or `pyarrow <https://arrow.apache.org/docs/python/>`_ library. Examples -------- >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}}) >>> df.to_parquet('df.parquet.gzip', ... compression='gzip') # doctest: +SKIP >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP col1 col2 0 1 3 1 2 4 If you want to get a buffer to the parquet content you can use a io.BytesIO object, as long as you don't use partition_cols, which creates multiple files. >>> import io >>> f = io.BytesIO() >>> df.to_parquet(f) >>> f.seek(0) 0 >>> content = f.read() """ from pandas.io.parquet import to_parquet return to_parquet( self, path, engine, compression=compression, index=index, partition_cols=partition_cols, storage_options=storage_options, **kwargs, ) def to_orc( self, path: FilePath | WriteBuffer[bytes] | None = None, *, engine: Literal["pyarrow"] = "pyarrow", index: bool | None = None, engine_kwargs: dict[str, Any] | None = None, ) -> bytes | None: """ Write a DataFrame to the ORC format. .. versionadded:: 1.5.0 Parameters ---------- path : str, file-like object or None, default None If a string, it will be used as Root Directory path when writing a partitioned dataset. By file-like object, we refer to objects with a write() method, such as a file handle (e.g. via builtin open function). If path is None, a bytes object is returned. engine : str, default 'pyarrow' ORC library to use. Pyarrow must be >= 7.0.0. index : bool, optional If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``infer`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. engine_kwargs : dict[str, Any] or None, default None Additional keyword arguments passed to :func:`pyarrow.orc.write_table`. Returns ------- bytes if no path argument is provided else None Raises ------ NotImplementedError Dtype of one or more columns is category, unsigned integers, interval, period or sparse. ValueError engine is not pyarrow. See Also -------- read_orc : Read a ORC file. DataFrame.to_parquet : Write a parquet file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- * Before using this function you should read the :ref:`user guide about ORC <io.orc>` and :ref:`install optional dependencies <install.warn_orc>`. * This function requires `pyarrow <https://arrow.apache.org/docs/python/>`_ library. * For supported dtypes please refer to `supported ORC features in Arrow <https://arrow.apache.org/docs/cpp/orc.html#data-types>`__. * Currently timezones in datetime columns are not preserved when a dataframe is converted into ORC files. 
Examples -------- >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [4, 3]}) >>> df.to_orc('df.orc') # doctest: +SKIP >>> pd.read_orc('df.orc') # doctest: +SKIP col1 col2 0 1 4 1 2 3 If you want to get a buffer to the orc content you can write it to io.BytesIO >>> import io >>> b = io.BytesIO(df.to_orc()) # doctest: +SKIP >>> b.seek(0) # doctest: +SKIP 0 >>> content = b.read() # doctest: +SKIP """ from pandas.io.orc import to_orc return to_orc( self, path, engine=engine, index=index, engine_kwargs=engine_kwargs ) def to_html( self, buf: FilePath | WriteBuffer[str], columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> None: ... def to_html( self, buf: None = ..., columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> str: ... header_type="bool", header="Whether to print column labels, default True", col_space_type="str or int, list or dict of int or str", col_space="The minimum width of each column in CSS length " "units. An int is assumed to be px units.", ) def to_html( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[Level] | None = None, col_space: ColspaceArgType | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: FormattersType | None = None, float_format: FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool | str = False, decimal: str = ".", bold_rows: bool = True, classes: str | list | tuple | None = None, escape: bool = True, notebook: bool = False, border: int | bool | None = None, table_id: str | None = None, render_links: bool = False, encoding: str | None = None, ) -> str | None: """ Render a DataFrame as an HTML table. %(shared_params)s bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.display.html.border``. 
table_id : str, optional A css id is included in the opening `<table>` tag if specified. render_links : bool, default False Convert URLs to HTML links. encoding : str, default "utf-8" Set character encoding. .. versionadded:: 1.0 %(returns)s See Also -------- to_string : Convert DataFrame to a string. """ if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS: raise ValueError("Invalid value for justify parameter") formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, header=header, index=index, formatters=formatters, float_format=float_format, bold_rows=bold_rows, sparsify=sparsify, justify=justify, index_names=index_names, escape=escape, decimal=decimal, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, ) # TODO: a generic formatter wld b in DataFrameFormatter return fmt.DataFrameRenderer(formatter).to_html( buf=buf, classes=classes, notebook=notebook, border=border, encoding=encoding, table_id=table_id, render_links=render_links, ) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buffer", ) def to_xml( self, path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, index: bool = True, root_name: str | None = "data", row_name: str | None = "row", na_rep: str | None = None, attr_cols: list[str] | None = None, elem_cols: list[str] | None = None, namespaces: dict[str | None, str] | None = None, prefix: str | None = None, encoding: str = "utf-8", xml_declaration: bool | None = True, pretty_print: bool | None = True, parser: str | None = "lxml", stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ) -> str | None: """ Render a DataFrame to an XML document. .. versionadded:: 1.3.0 Parameters ---------- path_or_buffer : str, path object, file-like object, or None, default None String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a ``write()`` function. If None, the result is returned as a string. index : bool, default True Whether to include index in XML document. root_name : str, default 'data' The name of root element in XML document. row_name : str, default 'row' The name of row element in XML document. na_rep : str, optional Missing data representation. attr_cols : list-like, optional List of columns to write as attributes in row element. Hierarchical columns will be flattened with underscore delimiting the different levels. elem_cols : list-like, optional List of columns to write as children in row element. By default, all columns output as children of row element. Hierarchical columns will be flattened with underscore delimiting the different levels. namespaces : dict, optional All namespaces to be defined in root element. Keys of dict should be prefix names and values of dict corresponding URIs. Default namespaces should be given empty string key. For example, :: namespaces = {{"": "https://example.com"}} prefix : str, optional Namespace prefix to be used for every element and/or attribute in document. This should be one of the keys in ``namespaces`` dict. encoding : str, default 'utf-8' Encoding of the resulting document. xml_declaration : bool, default True Whether to include the XML declaration at start of document. pretty_print : bool, default True Whether output should be pretty printed with indentation and line breaks. 
parser : {{'lxml','etree'}}, default 'lxml' Parser module to use for building of tree. Only 'lxml' and 'etree' are supported. With 'lxml', the ability to use XSLT stylesheet is supported. stylesheet : str, path object or file-like object, optional A URL, file-like object, or a raw string containing an XSLT script used to transform the raw XML output. Script should use layout of elements and attributes from original output. This argument requires ``lxml`` to be installed. Only XSLT 1.0 scripts and not later versions is currently supported. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. {storage_options} Returns ------- None or str If ``io`` is None, returns the resulting XML format as a string. Otherwise returns None. See Also -------- to_json : Convert the pandas object to a JSON string. to_html : Convert DataFrame to a html. Examples -------- >>> df = pd.DataFrame({{'shape': ['square', 'circle', 'triangle'], ... 'degrees': [360, 360, 180], ... 'sides': [4, np.nan, 3]}}) >>> df.to_xml() # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row> <index>0</index> <shape>square</shape> <degrees>360</degrees> <sides>4.0</sides> </row> <row> <index>1</index> <shape>circle</shape> <degrees>360</degrees> <sides/> </row> <row> <index>2</index> <shape>triangle</shape> <degrees>180</degrees> <sides>3.0</sides> </row> </data> >>> df.to_xml(attr_cols=[ ... 'index', 'shape', 'degrees', 'sides' ... ]) # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row index="0" shape="square" degrees="360" sides="4.0"/> <row index="1" shape="circle" degrees="360"/> <row index="2" shape="triangle" degrees="180" sides="3.0"/> </data> >>> df.to_xml(namespaces={{"doc": "https://example.com"}}, ... prefix="doc") # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <doc:data xmlns:doc="https://example.com"> <doc:row> <doc:index>0</doc:index> <doc:shape>square</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides>4.0</doc:sides> </doc:row> <doc:row> <doc:index>1</doc:index> <doc:shape>circle</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides/> </doc:row> <doc:row> <doc:index>2</doc:index> <doc:shape>triangle</doc:shape> <doc:degrees>180</doc:degrees> <doc:sides>3.0</doc:sides> </doc:row> </doc:data> """ from pandas.io.formats.xml import ( EtreeXMLFormatter, LxmlXMLFormatter, ) lxml = import_optional_dependency("lxml.etree", errors="ignore") TreeBuilder: type[EtreeXMLFormatter] | type[LxmlXMLFormatter] if parser == "lxml": if lxml is not None: TreeBuilder = LxmlXMLFormatter else: raise ImportError( "lxml not found, please install or use the etree parser." 
) elif parser == "etree": TreeBuilder = EtreeXMLFormatter else: raise ValueError("Values for parser can only be lxml or etree.") xml_formatter = TreeBuilder( self, path_or_buffer=path_or_buffer, index=index, root_name=root_name, row_name=row_name, na_rep=na_rep, attr_cols=attr_cols, elem_cols=elem_cols, namespaces=namespaces, prefix=prefix, encoding=encoding, xml_declaration=xml_declaration, pretty_print=pretty_print, stylesheet=stylesheet, compression=compression, storage_options=storage_options, ) return xml_formatter.write_output() # ---------------------------------------------------------------------- def info( self, verbose: bool | None = None, buf: WriteBuffer[str] | None = None, max_cols: int | None = None, memory_usage: bool | str | None = None, show_counts: bool | None = None, ) -> None: info = DataFrameInfo( data=self, memory_usage=memory_usage, ) info.render( buf=buf, max_cols=max_cols, verbose=verbose, show_counts=show_counts, ) def memory_usage(self, index: bool = True, deep: bool = False) -> Series: """ Return the memory usage of each column in bytes. The memory usage can optionally include the contribution of the index and elements of `object` dtype. This value is displayed in `DataFrame.info` by default. This can be suppressed by setting ``pandas.options.display.memory_usage`` to False. Parameters ---------- index : bool, default True Specifies whether to include the memory usage of the DataFrame's index in returned Series. If ``index=True``, the memory usage of the index is the first item in the output. deep : bool, default False If True, introspect the data deeply by interrogating `object` dtypes for system-level memory consumption, and include it in the returned values. Returns ------- Series A Series whose index is the original column names and whose values is the memory usage of each column in bytes. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of an ndarray. Series.memory_usage : Bytes consumed by a Series. Categorical : Memory-efficient array for string values with many repeated values. DataFrame.info : Concise summary of a DataFrame. Notes ----- See the :ref:`Frequently Asked Questions <df-memory-usage>` for more details. Examples -------- >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t)) ... for t in dtypes]) >>> df = pd.DataFrame(data) >>> df.head() int64 float64 complex128 object bool 0 1 1.0 1.0+0.0j 1 True 1 1 1.0 1.0+0.0j 1 True 2 1 1.0 1.0+0.0j 1 True 3 1 1.0 1.0+0.0j 1 True 4 1 1.0 1.0+0.0j 1 True >>> df.memory_usage() Index 128 int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=False) int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 The memory footprint of `object` dtype columns is ignored by default: >>> df.memory_usage(deep=True) Index 128 int64 40000 float64 40000 complex128 80000 object 180000 bool 5000 dtype: int64 Use a Categorical for efficient storage of an object-dtype column with many repeated values. 
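
        The categorical representation stores each distinct value only once,
        alongside compact integer codes, which is why the figure below is far
        smaller than the 180000 bytes reported above: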
>>> df['object'].astype('category').memory_usage(deep=True) 5244 """ result = self._constructor_sliced( [c.memory_usage(index=False, deep=deep) for col, c in self.items()], index=self.columns, dtype=np.intp, ) if index: index_memory_usage = self._constructor_sliced( self.index.memory_usage(deep=deep), index=["Index"] ) result = index_memory_usage._append(result) return result def transpose(self, *args, copy: bool = False) -> DataFrame: """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. Parameters ---------- *args : tuple, optional Accepted for compatibility with NumPy. copy : bool, default False Whether to copy the data after transposing, even for DataFrames with a single dtype. Note that a copy is always required for mixed dtype DataFrames, or for DataFrames with any extension types. Returns ------- DataFrame The transposed DataFrame. See Also -------- numpy.transpose : Permute the dimensions of a given array. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the `object` dtype. In such a case, a copy of the data is always made. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = pd.DataFrame(data=d1) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T # or df1.transpose() >>> df1_transposed 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'name': ['Alice', 'Bob'], ... 'score': [9.5, 8], ... 'employed': [False, True], ... 'kids': [0, 0]} >>> df2 = pd.DataFrame(data=d2) >>> df2 name score employed kids 0 Alice 9.5 False 0 1 Bob 8.0 True 0 >>> df2_transposed = df2.T # or df2.transpose() >>> df2_transposed 0 1 name Alice Bob score 9.5 8.0 employed False True kids 0 0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the `object` dtype: >>> df2.dtypes name object score float64 employed bool kids int64 dtype: object >>> df2_transposed.dtypes 0 object 1 object dtype: object """ nv.validate_transpose(args, {}) # construct the args dtypes = list(self.dtypes) if self._can_fast_transpose: # Note: tests pass without this, but this improves perf quite a bit. new_vals = self._values.T if copy and not using_copy_on_write(): new_vals = new_vals.copy() result = self._constructor( new_vals, index=self.columns, columns=self.index, copy=False ) if using_copy_on_write() and len(self) > 0: result._mgr.add_references(self._mgr) # type: ignore[arg-type] elif ( self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]) ): # We have EAs with the same dtype. We can preserve that dtype in transpose. dtype = dtypes[0] arr_type = dtype.construct_array_type() values = self.values new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values] result = type(self)._from_arrays( new_values, index=self.columns, columns=self.index ) else: new_arr = self.values.T if copy and not using_copy_on_write(): new_arr = new_arr.copy() result = self._constructor( new_arr, index=self.columns, columns=self.index, # We already made a copy (more than one block) copy=False, ) return result.__finalize__(self, method="transpose") def T(self) -> DataFrame: """ The transpose of the DataFrame. 
Returns ------- DataFrame The transposed DataFrame. See Also -------- DataFrame.transpose : Transpose index and columns. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.T 0 1 col1 1 2 col2 3 4 """ return self.transpose() # ---------------------------------------------------------------------- # Indexing Methods def _ixs(self, i: int, axis: AxisInt = 0) -> Series: """ Parameters ---------- i : int axis : int Returns ------- Series """ # irow if axis == 0: new_mgr = self._mgr.fast_xs(i) # if we are a copy, mark as such copy = isinstance(new_mgr.array, np.ndarray) and new_mgr.array.base is None result = self._constructor_sliced(new_mgr, name=self.index[i]).__finalize__( self ) result._set_is_copy(self, copy=copy) return result # icol else: label = self.columns[i] col_mgr = self._mgr.iget(i) result = self._box_col_values(col_mgr, i) # this is a cached value, mark it so result._set_as_cached(label, self) return result def _get_column_array(self, i: int) -> ArrayLike: """ Get the values of the i'th column (ndarray or ExtensionArray, as stored in the Block) Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ return self._mgr.iget_values(i) def _iter_column_arrays(self) -> Iterator[ArrayLike]: """ Iterate over the arrays of all columns in order. This returns the values as stored in the Block (ndarray or ExtensionArray). Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ for i in range(len(self.columns)): yield self._get_column_array(i) def _getitem_nocopy(self, key: list): """ Behaves like __getitem__, but returns a view in cases where __getitem__ would make a copy. """ # TODO(CoW): can be removed if/when we are always Copy-on-Write indexer = self.columns._get_indexer_strict(key, "columns")[1] new_axis = self.columns[indexer] new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=0, allow_dups=True, copy=False, only_slice=True, ) return self._constructor(new_mgr) def __getitem__(self, key): check_dict_or_set_indexers(key) key = lib.item_from_zerodim(key) key = com.apply_if_callable(key, self) if is_hashable(key) and not is_iterator(key): # is_iterator to exclude generator e.g. test_getitem_listlike # shortcut if the key is in columns is_mi = isinstance(self.columns, MultiIndex) # GH#45316 Return view if key is not duplicated # Only use drop_duplicates with duplicates for performance if not is_mi and ( self.columns.is_unique and key in self.columns or key in self.columns.drop_duplicates(keep=False) ): return self._get_item_cache(key) elif is_mi and self.columns.is_unique and key in self.columns: return self._getitem_multilevel(key) # Do we have a slicer (on rows)? if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") if isinstance(indexer, np.ndarray): # reachable with DatetimeIndex indexer = lib.maybe_indices_to_slice( indexer.astype(np.intp, copy=False), len(self) ) if isinstance(indexer, np.ndarray): # GH#43223 If we can not convert, use take return self.take(indexer, axis=0) return self._slice(indexer, axis=0) # Do we have a (boolean) DataFrame? if isinstance(key, DataFrame): return self.where(key) # Do we have a (boolean) 1d indexer? 
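        # e.g. a boolean Series such as df[df["A"] > 0], or a plain
        # boolean array/list like df[[True, False, True]]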
if com.is_bool_indexer(key): return self._getitem_bool_array(key) # We are left with two options: a single key, and a collection of keys, # We interpret tuples as collections only for non-MultiIndex is_single_key = isinstance(key, tuple) or not is_list_like(key) if is_single_key: if self.columns.nlevels > 1: return self._getitem_multilevel(key) indexer = self.columns.get_loc(key) if is_integer(indexer): indexer = [indexer] else: if is_iterator(key): key = list(key) indexer = self.columns._get_indexer_strict(key, "columns")[1] # take() does not accept boolean indexers if getattr(indexer, "dtype", None) == bool: indexer = np.where(indexer)[0] data = self._take_with_is_copy(indexer, axis=1) if is_single_key: # What does looking for a single key in a non-unique index return? # The behavior is inconsistent. It returns a Series, except when # - the key itself is repeated (test on data.shape, #9519), or # - we have a MultiIndex on columns (test on self.columns, #21309) if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex): # GH#26490 using data[key] can cause RecursionError return data._get_item_cache(key) return data def _getitem_bool_array(self, key): # also raises Exception if object array with NA values # warning here just in case -- previously __setitem__ was # reindexing but __getitem__ was not; it seems more reasonable to # go with the __setitem__ behavior since that is more consistent # with all other indexing behavior if isinstance(key, Series) and not key.index.equals(self.index): warnings.warn( "Boolean Series key will be reindexed to match DataFrame index.", UserWarning, stacklevel=find_stack_level(), ) elif len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}." ) # check_bool_indexer will throw exception if Series key cannot # be reindexed to match DataFrame rows key = check_bool_indexer(self.index, key) if key.all(): return self.copy(deep=None) indexer = key.nonzero()[0] return self._take_with_is_copy(indexer, axis=0) def _getitem_multilevel(self, key): # self.columns is a MultiIndex loc = self.columns.get_loc(key) if isinstance(loc, (slice, np.ndarray)): new_columns = self.columns[loc] result_columns = maybe_droplevels(new_columns, key) if self._is_mixed_type: result = self.reindex(columns=new_columns) result.columns = result_columns else: new_values = self._values[:, loc] result = self._constructor( new_values, index=self.index, columns=result_columns, copy=False ) if using_copy_on_write() and isinstance(loc, slice): result._mgr.add_references(self._mgr) # type: ignore[arg-type] result = result.__finalize__(self) # If there is only one column being returned, and its name is # either an empty string, or a tuple with an empty string as its # first element, then treat the empty string as a placeholder # and return the column as if the user had provided that empty # string in the key. If the result is a Series, exclude the # implied empty string from its name. if len(result.columns) == 1: # e.g. 
test_frame_getitem_multicolumn_empty_level, # test_frame_mixed_depth_get, test_loc_setitem_single_column_slice top = result.columns[0] if isinstance(top, tuple): top = top[0] if top == "": result = result[""] if isinstance(result, Series): result = self._constructor_sliced( result, index=self.index, name=key ) result._set_is_copy(self) return result else: # loc is neither a slice nor ndarray, so must be an int return self._ixs(loc, axis=1) def _get_value(self, index, col, takeable: bool = False) -> Scalar: """ Quickly retrieve single value at passed column and index. Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- scalar Notes ----- Assumes that both `self.index._index_as_unique` and `self.columns._index_as_unique`; Caller is responsible for checking. """ if takeable: series = self._ixs(col, axis=1) return series._values[index] series = self._get_item_cache(col) engine = self.index._engine if not isinstance(self.index, MultiIndex): # CategoricalIndex: Trying to use the engine fastpath may give incorrect # results if our categories are integers that dont match our codes # IntervalIndex: IntervalTree has no get_loc row = self.index.get_loc(index) return series._values[row] # For MultiIndex going through engine effectively restricts us to # same-length tuples; see test_get_set_value_no_partial_indexing loc = engine.get_loc(index) return series._values[loc] def isetitem(self, loc, value) -> None: """ Set the given value in the column with position `loc`. This is a positional analogue to ``__setitem__``. Parameters ---------- loc : int or sequence of ints Index position for the column. value : scalar or arraylike Value(s) for the column. Notes ----- ``frame.isetitem(loc, value)`` is an in-place method as it will modify the DataFrame in place (not returning a new object). In contrast to ``frame.iloc[:, i] = value`` which will try to update the existing values in place, ``frame.isetitem(loc, value)`` will not update the values of the column itself in place, it will instead insert a new array. In cases where ``frame.columns`` is unique, this is equivalent to ``frame[frame.columns[i]] = value``. 
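
        For example, ``df.isetitem(0, [10, 20])`` swaps in a new array for
        the first column rather than writing into the existing one.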
""" if isinstance(value, DataFrame): if is_scalar(loc): loc = [loc] for i, idx in enumerate(loc): arraylike = self._sanitize_column(value.iloc[:, i]) self._iset_item_mgr(idx, arraylike, inplace=False) return arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=False) def __setitem__(self, key, value): if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) key = com.apply_if_callable(key, self) # see if we can slice the rows if isinstance(key, slice): slc = self.index._convert_slice_indexer(key, kind="getitem") return self._setitem_slice(slc, value) if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2: self._setitem_frame(key, value) elif isinstance(key, (Series, np.ndarray, list, Index)): self._setitem_array(key, value) elif isinstance(value, DataFrame): self._set_item_frame_value(key, value) elif ( is_list_like(value) and not self.columns.is_unique and 1 < len(self.columns.get_indexer_for([key])) == len(value) ): # Column to set is duplicated self._setitem_array([key], value) else: # set column self._set_item(key, value) def _setitem_slice(self, key: slice, value) -> None: # NB: we can't just use self.loc[key] = value because that # operates on labels and we need to operate positional for # backwards-compat, xref GH#31469 self._check_setitem_copy() self.iloc[key] = value def _setitem_array(self, key, value): # also raises Exception if object array with NA values if com.is_bool_indexer(key): # bool indexer is indexing along rows if len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}!" ) key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] self._check_setitem_copy() if isinstance(value, DataFrame): # GH#39931 reindex since iloc does not align value = value.reindex(self.index.take(indexer)) self.iloc[indexer] = value else: # Note: unlike self.iloc[:, indexer] = value, this will # never try to overwrite values inplace if isinstance(value, DataFrame): check_key_length(self.columns, key, value) for k1, k2 in zip(key, value.columns): self[k1] = value[k2] elif not is_list_like(value): for col in key: self[col] = value elif isinstance(value, np.ndarray) and value.ndim == 2: self._iset_not_inplace(key, value) elif np.ndim(value) > 1: # list of lists value = DataFrame(value).values return self._setitem_array(key, value) else: self._iset_not_inplace(key, value) def _iset_not_inplace(self, key, value): # GH#39510 when setting with df[key] = obj with a list-like key and # list-like value, we iterate over those listlikes and set columns # one at a time. This is different from dispatching to # `self.loc[:, key]= value` because loc.__setitem__ may overwrite # data inplace, whereas this will insert new arrays. def igetitem(obj, i: int): # Note: we catch DataFrame obj before getting here, but # hypothetically would return obj.iloc[:, i] if isinstance(obj, np.ndarray): return obj[..., i] else: return obj[i] if self.columns.is_unique: if np.shape(value)[-1] != len(key): raise ValueError("Columns must be same length as key") for i, col in enumerate(key): self[col] = igetitem(value, i) else: ilocs = self.columns.get_indexer_non_unique(key)[0] if (ilocs < 0).any(): # key entries not in self.columns raise NotImplementedError if np.shape(value)[-1] != len(ilocs): raise ValueError("Columns must be same length as key") assert np.ndim(value) <= 2 orig_columns = self.columns # Using self.iloc[:, i] = ... 
may set values inplace, which # by convention we do not do in __setitem__ try: self.columns = Index(range(len(self.columns))) for i, iloc in enumerate(ilocs): self[iloc] = igetitem(value, i) finally: self.columns = orig_columns def _setitem_frame(self, key, value): # support boolean setting with DataFrame input, e.g. # df[df > df2] = 0 if isinstance(key, np.ndarray): if key.shape != self.shape: raise ValueError("Array conditional must be same shape as self") key = self._constructor(key, **self._construct_axes_dict(), copy=False) if key.size and not all(is_bool_dtype(dtype) for dtype in key.dtypes): raise TypeError( "Must pass DataFrame or 2-d ndarray with boolean values only" ) self._check_inplace_setting(value) self._check_setitem_copy() self._where(-key, value, inplace=True) def _set_item_frame_value(self, key, value: DataFrame) -> None: self._ensure_valid_index(value) # align columns if key in self.columns: loc = self.columns.get_loc(key) cols = self.columns[loc] len_cols = 1 if is_scalar(cols) or isinstance(cols, tuple) else len(cols) if len_cols != len(value.columns): raise ValueError("Columns must be same length as key") # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame if isinstance(self.columns, MultiIndex) and isinstance( loc, (slice, Series, np.ndarray, Index) ): cols_droplevel = maybe_droplevels(cols, key) if len(cols_droplevel) and not cols_droplevel.equals(value.columns): value = value.reindex(cols_droplevel, axis=1) for col, col_droplevel in zip(cols, cols_droplevel): self[col] = value[col_droplevel] return if is_scalar(cols): self[cols] = value[value.columns[0]] return # now align rows arraylike = _reindex_for_setitem(value, self.index) self._set_item_mgr(key, arraylike) return if len(value.columns) != 1: raise ValueError( "Cannot set a DataFrame with multiple columns to the single " f"column {key}" ) self[key] = value[value.columns[0]] def _iset_item_mgr( self, loc: int | slice | np.ndarray, value, inplace: bool = False ) -> None: # when called from _set_item_mgr loc can be anything returned from get_loc self._mgr.iset(loc, value, inplace=inplace) self._clear_item_cache() def _set_item_mgr(self, key, value: ArrayLike) -> None: try: loc = self._info_axis.get_loc(key) except KeyError: # This item wasn't present, just insert at end self._mgr.insert(len(self._info_axis), key, value) else: self._iset_item_mgr(loc, value) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _iset_item(self, loc: int, value) -> None: arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=True) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _set_item(self, key, value) -> None: """ Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. 
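
        If the key already refers to several existing columns (non-unique or
        MultiIndex columns), a one-dimensional value is tiled so that every
        matching column is set to it.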
""" value = self._sanitize_column(value) if ( key in self.columns and value.ndim == 1 and not is_extension_array_dtype(value) ): # broadcast across multiple columns if necessary if not self.columns.is_unique or isinstance(self.columns, MultiIndex): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)).T self._set_item_mgr(key, value) def _set_value( self, index: IndexLabel, col, value: Scalar, takeable: bool = False ) -> None: """ Put single value at passed column and index. Parameters ---------- index : Label row label col : Label column label value : scalar takeable : bool, default False Sets whether or not index/col interpreted as indexers """ try: if takeable: icol = col iindex = cast(int, index) else: icol = self.columns.get_loc(col) iindex = self.index.get_loc(index) self._mgr.column_setitem(icol, iindex, value, inplace_only=True) self._clear_item_cache() except (KeyError, TypeError, ValueError, LossySetitemError): # get_loc might raise a KeyError for missing labels (falling back # to (i)loc will do expansion of the index) # column_setitem will do validation that may raise TypeError, # ValueError, or LossySetitemError # set using a non-recursive method & reset the cache if takeable: self.iloc[index, col] = value else: self.loc[index, col] = value self._item_cache.pop(col, None) except InvalidIndexError as ii_err: # GH48729: Seems like you are trying to assign a value to a # row when only scalar options are permitted raise InvalidIndexError( f"You can only assign a scalar value not a {type(value)}" ) from ii_err def _ensure_valid_index(self, value) -> None: """ Ensure that if we don't have an index, that we can create one from the passed value. """ # GH5632, make sure that we are a Series convertible if not len(self.index) and is_list_like(value) and len(value): if not isinstance(value, DataFrame): try: value = Series(value) except (ValueError, NotImplementedError, TypeError) as err: raise ValueError( "Cannot set a frame with no defined index " "and a value that cannot be converted to a Series" ) from err # GH31368 preserve name of index index_copy = value.index.copy() if self.index.name is not None: index_copy.name = self.index.name self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan) def _box_col_values(self, values: SingleDataManager, loc: int) -> Series: """ Provide boxed values for a column. """ # Lookup in columns so that if e.g. a str datetime was passed # we attach the Timestamp object as the name. 
name = self.columns[loc] klass = self._constructor_sliced # We get index=self.index bc values is a SingleDataManager return klass(values, name=name, fastpath=True).__finalize__(self) # ---------------------------------------------------------------------- # Lookup Caching def _clear_item_cache(self) -> None: self._item_cache.clear() def _get_item_cache(self, item: Hashable) -> Series: """Return the cached item, item represents a label indexer.""" if using_copy_on_write(): loc = self.columns.get_loc(item) return self._ixs(loc, axis=1) cache = self._item_cache res = cache.get(item) if res is None: # All places that call _get_item_cache have unique columns, # pending resolution of GH#33047 loc = self.columns.get_loc(item) res = self._ixs(loc, axis=1) cache[item] = res # for a chain res._is_copy = self._is_copy return res def _reset_cacher(self) -> None: # no-op for DataFrame pass def _maybe_cache_changed(self, item, value: Series, inplace: bool) -> None: """ The object has called back to us saying maybe it has changed. """ loc = self._info_axis.get_loc(item) arraylike = value._values old = self._ixs(loc, axis=1) if old._values is value._values and inplace: # GH#46149 avoid making unnecessary copies/block-splitting return self._mgr.iset(loc, arraylike, inplace=inplace) # ---------------------------------------------------------------------- # Unsorted def query(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> DataFrame: ... def query(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def query(self, expr: str, *, inplace: bool = ..., **kwargs) -> DataFrame | None: ... def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | None: """ Query the columns of a DataFrame with a boolean expression. Parameters ---------- expr : str The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. You can refer to column names that are not valid Python variable names by surrounding them in backticks. Thus, column names containing spaces or punctuations (besides underscores) or starting with digits must be surrounded by backticks. (For example, a column named "Area (cm^2)" would be referenced as ```Area (cm^2)```). Column names which are Python keywords (like "list", "for", "import", etc) cannot be used. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether to modify the DataFrame rather than creating a new one. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. Returns ------- DataFrame or None DataFrame resulting from the provided query expression or None if ``inplace=True``. See Also -------- eval : Evaluate a string describing operations on DataFrame columns. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. 
This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. Please note that Python keywords may not be used as identifiers. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. *Backtick quoted variables* Backtick quoted variables are parsed as literal Python code and are converted internally to a Python valid identifier. This can lead to the following problems. During parsing a number of disallowed characters inside the backtick quoted string are replaced by strings that are allowed as a Python identifier. These characters include all operators in Python, the space character, the question mark, the exclamation mark, the dollar sign, and the euro sign. For other characters that fall outside the ASCII range (U+0001..U+007F) and those that are not further specified in PEP 3131, the query parser will raise an error. This excludes whitespace different than the space character, but also the hashtag (as it is used for comments) and the backtick itself (backtick can also not be escaped). In a special case, quotes that make a pair around a backtick can confuse the parser. For example, ```it's` > `that's``` will raise an error, as it forms a quoted string (``'s > `that'``) with a backtick inside. See also the Python documentation about lexical analysis (https://docs.python.org/3/reference/lexical_analysis.html) in combination with the source code in :mod:`pandas.core.computation.parsing`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ inplace = validate_bool_kwarg(inplace, "inplace") if not isinstance(expr, str): msg = f"expr must be a string to be evaluated, {type(expr)} given" raise ValueError(msg) kwargs["level"] = kwargs.pop("level", 0) + 1 kwargs["target"] = None res = self.eval(expr, **kwargs) try: result = self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query result = self[res] if inplace: self._update_inplace(result) return None else: return result def eval(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> Any: ... def eval(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def eval(self, expr: str, *, inplace: bool = False, **kwargs) -> Any | None: """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. 
This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, pandas object, or None The result of the evaluation or None if ``inplace=True``. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Multiple columns can be assigned to using multi-line expressions: >>> df.eval( ... ''' ... C = A + B ... D = A - B ... ''' ... ) A B C D 0 1 10 11 -9 1 2 8 10 -6 2 3 6 9 -3 3 4 4 8 0 4 5 2 7 3 """ from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, "inplace") kwargs["level"] = kwargs.pop("level", 0) + 1 index_resolvers = self._get_index_resolvers() column_resolvers = self._get_cleaned_column_resolvers() resolvers = column_resolvers, index_resolvers if "target" not in kwargs: kwargs["target"] = self kwargs["resolvers"] = tuple(kwargs.get("resolvers", ())) + resolvers return _eval(expr, inplace=inplace, **kwargs) def select_dtypes(self, include=None, exclude=None) -> DataFrame: """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. See Also -------- DataFrame.dtypes: Return Series with the data type of each column. Notes ----- * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <https://numpy.org/doc/stable/reference/arrays.scalars.html>`__ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ``'timedelta64'`` * To select Pandas categorical dtypes, use ``'category'`` * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 
'c': [1.0, 2.0] * 3})
>>> df
   a      b    c
0  1   True  1.0
1  2  False  2.0
2  1   True  1.0
3  2  False  2.0
4  1   True  1.0
5  2  False  2.0

>>> df.select_dtypes(include='bool')
       b
0   True
1  False
2   True
3  False
4   True
5  False

>>> df.select_dtypes(include=['float64'])
     c
0  1.0
1  2.0
2  1.0
3  2.0
4  1.0
5  2.0

>>> df.select_dtypes(exclude=['int64'])
       b    c
0   True  1.0
1  False  2.0
2   True  1.0
3  False  2.0
4   True  1.0
5  False  2.0
"""
if not is_list_like(include):
    include = (include,) if include is not None else ()
if not is_list_like(exclude):
    exclude = (exclude,) if exclude is not None else ()

selection = (frozenset(include), frozenset(exclude))

if not any(selection):
    raise ValueError("at least one of include or exclude must be nonempty")

# convert the myriad valid dtypes object to a single representation
def check_int_infer_dtype(dtypes):
    converted_dtypes: list[type] = []
    for dtype in dtypes:
        # Numpy maps int to different types (int32, int64) on Windows and Linux
        # see https://github.com/numpy/numpy/issues/9464
        if (isinstance(dtype, str) and dtype == "int") or (dtype is int):
            converted_dtypes.append(np.int32)
            converted_dtypes.append(np.int64)
        elif dtype == "float" or dtype is float:
            # GH#42452 : np.dtype("float") coerces to np.float64 from Numpy 1.20
            converted_dtypes.extend([np.float64, np.float32])
        else:
            converted_dtypes.append(infer_dtype_from_object(dtype))
    return frozenset(converted_dtypes)

include = check_int_infer_dtype(include)
exclude = check_int_infer_dtype(exclude)

for dtypes in (include, exclude):
    invalidate_string_dtypes(dtypes)

# can't both include AND exclude!
if not include.isdisjoint(exclude):
    raise ValueError(f"include and exclude overlap on {(include & exclude)}")

def dtype_predicate(dtype: DtypeObj, dtypes_set) -> bool:
    # GH 46870: BooleanDtype._is_numeric == True but should be excluded
    return issubclass(dtype.type, tuple(dtypes_set)) or (
        np.number in dtypes_set
        and getattr(dtype, "_is_numeric", False)
        and not is_bool_dtype(dtype)
    )

def predicate(arr: ArrayLike) -> bool:
    dtype = arr.dtype
    if include:
        if not dtype_predicate(dtype, include):
            return False

    if exclude:
        if dtype_predicate(dtype, exclude):
            return False

    return True

mgr = self._mgr._get_data_subset(predicate).copy(deep=None)
return type(self)(mgr).__finalize__(self)

def insert(
    self,
    loc: int,
    column: Hashable,
    value: Scalar | AnyArrayLike,
    allow_duplicates: bool | lib.NoDefault = lib.no_default,
) -> None:
    """
    Insert column into DataFrame at specified location.

    Raises a ValueError if `column` is already contained in the DataFrame,
    unless `allow_duplicates` is set to True.

    Parameters
    ----------
    loc : int
        Insertion index. Must verify 0 <= loc <= len(columns).
    column : str, number, or hashable object
        Label of the inserted column.
    value : Scalar, Series, or array-like
    allow_duplicates : bool, optional, default lib.no_default

    See Also
    --------
    Index.insert : Insert new item by index.
Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.insert(1, "newcol", [99, 99]) >>> df col1 newcol col2 0 1 99 3 1 2 99 4 >>> df.insert(0, "col1", [100, 100], allow_duplicates=True) >>> df col1 col1 newcol col2 0 100 1 99 3 1 100 2 99 4 Notice that pandas uses index alignment in case of `value` from type `Series`: >>> df.insert(0, "col0", pd.Series([5, 6], index=[1, 2])) >>> df col0 col1 col1 newcol col2 0 NaN 100 1 99 3 1 5.0 100 2 99 4 """ if allow_duplicates is lib.no_default: allow_duplicates = False if allow_duplicates and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'allow_duplicates=True' when " "'self.flags.allows_duplicate_labels' is False." ) if not allow_duplicates and column in self.columns: # Should this be a different kind of error?? raise ValueError(f"cannot insert {column}, already exists") if not isinstance(loc, int): raise TypeError("loc must be int") value = self._sanitize_column(value) self._mgr.insert(loc, column, value) def assign(self, **kwargs) -> DataFrame: r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. Later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 You can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy(deep=None) for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) return data def _sanitize_column(self, value) -> ArrayLike: """ Ensures new columns (which go into the BlockManager as new blocks) are always copied and converted into an array. 
Parameters ---------- value : scalar, Series, or array-like Returns ------- numpy.ndarray or ExtensionArray """ self._ensure_valid_index(value) # We can get there through isetitem with a DataFrame # or through loc single_block_path if isinstance(value, DataFrame): return _reindex_for_setitem(value, self.index) elif is_dict_like(value): return _reindex_for_setitem(Series(value), self.index) if is_list_like(value): com.require_length_match(value, self.index) return sanitize_array(value, self.index, copy=True, allow_2d=True) def _series(self): return { item: Series( self._mgr.iget(idx), index=self.index, name=item, fastpath=True ) for idx, item in enumerate(self.columns) } # ---------------------------------------------------------------------- # Reindexing and alignment def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): frame = self columns = axes["columns"] if columns is not None: frame = frame._reindex_columns( columns, method, copy, level, fill_value, limit, tolerance ) index = axes["index"] if index is not None: frame = frame._reindex_index( index, method, copy, level, fill_value, limit, tolerance ) return frame def _reindex_index( self, new_index, method, copy: bool, level: Level, fill_value=np.nan, limit=None, tolerance=None, ): new_index, indexer = self.index.reindex( new_index, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {0: [new_index, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_columns( self, new_columns, method, copy: bool, level: Level, fill_value=None, limit=None, tolerance=None, ): new_columns, indexer = self.columns.reindex( new_columns, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {1: [new_columns, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_multi( self, axes: dict[str, Index], copy: bool, fill_value ) -> DataFrame: """ We are guaranteed non-Nones in the axes. """ new_index, row_indexer = self.index.reindex(axes["index"]) new_columns, col_indexer = self.columns.reindex(axes["columns"]) if row_indexer is not None and col_indexer is not None: # Fastpath. By doing two 'take's at once we avoid making an # unnecessary copy. # We only get here with `not self._is_mixed_type`, which (almost) # ensures that self.values is cheap. It may be worth making this # condition more specific. indexer = row_indexer, col_indexer new_values = take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor( new_values, index=new_index, columns=new_columns, copy=False ) else: return self._reindex_with_indexers( {0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value, ) def align( self, other: DataFrame, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value=None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> DataFrame: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) """ Examples -------- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) Change the row labels. >>> df.set_axis(['a', 'b', 'c'], axis='index') A B a 1 4 b 2 5 c 3 6 Change the column labels. 
>>> df.set_axis(['I', 'II'], axis='columns')
   I  II
0  1   4
1  2   5
2  3   6
"""

# ----------------------------------------------------------------------
# Reindex-based selection methods

# ----------------------------------------------------------------------
# Sorting

# ----------------------------------------------------------------------
# Arithmetic Methods

# ----------------------------------------------------------------------
# Function application

# ----------------------------------------------------------------------
# Merging / joining methods

# ----------------------------------------------------------------------
# Statistical methods, etc.

# ----------------------------------------------------------------------
# ndarray-like stats methods

# ----------------------------------------------------------------------
# Add index and columns

# ----------------------------------------------------------------------
# Add plotting methods to DataFrame

# ----------------------------------------------------------------------
# Internal Interface Methods

def get_handle(
    path_or_buf: FilePath | BaseBuffer,
    mode: str,
    *,
    encoding: str | None = ...,
    compression: CompressionOptions = ...,
    memory_map: bool = ...,
    is_text: Literal[False],
    errors: str | None = ...,
    storage_options: StorageOptions = ...,
) -> IOHandles[bytes]:
    ...

def get_handle(
    path_or_buf: FilePath | BaseBuffer,
    mode: str,
    *,
    encoding: str | None = ...,
    compression: CompressionOptions = ...,
    memory_map: bool = ...,
    is_text: Literal[True] = ...,
    errors: str | None = ...,
    storage_options: StorageOptions = ...,
) -> IOHandles[str]:
    ...

def get_handle(
    path_or_buf: FilePath | BaseBuffer,
    mode: str,
    *,
    encoding: str | None = ...,
    compression: CompressionOptions = ...,
    memory_map: bool = ...,
    is_text: bool = ...,
    errors: str | None = ...,
    storage_options: StorageOptions = ...,
) -> IOHandles[str] | IOHandles[bytes]:
    ...

def get_handle(
    path_or_buf: FilePath | BaseBuffer,
    mode: str,
    *,
    encoding: str | None = None,
    compression: CompressionOptions = None,
    memory_map: bool = False,
    is_text: bool = True,
    errors: str | None = None,
    storage_options: StorageOptions = None,
) -> IOHandles[str] | IOHandles[bytes]:
    """
    Get file handle for given path/buffer and mode.

    Parameters
    ----------
    path_or_buf : str or file handle
        File path or object.
    mode : str
        Mode to open path_or_buf with.
    encoding : str or None
        Encoding to use.
    {compression_options}

        .. versionchanged:: 1.0.0
           May now be a dict with key 'method' as compression mode
           and other keys as compression options if compression
           mode is 'zip'.

        .. versionchanged:: 1.1.0
           Passing compression options as keys in dict is now
           supported for compression modes 'gzip', 'bz2', 'zstd' and 'zip'.

        .. versionchanged:: 1.4.0 Zstandard support.

    memory_map : bool, default False
        See parsers._parser_params for more information. Only used by read_csv.
    is_text : bool, default True
        Whether the type of the content passed to the file/buffer is
        string or bytes. This is not the same as `"b" not in mode`.
        If a string content is passed to a binary file/buffer, a wrapper is
        inserted.
errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. storage_options: StorageOptions = None Passed to _get_filepath_or_buffer .. versionchanged:: 1.2.0 Returns the dataclass IOHandles """ # Windows does not default to utf-8. Set to utf-8 for a consistent behavior encoding = encoding or "utf-8" errors = errors or "strict" # read_csv does not know whether the buffer is opened in binary/text mode if _is_binary_mode(path_or_buf, mode) and "b" not in mode: mode += "b" # validate encoding and errors codecs.lookup(encoding) if isinstance(errors, str): codecs.lookup_error(errors) # open URLs ioargs = _get_filepath_or_buffer( path_or_buf, encoding=encoding, compression=compression, mode=mode, storage_options=storage_options, ) handle = ioargs.filepath_or_buffer handles: list[BaseBuffer] # memory mapping needs to be the first step # only used for read_csv handle, memory_map, handles = _maybe_memory_map(handle, memory_map) is_path = isinstance(handle, str) compression_args = dict(ioargs.compression) compression = compression_args.pop("method") # Only for write methods if "r" not in mode and is_path: check_parent_directory(str(handle)) if compression: if compression != "zstd": # compression libraries do not like an explicit text-mode ioargs.mode = ioargs.mode.replace("t", "") elif compression == "zstd" and "b" not in ioargs.mode: # python-zstandard defaults to text mode, but we always expect # compression libraries to use binary mode. ioargs.mode += "b" # GZ Compression if compression == "gzip": if isinstance(handle, str): # error: Incompatible types in assignment (expression has type # "GzipFile", variable has type "Union[str, BaseBuffer]") handle = gzip.GzipFile( # type: ignore[assignment] filename=handle, mode=ioargs.mode, **compression_args, ) else: handle = gzip.GzipFile( # No overload variant of "GzipFile" matches argument types # "Union[str, BaseBuffer]", "str", "Dict[str, Any]" fileobj=handle, # type: ignore[call-overload] mode=ioargs.mode, **compression_args, ) # BZ Compression elif compression == "bz2": # Overload of "BZ2File" to handle pickle protocol 5 # "Union[str, BaseBuffer]", "str", "Dict[str, Any]" handle = _BZ2File( # type: ignore[call-overload] handle, mode=ioargs.mode, **compression_args, ) # ZIP Compression elif compression == "zip": # error: Argument 1 to "_BytesZipFile" has incompatible type # "Union[str, BaseBuffer]"; expected "Union[Union[str, PathLike[str]], # ReadBuffer[bytes], WriteBuffer[bytes]]" handle = _BytesZipFile( handle, ioargs.mode, **compression_args # type: ignore[arg-type] ) if handle.buffer.mode == "r": handles.append(handle) zip_names = handle.buffer.namelist() if len(zip_names) == 1: handle = handle.buffer.open(zip_names.pop()) elif not zip_names: raise ValueError(f"Zero files found in ZIP file {path_or_buf}") else: raise ValueError( "Multiple files found in ZIP file. 
" f"Only one file per ZIP: {zip_names}" ) # TAR Encoding elif compression == "tar": compression_args.setdefault("mode", ioargs.mode) if isinstance(handle, str): handle = _BytesTarFile(name=handle, **compression_args) else: # error: Argument "fileobj" to "_BytesTarFile" has incompatible # type "BaseBuffer"; expected "Union[ReadBuffer[bytes], # WriteBuffer[bytes], None]" handle = _BytesTarFile( fileobj=handle, **compression_args # type: ignore[arg-type] ) assert isinstance(handle, _BytesTarFile) if "r" in handle.buffer.mode: handles.append(handle) files = handle.buffer.getnames() if len(files) == 1: file = handle.buffer.extractfile(files[0]) assert file is not None handle = file elif not files: raise ValueError(f"Zero files found in TAR archive {path_or_buf}") else: raise ValueError( "Multiple files found in TAR archive. " f"Only one file per TAR archive: {files}" ) # XZ Compression elif compression == "xz": # error: Argument 1 to "LZMAFile" has incompatible type "Union[str, # BaseBuffer]"; expected "Optional[Union[Union[str, bytes, PathLike[str], # PathLike[bytes]], IO[bytes]]]" handle = get_lzma_file()(handle, ioargs.mode) # type: ignore[arg-type] # Zstd Compression elif compression == "zstd": zstd = import_optional_dependency("zstandard") if "r" in ioargs.mode: open_args = {"dctx": zstd.ZstdDecompressor(**compression_args)} else: open_args = {"cctx": zstd.ZstdCompressor(**compression_args)} handle = zstd.open( handle, mode=ioargs.mode, **open_args, ) # Unrecognized Compression else: msg = f"Unrecognized compression type: {compression}" raise ValueError(msg) assert not isinstance(handle, str) handles.append(handle) elif isinstance(handle, str): # Check whether the filename is to be opened in binary mode. # Binary mode does not support 'encoding' and 'newline'. 
if ioargs.encoding and "b" not in ioargs.mode: # Encoding handle = open( handle, ioargs.mode, encoding=ioargs.encoding, errors=errors, newline="", ) else: # Binary mode handle = open(handle, ioargs.mode) handles.append(handle) # Convert BytesIO or file objects passed with an encoding is_wrapped = False if not is_text and ioargs.mode == "rb" and isinstance(handle, TextIOBase): # not added to handles as it does not open/buffer resources handle = _BytesIOWrapper( handle, encoding=ioargs.encoding, ) elif is_text and ( compression or memory_map or _is_binary_mode(handle, ioargs.mode) ): if ( not hasattr(handle, "readable") or not hasattr(handle, "writable") or not hasattr(handle, "seekable") ): handle = _IOWrapper(handle) # error: Argument 1 to "TextIOWrapper" has incompatible type # "_IOWrapper"; expected "IO[bytes]" handle = TextIOWrapper( handle, # type: ignore[arg-type] encoding=ioargs.encoding, errors=errors, newline="", ) handles.append(handle) # only marked as wrapped when the caller provided a handle is_wrapped = not ( isinstance(ioargs.filepath_or_buffer, str) or ioargs.should_close ) if "r" in ioargs.mode and not hasattr(handle, "read"): raise TypeError( "Expected file path name or file-like object, " f"got {type(ioargs.filepath_or_buffer)} type" ) handles.reverse() # close the most recently added buffer first if ioargs.should_close: assert not isinstance(ioargs.filepath_or_buffer, str) handles.append(ioargs.filepath_or_buffer) return IOHandles( # error: Argument "handle" to "IOHandles" has incompatible type # "Union[TextIOWrapper, GzipFile, BaseBuffer, typing.IO[bytes], # typing.IO[Any]]"; expected "pandas._typing.IO[Any]" handle=handle, # type: ignore[arg-type] # error: Argument "created_handles" to "IOHandles" has incompatible type # "List[BaseBuffer]"; expected "List[Union[IO[bytes], IO[str]]]" created_handles=handles, # type: ignore[arg-type] is_wrapped=is_wrapped, compression=ioargs.compression, ) def _arrow_dtype_mapping() -> dict: pa = import_optional_dependency("pyarrow") return { pa.int8(): pd.Int8Dtype(), pa.int16(): pd.Int16Dtype(), pa.int32(): pd.Int32Dtype(), pa.int64(): pd.Int64Dtype(), pa.uint8(): pd.UInt8Dtype(), pa.uint16(): pd.UInt16Dtype(), pa.uint32(): pd.UInt32Dtype(), pa.uint64(): pd.UInt64Dtype(), pa.bool_(): pd.BooleanDtype(), pa.string(): pd.StringDtype(), pa.float32(): pd.Float32Dtype(), pa.float64(): pd.Float64Dtype(), } The provided code snippet includes necessary dependencies for implementing the `read_orc` function. Write a Python function `def read_orc( path: FilePath | ReadBuffer[bytes], columns: list[str] | None = None, dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, **kwargs, ) -> DataFrame` to solve the following problem: Load an ORC object from the file path, returning a DataFrame. Parameters ---------- path : str, path object, or file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``read()`` function. The string could be a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is expected. A local file could be: ``file://localhost/path/to/table.orc``. columns : list, default None If not None, only these columns will be read from the file. Output always follows the ordering of the file and not the columns list. This mirrors the original behaviour of :external+pyarrow:py:meth:`pyarrow.orc.ORCFile.read`. dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy backed DataFrames Which dtype_backend to use, e.g. 
whether a DataFrame should have NumPy
arrays. When "numpy_nullable" is set, nullable dtypes are used for all dtypes
that have a nullable implementation; when "pyarrow" is set, pyarrow is used
for all dtypes. The dtype backends are still experimental.

.. versionadded:: 2.0

**kwargs
    Any additional kwargs are passed to pyarrow.

Returns
-------
DataFrame

Notes
-----
Before using this function you should read the :ref:`user guide about ORC <io.orc>`
and :ref:`install optional dependencies <install.warn_orc>`.

Here is the function:
def read_orc(
    path: FilePath | ReadBuffer[bytes],
    columns: list[str] | None = None,
    dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
    **kwargs,
) -> DataFrame:
    """
    Load an ORC object from the file path, returning a DataFrame.

    Parameters
    ----------
    path : str, path object, or file-like object
        String, path object (implementing ``os.PathLike[str]``), or file-like
        object implementing a binary ``read()`` function. The string could be a URL.
        Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is
        expected. A local file could be:
        ``file://localhost/path/to/table.orc``.
    columns : list, default None
        If not None, only these columns will be read from the file.
        Output always follows the ordering of the file and not the columns list.
        This mirrors the original behaviour of
        :external+pyarrow:py:meth:`pyarrow.orc.ORCFile.read`.
    dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy backed DataFrames
        Which dtype_backend to use, e.g. whether a DataFrame should have NumPy
        arrays. When "numpy_nullable" is set, nullable dtypes are used for all
        dtypes that have a nullable implementation; when "pyarrow" is set,
        pyarrow is used for all dtypes. The dtype backends are still
        experimental.

        .. versionadded:: 2.0

    **kwargs
        Any additional kwargs are passed to pyarrow.

    Returns
    -------
    DataFrame

    Notes
    -----
    Before using this function you should read the :ref:`user guide about ORC
    <io.orc>` and :ref:`install optional dependencies
    <install.warn_orc>`.
    """
    # we require a newer version of pyarrow than we support for parquet
    orc = import_optional_dependency("pyarrow.orc")

    check_dtype_backend(dtype_backend)

    with get_handle(path, "rb", is_text=False) as handles:
        orc_file = orc.ORCFile(handles.handle)
        pa_table = orc_file.read(columns=columns, **kwargs)
    if dtype_backend is not lib.no_default:
        if dtype_backend == "pyarrow":
            df = pa_table.to_pandas(types_mapper=pd.ArrowDtype)
        else:
            from pandas.io._util import _arrow_dtype_mapping

            mapping = _arrow_dtype_mapping()
            df = pa_table.to_pandas(types_mapper=mapping.get)
        return df
    else:
        return pa_table.to_pandas()
Load an ORC object from the file path, returning a DataFrame.

Parameters
----------
path : str, path object, or file-like object
    String, path object (implementing ``os.PathLike[str]``), or file-like
    object implementing a binary ``read()`` function. The string could be a URL.
    Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is
    expected. A local file could be:
    ``file://localhost/path/to/table.orc``.
columns : list, default None
    If not None, only these columns will be read from the file.
    Output always follows the ordering of the file and not the columns list.
    This mirrors the original behaviour of
    :external+pyarrow:py:meth:`pyarrow.orc.ORCFile.read`.
dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy backed DataFrames
    Which dtype_backend to use, e.g. whether a DataFrame should have NumPy
    arrays. When "numpy_nullable" is set, nullable dtypes are used for all
    dtypes that have a nullable implementation; when "pyarrow" is set, pyarrow
    is used for all dtypes. The dtype backends are still experimental.

    .. versionadded:: 2.0

**kwargs
    Any additional kwargs are passed to pyarrow.

Returns
-------
DataFrame

Notes
-----
Before using this function you should read the :ref:`user guide about ORC <io.orc>`
and :ref:`install optional dependencies <install.warn_orc>`.
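A minimal usage sketch of the function above; the file name ``example.orc`` and the column names are hypothetical, and pyarrow must be installed:

import pandas as pd

# Default behaviour: NumPy-backed dtypes.
df = pd.read_orc("example.orc")

# Read a subset of columns (hypothetical names); output order still
# follows the file, not the list.
df_subset = pd.read_orc("example.orc", columns=["a", "b"])

# Experimental backends (pandas >= 2.0): nullable NumPy-backed dtypes,
# mapped via _arrow_dtype_mapping(), or pyarrow-backed ArrowDtype columns.
df_nullable = pd.read_orc("example.orc", dtype_backend="numpy_nullable")
df_arrow = pd.read_orc("example.orc", dtype_backend="pyarrow")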
173,531
from __future__ import annotations

import io
from types import ModuleType
from typing import (
    Any,
    Literal,
)

from pandas._libs import lib
from pandas._typing import (
    DtypeBackend,
    FilePath,
    ReadBuffer,
    WriteBuffer,
)
from pandas.compat._optional import import_optional_dependency
from pandas.util._validators import check_dtype_backend

from pandas.core.dtypes.common import (
    is_categorical_dtype,
    is_interval_dtype,
    is_period_dtype,
    is_unsigned_integer_dtype,
)

import pandas as pd
from pandas.core.frame import DataFrame
from pandas.io.common import get_handle

class ModuleType:
    __doc__: Optional[str]
    __file__: Optional[str]
    __name__: str
    __package__: Optional[str]
    __path__: Optional[Iterable[str]]
    __dict__: Dict[str, Any]
    def __init__(self, name: str, doc: Optional[str] = ...) -> None: ...

Any = object()

Literal: _SpecialForm = ...

class WriteBuffer(BaseBuffer, Protocol[AnyStr_contra]):
    def write(self, __b: AnyStr_contra) -> Any:
        # for gzip.GzipFile, bz2.BZ2File
        ...

    def flush(self) -> Any:
        # for gzip.GzipFile, bz2.BZ2File
        ...

FilePath = Union[str, "PathLike[str]"]

def import_optional_dependency(
    name: str,
    extra: str = "",
    errors: str = "raise",
    min_version: str | None = None,
):
    """
    Import an optional dependency.

    By default, if a dependency is missing an ImportError with a nice
    message will be raised. If a dependency is present, but too old,
    we raise.

    Parameters
    ----------
    name : str
        The module name.
    extra : str
        Additional text to include in the ImportError message.
    errors : str {'raise', 'warn', 'ignore'}
        What to do when a dependency is not found or its version is too old.

        * raise : Raise an ImportError
        * warn : Only applicable when a module's version is too old.
          Warns that the version is too old and returns None
        * ignore: If the module is not installed, return None, otherwise,
          return the module, even if the version is too old.
          It's expected that users validate the version locally when
          using ``errors="ignore"`` (see ``io/html.py``)
    min_version : str, default None
        Specify a minimum version that is different from the global pandas
        minimum version required.

    Returns
    -------
    maybe_module : Optional[ModuleType]
        The imported module, when found and the version is correct.
        None is returned when the package is not found and `errors` is not
        ``'raise'``, or when the package's version is too old and `errors`
        is ``'warn'``.
    """
    assert errors in {"warn", "raise", "ignore"}

    package_name = INSTALL_MAPPING.get(name)
    install_name = package_name if package_name is not None else name

    msg = (
        f"Missing optional dependency '{install_name}'. {extra} "
        f"Use pip or conda to install {install_name}."
    )
    try:
        module = importlib.import_module(name)
    except ImportError:
        if errors == "raise":
            raise ImportError(msg)
        return None

    # Handle submodules: if we have submodule, grab parent module from sys.modules
    parent = name.split(".")[0]
    if parent != name:
        install_name = parent
        module_to_get = sys.modules[install_name]
    else:
        module_to_get = module
    minimum_version = min_version if min_version is not None else VERSIONS.get(parent)
    if minimum_version:
        version = get_version(module_to_get)
        if version and Version(version) < Version(minimum_version):
            msg = (
                f"Pandas requires version '{minimum_version}' or newer of '{parent}' "
                f"(version '{version}' currently installed)."
            )
            if errors == "warn":
                warnings.warn(
                    msg,
                    UserWarning,
                    stacklevel=find_stack_level(),
                )
                return None
            elif errors == "raise":
                raise ImportError(msg)

    return module

def is_period_dtype(arr_or_dtype) -> bool:
    """
    Check whether an array-like or dtype is of the Period dtype.
Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the Period dtype. Examples -------- >>> is_period_dtype(object) False >>> is_period_dtype(PeriodDtype(freq="D")) True >>> is_period_dtype([1, 2, 3]) False >>> is_period_dtype(pd.Period("2017-01-01")) False >>> is_period_dtype(pd.PeriodIndex([], freq="A")) True """ if isinstance(arr_or_dtype, ExtensionDtype): # GH#33400 fastpath for dtype object return arr_or_dtype.type is Period if arr_or_dtype is None: return False return PeriodDtype.is_dtype(arr_or_dtype) def is_interval_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of the Interval dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the Interval dtype. Examples -------- >>> is_interval_dtype(object) False >>> is_interval_dtype(IntervalDtype()) True >>> is_interval_dtype([1, 2, 3]) False >>> >>> interval = pd.Interval(1, 2, closed="right") >>> is_interval_dtype(interval) False >>> is_interval_dtype(pd.IntervalIndex([interval])) True """ if isinstance(arr_or_dtype, ExtensionDtype): # GH#33400 fastpath for dtype object return arr_or_dtype.type is Interval if arr_or_dtype is None: return False return IntervalDtype.is_dtype(arr_or_dtype) def is_categorical_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of the Categorical dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the Categorical dtype. Examples -------- >>> from pandas.api.types import is_categorical_dtype >>> from pandas import CategoricalDtype >>> is_categorical_dtype(object) False >>> is_categorical_dtype(CategoricalDtype()) True >>> is_categorical_dtype([1, 2, 3]) False >>> is_categorical_dtype(pd.Categorical([1, 2, 3])) True >>> is_categorical_dtype(pd.CategoricalIndex([1, 2, 3])) True """ if isinstance(arr_or_dtype, ExtensionDtype): # GH#33400 fastpath for dtype object return arr_or_dtype.name == "category" if arr_or_dtype is None: return False return CategoricalDtype.is_dtype(arr_or_dtype) def is_unsigned_integer_dtype(arr_or_dtype) -> bool: """ Check whether the provided array or dtype is of an unsigned integer dtype. The nullable Integer dtypes (e.g. pandas.UInt64Dtype) are also considered as integer by this function. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of an unsigned integer dtype. 
Examples -------- >>> is_unsigned_integer_dtype(str) False >>> is_unsigned_integer_dtype(int) # signed False >>> is_unsigned_integer_dtype(float) False >>> is_unsigned_integer_dtype(np.uint64) True >>> is_unsigned_integer_dtype('uint8') True >>> is_unsigned_integer_dtype('UInt8') True >>> is_unsigned_integer_dtype(pd.UInt8Dtype) True >>> is_unsigned_integer_dtype(np.array(['a', 'b'])) False >>> is_unsigned_integer_dtype(pd.Series([1, 2])) # signed False >>> is_unsigned_integer_dtype(pd.Index([1, 2.])) # float False >>> is_unsigned_integer_dtype(np.array([1, 2], dtype=np.uint32)) True """ return _is_dtype_type( arr_or_dtype, classes_and_not_datetimelike(np.unsignedinteger) ) or _is_dtype( arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind == "u" ) class DataFrame(NDFrame, OpsMixin): """ Two-dimensional, size-mutable, potentially heterogeneous tabular data. Data structure also contains labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure. Parameters ---------- data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame Dict can contain Series, arrays, constants, dataclass or list-like objects. If data is a dict, column order follows insertion-order. If a dict contains Series which have an index defined, it is aligned by its index. This alignment also occurs if data is a Series or a DataFrame itself. Alignment is done on Series/DataFrame inputs. If data is a list of dicts, column order follows insertion-order. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided. columns : Index or array-like Column labels to use for resulting frame when data does not have them, defaulting to RangeIndex(0, 1, 2, ..., n). If data contains column labels, will perform column selection instead. dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer. copy : bool or None, default None Copy data from inputs. For dict data, the default of None behaves like ``copy=True``. For DataFrame or 2d ndarray input, the default of None behaves like ``copy=False``. If data is a dict containing one or more Series (possibly of different dtypes), ``copy=False`` will ensure that these inputs are not copied. .. versionchanged:: 1.3.0 See Also -------- DataFrame.from_records : Constructor from tuples, also record arrays. DataFrame.from_dict : From dicts of Series, arrays, or dicts. read_csv : Read a comma-separated values (csv) file into DataFrame. read_table : Read general delimited file into DataFrame. read_clipboard : Read text from clipboard into DataFrame. Notes ----- Please reference the :ref:`User Guide <basics.dataframe>` for more information. Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. 
>>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = pd.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from a dictionary including Series: >>> d = {'col1': [0, 1, 2, 3], 'col2': pd.Series([2, 3], index=[2, 3])} >>> pd.DataFrame(data=d, index=[0, 1, 2, 3]) col1 col2 0 0 NaN 1 1 NaN 2 2 2.0 3 3 3.0 Constructing DataFrame from numpy ndarray: >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ... columns=['a', 'b', 'c']) >>> df2 a b c 0 1 2 3 1 4 5 6 2 7 8 9 Constructing DataFrame from a numpy ndarray that has labeled columns: >>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], ... dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")]) >>> df3 = pd.DataFrame(data, columns=['c', 'a']) ... >>> df3 c a 0 3 1 1 6 4 2 9 7 Constructing DataFrame from dataclass: >>> from dataclasses import make_dataclass >>> Point = make_dataclass("Point", [("x", int), ("y", int)]) >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)]) x y 0 0 0 1 0 3 2 2 3 Constructing DataFrame from Series/DataFrame: >>> ser = pd.Series([1, 2, 3], index=["a", "b", "c"]) >>> df = pd.DataFrame(data=ser, index=["a", "c"]) >>> df 0 a 1 c 3 >>> df1 = pd.DataFrame([1, 2, 3], index=["a", "b", "c"], columns=["x"]) >>> df2 = pd.DataFrame(data=df1, index=["a", "c"]) >>> df2 x a 1 c 3 """ _internal_names_set = {"columns", "index"} | NDFrame._internal_names_set _typ = "dataframe" _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray) _accessors: set[str] = {"sparse"} _hidden_attrs: frozenset[str] = NDFrame._hidden_attrs | frozenset([]) _mgr: BlockManager | ArrayManager def _constructor(self) -> Callable[..., DataFrame]: return DataFrame _constructor_sliced: Callable[..., Series] = Series # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index: Axes | None = None, columns: Axes | None = None, dtype: Dtype | None = None, copy: bool | None = None, ) -> None: if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._mgr if not copy: # if not copying data, ensure to still return a shallow copy # to avoid the result sharing the same Manager data = data.copy(deep=False) if isinstance(data, (BlockManager, ArrayManager)): if using_copy_on_write(): data = data.copy(deep=False) # first check if a Manager is passed without any other arguments # -> use fastpath (without checking Manager type) if index is None and columns is None and dtype is None and not copy: # GH#33357 fastpath NDFrame.__init__(self, data) return manager = get_option("mode.data_manager") # GH47215 if index is not None and isinstance(index, set): raise ValueError("index cannot be a set") if columns is not None and isinstance(columns, set): raise ValueError("columns cannot be a set") if copy is None: if isinstance(data, dict): # retain pre-GH#38939 default behavior copy = True elif ( manager == "array" and isinstance(data, (np.ndarray, ExtensionArray)) and data.ndim == 2 ): # INFO(ArrayManager) by default copy the 2D input array to get # contiguous 1D arrays copy = True elif using_copy_on_write() and not isinstance( data, (Index, DataFrame, Series) ): copy = True else: copy = False if data is None: index = index if index is not None else default_index(0) columns = columns if columns is not None else default_index(0) dtype = dtype if dtype is not None else pandas_dtype(object) data = [] if isinstance(data, (BlockManager, ArrayManager)): mgr = self._init_mgr( data, 
axes={"index": index, "columns": columns}, dtype=dtype, copy=copy ) elif isinstance(data, dict): # GH#38939 de facto copy defaults to False only in non-dict cases mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager) elif isinstance(data, ma.MaskedArray): from numpy.ma import mrecords # masked recarray if isinstance(data, mrecords.MaskedRecords): raise TypeError( "MaskedRecords are not supported. Pass " "{name: data[name] for name in data.dtype.names} " "instead" ) # a masked array data = sanitize_masked_array(data) mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) elif isinstance(data, (np.ndarray, Series, Index, ExtensionArray)): if data.dtype.names: # i.e. numpy structured array data = cast(np.ndarray, data) mgr = rec_array_to_mgr( data, index, columns, dtype, copy, typ=manager, ) elif getattr(data, "name", None) is not None: # i.e. Series/Index with non-None name _copy = copy if using_copy_on_write() else True mgr = dict_to_mgr( # error: Item "ndarray" of "Union[ndarray, Series, Index]" has no # attribute "name" {data.name: data}, # type: ignore[union-attr] index, columns, dtype=dtype, typ=manager, copy=_copy, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) # For data is list-like, or Iterable (will consume into list) elif is_list_like(data): if not isinstance(data, abc.Sequence): if hasattr(data, "__array__"): # GH#44616 big perf improvement for e.g. pytorch tensor data = np.asarray(data) else: data = list(data) if len(data) > 0: if is_dataclass(data[0]): data = dataclasses_to_dicts(data) if not isinstance(data, np.ndarray) and treat_as_nested(data): # exclude ndarray as we may have cast it a few lines above if columns is not None: columns = ensure_index(columns) arrays, columns, index = nested_data_to_arrays( # error: Argument 3 to "nested_data_to_arrays" has incompatible # type "Optional[Collection[Any]]"; expected "Optional[Index]" data, columns, index, # type: ignore[arg-type] dtype, ) mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, typ=manager, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) else: mgr = dict_to_mgr( {}, index, columns if columns is not None else default_index(0), dtype=dtype, typ=manager, ) # For data is scalar else: if index is None or columns is None: raise ValueError("DataFrame constructor not properly called!") index = ensure_index(index) columns = ensure_index(columns) if not dtype: dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True) # For data is a scalar extension dtype if isinstance(dtype, ExtensionDtype): # TODO(EA2D): special case not needed with 2D EAs values = [ construct_1d_arraylike_from_scalar(data, len(index), dtype) for _ in range(len(columns)) ] mgr = arrays_to_mgr(values, columns, index, dtype=None, typ=manager) else: arr2d = construct_2d_arraylike_from_scalar( data, len(index), len(columns), dtype, copy, ) mgr = ndarray_to_mgr( arr2d, index, columns, dtype=arr2d.dtype, copy=False, typ=manager, ) # ensure correct Manager type according to settings mgr = mgr_to_mgr(mgr, typ=manager) NDFrame.__init__(self, mgr) # ---------------------------------------------------------------------- def __dataframe__( self, nan_as_null: bool = False, allow_copy: bool = True ) -> DataFrameXchg: """ Return the dataframe interchange object implementing the interchange protocol. 
        Parameters
        ----------
        nan_as_null : bool, default False
            Whether to tell the DataFrame to overwrite null values in the
            data with ``NaN`` (or ``NaT``).
        allow_copy : bool, default True
            Whether to allow memory copying when exporting. If set to False
            it would cause non-zero-copy exports to fail.

        Returns
        -------
        DataFrame interchange object
            The object which the consuming library can use to ingress the
            dataframe.

        Notes
        -----
        Details on the interchange protocol:
        https://data-apis.org/dataframe-protocol/latest/index.html

        `nan_as_null` currently has no effect; once support for nullable
        extension dtypes is added, this value should be propagated to
        columns.
        """
        from pandas.core.interchange.dataframe import PandasDataFrameXchg

        return PandasDataFrameXchg(self, nan_as_null, allow_copy)

    # ----------------------------------------------------------------------

    @property
    def axes(self) -> list[Index]:
        """
        Return a list representing the axes of the DataFrame.

        It has the row axis labels and column axis labels as the only members.
        They are returned in that order.

        Examples
        --------
        >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
        >>> df.axes
        [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'],
        dtype='object')]
        """
        return [self.index, self.columns]

    @property
    def shape(self) -> tuple[int, int]:
        """
        Return a tuple representing the dimensionality of the DataFrame.

        See Also
        --------
        ndarray.shape : Tuple of array dimensions.

        Examples
        --------
        >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
        >>> df.shape
        (2, 2)

        >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4],
        ...                    'col3': [5, 6]})
        >>> df.shape
        (2, 3)
        """
        return len(self.index), len(self.columns)

    @property
    def _is_homogeneous_type(self) -> bool:
        """
        Whether all the columns in a DataFrame have the same type.

        Returns
        -------
        bool

        See Also
        --------
        Index._is_homogeneous_type : Whether the object has a single
            dtype.
        MultiIndex._is_homogeneous_type : Whether all the levels of a
            MultiIndex have the same dtype.

        Examples
        --------
        >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type
        True
        >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type
        False

        Items with the same type but different sizes are considered
        different types.

        >>> DataFrame({
        ...     "A": np.array([1, 2], dtype=np.int32),
        ...     "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type
        False
        """
        if isinstance(self._mgr, ArrayManager):
            return len({arr.dtype for arr in self._mgr.arrays}) == 1
        if self._mgr.any_extension_types:
            return len({block.dtype for block in self._mgr.blocks}) == 1
        else:
            return not self._is_mixed_type

    @property
    def _can_fast_transpose(self) -> bool:
        """
        Can we transpose this DataFrame without creating any new array objects.
        """
        if isinstance(self._mgr, ArrayManager):
            return False
        blocks = self._mgr.blocks
        if len(blocks) != 1:
            return False

        dtype = blocks[0].dtype
        # TODO(EA2D) special case would be unnecessary with 2D EAs
        return not is_1d_only_ea_dtype(dtype)

    @property
    def _values(self) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray:
        """
        Analogue to ._values that may return a 2D ExtensionArray.
        """
        mgr = self._mgr

        if isinstance(mgr, ArrayManager):
            if len(mgr.arrays) == 1 and not is_1d_only_ea_dtype(mgr.arrays[0].dtype):
                # error: Item "ExtensionArray" of "Union[ndarray, ExtensionArray]"
                # has no attribute "reshape"
                return mgr.arrays[0].reshape(-1, 1)  # type: ignore[union-attr]
            return ensure_wrapped_if_datetimelike(self.values)

        blocks = mgr.blocks
        if len(blocks) != 1:
            return ensure_wrapped_if_datetimelike(self.values)

        arr = blocks[0].values
        if arr.ndim == 1:
            # non-2D ExtensionArray
            return self.values

        # more generally, whatever we allow in NDArrayBackedExtensionBlock
        arr = cast("np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray", arr)
        return arr.T

    # ----------------------------------------------------------------------
    # Rendering Methods

    def _repr_fits_vertical_(self) -> bool:
        """
        Check length against max_rows.
        """
        max_rows = get_option("display.max_rows")
        return len(self) <= max_rows

    def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool:
        """
        Check if full repr fits in horizontal boundaries imposed by the display
        options width and max_columns.

        In case of non-interactive session, no boundaries apply.

        `ignore_width` is here so ipynb+HTML output can behave the way
        users expect. display.max_columns remains in effect.
        GH3541, GH3573
        """
        width, height = console.get_console_size()
        max_columns = get_option("display.max_columns")
        nb_columns = len(self.columns)

        # exceed max columns
        if (max_columns and nb_columns > max_columns) or (
            (not ignore_width) and width and nb_columns > (width // 2)
        ):
            return False

        # used by repr_html under IPython notebook or scripts ignore terminal
        # dims
        if ignore_width or width is None or not console.in_interactive_session():
            return True

        if get_option("display.width") is not None or console.in_ipython_frontend():
            # check at least the column row for excessive width
            max_rows = 1
        else:
            max_rows = get_option("display.max_rows")

        # when auto-detecting, so width=None and not in ipython front end
        # check whether repr fits horizontal by actually checking
        # the width of the rendered repr
        buf = StringIO()

        # only care about the stuff we'll actually print out
        # and to_string on entire frame may be expensive
        d = self

        if max_rows is not None:
            # min of two, where one may be None
            d = d.iloc[: min(max_rows, len(d))]
        else:
            # max_rows of None means unlimited rows
            return True

        d.to_string(buf=buf)
        value = buf.getvalue()
        repr_width = max(len(line) for line in value.split("\n"))

        return repr_width < width

    def _info_repr(self) -> bool:
        """
        True if the repr should show the info view.
        """
        info_repr_option = get_option("display.large_repr") == "info"
        return info_repr_option and not (
            self._repr_fits_horizontal_() and self._repr_fits_vertical_()
        )

    def __repr__(self) -> str:
        """
        Return a string representation for a particular DataFrame.
        """
        if self._info_repr():
            buf = StringIO()
            self.info(buf=buf)
            return buf.getvalue()

        repr_params = fmt.get_dataframe_repr_params()
        return self.to_string(**repr_params)

    def _repr_html_(self) -> str | None:
        """
        Return a html representation for a particular DataFrame.

        Mainly for IPython notebook.
        """
        if self._info_repr():
            buf = StringIO()
            self.info(buf=buf)
            # need to escape the <class>, should be the first line:
            # info() emits "<class 'pandas.core.frame.DataFrame'>", and the
            # leading "<"/">" pair would otherwise be parsed as an HTML tag.
            val = buf.getvalue().replace("<", r"&lt;", 1)
            val = val.replace(">", r"&gt;", 1)
            return f"<pre>{val}</pre>"

        if get_option("display.notebook_repr_html"):
            max_rows = get_option("display.max_rows")
            min_rows = get_option("display.min_rows")
            max_cols = get_option("display.max_columns")
            show_dimensions = get_option("display.show_dimensions")

            formatter = fmt.DataFrameFormatter(
                self,
                columns=None,
                col_space=None,
                na_rep="NaN",
                formatters=None,
                float_format=None,
                sparsify=None,
                justify=None,
                index_names=True,
                header=True,
                index=True,
                bold_rows=True,
                escape=True,
                max_rows=max_rows,
                min_rows=min_rows,
                max_cols=max_cols,
                show_dimensions=show_dimensions,
                decimal=".",
            )
            return fmt.DataFrameRenderer(formatter).to_html(notebook=True)
        else:
            return None

    @overload
    def to_string(
        self,
        buf: None = ...,
        columns: Sequence[str] | None = ...,
        col_space: int | list[int] | dict[Hashable, int] | None = ...,
        header: bool | Sequence[str] = ...,
        index: bool = ...,
        na_rep: str = ...,
        formatters: fmt.FormattersType | None = ...,
        float_format: fmt.FloatFormatType | None = ...,
        sparsify: bool | None = ...,
        index_names: bool = ...,
        justify: str | None = ...,
        max_rows: int | None = ...,
        max_cols: int | None = ...,
        show_dimensions: bool = ...,
        decimal: str = ...,
        line_width: int | None = ...,
        min_rows: int | None = ...,
        max_colwidth: int | None = ...,
        encoding: str | None = ...,
    ) -> str:
        ...

    @overload
    def to_string(
        self,
        buf: FilePath | WriteBuffer[str],
        columns: Sequence[str] | None = ...,
        col_space: int | list[int] | dict[Hashable, int] | None = ...,
        header: bool | Sequence[str] = ...,
        index: bool = ...,
        na_rep: str = ...,
        formatters: fmt.FormattersType | None = ...,
        float_format: fmt.FloatFormatType | None = ...,
        sparsify: bool | None = ...,
        index_names: bool = ...,
        justify: str | None = ...,
        max_rows: int | None = ...,
        max_cols: int | None = ...,
        show_dimensions: bool = ...,
        decimal: str = ...,
        line_width: int | None = ...,
        min_rows: int | None = ...,
        max_colwidth: int | None = ...,
        encoding: str | None = ...,
    ) -> None:
        ...

    @Substitution(
        header_type="bool or sequence of str",
        header="Write out the column names. If a list of strings "
        "is given, it is assumed to be aliases for the "
        "column names",
        col_space_type="int, list or dict of int",
        col_space="The minimum width of each column. If a list of ints is given "
        "every integer corresponds with one column. If a dict is given, the key "
        "references the column, while the value defines the space to use.",
    )
    def to_string(
        self,
        buf: FilePath | WriteBuffer[str] | None = None,
        columns: Sequence[str] | None = None,
        col_space: int | list[int] | dict[Hashable, int] | None = None,
        header: bool | Sequence[str] = True,
        index: bool = True,
        na_rep: str = "NaN",
        formatters: fmt.FormattersType | None = None,
        float_format: fmt.FloatFormatType | None = None,
        sparsify: bool | None = None,
        index_names: bool = True,
        justify: str | None = None,
        max_rows: int | None = None,
        max_cols: int | None = None,
        show_dimensions: bool = False,
        decimal: str = ".",
        line_width: int | None = None,
        min_rows: int | None = None,
        max_colwidth: int | None = None,
        encoding: str | None = None,
    ) -> str | None:
        """
        Render a DataFrame to a console-friendly tabular output.
        %(shared_params)s
        line_width : int, optional
            Width to wrap a line in characters.
        min_rows : int, optional
            The number of rows to display in the console in a truncated repr
            (when number of rows is above `max_rows`).
        max_colwidth : int, optional
            Max width to truncate each column in characters. By default, no limit.
        encoding : str, default "utf-8"
            Set character encoding.
%(returns)s See Also -------- to_html : Convert DataFrame to HTML. Examples -------- >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]} >>> df = pd.DataFrame(d) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 """ from pandas import option_context with option_context("display.max_colwidth", max_colwidth): formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, min_rows=min_rows, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal, ) return fmt.DataFrameRenderer(formatter).to_string( buf=buf, encoding=encoding, line_width=line_width, ) # ---------------------------------------------------------------------- def style(self) -> Styler: """ Returns a Styler object. Contains methods for building a styled HTML representation of the DataFrame. See Also -------- io.formats.style.Styler : Helps style a DataFrame or Series according to the data with HTML and CSS. """ from pandas.io.formats.style import Styler return Styler(self) _shared_docs[ "items" ] = r""" Iterate over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. Yields ------ label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. Examples -------- >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.items(): ... print(f'label: {label}') ... print(f'content: {content}', sep='\n') ... label: species content: panda bear polar bear koala marsupial Name: species, dtype: object label: population content: panda 1864 polar 22000 koala 80000 Name: population, dtype: int64 """ def items(self) -> Iterable[tuple[Hashable, Series]]: if self.columns.is_unique and hasattr(self, "_item_cache"): for k in self.columns: yield k, self._get_item_cache(k) else: for i, k in enumerate(self.columns): yield k, self._ixs(i, axis=1) def iterrows(self) -> Iterable[tuple[Hashable, Series]]: """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : Series The data of the row as a Series. See Also -------- DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. 
Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. """ columns = self.columns klass = self._constructor_sliced using_cow = using_copy_on_write() for k, v in zip(self.index, self.values): s = klass(v, index=columns, name=k).__finalize__(self) if using_cow and self._mgr.is_single_block: s._mgr.add_references(self._mgr) # type: ignore[arg-type] yield k, s def itertuples( self, index: bool = True, name: str | None = "Pandas" ) -> Iterable[tuple[Any, ...]]: """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "Pandas" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. Examples -------- >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... Pandas(Index='dog', num_legs=4, num_wings=0) Pandas(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... Pandas(num_legs=4, num_wings=0) Pandas(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2) """ arrays = [] fields = list(self.columns) if index: arrays.append(self.index) fields.insert(0, "Index") # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) if name is not None: # https://github.com/python/mypy/issues/9046 # error: namedtuple() expects a string literal as the first argument itertuple = collections.namedtuple( # type: ignore[misc] name, fields, rename=True ) return map(itertuple._make, zip(*arrays)) # fallback to regular tuples return zip(*arrays) def __len__(self) -> int: """ Returns length of info axis, but here we use the index. """ return len(self.index) def dot(self, other: Series) -> Series: ... def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame: ... def dot(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of an other Series, DataFrame or a numpy array. It can also be called using ``self @ other`` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the matrix product with. Returns ------- Series or DataFrame If other is a Series, return the matrix product between self and other as a Series. If other is a DataFrame or a numpy.array, return the matrix product of self and other in a DataFrame of a np.array. See Also -------- Series.dot: Similar method for Series. 
        Notes
        -----
        The dimensions of DataFrame and other must be compatible in order to
        compute the matrix multiplication. In addition, the column names of
        DataFrame and the index of other must contain the same values, as they
        will be aligned prior to the multiplication.

        The dot method for Series computes the inner product, instead of the
        matrix product here.

        Examples
        --------
        Here we multiply a DataFrame with a Series.

        >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
        >>> s = pd.Series([1, 1, 2, 1])
        >>> df.dot(s)
        0    -4
        1     5
        dtype: int64

        Here we multiply a DataFrame with another DataFrame.

        >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])
        >>> df.dot(other)
           0  1
        0  1  4
        1  2  2

        Note that the dot method gives the same result as @

        >>> df @ other
           0  1
        0  1  4
        1  2  2

        The dot method works also if other is an np.array.

        >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]])
        >>> df.dot(arr)
           0  1
        0  1  4
        1  2  2

        Note how shuffling of the objects does not change the result.

        >>> s2 = s.reindex([1, 0, 2, 3])
        >>> df.dot(s2)
        0    -4
        1     5
        dtype: int64
        """
        if isinstance(other, (Series, DataFrame)):
            common = self.columns.union(other.index)
            if len(common) > len(self.columns) or len(common) > len(other.index):
                raise ValueError("matrices are not aligned")

            left = self.reindex(columns=common, copy=False)
            right = other.reindex(index=common, copy=False)
            lvals = left.values
            rvals = right._values
        else:
            left = self
            lvals = self.values
            rvals = np.asarray(other)
            if lvals.shape[1] != rvals.shape[0]:
                raise ValueError(
                    f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}"
                )

        if isinstance(other, DataFrame):
            return self._constructor(
                np.dot(lvals, rvals),
                index=left.index,
                columns=other.columns,
                copy=False,
            )
        elif isinstance(other, Series):
            return self._constructor_sliced(
                np.dot(lvals, rvals), index=left.index, copy=False
            )
        elif isinstance(rvals, (np.ndarray, Index)):
            result = np.dot(lvals, rvals)
            if result.ndim == 2:
                return self._constructor(result, index=left.index, copy=False)
            else:
                return self._constructor_sliced(result, index=left.index, copy=False)
        else:  # pragma: no cover
            raise TypeError(f"unsupported type: {type(other)}")

    @overload
    def __matmul__(self, other: Series) -> Series:
        ...

    @overload
    def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series:
        ...

    def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series:
        """
        Matrix multiplication using binary `@` operator in Python>=3.5.
        """
        return self.dot(other)

    def __rmatmul__(self, other) -> DataFrame:
        """
        Matrix multiplication using binary `@` operator in Python>=3.5.
        """
        try:
            return self.T.dot(np.transpose(other)).T
        except ValueError as err:
            if "shape mismatch" not in str(err):
                raise
            # GH#21581 give exception message for original shapes
            msg = f"shapes {np.shape(other)} and {self.shape} not aligned"
            raise ValueError(msg) from err

    # ----------------------------------------------------------------------
    # IO methods (to / from other formats)

    @classmethod
    def from_dict(
        cls,
        data: dict,
        orient: str = "columns",
        dtype: Dtype | None = None,
        columns: Axes | None = None,
    ) -> DataFrame:
        """
        Construct DataFrame from dict of array-like or dicts.

        Creates DataFrame object from dictionary by columns or by index
        allowing dtype specification.

        Parameters
        ----------
        data : dict
            Of the form {field : array-like} or {field : dict}.
        orient : {'columns', 'index', 'tight'}, default 'columns'
            The "orientation" of the data. If the keys of the passed dict
            should be the columns of the resulting DataFrame, pass 'columns'
            (default). Otherwise if the keys should be rows, pass 'index'.
If 'tight', assume a dict with keys ['index', 'columns', 'data', 'index_names', 'column_names']. .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument dtype : dtype, default None Data type to force after DataFrame construction, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'`` or ``orient='tight'``. Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from structured ndarray, sequence of tuples or dicts, or DataFrame. DataFrame : DataFrame object creation using constructor. DataFrame.to_dict : Convert the DataFrame to a dictionary. Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data, orient='index') 0 1 2 3 row_1 3 2 1 0 row_2 a b c d When using the 'index' orientation, the column names can be specified manually: >>> pd.DataFrame.from_dict(data, orient='index', ... columns=['A', 'B', 'C', 'D']) A B C D row_1 3 2 1 0 row_2 a b c d Specify ``orient='tight'`` to create the DataFrame using a 'tight' format: >>> data = {'index': [('a', 'b'), ('a', 'c')], ... 'columns': [('x', 1), ('y', 2)], ... 'data': [[1, 3], [2, 4]], ... 'index_names': ['n1', 'n2'], ... 'column_names': ['z1', 'z2']} >>> pd.DataFrame.from_dict(data, orient='tight') z1 x y z2 1 2 n1 n2 a b 1 3 c 2 4 """ index = None orient = orient.lower() if orient == "index": if len(data) > 0: # TODO speed up Series case if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: index = list(data.keys()) # error: Incompatible types in assignment (expression has type # "List[Any]", variable has type "Dict[Any, Any]") data = list(data.values()) # type: ignore[assignment] elif orient in ("columns", "tight"): if columns is not None: raise ValueError(f"cannot use columns parameter with orient='{orient}'") else: # pragma: no cover raise ValueError( f"Expected 'index', 'columns' or 'tight' for orient parameter. " f"Got '{orient}' instead" ) if orient != "tight": return cls(data, index=index, columns=columns, dtype=dtype) else: realdata = data["data"] def create_index(indexlist, namelist): index: Index if len(namelist) > 1: index = MultiIndex.from_tuples(indexlist, names=namelist) else: index = Index(indexlist, name=namelist[0]) return index index = create_index(data["index"], data["index_names"]) columns = create_index(data["columns"], data["column_names"]) return cls(realdata, index=index, columns=columns, dtype=dtype) def to_numpy( self, dtype: npt.DTypeLike | None = None, copy: bool = False, na_value: object = lib.no_default, ) -> np.ndarray: """ Convert the DataFrame to a NumPy array. By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the results dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray`. copy : bool, default False Whether to ensure that the returned value is not a view on another array. 
            Note that ``copy=False`` does not *ensure* that
            ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
            a copy is made, even if not strictly necessary.
        na_value : Any, optional
            The value to use for missing values. The default value depends
            on `dtype` and the dtypes of the DataFrame columns.

            .. versionadded:: 1.1.0

        Returns
        -------
        numpy.ndarray

        See Also
        --------
        Series.to_numpy : Similar method for Series.

        Examples
        --------
        >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
        array([[1, 3],
               [2, 4]])

        With heterogeneous data, the lowest common type will have to
        be used.

        >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]})
        >>> df.to_numpy()
        array([[1. , 3. ],
               [2. , 4.5]])

        For a mix of numeric and non-numeric types, the output array will
        have object dtype.

        >>> df['C'] = pd.date_range('2000', periods=2)
        >>> df.to_numpy()
        array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
               [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
        """
        if dtype is not None:
            dtype = np.dtype(dtype)
        result = self._mgr.as_array(dtype=dtype, copy=copy, na_value=na_value)
        if result.dtype is not dtype:
            result = np.array(result, dtype=dtype, copy=False)

        return result

    def _create_data_for_split_and_tight_to_dict(
        self, are_all_object_dtype_cols: bool, object_dtype_indices: list[int]
    ) -> list:
        """
        Simple helper method to create the main output data for
        ``to_dict(orient="split")`` and ``to_dict(orient="tight")``.
        """
        if are_all_object_dtype_cols:
            data = [
                list(map(maybe_box_native, t))
                for t in self.itertuples(index=False, name=None)
            ]
        else:
            data = [list(t) for t in self.itertuples(index=False, name=None)]
            if object_dtype_indices:
                # If we have object_dtype_cols, apply maybe_box_native after
                # the list comprehension for perf
                for row in data:
                    for i in object_dtype_indices:
                        row[i] = maybe_box_native(row[i])
        return data

    @overload
    def to_dict(
        self,
        orient: Literal["dict", "list", "series", "split", "tight", "index"] = ...,
        into: type[dict] = ...,
    ) -> dict:
        ...

    @overload
    def to_dict(
        self, orient: Literal["records"], into: type[dict] = ...
    ) -> list[dict]:
        ...

    def to_dict(
        self,
        orient: Literal[
            "dict", "list", "series", "split", "tight", "records", "index"
        ] = "dict",
        into: type[dict] = dict,
        index: bool = True,
    ) -> dict | list[dict]:
        """
        Convert the DataFrame to a dictionary.

        The type of the key-value pairs can be customized with the parameters
        (see below).

        Parameters
        ----------
        orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'}
            Determines the type of the values of the dictionary.

            - 'dict' (default) : dict like {column -> {index -> value}}
            - 'list' : dict like {column -> [values]}
            - 'series' : dict like {column -> Series(values)}
            - 'split' : dict like
              {'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
            - 'tight' : dict like
              {'index' -> [index], 'columns' -> [columns], 'data' -> [values],
              'index_names' -> [index.names], 'column_names' -> [column.names]}
            - 'records' : list like
              [{column -> value}, ... , {column -> value}]
            - 'index' : dict like {index -> {column -> value}}

            .. versionadded:: 1.4.0
                'tight' as an allowed value for the ``orient`` argument

        into : class, default dict
            The collections.abc.Mapping subclass used for all Mappings
            in the return value. Can be the actual class or an empty
            instance of the mapping type you want. If you want a
            collections.defaultdict, you must pass it initialized.

        index : bool, default True
            Whether to include the index item (and index_names item if `orient`
            is 'tight') in the returned dictionary. Can only be ``False``
            when `orient` is 'split' or 'tight'.
.. versionadded:: 2.0.0 Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. See Also -------- DataFrame.from_dict: Create a DataFrame from a dictionary. DataFrame.to_json: Convert a DataFrame to JSON format. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df.to_dict() {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}} You can specify the return orientation. >>> df.to_dict('series') {'col1': row1 1 row2 2 Name: col1, dtype: int64, 'col2': row1 0.50 row2 0.75 Name: col2, dtype: float64} >>> df.to_dict('split') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]} >>> df.to_dict('records') [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}] >>> df.to_dict('index') {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} >>> df.to_dict('tight') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]], 'index_names': [None], 'column_names': [None]} You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}), defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})] """ from pandas.core.methods.to_dict import to_dict return to_dict(self, orient, into, index) def to_gbq( self, destination_table: str, project_id: str | None = None, chunksize: int | None = None, reauth: bool = False, if_exists: str = "fail", auth_local_webserver: bool = True, table_schema: list[dict[str, str]] | None = None, location: str | None = None, progress_bar: bool = True, credentials=None, ) -> None: """ Write a DataFrame to a Google BigQuery table. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. Parameters ---------- destination_table : str Name of table to be written, in the form ``dataset.tablename``. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. chunksize : int, optional Number of rows to be inserted in each chunk from the dataframe. Set to ``None`` to load the whole dataframe at once. reauth : bool, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. if_exists : str, default 'fail' Behavior when the destination table exists. Value can be one of: ``'fail'`` If table exists raise pandas_gbq.gbq.TableCreationError. ``'replace'`` If table exists, drop it, recreate it, and insert data. ``'append'`` If table exists, insert data. Create if does not exist. auth_local_webserver : bool, default True Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. 
_console flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. .. versionchanged:: 1.5.0 Default value is changed to ``True``. Google has deprecated the ``auth_local_webserver = False`` `"out of band" (copy-paste) flow <https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html?m=1#disallowed-oob>`_. table_schema : list of dicts, optional List of BigQuery table fields to which according DataFrame columns conform to, e.g. ``[{'name': 'col1', 'type': 'STRING'},...]``. If schema is not provided, it will be generated according to dtypes of DataFrame columns. See BigQuery API documentation on available names of a field. *New in version 0.3.1 of pandas-gbq*. location : str, optional Location where the load job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of the target dataset. *New in version 0.5.0 of pandas-gbq*. progress_bar : bool, default True Use the library `tqdm` to show the progress bar for the upload, chunk by chunk. *New in version 0.5.0 of pandas-gbq*. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. *New in version 0.8.0 of pandas-gbq*. See Also -------- pandas_gbq.to_gbq : This function in the pandas-gbq library. read_gbq : Read a DataFrame from Google BigQuery. """ from pandas.io import gbq gbq.to_gbq( self, destination_table, project_id=project_id, chunksize=chunksize, reauth=reauth, if_exists=if_exists, auth_local_webserver=auth_local_webserver, table_schema=table_schema, location=location, progress_bar=progress_bar, credentials=credentials, ) def from_records( cls, data, index=None, exclude=None, columns=None, coerce_float: bool = False, nrows: int | None = None, ) -> DataFrame: """ Convert structured or record ndarray to DataFrame. Creates a DataFrame object from a structured ndarray, sequence of tuples or dicts, or DataFrame. Parameters ---------- data : structured ndarray, sequence of tuples or dicts, or DataFrame Structured input data. index : str, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use. exclude : sequence, default None Columns or fields to exclude. columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns). coerce_float : bool, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. nrows : int, default None Number of rows to read if data is an iterator. Returns ------- DataFrame See Also -------- DataFrame.from_dict : DataFrame from dict of array-like or dicts. DataFrame : DataFrame object creation using constructor. Examples -------- Data can be provided as a structured ndarray: >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')], ... 
dtype=[('col_1', 'i4'), ('col_2', 'U1')]) >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of dicts: >>> data = [{'col_1': 3, 'col_2': 'a'}, ... {'col_1': 2, 'col_2': 'b'}, ... {'col_1': 1, 'col_2': 'c'}, ... {'col_1': 0, 'col_2': 'd'}] >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of tuples with corresponding columns: >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')] >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2']) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d """ if isinstance(data, DataFrame): if columns is not None: if is_scalar(columns): columns = [columns] data = data[columns] if index is not None: data = data.set_index(index) if exclude is not None: data = data.drop(columns=exclude) return data.copy(deep=False) result_index = None # Make a copy of the input columns so we can modify it if columns is not None: columns = ensure_index(columns) def maybe_reorder( arrays: list[ArrayLike], arr_columns: Index, columns: Index, index ) -> tuple[list[ArrayLike], Index, Index | None]: """ If our desired 'columns' do not match the data's pre-existing 'arr_columns', we re-order our arrays. This is like a pre-emptive (cheap) reindex. """ if len(arrays): length = len(arrays[0]) else: length = 0 result_index = None if len(arrays) == 0 and index is None and length == 0: result_index = default_index(0) arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, length) return arrays, arr_columns, result_index if is_iterator(data): if nrows == 0: return cls() try: first_row = next(data) except StopIteration: return cls(index=index, columns=columns) dtype = None if hasattr(first_row, "dtype") and first_row.dtype.names: dtype = first_row.dtype values = [first_row] if nrows is None: values += data else: values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) else: data = values if isinstance(data, dict): if columns is None: columns = arr_columns = ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] arr_columns_list = [] for k, v in data.items(): if k in columns: arr_columns_list.append(k) arrays.append(v) arr_columns = Index(arr_columns_list) arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = to_arrays(data, columns) arr_columns = columns else: arrays, arr_columns = to_arrays(data, columns) if coerce_float: for i, arr in enumerate(arrays): if arr.dtype == object: # error: Argument 1 to "maybe_convert_objects" has # incompatible type "Union[ExtensionArray, ndarray]"; # expected "ndarray" arrays[i] = lib.maybe_convert_objects( arr, # type: ignore[arg-type] try_float=True, ) arr_columns = ensure_index(arr_columns) if columns is None: columns = arr_columns else: arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) if exclude is None: exclude = set() else: exclude = set(exclude) if index is not None: if isinstance(index, str) or not hasattr(index, "__iter__"): i = columns.get_loc(index) exclude.add(index) if len(arrays) > 0: result_index = Index(arrays[i], name=index) else: result_index = Index([], name=index) else: try: index_data = [arrays[arr_columns.get_loc(field)] for field in index] except (KeyError, TypeError): # raised by get_loc, see GH#29258 result_index = index else: result_index = ensure_index_from_sequences(index_data, names=index) exclude.update(index) if 
any(exclude): arr_exclude = [x for x in exclude if x in arr_columns] to_remove = [arr_columns.get_loc(col) for col in arr_exclude] arrays = [v for i, v in enumerate(arrays) if i not in to_remove] columns = columns.drop(exclude) manager = get_option("mode.data_manager") mgr = arrays_to_mgr(arrays, columns, result_index, typ=manager) return cls(mgr) def to_records( self, index: bool = True, column_dtypes=None, index_dtypes=None ) -> np.recarray: """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. column_dtypes : str, type, dict, default None If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) If the DataFrame index has no label then the recarray field name is set to 'index'. If the index has a label then this is used as the field name: >>> df.index = df.index.rename("I") >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')]) As well as for the index: >>> df.to_records(index_dtypes="<S2") rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')]) >>> index_dtypes = f"<S{df.index.str.len().max()}" >>> df.to_records(index_dtypes=index_dtypes) rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) """ if index: ix_vals = [ np.asarray(self.index.get_level_values(i)) for i in range(self.index.nlevels) ] arrays = ix_vals + [ np.asarray(self.iloc[:, i]) for i in range(len(self.columns)) ] index_names = list(self.index.names) if isinstance(self.index, MultiIndex): index_names = com.fill_missing_names(index_names) elif index_names[0] is None: index_names = ["index"] names = [str(name) for name in itertools.chain(index_names, self.columns)] else: arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))] names = [str(c) for c in self.columns] index_names = [] index_len = len(index_names) formats = [] for i, v in enumerate(arrays): index_int = i # When the names and arrays are collected, we # first collect those in the DataFrame's index, # followed by those in its columns. 
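            # For example, with one unnamed index level and columns
            # ["A", "B"], the collected order is [index, A, B] and
            # index_len is 1.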
# # Thus, the total length of the array is: # len(index_names) + len(DataFrame.columns). # # This check allows us to see whether we are # handling a name / array in the index or column. if index_int < index_len: dtype_mapping = index_dtypes name = index_names[index_int] else: index_int -= index_len dtype_mapping = column_dtypes name = self.columns[index_int] # We have a dictionary, so we get the data type # associated with the index or column (which can # be denoted by its name in the DataFrame or its # position in DataFrame's array of indices or # columns, whichever is applicable. if is_dict_like(dtype_mapping): if name in dtype_mapping: dtype_mapping = dtype_mapping[name] elif index_int in dtype_mapping: dtype_mapping = dtype_mapping[index_int] else: dtype_mapping = None # If no mapping can be found, use the array's # dtype attribute for formatting. # # A valid dtype must either be a type or # string naming a type. if dtype_mapping is None: formats.append(v.dtype) elif isinstance(dtype_mapping, (type, np.dtype, str)): # error: Argument 1 to "append" of "list" has incompatible # type "Union[type, dtype[Any], str]"; expected "dtype[Any]" formats.append(dtype_mapping) # type: ignore[arg-type] else: element = "row" if i < index_len else "column" msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}" raise ValueError(msg) return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats}) def _from_arrays( cls, arrays, columns, index, dtype: Dtype | None = None, verify_integrity: bool = True, ) -> DataFrame: """ Create DataFrame from a list of arrays corresponding to the columns. Parameters ---------- arrays : list-like of arrays Each array in the list corresponds to one column, in order. columns : list-like, Index The column names for the resulting DataFrame. index : list-like, Index The rows labels for the resulting DataFrame. dtype : dtype, optional Optional dtype to enforce for all arrays. verify_integrity : bool, default True Validate and homogenize all input. If set to False, it is assumed that all elements of `arrays` are actual arrays how they will be stored in a block (numpy ndarray or ExtensionArray), have the same length as and are aligned with the index, and that `columns` and `index` are ensured to be an Index object. Returns ------- DataFrame """ if dtype is not None: dtype = pandas_dtype(dtype) manager = get_option("mode.data_manager") columns = ensure_index(columns) if len(columns) != len(arrays): raise ValueError("len(columns) must match len(arrays)") mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, verify_integrity=verify_integrity, typ=manager, ) return cls(mgr) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path", ) def to_stata( self, path: FilePath | WriteBuffer[bytes], *, convert_dates: dict[Hashable, str] | None = None, write_index: bool = True, byteorder: str | None = None, time_stamp: datetime.datetime | None = None, data_label: str | None = None, variable_labels: dict[Hashable, str] | None = None, version: int | None = 114, convert_strl: Sequence[Hashable] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, value_labels: dict[Hashable, dict[float, str]] | None = None, ) -> None: """ Export DataFrame object to Stata dta format. Writes the DataFrame to a Stata dataset file. "dta" files contain a Stata dataset. 
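        The resulting file can be read back into pandas with
        :func:`read_stata`, or opened directly in Stata.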
Parameters ---------- path : str, path object, or buffer String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information. write_index : bool Write the index to Stata dataset. byteorder : str Can be ">", "<", "little", or "big". default is `sys.byteorder`. time_stamp : datetime A datetime to use as file creation date. Default is the current time. data_label : str, optional A label for the data set. Must be 80 characters or smaller. variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. version : {{114, 117, 118, 119, None}}, default 114 Version to use in the output dta file. Set to None to let pandas decide between 118 or 119 formats depending on the number of columns in the frame. Version 114 can be read by Stata 10 and later. Version 117 can be read by Stata 13 or later. Version 118 is supported in Stata 14 and later. Version 119 is supported in Stata 15 and later. Version 114 limits string variables to 244 characters or fewer while versions 117 and later allow strings with lengths up to 2,000,000 characters. Versions 118 and 119 support Unicode characters, and version 119 supports more than 32,767 variables. Version 119 should usually only be used when the number of variables exceeds the capacity of dta format 118. Exporting smaller datasets in format 119 may have unintended consequences, and, as of November 2020, Stata SE cannot read version 119 files. convert_strl : list, optional List of column names to convert to string columns to Stata StrL format. Only available if version is 117. Storing strings in the StrL format can produce smaller dta files if strings have more than 8 characters and values are repeated. {compression_options} .. versionadded:: 1.1.0 .. versionchanged:: 1.4.0 Zstandard support. {storage_options} .. versionadded:: 1.2.0 value_labels : dict of dicts Dictionary containing columns as keys and dictionaries of column value to labels as values. Labels for a single variable must be 32,000 characters or smaller. .. versionadded:: 1.4.0 Raises ------ NotImplementedError * If datetimes contain timezone information * Column dtype is not representable in Stata ValueError * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters See Also -------- read_stata : Import Stata data files. io.stata.StataWriter : Low-level writer for Stata data files. io.stata.StataWriter117 : Low-level writer for version 117 files. Examples -------- >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], ... 
'speed': [350, 18, 361, 15]}}) >>> df.to_stata('animals.dta') # doctest: +SKIP """ if version not in (114, 117, 118, 119, None): raise ValueError("Only formats 114, 117, 118 and 119 are supported.") if version == 114: if convert_strl is not None: raise ValueError("strl is not supported in format 114") from pandas.io.stata import StataWriter as statawriter elif version == 117: # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriter117 as statawriter, ) else: # versions 118 and 119 # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriterUTF8 as statawriter, ) kwargs: dict[str, Any] = {} if version is None or version >= 117: # strl conversion is only supported >= 117 kwargs["convert_strl"] = convert_strl if version is None or version >= 118: # Specifying the version is only supported for UTF8 (118 or 119) kwargs["version"] = version writer = statawriter( path, self, convert_dates=convert_dates, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, write_index=write_index, variable_labels=variable_labels, compression=compression, storage_options=storage_options, value_labels=value_labels, **kwargs, ) writer.write_file() def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None: """ Write a DataFrame to the binary Feather format. Parameters ---------- path : str, path object, file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. If a string or a path, it will be used as Root Directory path when writing a partitioned dataset. **kwargs : Additional keywords passed to :func:`pyarrow.feather.write_feather`. Starting with pyarrow 0.17, this includes the `compression`, `compression_level`, `chunksize` and `version` keywords. .. versionadded:: 1.1.0 Notes ----- This function writes the dataframe as a `feather file <https://arrow.apache.org/docs/python/feather.html>`_. Requires a default index. For saving the DataFrame with your custom index use a method that supports custom indices e.g. `to_parquet`. """ from pandas.io.feather_format import to_feather to_feather(self, path, **kwargs) Series.to_markdown, klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples="""Examples -------- >>> df = pd.DataFrame( ... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]} ... ) >>> print(df.to_markdown()) | | animal_1 | animal_2 | |---:|:-----------|:-----------| | 0 | elk | dog | | 1 | pig | quetzal | Output markdown with a tabulate option. 
        >>> print(df.to_markdown(tablefmt="grid"))
        +----+------------+------------+
        |    | animal_1   | animal_2   |
        +====+============+============+
        |  0 | elk        | dog        |
        +----+------------+------------+
        |  1 | pig        | quetzal    |
        +----+------------+------------+""",
    )
    def to_markdown(
        self,
        buf: FilePath | WriteBuffer[str] | None = None,
        mode: str = "wt",
        index: bool = True,
        storage_options: StorageOptions = None,
        **kwargs,
    ) -> str | None:
        if "showindex" in kwargs:
            raise ValueError("Pass 'index' instead of 'showindex'")

        kwargs.setdefault("headers", "keys")
        kwargs.setdefault("tablefmt", "pipe")
        kwargs.setdefault("showindex", index)
        tabulate = import_optional_dependency("tabulate")
        result = tabulate.tabulate(self, **kwargs)
        if buf is None:
            return result

        with get_handle(buf, mode, storage_options=storage_options) as handles:
            handles.handle.write(result)
        return None

    @overload
    def to_parquet(
        self,
        path: None = ...,
        engine: str = ...,
        compression: str | None = ...,
        index: bool | None = ...,
        partition_cols: list[str] | None = ...,
        storage_options: StorageOptions = ...,
        **kwargs,
    ) -> bytes:
        ...

    @overload
    def to_parquet(
        self,
        path: FilePath | WriteBuffer[bytes],
        engine: str = ...,
        compression: str | None = ...,
        index: bool | None = ...,
        partition_cols: list[str] | None = ...,
        storage_options: StorageOptions = ...,
        **kwargs,
    ) -> None:
        ...

    def to_parquet(
        self,
        path: FilePath | WriteBuffer[bytes] | None = None,
        engine: str = "auto",
        compression: str | None = "snappy",
        index: bool | None = None,
        partition_cols: list[str] | None = None,
        storage_options: StorageOptions = None,
        **kwargs,
    ) -> bytes | None:
        """
        Write a DataFrame to the binary parquet format.

        This function writes the dataframe as a `parquet file
        <https://parquet.apache.org/>`_. You can choose different parquet
        backends, and have the option of compression. See
        :ref:`the user guide <io.parquet>` for more details.

        Parameters
        ----------
        path : str, path object, file-like object, or None, default None
            String, path object (implementing ``os.PathLike[str]``), or file-like
            object implementing a binary ``write()`` function. If None, the result
            is returned as bytes. If a string or path, it will be used as Root
            Directory path when writing a partitioned dataset.

            .. versionchanged:: 1.2.0

            Previously this was "fname"

        engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
            Parquet library to use. If 'auto', then the option
            ``io.parquet.engine`` is used. The default ``io.parquet.engine``
            behavior is to try 'pyarrow', falling back to 'fastparquet' if
            'pyarrow' is unavailable.
        compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy'
            Name of the compression to use. Use ``None`` for no compression.
        index : bool, default None
            If ``True``, include the dataframe's index(es) in the file output.
            If ``False``, they will not be written to the file.
            If ``None``, similar to ``True`` the dataframe's index(es)
            will be saved. However, instead of being saved as values,
            the RangeIndex will be stored as a range in the metadata so it
            doesn't require much space and is faster. Other indexes will
            be included as columns in the file output.
        partition_cols : list, optional, default None
            Column names by which to partition the dataset.
            Columns are partitioned in the order they are given.
            Must be None if path is not a string.
        {storage_options}

            .. versionadded:: 1.2.0

        **kwargs
            Additional arguments passed to the parquet library. See
            :ref:`pandas io <io.parquet>` for more details.

        Returns
        -------
        bytes if no path argument is provided else None

        See Also
        --------
        read_parquet : Read a parquet file.
DataFrame.to_orc : Write an orc file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- This function requires either the `fastparquet <https://pypi.org/project/fastparquet>`_ or `pyarrow <https://arrow.apache.org/docs/python/>`_ library. Examples -------- >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}}) >>> df.to_parquet('df.parquet.gzip', ... compression='gzip') # doctest: +SKIP >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP col1 col2 0 1 3 1 2 4 If you want to get a buffer to the parquet content you can use a io.BytesIO object, as long as you don't use partition_cols, which creates multiple files. >>> import io >>> f = io.BytesIO() >>> df.to_parquet(f) >>> f.seek(0) 0 >>> content = f.read() """ from pandas.io.parquet import to_parquet return to_parquet( self, path, engine, compression=compression, index=index, partition_cols=partition_cols, storage_options=storage_options, **kwargs, ) def to_orc( self, path: FilePath | WriteBuffer[bytes] | None = None, *, engine: Literal["pyarrow"] = "pyarrow", index: bool | None = None, engine_kwargs: dict[str, Any] | None = None, ) -> bytes | None: """ Write a DataFrame to the ORC format. .. versionadded:: 1.5.0 Parameters ---------- path : str, file-like object or None, default None If a string, it will be used as Root Directory path when writing a partitioned dataset. By file-like object, we refer to objects with a write() method, such as a file handle (e.g. via builtin open function). If path is None, a bytes object is returned. engine : str, default 'pyarrow' ORC library to use. Pyarrow must be >= 7.0.0. index : bool, optional If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``infer`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. engine_kwargs : dict[str, Any] or None, default None Additional keyword arguments passed to :func:`pyarrow.orc.write_table`. Returns ------- bytes if no path argument is provided else None Raises ------ NotImplementedError Dtype of one or more columns is category, unsigned integers, interval, period or sparse. ValueError engine is not pyarrow. See Also -------- read_orc : Read a ORC file. DataFrame.to_parquet : Write a parquet file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- * Before using this function you should read the :ref:`user guide about ORC <io.orc>` and :ref:`install optional dependencies <install.warn_orc>`. * This function requires `pyarrow <https://arrow.apache.org/docs/python/>`_ library. * For supported dtypes please refer to `supported ORC features in Arrow <https://arrow.apache.org/docs/cpp/orc.html#data-types>`__. * Currently timezones in datetime columns are not preserved when a dataframe is converted into ORC files. 
Examples -------- >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [4, 3]}) >>> df.to_orc('df.orc') # doctest: +SKIP >>> pd.read_orc('df.orc') # doctest: +SKIP col1 col2 0 1 4 1 2 3 If you want to get a buffer to the orc content you can write it to io.BytesIO >>> import io >>> b = io.BytesIO(df.to_orc()) # doctest: +SKIP >>> b.seek(0) # doctest: +SKIP 0 >>> content = b.read() # doctest: +SKIP """ from pandas.io.orc import to_orc return to_orc( self, path, engine=engine, index=index, engine_kwargs=engine_kwargs ) def to_html( self, buf: FilePath | WriteBuffer[str], columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> None: ... def to_html( self, buf: None = ..., columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> str: ... header_type="bool", header="Whether to print column labels, default True", col_space_type="str or int, list or dict of int or str", col_space="The minimum width of each column in CSS length " "units. An int is assumed to be px units.", ) def to_html( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[Level] | None = None, col_space: ColspaceArgType | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: FormattersType | None = None, float_format: FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool | str = False, decimal: str = ".", bold_rows: bool = True, classes: str | list | tuple | None = None, escape: bool = True, notebook: bool = False, border: int | bool | None = None, table_id: str | None = None, render_links: bool = False, encoding: str | None = None, ) -> str | None: """ Render a DataFrame as an HTML table. %(shared_params)s bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.display.html.border``. 
        table_id : str, optional
            A css id is included in the opening `<table>` tag if specified.
        render_links : bool, default False
            Convert URLs to HTML links.
        encoding : str, default "utf-8"
            Set character encoding.

            .. versionadded:: 1.0
        %(returns)s
        See Also
        --------
        to_string : Convert DataFrame to a string.
        """
        if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS:
            raise ValueError("Invalid value for justify parameter")

        formatter = fmt.DataFrameFormatter(
            self,
            columns=columns,
            col_space=col_space,
            na_rep=na_rep,
            header=header,
            index=index,
            formatters=formatters,
            float_format=float_format,
            bold_rows=bold_rows,
            sparsify=sparsify,
            justify=justify,
            index_names=index_names,
            escape=escape,
            decimal=decimal,
            max_rows=max_rows,
            max_cols=max_cols,
            show_dimensions=show_dimensions,
        )
        # TODO: a generic formatter would be in DataFrameFormatter
        return fmt.DataFrameRenderer(formatter).to_html(
            buf=buf,
            classes=classes,
            notebook=notebook,
            border=border,
            encoding=encoding,
            table_id=table_id,
            render_links=render_links,
        )

    @doc(
        storage_options=_shared_docs["storage_options"],
        compression_options=_shared_docs["compression_options"] % "path_or_buffer",
    )
    def to_xml(
        self,
        path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
        index: bool = True,
        root_name: str | None = "data",
        row_name: str | None = "row",
        na_rep: str | None = None,
        attr_cols: list[str] | None = None,
        elem_cols: list[str] | None = None,
        namespaces: dict[str | None, str] | None = None,
        prefix: str | None = None,
        encoding: str = "utf-8",
        xml_declaration: bool | None = True,
        pretty_print: bool | None = True,
        parser: str | None = "lxml",
        stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = None,
        compression: CompressionOptions = "infer",
        storage_options: StorageOptions = None,
    ) -> str | None:
        """
        Render a DataFrame to an XML document.

        .. versionadded:: 1.3.0

        Parameters
        ----------
        path_or_buffer : str, path object, file-like object, or None, default None
            String, path object (implementing ``os.PathLike[str]``), or file-like
            object implementing a ``write()`` function. If None, the result is
            returned as a string.
        index : bool, default True
            Whether to include index in XML document.
        root_name : str, default 'data'
            The name of root element in XML document.
        row_name : str, default 'row'
            The name of row element in XML document.
        na_rep : str, optional
            Missing data representation.
        attr_cols : list-like, optional
            List of columns to write as attributes in row element.
            Hierarchical columns will be flattened with underscore
            delimiting the different levels.
        elem_cols : list-like, optional
            List of columns to write as children in row element. By default,
            all columns output as children of row element. Hierarchical
            columns will be flattened with underscore delimiting the
            different levels.
        namespaces : dict, optional
            All namespaces to be defined in root element. Keys of dict
            should be prefix names and values of dict corresponding URIs.
            Default namespaces should be given empty string key. For
            example, ::

                namespaces = {{"": "https://example.com"}}

        prefix : str, optional
            Namespace prefix to be used for every element and/or attribute
            in document. This should be one of the keys in ``namespaces``
            dict.
        encoding : str, default 'utf-8'
            Encoding of the resulting document.
        xml_declaration : bool, default True
            Whether to include the XML declaration at start of document.
        pretty_print : bool, default True
            Whether output should be pretty printed with indentation and
            line breaks.
parser : {{'lxml','etree'}}, default 'lxml' Parser module to use for building of tree. Only 'lxml' and 'etree' are supported. With 'lxml', the ability to use XSLT stylesheet is supported. stylesheet : str, path object or file-like object, optional A URL, file-like object, or a raw string containing an XSLT script used to transform the raw XML output. Script should use layout of elements and attributes from original output. This argument requires ``lxml`` to be installed. Only XSLT 1.0 scripts and not later versions is currently supported. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. {storage_options} Returns ------- None or str If ``io`` is None, returns the resulting XML format as a string. Otherwise returns None. See Also -------- to_json : Convert the pandas object to a JSON string. to_html : Convert DataFrame to a html. Examples -------- >>> df = pd.DataFrame({{'shape': ['square', 'circle', 'triangle'], ... 'degrees': [360, 360, 180], ... 'sides': [4, np.nan, 3]}}) >>> df.to_xml() # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row> <index>0</index> <shape>square</shape> <degrees>360</degrees> <sides>4.0</sides> </row> <row> <index>1</index> <shape>circle</shape> <degrees>360</degrees> <sides/> </row> <row> <index>2</index> <shape>triangle</shape> <degrees>180</degrees> <sides>3.0</sides> </row> </data> >>> df.to_xml(attr_cols=[ ... 'index', 'shape', 'degrees', 'sides' ... ]) # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row index="0" shape="square" degrees="360" sides="4.0"/> <row index="1" shape="circle" degrees="360"/> <row index="2" shape="triangle" degrees="180" sides="3.0"/> </data> >>> df.to_xml(namespaces={{"doc": "https://example.com"}}, ... prefix="doc") # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <doc:data xmlns:doc="https://example.com"> <doc:row> <doc:index>0</doc:index> <doc:shape>square</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides>4.0</doc:sides> </doc:row> <doc:row> <doc:index>1</doc:index> <doc:shape>circle</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides/> </doc:row> <doc:row> <doc:index>2</doc:index> <doc:shape>triangle</doc:shape> <doc:degrees>180</doc:degrees> <doc:sides>3.0</doc:sides> </doc:row> </doc:data> """ from pandas.io.formats.xml import ( EtreeXMLFormatter, LxmlXMLFormatter, ) lxml = import_optional_dependency("lxml.etree", errors="ignore") TreeBuilder: type[EtreeXMLFormatter] | type[LxmlXMLFormatter] if parser == "lxml": if lxml is not None: TreeBuilder = LxmlXMLFormatter else: raise ImportError( "lxml not found, please install or use the etree parser." 
) elif parser == "etree": TreeBuilder = EtreeXMLFormatter else: raise ValueError("Values for parser can only be lxml or etree.") xml_formatter = TreeBuilder( self, path_or_buffer=path_or_buffer, index=index, root_name=root_name, row_name=row_name, na_rep=na_rep, attr_cols=attr_cols, elem_cols=elem_cols, namespaces=namespaces, prefix=prefix, encoding=encoding, xml_declaration=xml_declaration, pretty_print=pretty_print, stylesheet=stylesheet, compression=compression, storage_options=storage_options, ) return xml_formatter.write_output() # ---------------------------------------------------------------------- def info( self, verbose: bool | None = None, buf: WriteBuffer[str] | None = None, max_cols: int | None = None, memory_usage: bool | str | None = None, show_counts: bool | None = None, ) -> None: info = DataFrameInfo( data=self, memory_usage=memory_usage, ) info.render( buf=buf, max_cols=max_cols, verbose=verbose, show_counts=show_counts, ) def memory_usage(self, index: bool = True, deep: bool = False) -> Series: """ Return the memory usage of each column in bytes. The memory usage can optionally include the contribution of the index and elements of `object` dtype. This value is displayed in `DataFrame.info` by default. This can be suppressed by setting ``pandas.options.display.memory_usage`` to False. Parameters ---------- index : bool, default True Specifies whether to include the memory usage of the DataFrame's index in returned Series. If ``index=True``, the memory usage of the index is the first item in the output. deep : bool, default False If True, introspect the data deeply by interrogating `object` dtypes for system-level memory consumption, and include it in the returned values. Returns ------- Series A Series whose index is the original column names and whose values is the memory usage of each column in bytes. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of an ndarray. Series.memory_usage : Bytes consumed by a Series. Categorical : Memory-efficient array for string values with many repeated values. DataFrame.info : Concise summary of a DataFrame. Notes ----- See the :ref:`Frequently Asked Questions <df-memory-usage>` for more details. Examples -------- >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t)) ... for t in dtypes]) >>> df = pd.DataFrame(data) >>> df.head() int64 float64 complex128 object bool 0 1 1.0 1.0+0.0j 1 True 1 1 1.0 1.0+0.0j 1 True 2 1 1.0 1.0+0.0j 1 True 3 1 1.0 1.0+0.0j 1 True 4 1 1.0 1.0+0.0j 1 True >>> df.memory_usage() Index 128 int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=False) int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 The memory footprint of `object` dtype columns is ignored by default: >>> df.memory_usage(deep=True) Index 128 int64 40000 float64 40000 complex128 80000 object 180000 bool 5000 dtype: int64 Use a Categorical for efficient storage of an object-dtype column with many repeated values. 
>>> df['object'].astype('category').memory_usage(deep=True) 5244 """ result = self._constructor_sliced( [c.memory_usage(index=False, deep=deep) for col, c in self.items()], index=self.columns, dtype=np.intp, ) if index: index_memory_usage = self._constructor_sliced( self.index.memory_usage(deep=deep), index=["Index"] ) result = index_memory_usage._append(result) return result def transpose(self, *args, copy: bool = False) -> DataFrame: """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. Parameters ---------- *args : tuple, optional Accepted for compatibility with NumPy. copy : bool, default False Whether to copy the data after transposing, even for DataFrames with a single dtype. Note that a copy is always required for mixed dtype DataFrames, or for DataFrames with any extension types. Returns ------- DataFrame The transposed DataFrame. See Also -------- numpy.transpose : Permute the dimensions of a given array. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the `object` dtype. In such a case, a copy of the data is always made. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = pd.DataFrame(data=d1) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T # or df1.transpose() >>> df1_transposed 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'name': ['Alice', 'Bob'], ... 'score': [9.5, 8], ... 'employed': [False, True], ... 'kids': [0, 0]} >>> df2 = pd.DataFrame(data=d2) >>> df2 name score employed kids 0 Alice 9.5 False 0 1 Bob 8.0 True 0 >>> df2_transposed = df2.T # or df2.transpose() >>> df2_transposed 0 1 name Alice Bob score 9.5 8.0 employed False True kids 0 0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the `object` dtype: >>> df2.dtypes name object score float64 employed bool kids int64 dtype: object >>> df2_transposed.dtypes 0 object 1 object dtype: object """ nv.validate_transpose(args, {}) # construct the args dtypes = list(self.dtypes) if self._can_fast_transpose: # Note: tests pass without this, but this improves perf quite a bit. new_vals = self._values.T if copy and not using_copy_on_write(): new_vals = new_vals.copy() result = self._constructor( new_vals, index=self.columns, columns=self.index, copy=False ) if using_copy_on_write() and len(self) > 0: result._mgr.add_references(self._mgr) # type: ignore[arg-type] elif ( self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]) ): # We have EAs with the same dtype. We can preserve that dtype in transpose. dtype = dtypes[0] arr_type = dtype.construct_array_type() values = self.values new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values] result = type(self)._from_arrays( new_values, index=self.columns, columns=self.index ) else: new_arr = self.values.T if copy and not using_copy_on_write(): new_arr = new_arr.copy() result = self._constructor( new_arr, index=self.columns, columns=self.index, # We already made a copy (more than one block) copy=False, ) return result.__finalize__(self, method="transpose") def T(self) -> DataFrame: """ The transpose of the DataFrame. 
Returns ------- DataFrame The transposed DataFrame. See Also -------- DataFrame.transpose : Transpose index and columns. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.T 0 1 col1 1 2 col2 3 4 """ return self.transpose() # ---------------------------------------------------------------------- # Indexing Methods def _ixs(self, i: int, axis: AxisInt = 0) -> Series: """ Parameters ---------- i : int axis : int Returns ------- Series """ # irow if axis == 0: new_mgr = self._mgr.fast_xs(i) # if we are a copy, mark as such copy = isinstance(new_mgr.array, np.ndarray) and new_mgr.array.base is None result = self._constructor_sliced(new_mgr, name=self.index[i]).__finalize__( self ) result._set_is_copy(self, copy=copy) return result # icol else: label = self.columns[i] col_mgr = self._mgr.iget(i) result = self._box_col_values(col_mgr, i) # this is a cached value, mark it so result._set_as_cached(label, self) return result def _get_column_array(self, i: int) -> ArrayLike: """ Get the values of the i'th column (ndarray or ExtensionArray, as stored in the Block) Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ return self._mgr.iget_values(i) def _iter_column_arrays(self) -> Iterator[ArrayLike]: """ Iterate over the arrays of all columns in order. This returns the values as stored in the Block (ndarray or ExtensionArray). Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ for i in range(len(self.columns)): yield self._get_column_array(i) def _getitem_nocopy(self, key: list): """ Behaves like __getitem__, but returns a view in cases where __getitem__ would make a copy. """ # TODO(CoW): can be removed if/when we are always Copy-on-Write indexer = self.columns._get_indexer_strict(key, "columns")[1] new_axis = self.columns[indexer] new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=0, allow_dups=True, copy=False, only_slice=True, ) return self._constructor(new_mgr) def __getitem__(self, key): check_dict_or_set_indexers(key) key = lib.item_from_zerodim(key) key = com.apply_if_callable(key, self) if is_hashable(key) and not is_iterator(key): # is_iterator to exclude generator e.g. test_getitem_listlike # shortcut if the key is in columns is_mi = isinstance(self.columns, MultiIndex) # GH#45316 Return view if key is not duplicated # Only use drop_duplicates with duplicates for performance if not is_mi and ( self.columns.is_unique and key in self.columns or key in self.columns.drop_duplicates(keep=False) ): return self._get_item_cache(key) elif is_mi and self.columns.is_unique and key in self.columns: return self._getitem_multilevel(key) # Do we have a slicer (on rows)? if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") if isinstance(indexer, np.ndarray): # reachable with DatetimeIndex indexer = lib.maybe_indices_to_slice( indexer.astype(np.intp, copy=False), len(self) ) if isinstance(indexer, np.ndarray): # GH#43223 If we can not convert, use take return self.take(indexer, axis=0) return self._slice(indexer, axis=0) # Do we have a (boolean) DataFrame? if isinstance(key, DataFrame): return self.where(key) # Do we have a (boolean) 1d indexer? 
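        # e.g. df[df["a"] > 0] or df[[True, False, True]] land here; a boolean
        # Series key is first aligned to df.index (see _getitem_bool_array).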
if com.is_bool_indexer(key): return self._getitem_bool_array(key) # We are left with two options: a single key, and a collection of keys, # We interpret tuples as collections only for non-MultiIndex is_single_key = isinstance(key, tuple) or not is_list_like(key) if is_single_key: if self.columns.nlevels > 1: return self._getitem_multilevel(key) indexer = self.columns.get_loc(key) if is_integer(indexer): indexer = [indexer] else: if is_iterator(key): key = list(key) indexer = self.columns._get_indexer_strict(key, "columns")[1] # take() does not accept boolean indexers if getattr(indexer, "dtype", None) == bool: indexer = np.where(indexer)[0] data = self._take_with_is_copy(indexer, axis=1) if is_single_key: # What does looking for a single key in a non-unique index return? # The behavior is inconsistent. It returns a Series, except when # - the key itself is repeated (test on data.shape, #9519), or # - we have a MultiIndex on columns (test on self.columns, #21309) if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex): # GH#26490 using data[key] can cause RecursionError return data._get_item_cache(key) return data def _getitem_bool_array(self, key): # also raises Exception if object array with NA values # warning here just in case -- previously __setitem__ was # reindexing but __getitem__ was not; it seems more reasonable to # go with the __setitem__ behavior since that is more consistent # with all other indexing behavior if isinstance(key, Series) and not key.index.equals(self.index): warnings.warn( "Boolean Series key will be reindexed to match DataFrame index.", UserWarning, stacklevel=find_stack_level(), ) elif len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}." ) # check_bool_indexer will throw exception if Series key cannot # be reindexed to match DataFrame rows key = check_bool_indexer(self.index, key) if key.all(): return self.copy(deep=None) indexer = key.nonzero()[0] return self._take_with_is_copy(indexer, axis=0) def _getitem_multilevel(self, key): # self.columns is a MultiIndex loc = self.columns.get_loc(key) if isinstance(loc, (slice, np.ndarray)): new_columns = self.columns[loc] result_columns = maybe_droplevels(new_columns, key) if self._is_mixed_type: result = self.reindex(columns=new_columns) result.columns = result_columns else: new_values = self._values[:, loc] result = self._constructor( new_values, index=self.index, columns=result_columns, copy=False ) if using_copy_on_write() and isinstance(loc, slice): result._mgr.add_references(self._mgr) # type: ignore[arg-type] result = result.__finalize__(self) # If there is only one column being returned, and its name is # either an empty string, or a tuple with an empty string as its # first element, then treat the empty string as a placeholder # and return the column as if the user had provided that empty # string in the key. If the result is a Series, exclude the # implied empty string from its name. if len(result.columns) == 1: # e.g. 
test_frame_getitem_multicolumn_empty_level, # test_frame_mixed_depth_get, test_loc_setitem_single_column_slice top = result.columns[0] if isinstance(top, tuple): top = top[0] if top == "": result = result[""] if isinstance(result, Series): result = self._constructor_sliced( result, index=self.index, name=key ) result._set_is_copy(self) return result else: # loc is neither a slice nor ndarray, so must be an int return self._ixs(loc, axis=1) def _get_value(self, index, col, takeable: bool = False) -> Scalar: """ Quickly retrieve single value at passed column and index. Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- scalar Notes ----- Assumes that both `self.index._index_as_unique` and `self.columns._index_as_unique`; Caller is responsible for checking. """ if takeable: series = self._ixs(col, axis=1) return series._values[index] series = self._get_item_cache(col) engine = self.index._engine if not isinstance(self.index, MultiIndex): # CategoricalIndex: Trying to use the engine fastpath may give incorrect # results if our categories are integers that dont match our codes # IntervalIndex: IntervalTree has no get_loc row = self.index.get_loc(index) return series._values[row] # For MultiIndex going through engine effectively restricts us to # same-length tuples; see test_get_set_value_no_partial_indexing loc = engine.get_loc(index) return series._values[loc] def isetitem(self, loc, value) -> None: """ Set the given value in the column with position `loc`. This is a positional analogue to ``__setitem__``. Parameters ---------- loc : int or sequence of ints Index position for the column. value : scalar or arraylike Value(s) for the column. Notes ----- ``frame.isetitem(loc, value)`` is an in-place method as it will modify the DataFrame in place (not returning a new object). In contrast to ``frame.iloc[:, i] = value`` which will try to update the existing values in place, ``frame.isetitem(loc, value)`` will not update the values of the column itself in place, it will instead insert a new array. In cases where ``frame.columns`` is unique, this is equivalent to ``frame[frame.columns[i]] = value``. 
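
        Examples
        --------
        A minimal illustration of positional column assignment (hypothetical
        two-column frame); the column at position 1 is replaced wholesale:

        >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
        >>> df.isetitem(1, [7, 8])
        >>> df
           a  b
        0  1  7
        1  2  8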
""" if isinstance(value, DataFrame): if is_scalar(loc): loc = [loc] for i, idx in enumerate(loc): arraylike = self._sanitize_column(value.iloc[:, i]) self._iset_item_mgr(idx, arraylike, inplace=False) return arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=False) def __setitem__(self, key, value): if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) key = com.apply_if_callable(key, self) # see if we can slice the rows if isinstance(key, slice): slc = self.index._convert_slice_indexer(key, kind="getitem") return self._setitem_slice(slc, value) if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2: self._setitem_frame(key, value) elif isinstance(key, (Series, np.ndarray, list, Index)): self._setitem_array(key, value) elif isinstance(value, DataFrame): self._set_item_frame_value(key, value) elif ( is_list_like(value) and not self.columns.is_unique and 1 < len(self.columns.get_indexer_for([key])) == len(value) ): # Column to set is duplicated self._setitem_array([key], value) else: # set column self._set_item(key, value) def _setitem_slice(self, key: slice, value) -> None: # NB: we can't just use self.loc[key] = value because that # operates on labels and we need to operate positional for # backwards-compat, xref GH#31469 self._check_setitem_copy() self.iloc[key] = value def _setitem_array(self, key, value): # also raises Exception if object array with NA values if com.is_bool_indexer(key): # bool indexer is indexing along rows if len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}!" ) key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] self._check_setitem_copy() if isinstance(value, DataFrame): # GH#39931 reindex since iloc does not align value = value.reindex(self.index.take(indexer)) self.iloc[indexer] = value else: # Note: unlike self.iloc[:, indexer] = value, this will # never try to overwrite values inplace if isinstance(value, DataFrame): check_key_length(self.columns, key, value) for k1, k2 in zip(key, value.columns): self[k1] = value[k2] elif not is_list_like(value): for col in key: self[col] = value elif isinstance(value, np.ndarray) and value.ndim == 2: self._iset_not_inplace(key, value) elif np.ndim(value) > 1: # list of lists value = DataFrame(value).values return self._setitem_array(key, value) else: self._iset_not_inplace(key, value) def _iset_not_inplace(self, key, value): # GH#39510 when setting with df[key] = obj with a list-like key and # list-like value, we iterate over those listlikes and set columns # one at a time. This is different from dispatching to # `self.loc[:, key]= value` because loc.__setitem__ may overwrite # data inplace, whereas this will insert new arrays. def igetitem(obj, i: int): # Note: we catch DataFrame obj before getting here, but # hypothetically would return obj.iloc[:, i] if isinstance(obj, np.ndarray): return obj[..., i] else: return obj[i] if self.columns.is_unique: if np.shape(value)[-1] != len(key): raise ValueError("Columns must be same length as key") for i, col in enumerate(key): self[col] = igetitem(value, i) else: ilocs = self.columns.get_indexer_non_unique(key)[0] if (ilocs < 0).any(): # key entries not in self.columns raise NotImplementedError if np.shape(value)[-1] != len(ilocs): raise ValueError("Columns must be same length as key") assert np.ndim(value) <= 2 orig_columns = self.columns # Using self.iloc[:, i] = ... 
may set values inplace, which # by convention we do not do in __setitem__ try: self.columns = Index(range(len(self.columns))) for i, iloc in enumerate(ilocs): self[iloc] = igetitem(value, i) finally: self.columns = orig_columns def _setitem_frame(self, key, value): # support boolean setting with DataFrame input, e.g. # df[df > df2] = 0 if isinstance(key, np.ndarray): if key.shape != self.shape: raise ValueError("Array conditional must be same shape as self") key = self._constructor(key, **self._construct_axes_dict(), copy=False) if key.size and not all(is_bool_dtype(dtype) for dtype in key.dtypes): raise TypeError( "Must pass DataFrame or 2-d ndarray with boolean values only" ) self._check_inplace_setting(value) self._check_setitem_copy() self._where(-key, value, inplace=True) def _set_item_frame_value(self, key, value: DataFrame) -> None: self._ensure_valid_index(value) # align columns if key in self.columns: loc = self.columns.get_loc(key) cols = self.columns[loc] len_cols = 1 if is_scalar(cols) or isinstance(cols, tuple) else len(cols) if len_cols != len(value.columns): raise ValueError("Columns must be same length as key") # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame if isinstance(self.columns, MultiIndex) and isinstance( loc, (slice, Series, np.ndarray, Index) ): cols_droplevel = maybe_droplevels(cols, key) if len(cols_droplevel) and not cols_droplevel.equals(value.columns): value = value.reindex(cols_droplevel, axis=1) for col, col_droplevel in zip(cols, cols_droplevel): self[col] = value[col_droplevel] return if is_scalar(cols): self[cols] = value[value.columns[0]] return # now align rows arraylike = _reindex_for_setitem(value, self.index) self._set_item_mgr(key, arraylike) return if len(value.columns) != 1: raise ValueError( "Cannot set a DataFrame with multiple columns to the single " f"column {key}" ) self[key] = value[value.columns[0]] def _iset_item_mgr( self, loc: int | slice | np.ndarray, value, inplace: bool = False ) -> None: # when called from _set_item_mgr loc can be anything returned from get_loc self._mgr.iset(loc, value, inplace=inplace) self._clear_item_cache() def _set_item_mgr(self, key, value: ArrayLike) -> None: try: loc = self._info_axis.get_loc(key) except KeyError: # This item wasn't present, just insert at end self._mgr.insert(len(self._info_axis), key, value) else: self._iset_item_mgr(loc, value) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _iset_item(self, loc: int, value) -> None: arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=True) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _set_item(self, key, value) -> None: """ Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. 
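
        For example (via the public ``df[key] = value`` path, which calls this
        method), a Series shorter than the frame is conformed to the index and
        missing labels become NaN:

        >>> df = pd.DataFrame({"a": [1, 2, 3]})
        >>> df["b"] = pd.Series([10], index=[2])
        >>> df
           a     b
        0  1   NaN
        1  2   NaN
        2  3  10.0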
""" value = self._sanitize_column(value) if ( key in self.columns and value.ndim == 1 and not is_extension_array_dtype(value) ): # broadcast across multiple columns if necessary if not self.columns.is_unique or isinstance(self.columns, MultiIndex): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)).T self._set_item_mgr(key, value) def _set_value( self, index: IndexLabel, col, value: Scalar, takeable: bool = False ) -> None: """ Put single value at passed column and index. Parameters ---------- index : Label row label col : Label column label value : scalar takeable : bool, default False Sets whether or not index/col interpreted as indexers """ try: if takeable: icol = col iindex = cast(int, index) else: icol = self.columns.get_loc(col) iindex = self.index.get_loc(index) self._mgr.column_setitem(icol, iindex, value, inplace_only=True) self._clear_item_cache() except (KeyError, TypeError, ValueError, LossySetitemError): # get_loc might raise a KeyError for missing labels (falling back # to (i)loc will do expansion of the index) # column_setitem will do validation that may raise TypeError, # ValueError, or LossySetitemError # set using a non-recursive method & reset the cache if takeable: self.iloc[index, col] = value else: self.loc[index, col] = value self._item_cache.pop(col, None) except InvalidIndexError as ii_err: # GH48729: Seems like you are trying to assign a value to a # row when only scalar options are permitted raise InvalidIndexError( f"You can only assign a scalar value not a {type(value)}" ) from ii_err def _ensure_valid_index(self, value) -> None: """ Ensure that if we don't have an index, that we can create one from the passed value. """ # GH5632, make sure that we are a Series convertible if not len(self.index) and is_list_like(value) and len(value): if not isinstance(value, DataFrame): try: value = Series(value) except (ValueError, NotImplementedError, TypeError) as err: raise ValueError( "Cannot set a frame with no defined index " "and a value that cannot be converted to a Series" ) from err # GH31368 preserve name of index index_copy = value.index.copy() if self.index.name is not None: index_copy.name = self.index.name self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan) def _box_col_values(self, values: SingleDataManager, loc: int) -> Series: """ Provide boxed values for a column. """ # Lookup in columns so that if e.g. a str datetime was passed # we attach the Timestamp object as the name. 
name = self.columns[loc] klass = self._constructor_sliced # We get index=self.index bc values is a SingleDataManager return klass(values, name=name, fastpath=True).__finalize__(self) # ---------------------------------------------------------------------- # Lookup Caching def _clear_item_cache(self) -> None: self._item_cache.clear() def _get_item_cache(self, item: Hashable) -> Series: """Return the cached item, item represents a label indexer.""" if using_copy_on_write(): loc = self.columns.get_loc(item) return self._ixs(loc, axis=1) cache = self._item_cache res = cache.get(item) if res is None: # All places that call _get_item_cache have unique columns, # pending resolution of GH#33047 loc = self.columns.get_loc(item) res = self._ixs(loc, axis=1) cache[item] = res # for a chain res._is_copy = self._is_copy return res def _reset_cacher(self) -> None: # no-op for DataFrame pass def _maybe_cache_changed(self, item, value: Series, inplace: bool) -> None: """ The object has called back to us saying maybe it has changed. """ loc = self._info_axis.get_loc(item) arraylike = value._values old = self._ixs(loc, axis=1) if old._values is value._values and inplace: # GH#46149 avoid making unnecessary copies/block-splitting return self._mgr.iset(loc, arraylike, inplace=inplace) # ---------------------------------------------------------------------- # Unsorted def query(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> DataFrame: ... def query(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def query(self, expr: str, *, inplace: bool = ..., **kwargs) -> DataFrame | None: ... def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | None: """ Query the columns of a DataFrame with a boolean expression. Parameters ---------- expr : str The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. You can refer to column names that are not valid Python variable names by surrounding them in backticks. Thus, column names containing spaces or punctuations (besides underscores) or starting with digits must be surrounded by backticks. (For example, a column named "Area (cm^2)" would be referenced as ```Area (cm^2)```). Column names which are Python keywords (like "list", "for", "import", etc) cannot be used. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether to modify the DataFrame rather than creating a new one. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. Returns ------- DataFrame or None DataFrame resulting from the provided query expression or None if ``inplace=True``. See Also -------- eval : Evaluate a string describing operations on DataFrame columns. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. 
This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. Please note that Python keywords may not be used as identifiers. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. *Backtick quoted variables* Backtick quoted variables are parsed as literal Python code and are converted internally to a Python valid identifier. This can lead to the following problems. During parsing a number of disallowed characters inside the backtick quoted string are replaced by strings that are allowed as a Python identifier. These characters include all operators in Python, the space character, the question mark, the exclamation mark, the dollar sign, and the euro sign. For other characters that fall outside the ASCII range (U+0001..U+007F) and those that are not further specified in PEP 3131, the query parser will raise an error. This excludes whitespace different than the space character, but also the hashtag (as it is used for comments) and the backtick itself (backtick can also not be escaped). In a special case, quotes that make a pair around a backtick can confuse the parser. For example, ```it's` > `that's``` will raise an error, as it forms a quoted string (``'s > `that'``) with a backtick inside. See also the Python documentation about lexical analysis (https://docs.python.org/3/reference/lexical_analysis.html) in combination with the source code in :mod:`pandas.core.computation.parsing`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ inplace = validate_bool_kwarg(inplace, "inplace") if not isinstance(expr, str): msg = f"expr must be a string to be evaluated, {type(expr)} given" raise ValueError(msg) kwargs["level"] = kwargs.pop("level", 0) + 1 kwargs["target"] = None res = self.eval(expr, **kwargs) try: result = self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query result = self[res] if inplace: self._update_inplace(result) return None else: return result def eval(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> Any: ... def eval(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def eval(self, expr: str, *, inplace: bool = False, **kwargs) -> Any | None: """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. 
This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, pandas object, or None The result of the evaluation or None if ``inplace=True``. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Multiple columns can be assigned to using multi-line expressions: >>> df.eval( ... ''' ... C = A + B ... D = A - B ... ''' ... ) A B C D 0 1 10 11 -9 1 2 8 10 -6 2 3 6 9 -3 3 4 4 8 0 4 5 2 7 3 """ from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, "inplace") kwargs["level"] = kwargs.pop("level", 0) + 1 index_resolvers = self._get_index_resolvers() column_resolvers = self._get_cleaned_column_resolvers() resolvers = column_resolvers, index_resolvers if "target" not in kwargs: kwargs["target"] = self kwargs["resolvers"] = tuple(kwargs.get("resolvers", ())) + resolvers return _eval(expr, inplace=inplace, **kwargs) def select_dtypes(self, include=None, exclude=None) -> DataFrame: """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. See Also -------- DataFrame.dtypes: Return Series with the data type of each column. Notes ----- * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <https://numpy.org/doc/stable/reference/arrays.scalars.html>`__ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ``'timedelta64'`` * To select Pandas categorical dtypes, use ``'category'`` * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 
'c': [1.0, 2.0] * 3}) >>> df a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int64']) b c 0 True 1.0 1 False 2.0 2 True 1.0 3 False 2.0 4 True 1.0 5 False 2.0 """ if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () selection = (frozenset(include), frozenset(exclude)) if not any(selection): raise ValueError("at least one of include or exclude must be nonempty") # convert the myriad valid dtypes object to a single representation def check_int_infer_dtype(dtypes): converted_dtypes: list[type] = [] for dtype in dtypes: # Numpy maps int to different types (int32, in64) on Windows and Linux # see https://github.com/numpy/numpy/issues/9464 if (isinstance(dtype, str) and dtype == "int") or (dtype is int): converted_dtypes.append(np.int32) converted_dtypes.append(np.int64) elif dtype == "float" or dtype is float: # GH#42452 : np.dtype("float") coerces to np.float64 from Numpy 1.20 converted_dtypes.extend([np.float64, np.float32]) else: converted_dtypes.append(infer_dtype_from_object(dtype)) return frozenset(converted_dtypes) include = check_int_infer_dtype(include) exclude = check_int_infer_dtype(exclude) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) # can't both include AND exclude! if not include.isdisjoint(exclude): raise ValueError(f"include and exclude overlap on {(include & exclude)}") def dtype_predicate(dtype: DtypeObj, dtypes_set) -> bool: # GH 46870: BooleanDtype._is_numeric == True but should be excluded return issubclass(dtype.type, tuple(dtypes_set)) or ( np.number in dtypes_set and getattr(dtype, "_is_numeric", False) and not is_bool_dtype(dtype) ) def predicate(arr: ArrayLike) -> bool: dtype = arr.dtype if include: if not dtype_predicate(dtype, include): return False if exclude: if dtype_predicate(dtype, exclude): return False return True mgr = self._mgr._get_data_subset(predicate).copy(deep=None) return type(self)(mgr).__finalize__(self) def insert( self, loc: int, column: Hashable, value: Scalar | AnyArrayLike, allow_duplicates: bool | lib.NoDefault = lib.no_default, ) -> None: """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns). column : str, number, or hashable object Label of the inserted column. value : Scalar, Series, or array-like allow_duplicates : bool, optional, default lib.no_default See Also -------- Index.insert : Insert new item by index. 
Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.insert(1, "newcol", [99, 99]) >>> df col1 newcol col2 0 1 99 3 1 2 99 4 >>> df.insert(0, "col1", [100, 100], allow_duplicates=True) >>> df col1 col1 newcol col2 0 100 1 99 3 1 100 2 99 4 Notice that pandas uses index alignment in case of `value` from type `Series`: >>> df.insert(0, "col0", pd.Series([5, 6], index=[1, 2])) >>> df col0 col1 col1 newcol col2 0 NaN 100 1 99 3 1 5.0 100 2 99 4 """ if allow_duplicates is lib.no_default: allow_duplicates = False if allow_duplicates and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'allow_duplicates=True' when " "'self.flags.allows_duplicate_labels' is False." ) if not allow_duplicates and column in self.columns: # Should this be a different kind of error?? raise ValueError(f"cannot insert {column}, already exists") if not isinstance(loc, int): raise TypeError("loc must be int") value = self._sanitize_column(value) self._mgr.insert(loc, column, value) def assign(self, **kwargs) -> DataFrame: r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. Later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 You can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy(deep=None) for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) return data def _sanitize_column(self, value) -> ArrayLike: """ Ensures new columns (which go into the BlockManager as new blocks) are always copied and converted into an array. 
Parameters ---------- value : scalar, Series, or array-like Returns ------- numpy.ndarray or ExtensionArray """ self._ensure_valid_index(value) # We can get there through isetitem with a DataFrame # or through loc single_block_path if isinstance(value, DataFrame): return _reindex_for_setitem(value, self.index) elif is_dict_like(value): return _reindex_for_setitem(Series(value), self.index) if is_list_like(value): com.require_length_match(value, self.index) return sanitize_array(value, self.index, copy=True, allow_2d=True) def _series(self): return { item: Series( self._mgr.iget(idx), index=self.index, name=item, fastpath=True ) for idx, item in enumerate(self.columns) } # ---------------------------------------------------------------------- # Reindexing and alignment def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): frame = self columns = axes["columns"] if columns is not None: frame = frame._reindex_columns( columns, method, copy, level, fill_value, limit, tolerance ) index = axes["index"] if index is not None: frame = frame._reindex_index( index, method, copy, level, fill_value, limit, tolerance ) return frame def _reindex_index( self, new_index, method, copy: bool, level: Level, fill_value=np.nan, limit=None, tolerance=None, ): new_index, indexer = self.index.reindex( new_index, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {0: [new_index, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_columns( self, new_columns, method, copy: bool, level: Level, fill_value=None, limit=None, tolerance=None, ): new_columns, indexer = self.columns.reindex( new_columns, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {1: [new_columns, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_multi( self, axes: dict[str, Index], copy: bool, fill_value ) -> DataFrame: """ We are guaranteed non-Nones in the axes. """ new_index, row_indexer = self.index.reindex(axes["index"]) new_columns, col_indexer = self.columns.reindex(axes["columns"]) if row_indexer is not None and col_indexer is not None: # Fastpath. By doing two 'take's at once we avoid making an # unnecessary copy. # We only get here with `not self._is_mixed_type`, which (almost) # ensures that self.values is cheap. It may be worth making this # condition more specific. indexer = row_indexer, col_indexer new_values = take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor( new_values, index=new_index, columns=new_columns, copy=False ) else: return self._reindex_with_indexers( {0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value, ) def align( self, other: DataFrame, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value=None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> DataFrame: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) """ Examples -------- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) Change the row labels. >>> df.set_axis(['a', 'b', 'c'], axis='index') A B a 1 4 b 2 5 c 3 6 Change the column labels. 
        >>> df.set_axis(['I', 'II'], axis='columns')
           I  II
        0  1   4
        1  2   5
        2  3   6
        """


@overload
def get_handle(
    path_or_buf: FilePath | BaseBuffer,
    mode: str,
    *,
    encoding: str | None = ...,
    compression: CompressionOptions = ...,
    memory_map: bool = ...,
    is_text: Literal[False],
    errors: str | None = ...,
    storage_options: StorageOptions = ...,
) -> IOHandles[bytes]:
    ...


@overload
def get_handle(
    path_or_buf: FilePath | BaseBuffer,
    mode: str,
    *,
    encoding: str | None = ...,
    compression: CompressionOptions = ...,
    memory_map: bool = ...,
    is_text: Literal[True] = ...,
    errors: str | None = ...,
    storage_options: StorageOptions = ...,
) -> IOHandles[str]:
    ...


@overload
def get_handle(
    path_or_buf: FilePath | BaseBuffer,
    mode: str,
    *,
    encoding: str | None = ...,
    compression: CompressionOptions = ...,
    memory_map: bool = ...,
    is_text: bool = ...,
    errors: str | None = ...,
    storage_options: StorageOptions = ...,
) -> IOHandles[str] | IOHandles[bytes]:
    ...


@doc(compression_options=_shared_docs["compression_options"] % "path_or_buf")
def get_handle(
    path_or_buf: FilePath | BaseBuffer,
    mode: str,
    *,
    encoding: str | None = None,
    compression: CompressionOptions = None,
    memory_map: bool = False,
    is_text: bool = True,
    errors: str | None = None,
    storage_options: StorageOptions = None,
) -> IOHandles[str] | IOHandles[bytes]:
    """
    Get file handle for given path/buffer and mode.

    Parameters
    ----------
    path_or_buf : str or file handle
        File path or object.
    mode : str
        Mode to open path_or_buf with.
    encoding : str or None
        Encoding to use.
    {compression_options}

        .. versionchanged:: 1.0.0
           May now be a dict with key 'method' as compression mode
           and other keys as compression options if compression
           mode is 'zip'.

        .. versionchanged:: 1.1.0
           Passing compression options as keys in dict is now
           supported for compression modes 'gzip', 'bz2', 'zstd' and 'zip'.

        .. versionchanged:: 1.4.0
           Zstandard support.

    memory_map : bool, default False
        See parsers._parser_params for more information. Only used by read_csv.
    is_text : bool, default True
        Whether the type of the content passed to the file/buffer is
        string or bytes. This is not the same as `"b" not in mode`.
        If a string content is passed to a binary file/buffer, a wrapper is
        inserted.
errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. storage_options: StorageOptions = None Passed to _get_filepath_or_buffer .. versionchanged:: 1.2.0 Returns the dataclass IOHandles """ # Windows does not default to utf-8. Set to utf-8 for a consistent behavior encoding = encoding or "utf-8" errors = errors or "strict" # read_csv does not know whether the buffer is opened in binary/text mode if _is_binary_mode(path_or_buf, mode) and "b" not in mode: mode += "b" # validate encoding and errors codecs.lookup(encoding) if isinstance(errors, str): codecs.lookup_error(errors) # open URLs ioargs = _get_filepath_or_buffer( path_or_buf, encoding=encoding, compression=compression, mode=mode, storage_options=storage_options, ) handle = ioargs.filepath_or_buffer handles: list[BaseBuffer] # memory mapping needs to be the first step # only used for read_csv handle, memory_map, handles = _maybe_memory_map(handle, memory_map) is_path = isinstance(handle, str) compression_args = dict(ioargs.compression) compression = compression_args.pop("method") # Only for write methods if "r" not in mode and is_path: check_parent_directory(str(handle)) if compression: if compression != "zstd": # compression libraries do not like an explicit text-mode ioargs.mode = ioargs.mode.replace("t", "") elif compression == "zstd" and "b" not in ioargs.mode: # python-zstandard defaults to text mode, but we always expect # compression libraries to use binary mode. ioargs.mode += "b" # GZ Compression if compression == "gzip": if isinstance(handle, str): # error: Incompatible types in assignment (expression has type # "GzipFile", variable has type "Union[str, BaseBuffer]") handle = gzip.GzipFile( # type: ignore[assignment] filename=handle, mode=ioargs.mode, **compression_args, ) else: handle = gzip.GzipFile( # No overload variant of "GzipFile" matches argument types # "Union[str, BaseBuffer]", "str", "Dict[str, Any]" fileobj=handle, # type: ignore[call-overload] mode=ioargs.mode, **compression_args, ) # BZ Compression elif compression == "bz2": # Overload of "BZ2File" to handle pickle protocol 5 # "Union[str, BaseBuffer]", "str", "Dict[str, Any]" handle = _BZ2File( # type: ignore[call-overload] handle, mode=ioargs.mode, **compression_args, ) # ZIP Compression elif compression == "zip": # error: Argument 1 to "_BytesZipFile" has incompatible type # "Union[str, BaseBuffer]"; expected "Union[Union[str, PathLike[str]], # ReadBuffer[bytes], WriteBuffer[bytes]]" handle = _BytesZipFile( handle, ioargs.mode, **compression_args # type: ignore[arg-type] ) if handle.buffer.mode == "r": handles.append(handle) zip_names = handle.buffer.namelist() if len(zip_names) == 1: handle = handle.buffer.open(zip_names.pop()) elif not zip_names: raise ValueError(f"Zero files found in ZIP file {path_or_buf}") else: raise ValueError( "Multiple files found in ZIP file. 
" f"Only one file per ZIP: {zip_names}" ) # TAR Encoding elif compression == "tar": compression_args.setdefault("mode", ioargs.mode) if isinstance(handle, str): handle = _BytesTarFile(name=handle, **compression_args) else: # error: Argument "fileobj" to "_BytesTarFile" has incompatible # type "BaseBuffer"; expected "Union[ReadBuffer[bytes], # WriteBuffer[bytes], None]" handle = _BytesTarFile( fileobj=handle, **compression_args # type: ignore[arg-type] ) assert isinstance(handle, _BytesTarFile) if "r" in handle.buffer.mode: handles.append(handle) files = handle.buffer.getnames() if len(files) == 1: file = handle.buffer.extractfile(files[0]) assert file is not None handle = file elif not files: raise ValueError(f"Zero files found in TAR archive {path_or_buf}") else: raise ValueError( "Multiple files found in TAR archive. " f"Only one file per TAR archive: {files}" ) # XZ Compression elif compression == "xz": # error: Argument 1 to "LZMAFile" has incompatible type "Union[str, # BaseBuffer]"; expected "Optional[Union[Union[str, bytes, PathLike[str], # PathLike[bytes]], IO[bytes]]]" handle = get_lzma_file()(handle, ioargs.mode) # type: ignore[arg-type] # Zstd Compression elif compression == "zstd": zstd = import_optional_dependency("zstandard") if "r" in ioargs.mode: open_args = {"dctx": zstd.ZstdDecompressor(**compression_args)} else: open_args = {"cctx": zstd.ZstdCompressor(**compression_args)} handle = zstd.open( handle, mode=ioargs.mode, **open_args, ) # Unrecognized Compression else: msg = f"Unrecognized compression type: {compression}" raise ValueError(msg) assert not isinstance(handle, str) handles.append(handle) elif isinstance(handle, str): # Check whether the filename is to be opened in binary mode. # Binary mode does not support 'encoding' and 'newline'. 
if ioargs.encoding and "b" not in ioargs.mode: # Encoding handle = open( handle, ioargs.mode, encoding=ioargs.encoding, errors=errors, newline="", ) else: # Binary mode handle = open(handle, ioargs.mode) handles.append(handle) # Convert BytesIO or file objects passed with an encoding is_wrapped = False if not is_text and ioargs.mode == "rb" and isinstance(handle, TextIOBase): # not added to handles as it does not open/buffer resources handle = _BytesIOWrapper( handle, encoding=ioargs.encoding, ) elif is_text and ( compression or memory_map or _is_binary_mode(handle, ioargs.mode) ): if ( not hasattr(handle, "readable") or not hasattr(handle, "writable") or not hasattr(handle, "seekable") ): handle = _IOWrapper(handle) # error: Argument 1 to "TextIOWrapper" has incompatible type # "_IOWrapper"; expected "IO[bytes]" handle = TextIOWrapper( handle, # type: ignore[arg-type] encoding=ioargs.encoding, errors=errors, newline="", ) handles.append(handle) # only marked as wrapped when the caller provided a handle is_wrapped = not ( isinstance(ioargs.filepath_or_buffer, str) or ioargs.should_close ) if "r" in ioargs.mode and not hasattr(handle, "read"): raise TypeError( "Expected file path name or file-like object, " f"got {type(ioargs.filepath_or_buffer)} type" ) handles.reverse() # close the most recently added buffer first if ioargs.should_close: assert not isinstance(ioargs.filepath_or_buffer, str) handles.append(ioargs.filepath_or_buffer) return IOHandles( # error: Argument "handle" to "IOHandles" has incompatible type # "Union[TextIOWrapper, GzipFile, BaseBuffer, typing.IO[bytes], # typing.IO[Any]]"; expected "pandas._typing.IO[Any]" handle=handle, # type: ignore[arg-type] # error: Argument "created_handles" to "IOHandles" has incompatible type # "List[BaseBuffer]"; expected "List[Union[IO[bytes], IO[str]]]" created_handles=handles, # type: ignore[arg-type] is_wrapped=is_wrapped, compression=ioargs.compression, ) The provided code snippet includes necessary dependencies for implementing the `to_orc` function. Write a Python function `def to_orc( df: DataFrame, path: FilePath | WriteBuffer[bytes] | None = None, *, engine: Literal["pyarrow"] = "pyarrow", index: bool | None = None, engine_kwargs: dict[str, Any] | None = None, ) -> bytes | None` to solve the following problem: Write a DataFrame to the ORC format. .. versionadded:: 1.5.0 Parameters ---------- df : DataFrame The dataframe to be written to ORC. Raises NotImplementedError if dtype of one or more columns is category, unsigned integers, intervals, periods or sparse. path : str, file-like object or None, default None If a string, it will be used as Root Directory path when writing a partitioned dataset. By file-like object, we refer to objects with a write() method, such as a file handle (e.g. via builtin open function). If path is None, a bytes object is returned. engine : str, default 'pyarrow' ORC library to use. Pyarrow must be >= 7.0.0. index : bool, optional If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``infer`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. engine_kwargs : dict[str, Any] or None, default None Additional keyword arguments passed to :func:`pyarrow.orc.write_table`. 
Returns ------- bytes if no path argument is provided else None Raises ------ NotImplementedError Dtype of one or more columns is category, unsigned integers, interval, period or sparse. ValueError engine is not pyarrow. Notes ----- * Before using this function you should read the :ref:`user guide about ORC <io.orc>` and :ref:`install optional dependencies <install.warn_orc>`. * This function requires `pyarrow <https://arrow.apache.org/docs/python/>`_ library. * For supported dtypes please refer to `supported ORC features in Arrow <https://arrow.apache.org/docs/cpp/orc.html#data-types>`__. * Currently timezones in datetime columns are not preserved when a dataframe is converted into ORC files. Here is the function: def to_orc( df: DataFrame, path: FilePath | WriteBuffer[bytes] | None = None, *, engine: Literal["pyarrow"] = "pyarrow", index: bool | None = None, engine_kwargs: dict[str, Any] | None = None, ) -> bytes | None: """ Write a DataFrame to the ORC format. .. versionadded:: 1.5.0 Parameters ---------- df : DataFrame The dataframe to be written to ORC. Raises NotImplementedError if dtype of one or more columns is category, unsigned integers, intervals, periods or sparse. path : str, file-like object or None, default None If a string, it will be used as Root Directory path when writing a partitioned dataset. By file-like object, we refer to objects with a write() method, such as a file handle (e.g. via builtin open function). If path is None, a bytes object is returned. engine : str, default 'pyarrow' ORC library to use. Pyarrow must be >= 7.0.0. index : bool, optional If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``infer`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. engine_kwargs : dict[str, Any] or None, default None Additional keyword arguments passed to :func:`pyarrow.orc.write_table`. Returns ------- bytes if no path argument is provided else None Raises ------ NotImplementedError Dtype of one or more columns is category, unsigned integers, interval, period or sparse. ValueError engine is not pyarrow. Notes ----- * Before using this function you should read the :ref:`user guide about ORC <io.orc>` and :ref:`install optional dependencies <install.warn_orc>`. * This function requires `pyarrow <https://arrow.apache.org/docs/python/>`_ library. * For supported dtypes please refer to `supported ORC features in Arrow <https://arrow.apache.org/docs/cpp/orc.html#data-types>`__. * Currently timezones in datetime columns are not preserved when a dataframe is converted into ORC files. """ if index is None: index = df.index.names[0] is not None if engine_kwargs is None: engine_kwargs = {} # If unsupported dtypes are found raise NotImplementedError # In Pyarrow 9.0.0 this check will no longer be needed for dtype in df.dtypes: if ( is_categorical_dtype(dtype) or is_interval_dtype(dtype) or is_period_dtype(dtype) or is_unsigned_integer_dtype(dtype) ): raise NotImplementedError( "The dtype of one or more columns is not supported yet." 
) if engine != "pyarrow": raise ValueError("engine must be 'pyarrow'") engine = import_optional_dependency(engine, min_version="7.0.0") orc = import_optional_dependency("pyarrow.orc") was_none = path is None if was_none: path = io.BytesIO() assert path is not None # For mypy with get_handle(path, "wb", is_text=False) as handles: assert isinstance(engine, ModuleType) # For mypy try: orc.write_table( engine.Table.from_pandas(df, preserve_index=index), handles.handle, **engine_kwargs, ) except TypeError as e: raise NotImplementedError( "The dtype of one or more columns is not supported yet." ) from e if was_none: assert isinstance(path, io.BytesIO) # For mypy return path.getvalue() return None
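A minimal usage sketch of the function above, illustrative only: it assumes pyarrow >= 7.0.0 is installed and that `to_orc` is importable from this module.

import io

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})

# With path=None (the default) the helper buffers into a BytesIO and
# returns the raw ORC payload as bytes.
orc_bytes = to_orc(df, index=False)
assert isinstance(orc_bytes, bytes)

# The payload round-trips through pandas' own ORC reader.
print(pd.read_orc(io.BytesIO(orc_bytes)))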
Write a DataFrame to the ORC format. .. versionadded:: 1.5.0 Parameters ---------- df : DataFrame The dataframe to be written to ORC. Raises NotImplementedError if dtype of one or more columns is category, unsigned integers, intervals, periods or sparse. path : str, file-like object or None, default None If a string, it will be used as Root Directory path when writing a partitioned dataset. By file-like object, we refer to objects with a write() method, such as a file handle (e.g. via builtin open function). If path is None, a bytes object is returned. engine : str, default 'pyarrow' ORC library to use. Pyarrow must be >= 7.0.0. index : bool, optional If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``infer`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. engine_kwargs : dict[str, Any] or None, default None Additional keyword arguments passed to :func:`pyarrow.orc.write_table`. Returns ------- bytes if no path argument is provided else None Raises ------ NotImplementedError Dtype of one or more columns is category, unsigned integers, interval, period or sparse. ValueError engine is not pyarrow. Notes ----- * Before using this function you should read the :ref:`user guide about ORC <io.orc>` and :ref:`install optional dependencies <install.warn_orc>`. * This function requires `pyarrow <https://arrow.apache.org/docs/python/>`_ library. * For supported dtypes please refer to `supported ORC features in Arrow <https://arrow.apache.org/docs/cpp/orc.html#data-types>`__. * Currently timezones in datetime columns are not preserved when a dataframe is converted into ORC files.
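One behavior worth noting in the implementation: the dtype guard loop runs before pyarrow is even imported, so unsupported columns fail fast. A small illustrative sketch under the same assumptions as above (the DataFrame is hypothetical):

import pandas as pd

cat_df = pd.DataFrame({"c": pd.Categorical(["x", "y", "x"])})
try:
    to_orc(cat_df)
except NotImplementedError as exc:
    # Raised by the explicit dtype check, before import_optional_dependency runs.
    print(exc)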
173,532
from __future__ import annotations from collections import abc import datetime from io import BytesIO import os import struct import sys from types import TracebackType from typing import ( IO, TYPE_CHECKING, Any, AnyStr, Callable, Final, Hashable, Sequence, cast, ) import warnings from dateutil.relativedelta import relativedelta import numpy as np from pandas._libs.lib import infer_dtype from pandas._libs.writers import max_len_string_array from pandas._typing import ( CompressionOptions, FilePath, ReadBuffer, StorageOptions, WriteBuffer, ) from pandas.errors import ( CategoricalConversionWarning, InvalidColumnName, PossiblePrecisionLoss, ValueLabelTypeMismatch, ) from pandas.util._decorators import ( Appender, doc, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_categorical_dtype, is_datetime64_dtype, is_numeric_dtype, ) from pandas import ( Categorical, DatetimeIndex, NaT, Timestamp, isna, to_datetime, to_timedelta, ) from pandas.core.arrays.boolean import BooleanDtype from pandas.core.arrays.integer import IntegerDtype from pandas.core.frame import DataFrame from pandas.core.indexes.base import Index from pandas.core.series import Series from pandas.core.shared_docs import _shared_docs from pandas.io.common import get_handle stata_epoch: Final = datetime.datetime(1960, 1, 1) class relativedelta(object): """ The relativedelta type is designed to be applied to an existing datetime and can replace specific components of that datetime, or represents an interval of time. It is based on the specification of the excellent work done by M.-A. Lemburg in his `mx.DateTime <https://www.egenix.com/products/python/mxBase/mxDateTime/>`_ extension. However, notice that this type does *NOT* implement the same algorithm as his work. Do *NOT* expect it to behave like mx.DateTime's counterpart. There are two different ways to build a relativedelta instance. The first one is passing it two date/datetime classes:: relativedelta(datetime1, datetime2) The second one is passing it any number of the following keyword arguments:: relativedelta(arg1=x,arg2=y,arg3=z...) year, month, day, hour, minute, second, microsecond: Absolute information (argument is singular); adding or subtracting a relativedelta with absolute information does not perform an arithmetic operation, but rather REPLACES the corresponding value in the original datetime with the value(s) in relativedelta. years, months, weeks, days, hours, minutes, seconds, microseconds: Relative information, may be negative (argument is plural); adding or subtracting a relativedelta with relative information performs the corresponding arithmetic operation on the original datetime value with the information in the relativedelta. weekday: One of the weekday instances (MO, TU, etc) available in the relativedelta module. These instances may receive a parameter N, specifying the Nth weekday, which could be positive or negative (like MO(+1) or MO(-2)). Not specifying it is the same as specifying +1. You can also use an integer, where 0=MO. This argument is always relative e.g. if the calculated date is already Monday, using MO(1) or MO(-1) won't change the day. To effectively make it absolute, use it in combination with the day argument (e.g. day=1, MO(1) for first Monday of the month). leapdays: Will add given days to the date found, if year is a leap year, and the date found is post 28 of february. yearday, nlyearday: Set the yearday or the non-leap year day (jump leap days). 
These are converted to day/month/leapdays information. There are relative and absolute forms of the keyword arguments. The plural is relative, and the singular is absolute. For each argument in the order below, the absolute form is applied first (by setting each attribute to that value) and then the relative form (by adding the value to the attribute). The order of attributes considered when this relativedelta is added to a datetime is: 1. Year 2. Month 3. Day 4. Hours 5. Minutes 6. Seconds 7. Microseconds Finally, weekday is applied, using the rule described above. For example >>> from datetime import datetime >>> from dateutil.relativedelta import relativedelta, MO >>> dt = datetime(2018, 4, 9, 13, 37, 0) >>> delta = relativedelta(hours=25, day=1, weekday=MO(1)) >>> dt + delta datetime.datetime(2018, 4, 2, 14, 37) First, the day is set to 1 (the first of the month), then 25 hours are added, to get to the 2nd day and 14th hour, finally the weekday is applied, but since the 2nd is already a Monday there is no effect. """ def __init__(self, dt1=None, dt2=None, years=0, months=0, days=0, leapdays=0, weeks=0, hours=0, minutes=0, seconds=0, microseconds=0, year=None, month=None, day=None, weekday=None, yearday=None, nlyearday=None, hour=None, minute=None, second=None, microsecond=None): if dt1 and dt2: # datetime is a subclass of date. So both must be date if not (isinstance(dt1, datetime.date) and isinstance(dt2, datetime.date)): raise TypeError("relativedelta only diffs datetime/date") # We allow two dates, or two datetimes, so we coerce them to be # of the same type if (isinstance(dt1, datetime.datetime) != isinstance(dt2, datetime.datetime)): if not isinstance(dt1, datetime.datetime): dt1 = datetime.datetime.fromordinal(dt1.toordinal()) elif not isinstance(dt2, datetime.datetime): dt2 = datetime.datetime.fromordinal(dt2.toordinal()) self.years = 0 self.months = 0 self.days = 0 self.leapdays = 0 self.hours = 0 self.minutes = 0 self.seconds = 0 self.microseconds = 0 self.year = None self.month = None self.day = None self.weekday = None self.hour = None self.minute = None self.second = None self.microsecond = None self._has_time = 0 # Get year / month delta between the two months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month) self._set_months(months) # Remove the year/month delta so the timedelta is just well-defined # time units (seconds, days and microseconds) dtm = self.__radd__(dt2) # If we've overshot our target, make an adjustment if dt1 < dt2: compare = operator.gt increment = 1 else: compare = operator.lt increment = -1 while compare(dt1, dtm): months += increment self._set_months(months) dtm = self.__radd__(dt2) # Get the timedelta between the "months-adjusted" date and dt1 delta = dt1 - dtm self.seconds = delta.seconds + delta.days * 86400 self.microseconds = delta.microseconds else: # Check for non-integer values in integer-only quantities if any(x is not None and x != int(x) for x in (years, months)): raise ValueError("Non-integer years and months are " "ambiguous and not currently supported.") # Relative information self.years = int(years) self.months = int(months) self.days = days + weeks * 7 self.leapdays = leapdays self.hours = hours self.minutes = minutes self.seconds = seconds self.microseconds = microseconds # Absolute information self.year = year self.month = month self.day = day self.hour = hour self.minute = minute self.second = second self.microsecond = microsecond if any(x is not None and int(x) != x for x in (year, month, day, hour, minute, second, 
microsecond)): # For now we'll deprecate floats - later it'll be an error. warn("Non-integer value passed as absolute information. " + "This is not a well-defined condition and will raise " + "errors in future versions.", DeprecationWarning) if isinstance(weekday, integer_types): self.weekday = weekdays[weekday] else: self.weekday = weekday yday = 0 if nlyearday: yday = nlyearday elif yearday: yday = yearday if yearday > 59: self.leapdays = -1 if yday: ydayidx = [31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 366] for idx, ydays in enumerate(ydayidx): if yday <= ydays: self.month = idx+1 if idx == 0: self.day = yday else: self.day = yday-ydayidx[idx-1] break else: raise ValueError("invalid year day (%d)" % yday) self._fix() def _fix(self): if abs(self.microseconds) > 999999: s = _sign(self.microseconds) div, mod = divmod(self.microseconds * s, 1000000) self.microseconds = mod * s self.seconds += div * s if abs(self.seconds) > 59: s = _sign(self.seconds) div, mod = divmod(self.seconds * s, 60) self.seconds = mod * s self.minutes += div * s if abs(self.minutes) > 59: s = _sign(self.minutes) div, mod = divmod(self.minutes * s, 60) self.minutes = mod * s self.hours += div * s if abs(self.hours) > 23: s = _sign(self.hours) div, mod = divmod(self.hours * s, 24) self.hours = mod * s self.days += div * s if abs(self.months) > 11: s = _sign(self.months) div, mod = divmod(self.months * s, 12) self.months = mod * s self.years += div * s if (self.hours or self.minutes or self.seconds or self.microseconds or self.hour is not None or self.minute is not None or self.second is not None or self.microsecond is not None): self._has_time = 1 else: self._has_time = 0 def weeks(self): return int(self.days / 7.0) def weeks(self, value): self.days = self.days - (self.weeks * 7) + value * 7 def _set_months(self, months): self.months = months if abs(self.months) > 11: s = _sign(self.months) div, mod = divmod(self.months * s, 12) self.months = mod * s self.years = div * s else: self.years = 0 def normalized(self): """ Return a version of this object represented entirely using integer values for the relative attributes. >>> relativedelta(days=1.5, hours=2).normalized() relativedelta(days=+1, hours=+14) :return: Returns a :class:`dateutil.relativedelta.relativedelta` object. 
""" # Cascade remainders down (rounding each to roughly nearest microsecond) days = int(self.days) hours_f = round(self.hours + 24 * (self.days - days), 11) hours = int(hours_f) minutes_f = round(self.minutes + 60 * (hours_f - hours), 10) minutes = int(minutes_f) seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8) seconds = int(seconds_f) microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds)) # Constructor carries overflow back up with call to _fix() return self.__class__(years=self.years, months=self.months, days=days, hours=hours, minutes=minutes, seconds=seconds, microseconds=microseconds, leapdays=self.leapdays, year=self.year, month=self.month, day=self.day, weekday=self.weekday, hour=self.hour, minute=self.minute, second=self.second, microsecond=self.microsecond) def __add__(self, other): if isinstance(other, relativedelta): return self.__class__(years=other.years + self.years, months=other.months + self.months, days=other.days + self.days, hours=other.hours + self.hours, minutes=other.minutes + self.minutes, seconds=other.seconds + self.seconds, microseconds=(other.microseconds + self.microseconds), leapdays=other.leapdays or self.leapdays, year=(other.year if other.year is not None else self.year), month=(other.month if other.month is not None else self.month), day=(other.day if other.day is not None else self.day), weekday=(other.weekday if other.weekday is not None else self.weekday), hour=(other.hour if other.hour is not None else self.hour), minute=(other.minute if other.minute is not None else self.minute), second=(other.second if other.second is not None else self.second), microsecond=(other.microsecond if other.microsecond is not None else self.microsecond)) if isinstance(other, datetime.timedelta): return self.__class__(years=self.years, months=self.months, days=self.days + other.days, hours=self.hours, minutes=self.minutes, seconds=self.seconds + other.seconds, microseconds=self.microseconds + other.microseconds, leapdays=self.leapdays, year=self.year, month=self.month, day=self.day, weekday=self.weekday, hour=self.hour, minute=self.minute, second=self.second, microsecond=self.microsecond) if not isinstance(other, datetime.date): return NotImplemented elif self._has_time and not isinstance(other, datetime.datetime): other = datetime.datetime.fromordinal(other.toordinal()) year = (self.year or other.year)+self.years month = self.month or other.month if self.months: assert 1 <= abs(self.months) <= 12 month += self.months if month > 12: year += 1 month -= 12 elif month < 1: year -= 1 month += 12 day = min(calendar.monthrange(year, month)[1], self.day or other.day) repl = {"year": year, "month": month, "day": day} for attr in ["hour", "minute", "second", "microsecond"]: value = getattr(self, attr) if value is not None: repl[attr] = value days = self.days if self.leapdays and month > 2 and calendar.isleap(year): days += self.leapdays ret = (other.replace(**repl) + datetime.timedelta(days=days, hours=self.hours, minutes=self.minutes, seconds=self.seconds, microseconds=self.microseconds)) if self.weekday: weekday, nth = self.weekday.weekday, self.weekday.n or 1 jumpdays = (abs(nth) - 1) * 7 if nth > 0: jumpdays += (7 - ret.weekday() + weekday) % 7 else: jumpdays += (ret.weekday() - weekday) % 7 jumpdays *= -1 ret += datetime.timedelta(days=jumpdays) return ret def __radd__(self, other): return self.__add__(other) def __rsub__(self, other): return self.__neg__().__radd__(other) def __sub__(self, other): if not isinstance(other, relativedelta): return 
NotImplemented # In case the other object defines __rsub__ return self.__class__(years=self.years - other.years, months=self.months - other.months, days=self.days - other.days, hours=self.hours - other.hours, minutes=self.minutes - other.minutes, seconds=self.seconds - other.seconds, microseconds=self.microseconds - other.microseconds, leapdays=self.leapdays or other.leapdays, year=(self.year if self.year is not None else other.year), month=(self.month if self.month is not None else other.month), day=(self.day if self.day is not None else other.day), weekday=(self.weekday if self.weekday is not None else other.weekday), hour=(self.hour if self.hour is not None else other.hour), minute=(self.minute if self.minute is not None else other.minute), second=(self.second if self.second is not None else other.second), microsecond=(self.microsecond if self.microsecond is not None else other.microsecond)) def __abs__(self): return self.__class__(years=abs(self.years), months=abs(self.months), days=abs(self.days), hours=abs(self.hours), minutes=abs(self.minutes), seconds=abs(self.seconds), microseconds=abs(self.microseconds), leapdays=self.leapdays, year=self.year, month=self.month, day=self.day, weekday=self.weekday, hour=self.hour, minute=self.minute, second=self.second, microsecond=self.microsecond) def __neg__(self): return self.__class__(years=-self.years, months=-self.months, days=-self.days, hours=-self.hours, minutes=-self.minutes, seconds=-self.seconds, microseconds=-self.microseconds, leapdays=self.leapdays, year=self.year, month=self.month, day=self.day, weekday=self.weekday, hour=self.hour, minute=self.minute, second=self.second, microsecond=self.microsecond) def __bool__(self): return not (not self.years and not self.months and not self.days and not self.hours and not self.minutes and not self.seconds and not self.microseconds and not self.leapdays and self.year is None and self.month is None and self.day is None and self.weekday is None and self.hour is None and self.minute is None and self.second is None and self.microsecond is None) # Compatibility with Python 2.x __nonzero__ = __bool__ def __mul__(self, other): try: f = float(other) except TypeError: return NotImplemented return self.__class__(years=int(self.years * f), months=int(self.months * f), days=int(self.days * f), hours=int(self.hours * f), minutes=int(self.minutes * f), seconds=int(self.seconds * f), microseconds=int(self.microseconds * f), leapdays=self.leapdays, year=self.year, month=self.month, day=self.day, weekday=self.weekday, hour=self.hour, minute=self.minute, second=self.second, microsecond=self.microsecond) __rmul__ = __mul__ def __eq__(self, other): if not isinstance(other, relativedelta): return NotImplemented if self.weekday or other.weekday: if not self.weekday or not other.weekday: return False if self.weekday.weekday != other.weekday.weekday: return False n1, n2 = self.weekday.n, other.weekday.n if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)): return False return (self.years == other.years and self.months == other.months and self.days == other.days and self.hours == other.hours and self.minutes == other.minutes and self.seconds == other.seconds and self.microseconds == other.microseconds and self.leapdays == other.leapdays and self.year == other.year and self.month == other.month and self.day == other.day and self.hour == other.hour and self.minute == other.minute and self.second == other.second and self.microsecond == other.microsecond) def __hash__(self): return hash(( self.weekday, 
self.years, self.months, self.days, self.hours, self.minutes, self.seconds, self.microseconds, self.leapdays, self.year, self.month, self.day, self.hour, self.minute, self.second, self.microsecond, )) def __ne__(self, other): return not self.__eq__(other) def __div__(self, other): try: reciprocal = 1 / float(other) except TypeError: return NotImplemented return self.__mul__(reciprocal) __truediv__ = __div__ def __repr__(self): l = [] for attr in ["years", "months", "days", "leapdays", "hours", "minutes", "seconds", "microseconds"]: value = getattr(self, attr) if value: l.append("{attr}={value:+g}".format(attr=attr, value=value)) for attr in ["year", "month", "day", "weekday", "hour", "minute", "second", "microsecond"]: value = getattr(self, attr) if value is not None: l.append("{attr}={value}".format(attr=attr, value=repr(value))) return "{classname}({attrs})".format(classname=self.__class__.__name__, attrs=", ".join(l)) def find_stack_level() -> int: """ Find the first place in the stack that is not inside pandas (tests notwithstanding). """ import pandas as pd pkg_dir = os.path.dirname(pd.__file__) test_dir = os.path.join(pkg_dir, "tests") # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow frame = inspect.currentframe() n = 0 while frame: fname = inspect.getfile(frame) if fname.startswith(pkg_dir) and not fname.startswith(test_dir): frame = frame.f_back n += 1 else: break return n class Series(base.IndexOpsMixin, NDFrame): # type: ignore[misc] """ One-dimensional ndarray with axis labels (including time series). Labels need not be unique but must be a hashable type. The object supports both integer- and label-based indexing and provides a host of methods for performing operations involving the index. Statistical methods from ndarray have been overridden to automatically exclude missing data (currently represented as NaN). Operations between Series (+, -, /, \\*, \\*\\*) align values based on their associated index values-- they need not be the same length. The result index will be the sorted union of the two indexes. Parameters ---------- data : array-like, Iterable, dict, or scalar value Contains data stored in Series. If data is a dict, argument order is maintained. index : array-like or Index (1d) Values must be hashable and have the same length as `data`. Non-unique index values are allowed. Will default to RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like and index is None, then the keys in the data are used as the index. If the index is not None, the resulting Series is reindexed with the index values. dtype : str, numpy.dtype, or ExtensionDtype, optional Data type for the output Series. If not specified, this will be inferred from `data`. See the :ref:`user guide <basics.dtypes>` for more usages. name : Hashable, default None The name to give to the Series. copy : bool, default False Copy input data. Only affects Series or 1d ndarray input. See examples. Notes ----- Please reference the :ref:`User Guide <basics.series>` for more information. Examples -------- Constructing Series from a dictionary with an Index specified >>> d = {'a': 1, 'b': 2, 'c': 3} >>> ser = pd.Series(data=d, index=['a', 'b', 'c']) >>> ser a 1 b 2 c 3 dtype: int64 The keys of the dictionary match with the Index values, hence the Index values have no effect. >>> d = {'a': 1, 'b': 2, 'c': 3} >>> ser = pd.Series(data=d, index=['x', 'y', 'z']) >>> ser x NaN y NaN z NaN dtype: float64 Note that the Index is first build with the keys from the dictionary. 
After this the Series is reindexed with the given Index values, hence we get all NaN as a result. Constructing Series from a list with `copy=False`. >>> r = [1, 2] >>> ser = pd.Series(r, copy=False) >>> ser.iloc[0] = 999 >>> r [1, 2] >>> ser 0 999 1 2 dtype: int64 Due to input data type the Series has a `copy` of the original data even though `copy=False`, so the data is unchanged. Constructing Series from a 1d ndarray with `copy=False`. >>> r = np.array([1, 2]) >>> ser = pd.Series(r, copy=False) >>> ser.iloc[0] = 999 >>> r array([999, 2]) >>> ser 0 999 1 2 dtype: int64 Due to input data type the Series has a `view` on the original data, so the data is changed as well. """ _typ = "series" _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) _name: Hashable _metadata: list[str] = ["name"] _internal_names_set = {"index"} | NDFrame._internal_names_set _accessors = {"dt", "cat", "str", "sparse"} _hidden_attrs = ( base.IndexOpsMixin._hidden_attrs | NDFrame._hidden_attrs | frozenset([]) ) # Override cache_readonly bc Series is mutable # error: Incompatible types in assignment (expression has type "property", # base class "IndexOpsMixin" defined the type as "Callable[[IndexOpsMixin], bool]") hasnans = property( # type: ignore[assignment] # error: "Callable[[IndexOpsMixin], bool]" has no attribute "fget" base.IndexOpsMixin.hasnans.fget, # type: ignore[attr-defined] doc=base.IndexOpsMixin.hasnans.__doc__, ) _mgr: SingleManager div: Callable[[Series, Any], Series] rdiv: Callable[[Series, Any], Series] # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index=None, dtype: Dtype | None = None, name=None, copy: bool | None = None, fastpath: bool = False, ) -> None: if ( isinstance(data, (SingleBlockManager, SingleArrayManager)) and index is None and dtype is None and (copy is False or copy is None) ): if using_copy_on_write(): data = data.copy(deep=False) # GH#33357 called with just the SingleBlockManager NDFrame.__init__(self, data) if fastpath: # e.g. 
from _box_col_values, skip validation of name object.__setattr__(self, "_name", name) else: self.name = name return if isinstance(data, (ExtensionArray, np.ndarray)): if copy is not False and using_copy_on_write(): if dtype is None or astype_is_view(data.dtype, pandas_dtype(dtype)): data = data.copy() if copy is None: copy = False # we are called internally, so short-circuit if fastpath: # data is a ndarray, index is defined if not isinstance(data, (SingleBlockManager, SingleArrayManager)): manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index) elif manager == "array": data = SingleArrayManager.from_array(data, index) elif using_copy_on_write() and not copy: data = data.copy(deep=False) if copy: data = data.copy() # skips validation of the name object.__setattr__(self, "_name", name) NDFrame.__init__(self, data) return if isinstance(data, SingleBlockManager) and using_copy_on_write() and not copy: data = data.copy(deep=False) name = ibase.maybe_extract_name(name, data, type(self)) if index is not None: index = ensure_index(index) if dtype is not None: dtype = self._validate_dtype(dtype) if data is None: index = index if index is not None else default_index(0) if len(index) or dtype is not None: data = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: data = [] if isinstance(data, MultiIndex): raise NotImplementedError( "initializing a Series from a MultiIndex is not supported" ) refs = None if isinstance(data, Index): if dtype is not None: data = data.astype(dtype, copy=False) if using_copy_on_write(): refs = data._references data = data._values else: # GH#24096 we need to ensure the index remains immutable data = data._values.copy() copy = False elif isinstance(data, np.ndarray): if len(data.dtype): # GH#13296 we are dealing with a compound dtype, which # should be treated as 2D raise ValueError( "Cannot construct a Series from an ndarray with " "compound dtype. Use DataFrame instead." ) elif isinstance(data, Series): if index is None: index = data.index data = data._mgr.copy(deep=False) else: data = data.reindex(index, copy=copy) copy = False data = data._mgr elif is_dict_like(data): data, index = self._init_dict(data, index, dtype) dtype = None copy = False elif isinstance(data, (SingleBlockManager, SingleArrayManager)): if index is None: index = data.index elif not data.index.equals(index) or copy: # GH#19275 SingleBlockManager input should only be called # internally raise AssertionError( "Cannot pass both SingleBlockManager " "`data` argument and a different " "`index` argument. `copy` must be False." ) elif isinstance(data, ExtensionArray): pass else: data = com.maybe_iterable_to_list(data) if is_list_like(data) and not len(data) and dtype is None: # GH 29405: Pre-2.0, this defaulted to float. 
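# Illustrative examples (hypothetical calls): under this rule pd.Series([])
# produces dtype=object, while pd.Series([], dtype="float64") explicitly
# opts back into the pre-2.0 float64 default.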
dtype = np.dtype(object) if index is None: if not is_list_like(data): data = [data] index = default_index(len(data)) elif is_list_like(data): com.require_length_match(data, index) # create/copy the manager if isinstance(data, (SingleBlockManager, SingleArrayManager)): if dtype is not None: data = data.astype(dtype=dtype, errors="ignore", copy=copy) elif copy: data = data.copy() else: data = sanitize_array(data, index, dtype, copy) manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index, refs=refs) elif manager == "array": data = SingleArrayManager.from_array(data, index) NDFrame.__init__(self, data) self.name = name self._set_axis(0, index) def _init_dict( self, data, index: Index | None = None, dtype: DtypeObj | None = None ): """ Derive the "_mgr" and "index" attributes of a new Series from a dictionary input. Parameters ---------- data : dict or dict-like Data used to populate the new Series. index : Index or None, default None Index for the new Series: if None, use dict keys. dtype : np.dtype, ExtensionDtype, or None, default None The dtype for the new Series: if None, infer from data. Returns ------- _data : BlockManager for the new Series index : index for the new Series """ keys: Index | tuple # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')] # raises KeyError), so we iterate the entire dict, and align if data: # GH:34717, issue was using zip to extract key and values from data. # using generators in effects the performance. # Below is the new way of extracting the keys and values keys = tuple(data.keys()) values = list(data.values()) # Generating list of values- faster way elif index is not None: # fastpath for Series(data=None). Just use broadcasting a scalar # instead of reindexing. if len(index) or dtype is not None: values = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: values = [] keys = index else: keys, values = (), [] # Input is now list-like, so rely on "standard" construction: s = self._constructor( values, index=keys, dtype=dtype, ) # Now we just make sure the order is respected, if any if data and index is not None: s = s.reindex(index, copy=False) return s._mgr, s.index # ---------------------------------------------------------------------- def _constructor(self) -> Callable[..., Series]: return Series def _constructor_expanddim(self) -> Callable[..., DataFrame]: """ Used when a manipulation result has one higher dimension as the original, such as Series.to_frame() """ from pandas.core.frame import DataFrame return DataFrame # types def _can_hold_na(self) -> bool: return self._mgr._can_hold_na # ndarray compatibility def dtype(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtype dtype('int64') """ return self._mgr.dtype def dtypes(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtypes dtype('int64') """ # DataFrame compatibility return self.dtype def name(self) -> Hashable: """ Return the name of the Series. The name of a Series becomes its index or column name if it is used to form a DataFrame. It is also used whenever displaying the Series using the interpreter. Returns ------- label (hashable object) The name of the Series, also the column name if part of a DataFrame. See Also -------- Series.rename : Sets the Series name when given a scalar input. Index.name : Corresponding Index property. 
Examples -------- The Series name can be set initially when calling the constructor. >>> s = pd.Series([1, 2, 3], dtype=np.int64, name='Numbers') >>> s 0 1 1 2 2 3 Name: Numbers, dtype: int64 >>> s.name = "Integers" >>> s 0 1 1 2 2 3 Name: Integers, dtype: int64 The name of a Series within a DataFrame is its column name. >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], ... columns=["Odd Numbers", "Even Numbers"]) >>> df Odd Numbers Even Numbers 0 1 2 1 3 4 2 5 6 >>> df["Even Numbers"].name 'Even Numbers' """ return self._name def name(self, value: Hashable) -> None: validate_all_hashable(value, error_name=f"{type(self).__name__}.name") object.__setattr__(self, "_name", value) def values(self): """ Return Series as ndarray or ndarray-like depending on the dtype. .. warning:: We recommend using :attr:`Series.array` or :meth:`Series.to_numpy`, depending on whether you need a reference to the underlying data or a NumPy array. Returns ------- numpy.ndarray or ndarray-like See Also -------- Series.array : Reference to the underlying data. Series.to_numpy : A NumPy array representing the underlying data. Examples -------- >>> pd.Series([1, 2, 3]).values array([1, 2, 3]) >>> pd.Series(list('aabc')).values array(['a', 'a', 'b', 'c'], dtype=object) >>> pd.Series(list('aabc')).astype('category').values ['a', 'a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] Timezone aware datetime data is converted to UTC: >>> pd.Series(pd.date_range('20130101', periods=3, ... tz='US/Eastern')).values array(['2013-01-01T05:00:00.000000000', '2013-01-02T05:00:00.000000000', '2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]') """ return self._mgr.external_values() def _values(self): """ Return the internal repr of this data (defined by Block.interval_values). This are the values as stored in the Block (ndarray or ExtensionArray depending on the Block class), with datetime64[ns] and timedelta64[ns] wrapped in ExtensionArrays to match Index._values behavior. Differs from the public ``.values`` for certain data types, because of historical backwards compatibility of the public attribute (e.g. period returns object ndarray and datetimetz a datetime64[ns] ndarray for ``.values`` while it returns an ExtensionArray for ``._values`` in those cases). Differs from ``.array`` in that this still returns the numpy array if the Block is backed by a numpy array (except for datetime64 and timedelta64 dtypes), while ``.array`` ensures to always return an ExtensionArray. Overview: dtype | values | _values | array | ----------- | ------------- | ------------- | ------------- | Numeric | ndarray | ndarray | PandasArray | Category | Categorical | Categorical | Categorical | dt64[ns] | ndarray[M8ns] | DatetimeArray | DatetimeArray | dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray | td64[ns] | ndarray[m8ns] | TimedeltaArray| ndarray[m8ns] | Period | ndarray[obj] | PeriodArray | PeriodArray | Nullable | EA | EA | EA | """ return self._mgr.internal_values() def _references(self) -> BlockValuesRefs | None: if isinstance(self._mgr, SingleArrayManager): return None return self._mgr._block.refs # error: Decorated property not supported def array(self) -> ExtensionArray: return self._mgr.array_values() # ops def ravel(self, order: str = "C") -> ArrayLike: """ Return the flattened underlying data as an ndarray or ExtensionArray. Returns ------- numpy.ndarray or ExtensionArray Flattened data of the Series. See Also -------- numpy.ndarray.ravel : Return a flattened array. 
""" arr = self._values.ravel(order=order) if isinstance(arr, np.ndarray) and using_copy_on_write(): arr.flags.writeable = False return arr def __len__(self) -> int: """ Return the length of the Series. """ return len(self._mgr) def view(self, dtype: Dtype | None = None) -> Series: """ Create a new view of the Series. This function will return a new Series with a view of the same underlying values in memory, optionally reinterpreted with a new data type. The new data type must preserve the same size in bytes as to not cause index misalignment. Parameters ---------- dtype : data type Data type object or one of their string representations. Returns ------- Series A new Series object as a view of the same data in memory. See Also -------- numpy.ndarray.view : Equivalent numpy function to create a new view of the same data in memory. Notes ----- Series are instantiated with ``dtype=float64`` by default. While ``numpy.ndarray.view()`` will return a view with the same data type as the original array, ``Series.view()`` (without specified dtype) will try using ``float64`` and may fail if the original data type size in bytes is not the same. Examples -------- >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8') >>> s 0 -2 1 -1 2 0 3 1 4 2 dtype: int8 The 8 bit signed integer representation of `-1` is `0b11111111`, but the same bytes represent 255 if read as an 8 bit unsigned integer: >>> us = s.view('uint8') >>> us 0 254 1 255 2 0 3 1 4 2 dtype: uint8 The views share the same underlying values: >>> us[0] = 128 >>> s 0 -128 1 -1 2 0 3 1 4 2 dtype: int8 """ # self.array instead of self._values so we piggyback on PandasArray # implementation res_values = self.array.view(dtype) res_ser = self._constructor(res_values, index=self.index, copy=False) if isinstance(res_ser._mgr, SingleBlockManager) and using_copy_on_write(): blk = res_ser._mgr._block blk.refs = cast("BlockValuesRefs", self._references) blk.refs.add_reference(blk) # type: ignore[arg-type] return res_ser.__finalize__(self, method="view") # ---------------------------------------------------------------------- # NDArray Compat _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: """ Return the values as a NumPy array. Users should not call this directly. Rather, it is invoked by :func:`numpy.array` and :func:`numpy.asarray`. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to use for the resulting NumPy array. By default, the dtype is inferred from the data. Returns ------- numpy.ndarray The values in the series converted to a :class:`numpy.ndarray` with the specified `dtype`. See Also -------- array : Create a new array from data. Series.array : Zero-copy view to the array backing the Series. Series.to_numpy : Series method for similar behavior. 
Examples -------- >>> ser = pd.Series([1, 2, 3]) >>> np.asarray(ser) array([1, 2, 3]) For timezone-aware data, the timezones may be retained with ``dtype='object'`` >>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) >>> np.asarray(tzser, dtype="object") array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'), Timestamp('2000-01-02 00:00:00+0100', tz='CET')], dtype=object) Or the values may be localized to UTC and the tzinfo discarded with ``dtype='datetime64[ns]'`` >>> np.asarray(tzser, dtype="datetime64[ns]") # doctest: +ELLIPSIS array(['1999-12-31T23:00:00.000000000', ...], dtype='datetime64[ns]') """ values = self._values arr = np.asarray(values, dtype=dtype) if using_copy_on_write() and astype_is_view(values.dtype, arr.dtype): arr = arr.view() arr.flags.writeable = False return arr # ---------------------------------------------------------------------- # Unary Methods # coercion __float__ = _coerce_method(float) __int__ = _coerce_method(int) # ---------------------------------------------------------------------- # indexers def axes(self) -> list[Index]: """ Return a list of the row axis labels. """ return [self.index] # ---------------------------------------------------------------------- # Indexing Methods def take(self, indices, axis: Axis = 0, **kwargs) -> Series: nv.validate_take((), kwargs) indices = ensure_platform_int(indices) if ( indices.ndim == 1 and using_copy_on_write() and is_range_indexer(indices, len(self)) ): return self.copy(deep=None) new_index = self.index.take(indices) new_values = self._values.take(indices) result = self._constructor(new_values, index=new_index, fastpath=True) return result.__finalize__(self, method="take") def _take_with_is_copy(self, indices, axis: Axis = 0) -> Series: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing for the SettingWithCopyWarning). For Series this does the same as the public take (it never sets `_is_copy`). See the docstring of `take` for full explanation of the parameters. """ return self.take(indices=indices, axis=axis) def _ixs(self, i: int, axis: AxisInt = 0) -> Any: """ Return the i-th value or values in the Series by location. Parameters ---------- i : int Returns ------- scalar (int) or Series (slice, sequence) """ return self._values[i] def _slice(self, slobj: slice | np.ndarray, axis: Axis = 0) -> Series: # axis kwarg is retained for compat with NDFrame method # _slice is *always* positional return self._get_values(slobj) def __getitem__(self, key): check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) if key is Ellipsis: return self key_is_scalar = is_scalar(key) if isinstance(key, (list, tuple)): key = unpack_1tuple(key) if is_integer(key) and self.index._should_fallback_to_positional: return self._values[key] elif key_is_scalar: return self._get_value(key) if is_hashable(key): # Otherwise index.get_value will raise InvalidIndexError try: # For labels that don't resolve as scalars like tuples and frozensets result = self._get_value(key) return result except (KeyError, TypeError, InvalidIndexError): # InvalidIndexError for e.g. 
generator # see test_series_getitem_corner_generator if isinstance(key, tuple) and isinstance(self.index, MultiIndex): # We still have the corner case where a tuple is a key # in the first level of our MultiIndex return self._get_values_tuple(key) if is_iterator(key): key = list(key) if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) return self._get_values(key) return self._get_with(key) def _get_with(self, key): # other: fancy integer or otherwise if isinstance(key, slice): # _convert_slice_indexer to determine if this slice is positional # or label based, and if the latter, convert to positional slobj = self.index._convert_slice_indexer(key, kind="getitem") return self._slice(slobj) elif isinstance(key, ABCDataFrame): raise TypeError( "Indexing a Series with DataFrame is not " "supported, use the appropriate DataFrame column" ) elif isinstance(key, tuple): return self._get_values_tuple(key) elif not is_list_like(key): # e.g. scalars that aren't recognized by lib.is_scalar, GH#32684 return self.loc[key] if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)): key = list(key) if isinstance(key, Index): key_type = key.inferred_type else: key_type = lib.infer_dtype(key, skipna=False) # Note: The key_type == "boolean" case should be caught by the # com.is_bool_indexer check in __getitem__ if key_type == "integer": # We need to decide whether to treat this as a positional indexer # (i.e. self.iloc) or label-based (i.e. self.loc) if not self.index._should_fallback_to_positional: return self.loc[key] else: return self.iloc[key] # handle the dup indexing case GH#4246 return self.loc[key] def _get_values_tuple(self, key: tuple): # mpl hackaround if com.any_none(*key): # mpl compat if we look up e.g. ser[:, np.newaxis]; # see tests.series.timeseries.test_mpl_compat_hack # the asarray is needed to avoid returning a 2D DatetimeArray result = np.asarray(self._values[key]) disallow_ndim_indexing(result) return result if not isinstance(self.index, MultiIndex): raise KeyError("key of type tuple not found and not a MultiIndex") # If key is contained, would have returned by now indexer, new_index = self.index.get_loc_level(key) new_ser = self._constructor(self._values[indexer], index=new_index, copy=False) if using_copy_on_write() and isinstance(indexer, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) def _get_values(self, indexer: slice | npt.NDArray[np.bool_]) -> Series: new_mgr = self._mgr.getitem_mgr(indexer) return self._constructor(new_mgr).__finalize__(self) def _get_value(self, label, takeable: bool = False): """ Quickly retrieve single value at passed index label. 
Parameters ---------- label : object takeable : interpret the index as indexers, default False Returns ------- scalar value """ if takeable: return self._values[label] # Similar to Index.get_value, but we do not fall back to positional loc = self.index.get_loc(label) if is_integer(loc): return self._values[loc] if isinstance(self.index, MultiIndex): mi = self.index new_values = self._values[loc] if len(new_values) == 1 and mi.nlevels == 1: # If more than one level left, we can not return a scalar return new_values[0] new_index = mi[loc] new_index = maybe_droplevels(new_index, label) new_ser = self._constructor( new_values, index=new_index, name=self.name, copy=False ) if using_copy_on_write() and isinstance(loc, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) else: return self.iloc[loc] def __setitem__(self, key, value) -> None: if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) cacher_needs_updating = self._check_is_chained_assignment_possible() if key is Ellipsis: key = slice(None) if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") return self._set_values(indexer, value) try: self._set_with_engine(key, value) except KeyError: # We have a scalar (or for MultiIndex or object-dtype, scalar-like) # key that is not present in self.index. if is_integer(key): if not self.index._should_fallback_to_positional: # GH#33469 self.loc[key] = value else: # positional setter # can't use _mgr.setitem_inplace yet bc could have *both* # KeyError and then ValueError, xref GH#45070 self._set_values(key, value) else: # GH#12862 adding a new key to the Series self.loc[key] = value except (TypeError, ValueError, LossySetitemError): # The key was OK, but we cannot set the value losslessly indexer = self.index.get_loc(key) self._set_values(indexer, value) except InvalidIndexError as err: if isinstance(key, tuple) and not isinstance(self.index, MultiIndex): # cases with MultiIndex don't get here bc they raise KeyError # e.g. test_basic_getitem_setitem_corner raise KeyError( "key of type tuple not found and not a MultiIndex" ) from err if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) if ( is_list_like(value) and len(value) != len(self) and not isinstance(value, Series) and not is_object_dtype(self.dtype) ): # Series will be reindexed to have matching length inside # _where call below # GH#44265 indexer = key.nonzero()[0] self._set_values(indexer, value) return # otherwise with listlike other we interpret series[mask] = other # as series[mask] = other[mask] try: self._where(~key, value, inplace=True) except InvalidIndexError: # test_where_dups self.iloc[key] = value return else: self._set_with(key, value) if cacher_needs_updating: self._maybe_update_cacher(inplace=True) def _set_with_engine(self, key, value) -> None: loc = self.index.get_loc(key) # this is equivalent to self._values[key] = value self._mgr.setitem_inplace(loc, value) def _set_with(self, key, value) -> None: # We got here via exception-handling off of InvalidIndexError, so # key should always be listlike at this point. 
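# Illustrative path (inferred, not from the original comments): on a
# label-indexed Series, ser[["a", "b"]] = 0 raises InvalidIndexError inside
# _set_with_engine's index.get_loc lookup, __setitem__ re-dispatches here,
# and the list of labels is routed on to _set_labels below.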
assert not isinstance(key, tuple) if is_iterator(key): # Without this, the call to infer_dtype will consume the generator key = list(key) if not self.index._should_fallback_to_positional: # Regardless of the key type, we're treating it as labels self._set_labels(key, value) else: # Note: key_type == "boolean" should not occur because that # should be caught by the is_bool_indexer check in __setitem__ key_type = lib.infer_dtype(key, skipna=False) if key_type == "integer": self._set_values(key, value) else: self._set_labels(key, value) def _set_labels(self, key, value) -> None: key = com.asarray_tuplesafe(key) indexer: np.ndarray = self.index.get_indexer(key) mask = indexer == -1 if mask.any(): raise KeyError(f"{key[mask]} not in index") self._set_values(indexer, value) def _set_values(self, key, value) -> None: if isinstance(key, (Index, Series)): key = key._values self._mgr = self._mgr.setitem(indexer=key, value=value) self._maybe_update_cacher() def _set_value(self, label, value, takeable: bool = False) -> None: """ Quickly set single value at passed label. If label is not contained, a new object is created with the label placed at the end of the result index. Parameters ---------- label : object Partial indexing with MultiIndex not allowed. value : object Scalar value. takeable : interpret the index as indexers, default False """ if not takeable: try: loc = self.index.get_loc(label) except KeyError: # set using a non-recursive method self.loc[label] = value return else: loc = label self._set_values(loc, value) # ---------------------------------------------------------------------- # Lookup Caching def _is_cached(self) -> bool: """Return boolean indicating if self is cached or not.""" return getattr(self, "_cacher", None) is not None def _get_cacher(self): """return my cacher or None""" cacher = getattr(self, "_cacher", None) if cacher is not None: cacher = cacher[1]() return cacher def _reset_cacher(self) -> None: """ Reset the cacher. """ if hasattr(self, "_cacher"): del self._cacher def _set_as_cached(self, item, cacher) -> None: """ Set the _cacher attribute on the calling object with a weakref to cacher. 
""" if using_copy_on_write(): return self._cacher = (item, weakref.ref(cacher)) def _clear_item_cache(self) -> None: # no-op for Series pass def _check_is_chained_assignment_possible(self) -> bool: """ See NDFrame._check_is_chained_assignment_possible.__doc__ """ if self._is_view and self._is_cached: ref = self._get_cacher() if ref is not None and ref._is_mixed_type: self._check_setitem_copy(t="referent", force=True) return True return super()._check_is_chained_assignment_possible() def _maybe_update_cacher( self, clear: bool = False, verify_is_copy: bool = True, inplace: bool = False ) -> None: """ See NDFrame._maybe_update_cacher.__doc__ """ # for CoW, we never want to update the parent DataFrame cache # if the Series changed, but don't keep track of any cacher if using_copy_on_write(): return cacher = getattr(self, "_cacher", None) if cacher is not None: assert self.ndim == 1 ref: DataFrame = cacher[1]() # we are trying to reference a dead referent, hence # a copy if ref is None: del self._cacher elif len(self) == len(ref) and self.name in ref.columns: # GH#42530 self.name must be in ref.columns # to ensure column still in dataframe # otherwise, either self or ref has swapped in new arrays ref._maybe_cache_changed(cacher[0], self, inplace=inplace) else: # GH#33675 we have swapped in a new array, so parent # reference to self is now invalid ref._item_cache.pop(cacher[0], None) super()._maybe_update_cacher( clear=clear, verify_is_copy=verify_is_copy, inplace=inplace ) # ---------------------------------------------------------------------- # Unsorted def _is_mixed_type(self) -> bool: return False def repeat(self, repeats: int | Sequence[int], axis: None = None) -> Series: """ Repeat elements of a Series. Returns a new Series where each element of the current Series is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty Series. axis : None Unused. Parameter needed for compatibility with DataFrame. Returns ------- Series Newly created Series with repeated elements. See Also -------- Index.repeat : Equivalent function for Index. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> s = pd.Series(['a', 'b', 'c']) >>> s 0 a 1 b 2 c dtype: object >>> s.repeat(2) 0 a 0 a 1 b 1 b 2 c 2 c dtype: object >>> s.repeat([1, 2, 3]) 0 a 1 b 1 b 2 c 2 c 2 c dtype: object """ nv.validate_repeat((), {"axis": axis}) new_index = self.index.repeat(repeats) new_values = self._values.repeat(repeats) return self._constructor(new_values, index=new_index, copy=False).__finalize__( self, method="repeat" ) def reset_index( self, level: IndexLabel = ..., *, drop: Literal[False] = ..., name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> DataFrame: ... def reset_index( self, level: IndexLabel = ..., *, drop: Literal[True], name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> Series: ... def reset_index( self, level: IndexLabel = ..., *, drop: bool = ..., name: Level = ..., inplace: Literal[True], allow_duplicates: bool = ..., ) -> None: ... def reset_index( self, level: IndexLabel = None, *, drop: bool = False, name: Level = lib.no_default, inplace: bool = False, allow_duplicates: bool = False, ) -> DataFrame | Series | None: """ Generate a new DataFrame or Series with the index reset. 
This is useful when the index needs to be treated as a column, or when the index is meaningless and needs to be reset to the default before another operation. Parameters ---------- level : int, str, tuple, or list, default optional For a Series with a MultiIndex, only remove the specified levels from the index. Removes all levels by default. drop : bool, default False Just reset the index, without inserting it as a column in the new DataFrame. name : object, optional The name to use for the column containing the original Series values. Uses ``self.name`` by default. This argument is ignored when `drop` is True. inplace : bool, default False Modify the Series in place (do not create a new object). allow_duplicates : bool, default False Allow duplicate column labels to be created. .. versionadded:: 1.5.0 Returns ------- Series or DataFrame or None When `drop` is False (the default), a DataFrame is returned. The newly created columns will come first in the DataFrame, followed by the original Series values. When `drop` is True, a `Series` is returned. In either case, if ``inplace=True``, no value is returned. See Also -------- DataFrame.reset_index: Analogous function for DataFrame. Examples -------- >>> s = pd.Series([1, 2, 3, 4], name='foo', ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx')) Generate a DataFrame with default index. >>> s.reset_index() idx foo 0 a 1 1 b 2 2 c 3 3 d 4 To specify the name of the new column use `name`. >>> s.reset_index(name='values') idx values 0 a 1 1 b 2 2 c 3 3 d 4 To generate a new Series with the default set `drop` to True. >>> s.reset_index(drop=True) 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 The `level` parameter is interesting for Series with a multi-level index. >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']), ... np.array(['one', 'two', 'one', 'two'])] >>> s2 = pd.Series( ... range(4), name='foo', ... index=pd.MultiIndex.from_arrays(arrays, ... names=['a', 'b'])) To remove a specific level from the Index, use `level`. >>> s2.reset_index(level='a') a foo b one bar 0 two bar 1 one baz 2 two baz 3 If `level` is not set, all levels are removed from the Index. >>> s2.reset_index() a b foo 0 bar one 0 1 bar two 1 2 baz one 2 3 baz two 3 """ inplace = validate_bool_kwarg(inplace, "inplace") if drop: new_index = default_index(len(self)) if level is not None: level_list: Sequence[Hashable] if not isinstance(level, (tuple, list)): level_list = [level] else: level_list = level level_list = [self.index._get_level_number(lev) for lev in level_list] if len(level_list) < self.index.nlevels: new_index = self.index.droplevel(level_list) if inplace: self.index = new_index elif using_copy_on_write(): new_ser = self.copy(deep=False) new_ser.index = new_index return new_ser.__finalize__(self, method="reset_index") else: return self._constructor( self._values.copy(), index=new_index, copy=False ).__finalize__(self, method="reset_index") elif inplace: raise TypeError( "Cannot reset_index inplace on a Series to create a DataFrame" ) else: if name is lib.no_default: # For backwards compatibility, keep columns as [0] instead of # [None] when self.name is None if self.name is None: name = 0 else: name = self.name df = self.to_frame(name) return df.reset_index( level=level, drop=drop, allow_duplicates=allow_duplicates ) return None # ---------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str: """ Return a string representation for a particular Series. 
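        The rendered output respects the ``display.max_rows`` and
        ``display.min_rows`` options, which ``fmt.get_series_repr_params``
        resolves.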
""" # pylint: disable=invalid-repr-returned repr_params = fmt.get_series_repr_params() return self.to_string(**repr_params) def to_string( self, buf: None = ..., na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> str: ... def to_string( self, buf: FilePath | WriteBuffer[str], na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> None: ... def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, na_rep: str = "NaN", float_format: str | None = None, header: bool = True, index: bool = True, length: bool = False, dtype: bool = False, name: bool = False, max_rows: int | None = None, min_rows: int | None = None, ) -> str | None: """ Render a string representation of the Series. Parameters ---------- buf : StringIO-like, optional Buffer to write to. na_rep : str, optional String representation of NaN to use, default 'NaN'. float_format : one-parameter function, optional Formatter function to apply to columns' elements if they are floats, default None. header : bool, default True Add the Series header (index name). index : bool, optional Add index (row) labels, default True. length : bool, default False Add the Series length. dtype : bool, default False Add the Series dtype. name : bool, default False Add the Series name if not None. max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. min_rows : int, optional The number of rows to display in a truncated repr (when number of rows is above `max_rows`). Returns ------- str or None String representation of Series if ``buf=None``, otherwise None. """ formatter = fmt.SeriesFormatter( self, name=name, length=length, header=header, index=index, dtype=dtype, na_rep=na_rep, float_format=float_format, min_rows=min_rows, max_rows=max_rows, ) result = formatter.to_string() # catch contract violations if not isinstance(result, str): raise AssertionError( "result must be of type str, type " f"of result is {repr(type(result).__name__)}" ) if buf is None: return result else: if hasattr(buf, "write"): buf.write(result) else: with open(buf, "w") as f: f.write(result) return None klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples=dedent( """Examples -------- >>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal") >>> print(s.to_markdown()) | | animal | |---:|:---------| | 0 | elk | | 1 | pig | | 2 | dog | | 3 | quetzal | Output markdown with a tabulate option. >>> print(s.to_markdown(tablefmt="grid")) +----+----------+ | | animal | +====+==========+ | 0 | elk | +----+----------+ | 1 | pig | +----+----------+ | 2 | dog | +----+----------+ | 3 | quetzal | +----+----------+""" ), ) def to_markdown( self, buf: IO[str] | None = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions = None, **kwargs, ) -> str | None: """ Print {klass} in Markdown-friendly format. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. mode : str, optional Mode in which file is opened, "wt" by default. index : bool, optional, default True Add index (row) labels. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 **kwargs These parameters will be passed to `tabulate \ <https://pypi.org/project/tabulate>`_. 
Returns ------- str {klass} in Markdown-friendly format. Notes ----- Requires the `tabulate <https://pypi.org/project/tabulate>`_ package. {examples} """ return self.to_frame().to_markdown( buf, mode, index, storage_options=storage_options, **kwargs ) # ---------------------------------------------------------------------- def items(self) -> Iterable[tuple[Hashable, Any]]: """ Lazily iterate over (index, value) tuples. This method returns an iterable tuple (index, value). This is convenient if you want to create a lazy iterator. Returns ------- iterable Iterable of tuples containing the (index, value) pairs from a Series. See Also -------- DataFrame.items : Iterate over (column name, Series) pairs. DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. Examples -------- >>> s = pd.Series(['A', 'B', 'C']) >>> for index, value in s.items(): ... print(f"Index : {index}, Value : {value}") Index : 0, Value : A Index : 1, Value : B Index : 2, Value : C """ return zip(iter(self.index), iter(self)) # ---------------------------------------------------------------------- # Misc public methods def keys(self) -> Index: """ Return alias for index. Returns ------- Index Index of the Series. """ return self.index def to_dict(self, into: type[dict] = dict) -> dict: """ Convert Series to {label -> value} dict or dict-like object. Parameters ---------- into : class, default dict The collections.abc.Mapping subclass to use as the return object. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- collections.abc.Mapping Key-value representation of Series. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.to_dict() {0: 1, 1: 2, 2: 3, 3: 4} >>> from collections import OrderedDict, defaultdict >>> s.to_dict(OrderedDict) OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> dd = defaultdict(list) >>> s.to_dict(dd) defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4}) """ # GH16122 into_c = com.standardize_mapping(into) if is_object_dtype(self) or is_extension_array_dtype(self): return into_c((k, maybe_box_native(v)) for k, v in self.items()) else: # Not an object dtype => all types will be the same so let the default # indexer return native python type return into_c(self.items()) def to_frame(self, name: Hashable = lib.no_default) -> DataFrame: """ Convert Series to DataFrame. Parameters ---------- name : object, optional The passed name should substitute for the series name (if it has one). Returns ------- DataFrame DataFrame representation of Series. Examples -------- >>> s = pd.Series(["a", "b", "c"], ... name="vals") >>> s.to_frame() vals 0 a 1 b 2 c """ columns: Index if name is lib.no_default: name = self.name if name is None: # default to [0], same as we would get with DataFrame(self) columns = default_index(1) else: columns = Index([name]) else: columns = Index([name]) mgr = self._mgr.to_2d_mgr(columns) df = self._constructor_expanddim(mgr) return df.__finalize__(self, method="to_frame") def _set_name(self, name, inplace: bool = False) -> Series: """ Set the Series name. Parameters ---------- name : str inplace : bool Whether to modify `self` directly or return a copy. """ inplace = validate_bool_kwarg(inplace, "inplace") ser = self if inplace else self.copy() ser.name = name return ser """ Examples -------- >>> ser = pd.Series([390., 350., 30., 20.], ... 
index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], name="Max Speed") >>> ser Falcon 390.0 Falcon 350.0 Parrot 30.0 Parrot 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", "b"]).mean() a 210.0 b 185.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(ser > 100).mean() Max Speed False 25.0 True 370.0 Name: Max Speed, dtype: float64 **Grouping by Indexes** We can groupby different levels of a hierarchical index using the `level` parameter: >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], ... ['Captive', 'Wild', 'Captive', 'Wild']] >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) >>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed") >>> ser Animal Type Falcon Captive 390.0 Wild 350.0 Parrot Captive 30.0 Wild 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Animal Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level="Type").mean() Type Captive 210.0 Wild 185.0 Name: Max Speed, dtype: float64 We can also choose to include `NA` in group keys or not by defining `dropna` parameter, the default setting is `True`. >>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan]) >>> ser.groupby(level=0).sum() a 3 b 3 dtype: int64 >>> ser.groupby(level=0, dropna=False).sum() a 3 b 3 NaN 3 dtype: int64 >>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot'] >>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed") >>> ser.groupby(["a", "b", "a", np.nan]).mean() a 210.0 b 350.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean() a 210.0 b 350.0 NaN 20.0 Name: Max Speed, dtype: float64 """ ) def groupby( self, by=None, axis: Axis = 0, level: IndexLabel = None, as_index: bool = True, sort: bool = True, group_keys: bool = True, observed: bool = False, dropna: bool = True, ) -> SeriesGroupBy: from pandas.core.groupby.generic import SeriesGroupBy if level is None and by is None: raise TypeError("You have to supply one of 'by' and 'level'") if not as_index: raise TypeError("as_index=False only valid with DataFrame") axis = self._get_axis_number(axis) return SeriesGroupBy( obj=self, keys=by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, observed=observed, dropna=dropna, ) # ---------------------------------------------------------------------- # Statistics, overridden ndarray methods # TODO: integrate bottleneck def count(self): """ Return number of non-NA/null observations in the Series. Returns ------- int or Series (if level specified) Number of non-null values in the Series. See Also -------- DataFrame.count : Count non-NA cells for each column or row. Examples -------- >>> s = pd.Series([0.0, 1.0, np.nan]) >>> s.count() 2 """ return notna(self._values).sum().astype("int64") def mode(self, dropna: bool = True) -> Series: """ Return the mode(s) of the Series. The mode is the value that appears most often. There can be multiple modes. Always returns Series even if only one value is returned. Parameters ---------- dropna : bool, default True Don't consider counts of NaN/NaT. Returns ------- Series Modes of the Series in sorted order. 
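        Examples
        --------
        A minimal illustration (assuming the default ``dropna=True``):

        >>> pd.Series([2, 4, 2, 2, 4, None]).mode()
        0    2.0
        dtype: float64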
""" # TODO: Add option for bins like value_counts() values = self._values if isinstance(values, np.ndarray): res_values = algorithms.mode(values, dropna=dropna) else: res_values = values._mode(dropna=dropna) # Ensure index is type stable (should always use int index) return self._constructor( res_values, index=range(len(res_values)), name=self.name, copy=False ) def unique(self) -> ArrayLike: # pylint: disable=useless-parent-delegation """ Return unique values of Series object. Uniques are returned in order of appearance. Hash table-based unique, therefore does NOT sort. Returns ------- ndarray or ExtensionArray The unique values returned as a NumPy array. See Notes. See Also -------- Series.drop_duplicates : Return Series with duplicate values removed. unique : Top-level unique method for any 1-d array-like object. Index.unique : Return Index with unique values from an Index object. Notes ----- Returns the unique values as a NumPy array. In case of an extension-array backed Series, a new :class:`~api.extensions.ExtensionArray` of that type with just the unique values is returned. This includes * Categorical * Period * Datetime with Timezone * Datetime without Timezone * Timedelta * Interval * Sparse * IntegerNA See Examples section. Examples -------- >>> pd.Series([2, 1, 3, 3], name='A').unique() array([2, 1, 3]) >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00'] Length: 1, dtype: datetime64[ns] >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern') ... for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00-05:00'] Length: 1, dtype: datetime64[ns, US/Eastern] An Categorical will return categories in the order of appearance and with the same dtype. >>> pd.Series(pd.Categorical(list('baabc'))).unique() ['b', 'a', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'), ... ordered=True)).unique() ['b', 'a', 'c'] Categories (3, object): ['a' < 'b' < 'c'] """ return super().unique() def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[False] = ..., ignore_index: bool = ..., ) -> Series: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[True], ignore_index: bool = ... ) -> None: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: bool = ..., ignore_index: bool = ... ) -> Series | None: ... def drop_duplicates( self, *, keep: DropKeep = "first", inplace: bool = False, ignore_index: bool = False, ) -> Series | None: """ Return Series with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' Method to handle dropping duplicates: - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. inplace : bool, default ``False`` If ``True``, performs operation inplace and returns None. ignore_index : bool, default ``False`` If ``True``, the resulting axis will be labeled 0, 1, …, n - 1. .. versionadded:: 2.0.0 Returns ------- Series or None Series with duplicates dropped or None if ``inplace=True``. See Also -------- Index.drop_duplicates : Equivalent method on Index. DataFrame.drop_duplicates : Equivalent method on DataFrame. Series.duplicated : Related method on Series, indicating duplicate Series values. Series.unique : Return unique values as an array. Examples -------- Generate a Series with duplicated entries. 
>>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], ... name='animal') >>> s 0 lama 1 cow 2 lama 3 beetle 4 lama 5 hippo Name: animal, dtype: object With the 'keep' parameter, the selection behaviour of duplicated values can be changed. The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> s.drop_duplicates() 0 lama 1 cow 3 beetle 5 hippo Name: animal, dtype: object The value 'last' for parameter 'keep' keeps the last occurrence for each set of duplicated entries. >>> s.drop_duplicates(keep='last') 1 cow 3 beetle 4 lama 5 hippo Name: animal, dtype: object The value ``False`` for parameter 'keep' discards all sets of duplicated entries. >>> s.drop_duplicates(keep=False) 1 cow 3 beetle 5 hippo Name: animal, dtype: object """ inplace = validate_bool_kwarg(inplace, "inplace") result = super().drop_duplicates(keep=keep) if ignore_index: result.index = default_index(len(result)) if inplace: self._update_inplace(result) return None else: return result def duplicated(self, keep: DropKeep = "first") -> Series: """ Indicate duplicate Series values. Duplicated values are indicated as ``True`` values in the resulting Series. Either all duplicates, all except the first or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' Method to handle dropping duplicates: - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- Series[bool] Series indicating whether each value has occurred in the preceding values. See Also -------- Index.duplicated : Equivalent method on pandas.Index. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Series.drop_duplicates : Remove duplicate values from Series. Examples -------- By default, for each set of duplicated values, the first occurrence is set on False and all others on True: >>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> animals.duplicated() 0 False 1 False 2 True 3 False 4 True dtype: bool which is equivalent to >>> animals.duplicated(keep='first') 0 False 1 False 2 True 3 False 4 True dtype: bool By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True: >>> animals.duplicated(keep='last') 0 True 1 False 2 True 3 False 4 False dtype: bool By setting keep on ``False``, all duplicates are True: >>> animals.duplicated(keep=False) 0 True 1 False 2 True 3 False 4 True dtype: bool """ res = self._duplicated(keep=keep) result = self._constructor(res, index=self.index, copy=False) return result.__finalize__(self, method="duplicated") def idxmin(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the minimum value. If multiple values equal the minimum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the minimum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmin : Return indices of the minimum values along the given axis. 
DataFrame.idxmin : Return index of first occurrence of minimum over requested axis. Series.idxmax : Return index *label* of the first occurrence of maximum of values. Notes ----- This method is the Series version of ``ndarray.argmin``. This method returns the label of the minimum, while ``ndarray.argmin`` returns the position. To get the position, use ``series.values.argmin()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 1], ... index=['A', 'B', 'C', 'D']) >>> s A 1.0 B NaN C 4.0 D 1.0 dtype: float64 >>> s.idxmin() 'A' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmin(skipna=False) nan """ # error: Argument 1 to "argmin" of "IndexOpsMixin" has incompatible type "Union # [int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmin(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def idxmax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the maximum value. If multiple values equal the maximum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the maximum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmax : Return indices of the maximum values along the given axis. DataFrame.idxmax : Return index of first occurrence of maximum over requested axis. Series.idxmin : Return index *label* of the first occurrence of minimum of values. Notes ----- This method is the Series version of ``ndarray.argmax``. This method returns the label of the maximum, while ``ndarray.argmax`` returns the position. To get the position, use ``series.values.argmax()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 3, 4], ... index=['A', 'B', 'C', 'D', 'E']) >>> s A 1.0 B NaN C 4.0 D 3.0 E 4.0 dtype: float64 >>> s.idxmax() 'C' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmax(skipna=False) nan """ # error: Argument 1 to "argmax" of "IndexOpsMixin" has incompatible type # "Union[int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmax(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def round(self, decimals: int = 0, *args, **kwargs) -> Series: """ Round each value in a Series to the given number of decimals. Parameters ---------- decimals : int, default 0 Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Series Rounded values of the Series. See Also -------- numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. Examples -------- >>> s = pd.Series([0.1, 1.3, 2.7]) >>> s.round() 0 0.0 1 1.0 2 3.0 dtype: float64 """ nv.validate_round(args, kwargs) result = self._values.round(decimals) result = self._constructor(result, index=self.index, copy=False).__finalize__( self, method="round" ) return result def quantile( self, q: float = ..., interpolation: QuantileInterpolation = ... 
) -> float: ... def quantile( self, q: Sequence[float] | AnyArrayLike, interpolation: QuantileInterpolation = ..., ) -> Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = ..., interpolation: QuantileInterpolation = ..., ) -> float | Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = 0.5, interpolation: QuantileInterpolation = "linear", ) -> float | Series: """ Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) The quantile(s) to compute, which can lie in range: 0 <= q <= 1. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- float or Series If ``q`` is an array, a Series will be returned where the index is ``q`` and the values are the quantiles, otherwise a float will be returned. See Also -------- core.window.Rolling.quantile : Calculate the rolling quantile. numpy.percentile : Returns the q-th percentile(s) of the array elements. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.quantile(.5) 2.5 >>> s.quantile([.25, .5, .75]) 0.25 1.75 0.50 2.50 0.75 3.25 dtype: float64 """ validate_percentile(q) # We dispatch to DataFrame so that core.internals only has to worry # about 2D cases. df = self.to_frame() result = df.quantile(q=q, interpolation=interpolation, numeric_only=False) if result.ndim == 2: result = result.iloc[:, 0] if is_list_like(q): result.name = self.name idx = Index(q, dtype=np.float64) return self._constructor(result, index=idx, name=self.name) else: # scalar return result.iloc[0] def corr( self, other: Series, method: CorrelationMethod = "pearson", min_periods: int | None = None, ) -> float: """ Compute correlation with `other` Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the correlation function is applied. Parameters ---------- other : Series Series with which to compute the correlation. method : {'pearson', 'kendall', 'spearman'} or callable Method used to compute correlation: - pearson : Standard correlation coefficient - kendall : Kendall Tau correlation coefficient - spearman : Spearman rank correlation - callable: Callable with input two 1d ndarrays and returning a float. .. warning:: Note that the returned matrix from corr will have 1 along the diagonals and will be symmetric regardless of the callable's behavior. min_periods : int, optional Minimum number of observations needed to have a valid result. Returns ------- float Correlation with other. See Also -------- DataFrame.corr : Compute pairwise correlation between columns. DataFrame.corrwith : Compute pairwise correlation with another DataFrame or Series. Notes ----- Pearson, Kendall and Spearman correlation are currently computed using pairwise complete observations. 
        * `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_
        * `Kendall rank correlation coefficient <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_
        * `Spearman's rank correlation coefficient <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_

        Examples
        --------
        >>> def histogram_intersection(a, b):
        ...     v = np.minimum(a, b).sum().round(decimals=1)
        ...     return v
        >>> s1 = pd.Series([.2, .0, .6, .2])
        >>> s2 = pd.Series([.3, .6, .0, .1])
        >>> s1.corr(s2, method=histogram_intersection)
        0.3
        """  # noqa:E501
        this, other = self.align(other, join="inner", copy=False)
        if len(this) == 0:
            return np.nan

        if method in ["pearson", "spearman", "kendall"] or callable(method):
            return nanops.nancorr(
                this.values, other.values, method=method, min_periods=min_periods
            )

        raise ValueError(
            "method must be either 'pearson', "
            "'spearman', 'kendall', or a callable, "
            f"'{method}' was supplied"
        )

    def cov(
        self,
        other: Series,
        min_periods: int | None = None,
        ddof: int | None = 1,
    ) -> float:
        """
        Compute covariance with Series, excluding missing values.

        The two `Series` objects are not required to be the same length and
        will be aligned internally before the covariance is calculated.

        Parameters
        ----------
        other : Series
            Series with which to compute the covariance.
        min_periods : int, optional
            Minimum number of observations needed to have a valid result.
        ddof : int, default 1
            Delta degrees of freedom. The divisor used in calculations
            is ``N - ddof``, where ``N`` represents the number of elements.

            .. versionadded:: 1.1.0

        Returns
        -------
        float
            Covariance between Series and other normalized by N-1
            (unbiased estimator).

        See Also
        --------
        DataFrame.cov : Compute pairwise covariance of columns.

        Examples
        --------
        >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035])
        >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198])
        >>> s1.cov(s2)
        -0.01685762652715874
        """
        this, other = self.align(other, join="inner", copy=False)
        if len(this) == 0:
            return np.nan
        return nanops.nancov(
            this.values, other.values, min_periods=min_periods, ddof=ddof
        )

    @doc(
        klass="Series",
        extra_params="",
        other_klass="DataFrame",
        examples=dedent(
            """
        Difference with previous row

        >>> s = pd.Series([1, 1, 2, 3, 5, 8])
        >>> s.diff()
        0    NaN
        1    0.0
        2    1.0
        3    1.0
        4    2.0
        5    3.0
        dtype: float64

        Difference with 3rd previous row

        >>> s.diff(periods=3)
        0    NaN
        1    NaN
        2    NaN
        3    2.0
        4    4.0
        5    6.0
        dtype: float64

        Difference with following row

        >>> s.diff(periods=-1)
        0    0.0
        1   -1.0
        2   -1.0
        3   -2.0
        4   -3.0
        5    NaN
        dtype: float64

        Overflow in input dtype

        >>> s = pd.Series([1, 0], dtype=np.uint8)
        >>> s.diff()
        0      NaN
        1    255.0
        dtype: float64"""
        ),
    )
    def diff(self, periods: int = 1) -> Series:
        """
        First discrete difference of element.

        Calculates the difference of a {klass} element compared with another
        element in the {klass} (default is element in previous row).

        Parameters
        ----------
        periods : int, default 1
            Periods to shift for calculating difference, accepts negative
            values.
        {extra_params}

        Returns
        -------
        {klass}
            First differences of the Series.

        See Also
        --------
        {klass}.pct_change: Percent change over given number of periods.
        {klass}.shift: Shift index by desired number of periods with an
            optional time freq.
        {other_klass}.diff: First discrete difference of object.

        Notes
        -----
        For boolean dtypes, this uses :meth:`operator.xor` rather than
        :meth:`operator.sub`.
        The result is calculated according to current dtype in {klass},
        however dtype of the result is always float64.
Examples -------- {examples} """ result = algorithms.diff(self._values, periods) return self._constructor(result, index=self.index, copy=False).__finalize__( self, method="diff" ) def autocorr(self, lag: int = 1) -> float: """ Compute the lag-N autocorrelation. This method computes the Pearson correlation between the Series and its shifted self. Parameters ---------- lag : int, default 1 Number of lags to apply before performing autocorrelation. Returns ------- float The Pearson correlation between self and self.shift(lag). See Also -------- Series.corr : Compute the correlation between two Series. Series.shift : Shift index by desired number of periods. DataFrame.corr : Compute pairwise correlation of columns. DataFrame.corrwith : Compute pairwise correlation between rows or columns of two DataFrame objects. Notes ----- If the Pearson correlation is not well defined return 'NaN'. Examples -------- >>> s = pd.Series([0.25, 0.5, 0.2, -0.05]) >>> s.autocorr() # doctest: +ELLIPSIS 0.10355... >>> s.autocorr(lag=2) # doctest: +ELLIPSIS -0.99999... If the Pearson correlation is not well defined, then 'NaN' is returned. >>> s = pd.Series([1, 0, 0, 0]) >>> s.autocorr() nan """ return self.corr(self.shift(lag)) def dot(self, other: AnyArrayLike) -> Series | np.ndarray: """ Compute the dot product between the Series and the columns of other. This method computes the dot product between the Series and another one, or the Series and each columns of a DataFrame, or the Series and each columns of an array. It can also be called using `self @ other` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the dot product with its columns. Returns ------- scalar, Series or numpy.ndarray Return the dot product of the Series and other if other is a Series, the Series of the dot product of Series and each rows of other if other is a DataFrame or a numpy.ndarray between the Series and each columns of the numpy array. See Also -------- DataFrame.dot: Compute the matrix product with the DataFrame. Series.mul: Multiplication of series and other, element-wise. Notes ----- The Series and other has to share the same index if other is a Series or a DataFrame. Examples -------- >>> s = pd.Series([0, 1, 2, 3]) >>> other = pd.Series([-1, 2, -3, 4]) >>> s.dot(other) 8 >>> s @ other 8 >>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(df) 0 24 1 14 dtype: int64 >>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(arr) array([24, 14]) """ if isinstance(other, (Series, ABCDataFrame)): common = self.index.union(other.index) if len(common) > len(self.index) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(index=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right.values else: lvals = self.values rvals = np.asarray(other) if lvals.shape[0] != rvals.shape[0]: raise Exception( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, ABCDataFrame): return self._constructor( np.dot(lvals, rvals), index=other.columns, copy=False ).__finalize__(self, method="dot") elif isinstance(other, Series): return np.dot(lvals, rvals) elif isinstance(rvals, np.ndarray): return np.dot(lvals, rvals) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. 
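        A quick sketch mirroring the ``Series.dot`` examples above:

        >>> pd.Series([0, 1, 2, 3]) @ pd.Series([-1, 2, -3, 4])
        8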
""" return self.dot(other) def __rmatmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(np.transpose(other)) # Signature of "searchsorted" incompatible with supertype "IndexOpsMixin" def searchsorted( # type: ignore[override] self, value: NumpyValueArrayLike | ExtensionArray, side: Literal["left", "right"] = "left", sorter: NumpySorter = None, ) -> npt.NDArray[np.intp] | np.intp: return base.IndexOpsMixin.searchsorted(self, value, side=side, sorter=sorter) # ------------------------------------------------------------------- # Combination def _append( self, to_append, ignore_index: bool = False, verify_integrity: bool = False ): from pandas.core.reshape.concat import concat if isinstance(to_append, (list, tuple)): to_concat = [self] to_concat.extend(to_append) else: to_concat = [self, to_append] if any(isinstance(x, (ABCDataFrame,)) for x in to_concat[1:]): msg = "to_append should be a Series or list/tuple of Series, got DataFrame" raise TypeError(msg) return concat( to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity ) def _binop(self, other: Series, func, level=None, fill_value=None): """ Perform generic binary operation with optional fill value. Parameters ---------- other : Series func : binary operator fill_value : float or object Value to substitute for NA/null values. If both Series are NA in a location, the result will be NA regardless of the passed fill value. level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. Returns ------- Series """ if not isinstance(other, Series): raise AssertionError("Other operand must be Series") this = self if not self.index.equals(other.index): this, other = self.align(other, level=level, join="outer", copy=False) this_vals, other_vals = ops.fill_binop(this._values, other._values, fill_value) with np.errstate(all="ignore"): result = func(this_vals, other_vals) name = ops.get_op_result_name(self, other) return this._construct_result(result, name) def _construct_result( self, result: ArrayLike | tuple[ArrayLike, ArrayLike], name: Hashable ) -> Series | tuple[Series, Series]: """ Construct an appropriately-labelled Series from the result of an op. Parameters ---------- result : ndarray or ExtensionArray name : Label Returns ------- Series In the case of __divmod__ or __rdivmod__, a 2-tuple of Series. """ if isinstance(result, tuple): # produced by divmod or rdivmod res1 = self._construct_result(result[0], name=name) res2 = self._construct_result(result[1], name=name) # GH#33427 assertions to keep mypy happy assert isinstance(res1, Series) assert isinstance(res2, Series) return (res1, res2) # TODO: result should always be ArrayLike, but this fails for some # JSONArray tests dtype = getattr(result, "dtype", None) out = self._constructor(result, index=self.index, dtype=dtype) out = out.__finalize__(self) # Set the result's name after __finalize__ is called because __finalize__ # would set it back to self.name out.name = name return out _shared_docs["compare"], """ Returns ------- Series or DataFrame If axis is 0 or 'index' the result will be a Series. The resulting index will be a MultiIndex with 'self' and 'other' stacked alternately at the inner level. If axis is 1 or 'columns' the result will be a DataFrame. It will have two columns namely 'self' and 'other'. See Also -------- DataFrame.compare : Compare with another DataFrame and show differences. Notes ----- Matching NaNs will not appear as a difference. 
Examples -------- >>> s1 = pd.Series(["a", "b", "c", "d", "e"]) >>> s2 = pd.Series(["a", "a", "c", "b", "e"]) Align the differences on columns >>> s1.compare(s2) self other 1 b a 3 d b Stack the differences on indices >>> s1.compare(s2, align_axis=0) 1 self b other a 3 self d other b dtype: object Keep all original rows >>> s1.compare(s2, keep_shape=True) self other 0 NaN NaN 1 b a 2 NaN NaN 3 d b 4 NaN NaN Keep all original rows and also all original values >>> s1.compare(s2, keep_shape=True, keep_equal=True) self other 0 a a 1 b a 2 c c 3 d b 4 e e """, klass=_shared_doc_kwargs["klass"], ) def compare( self, other: Series, align_axis: Axis = 1, keep_shape: bool = False, keep_equal: bool = False, result_names: Suffixes = ("self", "other"), ) -> DataFrame | Series: return super().compare( other=other, align_axis=align_axis, keep_shape=keep_shape, keep_equal=keep_equal, result_names=result_names, ) def combine( self, other: Series | Hashable, func: Callable[[Hashable, Hashable], Hashable], fill_value: Hashable = None, ) -> Series: """ Combine the Series with a Series or scalar according to `func`. Combine the Series and `other` using `func` to perform elementwise selection for combined Series. `fill_value` is assumed when value is missing at some index from one of the two objects being combined. Parameters ---------- other : Series or scalar The value(s) to be combined with the `Series`. func : function Function that takes two scalars as inputs and returns an element. fill_value : scalar, optional The value to assume when an index is missing from one Series or the other. The default specifies to use the appropriate NaN value for the underlying dtype of the Series. Returns ------- Series The result of combining the Series with the other object. See Also -------- Series.combine_first : Combine Series values, choosing the calling Series' values first. Examples -------- Consider 2 Datasets ``s1`` and ``s2`` containing highest clocked speeds of different birds. >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0}) >>> s1 falcon 330.0 eagle 160.0 dtype: float64 >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0}) >>> s2 falcon 345.0 eagle 200.0 duck 30.0 dtype: float64 Now, to combine the two datasets and view the highest speeds of the birds across the two datasets >>> s1.combine(s2, max) duck NaN eagle 200.0 falcon 345.0 dtype: float64 In the previous example, the resulting value for duck is missing, because the maximum of a NaN and a float is a NaN. So, in the example, we set ``fill_value=0``, so the maximum value returned will be the value from some dataset. 
>>> s1.combine(s2, max, fill_value=0) duck 30.0 eagle 200.0 falcon 345.0 dtype: float64 """ if fill_value is None: fill_value = na_value_for_dtype(self.dtype, compat=False) if isinstance(other, Series): # If other is a Series, result is based on union of Series, # so do this element by element new_index = self.index.union(other.index) new_name = ops.get_op_result_name(self, other) new_values = np.empty(len(new_index), dtype=object) for i, idx in enumerate(new_index): lv = self.get(idx, fill_value) rv = other.get(idx, fill_value) with np.errstate(all="ignore"): new_values[i] = func(lv, rv) else: # Assume that other is a scalar, so apply the function for # each element in the Series new_index = self.index new_values = np.empty(len(new_index), dtype=object) with np.errstate(all="ignore"): new_values[:] = [func(lv, other) for lv in self._values] new_name = self.name # try_float=False is to match agg_series npvalues = lib.maybe_convert_objects(new_values, try_float=False) res_values = maybe_cast_pointwise_result(npvalues, self.dtype, same_dtype=False) return self._constructor(res_values, index=new_index, name=new_name, copy=False) def combine_first(self, other) -> Series: """ Update null elements with value in the same location in 'other'. Combine two Series objects by filling null values in one Series with non-null values from the other Series. Result index will be the union of the two indexes. Parameters ---------- other : Series The value(s) to be used for filling null values. Returns ------- Series The result of combining the provided Series with the other object. See Also -------- Series.combine : Perform element-wise operation on two Series using a given function. Examples -------- >>> s1 = pd.Series([1, np.nan]) >>> s2 = pd.Series([3, 4, 5]) >>> s1.combine_first(s2) 0 1.0 1 4.0 2 5.0 dtype: float64 Null values still persist if the location of that null value does not exist in `other` >>> s1 = pd.Series({'falcon': np.nan, 'eagle': 160.0}) >>> s2 = pd.Series({'eagle': 200.0, 'duck': 30.0}) >>> s1.combine_first(s2) duck 30.0 eagle 160.0 falcon NaN dtype: float64 """ new_index = self.index.union(other.index) this = self.reindex(new_index, copy=False) other = other.reindex(new_index, copy=False) if this.dtype.kind == "M" and other.dtype.kind != "M": other = to_datetime(other) return this.where(notna(this), other) def update(self, other: Series | Sequence | Mapping) -> None: """ Modify Series in place using values from passed Series. Uses non-NA values from passed Series to make updates. Aligns on index. Parameters ---------- other : Series, or object coercible into Series Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6])) >>> s 0 4 1 5 2 6 dtype: int64 >>> s = pd.Series(['a', 'b', 'c']) >>> s.update(pd.Series(['d', 'e'], index=[0, 2])) >>> s 0 d 1 b 2 e dtype: object >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6, 7, 8])) >>> s 0 4 1 5 2 6 dtype: int64 If ``other`` contains NaNs the corresponding values are not updated in the original Series. 
>>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, np.nan, 6])) >>> s 0 4 1 2 2 6 dtype: int64 ``other`` can also be a non-Series object type that is coercible into a Series >>> s = pd.Series([1, 2, 3]) >>> s.update([4, np.nan, 6]) >>> s 0 4 1 2 2 6 dtype: int64 >>> s = pd.Series([1, 2, 3]) >>> s.update({1: 9}) >>> s 0 1 1 9 2 3 dtype: int64 """ if not isinstance(other, Series): other = Series(other) other = other.reindex_like(self) mask = notna(other) self._mgr = self._mgr.putmask(mask=mask, new=other) self._maybe_update_cacher() # ---------------------------------------------------------------------- # Reindexing, sorting def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[False] = ..., kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> Series: ... def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[True], kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> None: ... def sort_values( self, *, axis: Axis = 0, ascending: bool | int | Sequence[bool] | Sequence[int] = True, inplace: bool = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool = False, key: ValueKeyFunc = None, ) -> Series | None: """ Sort by the values. Sort a Series in ascending or descending order by some criterion. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. ascending : bool or list of bools, default True If True, sort values in ascending order, otherwise descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the series values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return an array-like. .. versionadded:: 1.1.0 Returns ------- Series or None Series ordered by values or None if ``inplace=True``. See Also -------- Series.sort_index : Sort by the Series indices. DataFrame.sort_values : Sort DataFrame by the values along either axis. DataFrame.sort_index : Sort DataFrame by indices. Examples -------- >>> s = pd.Series([np.nan, 1, 3, 10, 5]) >>> s 0 NaN 1 1.0 2 3.0 3 10.0 4 5.0 dtype: float64 Sort values ascending order (default behaviour) >>> s.sort_values(ascending=True) 1 1.0 2 3.0 4 5.0 3 10.0 0 NaN dtype: float64 Sort values descending order >>> s.sort_values(ascending=False) 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN dtype: float64 Sort values putting NAs first >>> s.sort_values(na_position='first') 0 NaN 1 1.0 2 3.0 4 5.0 3 10.0 dtype: float64 Sort a series of strings >>> s = pd.Series(['z', 'b', 'd', 'a', 'c']) >>> s 0 z 1 b 2 d 3 a 4 c dtype: object >>> s.sort_values() 3 a 1 b 4 c 2 d 0 z dtype: object Sort using a key function. Your `key` function will be given the ``Series`` of values and should return an array-like. 
>>> s = pd.Series(['a', 'B', 'c', 'D', 'e']) >>> s.sort_values() 1 B 3 D 0 a 2 c 4 e dtype: object >>> s.sort_values(key=lambda x: x.str.lower()) 0 a 1 B 2 c 3 D 4 e dtype: object NumPy ufuncs work well here. For example, we can sort by the ``sin`` of the value >>> s = pd.Series([-4, -2, 0, 2, 4]) >>> s.sort_values(key=np.sin) 1 -2 4 4 2 0 0 -4 3 2 dtype: int64 More complicated user-defined functions can be used, as long as they expect a Series and return an array-like >>> s.sort_values(key=lambda x: (np.tan(x.cumsum()))) 0 -4 3 2 4 4 1 -2 2 0 dtype: int64 """ inplace = validate_bool_kwarg(inplace, "inplace") # Validate the axis parameter self._get_axis_number(axis) # GH 5856/5853 if inplace and self._is_cached: raise ValueError( "This Series is a view of some other array, to " "sort in-place you must create a copy" ) if is_list_like(ascending): ascending = cast(Sequence[Union[bool, int]], ascending) if len(ascending) != 1: raise ValueError( f"Length of ascending ({len(ascending)}) must be 1 for Series" ) ascending = ascending[0] ascending = validate_ascending(ascending) if na_position not in ["first", "last"]: raise ValueError(f"invalid na_position: {na_position}") # GH 35922. Make sorting stable by leveraging nargsort values_to_sort = ensure_key_mapped(self, key)._values if key else self._values sorted_index = nargsort(values_to_sort, kind, bool(ascending), na_position) if is_range_indexer(sorted_index, len(sorted_index)): if inplace: return self._update_inplace(self) return self.copy(deep=None) result = self._constructor( self._values[sorted_index], index=self.index[sorted_index], copy=False ) if ignore_index: result.index = default_index(len(sorted_index)) if not inplace: return result.__finalize__(self, method="sort_values") self._update_inplace(result) return None def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[True], kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> None: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[False] = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: bool = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series | None: ... def sort_index( self, *, axis: Axis = 0, level: IndexLabel = None, ascending: bool | Sequence[bool] = True, inplace: bool = False, kind: SortKind = "quicksort", na_position: NaPosition = "last", sort_remaining: bool = True, ignore_index: bool = False, key: IndexKeyFunc = None, ) -> Series | None: """ Sort Series by index labels. Returns a new Series sorted by label if `inplace` argument is ``False``, otherwise updates the original series and returns None. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. level : int, optional If not None, sort on values in specified index level(s). ascending : bool or list-like of bools, default True Sort ascending vs. descending. When the index is a MultiIndex the sort direction can be controlled for each level individually. inplace : bool, default False If True, perform operation in-place. 
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- Series or None The original Series sorted by the labels or None if ``inplace=True``. See Also -------- DataFrame.sort_index: Sort DataFrame by the index. DataFrame.sort_values: Sort DataFrame by the value. Series.sort_values : Sort Series by the value. Examples -------- >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4]) >>> s.sort_index() 1 c 2 b 3 a 4 d dtype: object Sort Descending >>> s.sort_index(ascending=False) 4 d 3 a 2 b 1 c dtype: object By default NaNs are put at the end, but use `na_position` to place them at the beginning >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan]) >>> s.sort_index(na_position='first') NaN d 1.0 c 2.0 b 3.0 a dtype: object Specify index level to sort >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo', ... 'baz', 'baz', 'bar', 'bar']), ... np.array(['two', 'one', 'two', 'one', ... 'two', 'one', 'two', 'one'])] >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays) >>> s.sort_index(level=1) bar one 8 baz one 6 foo one 4 qux one 2 bar two 7 baz two 5 foo two 3 qux two 1 dtype: int64 Does not sort by remaining levels when sorting by levels >>> s.sort_index(level=1, sort_remaining=False) qux one 2 foo one 4 baz one 6 bar one 8 qux two 1 foo two 3 baz two 5 bar two 7 dtype: int64 Apply a key function before sorting >>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd']) >>> s.sort_index(key=lambda x : x.str.lower()) A 1 b 2 C 3 d 4 dtype: int64 """ return super().sort_index( axis=axis, level=level, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position, sort_remaining=sort_remaining, ignore_index=ignore_index, key=key, ) def argsort( self, axis: Axis = 0, kind: SortKind = "quicksort", order: None = None, ) -> Series: """ Return the integer indices that would sort the Series values. Override ndarray.argsort. Argsorts the value, omitting NA/null values, and places the result in the same locations as the non-NA values. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. order : None Has no effect but is accepted for compatibility with numpy. Returns ------- Series[np.intp] Positions of values within the sort order with -1 indicating nan values. 
See Also -------- numpy.ndarray.argsort : Returns the indices that would sort this array. """ values = self._values mask = isna(values) if mask.any(): result = np.full(len(self), -1, dtype=np.intp) notmask = ~mask result[notmask] = np.argsort(values[notmask], kind=kind) else: result = np.argsort(values, kind=kind) res = self._constructor( result, index=self.index, name=self.name, dtype=np.intp, copy=False ) return res.__finalize__(self, method="argsort") def nlargest( self, n: int = 5, keep: Literal["first", "last", "all"] = "first" ) -> Series: """ Return the largest `n` elements. Parameters ---------- n : int, default 5 Return this many descending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` largest values in the Series, sorted in decreasing order. See Also -------- Series.nsmallest: Get the `n` smallest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. Notes ----- Faster than ``.sort_values(ascending=False).head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Malta": 434000, "Maldives": 434000, ... "Brunei": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Malta 434000 Maldives 434000 Brunei 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` largest elements where ``n=5`` by default. >>> s.nlargest() France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3``. Default `keep` value is 'first' so Malta will be kept. >>> s.nlargest(3) France 65000000 Italy 59000000 Malta 434000 dtype: int64 The `n` largest elements where ``n=3`` and keeping the last duplicates. Brunei will be kept since it is the last with value 434000 based on the index order. >>> s.nlargest(3, keep='last') France 65000000 Italy 59000000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3`` with all duplicates kept. Note that the returned Series has five elements due to the three duplicates. >>> s.nlargest(3, keep='all') France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nlargest() def nsmallest(self, n: int = 5, keep: str = "first") -> Series: """ Return the smallest `n` elements. Parameters ---------- n : int, default 5 Return this many ascending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` smallest values in the Series, sorted in increasing order. See Also -------- Series.nlargest: Get the `n` largest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. 
Notes ----- Faster than ``.sort_values().head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Brunei": 434000, "Malta": 434000, ... "Maldives": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Brunei 434000 Malta 434000 Maldives 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` smallest elements where ``n=5`` by default. >>> s.nsmallest() Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 Iceland 337000 dtype: int64 The `n` smallest elements where ``n=3``. Default `keep` value is 'first' so Nauru and Tuvalu will be kept. >>> s.nsmallest(3) Montserrat 5200 Nauru 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` and keeping the last duplicates. Anguilla and Tuvalu will be kept since they are the last with value 11300 based on the index order. >>> s.nsmallest(3, keep='last') Montserrat 5200 Anguilla 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` with all duplicates kept. Note that the returned Series has four elements due to the three duplicates. >>> s.nsmallest(3, keep='all') Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nsmallest() klass=_shared_doc_kwargs["klass"], extra_params=dedent( """copy : bool, default True Whether to copy underlying data.""" ), examples=dedent( """\ Examples -------- >>> s = pd.Series( ... ["A", "B", "A", "C"], ... index=[ ... ["Final exam", "Final exam", "Coursework", "Coursework"], ... ["History", "Geography", "History", "Geography"], ... ["January", "February", "March", "April"], ... ], ... ) >>> s Final exam History January A Geography February B Coursework History March A Geography April C dtype: object In the following example, we will swap the levels of the indices. Here, we will swap the levels column-wise, but levels can be swapped row-wise in a similar manner. Note that column-wise is the default behaviour. By not supplying any arguments for i and j, we swap the last and second to last indices. >>> s.swaplevel() Final exam January History A February Geography B Coursework March History A April Geography C dtype: object By supplying one argument, we can choose which index to swap the last index with. We can for example swap the first index with the last one as follows. >>> s.swaplevel(0) January History Final exam A February Geography Final exam B March History Coursework A April Geography Coursework C dtype: object We can also define explicitly which indices we want to swap by supplying values for both i and j. Here, we for example swap the first and second indices. >>> s.swaplevel(0, 1) History Final exam January A Geography Final exam February B History Coursework March A Geography Coursework April C dtype: object""" ), ) def swaplevel( self, i: Level = -2, j: Level = -1, copy: bool | None = None ) -> Series: """ Swap levels i and j in a :class:`MultiIndex`. Default is to swap the two innermost levels of the index. Parameters ---------- i, j : int or str Levels of the indices to be swapped. Can pass level name as string. {extra_params} Returns ------- {klass} {klass} with levels swapped in MultiIndex. 
{examples} """ assert isinstance(self.index, MultiIndex) result = self.copy(deep=copy and not using_copy_on_write()) result.index = self.index.swaplevel(i, j) return result def reorder_levels(self, order: Sequence[Level]) -> Series: """ Rearrange index levels using input order. May not drop or duplicate levels. Parameters ---------- order : list of int representing new level order Reference level by number or key. Returns ------- type of caller (new object) """ if not isinstance(self.index, MultiIndex): # pragma: no cover raise Exception("Can only reorder levels on a hierarchical axis.") result = self.copy(deep=None) assert isinstance(result.index, MultiIndex) result.index = result.index.reorder_levels(order) return result def explode(self, ignore_index: bool = False) -> Series: """ Transform each element of a list-like to a row. Parameters ---------- ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.1.0 Returns ------- Series Exploded lists to rows; index will be duplicated for these rows. See Also -------- Series.str.split : Split string values on specified separator. Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame. DataFrame.melt : Unpivot a DataFrame from wide format to long format. DataFrame.explode : Explode a DataFrame from list-like columns to long format. Notes ----- This routine will explode list-likes including lists, tuples, sets, Series, and np.ndarray. The result dtype of the subset rows will be object. Scalars will be returned unchanged, and empty list-likes will result in a np.nan for that row. In addition, the ordering of elements in the output will be non-deterministic when exploding sets. Reference :ref:`the user guide <reshaping.explode>` for more examples. Examples -------- >>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]]) >>> s 0 [1, 2, 3] 1 foo 2 [] 3 [3, 4] dtype: object >>> s.explode() 0 1 0 2 0 3 1 foo 2 NaN 3 3 3 4 dtype: object """ if not len(self) or not is_object_dtype(self): result = self.copy() return result.reset_index(drop=True) if ignore_index else result values, counts = reshape.explode(np.asarray(self._values)) if ignore_index: index = default_index(len(values)) else: index = self.index.repeat(counts) return self._constructor(values, index=index, name=self.name, copy=False) def unstack(self, level: IndexLabel = -1, fill_value: Hashable = None) -> DataFrame: """ Unstack, also known as pivot, Series with MultiIndex to produce DataFrame. Parameters ---------- level : int, str, or list of these, default last level Level(s) to unstack, can pass level name. fill_value : scalar value, default None Value to use when replacing NaN values. Returns ------- DataFrame Unstacked Series. Notes ----- Reference :ref:`the user guide <reshaping.stacking>` for more examples. Examples -------- >>> s = pd.Series([1, 2, 3, 4], ... index=pd.MultiIndex.from_product([['one', 'two'], ... ['a', 'b']])) >>> s one a 1 b 2 two a 3 b 4 dtype: int64 >>> s.unstack(level=-1) a b one 1 2 two 3 4 >>> s.unstack(level=0) one two a 1 3 b 2 4 """ from pandas.core.reshape.reshape import unstack return unstack(self, level, fill_value) # ---------------------------------------------------------------------- # function application def map( self, arg: Callable | Mapping | Series, na_action: Literal["ignore"] | None = None, ) -> Series: """ Map values of Series according to an input mapping or function. 
Used for substituting each value in a Series with another value, that may be derived from a function, a ``dict`` or a :class:`Series`. Parameters ---------- arg : function, collections.abc.Mapping subclass or Series Mapping correspondence. na_action : {None, 'ignore'}, default None If 'ignore', propagate NaN values, without passing them to the mapping correspondence. Returns ------- Series Same index as caller. See Also -------- Series.apply : For applying more complex functions on a Series. DataFrame.apply : Apply a function row-/column-wise. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. Notes ----- When ``arg`` is a dictionary, values in Series that are not in the dictionary (as keys) are converted to ``NaN``. However, if the dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e. provides a method for default values), then this default is used rather than ``NaN``. Examples -------- >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit']) >>> s 0 cat 1 dog 2 NaN 3 rabbit dtype: object ``map`` accepts a ``dict`` or a ``Series``. Values that are not found in the ``dict`` are converted to ``NaN``, unless the dict has a default value (e.g. ``defaultdict``): >>> s.map({'cat': 'kitten', 'dog': 'puppy'}) 0 kitten 1 puppy 2 NaN 3 NaN dtype: object It also accepts a function: >>> s.map('I am a {}'.format) 0 I am a cat 1 I am a dog 2 I am a nan 3 I am a rabbit dtype: object To avoid applying the function to missing values (and keep them as ``NaN``) ``na_action='ignore'`` can be used: >>> s.map('I am a {}'.format, na_action='ignore') 0 I am a cat 1 I am a dog 2 NaN 3 I am a rabbit dtype: object """ new_values = self._map_values(arg, na_action=na_action) return self._constructor(new_values, index=self.index, copy=False).__finalize__( self, method="map" ) def _gotitem(self, key, ndim, subset=None) -> Series: """ Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : {1, 2} Requested ndim of result. subset : object, default None Subset to act on. """ return self _agg_see_also_doc = dedent( """ See Also -------- Series.apply : Invoke function on a Series. Series.transform : Transform function producing a Series with like indexes. """ ) _agg_examples_doc = dedent( """ Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.agg('min') 1 >>> s.agg(['min', 'max']) min 1 max 4 dtype: int64 """ ) _shared_docs["aggregate"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, ) def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs): # Validate the axis parameter self._get_axis_number(axis) # if func is None, will switch to user-provided "named aggregation" kwargs if func is None: func = dict(kwargs.items()) op = SeriesApply(self, func, convert_dtype=False, args=args, kwargs=kwargs) result = op.agg() return result agg = aggregate # error: Signature of "any" incompatible with supertype "NDFrame" [override] def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: None = ..., **kwargs, ) -> bool: ... def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: Level, **kwargs, ) -> Series | bool: ... # error: Missing return statement def any( # type: ignore[empty-body] self, axis: Axis = 0, bool_only: bool | None = None, skipna: bool = True, level: Level | None = None, **kwargs, ) -> Series | bool: ... 
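# A minimal usage sketch (public pandas API only) of how the `aggregate`
# and `any` methods above behave from the caller's side; illustrative,
# not part of the pandas source itself.
import pandas as pd

s = pd.Series([0, 1, 2])
print(s.agg("min"))           # 0: a single function returns a scalar
print(s.agg(["min", "max"]))  # min 0, max 2: a list of functions returns a Series
print(s.any())                # True: at least one element (1, 2) is truthy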
_shared_docs["transform"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], ) def transform( self, func: AggFuncType, axis: Axis = 0, *args, **kwargs ) -> DataFrame | Series: # Validate axis argument self._get_axis_number(axis) result = SeriesApply( self, func=func, convert_dtype=True, args=args, kwargs=kwargs ).transform() return result def apply( self, func: AggFuncType, convert_dtype: bool = True, args: tuple[Any, ...] = (), **kwargs, ) -> DataFrame | Series: """ Invoke function on values of Series. Can be ufunc (a NumPy function that applies to the entire Series) or a Python function that only works on single values. Parameters ---------- func : function Python function or NumPy ufunc to apply. convert_dtype : bool, default True Try to find better dtype for elementwise function results. If False, leave as dtype=object. Note that the dtype is always preserved for some extension array dtypes, such as Categorical. args : tuple Positional arguments passed to func after the series value. **kwargs Additional keyword arguments passed to func. Returns ------- Series or DataFrame If func returns a Series object the result will be a DataFrame. See Also -------- Series.map: For element-wise operations. Series.agg: Only perform aggregating type operations. Series.transform: Only perform transforming type operations. Notes ----- Functions that mutate the passed object can produce unexpected behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` for more details. Examples -------- Create a series with typical summer temperatures for each city. >>> s = pd.Series([20, 21, 12], ... index=['London', 'New York', 'Helsinki']) >>> s London 20 New York 21 Helsinki 12 dtype: int64 Square the values by defining a function and passing it as an argument to ``apply()``. >>> def square(x): ... return x ** 2 >>> s.apply(square) London 400 New York 441 Helsinki 144 dtype: int64 Square the values by passing an anonymous function as an argument to ``apply()``. >>> s.apply(lambda x: x ** 2) London 400 New York 441 Helsinki 144 dtype: int64 Define a custom function that needs additional positional arguments and pass these additional arguments using the ``args`` keyword. >>> def subtract_custom_value(x, custom_value): ... return x - custom_value >>> s.apply(subtract_custom_value, args=(5,)) London 15 New York 16 Helsinki 7 dtype: int64 Define a custom function that takes keyword arguments and pass these arguments to ``apply``. >>> def add_custom_values(x, **kwargs): ... for month in kwargs: ... x += kwargs[month] ... return x >>> s.apply(add_custom_values, june=30, july=20, august=25) London 95 New York 96 Helsinki 87 dtype: int64 Use a function from the Numpy library. >>> s.apply(np.log) London 2.995732 New York 3.044522 Helsinki 2.484907 dtype: float64 """ return SeriesApply(self, func, convert_dtype, args, kwargs).apply() def _reduce( self, op, name: str, *, axis: Axis = 0, skipna: bool = True, numeric_only: bool = False, filter_type=None, **kwds, ): """ Perform a reduction operation. If we have an ndarray as a value, then simply perform the operation, otherwise delegate to the object. 
""" delegate = self._values if axis is not None: self._get_axis_number(axis) if isinstance(delegate, ExtensionArray): # dispatch to ExtensionArray interface return delegate._reduce(name, skipna=skipna, **kwds) else: # dispatch to numpy arrays if numeric_only and not is_numeric_dtype(self.dtype): kwd_name = "numeric_only" if name in ["any", "all"]: kwd_name = "bool_only" # GH#47500 - change to TypeError to match other methods raise TypeError( f"Series.{name} does not allow {kwd_name}={numeric_only} " "with non-numeric dtypes." ) with np.errstate(all="ignore"): return op(delegate, skipna=skipna, **kwds) def _reindex_indexer( self, new_index: Index | None, indexer: npt.NDArray[np.intp] | None, copy: bool | None, ) -> Series: # Note: new_index is None iff indexer is None # if not None, indexer is np.intp if indexer is None and ( new_index is None or new_index.names == self.index.names ): if using_copy_on_write(): return self.copy(deep=copy) if copy or copy is None: return self.copy(deep=copy) return self new_values = algorithms.take_nd( self._values, indexer, allow_fill=True, fill_value=None ) return self._constructor(new_values, index=new_index, copy=False) def _needs_reindex_multi(self, axes, method, level) -> bool: """ Check if we do need a multi reindex; this is for compat with higher dims. """ return False # error: Cannot determine type of 'align' NDFrame.align, # type: ignore[has-type] klass=_shared_doc_kwargs["klass"], axes_single_arg=_shared_doc_kwargs["axes_single_arg"], ) def align( self, other: Series, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value: Hashable = None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> Series: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[True], level: Level | None = ..., errors: IgnoreRaise = ..., ) -> None: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[False] = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: bool = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series | None: ... def rename( self, index: Renamer | Hashable | None = None, *, axis: Axis | None = None, copy: bool = True, inplace: bool = False, level: Level | None = None, errors: IgnoreRaise = "ignore", ) -> Series | None: """ Alter Series index labels or name. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. Alternatively, change ``Series.name`` with a scalar value. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- index : scalar, hashable sequence, dict-like or function optional Functions or dict-like are transformations to apply to the index. Scalar or hashable sequence-like will alter the ``Series.name`` attribute. axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new Series. 
If True the value of copy is ignored. level : int or level name, default None In case of MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise `KeyError` when a `dict-like mapper` or `index` contains labels that are not present in the index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- Series or None Series with index labels or name altered or None if ``inplace=True``. See Also -------- DataFrame.rename : Corresponding DataFrame method. Series.rename_axis : Set the name of the axis. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.rename("my_name") # scalar, changes Series.name 0 1 1 2 2 3 Name: my_name, dtype: int64 >>> s.rename(lambda x: x ** 2) # function, changes labels 0 1 1 2 4 3 dtype: int64 >>> s.rename({1: 3, 2: 5}) # mapping, changes labels 0 1 3 2 5 3 dtype: int64 """ if axis is not None: # Make sure we raise if an invalid 'axis' is passed. axis = self._get_axis_number(axis) if callable(index) or is_dict_like(index): # error: Argument 1 to "_rename" of "NDFrame" has incompatible # type "Union[Union[Mapping[Any, Hashable], Callable[[Any], # Hashable]], Hashable, None]"; expected "Union[Mapping[Any, # Hashable], Callable[[Any], Hashable], None]" return super()._rename( index, # type: ignore[arg-type] copy=copy, inplace=inplace, level=level, errors=errors, ) else: return self._set_name(index, inplace=inplace) """ Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.set_axis(['a', 'b', 'c'], axis=0) a 1 b 2 c 3 dtype: int64 """ ) **_shared_doc_kwargs, extended_summary_sub="", axis_description_sub="", see_also_sub="", ) ) ) # error: Cannot determine type of 'shift' # ---------------------------------------------------------------------- # Convert to types that support pd.NA # error: Cannot determine type of 'isna' # error: Return type "Series" of "isna" incompatible with return type "ndarray # [Any, dtype[bool_]]" in supertype "IndexOpsMixin" # error: Cannot determine type of 'isna' # error: Cannot determine type of 'notna' # error: Cannot determine type of 'notna' # ---------------------------------------------------------------------- # Time series-oriented methods # error: Cannot determine type of 'asfreq' # error: Cannot determine type of 'resample' # ---------------------------------------------------------------------- # Add index # ---------------------------------------------------------------------- # Accessor Methods # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # Add plotting methods to Series # ---------------------------------------------------------------------- # Template-Based Arithmetic/Comparison Methods Series The provided code snippet includes necessary dependencies for implementing the `_stata_elapsed_date_to_datetime_vec` function. Write a Python function `def _stata_elapsed_date_to_datetime_vec(dates, fmt) -> Series` to solve the following problem: Convert from SIF to datetime. https://www.stata.com/help.cgi?datetime Parameters ---------- dates : Series The Stata Internal Format date to convert to datetime according to fmt fmt : str The format to convert to. 
Can be, tc, td, tw, tm, tq, th, ty Returns Returns ------- converted : Series The converted dates Examples -------- >>> dates = pd.Series([52]) >>> _stata_elapsed_date_to_datetime_vec(dates , "%tw") 0 1961-01-01 dtype: datetime64[ns] Notes ----- datetime/c - tc milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day datetime/C - tC - NOT IMPLEMENTED milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds date - td days since 01jan1960 (01jan1960 = 0) weekly date - tw weeks since 1960w1 This assumes 52 weeks in a year, then adds 7 * remainder of the weeks. The datetime value is the start of the week in terms of days in the year, not ISO calendar weeks. monthly date - tm months since 1960m1 quarterly date - tq quarters since 1960q1 half-yearly date - th half-years since 1960h1 yearly date - ty years since 0000 Here is the function: def _stata_elapsed_date_to_datetime_vec(dates, fmt) -> Series: """ Convert from SIF to datetime. https://www.stata.com/help.cgi?datetime Parameters ---------- dates : Series The Stata Internal Format date to convert to datetime according to fmt fmt : str The format to convert to. Can be, tc, td, tw, tm, tq, th, ty Returns Returns ------- converted : Series The converted dates Examples -------- >>> dates = pd.Series([52]) >>> _stata_elapsed_date_to_datetime_vec(dates , "%tw") 0 1961-01-01 dtype: datetime64[ns] Notes ----- datetime/c - tc milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day datetime/C - tC - NOT IMPLEMENTED milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds date - td days since 01jan1960 (01jan1960 = 0) weekly date - tw weeks since 1960w1 This assumes 52 weeks in a year, then adds 7 * remainder of the weeks. The datetime value is the start of the week in terms of days in the year, not ISO calendar weeks. monthly date - tm months since 1960m1 quarterly date - tq quarters since 1960q1 half-yearly date - th half-years since 1960h1 yearly date - ty years since 0000 """ MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year MAX_DAY_DELTA = (Timestamp.max - datetime.datetime(1960, 1, 1)).days MIN_DAY_DELTA = (Timestamp.min - datetime.datetime(1960, 1, 1)).days MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000 MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000 def convert_year_month_safe(year, month) -> Series: """ Convert year and month to datetimes, using pandas vectorized versions when the date range falls within the range supported by pandas. Otherwise it falls back to a slower but more robust method using datetime. """ if year.max() < MAX_YEAR and year.min() > MIN_YEAR: return to_datetime(100 * year + month, format="%Y%m") else: index = getattr(year, "index", None) return Series( [datetime.datetime(y, m, 1) for y, m in zip(year, month)], index=index ) def convert_year_days_safe(year, days) -> Series: """ Converts year (e.g. 1999) and days since the start of the year to a datetime or datetime64 Series """ if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR: return to_datetime(year, format="%Y") + to_timedelta(days, unit="d") else: index = getattr(year, "index", None) value = [ datetime.datetime(y, 1, 1) + relativedelta(days=int(d)) for y, d in zip(year, days) ] return Series(value, index=index) def convert_delta_safe(base, deltas, unit) -> Series: """ Convert base dates and deltas to datetimes, using pandas vectorized versions if the deltas satisfy restrictions required to be expressed as dates in pandas. 
""" index = getattr(deltas, "index", None) if unit == "d": if deltas.max() > MAX_DAY_DELTA or deltas.min() < MIN_DAY_DELTA: values = [base + relativedelta(days=int(d)) for d in deltas] return Series(values, index=index) elif unit == "ms": if deltas.max() > MAX_MS_DELTA or deltas.min() < MIN_MS_DELTA: values = [ base + relativedelta(microseconds=(int(d) * 1000)) for d in deltas ] return Series(values, index=index) else: raise ValueError("format not understood") base = to_datetime(base) deltas = to_timedelta(deltas, unit=unit) return base + deltas # TODO(non-nano): If/when pandas supports more than datetime64[ns], this # should be improved to use correct range, e.g. datetime[Y] for yearly bad_locs = np.isnan(dates) has_bad_values = False if bad_locs.any(): has_bad_values = True # reset cache to avoid SettingWithCopy checks (we own the DataFrame and the # `dates` Series is used to overwrite itself in the DataFramae) dates._reset_cacher() dates[bad_locs] = 1.0 # Replace with NaT dates = dates.astype(np.int64) if fmt.startswith(("%tc", "tc")): # Delta ms relative to base base = stata_epoch ms = dates conv_dates = convert_delta_safe(base, ms, "ms") elif fmt.startswith(("%tC", "tC")): warnings.warn( "Encountered %tC format. Leaving in Stata Internal Format.", stacklevel=find_stack_level(), ) conv_dates = Series(dates, dtype=object) if has_bad_values: conv_dates[bad_locs] = NaT return conv_dates # Delta days relative to base elif fmt.startswith(("%td", "td", "%d", "d")): base = stata_epoch days = dates conv_dates = convert_delta_safe(base, days, "d") # does not count leap days - 7 days is a week. # 52nd week may have more than 7 days elif fmt.startswith(("%tw", "tw")): year = stata_epoch.year + dates // 52 days = (dates % 52) * 7 conv_dates = convert_year_days_safe(year, days) elif fmt.startswith(("%tm", "tm")): # Delta months relative to base year = stata_epoch.year + dates // 12 month = (dates % 12) + 1 conv_dates = convert_year_month_safe(year, month) elif fmt.startswith(("%tq", "tq")): # Delta quarters relative to base year = stata_epoch.year + dates // 4 quarter_month = (dates % 4) * 3 + 1 conv_dates = convert_year_month_safe(year, quarter_month) elif fmt.startswith(("%th", "th")): # Delta half-years relative to base year = stata_epoch.year + dates // 2 month = (dates % 2) * 6 + 1 conv_dates = convert_year_month_safe(year, month) elif fmt.startswith(("%ty", "ty")): # Years -- not delta year = dates first_month = np.ones_like(dates) conv_dates = convert_year_month_safe(year, first_month) else: raise ValueError(f"Date fmt {fmt} not understood") if has_bad_values: # Restore NaT for bad values conv_dates[bad_locs] = NaT return conv_dates
Convert from SIF to datetime. https://www.stata.com/help.cgi?datetime

Parameters
----------
dates : Series
    The Stata Internal Format date to convert to datetime according to fmt
fmt : str
    The format to convert to. Can be tc, td, tw, tm, tq, th, ty

Returns
-------
converted : Series
    The converted dates

Examples
--------
>>> dates = pd.Series([52])
>>> _stata_elapsed_date_to_datetime_vec(dates, "%tw")
0   1961-01-01
dtype: datetime64[ns]

Notes
-----
datetime/c - tc
    milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
    milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
    days since 01jan1960 (01jan1960 = 0)
weekly date - tw
    weeks since 1960w1
    This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
    The datetime value is the start of the week in terms of days in the
    year, not ISO calendar weeks.
monthly date - tm
    months since 1960m1
quarterly date - tq
    quarters since 1960q1
half-yearly date - th
    half-years since 1960h1
yearly date - ty
    years since 0000
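Per the Notes, the `%tc` path is plain millisecond arithmetic from the 1960 epoch; a minimal sketch of its vectorized branch, assuming only the public API:

import pandas as pd

base = pd.Timestamp(1960, 1, 1)        # stata_epoch
ms = pd.Series([86_400_000])           # exactly one day of milliseconds
print(base + pd.to_timedelta(ms, unit="ms"))
# 0   1960-01-02
# dtype: datetime64[ns]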
173,533
from __future__ import annotations from collections import abc import datetime from io import BytesIO import os import struct import sys from types import TracebackType from typing import ( IO, TYPE_CHECKING, Any, AnyStr, Callable, Final, Hashable, Sequence, cast, ) import warnings from dateutil.relativedelta import relativedelta import numpy as np from pandas._libs.lib import infer_dtype from pandas._libs.writers import max_len_string_array from pandas._typing import ( CompressionOptions, FilePath, ReadBuffer, StorageOptions, WriteBuffer, ) from pandas.errors import ( CategoricalConversionWarning, InvalidColumnName, PossiblePrecisionLoss, ValueLabelTypeMismatch, ) from pandas.util._decorators import ( Appender, doc, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_categorical_dtype, is_datetime64_dtype, is_numeric_dtype, ) from pandas import ( Categorical, DatetimeIndex, NaT, Timestamp, isna, to_datetime, to_timedelta, ) from pandas.core.arrays.boolean import BooleanDtype from pandas.core.arrays.integer import IntegerDtype from pandas.core.frame import DataFrame from pandas.core.indexes.base import Index from pandas.core.series import Series from pandas.core.shared_docs import _shared_docs from pandas.io.common import get_handle stata_epoch: Final = datetime.datetime(1960, 1, 1) def find_stack_level() -> int: """ Find the first place in the stack that is not inside pandas (tests notwithstanding). """ import pandas as pd pkg_dir = os.path.dirname(pd.__file__) test_dir = os.path.join(pkg_dir, "tests") # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow frame = inspect.currentframe() n = 0 while frame: fname = inspect.getfile(frame) if fname.startswith(pkg_dir) and not fname.startswith(test_dir): frame = frame.f_back n += 1 else: break return n def is_datetime64_dtype(arr_or_dtype) -> bool: """ Check whether an array-like or dtype is of the datetime64 dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array-like or dtype to check. Returns ------- boolean Whether or not the array-like or dtype is of the datetime64 dtype. Examples -------- >>> from pandas.api.types import is_datetime64_dtype >>> is_datetime64_dtype(object) False >>> is_datetime64_dtype(np.datetime64) True >>> is_datetime64_dtype(np.array([], dtype=int)) False >>> is_datetime64_dtype(np.array([], dtype=np.datetime64)) True >>> is_datetime64_dtype([1, 2, 3]) False """ if isinstance(arr_or_dtype, np.dtype): # GH#33400 fastpath for dtype object return arr_or_dtype.kind == "M" return _is_dtype_type(arr_or_dtype, classes(np.datetime64)) class DataFrame(NDFrame, OpsMixin): """ Two-dimensional, size-mutable, potentially heterogeneous tabular data. Data structure also contains labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure. Parameters ---------- data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame Dict can contain Series, arrays, constants, dataclass or list-like objects. If data is a dict, column order follows insertion-order. If a dict contains Series which have an index defined, it is aligned by its index. This alignment also occurs if data is a Series or a DataFrame itself. Alignment is done on Series/DataFrame inputs. If data is a list of dicts, column order follows insertion-order. index : Index or array-like Index to use for resulting frame. 
Will default to RangeIndex if no indexing information part of input data and no index provided. columns : Index or array-like Column labels to use for resulting frame when data does not have them, defaulting to RangeIndex(0, 1, 2, ..., n). If data contains column labels, will perform column selection instead. dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer. copy : bool or None, default None Copy data from inputs. For dict data, the default of None behaves like ``copy=True``. For DataFrame or 2d ndarray input, the default of None behaves like ``copy=False``. If data is a dict containing one or more Series (possibly of different dtypes), ``copy=False`` will ensure that these inputs are not copied. .. versionchanged:: 1.3.0 See Also -------- DataFrame.from_records : Constructor from tuples, also record arrays. DataFrame.from_dict : From dicts of Series, arrays, or dicts. read_csv : Read a comma-separated values (csv) file into DataFrame. read_table : Read general delimited file into DataFrame. read_clipboard : Read text from clipboard into DataFrame. Notes ----- Please reference the :ref:`User Guide <basics.dataframe>` for more information. Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. >>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = pd.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from a dictionary including Series: >>> d = {'col1': [0, 1, 2, 3], 'col2': pd.Series([2, 3], index=[2, 3])} >>> pd.DataFrame(data=d, index=[0, 1, 2, 3]) col1 col2 0 0 NaN 1 1 NaN 2 2 2.0 3 3 3.0 Constructing DataFrame from numpy ndarray: >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ... columns=['a', 'b', 'c']) >>> df2 a b c 0 1 2 3 1 4 5 6 2 7 8 9 Constructing DataFrame from a numpy ndarray that has labeled columns: >>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], ... dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")]) >>> df3 = pd.DataFrame(data, columns=['c', 'a']) ... 
>>> df3 c a 0 3 1 1 6 4 2 9 7 Constructing DataFrame from dataclass: >>> from dataclasses import make_dataclass >>> Point = make_dataclass("Point", [("x", int), ("y", int)]) >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)]) x y 0 0 0 1 0 3 2 2 3 Constructing DataFrame from Series/DataFrame: >>> ser = pd.Series([1, 2, 3], index=["a", "b", "c"]) >>> df = pd.DataFrame(data=ser, index=["a", "c"]) >>> df 0 a 1 c 3 >>> df1 = pd.DataFrame([1, 2, 3], index=["a", "b", "c"], columns=["x"]) >>> df2 = pd.DataFrame(data=df1, index=["a", "c"]) >>> df2 x a 1 c 3 """ _internal_names_set = {"columns", "index"} | NDFrame._internal_names_set _typ = "dataframe" _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray) _accessors: set[str] = {"sparse"} _hidden_attrs: frozenset[str] = NDFrame._hidden_attrs | frozenset([]) _mgr: BlockManager | ArrayManager def _constructor(self) -> Callable[..., DataFrame]: return DataFrame _constructor_sliced: Callable[..., Series] = Series # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index: Axes | None = None, columns: Axes | None = None, dtype: Dtype | None = None, copy: bool | None = None, ) -> None: if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._mgr if not copy: # if not copying data, ensure to still return a shallow copy # to avoid the result sharing the same Manager data = data.copy(deep=False) if isinstance(data, (BlockManager, ArrayManager)): if using_copy_on_write(): data = data.copy(deep=False) # first check if a Manager is passed without any other arguments # -> use fastpath (without checking Manager type) if index is None and columns is None and dtype is None and not copy: # GH#33357 fastpath NDFrame.__init__(self, data) return manager = get_option("mode.data_manager") # GH47215 if index is not None and isinstance(index, set): raise ValueError("index cannot be a set") if columns is not None and isinstance(columns, set): raise ValueError("columns cannot be a set") if copy is None: if isinstance(data, dict): # retain pre-GH#38939 default behavior copy = True elif ( manager == "array" and isinstance(data, (np.ndarray, ExtensionArray)) and data.ndim == 2 ): # INFO(ArrayManager) by default copy the 2D input array to get # contiguous 1D arrays copy = True elif using_copy_on_write() and not isinstance( data, (Index, DataFrame, Series) ): copy = True else: copy = False if data is None: index = index if index is not None else default_index(0) columns = columns if columns is not None else default_index(0) dtype = dtype if dtype is not None else pandas_dtype(object) data = [] if isinstance(data, (BlockManager, ArrayManager)): mgr = self._init_mgr( data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy ) elif isinstance(data, dict): # GH#38939 de facto copy defaults to False only in non-dict cases mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager) elif isinstance(data, ma.MaskedArray): from numpy.ma import mrecords # masked recarray if isinstance(data, mrecords.MaskedRecords): raise TypeError( "MaskedRecords are not supported. Pass " "{name: data[name] for name in data.dtype.names} " "instead" ) # a masked array data = sanitize_masked_array(data) mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) elif isinstance(data, (np.ndarray, Series, Index, ExtensionArray)): if data.dtype.names: # i.e. 
numpy structured array data = cast(np.ndarray, data) mgr = rec_array_to_mgr( data, index, columns, dtype, copy, typ=manager, ) elif getattr(data, "name", None) is not None: # i.e. Series/Index with non-None name _copy = copy if using_copy_on_write() else True mgr = dict_to_mgr( # error: Item "ndarray" of "Union[ndarray, Series, Index]" has no # attribute "name" {data.name: data}, # type: ignore[union-attr] index, columns, dtype=dtype, typ=manager, copy=_copy, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) # For data is list-like, or Iterable (will consume into list) elif is_list_like(data): if not isinstance(data, abc.Sequence): if hasattr(data, "__array__"): # GH#44616 big perf improvement for e.g. pytorch tensor data = np.asarray(data) else: data = list(data) if len(data) > 0: if is_dataclass(data[0]): data = dataclasses_to_dicts(data) if not isinstance(data, np.ndarray) and treat_as_nested(data): # exclude ndarray as we may have cast it a few lines above if columns is not None: columns = ensure_index(columns) arrays, columns, index = nested_data_to_arrays( # error: Argument 3 to "nested_data_to_arrays" has incompatible # type "Optional[Collection[Any]]"; expected "Optional[Index]" data, columns, index, # type: ignore[arg-type] dtype, ) mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, typ=manager, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) else: mgr = dict_to_mgr( {}, index, columns if columns is not None else default_index(0), dtype=dtype, typ=manager, ) # For data is scalar else: if index is None or columns is None: raise ValueError("DataFrame constructor not properly called!") index = ensure_index(index) columns = ensure_index(columns) if not dtype: dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True) # For data is a scalar extension dtype if isinstance(dtype, ExtensionDtype): # TODO(EA2D): special case not needed with 2D EAs values = [ construct_1d_arraylike_from_scalar(data, len(index), dtype) for _ in range(len(columns)) ] mgr = arrays_to_mgr(values, columns, index, dtype=None, typ=manager) else: arr2d = construct_2d_arraylike_from_scalar( data, len(index), len(columns), dtype, copy, ) mgr = ndarray_to_mgr( arr2d, index, columns, dtype=arr2d.dtype, copy=False, typ=manager, ) # ensure correct Manager type according to settings mgr = mgr_to_mgr(mgr, typ=manager) NDFrame.__init__(self, mgr) # ---------------------------------------------------------------------- def __dataframe__( self, nan_as_null: bool = False, allow_copy: bool = True ) -> DataFrameXchg: """ Return the dataframe interchange object implementing the interchange protocol. Parameters ---------- nan_as_null : bool, default False Whether to tell the DataFrame to overwrite null values in the data with ``NaN`` (or ``NaT``). allow_copy : bool, default True Whether to allow memory copying when exporting. If set to False it would cause non-zero-copy exports to fail. Returns ------- DataFrame interchange object The object which consuming library can use to ingress the dataframe. Notes ----- Details on the interchange protocol: https://data-apis.org/dataframe-protocol/latest/index.html `nan_as_null` currently has no effect; once support for nullable extension dtypes is added, this value should be propagated to columns. 
""" from pandas.core.interchange.dataframe import PandasDataFrameXchg return PandasDataFrameXchg(self, nan_as_null, allow_copy) # ---------------------------------------------------------------------- def axes(self) -> list[Index]: """ Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns] def shape(self) -> tuple[int, int]: """ Return a tuple representing the dimensionality of the DataFrame. See Also -------- ndarray.shape : Tuple of array dimensions. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self.index), len(self.columns) def _is_homogeneous_type(self) -> bool: """ Whether all the columns in a DataFrame have the same type. Returns ------- bool See Also -------- Index._is_homogeneous_type : Whether the object has a single dtype. MultiIndex._is_homogeneous_type : Whether all the levels of a MultiIndex have the same dtype. Examples -------- >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type True >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type False Items with the same type but different sizes are considered different types. >>> DataFrame({ ... "A": np.array([1, 2], dtype=np.int32), ... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type False """ if isinstance(self._mgr, ArrayManager): return len({arr.dtype for arr in self._mgr.arrays}) == 1 if self._mgr.any_extension_types: return len({block.dtype for block in self._mgr.blocks}) == 1 else: return not self._is_mixed_type def _can_fast_transpose(self) -> bool: """ Can we transpose this DataFrame without creating any new array objects. """ if isinstance(self._mgr, ArrayManager): return False blocks = self._mgr.blocks if len(blocks) != 1: return False dtype = blocks[0].dtype # TODO(EA2D) special case would be unnecessary with 2D EAs return not is_1d_only_ea_dtype(dtype) def _values(self) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray: """ Analogue to ._values that may return a 2D ExtensionArray. """ mgr = self._mgr if isinstance(mgr, ArrayManager): if len(mgr.arrays) == 1 and not is_1d_only_ea_dtype(mgr.arrays[0].dtype): # error: Item "ExtensionArray" of "Union[ndarray, ExtensionArray]" # has no attribute "reshape" return mgr.arrays[0].reshape(-1, 1) # type: ignore[union-attr] return ensure_wrapped_if_datetimelike(self.values) blocks = mgr.blocks if len(blocks) != 1: return ensure_wrapped_if_datetimelike(self.values) arr = blocks[0].values if arr.ndim == 1: # non-2D ExtensionArray return self.values # more generally, whatever we allow in NDArrayBackedExtensionBlock arr = cast("np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray", arr) return arr.T # ---------------------------------------------------------------------- # Rendering Methods def _repr_fits_vertical_(self) -> bool: """ Check length against max_rows. """ max_rows = get_option("display.max_rows") return len(self) <= max_rows def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool: """ Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns. In case of non-interactive session, no boundaries apply. 
`ignore_width` is here so ipynb+HTML output can behave the way users expect. display.max_columns remains in effect. GH3541, GH3573 """ width, height = console.get_console_size() max_columns = get_option("display.max_columns") nb_columns = len(self.columns) # exceed max columns if (max_columns and nb_columns > max_columns) or ( (not ignore_width) and width and nb_columns > (width // 2) ): return False # used by repr_html under IPython notebook or scripts ignore terminal # dims if ignore_width or width is None or not console.in_interactive_session(): return True if get_option("display.width") is not None or console.in_ipython_frontend(): # check at least the column row for excessive width max_rows = 1 else: max_rows = get_option("display.max_rows") # when auto-detecting, so width=None and not in ipython front end # check whether repr fits horizontal by actually checking # the width of the rendered repr buf = StringIO() # only care about the stuff we'll actually print out # and to_string on entire frame may be expensive d = self if max_rows is not None: # unlimited rows # min of two, where one may be None d = d.iloc[: min(max_rows, len(d))] else: return True d.to_string(buf=buf) value = buf.getvalue() repr_width = max(len(line) for line in value.split("\n")) return repr_width < width def _info_repr(self) -> bool: """ True if the repr should show the info view. """ info_repr_option = get_option("display.large_repr") == "info" return info_repr_option and not ( self._repr_fits_horizontal_() and self._repr_fits_vertical_() ) def __repr__(self) -> str: """ Return a string representation for a particular DataFrame. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) return buf.getvalue() repr_params = fmt.get_dataframe_repr_params() return self.to_string(**repr_params) def _repr_html_(self) -> str | None: """ Return a html representation for a particular DataFrame. Mainly for IPython notebook. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) # need to escape the <class>, should be the first line. val = buf.getvalue().replace("<", r"&lt;", 1) val = val.replace(">", r"&gt;", 1) return f"<pre>{val}</pre>" if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") min_rows = get_option("display.min_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") formatter = fmt.DataFrameFormatter( self, columns=None, col_space=None, na_rep="NaN", formatters=None, float_format=None, sparsify=None, justify=None, index_names=True, header=True, index=True, bold_rows=True, escape=True, max_rows=max_rows, min_rows=min_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=".", ) return fmt.DataFrameRenderer(formatter).to_html(notebook=True) else: return None def to_string( self, buf: None = ..., columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> str: ... 
def to_string( self, buf: FilePath | WriteBuffer[str], columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> None: ... header_type="bool or sequence of str", header="Write out the column names. If a list of strings " "is given, it is assumed to be aliases for the " "column names", col_space_type="int, list or dict of int", col_space="The minimum width of each column. If a list of ints is given " "every integers corresponds with one column. If a dict is given, the key " "references the column, while the value defines the space to use.", ) def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[str] | None = None, col_space: int | list[int] | dict[Hashable, int] | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: fmt.FormattersType | None = None, float_format: fmt.FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool = False, decimal: str = ".", line_width: int | None = None, min_rows: int | None = None, max_colwidth: int | None = None, encoding: str | None = None, ) -> str | None: """ Render a DataFrame to a console-friendly tabular output. %(shared_params)s line_width : int, optional Width to wrap a line in characters. min_rows : int, optional The number of rows to display in the console in a truncated repr (when number of rows is above `max_rows`). max_colwidth : int, optional Max width to truncate each column in characters. By default, no limit. encoding : str, default "utf-8" Set character encoding. %(returns)s See Also -------- to_html : Convert DataFrame to HTML. Examples -------- >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]} >>> df = pd.DataFrame(d) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 """ from pandas import option_context with option_context("display.max_colwidth", max_colwidth): formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, min_rows=min_rows, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal, ) return fmt.DataFrameRenderer(formatter).to_string( buf=buf, encoding=encoding, line_width=line_width, ) # ---------------------------------------------------------------------- def style(self) -> Styler: """ Returns a Styler object. Contains methods for building a styled HTML representation of the DataFrame. See Also -------- io.formats.style.Styler : Helps style a DataFrame or Series according to the data with HTML and CSS. """ from pandas.io.formats.style import Styler return Styler(self) _shared_docs[ "items" ] = r""" Iterate over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. 
Yields ------ label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. Examples -------- >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.items(): ... print(f'label: {label}') ... print(f'content: {content}', sep='\n') ... label: species content: panda bear polar bear koala marsupial Name: species, dtype: object label: population content: panda 1864 polar 22000 koala 80000 Name: population, dtype: int64 """ def items(self) -> Iterable[tuple[Hashable, Series]]: if self.columns.is_unique and hasattr(self, "_item_cache"): for k in self.columns: yield k, self._get_item_cache(k) else: for i, k in enumerate(self.columns): yield k, self._ixs(i, axis=1) def iterrows(self) -> Iterable[tuple[Hashable, Series]]: """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : Series The data of the row as a Series. See Also -------- DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. """ columns = self.columns klass = self._constructor_sliced using_cow = using_copy_on_write() for k, v in zip(self.index, self.values): s = klass(v, index=columns, name=k).__finalize__(self) if using_cow and self._mgr.is_single_block: s._mgr.add_references(self._mgr) # type: ignore[arg-type] yield k, s def itertuples( self, index: bool = True, name: str | None = "Pandas" ) -> Iterable[tuple[Any, ...]]: """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "Pandas" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. 
Examples -------- >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... Pandas(Index='dog', num_legs=4, num_wings=0) Pandas(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... Pandas(num_legs=4, num_wings=0) Pandas(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2) """ arrays = [] fields = list(self.columns) if index: arrays.append(self.index) fields.insert(0, "Index") # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) if name is not None: # https://github.com/python/mypy/issues/9046 # error: namedtuple() expects a string literal as the first argument itertuple = collections.namedtuple( # type: ignore[misc] name, fields, rename=True ) return map(itertuple._make, zip(*arrays)) # fallback to regular tuples return zip(*arrays) def __len__(self) -> int: """ Returns length of info axis, but here we use the index. """ return len(self.index) def dot(self, other: Series) -> Series: ... def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame: ... def dot(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of an other Series, DataFrame or a numpy array. It can also be called using ``self @ other`` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the matrix product with. Returns ------- Series or DataFrame If other is a Series, return the matrix product between self and other as a Series. If other is a DataFrame or a numpy.array, return the matrix product of self and other in a DataFrame of a np.array. See Also -------- Series.dot: Similar method for Series. Notes ----- The dimensions of DataFrame and other must be compatible in order to compute the matrix multiplication. In addition, the column names of DataFrame and the index of other must contain the same values, as they will be aligned prior to the multiplication. The dot method for Series computes the inner product, instead of the matrix product here. Examples -------- Here we multiply a DataFrame with a Series. >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) >>> s = pd.Series([1, 1, 2, 1]) >>> df.dot(s) 0 -4 1 5 dtype: int64 Here we multiply a DataFrame with another DataFrame. >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(other) 0 1 0 1 4 1 2 2 Note that the dot method give the same result as @ >>> df @ other 0 1 0 1 4 1 2 2 The dot method works also if other is an np.array. >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(arr) 0 1 0 1 4 1 2 2 Note how shuffling of the objects does not change the result. 
>>> s2 = s.reindex([1, 0, 2, 3]) >>> df.dot(s2) 0 -4 1 5 dtype: int64 """ if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) if len(common) > len(self.columns) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(columns=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right._values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: raise ValueError( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, DataFrame): return self._constructor( np.dot(lvals, rvals), index=left.index, columns=other.columns, copy=False, ) elif isinstance(other, Series): return self._constructor_sliced( np.dot(lvals, rvals), index=left.index, copy=False ) elif isinstance(rvals, (np.ndarray, Index)): result = np.dot(lvals, rvals) if result.ndim == 2: return self._constructor(result, index=left.index, copy=False) else: return self._constructor_sliced(result, index=left.index, copy=False) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other: Series) -> Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(other) def __rmatmul__(self, other) -> DataFrame: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ try: return self.T.dot(np.transpose(other)).T except ValueError as err: if "shape mismatch" not in str(err): raise # GH#21581 give exception message for original shapes msg = f"shapes {np.shape(other)} and {self.shape} not aligned" raise ValueError(msg) from err # ---------------------------------------------------------------------- # IO methods (to / from other formats) def from_dict( cls, data: dict, orient: str = "columns", dtype: Dtype | None = None, columns: Axes | None = None, ) -> DataFrame: """ Construct DataFrame from dict of array-like or dicts. Creates DataFrame object from dictionary by columns or by index allowing dtype specification. Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. orient : {'columns', 'index', 'tight'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. If 'tight', assume a dict with keys ['index', 'columns', 'data', 'index_names', 'column_names']. .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument dtype : dtype, default None Data type to force after DataFrame construction, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'`` or ``orient='tight'``. Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from structured ndarray, sequence of tuples or dicts, or DataFrame. DataFrame : DataFrame object creation using constructor. DataFrame.to_dict : Convert the DataFrame to a dictionary. 
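Notes
-----
With ``orient='index'`` and dict (or Series) values, the inner keys
become the columns of the result, and inner keys missing from a given
row are filled with ``NaN``:

>>> pd.DataFrame.from_dict({'r1': {'a': 1}, 'r2': {'a': 2, 'b': 3}},
...                        orient='index')
    a    b
r1  1  NaN
r2  2  3.0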
Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data, orient='index') 0 1 2 3 row_1 3 2 1 0 row_2 a b c d When using the 'index' orientation, the column names can be specified manually: >>> pd.DataFrame.from_dict(data, orient='index', ... columns=['A', 'B', 'C', 'D']) A B C D row_1 3 2 1 0 row_2 a b c d Specify ``orient='tight'`` to create the DataFrame using a 'tight' format: >>> data = {'index': [('a', 'b'), ('a', 'c')], ... 'columns': [('x', 1), ('y', 2)], ... 'data': [[1, 3], [2, 4]], ... 'index_names': ['n1', 'n2'], ... 'column_names': ['z1', 'z2']} >>> pd.DataFrame.from_dict(data, orient='tight') z1 x y z2 1 2 n1 n2 a b 1 3 c 2 4 """ index = None orient = orient.lower() if orient == "index": if len(data) > 0: # TODO speed up Series case if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: index = list(data.keys()) # error: Incompatible types in assignment (expression has type # "List[Any]", variable has type "Dict[Any, Any]") data = list(data.values()) # type: ignore[assignment] elif orient in ("columns", "tight"): if columns is not None: raise ValueError(f"cannot use columns parameter with orient='{orient}'") else: # pragma: no cover raise ValueError( f"Expected 'index', 'columns' or 'tight' for orient parameter. " f"Got '{orient}' instead" ) if orient != "tight": return cls(data, index=index, columns=columns, dtype=dtype) else: realdata = data["data"] def create_index(indexlist, namelist): index: Index if len(namelist) > 1: index = MultiIndex.from_tuples(indexlist, names=namelist) else: index = Index(indexlist, name=namelist[0]) return index index = create_index(data["index"], data["index_names"]) columns = create_index(data["columns"], data["column_names"]) return cls(realdata, index=index, columns=columns, dtype=dtype) def to_numpy( self, dtype: npt.DTypeLike | None = None, copy: bool = False, na_value: object = lib.no_default, ) -> np.ndarray: """ Convert the DataFrame to a NumPy array. By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the results dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray`. copy : bool, default False Whether to ensure that the returned value is not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. na_value : Any, optional The value to use for missing values. The default value depends on `dtype` and the dtypes of the DataFrame columns. .. versionadded:: 1.1.0 Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogeneous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. 
, 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) """ if dtype is not None: dtype = np.dtype(dtype) result = self._mgr.as_array(dtype=dtype, copy=copy, na_value=na_value) if result.dtype is not dtype: result = np.array(result, dtype=dtype, copy=False) return result def _create_data_for_split_and_tight_to_dict( self, are_all_object_dtype_cols: bool, object_dtype_indices: list[int] ) -> list: """ Simple helper method to create data for to ``to_dict(orient="split")`` and ``to_dict(orient="tight")`` to create the main output data """ if are_all_object_dtype_cols: data = [ list(map(maybe_box_native, t)) for t in self.itertuples(index=False, name=None) ] else: data = [list(t) for t in self.itertuples(index=False, name=None)] if object_dtype_indices: # If we have object_dtype_cols, apply maybe_box_naive after list # comprehension for perf for row in data: for i in object_dtype_indices: row[i] = maybe_box_native(row[i]) return data def to_dict( self, orient: Literal["dict", "list", "series", "split", "tight", "index"] = ..., into: type[dict] = ..., ) -> dict: ... def to_dict(self, orient: Literal["records"], into: type[dict] = ...) -> list[dict]: ... def to_dict( self, orient: Literal[ "dict", "list", "series", "split", "tight", "records", "index" ] = "dict", into: type[dict] = dict, index: bool = True, ) -> dict | list[dict]: """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'tight' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values], 'index_names' -> [index.names], 'column_names' -> [column.names]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. index : bool, default True Whether to include the index item (and index_names item if `orient` is 'tight') in the returned dictionary. Can only be ``False`` when `orient` is 'split' or 'tight'. .. versionadded:: 2.0.0 Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. See Also -------- DataFrame.from_dict: Create a DataFrame from a dictionary. DataFrame.to_json: Convert a DataFrame to JSON format. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df.to_dict() {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}} You can specify the return orientation. 
>>> df.to_dict('series') {'col1': row1 1 row2 2 Name: col1, dtype: int64, 'col2': row1 0.50 row2 0.75 Name: col2, dtype: float64} >>> df.to_dict('split') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]} >>> df.to_dict('records') [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}] >>> df.to_dict('index') {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} >>> df.to_dict('tight') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]], 'index_names': [None], 'column_names': [None]} You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}), defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})] """ from pandas.core.methods.to_dict import to_dict return to_dict(self, orient, into, index) def to_gbq( self, destination_table: str, project_id: str | None = None, chunksize: int | None = None, reauth: bool = False, if_exists: str = "fail", auth_local_webserver: bool = True, table_schema: list[dict[str, str]] | None = None, location: str | None = None, progress_bar: bool = True, credentials=None, ) -> None: """ Write a DataFrame to a Google BigQuery table. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. Parameters ---------- destination_table : str Name of table to be written, in the form ``dataset.tablename``. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. chunksize : int, optional Number of rows to be inserted in each chunk from the dataframe. Set to ``None`` to load the whole dataframe at once. reauth : bool, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. if_exists : str, default 'fail' Behavior when the destination table exists. Value can be one of: ``'fail'`` If table exists raise pandas_gbq.gbq.TableCreationError. ``'replace'`` If table exists, drop it, recreate it, and insert data. ``'append'`` If table exists, insert data. Create if does not exist. auth_local_webserver : bool, default True Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. _console flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. .. versionchanged:: 1.5.0 Default value is changed to ``True``. Google has deprecated the ``auth_local_webserver = False`` `"out of band" (copy-paste) flow <https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html?m=1#disallowed-oob>`_. table_schema : list of dicts, optional List of BigQuery table fields to which according DataFrame columns conform to, e.g. ``[{'name': 'col1', 'type': 'STRING'},...]``. 
If schema is not provided, it will be generated according to dtypes of DataFrame columns. See BigQuery API documentation on available names of a field. *New in version 0.3.1 of pandas-gbq*. location : str, optional Location where the load job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of the target dataset. *New in version 0.5.0 of pandas-gbq*. progress_bar : bool, default True Use the library `tqdm` to show the progress bar for the upload, chunk by chunk. *New in version 0.5.0 of pandas-gbq*. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. *New in version 0.8.0 of pandas-gbq*. See Also -------- pandas_gbq.to_gbq : This function in the pandas-gbq library. read_gbq : Read a DataFrame from Google BigQuery. """ from pandas.io import gbq gbq.to_gbq( self, destination_table, project_id=project_id, chunksize=chunksize, reauth=reauth, if_exists=if_exists, auth_local_webserver=auth_local_webserver, table_schema=table_schema, location=location, progress_bar=progress_bar, credentials=credentials, ) def from_records( cls, data, index=None, exclude=None, columns=None, coerce_float: bool = False, nrows: int | None = None, ) -> DataFrame: """ Convert structured or record ndarray to DataFrame. Creates a DataFrame object from a structured ndarray, sequence of tuples or dicts, or DataFrame. Parameters ---------- data : structured ndarray, sequence of tuples or dicts, or DataFrame Structured input data. index : str, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use. exclude : sequence, default None Columns or fields to exclude. columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns). coerce_float : bool, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. nrows : int, default None Number of rows to read if data is an iterator. Returns ------- DataFrame See Also -------- DataFrame.from_dict : DataFrame from dict of array-like or dicts. DataFrame : DataFrame object creation using constructor. Examples -------- Data can be provided as a structured ndarray: >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')], ... dtype=[('col_1', 'i4'), ('col_2', 'U1')]) >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of dicts: >>> data = [{'col_1': 3, 'col_2': 'a'}, ... {'col_1': 2, 'col_2': 'b'}, ... {'col_1': 1, 'col_2': 'c'}, ... 
{'col_1': 0, 'col_2': 'd'}] >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of tuples with corresponding columns: >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')] >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2']) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d """ if isinstance(data, DataFrame): if columns is not None: if is_scalar(columns): columns = [columns] data = data[columns] if index is not None: data = data.set_index(index) if exclude is not None: data = data.drop(columns=exclude) return data.copy(deep=False) result_index = None # Make a copy of the input columns so we can modify it if columns is not None: columns = ensure_index(columns) def maybe_reorder( arrays: list[ArrayLike], arr_columns: Index, columns: Index, index ) -> tuple[list[ArrayLike], Index, Index | None]: """ If our desired 'columns' do not match the data's pre-existing 'arr_columns', we re-order our arrays. This is like a pre-emptive (cheap) reindex. """ if len(arrays): length = len(arrays[0]) else: length = 0 result_index = None if len(arrays) == 0 and index is None and length == 0: result_index = default_index(0) arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, length) return arrays, arr_columns, result_index if is_iterator(data): if nrows == 0: return cls() try: first_row = next(data) except StopIteration: return cls(index=index, columns=columns) dtype = None if hasattr(first_row, "dtype") and first_row.dtype.names: dtype = first_row.dtype values = [first_row] if nrows is None: values += data else: values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) else: data = values if isinstance(data, dict): if columns is None: columns = arr_columns = ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] arr_columns_list = [] for k, v in data.items(): if k in columns: arr_columns_list.append(k) arrays.append(v) arr_columns = Index(arr_columns_list) arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = to_arrays(data, columns) arr_columns = columns else: arrays, arr_columns = to_arrays(data, columns) if coerce_float: for i, arr in enumerate(arrays): if arr.dtype == object: # error: Argument 1 to "maybe_convert_objects" has # incompatible type "Union[ExtensionArray, ndarray]"; # expected "ndarray" arrays[i] = lib.maybe_convert_objects( arr, # type: ignore[arg-type] try_float=True, ) arr_columns = ensure_index(arr_columns) if columns is None: columns = arr_columns else: arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) if exclude is None: exclude = set() else: exclude = set(exclude) if index is not None: if isinstance(index, str) or not hasattr(index, "__iter__"): i = columns.get_loc(index) exclude.add(index) if len(arrays) > 0: result_index = Index(arrays[i], name=index) else: result_index = Index([], name=index) else: try: index_data = [arrays[arr_columns.get_loc(field)] for field in index] except (KeyError, TypeError): # raised by get_loc, see GH#29258 result_index = index else: result_index = ensure_index_from_sequences(index_data, names=index) exclude.update(index) if any(exclude): arr_exclude = [x for x in exclude if x in arr_columns] to_remove = [arr_columns.get_loc(col) for col in arr_exclude] arrays = [v for i, v in enumerate(arrays) if i not in to_remove] columns = columns.drop(exclude) manager = 
get_option("mode.data_manager") mgr = arrays_to_mgr(arrays, columns, result_index, typ=manager) return cls(mgr) def to_records( self, index: bool = True, column_dtypes=None, index_dtypes=None ) -> np.recarray: """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. column_dtypes : str, type, dict, default None If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) If the DataFrame index has no label then the recarray field name is set to 'index'. If the index has a label then this is used as the field name: >>> df.index = df.index.rename("I") >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')]) As well as for the index: >>> df.to_records(index_dtypes="<S2") rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')]) >>> index_dtypes = f"<S{df.index.str.len().max()}" >>> df.to_records(index_dtypes=index_dtypes) rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) """ if index: ix_vals = [ np.asarray(self.index.get_level_values(i)) for i in range(self.index.nlevels) ] arrays = ix_vals + [ np.asarray(self.iloc[:, i]) for i in range(len(self.columns)) ] index_names = list(self.index.names) if isinstance(self.index, MultiIndex): index_names = com.fill_missing_names(index_names) elif index_names[0] is None: index_names = ["index"] names = [str(name) for name in itertools.chain(index_names, self.columns)] else: arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))] names = [str(c) for c in self.columns] index_names = [] index_len = len(index_names) formats = [] for i, v in enumerate(arrays): index_int = i # When the names and arrays are collected, we # first collect those in the DataFrame's index, # followed by those in its columns. # # Thus, the total length of the array is: # len(index_names) + len(DataFrame.columns). # # This check allows us to see whether we are # handling a name / array in the index or column. 
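            # Positions in [0, index_len) refer to index levels; anything at or
            # beyond index_len refers to a column after shifting by index_len.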
if index_int < index_len: dtype_mapping = index_dtypes name = index_names[index_int] else: index_int -= index_len dtype_mapping = column_dtypes name = self.columns[index_int] # We have a dictionary, so we get the data type # associated with the index or column (which can # be denoted by its name in the DataFrame or its # position in DataFrame's array of indices or # columns, whichever is applicable. if is_dict_like(dtype_mapping): if name in dtype_mapping: dtype_mapping = dtype_mapping[name] elif index_int in dtype_mapping: dtype_mapping = dtype_mapping[index_int] else: dtype_mapping = None # If no mapping can be found, use the array's # dtype attribute for formatting. # # A valid dtype must either be a type or # string naming a type. if dtype_mapping is None: formats.append(v.dtype) elif isinstance(dtype_mapping, (type, np.dtype, str)): # error: Argument 1 to "append" of "list" has incompatible # type "Union[type, dtype[Any], str]"; expected "dtype[Any]" formats.append(dtype_mapping) # type: ignore[arg-type] else: element = "row" if i < index_len else "column" msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}" raise ValueError(msg) return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats}) def _from_arrays( cls, arrays, columns, index, dtype: Dtype | None = None, verify_integrity: bool = True, ) -> DataFrame: """ Create DataFrame from a list of arrays corresponding to the columns. Parameters ---------- arrays : list-like of arrays Each array in the list corresponds to one column, in order. columns : list-like, Index The column names for the resulting DataFrame. index : list-like, Index The rows labels for the resulting DataFrame. dtype : dtype, optional Optional dtype to enforce for all arrays. verify_integrity : bool, default True Validate and homogenize all input. If set to False, it is assumed that all elements of `arrays` are actual arrays how they will be stored in a block (numpy ndarray or ExtensionArray), have the same length as and are aligned with the index, and that `columns` and `index` are ensured to be an Index object. Returns ------- DataFrame """ if dtype is not None: dtype = pandas_dtype(dtype) manager = get_option("mode.data_manager") columns = ensure_index(columns) if len(columns) != len(arrays): raise ValueError("len(columns) must match len(arrays)") mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, verify_integrity=verify_integrity, typ=manager, ) return cls(mgr) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path", ) def to_stata( self, path: FilePath | WriteBuffer[bytes], *, convert_dates: dict[Hashable, str] | None = None, write_index: bool = True, byteorder: str | None = None, time_stamp: datetime.datetime | None = None, data_label: str | None = None, variable_labels: dict[Hashable, str] | None = None, version: int | None = 114, convert_strl: Sequence[Hashable] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, value_labels: dict[Hashable, dict[float, str]] | None = None, ) -> None: """ Export DataFrame object to Stata dta format. Writes the DataFrame to a Stata dataset file. "dta" files contain a Stata dataset. Parameters ---------- path : str, path object, or buffer String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. 
Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information. write_index : bool Write the index to Stata dataset. byteorder : str Can be ">", "<", "little", or "big". default is `sys.byteorder`. time_stamp : datetime A datetime to use as file creation date. Default is the current time. data_label : str, optional A label for the data set. Must be 80 characters or smaller. variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. version : {{114, 117, 118, 119, None}}, default 114 Version to use in the output dta file. Set to None to let pandas decide between 118 or 119 formats depending on the number of columns in the frame. Version 114 can be read by Stata 10 and later. Version 117 can be read by Stata 13 or later. Version 118 is supported in Stata 14 and later. Version 119 is supported in Stata 15 and later. Version 114 limits string variables to 244 characters or fewer while versions 117 and later allow strings with lengths up to 2,000,000 characters. Versions 118 and 119 support Unicode characters, and version 119 supports more than 32,767 variables. Version 119 should usually only be used when the number of variables exceeds the capacity of dta format 118. Exporting smaller datasets in format 119 may have unintended consequences, and, as of November 2020, Stata SE cannot read version 119 files. convert_strl : list, optional List of column names to convert to string columns to Stata StrL format. Only available if version is 117. Storing strings in the StrL format can produce smaller dta files if strings have more than 8 characters and values are repeated. {compression_options} .. versionadded:: 1.1.0 .. versionchanged:: 1.4.0 Zstandard support. {storage_options} .. versionadded:: 1.2.0 value_labels : dict of dicts Dictionary containing columns as keys and dictionaries of column value to labels as values. Labels for a single variable must be 32,000 characters or smaller. .. versionadded:: 1.4.0 Raises ------ NotImplementedError * If datetimes contain timezone information * Column dtype is not representable in Stata ValueError * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters See Also -------- read_stata : Import Stata data files. io.stata.StataWriter : Low-level writer for Stata data files. io.stata.StataWriter117 : Low-level writer for version 117 files. Examples -------- >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], ... 
'speed': [350, 18, 361, 15]}}) >>> df.to_stata('animals.dta') # doctest: +SKIP """ if version not in (114, 117, 118, 119, None): raise ValueError("Only formats 114, 117, 118 and 119 are supported.") if version == 114: if convert_strl is not None: raise ValueError("strl is not supported in format 114") from pandas.io.stata import StataWriter as statawriter elif version == 117: # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriter117 as statawriter, ) else: # versions 118 and 119 # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriterUTF8 as statawriter, ) kwargs: dict[str, Any] = {} if version is None or version >= 117: # strl conversion is only supported >= 117 kwargs["convert_strl"] = convert_strl if version is None or version >= 118: # Specifying the version is only supported for UTF8 (118 or 119) kwargs["version"] = version writer = statawriter( path, self, convert_dates=convert_dates, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, write_index=write_index, variable_labels=variable_labels, compression=compression, storage_options=storage_options, value_labels=value_labels, **kwargs, ) writer.write_file() def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None: """ Write a DataFrame to the binary Feather format. Parameters ---------- path : str, path object, file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. If a string or a path, it will be used as Root Directory path when writing a partitioned dataset. **kwargs : Additional keywords passed to :func:`pyarrow.feather.write_feather`. Starting with pyarrow 0.17, this includes the `compression`, `compression_level`, `chunksize` and `version` keywords. .. versionadded:: 1.1.0 Notes ----- This function writes the dataframe as a `feather file <https://arrow.apache.org/docs/python/feather.html>`_. Requires a default index. For saving the DataFrame with your custom index use a method that supports custom indices e.g. `to_parquet`. """ from pandas.io.feather_format import to_feather to_feather(self, path, **kwargs) Series.to_markdown, klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples="""Examples -------- >>> df = pd.DataFrame( ... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]} ... ) >>> print(df.to_markdown()) | | animal_1 | animal_2 | |---:|:-----------|:-----------| | 0 | elk | dog | | 1 | pig | quetzal | Output markdown with a tabulate option. 
>>> print(df.to_markdown(tablefmt="grid")) +----+------------+------------+ | | animal_1 | animal_2 | +====+============+============+ | 0 | elk | dog | +----+------------+------------+ | 1 | pig | quetzal | +----+------------+------------+""", ) def to_markdown( self, buf: FilePath | WriteBuffer[str] | None = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions = None, **kwargs, ) -> str | None: if "showindex" in kwargs: raise ValueError("Pass 'index' instead of 'showindex") kwargs.setdefault("headers", "keys") kwargs.setdefault("tablefmt", "pipe") kwargs.setdefault("showindex", index) tabulate = import_optional_dependency("tabulate") result = tabulate.tabulate(self, **kwargs) if buf is None: return result with get_handle(buf, mode, storage_options=storage_options) as handles: handles.handle.write(result) return None def to_parquet( self, path: None = ..., engine: str = ..., compression: str | None = ..., index: bool | None = ..., partition_cols: list[str] | None = ..., storage_options: StorageOptions = ..., **kwargs, ) -> bytes: ... def to_parquet( self, path: FilePath | WriteBuffer[bytes], engine: str = ..., compression: str | None = ..., index: bool | None = ..., partition_cols: list[str] | None = ..., storage_options: StorageOptions = ..., **kwargs, ) -> None: ... def to_parquet( self, path: FilePath | WriteBuffer[bytes] | None = None, engine: str = "auto", compression: str | None = "snappy", index: bool | None = None, partition_cols: list[str] | None = None, storage_options: StorageOptions = None, **kwargs, ) -> bytes | None: """ Write a DataFrame to the binary parquet format. This function writes the dataframe as a `parquet file <https://parquet.apache.org/>`_. You can choose different parquet backends, and have the option of compression. See :ref:`the user guide <io.parquet>` for more details. Parameters ---------- path : str, path object, file-like object, or None, default None String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. If None, the result is returned as bytes. If a string or path, it will be used as Root Directory path when writing a partitioned dataset. .. versionchanged:: 1.2.0 Previously this was "fname" engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. index : bool, default None If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``True`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. partition_cols : list, optional, default None Column names by which to partition the dataset. Columns are partitioned in the order they are given. Must be None if path is not a string. {storage_options} .. versionadded:: 1.2.0 **kwargs Additional arguments passed to the parquet library. See :ref:`pandas io <io.parquet>` for more details. Returns ------- bytes if no path argument is provided else None See Also -------- read_parquet : Read a parquet file. 
DataFrame.to_orc : Write an orc file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- This function requires either the `fastparquet <https://pypi.org/project/fastparquet>`_ or `pyarrow <https://arrow.apache.org/docs/python/>`_ library. Examples -------- >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}}) >>> df.to_parquet('df.parquet.gzip', ... compression='gzip') # doctest: +SKIP >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP col1 col2 0 1 3 1 2 4 If you want to get a buffer to the parquet content you can use a io.BytesIO object, as long as you don't use partition_cols, which creates multiple files. >>> import io >>> f = io.BytesIO() >>> df.to_parquet(f) >>> f.seek(0) 0 >>> content = f.read() """ from pandas.io.parquet import to_parquet return to_parquet( self, path, engine, compression=compression, index=index, partition_cols=partition_cols, storage_options=storage_options, **kwargs, ) def to_orc( self, path: FilePath | WriteBuffer[bytes] | None = None, *, engine: Literal["pyarrow"] = "pyarrow", index: bool | None = None, engine_kwargs: dict[str, Any] | None = None, ) -> bytes | None: """ Write a DataFrame to the ORC format. .. versionadded:: 1.5.0 Parameters ---------- path : str, file-like object or None, default None If a string, it will be used as Root Directory path when writing a partitioned dataset. By file-like object, we refer to objects with a write() method, such as a file handle (e.g. via builtin open function). If path is None, a bytes object is returned. engine : str, default 'pyarrow' ORC library to use. Pyarrow must be >= 7.0.0. index : bool, optional If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``infer`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. engine_kwargs : dict[str, Any] or None, default None Additional keyword arguments passed to :func:`pyarrow.orc.write_table`. Returns ------- bytes if no path argument is provided else None Raises ------ NotImplementedError Dtype of one or more columns is category, unsigned integers, interval, period or sparse. ValueError engine is not pyarrow. See Also -------- read_orc : Read a ORC file. DataFrame.to_parquet : Write a parquet file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- * Before using this function you should read the :ref:`user guide about ORC <io.orc>` and :ref:`install optional dependencies <install.warn_orc>`. * This function requires `pyarrow <https://arrow.apache.org/docs/python/>`_ library. * For supported dtypes please refer to `supported ORC features in Arrow <https://arrow.apache.org/docs/cpp/orc.html#data-types>`__. * Currently timezones in datetime columns are not preserved when a dataframe is converted into ORC files. 
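* As with :meth:`DataFrame.to_parquet`, passing ``path=None`` returns the
  serialized bytes instead of writing a file; see the ``io.BytesIO``
  example below.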
Examples -------- >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [4, 3]}) >>> df.to_orc('df.orc') # doctest: +SKIP >>> pd.read_orc('df.orc') # doctest: +SKIP col1 col2 0 1 4 1 2 3 If you want to get a buffer to the orc content you can write it to io.BytesIO >>> import io >>> b = io.BytesIO(df.to_orc()) # doctest: +SKIP >>> b.seek(0) # doctest: +SKIP 0 >>> content = b.read() # doctest: +SKIP """ from pandas.io.orc import to_orc return to_orc( self, path, engine=engine, index=index, engine_kwargs=engine_kwargs ) def to_html( self, buf: FilePath | WriteBuffer[str], columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> None: ... def to_html( self, buf: None = ..., columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> str: ... header_type="bool", header="Whether to print column labels, default True", col_space_type="str or int, list or dict of int or str", col_space="The minimum width of each column in CSS length " "units. An int is assumed to be px units.", ) def to_html( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[Level] | None = None, col_space: ColspaceArgType | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: FormattersType | None = None, float_format: FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool | str = False, decimal: str = ".", bold_rows: bool = True, classes: str | list | tuple | None = None, escape: bool = True, notebook: bool = False, border: int | bool | None = None, table_id: str | None = None, render_links: bool = False, encoding: str | None = None, ) -> str | None: """ Render a DataFrame as an HTML table. %(shared_params)s bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.display.html.border``. 
table_id : str, optional A css id is included in the opening `<table>` tag if specified. render_links : bool, default False Convert URLs to HTML links. encoding : str, default "utf-8" Set character encoding. .. versionadded:: 1.0 %(returns)s See Also -------- to_string : Convert DataFrame to a string. """ if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS: raise ValueError("Invalid value for justify parameter") formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, header=header, index=index, formatters=formatters, float_format=float_format, bold_rows=bold_rows, sparsify=sparsify, justify=justify, index_names=index_names, escape=escape, decimal=decimal, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, ) # TODO: a generic formatter wld b in DataFrameFormatter return fmt.DataFrameRenderer(formatter).to_html( buf=buf, classes=classes, notebook=notebook, border=border, encoding=encoding, table_id=table_id, render_links=render_links, ) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buffer", ) def to_xml( self, path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, index: bool = True, root_name: str | None = "data", row_name: str | None = "row", na_rep: str | None = None, attr_cols: list[str] | None = None, elem_cols: list[str] | None = None, namespaces: dict[str | None, str] | None = None, prefix: str | None = None, encoding: str = "utf-8", xml_declaration: bool | None = True, pretty_print: bool | None = True, parser: str | None = "lxml", stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ) -> str | None: """ Render a DataFrame to an XML document. .. versionadded:: 1.3.0 Parameters ---------- path_or_buffer : str, path object, file-like object, or None, default None String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a ``write()`` function. If None, the result is returned as a string. index : bool, default True Whether to include index in XML document. root_name : str, default 'data' The name of root element in XML document. row_name : str, default 'row' The name of row element in XML document. na_rep : str, optional Missing data representation. attr_cols : list-like, optional List of columns to write as attributes in row element. Hierarchical columns will be flattened with underscore delimiting the different levels. elem_cols : list-like, optional List of columns to write as children in row element. By default, all columns output as children of row element. Hierarchical columns will be flattened with underscore delimiting the different levels. namespaces : dict, optional All namespaces to be defined in root element. Keys of dict should be prefix names and values of dict corresponding URIs. Default namespaces should be given empty string key. For example, :: namespaces = {{"": "https://example.com"}} prefix : str, optional Namespace prefix to be used for every element and/or attribute in document. This should be one of the keys in ``namespaces`` dict. encoding : str, default 'utf-8' Encoding of the resulting document. xml_declaration : bool, default True Whether to include the XML declaration at start of document. pretty_print : bool, default True Whether output should be pretty printed with indentation and line breaks. 
parser : {{'lxml','etree'}}, default 'lxml' Parser module to use for building of tree. Only 'lxml' and 'etree' are supported. With 'lxml', the ability to use XSLT stylesheet is supported. stylesheet : str, path object or file-like object, optional A URL, file-like object, or a raw string containing an XSLT script used to transform the raw XML output. Script should use layout of elements and attributes from original output. This argument requires ``lxml`` to be installed. Only XSLT 1.0 scripts and not later versions is currently supported. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. {storage_options} Returns ------- None or str If ``io`` is None, returns the resulting XML format as a string. Otherwise returns None. See Also -------- to_json : Convert the pandas object to a JSON string. to_html : Convert DataFrame to a html. Examples -------- >>> df = pd.DataFrame({{'shape': ['square', 'circle', 'triangle'], ... 'degrees': [360, 360, 180], ... 'sides': [4, np.nan, 3]}}) >>> df.to_xml() # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row> <index>0</index> <shape>square</shape> <degrees>360</degrees> <sides>4.0</sides> </row> <row> <index>1</index> <shape>circle</shape> <degrees>360</degrees> <sides/> </row> <row> <index>2</index> <shape>triangle</shape> <degrees>180</degrees> <sides>3.0</sides> </row> </data> >>> df.to_xml(attr_cols=[ ... 'index', 'shape', 'degrees', 'sides' ... ]) # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row index="0" shape="square" degrees="360" sides="4.0"/> <row index="1" shape="circle" degrees="360"/> <row index="2" shape="triangle" degrees="180" sides="3.0"/> </data> >>> df.to_xml(namespaces={{"doc": "https://example.com"}}, ... prefix="doc") # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <doc:data xmlns:doc="https://example.com"> <doc:row> <doc:index>0</doc:index> <doc:shape>square</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides>4.0</doc:sides> </doc:row> <doc:row> <doc:index>1</doc:index> <doc:shape>circle</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides/> </doc:row> <doc:row> <doc:index>2</doc:index> <doc:shape>triangle</doc:shape> <doc:degrees>180</doc:degrees> <doc:sides>3.0</doc:sides> </doc:row> </doc:data> """ from pandas.io.formats.xml import ( EtreeXMLFormatter, LxmlXMLFormatter, ) lxml = import_optional_dependency("lxml.etree", errors="ignore") TreeBuilder: type[EtreeXMLFormatter] | type[LxmlXMLFormatter] if parser == "lxml": if lxml is not None: TreeBuilder = LxmlXMLFormatter else: raise ImportError( "lxml not found, please install or use the etree parser." 
) elif parser == "etree": TreeBuilder = EtreeXMLFormatter else: raise ValueError("Values for parser can only be lxml or etree.") xml_formatter = TreeBuilder( self, path_or_buffer=path_or_buffer, index=index, root_name=root_name, row_name=row_name, na_rep=na_rep, attr_cols=attr_cols, elem_cols=elem_cols, namespaces=namespaces, prefix=prefix, encoding=encoding, xml_declaration=xml_declaration, pretty_print=pretty_print, stylesheet=stylesheet, compression=compression, storage_options=storage_options, ) return xml_formatter.write_output() # ---------------------------------------------------------------------- def info( self, verbose: bool | None = None, buf: WriteBuffer[str] | None = None, max_cols: int | None = None, memory_usage: bool | str | None = None, show_counts: bool | None = None, ) -> None: info = DataFrameInfo( data=self, memory_usage=memory_usage, ) info.render( buf=buf, max_cols=max_cols, verbose=verbose, show_counts=show_counts, ) def memory_usage(self, index: bool = True, deep: bool = False) -> Series: """ Return the memory usage of each column in bytes. The memory usage can optionally include the contribution of the index and elements of `object` dtype. This value is displayed in `DataFrame.info` by default. This can be suppressed by setting ``pandas.options.display.memory_usage`` to False. Parameters ---------- index : bool, default True Specifies whether to include the memory usage of the DataFrame's index in returned Series. If ``index=True``, the memory usage of the index is the first item in the output. deep : bool, default False If True, introspect the data deeply by interrogating `object` dtypes for system-level memory consumption, and include it in the returned values. Returns ------- Series A Series whose index is the original column names and whose values is the memory usage of each column in bytes. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of an ndarray. Series.memory_usage : Bytes consumed by a Series. Categorical : Memory-efficient array for string values with many repeated values. DataFrame.info : Concise summary of a DataFrame. Notes ----- See the :ref:`Frequently Asked Questions <df-memory-usage>` for more details. Examples -------- >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t)) ... for t in dtypes]) >>> df = pd.DataFrame(data) >>> df.head() int64 float64 complex128 object bool 0 1 1.0 1.0+0.0j 1 True 1 1 1.0 1.0+0.0j 1 True 2 1 1.0 1.0+0.0j 1 True 3 1 1.0 1.0+0.0j 1 True 4 1 1.0 1.0+0.0j 1 True >>> df.memory_usage() Index 128 int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=False) int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 The memory footprint of `object` dtype columns is ignored by default: >>> df.memory_usage(deep=True) Index 128 int64 40000 float64 40000 complex128 80000 object 180000 bool 5000 dtype: int64 Use a Categorical for efficient storage of an object-dtype column with many repeated values. 
>>> df['object'].astype('category').memory_usage(deep=True) 5244 """ result = self._constructor_sliced( [c.memory_usage(index=False, deep=deep) for col, c in self.items()], index=self.columns, dtype=np.intp, ) if index: index_memory_usage = self._constructor_sliced( self.index.memory_usage(deep=deep), index=["Index"] ) result = index_memory_usage._append(result) return result def transpose(self, *args, copy: bool = False) -> DataFrame: """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. Parameters ---------- *args : tuple, optional Accepted for compatibility with NumPy. copy : bool, default False Whether to copy the data after transposing, even for DataFrames with a single dtype. Note that a copy is always required for mixed dtype DataFrames, or for DataFrames with any extension types. Returns ------- DataFrame The transposed DataFrame. See Also -------- numpy.transpose : Permute the dimensions of a given array. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the `object` dtype. In such a case, a copy of the data is always made. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = pd.DataFrame(data=d1) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T # or df1.transpose() >>> df1_transposed 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'name': ['Alice', 'Bob'], ... 'score': [9.5, 8], ... 'employed': [False, True], ... 'kids': [0, 0]} >>> df2 = pd.DataFrame(data=d2) >>> df2 name score employed kids 0 Alice 9.5 False 0 1 Bob 8.0 True 0 >>> df2_transposed = df2.T # or df2.transpose() >>> df2_transposed 0 1 name Alice Bob score 9.5 8.0 employed False True kids 0 0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the `object` dtype: >>> df2.dtypes name object score float64 employed bool kids int64 dtype: object >>> df2_transposed.dtypes 0 object 1 object dtype: object """ nv.validate_transpose(args, {}) # construct the args dtypes = list(self.dtypes) if self._can_fast_transpose: # Note: tests pass without this, but this improves perf quite a bit. new_vals = self._values.T if copy and not using_copy_on_write(): new_vals = new_vals.copy() result = self._constructor( new_vals, index=self.columns, columns=self.index, copy=False ) if using_copy_on_write() and len(self) > 0: result._mgr.add_references(self._mgr) # type: ignore[arg-type] elif ( self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]) ): # We have EAs with the same dtype. We can preserve that dtype in transpose. dtype = dtypes[0] arr_type = dtype.construct_array_type() values = self.values new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values] result = type(self)._from_arrays( new_values, index=self.columns, columns=self.index ) else: new_arr = self.values.T if copy and not using_copy_on_write(): new_arr = new_arr.copy() result = self._constructor( new_arr, index=self.columns, columns=self.index, # We already made a copy (more than one block) copy=False, ) return result.__finalize__(self, method="transpose") def T(self) -> DataFrame: """ The transpose of the DataFrame. 
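``T`` is an accessor for :meth:`DataFrame.transpose`; call
``transpose(copy=True)`` directly when an explicit copy of the data is
required.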
Returns ------- DataFrame The transposed DataFrame. See Also -------- DataFrame.transpose : Transpose index and columns. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.T 0 1 col1 1 2 col2 3 4 """ return self.transpose() # ---------------------------------------------------------------------- # Indexing Methods def _ixs(self, i: int, axis: AxisInt = 0) -> Series: """ Parameters ---------- i : int axis : int Returns ------- Series """ # irow if axis == 0: new_mgr = self._mgr.fast_xs(i) # if we are a copy, mark as such copy = isinstance(new_mgr.array, np.ndarray) and new_mgr.array.base is None result = self._constructor_sliced(new_mgr, name=self.index[i]).__finalize__( self ) result._set_is_copy(self, copy=copy) return result # icol else: label = self.columns[i] col_mgr = self._mgr.iget(i) result = self._box_col_values(col_mgr, i) # this is a cached value, mark it so result._set_as_cached(label, self) return result def _get_column_array(self, i: int) -> ArrayLike: """ Get the values of the i'th column (ndarray or ExtensionArray, as stored in the Block) Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ return self._mgr.iget_values(i) def _iter_column_arrays(self) -> Iterator[ArrayLike]: """ Iterate over the arrays of all columns in order. This returns the values as stored in the Block (ndarray or ExtensionArray). Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ for i in range(len(self.columns)): yield self._get_column_array(i) def _getitem_nocopy(self, key: list): """ Behaves like __getitem__, but returns a view in cases where __getitem__ would make a copy. """ # TODO(CoW): can be removed if/when we are always Copy-on-Write indexer = self.columns._get_indexer_strict(key, "columns")[1] new_axis = self.columns[indexer] new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=0, allow_dups=True, copy=False, only_slice=True, ) return self._constructor(new_mgr) def __getitem__(self, key): check_dict_or_set_indexers(key) key = lib.item_from_zerodim(key) key = com.apply_if_callable(key, self) if is_hashable(key) and not is_iterator(key): # is_iterator to exclude generator e.g. test_getitem_listlike # shortcut if the key is in columns is_mi = isinstance(self.columns, MultiIndex) # GH#45316 Return view if key is not duplicated # Only use drop_duplicates with duplicates for performance if not is_mi and ( self.columns.is_unique and key in self.columns or key in self.columns.drop_duplicates(keep=False) ): return self._get_item_cache(key) elif is_mi and self.columns.is_unique and key in self.columns: return self._getitem_multilevel(key) # Do we have a slicer (on rows)? if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") if isinstance(indexer, np.ndarray): # reachable with DatetimeIndex indexer = lib.maybe_indices_to_slice( indexer.astype(np.intp, copy=False), len(self) ) if isinstance(indexer, np.ndarray): # GH#43223 If we can not convert, use take return self.take(indexer, axis=0) return self._slice(indexer, axis=0) # Do we have a (boolean) DataFrame? if isinstance(key, DataFrame): return self.where(key) # Do we have a (boolean) 1d indexer? 
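# (illustrative: e.g. ``df[df["A"] > 0]``, i.e. a boolean Series or
# array aligned against the row index)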
if com.is_bool_indexer(key): return self._getitem_bool_array(key) # We are left with two options: a single key, and a collection of keys, # We interpret tuples as collections only for non-MultiIndex is_single_key = isinstance(key, tuple) or not is_list_like(key) if is_single_key: if self.columns.nlevels > 1: return self._getitem_multilevel(key) indexer = self.columns.get_loc(key) if is_integer(indexer): indexer = [indexer] else: if is_iterator(key): key = list(key) indexer = self.columns._get_indexer_strict(key, "columns")[1] # take() does not accept boolean indexers if getattr(indexer, "dtype", None) == bool: indexer = np.where(indexer)[0] data = self._take_with_is_copy(indexer, axis=1) if is_single_key: # What does looking for a single key in a non-unique index return? # The behavior is inconsistent. It returns a Series, except when # - the key itself is repeated (test on data.shape, #9519), or # - we have a MultiIndex on columns (test on self.columns, #21309) if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex): # GH#26490 using data[key] can cause RecursionError return data._get_item_cache(key) return data def _getitem_bool_array(self, key): # also raises Exception if object array with NA values # warning here just in case -- previously __setitem__ was # reindexing but __getitem__ was not; it seems more reasonable to # go with the __setitem__ behavior since that is more consistent # with all other indexing behavior if isinstance(key, Series) and not key.index.equals(self.index): warnings.warn( "Boolean Series key will be reindexed to match DataFrame index.", UserWarning, stacklevel=find_stack_level(), ) elif len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}." ) # check_bool_indexer will throw exception if Series key cannot # be reindexed to match DataFrame rows key = check_bool_indexer(self.index, key) if key.all(): return self.copy(deep=None) indexer = key.nonzero()[0] return self._take_with_is_copy(indexer, axis=0) def _getitem_multilevel(self, key): # self.columns is a MultiIndex loc = self.columns.get_loc(key) if isinstance(loc, (slice, np.ndarray)): new_columns = self.columns[loc] result_columns = maybe_droplevels(new_columns, key) if self._is_mixed_type: result = self.reindex(columns=new_columns) result.columns = result_columns else: new_values = self._values[:, loc] result = self._constructor( new_values, index=self.index, columns=result_columns, copy=False ) if using_copy_on_write() and isinstance(loc, slice): result._mgr.add_references(self._mgr) # type: ignore[arg-type] result = result.__finalize__(self) # If there is only one column being returned, and its name is # either an empty string, or a tuple with an empty string as its # first element, then treat the empty string as a placeholder # and return the column as if the user had provided that empty # string in the key. If the result is a Series, exclude the # implied empty string from its name. if len(result.columns) == 1: # e.g. 
test_frame_getitem_multicolumn_empty_level, # test_frame_mixed_depth_get, test_loc_setitem_single_column_slice top = result.columns[0] if isinstance(top, tuple): top = top[0] if top == "": result = result[""] if isinstance(result, Series): result = self._constructor_sliced( result, index=self.index, name=key ) result._set_is_copy(self) return result else: # loc is neither a slice nor ndarray, so must be an int return self._ixs(loc, axis=1) def _get_value(self, index, col, takeable: bool = False) -> Scalar: """ Quickly retrieve single value at passed column and index. Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- scalar Notes ----- Assumes that both `self.index._index_as_unique` and `self.columns._index_as_unique`; Caller is responsible for checking. """ if takeable: series = self._ixs(col, axis=1) return series._values[index] series = self._get_item_cache(col) engine = self.index._engine if not isinstance(self.index, MultiIndex): # CategoricalIndex: Trying to use the engine fastpath may give incorrect # results if our categories are integers that dont match our codes # IntervalIndex: IntervalTree has no get_loc row = self.index.get_loc(index) return series._values[row] # For MultiIndex going through engine effectively restricts us to # same-length tuples; see test_get_set_value_no_partial_indexing loc = engine.get_loc(index) return series._values[loc] def isetitem(self, loc, value) -> None: """ Set the given value in the column with position `loc`. This is a positional analogue to ``__setitem__``. Parameters ---------- loc : int or sequence of ints Index position for the column. value : scalar or arraylike Value(s) for the column. Notes ----- ``frame.isetitem(loc, value)`` is an in-place method as it will modify the DataFrame in place (not returning a new object). In contrast to ``frame.iloc[:, i] = value`` which will try to update the existing values in place, ``frame.isetitem(loc, value)`` will not update the values of the column itself in place, it will instead insert a new array. In cases where ``frame.columns`` is unique, this is equivalent to ``frame[frame.columns[i]] = value``. 
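
Examples
--------
A minimal sketch of positional assignment (the frame and values
below are illustrative only):

>>> df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
>>> df.isetitem(1, [9, 9])
>>> df
   A  B
0  1  9
1  2  9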
""" if isinstance(value, DataFrame): if is_scalar(loc): loc = [loc] for i, idx in enumerate(loc): arraylike = self._sanitize_column(value.iloc[:, i]) self._iset_item_mgr(idx, arraylike, inplace=False) return arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=False) def __setitem__(self, key, value): if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) key = com.apply_if_callable(key, self) # see if we can slice the rows if isinstance(key, slice): slc = self.index._convert_slice_indexer(key, kind="getitem") return self._setitem_slice(slc, value) if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2: self._setitem_frame(key, value) elif isinstance(key, (Series, np.ndarray, list, Index)): self._setitem_array(key, value) elif isinstance(value, DataFrame): self._set_item_frame_value(key, value) elif ( is_list_like(value) and not self.columns.is_unique and 1 < len(self.columns.get_indexer_for([key])) == len(value) ): # Column to set is duplicated self._setitem_array([key], value) else: # set column self._set_item(key, value) def _setitem_slice(self, key: slice, value) -> None: # NB: we can't just use self.loc[key] = value because that # operates on labels and we need to operate positional for # backwards-compat, xref GH#31469 self._check_setitem_copy() self.iloc[key] = value def _setitem_array(self, key, value): # also raises Exception if object array with NA values if com.is_bool_indexer(key): # bool indexer is indexing along rows if len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}!" ) key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] self._check_setitem_copy() if isinstance(value, DataFrame): # GH#39931 reindex since iloc does not align value = value.reindex(self.index.take(indexer)) self.iloc[indexer] = value else: # Note: unlike self.iloc[:, indexer] = value, this will # never try to overwrite values inplace if isinstance(value, DataFrame): check_key_length(self.columns, key, value) for k1, k2 in zip(key, value.columns): self[k1] = value[k2] elif not is_list_like(value): for col in key: self[col] = value elif isinstance(value, np.ndarray) and value.ndim == 2: self._iset_not_inplace(key, value) elif np.ndim(value) > 1: # list of lists value = DataFrame(value).values return self._setitem_array(key, value) else: self._iset_not_inplace(key, value) def _iset_not_inplace(self, key, value): # GH#39510 when setting with df[key] = obj with a list-like key and # list-like value, we iterate over those listlikes and set columns # one at a time. This is different from dispatching to # `self.loc[:, key]= value` because loc.__setitem__ may overwrite # data inplace, whereas this will insert new arrays. def igetitem(obj, i: int): # Note: we catch DataFrame obj before getting here, but # hypothetically would return obj.iloc[:, i] if isinstance(obj, np.ndarray): return obj[..., i] else: return obj[i] if self.columns.is_unique: if np.shape(value)[-1] != len(key): raise ValueError("Columns must be same length as key") for i, col in enumerate(key): self[col] = igetitem(value, i) else: ilocs = self.columns.get_indexer_non_unique(key)[0] if (ilocs < 0).any(): # key entries not in self.columns raise NotImplementedError if np.shape(value)[-1] != len(ilocs): raise ValueError("Columns must be same length as key") assert np.ndim(value) <= 2 orig_columns = self.columns # Using self.iloc[:, i] = ... 
may set values inplace, which # by convention we do not do in __setitem__ try: self.columns = Index(range(len(self.columns))) for i, iloc in enumerate(ilocs): self[iloc] = igetitem(value, i) finally: self.columns = orig_columns def _setitem_frame(self, key, value): # support boolean setting with DataFrame input, e.g. # df[df > df2] = 0 if isinstance(key, np.ndarray): if key.shape != self.shape: raise ValueError("Array conditional must be same shape as self") key = self._constructor(key, **self._construct_axes_dict(), copy=False) if key.size and not all(is_bool_dtype(dtype) for dtype in key.dtypes): raise TypeError( "Must pass DataFrame or 2-d ndarray with boolean values only" ) self._check_inplace_setting(value) self._check_setitem_copy() self._where(-key, value, inplace=True) def _set_item_frame_value(self, key, value: DataFrame) -> None: self._ensure_valid_index(value) # align columns if key in self.columns: loc = self.columns.get_loc(key) cols = self.columns[loc] len_cols = 1 if is_scalar(cols) or isinstance(cols, tuple) else len(cols) if len_cols != len(value.columns): raise ValueError("Columns must be same length as key") # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame if isinstance(self.columns, MultiIndex) and isinstance( loc, (slice, Series, np.ndarray, Index) ): cols_droplevel = maybe_droplevels(cols, key) if len(cols_droplevel) and not cols_droplevel.equals(value.columns): value = value.reindex(cols_droplevel, axis=1) for col, col_droplevel in zip(cols, cols_droplevel): self[col] = value[col_droplevel] return if is_scalar(cols): self[cols] = value[value.columns[0]] return # now align rows arraylike = _reindex_for_setitem(value, self.index) self._set_item_mgr(key, arraylike) return if len(value.columns) != 1: raise ValueError( "Cannot set a DataFrame with multiple columns to the single " f"column {key}" ) self[key] = value[value.columns[0]] def _iset_item_mgr( self, loc: int | slice | np.ndarray, value, inplace: bool = False ) -> None: # when called from _set_item_mgr loc can be anything returned from get_loc self._mgr.iset(loc, value, inplace=inplace) self._clear_item_cache() def _set_item_mgr(self, key, value: ArrayLike) -> None: try: loc = self._info_axis.get_loc(key) except KeyError: # This item wasn't present, just insert at end self._mgr.insert(len(self._info_axis), key, value) else: self._iset_item_mgr(loc, value) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _iset_item(self, loc: int, value) -> None: arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=True) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _set_item(self, key, value) -> None: """ Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. 
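
A small sketch of that alignment (example frame and labels are
illustrative): assigning a Series with a permuted index reorders the
values by label rather than by position.

>>> df = pd.DataFrame({"A": [1, 2]}, index=["x", "y"])
>>> df["B"] = pd.Series([10, 20], index=["y", "x"])
>>> df
   A   B
x  1  20
y  2  10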
""" value = self._sanitize_column(value) if ( key in self.columns and value.ndim == 1 and not is_extension_array_dtype(value) ): # broadcast across multiple columns if necessary if not self.columns.is_unique or isinstance(self.columns, MultiIndex): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)).T self._set_item_mgr(key, value) def _set_value( self, index: IndexLabel, col, value: Scalar, takeable: bool = False ) -> None: """ Put single value at passed column and index. Parameters ---------- index : Label row label col : Label column label value : scalar takeable : bool, default False Sets whether or not index/col interpreted as indexers """ try: if takeable: icol = col iindex = cast(int, index) else: icol = self.columns.get_loc(col) iindex = self.index.get_loc(index) self._mgr.column_setitem(icol, iindex, value, inplace_only=True) self._clear_item_cache() except (KeyError, TypeError, ValueError, LossySetitemError): # get_loc might raise a KeyError for missing labels (falling back # to (i)loc will do expansion of the index) # column_setitem will do validation that may raise TypeError, # ValueError, or LossySetitemError # set using a non-recursive method & reset the cache if takeable: self.iloc[index, col] = value else: self.loc[index, col] = value self._item_cache.pop(col, None) except InvalidIndexError as ii_err: # GH48729: Seems like you are trying to assign a value to a # row when only scalar options are permitted raise InvalidIndexError( f"You can only assign a scalar value not a {type(value)}" ) from ii_err def _ensure_valid_index(self, value) -> None: """ Ensure that if we don't have an index, that we can create one from the passed value. """ # GH5632, make sure that we are a Series convertible if not len(self.index) and is_list_like(value) and len(value): if not isinstance(value, DataFrame): try: value = Series(value) except (ValueError, NotImplementedError, TypeError) as err: raise ValueError( "Cannot set a frame with no defined index " "and a value that cannot be converted to a Series" ) from err # GH31368 preserve name of index index_copy = value.index.copy() if self.index.name is not None: index_copy.name = self.index.name self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan) def _box_col_values(self, values: SingleDataManager, loc: int) -> Series: """ Provide boxed values for a column. """ # Lookup in columns so that if e.g. a str datetime was passed # we attach the Timestamp object as the name. 
name = self.columns[loc] klass = self._constructor_sliced # We get index=self.index bc values is a SingleDataManager return klass(values, name=name, fastpath=True).__finalize__(self) # ---------------------------------------------------------------------- # Lookup Caching def _clear_item_cache(self) -> None: self._item_cache.clear() def _get_item_cache(self, item: Hashable) -> Series: """Return the cached item, item represents a label indexer.""" if using_copy_on_write(): loc = self.columns.get_loc(item) return self._ixs(loc, axis=1) cache = self._item_cache res = cache.get(item) if res is None: # All places that call _get_item_cache have unique columns, # pending resolution of GH#33047 loc = self.columns.get_loc(item) res = self._ixs(loc, axis=1) cache[item] = res # for a chain res._is_copy = self._is_copy return res def _reset_cacher(self) -> None: # no-op for DataFrame pass def _maybe_cache_changed(self, item, value: Series, inplace: bool) -> None: """ The object has called back to us saying maybe it has changed. """ loc = self._info_axis.get_loc(item) arraylike = value._values old = self._ixs(loc, axis=1) if old._values is value._values and inplace: # GH#46149 avoid making unnecessary copies/block-splitting return self._mgr.iset(loc, arraylike, inplace=inplace) # ---------------------------------------------------------------------- # Unsorted def query(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> DataFrame: ... def query(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def query(self, expr: str, *, inplace: bool = ..., **kwargs) -> DataFrame | None: ... def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | None: """ Query the columns of a DataFrame with a boolean expression. Parameters ---------- expr : str The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. You can refer to column names that are not valid Python variable names by surrounding them in backticks. Thus, column names containing spaces or punctuations (besides underscores) or starting with digits must be surrounded by backticks. (For example, a column named "Area (cm^2)" would be referenced as ```Area (cm^2)```). Column names which are Python keywords (like "list", "for", "import", etc) cannot be used. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether to modify the DataFrame rather than creating a new one. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. Returns ------- DataFrame or None DataFrame resulting from the provided query expression or None if ``inplace=True``. See Also -------- eval : Evaluate a string describing operations on DataFrame columns. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. 
This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. Please note that Python keywords may not be used as identifiers. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. *Backtick quoted variables* Backtick quoted variables are parsed as literal Python code and are converted internally to a Python valid identifier. This can lead to the following problems. During parsing a number of disallowed characters inside the backtick quoted string are replaced by strings that are allowed as a Python identifier. These characters include all operators in Python, the space character, the question mark, the exclamation mark, the dollar sign, and the euro sign. For other characters that fall outside the ASCII range (U+0001..U+007F) and those that are not further specified in PEP 3131, the query parser will raise an error. This excludes whitespace different than the space character, but also the hashtag (as it is used for comments) and the backtick itself (backtick can also not be escaped). In a special case, quotes that make a pair around a backtick can confuse the parser. For example, ```it's` > `that's``` will raise an error, as it forms a quoted string (``'s > `that'``) with a backtick inside. See also the Python documentation about lexical analysis (https://docs.python.org/3/reference/lexical_analysis.html) in combination with the source code in :mod:`pandas.core.computation.parsing`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ inplace = validate_bool_kwarg(inplace, "inplace") if not isinstance(expr, str): msg = f"expr must be a string to be evaluated, {type(expr)} given" raise ValueError(msg) kwargs["level"] = kwargs.pop("level", 0) + 1 kwargs["target"] = None res = self.eval(expr, **kwargs) try: result = self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query result = self[res] if inplace: self._update_inplace(result) return None else: return result def eval(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> Any: ... def eval(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def eval(self, expr: str, *, inplace: bool = False, **kwargs) -> Any | None: """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. 
This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, pandas object, or None The result of the evaluation or None if ``inplace=True``. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Multiple columns can be assigned to using multi-line expressions: >>> df.eval( ... ''' ... C = A + B ... D = A - B ... ''' ... ) A B C D 0 1 10 11 -9 1 2 8 10 -6 2 3 6 9 -3 3 4 4 8 0 4 5 2 7 3 """ from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, "inplace") kwargs["level"] = kwargs.pop("level", 0) + 1 index_resolvers = self._get_index_resolvers() column_resolvers = self._get_cleaned_column_resolvers() resolvers = column_resolvers, index_resolvers if "target" not in kwargs: kwargs["target"] = self kwargs["resolvers"] = tuple(kwargs.get("resolvers", ())) + resolvers return _eval(expr, inplace=inplace, **kwargs) def select_dtypes(self, include=None, exclude=None) -> DataFrame: """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. See Also -------- DataFrame.dtypes: Return Series with the data type of each column. Notes ----- * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <https://numpy.org/doc/stable/reference/arrays.scalars.html>`__ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ``'timedelta64'`` * To select Pandas categorical dtypes, use ``'category'`` * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 
'c': [1.0, 2.0] * 3}) >>> df a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int64']) b c 0 True 1.0 1 False 2.0 2 True 1.0 3 False 2.0 4 True 1.0 5 False 2.0 """ if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () selection = (frozenset(include), frozenset(exclude)) if not any(selection): raise ValueError("at least one of include or exclude must be nonempty") # convert the myriad valid dtypes object to a single representation def check_int_infer_dtype(dtypes): converted_dtypes: list[type] = [] for dtype in dtypes: # Numpy maps int to different types (int32, in64) on Windows and Linux # see https://github.com/numpy/numpy/issues/9464 if (isinstance(dtype, str) and dtype == "int") or (dtype is int): converted_dtypes.append(np.int32) converted_dtypes.append(np.int64) elif dtype == "float" or dtype is float: # GH#42452 : np.dtype("float") coerces to np.float64 from Numpy 1.20 converted_dtypes.extend([np.float64, np.float32]) else: converted_dtypes.append(infer_dtype_from_object(dtype)) return frozenset(converted_dtypes) include = check_int_infer_dtype(include) exclude = check_int_infer_dtype(exclude) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) # can't both include AND exclude! if not include.isdisjoint(exclude): raise ValueError(f"include and exclude overlap on {(include & exclude)}") def dtype_predicate(dtype: DtypeObj, dtypes_set) -> bool: # GH 46870: BooleanDtype._is_numeric == True but should be excluded return issubclass(dtype.type, tuple(dtypes_set)) or ( np.number in dtypes_set and getattr(dtype, "_is_numeric", False) and not is_bool_dtype(dtype) ) def predicate(arr: ArrayLike) -> bool: dtype = arr.dtype if include: if not dtype_predicate(dtype, include): return False if exclude: if dtype_predicate(dtype, exclude): return False return True mgr = self._mgr._get_data_subset(predicate).copy(deep=None) return type(self)(mgr).__finalize__(self) def insert( self, loc: int, column: Hashable, value: Scalar | AnyArrayLike, allow_duplicates: bool | lib.NoDefault = lib.no_default, ) -> None: """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns). column : str, number, or hashable object Label of the inserted column. value : Scalar, Series, or array-like allow_duplicates : bool, optional, default lib.no_default See Also -------- Index.insert : Insert new item by index. 
Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.insert(1, "newcol", [99, 99]) >>> df col1 newcol col2 0 1 99 3 1 2 99 4 >>> df.insert(0, "col1", [100, 100], allow_duplicates=True) >>> df col1 col1 newcol col2 0 100 1 99 3 1 100 2 99 4 Notice that pandas uses index alignment in case of `value` from type `Series`: >>> df.insert(0, "col0", pd.Series([5, 6], index=[1, 2])) >>> df col0 col1 col1 newcol col2 0 NaN 100 1 99 3 1 5.0 100 2 99 4 """ if allow_duplicates is lib.no_default: allow_duplicates = False if allow_duplicates and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'allow_duplicates=True' when " "'self.flags.allows_duplicate_labels' is False." ) if not allow_duplicates and column in self.columns: # Should this be a different kind of error?? raise ValueError(f"cannot insert {column}, already exists") if not isinstance(loc, int): raise TypeError("loc must be int") value = self._sanitize_column(value) self._mgr.insert(loc, column, value) def assign(self, **kwargs) -> DataFrame: r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. Later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 You can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy(deep=None) for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) return data def _sanitize_column(self, value) -> ArrayLike: """ Ensures new columns (which go into the BlockManager as new blocks) are always copied and converted into an array. 
Parameters ---------- value : scalar, Series, or array-like Returns ------- numpy.ndarray or ExtensionArray """ self._ensure_valid_index(value) # We can get there through isetitem with a DataFrame # or through loc single_block_path if isinstance(value, DataFrame): return _reindex_for_setitem(value, self.index) elif is_dict_like(value): return _reindex_for_setitem(Series(value), self.index) if is_list_like(value): com.require_length_match(value, self.index) return sanitize_array(value, self.index, copy=True, allow_2d=True) def _series(self): return { item: Series( self._mgr.iget(idx), index=self.index, name=item, fastpath=True ) for idx, item in enumerate(self.columns) } # ---------------------------------------------------------------------- # Reindexing and alignment def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): frame = self columns = axes["columns"] if columns is not None: frame = frame._reindex_columns( columns, method, copy, level, fill_value, limit, tolerance ) index = axes["index"] if index is not None: frame = frame._reindex_index( index, method, copy, level, fill_value, limit, tolerance ) return frame def _reindex_index( self, new_index, method, copy: bool, level: Level, fill_value=np.nan, limit=None, tolerance=None, ): new_index, indexer = self.index.reindex( new_index, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {0: [new_index, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_columns( self, new_columns, method, copy: bool, level: Level, fill_value=None, limit=None, tolerance=None, ): new_columns, indexer = self.columns.reindex( new_columns, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {1: [new_columns, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_multi( self, axes: dict[str, Index], copy: bool, fill_value ) -> DataFrame: """ We are guaranteed non-Nones in the axes. """ new_index, row_indexer = self.index.reindex(axes["index"]) new_columns, col_indexer = self.columns.reindex(axes["columns"]) if row_indexer is not None and col_indexer is not None: # Fastpath. By doing two 'take's at once we avoid making an # unnecessary copy. # We only get here with `not self._is_mixed_type`, which (almost) # ensures that self.values is cheap. It may be worth making this # condition more specific. indexer = row_indexer, col_indexer new_values = take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor( new_values, index=new_index, columns=new_columns, copy=False ) else: return self._reindex_with_indexers( {0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value, ) def align( self, other: DataFrame, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value=None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> DataFrame: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) """ Examples -------- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) Change the row labels. >>> df.set_axis(['a', 'b', 'c'], axis='index') A B a 1 4 b 2 5 c 3 6 Change the column labels. 
>>> df.set_axis(['I', 'II'], axis='columns')
   I  II
0  1   4
1  2   5
2  3   6
"""

    # ----------------------------------------------------------------------
    # Reindex-based selection methods

    # ----------------------------------------------------------------------
    # Sorting

    # ----------------------------------------------------------------------
    # Arithmetic Methods

    # ----------------------------------------------------------------------
    # Function application

    # ----------------------------------------------------------------------
    # Merging / joining methods

    # ----------------------------------------------------------------------
    # Statistical methods, etc.

    # ----------------------------------------------------------------------
    # ndarray-like stats methods

    # ----------------------------------------------------------------------
    # Add index and columns

    # ----------------------------------------------------------------------
    # Add plotting methods to DataFrame

    # ----------------------------------------------------------------------
    # Internal Interface Methods


class Series(base.IndexOpsMixin, NDFrame):  # type: ignore[misc]
    """
    One-dimensional ndarray with axis labels (including time series).

    Labels need not be unique but must be a hashable type. The object
    supports both integer- and label-based indexing and provides a host of
    methods for performing operations involving the index. Statistical
    methods from ndarray have been overridden to automatically
    exclude missing data (currently represented as NaN).

    Operations between Series (+, -, /, \\*, \\*\\*) align values based on their
    associated index values -- they need not be the same length. The result
    index will be the sorted union of the two indexes.

    Parameters
    ----------
    data : array-like, Iterable, dict, or scalar value
        Contains data stored in Series. If data is a dict, argument
        order is maintained.
    index : array-like or Index (1d)
        Values must be hashable and have the same length as `data`.
        Non-unique index values are allowed. Will default to
        RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like
        and index is None, then the keys in the data are used as the index.
        If the index is not None, the resulting Series is reindexed with the
        index values.
    dtype : str, numpy.dtype, or ExtensionDtype, optional
        Data type for the output Series. If not specified, this will be
        inferred from `data`.
        See the :ref:`user guide <basics.dtypes>` for more usages.
    name : Hashable, default None
        The name to give to the Series.
    copy : bool, default False
        Copy input data. Only affects Series or 1d ndarray input. See examples.

    Notes
    -----
    Please reference the :ref:`User Guide <basics.series>` for more information.

    Examples
    --------
    Constructing Series from a dictionary with an Index specified

    >>> d = {'a': 1, 'b': 2, 'c': 3}
    >>> ser = pd.Series(data=d, index=['a', 'b', 'c'])
    >>> ser
    a    1
    b    2
    c    3
    dtype: int64

    The keys of the dictionary match with the Index values, hence the Index
    values have no effect.
>>> d = {'a': 1, 'b': 2, 'c': 3} >>> ser = pd.Series(data=d, index=['x', 'y', 'z']) >>> ser x NaN y NaN z NaN dtype: float64 Note that the Index is first build with the keys from the dictionary. After this the Series is reindexed with the given Index values, hence we get all NaN as a result. Constructing Series from a list with `copy=False`. >>> r = [1, 2] >>> ser = pd.Series(r, copy=False) >>> ser.iloc[0] = 999 >>> r [1, 2] >>> ser 0 999 1 2 dtype: int64 Due to input data type the Series has a `copy` of the original data even though `copy=False`, so the data is unchanged. Constructing Series from a 1d ndarray with `copy=False`. >>> r = np.array([1, 2]) >>> ser = pd.Series(r, copy=False) >>> ser.iloc[0] = 999 >>> r array([999, 2]) >>> ser 0 999 1 2 dtype: int64 Due to input data type the Series has a `view` on the original data, so the data is changed as well. """ _typ = "series" _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) _name: Hashable _metadata: list[str] = ["name"] _internal_names_set = {"index"} | NDFrame._internal_names_set _accessors = {"dt", "cat", "str", "sparse"} _hidden_attrs = ( base.IndexOpsMixin._hidden_attrs | NDFrame._hidden_attrs | frozenset([]) ) # Override cache_readonly bc Series is mutable # error: Incompatible types in assignment (expression has type "property", # base class "IndexOpsMixin" defined the type as "Callable[[IndexOpsMixin], bool]") hasnans = property( # type: ignore[assignment] # error: "Callable[[IndexOpsMixin], bool]" has no attribute "fget" base.IndexOpsMixin.hasnans.fget, # type: ignore[attr-defined] doc=base.IndexOpsMixin.hasnans.__doc__, ) _mgr: SingleManager div: Callable[[Series, Any], Series] rdiv: Callable[[Series, Any], Series] # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index=None, dtype: Dtype | None = None, name=None, copy: bool | None = None, fastpath: bool = False, ) -> None: if ( isinstance(data, (SingleBlockManager, SingleArrayManager)) and index is None and dtype is None and (copy is False or copy is None) ): if using_copy_on_write(): data = data.copy(deep=False) # GH#33357 called with just the SingleBlockManager NDFrame.__init__(self, data) if fastpath: # e.g. 
from _box_col_values, skip validation of name object.__setattr__(self, "_name", name) else: self.name = name return if isinstance(data, (ExtensionArray, np.ndarray)): if copy is not False and using_copy_on_write(): if dtype is None or astype_is_view(data.dtype, pandas_dtype(dtype)): data = data.copy() if copy is None: copy = False # we are called internally, so short-circuit if fastpath: # data is a ndarray, index is defined if not isinstance(data, (SingleBlockManager, SingleArrayManager)): manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index) elif manager == "array": data = SingleArrayManager.from_array(data, index) elif using_copy_on_write() and not copy: data = data.copy(deep=False) if copy: data = data.copy() # skips validation of the name object.__setattr__(self, "_name", name) NDFrame.__init__(self, data) return if isinstance(data, SingleBlockManager) and using_copy_on_write() and not copy: data = data.copy(deep=False) name = ibase.maybe_extract_name(name, data, type(self)) if index is not None: index = ensure_index(index) if dtype is not None: dtype = self._validate_dtype(dtype) if data is None: index = index if index is not None else default_index(0) if len(index) or dtype is not None: data = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: data = [] if isinstance(data, MultiIndex): raise NotImplementedError( "initializing a Series from a MultiIndex is not supported" ) refs = None if isinstance(data, Index): if dtype is not None: data = data.astype(dtype, copy=False) if using_copy_on_write(): refs = data._references data = data._values else: # GH#24096 we need to ensure the index remains immutable data = data._values.copy() copy = False elif isinstance(data, np.ndarray): if len(data.dtype): # GH#13296 we are dealing with a compound dtype, which # should be treated as 2D raise ValueError( "Cannot construct a Series from an ndarray with " "compound dtype. Use DataFrame instead." ) elif isinstance(data, Series): if index is None: index = data.index data = data._mgr.copy(deep=False) else: data = data.reindex(index, copy=copy) copy = False data = data._mgr elif is_dict_like(data): data, index = self._init_dict(data, index, dtype) dtype = None copy = False elif isinstance(data, (SingleBlockManager, SingleArrayManager)): if index is None: index = data.index elif not data.index.equals(index) or copy: # GH#19275 SingleBlockManager input should only be called # internally raise AssertionError( "Cannot pass both SingleBlockManager " "`data` argument and a different " "`index` argument. `copy` must be False." ) elif isinstance(data, ExtensionArray): pass else: data = com.maybe_iterable_to_list(data) if is_list_like(data) and not len(data) and dtype is None: # GH 29405: Pre-2.0, this defaulted to float. 
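# (illustrative: in 2.0 ``pd.Series([])`` is object dtype, where 1.x
# produced float64 and warned about the upcoming change)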
dtype = np.dtype(object) if index is None: if not is_list_like(data): data = [data] index = default_index(len(data)) elif is_list_like(data): com.require_length_match(data, index) # create/copy the manager if isinstance(data, (SingleBlockManager, SingleArrayManager)): if dtype is not None: data = data.astype(dtype=dtype, errors="ignore", copy=copy) elif copy: data = data.copy() else: data = sanitize_array(data, index, dtype, copy) manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index, refs=refs) elif manager == "array": data = SingleArrayManager.from_array(data, index) NDFrame.__init__(self, data) self.name = name self._set_axis(0, index) def _init_dict( self, data, index: Index | None = None, dtype: DtypeObj | None = None ): """ Derive the "_mgr" and "index" attributes of a new Series from a dictionary input. Parameters ---------- data : dict or dict-like Data used to populate the new Series. index : Index or None, default None Index for the new Series: if None, use dict keys. dtype : np.dtype, ExtensionDtype, or None, default None The dtype for the new Series: if None, infer from data. Returns ------- _data : BlockManager for the new Series index : index for the new Series """ keys: Index | tuple # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')] # raises KeyError), so we iterate the entire dict, and align if data: # GH:34717, issue was using zip to extract key and values from data. # using generators in effects the performance. # Below is the new way of extracting the keys and values keys = tuple(data.keys()) values = list(data.values()) # Generating list of values- faster way elif index is not None: # fastpath for Series(data=None). Just use broadcasting a scalar # instead of reindexing. if len(index) or dtype is not None: values = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: values = [] keys = index else: keys, values = (), [] # Input is now list-like, so rely on "standard" construction: s = self._constructor( values, index=keys, dtype=dtype, ) # Now we just make sure the order is respected, if any if data and index is not None: s = s.reindex(index, copy=False) return s._mgr, s.index # ---------------------------------------------------------------------- def _constructor(self) -> Callable[..., Series]: return Series def _constructor_expanddim(self) -> Callable[..., DataFrame]: """ Used when a manipulation result has one higher dimension as the original, such as Series.to_frame() """ from pandas.core.frame import DataFrame return DataFrame # types def _can_hold_na(self) -> bool: return self._mgr._can_hold_na # ndarray compatibility def dtype(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtype dtype('int64') """ return self._mgr.dtype def dtypes(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtypes dtype('int64') """ # DataFrame compatibility return self.dtype def name(self) -> Hashable: """ Return the name of the Series. The name of a Series becomes its index or column name if it is used to form a DataFrame. It is also used whenever displaying the Series using the interpreter. Returns ------- label (hashable object) The name of the Series, also the column name if part of a DataFrame. See Also -------- Series.rename : Sets the Series name when given a scalar input. Index.name : Corresponding Index property. 
Examples -------- The Series name can be set initially when calling the constructor. >>> s = pd.Series([1, 2, 3], dtype=np.int64, name='Numbers') >>> s 0 1 1 2 2 3 Name: Numbers, dtype: int64 >>> s.name = "Integers" >>> s 0 1 1 2 2 3 Name: Integers, dtype: int64 The name of a Series within a DataFrame is its column name. >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], ... columns=["Odd Numbers", "Even Numbers"]) >>> df Odd Numbers Even Numbers 0 1 2 1 3 4 2 5 6 >>> df["Even Numbers"].name 'Even Numbers' """ return self._name def name(self, value: Hashable) -> None: validate_all_hashable(value, error_name=f"{type(self).__name__}.name") object.__setattr__(self, "_name", value) def values(self): """ Return Series as ndarray or ndarray-like depending on the dtype. .. warning:: We recommend using :attr:`Series.array` or :meth:`Series.to_numpy`, depending on whether you need a reference to the underlying data or a NumPy array. Returns ------- numpy.ndarray or ndarray-like See Also -------- Series.array : Reference to the underlying data. Series.to_numpy : A NumPy array representing the underlying data. Examples -------- >>> pd.Series([1, 2, 3]).values array([1, 2, 3]) >>> pd.Series(list('aabc')).values array(['a', 'a', 'b', 'c'], dtype=object) >>> pd.Series(list('aabc')).astype('category').values ['a', 'a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] Timezone aware datetime data is converted to UTC: >>> pd.Series(pd.date_range('20130101', periods=3, ... tz='US/Eastern')).values array(['2013-01-01T05:00:00.000000000', '2013-01-02T05:00:00.000000000', '2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]') """ return self._mgr.external_values() def _values(self): """ Return the internal repr of this data (defined by Block.interval_values). This are the values as stored in the Block (ndarray or ExtensionArray depending on the Block class), with datetime64[ns] and timedelta64[ns] wrapped in ExtensionArrays to match Index._values behavior. Differs from the public ``.values`` for certain data types, because of historical backwards compatibility of the public attribute (e.g. period returns object ndarray and datetimetz a datetime64[ns] ndarray for ``.values`` while it returns an ExtensionArray for ``._values`` in those cases). Differs from ``.array`` in that this still returns the numpy array if the Block is backed by a numpy array (except for datetime64 and timedelta64 dtypes), while ``.array`` ensures to always return an ExtensionArray. Overview: dtype | values | _values | array | ----------- | ------------- | ------------- | ------------- | Numeric | ndarray | ndarray | PandasArray | Category | Categorical | Categorical | Categorical | dt64[ns] | ndarray[M8ns] | DatetimeArray | DatetimeArray | dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray | td64[ns] | ndarray[m8ns] | TimedeltaArray| ndarray[m8ns] | Period | ndarray[obj] | PeriodArray | PeriodArray | Nullable | EA | EA | EA | """ return self._mgr.internal_values() def _references(self) -> BlockValuesRefs | None: if isinstance(self._mgr, SingleArrayManager): return None return self._mgr._block.refs # error: Decorated property not supported def array(self) -> ExtensionArray: return self._mgr.array_values() # ops def ravel(self, order: str = "C") -> ArrayLike: """ Return the flattened underlying data as an ndarray or ExtensionArray. Returns ------- numpy.ndarray or ExtensionArray Flattened data of the Series. See Also -------- numpy.ndarray.ravel : Return a flattened array. 
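
Examples
--------
A minimal example (values are illustrative):

>>> s = pd.Series([1, 2, 3])
>>> s.ravel()
array([1, 2, 3])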
""" arr = self._values.ravel(order=order) if isinstance(arr, np.ndarray) and using_copy_on_write(): arr.flags.writeable = False return arr def __len__(self) -> int: """ Return the length of the Series. """ return len(self._mgr) def view(self, dtype: Dtype | None = None) -> Series: """ Create a new view of the Series. This function will return a new Series with a view of the same underlying values in memory, optionally reinterpreted with a new data type. The new data type must preserve the same size in bytes as to not cause index misalignment. Parameters ---------- dtype : data type Data type object or one of their string representations. Returns ------- Series A new Series object as a view of the same data in memory. See Also -------- numpy.ndarray.view : Equivalent numpy function to create a new view of the same data in memory. Notes ----- Series are instantiated with ``dtype=float64`` by default. While ``numpy.ndarray.view()`` will return a view with the same data type as the original array, ``Series.view()`` (without specified dtype) will try using ``float64`` and may fail if the original data type size in bytes is not the same. Examples -------- >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8') >>> s 0 -2 1 -1 2 0 3 1 4 2 dtype: int8 The 8 bit signed integer representation of `-1` is `0b11111111`, but the same bytes represent 255 if read as an 8 bit unsigned integer: >>> us = s.view('uint8') >>> us 0 254 1 255 2 0 3 1 4 2 dtype: uint8 The views share the same underlying values: >>> us[0] = 128 >>> s 0 -128 1 -1 2 0 3 1 4 2 dtype: int8 """ # self.array instead of self._values so we piggyback on PandasArray # implementation res_values = self.array.view(dtype) res_ser = self._constructor(res_values, index=self.index, copy=False) if isinstance(res_ser._mgr, SingleBlockManager) and using_copy_on_write(): blk = res_ser._mgr._block blk.refs = cast("BlockValuesRefs", self._references) blk.refs.add_reference(blk) # type: ignore[arg-type] return res_ser.__finalize__(self, method="view") # ---------------------------------------------------------------------- # NDArray Compat _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: """ Return the values as a NumPy array. Users should not call this directly. Rather, it is invoked by :func:`numpy.array` and :func:`numpy.asarray`. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to use for the resulting NumPy array. By default, the dtype is inferred from the data. Returns ------- numpy.ndarray The values in the series converted to a :class:`numpy.ndarray` with the specified `dtype`. See Also -------- array : Create a new array from data. Series.array : Zero-copy view to the array backing the Series. Series.to_numpy : Series method for similar behavior. 
Examples -------- >>> ser = pd.Series([1, 2, 3]) >>> np.asarray(ser) array([1, 2, 3]) For timezone-aware data, the timezones may be retained with ``dtype='object'`` >>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) >>> np.asarray(tzser, dtype="object") array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'), Timestamp('2000-01-02 00:00:00+0100', tz='CET')], dtype=object) Or the values may be localized to UTC and the tzinfo discarded with ``dtype='datetime64[ns]'`` >>> np.asarray(tzser, dtype="datetime64[ns]") # doctest: +ELLIPSIS array(['1999-12-31T23:00:00.000000000', ...], dtype='datetime64[ns]') """ values = self._values arr = np.asarray(values, dtype=dtype) if using_copy_on_write() and astype_is_view(values.dtype, arr.dtype): arr = arr.view() arr.flags.writeable = False return arr # ---------------------------------------------------------------------- # Unary Methods # coercion __float__ = _coerce_method(float) __int__ = _coerce_method(int) # ---------------------------------------------------------------------- # indexers def axes(self) -> list[Index]: """ Return a list of the row axis labels. """ return [self.index] # ---------------------------------------------------------------------- # Indexing Methods def take(self, indices, axis: Axis = 0, **kwargs) -> Series: nv.validate_take((), kwargs) indices = ensure_platform_int(indices) if ( indices.ndim == 1 and using_copy_on_write() and is_range_indexer(indices, len(self)) ): return self.copy(deep=None) new_index = self.index.take(indices) new_values = self._values.take(indices) result = self._constructor(new_values, index=new_index, fastpath=True) return result.__finalize__(self, method="take") def _take_with_is_copy(self, indices, axis: Axis = 0) -> Series: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing for the SettingWithCopyWarning). For Series this does the same as the public take (it never sets `_is_copy`). See the docstring of `take` for full explanation of the parameters. """ return self.take(indices=indices, axis=axis) def _ixs(self, i: int, axis: AxisInt = 0) -> Any: """ Return the i-th value or values in the Series by location. Parameters ---------- i : int Returns ------- scalar (int) or Series (slice, sequence) """ return self._values[i] def _slice(self, slobj: slice | np.ndarray, axis: Axis = 0) -> Series: # axis kwarg is retained for compat with NDFrame method # _slice is *always* positional return self._get_values(slobj) def __getitem__(self, key): check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) if key is Ellipsis: return self key_is_scalar = is_scalar(key) if isinstance(key, (list, tuple)): key = unpack_1tuple(key) if is_integer(key) and self.index._should_fallback_to_positional: return self._values[key] elif key_is_scalar: return self._get_value(key) if is_hashable(key): # Otherwise index.get_value will raise InvalidIndexError try: # For labels that don't resolve as scalars like tuples and frozensets result = self._get_value(key) return result except (KeyError, TypeError, InvalidIndexError): # InvalidIndexError for e.g. 
generator # see test_series_getitem_corner_generator if isinstance(key, tuple) and isinstance(self.index, MultiIndex): # We still have the corner case where a tuple is a key # in the first level of our MultiIndex return self._get_values_tuple(key) if is_iterator(key): key = list(key) if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) return self._get_values(key) return self._get_with(key) def _get_with(self, key): # other: fancy integer or otherwise if isinstance(key, slice): # _convert_slice_indexer to determine if this slice is positional # or label based, and if the latter, convert to positional slobj = self.index._convert_slice_indexer(key, kind="getitem") return self._slice(slobj) elif isinstance(key, ABCDataFrame): raise TypeError( "Indexing a Series with DataFrame is not " "supported, use the appropriate DataFrame column" ) elif isinstance(key, tuple): return self._get_values_tuple(key) elif not is_list_like(key): # e.g. scalars that aren't recognized by lib.is_scalar, GH#32684 return self.loc[key] if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)): key = list(key) if isinstance(key, Index): key_type = key.inferred_type else: key_type = lib.infer_dtype(key, skipna=False) # Note: The key_type == "boolean" case should be caught by the # com.is_bool_indexer check in __getitem__ if key_type == "integer": # We need to decide whether to treat this as a positional indexer # (i.e. self.iloc) or label-based (i.e. self.loc) if not self.index._should_fallback_to_positional: return self.loc[key] else: return self.iloc[key] # handle the dup indexing case GH#4246 return self.loc[key] def _get_values_tuple(self, key: tuple): # mpl hackaround if com.any_none(*key): # mpl compat if we look up e.g. ser[:, np.newaxis]; # see tests.series.timeseries.test_mpl_compat_hack # the asarray is needed to avoid returning a 2D DatetimeArray result = np.asarray(self._values[key]) disallow_ndim_indexing(result) return result if not isinstance(self.index, MultiIndex): raise KeyError("key of type tuple not found and not a MultiIndex") # If key is contained, would have returned by now indexer, new_index = self.index.get_loc_level(key) new_ser = self._constructor(self._values[indexer], index=new_index, copy=False) if using_copy_on_write() and isinstance(indexer, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) def _get_values(self, indexer: slice | npt.NDArray[np.bool_]) -> Series: new_mgr = self._mgr.getitem_mgr(indexer) return self._constructor(new_mgr).__finalize__(self) def _get_value(self, label, takeable: bool = False): """ Quickly retrieve single value at passed index label. 
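
        This is the lookup behind the ``.at`` (label-based) and ``.iat``
        (positional, i.e. ``takeable=True``) accessors.
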
Parameters ---------- label : object takeable : interpret the index as indexers, default False Returns ------- scalar value """ if takeable: return self._values[label] # Similar to Index.get_value, but we do not fall back to positional loc = self.index.get_loc(label) if is_integer(loc): return self._values[loc] if isinstance(self.index, MultiIndex): mi = self.index new_values = self._values[loc] if len(new_values) == 1 and mi.nlevels == 1: # If more than one level left, we can not return a scalar return new_values[0] new_index = mi[loc] new_index = maybe_droplevels(new_index, label) new_ser = self._constructor( new_values, index=new_index, name=self.name, copy=False ) if using_copy_on_write() and isinstance(loc, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) else: return self.iloc[loc] def __setitem__(self, key, value) -> None: if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) cacher_needs_updating = self._check_is_chained_assignment_possible() if key is Ellipsis: key = slice(None) if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") return self._set_values(indexer, value) try: self._set_with_engine(key, value) except KeyError: # We have a scalar (or for MultiIndex or object-dtype, scalar-like) # key that is not present in self.index. if is_integer(key): if not self.index._should_fallback_to_positional: # GH#33469 self.loc[key] = value else: # positional setter # can't use _mgr.setitem_inplace yet bc could have *both* # KeyError and then ValueError, xref GH#45070 self._set_values(key, value) else: # GH#12862 adding a new key to the Series self.loc[key] = value except (TypeError, ValueError, LossySetitemError): # The key was OK, but we cannot set the value losslessly indexer = self.index.get_loc(key) self._set_values(indexer, value) except InvalidIndexError as err: if isinstance(key, tuple) and not isinstance(self.index, MultiIndex): # cases with MultiIndex don't get here bc they raise KeyError # e.g. test_basic_getitem_setitem_corner raise KeyError( "key of type tuple not found and not a MultiIndex" ) from err if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) if ( is_list_like(value) and len(value) != len(self) and not isinstance(value, Series) and not is_object_dtype(self.dtype) ): # Series will be reindexed to have matching length inside # _where call below # GH#44265 indexer = key.nonzero()[0] self._set_values(indexer, value) return # otherwise with listlike other we interpret series[mask] = other # as series[mask] = other[mask] try: self._where(~key, value, inplace=True) except InvalidIndexError: # test_where_dups self.iloc[key] = value return else: self._set_with(key, value) if cacher_needs_updating: self._maybe_update_cacher(inplace=True) def _set_with_engine(self, key, value) -> None: loc = self.index.get_loc(key) # this is equivalent to self._values[key] = value self._mgr.setitem_inplace(loc, value) def _set_with(self, key, value) -> None: # We got here via exception-handling off of InvalidIndexError, so # key should always be listlike at this point. 
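        # (Hypothetical illustration of how we get here: ``ser[["a", "b"]] = 0``
        # raises InvalidIndexError inside _set_with_engine, because the index
        # engine's get_loc only resolves scalar labels, so __setitem__ falls
        # back to this method with the list-like key.)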
        assert not isinstance(key, tuple)

        if is_iterator(key):
            # Without this, the call to infer_dtype will consume the generator
            key = list(key)

        if not self.index._should_fallback_to_positional:
            # Regardless of the key type, we're treating it as labels
            self._set_labels(key, value)

        else:
            # Note: key_type == "boolean" should not occur because that
            # should be caught by the is_bool_indexer check in __setitem__
            key_type = lib.infer_dtype(key, skipna=False)

            if key_type == "integer":
                self._set_values(key, value)
            else:
                self._set_labels(key, value)

    def _set_labels(self, key, value) -> None:
        key = com.asarray_tuplesafe(key)
        indexer: np.ndarray = self.index.get_indexer(key)
        mask = indexer == -1
        if mask.any():
            raise KeyError(f"{key[mask]} not in index")
        self._set_values(indexer, value)

    def _set_values(self, key, value) -> None:
        if isinstance(key, (Index, Series)):
            key = key._values

        self._mgr = self._mgr.setitem(indexer=key, value=value)
        self._maybe_update_cacher()

    def _set_value(self, label, value, takeable: bool = False) -> None:
        """
        Quickly set single value at passed label.

        If label is not contained, a new object is created with the label
        placed at the end of the result index.

        Parameters
        ----------
        label : object
            Partial indexing with MultiIndex not allowed.
        value : object
            Scalar value.
        takeable : bool, default False
            Interpret the index as positional indexers.
        """
        if not takeable:
            try:
                loc = self.index.get_loc(label)
            except KeyError:
                # set using a non-recursive method
                self.loc[label] = value
                return
        else:
            loc = label

        self._set_values(loc, value)

    # ----------------------------------------------------------------------
    # Lookup Caching

    @property
    def _is_cached(self) -> bool:
        """Return boolean indicating if self is cached or not."""
        return getattr(self, "_cacher", None) is not None

    def _get_cacher(self):
        """Return my cacher or None."""
        cacher = getattr(self, "_cacher", None)
        if cacher is not None:
            cacher = cacher[1]()
        return cacher

    def _reset_cacher(self) -> None:
        """
        Reset the cacher.
        """
        if hasattr(self, "_cacher"):
            del self._cacher

    def _set_as_cached(self, item, cacher) -> None:
        """
        Set the _cacher attribute on the calling object with a weakref to
        cacher.
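        Under copy-on-write the caching mechanism is not used, so this is a
        no-op (note the early return below).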
""" if using_copy_on_write(): return self._cacher = (item, weakref.ref(cacher)) def _clear_item_cache(self) -> None: # no-op for Series pass def _check_is_chained_assignment_possible(self) -> bool: """ See NDFrame._check_is_chained_assignment_possible.__doc__ """ if self._is_view and self._is_cached: ref = self._get_cacher() if ref is not None and ref._is_mixed_type: self._check_setitem_copy(t="referent", force=True) return True return super()._check_is_chained_assignment_possible() def _maybe_update_cacher( self, clear: bool = False, verify_is_copy: bool = True, inplace: bool = False ) -> None: """ See NDFrame._maybe_update_cacher.__doc__ """ # for CoW, we never want to update the parent DataFrame cache # if the Series changed, but don't keep track of any cacher if using_copy_on_write(): return cacher = getattr(self, "_cacher", None) if cacher is not None: assert self.ndim == 1 ref: DataFrame = cacher[1]() # we are trying to reference a dead referent, hence # a copy if ref is None: del self._cacher elif len(self) == len(ref) and self.name in ref.columns: # GH#42530 self.name must be in ref.columns # to ensure column still in dataframe # otherwise, either self or ref has swapped in new arrays ref._maybe_cache_changed(cacher[0], self, inplace=inplace) else: # GH#33675 we have swapped in a new array, so parent # reference to self is now invalid ref._item_cache.pop(cacher[0], None) super()._maybe_update_cacher( clear=clear, verify_is_copy=verify_is_copy, inplace=inplace ) # ---------------------------------------------------------------------- # Unsorted def _is_mixed_type(self) -> bool: return False def repeat(self, repeats: int | Sequence[int], axis: None = None) -> Series: """ Repeat elements of a Series. Returns a new Series where each element of the current Series is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty Series. axis : None Unused. Parameter needed for compatibility with DataFrame. Returns ------- Series Newly created Series with repeated elements. See Also -------- Index.repeat : Equivalent function for Index. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> s = pd.Series(['a', 'b', 'c']) >>> s 0 a 1 b 2 c dtype: object >>> s.repeat(2) 0 a 0 a 1 b 1 b 2 c 2 c dtype: object >>> s.repeat([1, 2, 3]) 0 a 1 b 1 b 2 c 2 c 2 c dtype: object """ nv.validate_repeat((), {"axis": axis}) new_index = self.index.repeat(repeats) new_values = self._values.repeat(repeats) return self._constructor(new_values, index=new_index, copy=False).__finalize__( self, method="repeat" ) def reset_index( self, level: IndexLabel = ..., *, drop: Literal[False] = ..., name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> DataFrame: ... def reset_index( self, level: IndexLabel = ..., *, drop: Literal[True], name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> Series: ... def reset_index( self, level: IndexLabel = ..., *, drop: bool = ..., name: Level = ..., inplace: Literal[True], allow_duplicates: bool = ..., ) -> None: ... def reset_index( self, level: IndexLabel = None, *, drop: bool = False, name: Level = lib.no_default, inplace: bool = False, allow_duplicates: bool = False, ) -> DataFrame | Series | None: """ Generate a new DataFrame or Series with the index reset. 
This is useful when the index needs to be treated as a column, or when the index is meaningless and needs to be reset to the default before another operation. Parameters ---------- level : int, str, tuple, or list, default optional For a Series with a MultiIndex, only remove the specified levels from the index. Removes all levels by default. drop : bool, default False Just reset the index, without inserting it as a column in the new DataFrame. name : object, optional The name to use for the column containing the original Series values. Uses ``self.name`` by default. This argument is ignored when `drop` is True. inplace : bool, default False Modify the Series in place (do not create a new object). allow_duplicates : bool, default False Allow duplicate column labels to be created. .. versionadded:: 1.5.0 Returns ------- Series or DataFrame or None When `drop` is False (the default), a DataFrame is returned. The newly created columns will come first in the DataFrame, followed by the original Series values. When `drop` is True, a `Series` is returned. In either case, if ``inplace=True``, no value is returned. See Also -------- DataFrame.reset_index: Analogous function for DataFrame. Examples -------- >>> s = pd.Series([1, 2, 3, 4], name='foo', ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx')) Generate a DataFrame with default index. >>> s.reset_index() idx foo 0 a 1 1 b 2 2 c 3 3 d 4 To specify the name of the new column use `name`. >>> s.reset_index(name='values') idx values 0 a 1 1 b 2 2 c 3 3 d 4 To generate a new Series with the default set `drop` to True. >>> s.reset_index(drop=True) 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 The `level` parameter is interesting for Series with a multi-level index. >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']), ... np.array(['one', 'two', 'one', 'two'])] >>> s2 = pd.Series( ... range(4), name='foo', ... index=pd.MultiIndex.from_arrays(arrays, ... names=['a', 'b'])) To remove a specific level from the Index, use `level`. >>> s2.reset_index(level='a') a foo b one bar 0 two bar 1 one baz 2 two baz 3 If `level` is not set, all levels are removed from the Index. >>> s2.reset_index() a b foo 0 bar one 0 1 bar two 1 2 baz one 2 3 baz two 3 """ inplace = validate_bool_kwarg(inplace, "inplace") if drop: new_index = default_index(len(self)) if level is not None: level_list: Sequence[Hashable] if not isinstance(level, (tuple, list)): level_list = [level] else: level_list = level level_list = [self.index._get_level_number(lev) for lev in level_list] if len(level_list) < self.index.nlevels: new_index = self.index.droplevel(level_list) if inplace: self.index = new_index elif using_copy_on_write(): new_ser = self.copy(deep=False) new_ser.index = new_index return new_ser.__finalize__(self, method="reset_index") else: return self._constructor( self._values.copy(), index=new_index, copy=False ).__finalize__(self, method="reset_index") elif inplace: raise TypeError( "Cannot reset_index inplace on a Series to create a DataFrame" ) else: if name is lib.no_default: # For backwards compatibility, keep columns as [0] instead of # [None] when self.name is None if self.name is None: name = 0 else: name = self.name df = self.to_frame(name) return df.reset_index( level=level, drop=drop, allow_duplicates=allow_duplicates ) return None # ---------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str: """ Return a string representation for a particular Series. 
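
        The formatting parameters (row truncation, length, dtype and name
        display) are taken from the active display options via
        ``fmt.get_series_repr_params()`` and forwarded to ``to_string``.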
""" # pylint: disable=invalid-repr-returned repr_params = fmt.get_series_repr_params() return self.to_string(**repr_params) def to_string( self, buf: None = ..., na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> str: ... def to_string( self, buf: FilePath | WriteBuffer[str], na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> None: ... def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, na_rep: str = "NaN", float_format: str | None = None, header: bool = True, index: bool = True, length: bool = False, dtype: bool = False, name: bool = False, max_rows: int | None = None, min_rows: int | None = None, ) -> str | None: """ Render a string representation of the Series. Parameters ---------- buf : StringIO-like, optional Buffer to write to. na_rep : str, optional String representation of NaN to use, default 'NaN'. float_format : one-parameter function, optional Formatter function to apply to columns' elements if they are floats, default None. header : bool, default True Add the Series header (index name). index : bool, optional Add index (row) labels, default True. length : bool, default False Add the Series length. dtype : bool, default False Add the Series dtype. name : bool, default False Add the Series name if not None. max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. min_rows : int, optional The number of rows to display in a truncated repr (when number of rows is above `max_rows`). Returns ------- str or None String representation of Series if ``buf=None``, otherwise None. """ formatter = fmt.SeriesFormatter( self, name=name, length=length, header=header, index=index, dtype=dtype, na_rep=na_rep, float_format=float_format, min_rows=min_rows, max_rows=max_rows, ) result = formatter.to_string() # catch contract violations if not isinstance(result, str): raise AssertionError( "result must be of type str, type " f"of result is {repr(type(result).__name__)}" ) if buf is None: return result else: if hasattr(buf, "write"): buf.write(result) else: with open(buf, "w") as f: f.write(result) return None klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples=dedent( """Examples -------- >>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal") >>> print(s.to_markdown()) | | animal | |---:|:---------| | 0 | elk | | 1 | pig | | 2 | dog | | 3 | quetzal | Output markdown with a tabulate option. >>> print(s.to_markdown(tablefmt="grid")) +----+----------+ | | animal | +====+==========+ | 0 | elk | +----+----------+ | 1 | pig | +----+----------+ | 2 | dog | +----+----------+ | 3 | quetzal | +----+----------+""" ), ) def to_markdown( self, buf: IO[str] | None = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions = None, **kwargs, ) -> str | None: """ Print {klass} in Markdown-friendly format. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. mode : str, optional Mode in which file is opened, "wt" by default. index : bool, optional, default True Add index (row) labels. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 **kwargs These parameters will be passed to `tabulate \ <https://pypi.org/project/tabulate>`_. 
Returns ------- str {klass} in Markdown-friendly format. Notes ----- Requires the `tabulate <https://pypi.org/project/tabulate>`_ package. {examples} """ return self.to_frame().to_markdown( buf, mode, index, storage_options=storage_options, **kwargs ) # ---------------------------------------------------------------------- def items(self) -> Iterable[tuple[Hashable, Any]]: """ Lazily iterate over (index, value) tuples. This method returns an iterable tuple (index, value). This is convenient if you want to create a lazy iterator. Returns ------- iterable Iterable of tuples containing the (index, value) pairs from a Series. See Also -------- DataFrame.items : Iterate over (column name, Series) pairs. DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. Examples -------- >>> s = pd.Series(['A', 'B', 'C']) >>> for index, value in s.items(): ... print(f"Index : {index}, Value : {value}") Index : 0, Value : A Index : 1, Value : B Index : 2, Value : C """ return zip(iter(self.index), iter(self)) # ---------------------------------------------------------------------- # Misc public methods def keys(self) -> Index: """ Return alias for index. Returns ------- Index Index of the Series. """ return self.index def to_dict(self, into: type[dict] = dict) -> dict: """ Convert Series to {label -> value} dict or dict-like object. Parameters ---------- into : class, default dict The collections.abc.Mapping subclass to use as the return object. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- collections.abc.Mapping Key-value representation of Series. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.to_dict() {0: 1, 1: 2, 2: 3, 3: 4} >>> from collections import OrderedDict, defaultdict >>> s.to_dict(OrderedDict) OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> dd = defaultdict(list) >>> s.to_dict(dd) defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4}) """ # GH16122 into_c = com.standardize_mapping(into) if is_object_dtype(self) or is_extension_array_dtype(self): return into_c((k, maybe_box_native(v)) for k, v in self.items()) else: # Not an object dtype => all types will be the same so let the default # indexer return native python type return into_c(self.items()) def to_frame(self, name: Hashable = lib.no_default) -> DataFrame: """ Convert Series to DataFrame. Parameters ---------- name : object, optional The passed name should substitute for the series name (if it has one). Returns ------- DataFrame DataFrame representation of Series. Examples -------- >>> s = pd.Series(["a", "b", "c"], ... name="vals") >>> s.to_frame() vals 0 a 1 b 2 c """ columns: Index if name is lib.no_default: name = self.name if name is None: # default to [0], same as we would get with DataFrame(self) columns = default_index(1) else: columns = Index([name]) else: columns = Index([name]) mgr = self._mgr.to_2d_mgr(columns) df = self._constructor_expanddim(mgr) return df.__finalize__(self, method="to_frame") def _set_name(self, name, inplace: bool = False) -> Series: """ Set the Series name. Parameters ---------- name : str inplace : bool Whether to modify `self` directly or return a copy. """ inplace = validate_bool_kwarg(inplace, "inplace") ser = self if inplace else self.copy() ser.name = name return ser """ Examples -------- >>> ser = pd.Series([390., 350., 30., 20.], ... 
index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], name="Max Speed") >>> ser Falcon 390.0 Falcon 350.0 Parrot 30.0 Parrot 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", "b"]).mean() a 210.0 b 185.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(ser > 100).mean() Max Speed False 25.0 True 370.0 Name: Max Speed, dtype: float64 **Grouping by Indexes** We can groupby different levels of a hierarchical index using the `level` parameter: >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], ... ['Captive', 'Wild', 'Captive', 'Wild']] >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) >>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed") >>> ser Animal Type Falcon Captive 390.0 Wild 350.0 Parrot Captive 30.0 Wild 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Animal Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level="Type").mean() Type Captive 210.0 Wild 185.0 Name: Max Speed, dtype: float64 We can also choose to include `NA` in group keys or not by defining `dropna` parameter, the default setting is `True`. >>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan]) >>> ser.groupby(level=0).sum() a 3 b 3 dtype: int64 >>> ser.groupby(level=0, dropna=False).sum() a 3 b 3 NaN 3 dtype: int64 >>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot'] >>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed") >>> ser.groupby(["a", "b", "a", np.nan]).mean() a 210.0 b 350.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean() a 210.0 b 350.0 NaN 20.0 Name: Max Speed, dtype: float64 """ ) def groupby( self, by=None, axis: Axis = 0, level: IndexLabel = None, as_index: bool = True, sort: bool = True, group_keys: bool = True, observed: bool = False, dropna: bool = True, ) -> SeriesGroupBy: from pandas.core.groupby.generic import SeriesGroupBy if level is None and by is None: raise TypeError("You have to supply one of 'by' and 'level'") if not as_index: raise TypeError("as_index=False only valid with DataFrame") axis = self._get_axis_number(axis) return SeriesGroupBy( obj=self, keys=by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, observed=observed, dropna=dropna, ) # ---------------------------------------------------------------------- # Statistics, overridden ndarray methods # TODO: integrate bottleneck def count(self): """ Return number of non-NA/null observations in the Series. Returns ------- int or Series (if level specified) Number of non-null values in the Series. See Also -------- DataFrame.count : Count non-NA cells for each column or row. Examples -------- >>> s = pd.Series([0.0, 1.0, np.nan]) >>> s.count() 2 """ return notna(self._values).sum().astype("int64") def mode(self, dropna: bool = True) -> Series: """ Return the mode(s) of the Series. The mode is the value that appears most often. There can be multiple modes. Always returns Series even if only one value is returned. Parameters ---------- dropna : bool, default True Don't consider counts of NaN/NaT. Returns ------- Series Modes of the Series in sorted order. 
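
        Examples
        --------
        >>> s = pd.Series([2, 4, 2, 2, 4, 4, 2])
        >>> s.mode()
        0    2
        dtype: int64

        When several values are tied for the highest count, all of them are
        returned in sorted order:

        >>> pd.Series([1, 2, 2, 1]).mode()
        0    1
        1    2
        dtype: int64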
""" # TODO: Add option for bins like value_counts() values = self._values if isinstance(values, np.ndarray): res_values = algorithms.mode(values, dropna=dropna) else: res_values = values._mode(dropna=dropna) # Ensure index is type stable (should always use int index) return self._constructor( res_values, index=range(len(res_values)), name=self.name, copy=False ) def unique(self) -> ArrayLike: # pylint: disable=useless-parent-delegation """ Return unique values of Series object. Uniques are returned in order of appearance. Hash table-based unique, therefore does NOT sort. Returns ------- ndarray or ExtensionArray The unique values returned as a NumPy array. See Notes. See Also -------- Series.drop_duplicates : Return Series with duplicate values removed. unique : Top-level unique method for any 1-d array-like object. Index.unique : Return Index with unique values from an Index object. Notes ----- Returns the unique values as a NumPy array. In case of an extension-array backed Series, a new :class:`~api.extensions.ExtensionArray` of that type with just the unique values is returned. This includes * Categorical * Period * Datetime with Timezone * Datetime without Timezone * Timedelta * Interval * Sparse * IntegerNA See Examples section. Examples -------- >>> pd.Series([2, 1, 3, 3], name='A').unique() array([2, 1, 3]) >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00'] Length: 1, dtype: datetime64[ns] >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern') ... for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00-05:00'] Length: 1, dtype: datetime64[ns, US/Eastern] An Categorical will return categories in the order of appearance and with the same dtype. >>> pd.Series(pd.Categorical(list('baabc'))).unique() ['b', 'a', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'), ... ordered=True)).unique() ['b', 'a', 'c'] Categories (3, object): ['a' < 'b' < 'c'] """ return super().unique() def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[False] = ..., ignore_index: bool = ..., ) -> Series: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[True], ignore_index: bool = ... ) -> None: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: bool = ..., ignore_index: bool = ... ) -> Series | None: ... def drop_duplicates( self, *, keep: DropKeep = "first", inplace: bool = False, ignore_index: bool = False, ) -> Series | None: """ Return Series with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' Method to handle dropping duplicates: - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. inplace : bool, default ``False`` If ``True``, performs operation inplace and returns None. ignore_index : bool, default ``False`` If ``True``, the resulting axis will be labeled 0, 1, …, n - 1. .. versionadded:: 2.0.0 Returns ------- Series or None Series with duplicates dropped or None if ``inplace=True``. See Also -------- Index.drop_duplicates : Equivalent method on Index. DataFrame.drop_duplicates : Equivalent method on DataFrame. Series.duplicated : Related method on Series, indicating duplicate Series values. Series.unique : Return unique values as an array. Examples -------- Generate a Series with duplicated entries. 
>>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], ... name='animal') >>> s 0 lama 1 cow 2 lama 3 beetle 4 lama 5 hippo Name: animal, dtype: object With the 'keep' parameter, the selection behaviour of duplicated values can be changed. The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> s.drop_duplicates() 0 lama 1 cow 3 beetle 5 hippo Name: animal, dtype: object The value 'last' for parameter 'keep' keeps the last occurrence for each set of duplicated entries. >>> s.drop_duplicates(keep='last') 1 cow 3 beetle 4 lama 5 hippo Name: animal, dtype: object The value ``False`` for parameter 'keep' discards all sets of duplicated entries. >>> s.drop_duplicates(keep=False) 1 cow 3 beetle 5 hippo Name: animal, dtype: object """ inplace = validate_bool_kwarg(inplace, "inplace") result = super().drop_duplicates(keep=keep) if ignore_index: result.index = default_index(len(result)) if inplace: self._update_inplace(result) return None else: return result def duplicated(self, keep: DropKeep = "first") -> Series: """ Indicate duplicate Series values. Duplicated values are indicated as ``True`` values in the resulting Series. Either all duplicates, all except the first or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' Method to handle dropping duplicates: - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- Series[bool] Series indicating whether each value has occurred in the preceding values. See Also -------- Index.duplicated : Equivalent method on pandas.Index. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Series.drop_duplicates : Remove duplicate values from Series. Examples -------- By default, for each set of duplicated values, the first occurrence is set on False and all others on True: >>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> animals.duplicated() 0 False 1 False 2 True 3 False 4 True dtype: bool which is equivalent to >>> animals.duplicated(keep='first') 0 False 1 False 2 True 3 False 4 True dtype: bool By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True: >>> animals.duplicated(keep='last') 0 True 1 False 2 True 3 False 4 False dtype: bool By setting keep on ``False``, all duplicates are True: >>> animals.duplicated(keep=False) 0 True 1 False 2 True 3 False 4 True dtype: bool """ res = self._duplicated(keep=keep) result = self._constructor(res, index=self.index, copy=False) return result.__finalize__(self, method="duplicated") def idxmin(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the minimum value. If multiple values equal the minimum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the minimum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmin : Return indices of the minimum values along the given axis. 
DataFrame.idxmin : Return index of first occurrence of minimum over requested axis. Series.idxmax : Return index *label* of the first occurrence of maximum of values. Notes ----- This method is the Series version of ``ndarray.argmin``. This method returns the label of the minimum, while ``ndarray.argmin`` returns the position. To get the position, use ``series.values.argmin()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 1], ... index=['A', 'B', 'C', 'D']) >>> s A 1.0 B NaN C 4.0 D 1.0 dtype: float64 >>> s.idxmin() 'A' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmin(skipna=False) nan """ # error: Argument 1 to "argmin" of "IndexOpsMixin" has incompatible type "Union # [int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmin(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def idxmax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the maximum value. If multiple values equal the maximum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the maximum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmax : Return indices of the maximum values along the given axis. DataFrame.idxmax : Return index of first occurrence of maximum over requested axis. Series.idxmin : Return index *label* of the first occurrence of minimum of values. Notes ----- This method is the Series version of ``ndarray.argmax``. This method returns the label of the maximum, while ``ndarray.argmax`` returns the position. To get the position, use ``series.values.argmax()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 3, 4], ... index=['A', 'B', 'C', 'D', 'E']) >>> s A 1.0 B NaN C 4.0 D 3.0 E 4.0 dtype: float64 >>> s.idxmax() 'C' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmax(skipna=False) nan """ # error: Argument 1 to "argmax" of "IndexOpsMixin" has incompatible type # "Union[int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmax(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def round(self, decimals: int = 0, *args, **kwargs) -> Series: """ Round each value in a Series to the given number of decimals. Parameters ---------- decimals : int, default 0 Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Series Rounded values of the Series. See Also -------- numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. Examples -------- >>> s = pd.Series([0.1, 1.3, 2.7]) >>> s.round() 0 0.0 1 1.0 2 3.0 dtype: float64 """ nv.validate_round(args, kwargs) result = self._values.round(decimals) result = self._constructor(result, index=self.index, copy=False).__finalize__( self, method="round" ) return result def quantile( self, q: float = ..., interpolation: QuantileInterpolation = ... 
) -> float: ... def quantile( self, q: Sequence[float] | AnyArrayLike, interpolation: QuantileInterpolation = ..., ) -> Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = ..., interpolation: QuantileInterpolation = ..., ) -> float | Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = 0.5, interpolation: QuantileInterpolation = "linear", ) -> float | Series: """ Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) The quantile(s) to compute, which can lie in range: 0 <= q <= 1. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- float or Series If ``q`` is an array, a Series will be returned where the index is ``q`` and the values are the quantiles, otherwise a float will be returned. See Also -------- core.window.Rolling.quantile : Calculate the rolling quantile. numpy.percentile : Returns the q-th percentile(s) of the array elements. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.quantile(.5) 2.5 >>> s.quantile([.25, .5, .75]) 0.25 1.75 0.50 2.50 0.75 3.25 dtype: float64 """ validate_percentile(q) # We dispatch to DataFrame so that core.internals only has to worry # about 2D cases. df = self.to_frame() result = df.quantile(q=q, interpolation=interpolation, numeric_only=False) if result.ndim == 2: result = result.iloc[:, 0] if is_list_like(q): result.name = self.name idx = Index(q, dtype=np.float64) return self._constructor(result, index=idx, name=self.name) else: # scalar return result.iloc[0] def corr( self, other: Series, method: CorrelationMethod = "pearson", min_periods: int | None = None, ) -> float: """ Compute correlation with `other` Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the correlation function is applied. Parameters ---------- other : Series Series with which to compute the correlation. method : {'pearson', 'kendall', 'spearman'} or callable Method used to compute correlation: - pearson : Standard correlation coefficient - kendall : Kendall Tau correlation coefficient - spearman : Spearman rank correlation - callable: Callable with input two 1d ndarrays and returning a float. .. warning:: Note that the returned matrix from corr will have 1 along the diagonals and will be symmetric regardless of the callable's behavior. min_periods : int, optional Minimum number of observations needed to have a valid result. Returns ------- float Correlation with other. See Also -------- DataFrame.corr : Compute pairwise correlation between columns. DataFrame.corrwith : Compute pairwise correlation with another DataFrame or Series. Notes ----- Pearson, Kendall and Spearman correlation are currently computed using pairwise complete observations. 
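
        Automatic data alignment: as with all pandas operations, automatic
        data alignment is performed for this method. ``corr()`` automatically
        considers values with matching indices.
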
* `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_ * `Kendall rank correlation coefficient <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_ * `Spearman's rank correlation coefficient <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_ Examples -------- >>> def histogram_intersection(a, b): ... v = np.minimum(a, b).sum().round(decimals=1) ... return v >>> s1 = pd.Series([.2, .0, .6, .2]) >>> s2 = pd.Series([.3, .6, .0, .1]) >>> s1.corr(s2, method=histogram_intersection) 0.3 """ # noqa:E501 this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan if method in ["pearson", "spearman", "kendall"] or callable(method): return nanops.nancorr( this.values, other.values, method=method, min_periods=min_periods ) raise ValueError( "method must be either 'pearson', " "'spearman', 'kendall', or a callable, " f"'{method}' was supplied" ) def cov( self, other: Series, min_periods: int | None = None, ddof: int | None = 1, ) -> float: """ Compute covariance with Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the covariance is calculated. Parameters ---------- other : Series Series with which to compute the covariance. min_periods : int, optional Minimum number of observations needed to have a valid result. ddof : int, default 1 Delta degrees of freedom. The divisor used in calculations is ``N - ddof``, where ``N`` represents the number of elements. .. versionadded:: 1.1.0 Returns ------- float Covariance between Series and other normalized by N-1 (unbiased estimator). See Also -------- DataFrame.cov : Compute pairwise covariance of columns. Examples -------- >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035]) >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198]) >>> s1.cov(s2) -0.01685762652715874 """ this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan return nanops.nancov( this.values, other.values, min_periods=min_periods, ddof=ddof ) klass="Series", extra_params="", other_klass="DataFrame", examples=dedent( """ Difference with previous row >>> s = pd.Series([1, 1, 2, 3, 5, 8]) >>> s.diff() 0 NaN 1 0.0 2 1.0 3 1.0 4 2.0 5 3.0 dtype: float64 Difference with 3rd previous row >>> s.diff(periods=3) 0 NaN 1 NaN 2 NaN 3 2.0 4 4.0 5 6.0 dtype: float64 Difference with following row >>> s.diff(periods=-1) 0 0.0 1 -1.0 2 -1.0 3 -2.0 4 -3.0 5 NaN dtype: float64 Overflow in input dtype >>> s = pd.Series([1, 0], dtype=np.uint8) >>> s.diff() 0 NaN 1 255.0 dtype: float64""" ), ) def diff(self, periods: int = 1) -> Series: """ First discrete difference of element. Calculates the difference of a {klass} element compared with another element in the {klass} (default is element in previous row). Parameters ---------- periods : int, default 1 Periods to shift for calculating difference, accepts negative values. {extra_params} Returns ------- {klass} First differences of the Series. See Also -------- {klass}.pct_change: Percent change over given number of periods. {klass}.shift: Shift index by desired number of periods with an optional time freq. {other_klass}.diff: First discrete difference of object. Notes ----- For boolean dtypes, this uses :meth:`operator.xor` rather than :meth:`operator.sub`. The result is calculated according to current dtype in {klass}, however dtype of the result is always float64. 
Examples -------- {examples} """ result = algorithms.diff(self._values, periods) return self._constructor(result, index=self.index, copy=False).__finalize__( self, method="diff" ) def autocorr(self, lag: int = 1) -> float: """ Compute the lag-N autocorrelation. This method computes the Pearson correlation between the Series and its shifted self. Parameters ---------- lag : int, default 1 Number of lags to apply before performing autocorrelation. Returns ------- float The Pearson correlation between self and self.shift(lag). See Also -------- Series.corr : Compute the correlation between two Series. Series.shift : Shift index by desired number of periods. DataFrame.corr : Compute pairwise correlation of columns. DataFrame.corrwith : Compute pairwise correlation between rows or columns of two DataFrame objects. Notes ----- If the Pearson correlation is not well defined return 'NaN'. Examples -------- >>> s = pd.Series([0.25, 0.5, 0.2, -0.05]) >>> s.autocorr() # doctest: +ELLIPSIS 0.10355... >>> s.autocorr(lag=2) # doctest: +ELLIPSIS -0.99999... If the Pearson correlation is not well defined, then 'NaN' is returned. >>> s = pd.Series([1, 0, 0, 0]) >>> s.autocorr() nan """ return self.corr(self.shift(lag)) def dot(self, other: AnyArrayLike) -> Series | np.ndarray: """ Compute the dot product between the Series and the columns of other. This method computes the dot product between the Series and another one, or the Series and each columns of a DataFrame, or the Series and each columns of an array. It can also be called using `self @ other` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the dot product with its columns. Returns ------- scalar, Series or numpy.ndarray Return the dot product of the Series and other if other is a Series, the Series of the dot product of Series and each rows of other if other is a DataFrame or a numpy.ndarray between the Series and each columns of the numpy array. See Also -------- DataFrame.dot: Compute the matrix product with the DataFrame. Series.mul: Multiplication of series and other, element-wise. Notes ----- The Series and other has to share the same index if other is a Series or a DataFrame. Examples -------- >>> s = pd.Series([0, 1, 2, 3]) >>> other = pd.Series([-1, 2, -3, 4]) >>> s.dot(other) 8 >>> s @ other 8 >>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(df) 0 24 1 14 dtype: int64 >>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(arr) array([24, 14]) """ if isinstance(other, (Series, ABCDataFrame)): common = self.index.union(other.index) if len(common) > len(self.index) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(index=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right.values else: lvals = self.values rvals = np.asarray(other) if lvals.shape[0] != rvals.shape[0]: raise Exception( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, ABCDataFrame): return self._constructor( np.dot(lvals, rvals), index=other.columns, copy=False ).__finalize__(self, method="dot") elif isinstance(other, Series): return np.dot(lvals, rvals) elif isinstance(rvals, np.ndarray): return np.dot(lvals, rvals) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. 
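
        Equivalent to calling :meth:`Series.dot` with ``other`` as argument.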
""" return self.dot(other) def __rmatmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(np.transpose(other)) # Signature of "searchsorted" incompatible with supertype "IndexOpsMixin" def searchsorted( # type: ignore[override] self, value: NumpyValueArrayLike | ExtensionArray, side: Literal["left", "right"] = "left", sorter: NumpySorter = None, ) -> npt.NDArray[np.intp] | np.intp: return base.IndexOpsMixin.searchsorted(self, value, side=side, sorter=sorter) # ------------------------------------------------------------------- # Combination def _append( self, to_append, ignore_index: bool = False, verify_integrity: bool = False ): from pandas.core.reshape.concat import concat if isinstance(to_append, (list, tuple)): to_concat = [self] to_concat.extend(to_append) else: to_concat = [self, to_append] if any(isinstance(x, (ABCDataFrame,)) for x in to_concat[1:]): msg = "to_append should be a Series or list/tuple of Series, got DataFrame" raise TypeError(msg) return concat( to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity ) def _binop(self, other: Series, func, level=None, fill_value=None): """ Perform generic binary operation with optional fill value. Parameters ---------- other : Series func : binary operator fill_value : float or object Value to substitute for NA/null values. If both Series are NA in a location, the result will be NA regardless of the passed fill value. level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. Returns ------- Series """ if not isinstance(other, Series): raise AssertionError("Other operand must be Series") this = self if not self.index.equals(other.index): this, other = self.align(other, level=level, join="outer", copy=False) this_vals, other_vals = ops.fill_binop(this._values, other._values, fill_value) with np.errstate(all="ignore"): result = func(this_vals, other_vals) name = ops.get_op_result_name(self, other) return this._construct_result(result, name) def _construct_result( self, result: ArrayLike | tuple[ArrayLike, ArrayLike], name: Hashable ) -> Series | tuple[Series, Series]: """ Construct an appropriately-labelled Series from the result of an op. Parameters ---------- result : ndarray or ExtensionArray name : Label Returns ------- Series In the case of __divmod__ or __rdivmod__, a 2-tuple of Series. """ if isinstance(result, tuple): # produced by divmod or rdivmod res1 = self._construct_result(result[0], name=name) res2 = self._construct_result(result[1], name=name) # GH#33427 assertions to keep mypy happy assert isinstance(res1, Series) assert isinstance(res2, Series) return (res1, res2) # TODO: result should always be ArrayLike, but this fails for some # JSONArray tests dtype = getattr(result, "dtype", None) out = self._constructor(result, index=self.index, dtype=dtype) out = out.__finalize__(self) # Set the result's name after __finalize__ is called because __finalize__ # would set it back to self.name out.name = name return out _shared_docs["compare"], """ Returns ------- Series or DataFrame If axis is 0 or 'index' the result will be a Series. The resulting index will be a MultiIndex with 'self' and 'other' stacked alternately at the inner level. If axis is 1 or 'columns' the result will be a DataFrame. It will have two columns namely 'self' and 'other'. See Also -------- DataFrame.compare : Compare with another DataFrame and show differences. Notes ----- Matching NaNs will not appear as a difference. 
Examples -------- >>> s1 = pd.Series(["a", "b", "c", "d", "e"]) >>> s2 = pd.Series(["a", "a", "c", "b", "e"]) Align the differences on columns >>> s1.compare(s2) self other 1 b a 3 d b Stack the differences on indices >>> s1.compare(s2, align_axis=0) 1 self b other a 3 self d other b dtype: object Keep all original rows >>> s1.compare(s2, keep_shape=True) self other 0 NaN NaN 1 b a 2 NaN NaN 3 d b 4 NaN NaN Keep all original rows and also all original values >>> s1.compare(s2, keep_shape=True, keep_equal=True) self other 0 a a 1 b a 2 c c 3 d b 4 e e """, klass=_shared_doc_kwargs["klass"], ) def compare( self, other: Series, align_axis: Axis = 1, keep_shape: bool = False, keep_equal: bool = False, result_names: Suffixes = ("self", "other"), ) -> DataFrame | Series: return super().compare( other=other, align_axis=align_axis, keep_shape=keep_shape, keep_equal=keep_equal, result_names=result_names, ) def combine( self, other: Series | Hashable, func: Callable[[Hashable, Hashable], Hashable], fill_value: Hashable = None, ) -> Series: """ Combine the Series with a Series or scalar according to `func`. Combine the Series and `other` using `func` to perform elementwise selection for combined Series. `fill_value` is assumed when value is missing at some index from one of the two objects being combined. Parameters ---------- other : Series or scalar The value(s) to be combined with the `Series`. func : function Function that takes two scalars as inputs and returns an element. fill_value : scalar, optional The value to assume when an index is missing from one Series or the other. The default specifies to use the appropriate NaN value for the underlying dtype of the Series. Returns ------- Series The result of combining the Series with the other object. See Also -------- Series.combine_first : Combine Series values, choosing the calling Series' values first. Examples -------- Consider 2 Datasets ``s1`` and ``s2`` containing highest clocked speeds of different birds. >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0}) >>> s1 falcon 330.0 eagle 160.0 dtype: float64 >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0}) >>> s2 falcon 345.0 eagle 200.0 duck 30.0 dtype: float64 Now, to combine the two datasets and view the highest speeds of the birds across the two datasets >>> s1.combine(s2, max) duck NaN eagle 200.0 falcon 345.0 dtype: float64 In the previous example, the resulting value for duck is missing, because the maximum of a NaN and a float is a NaN. So, in the example, we set ``fill_value=0``, so the maximum value returned will be the value from some dataset. 
>>> s1.combine(s2, max, fill_value=0) duck 30.0 eagle 200.0 falcon 345.0 dtype: float64 """ if fill_value is None: fill_value = na_value_for_dtype(self.dtype, compat=False) if isinstance(other, Series): # If other is a Series, result is based on union of Series, # so do this element by element new_index = self.index.union(other.index) new_name = ops.get_op_result_name(self, other) new_values = np.empty(len(new_index), dtype=object) for i, idx in enumerate(new_index): lv = self.get(idx, fill_value) rv = other.get(idx, fill_value) with np.errstate(all="ignore"): new_values[i] = func(lv, rv) else: # Assume that other is a scalar, so apply the function for # each element in the Series new_index = self.index new_values = np.empty(len(new_index), dtype=object) with np.errstate(all="ignore"): new_values[:] = [func(lv, other) for lv in self._values] new_name = self.name # try_float=False is to match agg_series npvalues = lib.maybe_convert_objects(new_values, try_float=False) res_values = maybe_cast_pointwise_result(npvalues, self.dtype, same_dtype=False) return self._constructor(res_values, index=new_index, name=new_name, copy=False) def combine_first(self, other) -> Series: """ Update null elements with value in the same location in 'other'. Combine two Series objects by filling null values in one Series with non-null values from the other Series. Result index will be the union of the two indexes. Parameters ---------- other : Series The value(s) to be used for filling null values. Returns ------- Series The result of combining the provided Series with the other object. See Also -------- Series.combine : Perform element-wise operation on two Series using a given function. Examples -------- >>> s1 = pd.Series([1, np.nan]) >>> s2 = pd.Series([3, 4, 5]) >>> s1.combine_first(s2) 0 1.0 1 4.0 2 5.0 dtype: float64 Null values still persist if the location of that null value does not exist in `other` >>> s1 = pd.Series({'falcon': np.nan, 'eagle': 160.0}) >>> s2 = pd.Series({'eagle': 200.0, 'duck': 30.0}) >>> s1.combine_first(s2) duck 30.0 eagle 160.0 falcon NaN dtype: float64 """ new_index = self.index.union(other.index) this = self.reindex(new_index, copy=False) other = other.reindex(new_index, copy=False) if this.dtype.kind == "M" and other.dtype.kind != "M": other = to_datetime(other) return this.where(notna(this), other) def update(self, other: Series | Sequence | Mapping) -> None: """ Modify Series in place using values from passed Series. Uses non-NA values from passed Series to make updates. Aligns on index. Parameters ---------- other : Series, or object coercible into Series Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6])) >>> s 0 4 1 5 2 6 dtype: int64 >>> s = pd.Series(['a', 'b', 'c']) >>> s.update(pd.Series(['d', 'e'], index=[0, 2])) >>> s 0 d 1 b 2 e dtype: object >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6, 7, 8])) >>> s 0 4 1 5 2 6 dtype: int64 If ``other`` contains NaNs the corresponding values are not updated in the original Series. 
>>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, np.nan, 6])) >>> s 0 4 1 2 2 6 dtype: int64 ``other`` can also be a non-Series object type that is coercible into a Series >>> s = pd.Series([1, 2, 3]) >>> s.update([4, np.nan, 6]) >>> s 0 4 1 2 2 6 dtype: int64 >>> s = pd.Series([1, 2, 3]) >>> s.update({1: 9}) >>> s 0 1 1 9 2 3 dtype: int64 """ if not isinstance(other, Series): other = Series(other) other = other.reindex_like(self) mask = notna(other) self._mgr = self._mgr.putmask(mask=mask, new=other) self._maybe_update_cacher() # ---------------------------------------------------------------------- # Reindexing, sorting def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[False] = ..., kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> Series: ... def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[True], kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> None: ... def sort_values( self, *, axis: Axis = 0, ascending: bool | int | Sequence[bool] | Sequence[int] = True, inplace: bool = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool = False, key: ValueKeyFunc = None, ) -> Series | None: """ Sort by the values. Sort a Series in ascending or descending order by some criterion. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. ascending : bool or list of bools, default True If True, sort values in ascending order, otherwise descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the series values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return an array-like. .. versionadded:: 1.1.0 Returns ------- Series or None Series ordered by values or None if ``inplace=True``. See Also -------- Series.sort_index : Sort by the Series indices. DataFrame.sort_values : Sort DataFrame by the values along either axis. DataFrame.sort_index : Sort DataFrame by indices. Examples -------- >>> s = pd.Series([np.nan, 1, 3, 10, 5]) >>> s 0 NaN 1 1.0 2 3.0 3 10.0 4 5.0 dtype: float64 Sort values ascending order (default behaviour) >>> s.sort_values(ascending=True) 1 1.0 2 3.0 4 5.0 3 10.0 0 NaN dtype: float64 Sort values descending order >>> s.sort_values(ascending=False) 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN dtype: float64 Sort values putting NAs first >>> s.sort_values(na_position='first') 0 NaN 1 1.0 2 3.0 4 5.0 3 10.0 dtype: float64 Sort a series of strings >>> s = pd.Series(['z', 'b', 'd', 'a', 'c']) >>> s 0 z 1 b 2 d 3 a 4 c dtype: object >>> s.sort_values() 3 a 1 b 4 c 2 d 0 z dtype: object Sort using a key function. Your `key` function will be given the ``Series`` of values and should return an array-like. 
>>> s = pd.Series(['a', 'B', 'c', 'D', 'e']) >>> s.sort_values() 1 B 3 D 0 a 2 c 4 e dtype: object >>> s.sort_values(key=lambda x: x.str.lower()) 0 a 1 B 2 c 3 D 4 e dtype: object NumPy ufuncs work well here. For example, we can sort by the ``sin`` of the value >>> s = pd.Series([-4, -2, 0, 2, 4]) >>> s.sort_values(key=np.sin) 1 -2 4 4 2 0 0 -4 3 2 dtype: int64 More complicated user-defined functions can be used, as long as they expect a Series and return an array-like >>> s.sort_values(key=lambda x: (np.tan(x.cumsum()))) 0 -4 3 2 4 4 1 -2 2 0 dtype: int64 """ inplace = validate_bool_kwarg(inplace, "inplace") # Validate the axis parameter self._get_axis_number(axis) # GH 5856/5853 if inplace and self._is_cached: raise ValueError( "This Series is a view of some other array, to " "sort in-place you must create a copy" ) if is_list_like(ascending): ascending = cast(Sequence[Union[bool, int]], ascending) if len(ascending) != 1: raise ValueError( f"Length of ascending ({len(ascending)}) must be 1 for Series" ) ascending = ascending[0] ascending = validate_ascending(ascending) if na_position not in ["first", "last"]: raise ValueError(f"invalid na_position: {na_position}") # GH 35922. Make sorting stable by leveraging nargsort values_to_sort = ensure_key_mapped(self, key)._values if key else self._values sorted_index = nargsort(values_to_sort, kind, bool(ascending), na_position) if is_range_indexer(sorted_index, len(sorted_index)): if inplace: return self._update_inplace(self) return self.copy(deep=None) result = self._constructor( self._values[sorted_index], index=self.index[sorted_index], copy=False ) if ignore_index: result.index = default_index(len(sorted_index)) if not inplace: return result.__finalize__(self, method="sort_values") self._update_inplace(result) return None def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[True], kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> None: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[False] = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: bool = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series | None: ... def sort_index( self, *, axis: Axis = 0, level: IndexLabel = None, ascending: bool | Sequence[bool] = True, inplace: bool = False, kind: SortKind = "quicksort", na_position: NaPosition = "last", sort_remaining: bool = True, ignore_index: bool = False, key: IndexKeyFunc = None, ) -> Series | None: """ Sort Series by index labels. Returns a new Series sorted by label if `inplace` argument is ``False``, otherwise updates the original series and returns None. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. level : int, optional If not None, sort on values in specified index level(s). ascending : bool or list-like of bools, default True Sort ascending vs. descending. When the index is a MultiIndex the sort direction can be controlled for each level individually. inplace : bool, default False If True, perform operation in-place. 
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- Series or None The original Series sorted by the labels or None if ``inplace=True``. See Also -------- DataFrame.sort_index: Sort DataFrame by the index. DataFrame.sort_values: Sort DataFrame by the value. Series.sort_values : Sort Series by the value. Examples -------- >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4]) >>> s.sort_index() 1 c 2 b 3 a 4 d dtype: object Sort Descending >>> s.sort_index(ascending=False) 4 d 3 a 2 b 1 c dtype: object By default NaNs are put at the end, but use `na_position` to place them at the beginning >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan]) >>> s.sort_index(na_position='first') NaN d 1.0 c 2.0 b 3.0 a dtype: object Specify index level to sort >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo', ... 'baz', 'baz', 'bar', 'bar']), ... np.array(['two', 'one', 'two', 'one', ... 'two', 'one', 'two', 'one'])] >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays) >>> s.sort_index(level=1) bar one 8 baz one 6 foo one 4 qux one 2 bar two 7 baz two 5 foo two 3 qux two 1 dtype: int64 Does not sort by remaining levels when sorting by levels >>> s.sort_index(level=1, sort_remaining=False) qux one 2 foo one 4 baz one 6 bar one 8 qux two 1 foo two 3 baz two 5 bar two 7 dtype: int64 Apply a key function before sorting >>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd']) >>> s.sort_index(key=lambda x : x.str.lower()) A 1 b 2 C 3 d 4 dtype: int64 """ return super().sort_index( axis=axis, level=level, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position, sort_remaining=sort_remaining, ignore_index=ignore_index, key=key, ) def argsort( self, axis: Axis = 0, kind: SortKind = "quicksort", order: None = None, ) -> Series: """ Return the integer indices that would sort the Series values. Override ndarray.argsort. Argsorts the value, omitting NA/null values, and places the result in the same locations as the non-NA values. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. order : None Has no effect but is accepted for compatibility with numpy. Returns ------- Series[np.intp] Positions of values within the sort order with -1 indicating nan values. 
See Also -------- numpy.ndarray.argsort : Returns the indices that would sort this array. """ values = self._values mask = isna(values) if mask.any(): result = np.full(len(self), -1, dtype=np.intp) notmask = ~mask result[notmask] = np.argsort(values[notmask], kind=kind) else: result = np.argsort(values, kind=kind) res = self._constructor( result, index=self.index, name=self.name, dtype=np.intp, copy=False ) return res.__finalize__(self, method="argsort") def nlargest( self, n: int = 5, keep: Literal["first", "last", "all"] = "first" ) -> Series: """ Return the largest `n` elements. Parameters ---------- n : int, default 5 Return this many descending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` largest values in the Series, sorted in decreasing order. See Also -------- Series.nsmallest: Get the `n` smallest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. Notes ----- Faster than ``.sort_values(ascending=False).head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Malta": 434000, "Maldives": 434000, ... "Brunei": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Malta 434000 Maldives 434000 Brunei 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` largest elements where ``n=5`` by default. >>> s.nlargest() France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3``. Default `keep` value is 'first' so Malta will be kept. >>> s.nlargest(3) France 65000000 Italy 59000000 Malta 434000 dtype: int64 The `n` largest elements where ``n=3`` and keeping the last duplicates. Brunei will be kept since it is the last with value 434000 based on the index order. >>> s.nlargest(3, keep='last') France 65000000 Italy 59000000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3`` with all duplicates kept. Note that the returned Series has five elements due to the three duplicates. >>> s.nlargest(3, keep='all') France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nlargest() def nsmallest(self, n: int = 5, keep: str = "first") -> Series: """ Return the smallest `n` elements. Parameters ---------- n : int, default 5 Return this many ascending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` smallest values in the Series, sorted in increasing order. See Also -------- Series.nlargest: Get the `n` largest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. 
Notes ----- Faster than ``.sort_values().head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Brunei": 434000, "Malta": 434000, ... "Maldives": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Brunei 434000 Malta 434000 Maldives 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` smallest elements where ``n=5`` by default. >>> s.nsmallest() Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 Iceland 337000 dtype: int64 The `n` smallest elements where ``n=3``. Default `keep` value is 'first' so Nauru and Tuvalu will be kept. >>> s.nsmallest(3) Montserrat 5200 Nauru 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` and keeping the last duplicates. Anguilla and Tuvalu will be kept since they are the last with value 11300 based on the index order. >>> s.nsmallest(3, keep='last') Montserrat 5200 Anguilla 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` with all duplicates kept. Note that the returned Series has four elements due to the three duplicates. >>> s.nsmallest(3, keep='all') Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nsmallest() klass=_shared_doc_kwargs["klass"], extra_params=dedent( """copy : bool, default True Whether to copy underlying data.""" ), examples=dedent( """\ Examples -------- >>> s = pd.Series( ... ["A", "B", "A", "C"], ... index=[ ... ["Final exam", "Final exam", "Coursework", "Coursework"], ... ["History", "Geography", "History", "Geography"], ... ["January", "February", "March", "April"], ... ], ... ) >>> s Final exam History January A Geography February B Coursework History March A Geography April C dtype: object In the following example, we will swap the levels of the indices. Here, we will swap the levels column-wise, but levels can be swapped row-wise in a similar manner. Note that column-wise is the default behaviour. By not supplying any arguments for i and j, we swap the last and second to last indices. >>> s.swaplevel() Final exam January History A February Geography B Coursework March History A April Geography C dtype: object By supplying one argument, we can choose which index to swap the last index with. We can for example swap the first index with the last one as follows. >>> s.swaplevel(0) January History Final exam A February Geography Final exam B March History Coursework A April Geography Coursework C dtype: object We can also define explicitly which indices we want to swap by supplying values for both i and j. Here, we for example swap the first and second indices. >>> s.swaplevel(0, 1) History Final exam January A Geography Final exam February B History Coursework March A Geography Coursework April C dtype: object""" ), ) def swaplevel( self, i: Level = -2, j: Level = -1, copy: bool | None = None ) -> Series: """ Swap levels i and j in a :class:`MultiIndex`. Default is to swap the two innermost levels of the index. Parameters ---------- i, j : int or str Levels of the indices to be swapped. Can pass level name as string. {extra_params} Returns ------- {klass} {klass} with levels swapped in MultiIndex. 
{examples} """ assert isinstance(self.index, MultiIndex) result = self.copy(deep=copy and not using_copy_on_write()) result.index = self.index.swaplevel(i, j) return result def reorder_levels(self, order: Sequence[Level]) -> Series: """ Rearrange index levels using input order. May not drop or duplicate levels. Parameters ---------- order : list of int representing new level order Reference level by number or key. Returns ------- type of caller (new object) """ if not isinstance(self.index, MultiIndex): # pragma: no cover raise Exception("Can only reorder levels on a hierarchical axis.") result = self.copy(deep=None) assert isinstance(result.index, MultiIndex) result.index = result.index.reorder_levels(order) return result def explode(self, ignore_index: bool = False) -> Series: """ Transform each element of a list-like to a row. Parameters ---------- ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.1.0 Returns ------- Series Exploded lists to rows; index will be duplicated for these rows. See Also -------- Series.str.split : Split string values on specified separator. Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame. DataFrame.melt : Unpivot a DataFrame from wide format to long format. DataFrame.explode : Explode a DataFrame from list-like columns to long format. Notes ----- This routine will explode list-likes including lists, tuples, sets, Series, and np.ndarray. The result dtype of the subset rows will be object. Scalars will be returned unchanged, and empty list-likes will result in a np.nan for that row. In addition, the ordering of elements in the output will be non-deterministic when exploding sets. Reference :ref:`the user guide <reshaping.explode>` for more examples. Examples -------- >>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]]) >>> s 0 [1, 2, 3] 1 foo 2 [] 3 [3, 4] dtype: object >>> s.explode() 0 1 0 2 0 3 1 foo 2 NaN 3 3 3 4 dtype: object """ if not len(self) or not is_object_dtype(self): result = self.copy() return result.reset_index(drop=True) if ignore_index else result values, counts = reshape.explode(np.asarray(self._values)) if ignore_index: index = default_index(len(values)) else: index = self.index.repeat(counts) return self._constructor(values, index=index, name=self.name, copy=False) def unstack(self, level: IndexLabel = -1, fill_value: Hashable = None) -> DataFrame: """ Unstack, also known as pivot, Series with MultiIndex to produce DataFrame. Parameters ---------- level : int, str, or list of these, default last level Level(s) to unstack, can pass level name. fill_value : scalar value, default None Value to use when replacing NaN values. Returns ------- DataFrame Unstacked Series. Notes ----- Reference :ref:`the user guide <reshaping.stacking>` for more examples. Examples -------- >>> s = pd.Series([1, 2, 3, 4], ... index=pd.MultiIndex.from_product([['one', 'two'], ... ['a', 'b']])) >>> s one a 1 b 2 two a 3 b 4 dtype: int64 >>> s.unstack(level=-1) a b one 1 2 two 3 4 >>> s.unstack(level=0) one two a 1 3 b 2 4 """ from pandas.core.reshape.reshape import unstack return unstack(self, level, fill_value) # ---------------------------------------------------------------------- # function application def map( self, arg: Callable | Mapping | Series, na_action: Literal["ignore"] | None = None, ) -> Series: """ Map values of Series according to an input mapping or function. 
Used for substituting each value in a Series with another value, that may be derived from a function, a ``dict`` or a :class:`Series`. Parameters ---------- arg : function, collections.abc.Mapping subclass or Series Mapping correspondence. na_action : {None, 'ignore'}, default None If 'ignore', propagate NaN values, without passing them to the mapping correspondence. Returns ------- Series Same index as caller. See Also -------- Series.apply : For applying more complex functions on a Series. DataFrame.apply : Apply a function row-/column-wise. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. Notes ----- When ``arg`` is a dictionary, values in Series that are not in the dictionary (as keys) are converted to ``NaN``. However, if the dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e. provides a method for default values), then this default is used rather than ``NaN``. Examples -------- >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit']) >>> s 0 cat 1 dog 2 NaN 3 rabbit dtype: object ``map`` accepts a ``dict`` or a ``Series``. Values that are not found in the ``dict`` are converted to ``NaN``, unless the dict has a default value (e.g. ``defaultdict``): >>> s.map({'cat': 'kitten', 'dog': 'puppy'}) 0 kitten 1 puppy 2 NaN 3 NaN dtype: object It also accepts a function: >>> s.map('I am a {}'.format) 0 I am a cat 1 I am a dog 2 I am a nan 3 I am a rabbit dtype: object To avoid applying the function to missing values (and keep them as ``NaN``) ``na_action='ignore'`` can be used: >>> s.map('I am a {}'.format, na_action='ignore') 0 I am a cat 1 I am a dog 2 NaN 3 I am a rabbit dtype: object """ new_values = self._map_values(arg, na_action=na_action) return self._constructor(new_values, index=self.index, copy=False).__finalize__( self, method="map" ) def _gotitem(self, key, ndim, subset=None) -> Series: """ Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : {1, 2} Requested ndim of result. subset : object, default None Subset to act on. """ return self _agg_see_also_doc = dedent( """ See Also -------- Series.apply : Invoke function on a Series. Series.transform : Transform function producing a Series with like indexes. """ ) _agg_examples_doc = dedent( """ Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.agg('min') 1 >>> s.agg(['min', 'max']) min 1 max 4 dtype: int64 """ ) _shared_docs["aggregate"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, ) def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs): # Validate the axis parameter self._get_axis_number(axis) # if func is None, will switch to user-provided "named aggregation" kwargs if func is None: func = dict(kwargs.items()) op = SeriesApply(self, func, convert_dtype=False, args=args, kwargs=kwargs) result = op.agg() return result agg = aggregate # error: Signature of "any" incompatible with supertype "NDFrame" [override] def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: None = ..., **kwargs, ) -> bool: ... def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: Level, **kwargs, ) -> Series | bool: ... # error: Missing return statement def any( # type: ignore[empty-body] self, axis: Axis = 0, bool_only: bool | None = None, skipna: bool = True, level: Level | None = None, **kwargs, ) -> Series | bool: ... 
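    # Editorial aside -- a minimal sketch (assuming plain pandas) of the
    # named-aggregation fallback in aggregate() above, where func=None is
    # replaced by dict(kwargs.items()); kept commented so it does not alter
    # the class body:
    #
    #   >>> s = pd.Series([1, 2, 3, 4])
    #   >>> s.agg(Minimum="min", Maximum="max")   # kwargs form
    #   Minimum    1
    #   Maximum    4
    #   dtype: int64
    #   >>> s.agg({"Minimum": "min", "Maximum": "max"})   # equivalent dict form
    #   Minimum    1
    #   Maximum    4
    #   dtype: int64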
_shared_docs["transform"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], ) def transform( self, func: AggFuncType, axis: Axis = 0, *args, **kwargs ) -> DataFrame | Series: # Validate axis argument self._get_axis_number(axis) result = SeriesApply( self, func=func, convert_dtype=True, args=args, kwargs=kwargs ).transform() return result def apply( self, func: AggFuncType, convert_dtype: bool = True, args: tuple[Any, ...] = (), **kwargs, ) -> DataFrame | Series: """ Invoke function on values of Series. Can be ufunc (a NumPy function that applies to the entire Series) or a Python function that only works on single values. Parameters ---------- func : function Python function or NumPy ufunc to apply. convert_dtype : bool, default True Try to find better dtype for elementwise function results. If False, leave as dtype=object. Note that the dtype is always preserved for some extension array dtypes, such as Categorical. args : tuple Positional arguments passed to func after the series value. **kwargs Additional keyword arguments passed to func. Returns ------- Series or DataFrame If func returns a Series object the result will be a DataFrame. See Also -------- Series.map: For element-wise operations. Series.agg: Only perform aggregating type operations. Series.transform: Only perform transforming type operations. Notes ----- Functions that mutate the passed object can produce unexpected behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` for more details. Examples -------- Create a series with typical summer temperatures for each city. >>> s = pd.Series([20, 21, 12], ... index=['London', 'New York', 'Helsinki']) >>> s London 20 New York 21 Helsinki 12 dtype: int64 Square the values by defining a function and passing it as an argument to ``apply()``. >>> def square(x): ... return x ** 2 >>> s.apply(square) London 400 New York 441 Helsinki 144 dtype: int64 Square the values by passing an anonymous function as an argument to ``apply()``. >>> s.apply(lambda x: x ** 2) London 400 New York 441 Helsinki 144 dtype: int64 Define a custom function that needs additional positional arguments and pass these additional arguments using the ``args`` keyword. >>> def subtract_custom_value(x, custom_value): ... return x - custom_value >>> s.apply(subtract_custom_value, args=(5,)) London 15 New York 16 Helsinki 7 dtype: int64 Define a custom function that takes keyword arguments and pass these arguments to ``apply``. >>> def add_custom_values(x, **kwargs): ... for month in kwargs: ... x += kwargs[month] ... return x >>> s.apply(add_custom_values, june=30, july=20, august=25) London 95 New York 96 Helsinki 87 dtype: int64 Use a function from the Numpy library. >>> s.apply(np.log) London 2.995732 New York 3.044522 Helsinki 2.484907 dtype: float64 """ return SeriesApply(self, func, convert_dtype, args, kwargs).apply() def _reduce( self, op, name: str, *, axis: Axis = 0, skipna: bool = True, numeric_only: bool = False, filter_type=None, **kwds, ): """ Perform a reduction operation. If we have an ndarray as a value, then simply perform the operation, otherwise delegate to the object. 
""" delegate = self._values if axis is not None: self._get_axis_number(axis) if isinstance(delegate, ExtensionArray): # dispatch to ExtensionArray interface return delegate._reduce(name, skipna=skipna, **kwds) else: # dispatch to numpy arrays if numeric_only and not is_numeric_dtype(self.dtype): kwd_name = "numeric_only" if name in ["any", "all"]: kwd_name = "bool_only" # GH#47500 - change to TypeError to match other methods raise TypeError( f"Series.{name} does not allow {kwd_name}={numeric_only} " "with non-numeric dtypes." ) with np.errstate(all="ignore"): return op(delegate, skipna=skipna, **kwds) def _reindex_indexer( self, new_index: Index | None, indexer: npt.NDArray[np.intp] | None, copy: bool | None, ) -> Series: # Note: new_index is None iff indexer is None # if not None, indexer is np.intp if indexer is None and ( new_index is None or new_index.names == self.index.names ): if using_copy_on_write(): return self.copy(deep=copy) if copy or copy is None: return self.copy(deep=copy) return self new_values = algorithms.take_nd( self._values, indexer, allow_fill=True, fill_value=None ) return self._constructor(new_values, index=new_index, copy=False) def _needs_reindex_multi(self, axes, method, level) -> bool: """ Check if we do need a multi reindex; this is for compat with higher dims. """ return False # error: Cannot determine type of 'align' NDFrame.align, # type: ignore[has-type] klass=_shared_doc_kwargs["klass"], axes_single_arg=_shared_doc_kwargs["axes_single_arg"], ) def align( self, other: Series, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value: Hashable = None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> Series: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[True], level: Level | None = ..., errors: IgnoreRaise = ..., ) -> None: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[False] = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: bool = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series | None: ... def rename( self, index: Renamer | Hashable | None = None, *, axis: Axis | None = None, copy: bool = True, inplace: bool = False, level: Level | None = None, errors: IgnoreRaise = "ignore", ) -> Series | None: """ Alter Series index labels or name. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. Alternatively, change ``Series.name`` with a scalar value. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- index : scalar, hashable sequence, dict-like or function optional Functions or dict-like are transformations to apply to the index. Scalar or hashable sequence-like will alter the ``Series.name`` attribute. axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new Series. 
If True the value of copy is ignored. level : int or level name, default None In case of MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise `KeyError` when a `dict-like mapper` or `index` contains labels that are not present in the index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- Series or None Series with index labels or name altered or None if ``inplace=True``. See Also -------- DataFrame.rename : Corresponding DataFrame method. Series.rename_axis : Set the name of the axis. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.rename("my_name") # scalar, changes Series.name 0 1 1 2 2 3 Name: my_name, dtype: int64 >>> s.rename(lambda x: x ** 2) # function, changes labels 0 1 1 2 4 3 dtype: int64 >>> s.rename({1: 3, 2: 5}) # mapping, changes labels 0 1 3 2 5 3 dtype: int64 """ if axis is not None: # Make sure we raise if an invalid 'axis' is passed. axis = self._get_axis_number(axis) if callable(index) or is_dict_like(index): # error: Argument 1 to "_rename" of "NDFrame" has incompatible # type "Union[Union[Mapping[Any, Hashable], Callable[[Any], # Hashable]], Hashable, None]"; expected "Union[Mapping[Any, # Hashable], Callable[[Any], Hashable], None]" return super()._rename( index, # type: ignore[arg-type] copy=copy, inplace=inplace, level=level, errors=errors, ) else: return self._set_name(index, inplace=inplace) """ Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.set_axis(['a', 'b', 'c'], axis=0) a 1 b 2 c 3 dtype: int64 """ ) **_shared_doc_kwargs, extended_summary_sub="", axis_description_sub="", see_also_sub="", ) ) ) # error: Cannot determine type of 'shift' # ---------------------------------------------------------------------- # Convert to types that support pd.NA # error: Cannot determine type of 'isna' # error: Return type "Series" of "isna" incompatible with return type "ndarray # [Any, dtype[bool_]]" in supertype "IndexOpsMixin" # error: Cannot determine type of 'isna' # error: Cannot determine type of 'notna' # error: Cannot determine type of 'notna' # ---------------------------------------------------------------------- # Time series-oriented methods # error: Cannot determine type of 'asfreq' # error: Cannot determine type of 'resample' # ---------------------------------------------------------------------- # Add index # ---------------------------------------------------------------------- # Accessor Methods # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # Add plotting methods to Series # ---------------------------------------------------------------------- # Template-Based Arithmetic/Comparison Methods Series The provided code snippet includes necessary dependencies for implementing the `_datetime_to_stata_elapsed_vec` function. Write a Python function `def _datetime_to_stata_elapsed_vec(dates: Series, fmt: str) -> Series` to solve the following problem: Convert from datetime to SIF. https://www.stata.com/help.cgi?datetime Parameters ---------- dates : Series Series or array containing datetime.datetime or datetime64[ns] to convert to the Stata Internal Format given by fmt fmt : str The format to convert to. Can be, tc, td, tw, tm, tq, th, ty Here is the function: def _datetime_to_stata_elapsed_vec(dates: Series, fmt: str) -> Series: """ Convert from datetime to SIF. 
https://www.stata.com/help.cgi?datetime Parameters ---------- dates : Series Series or array containing datetime.datetime or datetime64[ns] to convert to the Stata Internal Format given by fmt fmt : str The format to convert to. Can be, tc, td, tw, tm, tq, th, ty """ index = dates.index NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000 US_PER_DAY = NS_PER_DAY / 1000 def parse_dates_safe( dates, delta: bool = False, year: bool = False, days: bool = False ): d = {} if is_datetime64_dtype(dates.dtype): if delta: time_delta = dates - Timestamp(stata_epoch).as_unit("ns") d["delta"] = time_delta._values.view(np.int64) // 1000 # microseconds if days or year: date_index = DatetimeIndex(dates) d["year"] = date_index._data.year d["month"] = date_index._data.month if days: days_in_ns = dates.view(np.int64) - to_datetime( d["year"], format="%Y" ).view(np.int64) d["days"] = days_in_ns // NS_PER_DAY elif infer_dtype(dates, skipna=False) == "datetime": if delta: delta = dates._values - stata_epoch def f(x: datetime.timedelta) -> float: return US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds v = np.vectorize(f) d["delta"] = v(delta) if year: year_month = dates.apply(lambda x: 100 * x.year + x.month) d["year"] = year_month._values // 100 d["month"] = year_month._values - d["year"] * 100 if days: def g(x: datetime.datetime) -> int: return (x - datetime.datetime(x.year, 1, 1)).days v = np.vectorize(g) d["days"] = v(dates) else: raise ValueError( "Columns containing dates must contain either " "datetime64, datetime.datetime or null values." ) return DataFrame(d, index=index) bad_loc = isna(dates) index = dates.index if bad_loc.any(): dates = Series(dates) if is_datetime64_dtype(dates): dates[bad_loc] = to_datetime(stata_epoch) else: dates[bad_loc] = stata_epoch if fmt in ["%tc", "tc"]: d = parse_dates_safe(dates, delta=True) conv_dates = d.delta / 1000 elif fmt in ["%tC", "tC"]: warnings.warn( "Stata Internal Format tC not supported.", stacklevel=find_stack_level(), ) conv_dates = dates elif fmt in ["%td", "td"]: d = parse_dates_safe(dates, delta=True) conv_dates = d.delta // US_PER_DAY elif fmt in ["%tw", "tw"]: d = parse_dates_safe(dates, year=True, days=True) conv_dates = 52 * (d.year - stata_epoch.year) + d.days // 7 elif fmt in ["%tm", "tm"]: d = parse_dates_safe(dates, year=True) conv_dates = 12 * (d.year - stata_epoch.year) + d.month - 1 elif fmt in ["%tq", "tq"]: d = parse_dates_safe(dates, year=True) conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3 elif fmt in ["%th", "th"]: d = parse_dates_safe(dates, year=True) conv_dates = 2 * (d.year - stata_epoch.year) + (d.month > 6).astype(int) elif fmt in ["%ty", "ty"]: d = parse_dates_safe(dates, year=True) conv_dates = d.year else: raise ValueError(f"Format {fmt} is not a known Stata date format") conv_dates = Series(conv_dates, dtype=np.float64) missing_value = struct.unpack("<d", b"\x00\x00\x00\x00\x00\x00\xe0\x7f")[0] conv_dates[bad_loc] = missing_value return Series(conv_dates, index=index)
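# Editorial aside -- a small worked check of the elapsed-date arithmetic
# above, assuming stata_epoch = datetime.datetime(1960, 1, 1) as defined at
# module level in stata.py (kept commented so it does not alter the module):
#
#   import datetime as dt
#   epoch = dt.datetime(1960, 1, 1)
#   d = dt.datetime(1991, 7, 15)
#   (d - epoch).days                                # %td -> 11518 days
#   12 * (d.year - epoch.year) + d.month - 1        # %tm -> 378 months
#   4 * (d.year - epoch.year) + (d.month - 1) // 3  # %tq -> 126 quarters
#   2 * (d.year - epoch.year) + int(d.month > 6)    # %th -> 63 half-years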
Convert from datetime to SIF.
https://www.stata.com/help.cgi?datetime

Parameters
----------
dates : Series
    Series or array containing datetime.datetime or datetime64[ns]
    to convert to the Stata Internal Format given by fmt
fmt : str
    The format to convert to. Can be tc, td, tw, tm, tq, th, or ty
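As a side note on the NaT handling above: the sentinel written for missing dates is Stata's float64 "." missing value, unpacked from the same byte pattern the function uses. A minimal self-contained check (plain Python, no pandas needed):

import struct

# 0x7fe0000000000000 read as a little-endian double -- Stata's float64 "." code
miss = struct.unpack("<d", b"\x00\x00\x00\x00\x00\x00\xe0\x7f")[0]
print(miss)  # 8.98846567431158e+307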
173,534
from __future__ import annotations from collections import abc import datetime from io import BytesIO import os import struct import sys from types import TracebackType from typing import ( IO, TYPE_CHECKING, Any, AnyStr, Callable, Final, Hashable, Sequence, cast, ) import warnings from dateutil.relativedelta import relativedelta import numpy as np from pandas._libs.lib import infer_dtype from pandas._libs.writers import max_len_string_array from pandas._typing import ( CompressionOptions, FilePath, ReadBuffer, StorageOptions, WriteBuffer, ) from pandas.errors import ( CategoricalConversionWarning, InvalidColumnName, PossiblePrecisionLoss, ValueLabelTypeMismatch, ) from pandas.util._decorators import ( Appender, doc, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_categorical_dtype, is_datetime64_dtype, is_numeric_dtype, ) from pandas import ( Categorical, DatetimeIndex, NaT, Timestamp, isna, to_datetime, to_timedelta, ) from pandas.core.arrays.boolean import BooleanDtype from pandas.core.arrays.integer import IntegerDtype from pandas.core.frame import DataFrame from pandas.core.indexes.base import Index from pandas.core.series import Series from pandas.core.shared_docs import _shared_docs from pandas.io.common import get_handle precision_loss_doc: Final = """ Column converted from {0} to {1}, and some data are outside of the lossless conversion range. This may result in a loss of precision in the saved data. """ class StataMissingValue: """ An observation's missing value. Parameters ---------- value : {int, float} The Stata missing value code Notes ----- More information: <https://www.stata.com/help.cgi?missing> Integer missing values make the code '.', '.a', ..., '.z' to the ranges 101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ... 2147483647 (for int32). Missing values for floating point data types are more complex but the pattern is simple to discern from the following table. np.float32 missing values (float in Stata) 0000007f . 0008007f .a 0010007f .b ... 00c0007f .x 00c8007f .y 00d0007f .z np.float64 missing values (double in Stata) 000000000000e07f . 000000000001e07f .a 000000000002e07f .b ... 000000000018e07f .x 000000000019e07f .y 00000000001ae07f .z """ # Construct a dictionary of missing values MISSING_VALUES: dict[float, str] = {} bases: Final = (101, 32741, 2147483621) for b in bases: # Conversion to long to avoid hash issues on 32 bit platforms #8968 MISSING_VALUES[b] = "." for i in range(1, 27): MISSING_VALUES[i + b] = "." + chr(96 + i) float32_base: bytes = b"\x00\x00\x00\x7f" increment: int = struct.unpack("<i", b"\x00\x08\x00\x00")[0] for i in range(27): key = struct.unpack("<f", float32_base)[0] MISSING_VALUES[key] = "." if i > 0: MISSING_VALUES[key] += chr(96 + i) int_value = struct.unpack("<i", struct.pack("<f", key))[0] + increment float32_base = struct.pack("<i", int_value) float64_base: bytes = b"\x00\x00\x00\x00\x00\x00\xe0\x7f" increment = struct.unpack("q", b"\x00\x00\x00\x00\x00\x01\x00\x00")[0] for i in range(27): key = struct.unpack("<d", float64_base)[0] MISSING_VALUES[key] = "." 
if i > 0: MISSING_VALUES[key] += chr(96 + i) int_value = struct.unpack("q", struct.pack("<d", key))[0] + increment float64_base = struct.pack("q", int_value) BASE_MISSING_VALUES: Final = { "int8": 101, "int16": 32741, "int32": 2147483621, "float32": struct.unpack("<f", float32_base)[0], "float64": struct.unpack("<d", float64_base)[0], } def __init__(self, value: float) -> None: self._value = value # Conversion to int to avoid hash issues on 32 bit platforms #8968 value = int(value) if value < 2147483648 else float(value) self._str = self.MISSING_VALUES[value] def string(self) -> str: """ The Stata representation of the missing value: '.', '.a'..'.z' Returns ------- str The representation of the missing value. """ return self._str def value(self) -> float: """ The binary representation of the missing value. Returns ------- {int, float} The binary representation of the missing value. """ return self._value def __str__(self) -> str: return self.string def __repr__(self) -> str: return f"{type(self)}({self})" def __eq__(self, other: Any) -> bool: return ( isinstance(other, type(self)) and self.string == other.string and self.value == other.value ) def get_base_missing_value(cls, dtype: np.dtype) -> float: if dtype.type is np.int8: value = cls.BASE_MISSING_VALUES["int8"] elif dtype.type is np.int16: value = cls.BASE_MISSING_VALUES["int16"] elif dtype.type is np.int32: value = cls.BASE_MISSING_VALUES["int32"] elif dtype.type is np.float32: value = cls.BASE_MISSING_VALUES["float32"] elif dtype.type is np.float64: value = cls.BASE_MISSING_VALUES["float64"] else: raise ValueError("Unsupported dtype") return value class PossiblePrecisionLoss(Warning): """ Warning raised by to_stata on a column with a value outside or equal to int64. When the column value is outside or equal to the int64 value the column is converted to a float64 dtype. Examples -------- >>> df = pd.DataFrame({"s": pd.Series([1, 2**53], dtype=np.int64)}) >>> df.to_stata('test') # doctest: +SKIP ... # PossiblePrecisionLoss: Column converted from int64 to float64... """ def find_stack_level() -> int: """ Find the first place in the stack that is not inside pandas (tests notwithstanding). """ import pandas as pd pkg_dir = os.path.dirname(pd.__file__) test_dir = os.path.join(pkg_dir, "tests") # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow frame = inspect.currentframe() n = 0 while frame: fname = inspect.getfile(frame) if fname.startswith(pkg_dir) and not fname.startswith(test_dir): frame = frame.f_back n += 1 else: break return n class BooleanDtype(BaseMaskedDtype): """ Extension dtype for boolean data. .. warning:: BooleanDtype is considered experimental. The implementation and parts of the API may change without warning. Attributes ---------- None Methods ------- None Examples -------- >>> pd.BooleanDtype() BooleanDtype """ name = "boolean" # https://github.com/python/mypy/issues/4125 # error: Signature of "type" incompatible with supertype "BaseMaskedDtype" def type(self) -> type: # type: ignore[override] return np.bool_ def kind(self) -> str: return "b" def numpy_dtype(self) -> np.dtype: return np.dtype("bool") def construct_array_type(cls) -> type_t[BooleanArray]: """ Return the array type associated with this dtype. 
Returns ------- type """ return BooleanArray def __repr__(self) -> str: return "BooleanDtype" def _is_boolean(self) -> bool: return True def _is_numeric(self) -> bool: return True def __from_arrow__( self, array: pyarrow.Array | pyarrow.ChunkedArray ) -> BooleanArray: """ Construct BooleanArray from pyarrow Array/ChunkedArray. """ import pyarrow if array.type != pyarrow.bool_(): raise TypeError(f"Expected array of boolean type, got {array.type} instead") if isinstance(array, pyarrow.Array): chunks = [array] else: # pyarrow.ChunkedArray chunks = array.chunks results = [] for arr in chunks: buflist = arr.buffers() data = pyarrow.BooleanArray.from_buffers( arr.type, len(arr), [None, buflist[1]], offset=arr.offset ).to_numpy(zero_copy_only=False) if arr.null_count != 0: mask = pyarrow.BooleanArray.from_buffers( arr.type, len(arr), [None, buflist[0]], offset=arr.offset ).to_numpy(zero_copy_only=False) mask = ~mask else: mask = np.zeros(len(arr), dtype=bool) bool_arr = BooleanArray(data, mask) results.append(bool_arr) if not results: return BooleanArray( np.array([], dtype=np.bool_), np.array([], dtype=np.bool_) ) else: return BooleanArray._concat_same_type(results) class IntegerDtype(NumericDtype): """ An ExtensionDtype to hold a single size & kind of integer dtype. These specific implementations are subclasses of the non-public IntegerDtype. For example, we have Int8Dtype to represent signed int 8s. The attributes name & type are set when these subclasses are created. """ _default_np_dtype = np.dtype(np.int64) _checker = is_integer_dtype def construct_array_type(cls) -> type[IntegerArray]: """ Return the array type associated with this dtype. Returns ------- type """ return IntegerArray def _str_to_dtype_mapping(cls): return INT_STR_TO_DTYPE def _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray: """ Safely cast the values to the given dtype. "safe" in this context means the casting is lossless. e.g. if 'values' has a floating dtype, each value must be an integer. """ try: return values.astype(dtype, casting="safe", copy=copy) except TypeError as err: casted = values.astype(dtype, copy=copy) if (casted == values).all(): return casted raise TypeError( f"cannot safely cast non-equivalent {values.dtype} to {np.dtype(dtype)}" ) from err class DataFrame(NDFrame, OpsMixin): """ Two-dimensional, size-mutable, potentially heterogeneous tabular data. Data structure also contains labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure. Parameters ---------- data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame Dict can contain Series, arrays, constants, dataclass or list-like objects. If data is a dict, column order follows insertion-order. If a dict contains Series which have an index defined, it is aligned by its index. This alignment also occurs if data is a Series or a DataFrame itself. Alignment is done on Series/DataFrame inputs. If data is a list of dicts, column order follows insertion-order. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided. columns : Index or array-like Column labels to use for resulting frame when data does not have them, defaulting to RangeIndex(0, 1, 2, ..., n). If data contains column labels, will perform column selection instead. dtype : dtype, default None Data type to force. 
Only a single dtype is allowed. If None, infer. copy : bool or None, default None Copy data from inputs. For dict data, the default of None behaves like ``copy=True``. For DataFrame or 2d ndarray input, the default of None behaves like ``copy=False``. If data is a dict containing one or more Series (possibly of different dtypes), ``copy=False`` will ensure that these inputs are not copied. .. versionchanged:: 1.3.0 See Also -------- DataFrame.from_records : Constructor from tuples, also record arrays. DataFrame.from_dict : From dicts of Series, arrays, or dicts. read_csv : Read a comma-separated values (csv) file into DataFrame. read_table : Read general delimited file into DataFrame. read_clipboard : Read text from clipboard into DataFrame. Notes ----- Please reference the :ref:`User Guide <basics.dataframe>` for more information. Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. >>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = pd.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from a dictionary including Series: >>> d = {'col1': [0, 1, 2, 3], 'col2': pd.Series([2, 3], index=[2, 3])} >>> pd.DataFrame(data=d, index=[0, 1, 2, 3]) col1 col2 0 0 NaN 1 1 NaN 2 2 2.0 3 3 3.0 Constructing DataFrame from numpy ndarray: >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ... columns=['a', 'b', 'c']) >>> df2 a b c 0 1 2 3 1 4 5 6 2 7 8 9 Constructing DataFrame from a numpy ndarray that has labeled columns: >>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], ... dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")]) >>> df3 = pd.DataFrame(data, columns=['c', 'a']) ... 
>>> df3 c a 0 3 1 1 6 4 2 9 7 Constructing DataFrame from dataclass: >>> from dataclasses import make_dataclass >>> Point = make_dataclass("Point", [("x", int), ("y", int)]) >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)]) x y 0 0 0 1 0 3 2 2 3 Constructing DataFrame from Series/DataFrame: >>> ser = pd.Series([1, 2, 3], index=["a", "b", "c"]) >>> df = pd.DataFrame(data=ser, index=["a", "c"]) >>> df 0 a 1 c 3 >>> df1 = pd.DataFrame([1, 2, 3], index=["a", "b", "c"], columns=["x"]) >>> df2 = pd.DataFrame(data=df1, index=["a", "c"]) >>> df2 x a 1 c 3 """ _internal_names_set = {"columns", "index"} | NDFrame._internal_names_set _typ = "dataframe" _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray) _accessors: set[str] = {"sparse"} _hidden_attrs: frozenset[str] = NDFrame._hidden_attrs | frozenset([]) _mgr: BlockManager | ArrayManager def _constructor(self) -> Callable[..., DataFrame]: return DataFrame _constructor_sliced: Callable[..., Series] = Series # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index: Axes | None = None, columns: Axes | None = None, dtype: Dtype | None = None, copy: bool | None = None, ) -> None: if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._mgr if not copy: # if not copying data, ensure to still return a shallow copy # to avoid the result sharing the same Manager data = data.copy(deep=False) if isinstance(data, (BlockManager, ArrayManager)): if using_copy_on_write(): data = data.copy(deep=False) # first check if a Manager is passed without any other arguments # -> use fastpath (without checking Manager type) if index is None and columns is None and dtype is None and not copy: # GH#33357 fastpath NDFrame.__init__(self, data) return manager = get_option("mode.data_manager") # GH47215 if index is not None and isinstance(index, set): raise ValueError("index cannot be a set") if columns is not None and isinstance(columns, set): raise ValueError("columns cannot be a set") if copy is None: if isinstance(data, dict): # retain pre-GH#38939 default behavior copy = True elif ( manager == "array" and isinstance(data, (np.ndarray, ExtensionArray)) and data.ndim == 2 ): # INFO(ArrayManager) by default copy the 2D input array to get # contiguous 1D arrays copy = True elif using_copy_on_write() and not isinstance( data, (Index, DataFrame, Series) ): copy = True else: copy = False if data is None: index = index if index is not None else default_index(0) columns = columns if columns is not None else default_index(0) dtype = dtype if dtype is not None else pandas_dtype(object) data = [] if isinstance(data, (BlockManager, ArrayManager)): mgr = self._init_mgr( data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy ) elif isinstance(data, dict): # GH#38939 de facto copy defaults to False only in non-dict cases mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager) elif isinstance(data, ma.MaskedArray): from numpy.ma import mrecords # masked recarray if isinstance(data, mrecords.MaskedRecords): raise TypeError( "MaskedRecords are not supported. Pass " "{name: data[name] for name in data.dtype.names} " "instead" ) # a masked array data = sanitize_masked_array(data) mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) elif isinstance(data, (np.ndarray, Series, Index, ExtensionArray)): if data.dtype.names: # i.e. 
numpy structured array data = cast(np.ndarray, data) mgr = rec_array_to_mgr( data, index, columns, dtype, copy, typ=manager, ) elif getattr(data, "name", None) is not None: # i.e. Series/Index with non-None name _copy = copy if using_copy_on_write() else True mgr = dict_to_mgr( # error: Item "ndarray" of "Union[ndarray, Series, Index]" has no # attribute "name" {data.name: data}, # type: ignore[union-attr] index, columns, dtype=dtype, typ=manager, copy=_copy, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) # For data is list-like, or Iterable (will consume into list) elif is_list_like(data): if not isinstance(data, abc.Sequence): if hasattr(data, "__array__"): # GH#44616 big perf improvement for e.g. pytorch tensor data = np.asarray(data) else: data = list(data) if len(data) > 0: if is_dataclass(data[0]): data = dataclasses_to_dicts(data) if not isinstance(data, np.ndarray) and treat_as_nested(data): # exclude ndarray as we may have cast it a few lines above if columns is not None: columns = ensure_index(columns) arrays, columns, index = nested_data_to_arrays( # error: Argument 3 to "nested_data_to_arrays" has incompatible # type "Optional[Collection[Any]]"; expected "Optional[Index]" data, columns, index, # type: ignore[arg-type] dtype, ) mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, typ=manager, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) else: mgr = dict_to_mgr( {}, index, columns if columns is not None else default_index(0), dtype=dtype, typ=manager, ) # For data is scalar else: if index is None or columns is None: raise ValueError("DataFrame constructor not properly called!") index = ensure_index(index) columns = ensure_index(columns) if not dtype: dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True) # For data is a scalar extension dtype if isinstance(dtype, ExtensionDtype): # TODO(EA2D): special case not needed with 2D EAs values = [ construct_1d_arraylike_from_scalar(data, len(index), dtype) for _ in range(len(columns)) ] mgr = arrays_to_mgr(values, columns, index, dtype=None, typ=manager) else: arr2d = construct_2d_arraylike_from_scalar( data, len(index), len(columns), dtype, copy, ) mgr = ndarray_to_mgr( arr2d, index, columns, dtype=arr2d.dtype, copy=False, typ=manager, ) # ensure correct Manager type according to settings mgr = mgr_to_mgr(mgr, typ=manager) NDFrame.__init__(self, mgr) # ---------------------------------------------------------------------- def __dataframe__( self, nan_as_null: bool = False, allow_copy: bool = True ) -> DataFrameXchg: """ Return the dataframe interchange object implementing the interchange protocol. Parameters ---------- nan_as_null : bool, default False Whether to tell the DataFrame to overwrite null values in the data with ``NaN`` (or ``NaT``). allow_copy : bool, default True Whether to allow memory copying when exporting. If set to False it would cause non-zero-copy exports to fail. Returns ------- DataFrame interchange object The object which consuming library can use to ingress the dataframe. Notes ----- Details on the interchange protocol: https://data-apis.org/dataframe-protocol/latest/index.html `nan_as_null` currently has no effect; once support for nullable extension dtypes is added, this value should be propagated to columns. 
""" from pandas.core.interchange.dataframe import PandasDataFrameXchg return PandasDataFrameXchg(self, nan_as_null, allow_copy) # ---------------------------------------------------------------------- def axes(self) -> list[Index]: """ Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns] def shape(self) -> tuple[int, int]: """ Return a tuple representing the dimensionality of the DataFrame. See Also -------- ndarray.shape : Tuple of array dimensions. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self.index), len(self.columns) def _is_homogeneous_type(self) -> bool: """ Whether all the columns in a DataFrame have the same type. Returns ------- bool See Also -------- Index._is_homogeneous_type : Whether the object has a single dtype. MultiIndex._is_homogeneous_type : Whether all the levels of a MultiIndex have the same dtype. Examples -------- >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type True >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type False Items with the same type but different sizes are considered different types. >>> DataFrame({ ... "A": np.array([1, 2], dtype=np.int32), ... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type False """ if isinstance(self._mgr, ArrayManager): return len({arr.dtype for arr in self._mgr.arrays}) == 1 if self._mgr.any_extension_types: return len({block.dtype for block in self._mgr.blocks}) == 1 else: return not self._is_mixed_type def _can_fast_transpose(self) -> bool: """ Can we transpose this DataFrame without creating any new array objects. """ if isinstance(self._mgr, ArrayManager): return False blocks = self._mgr.blocks if len(blocks) != 1: return False dtype = blocks[0].dtype # TODO(EA2D) special case would be unnecessary with 2D EAs return not is_1d_only_ea_dtype(dtype) def _values(self) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray: """ Analogue to ._values that may return a 2D ExtensionArray. """ mgr = self._mgr if isinstance(mgr, ArrayManager): if len(mgr.arrays) == 1 and not is_1d_only_ea_dtype(mgr.arrays[0].dtype): # error: Item "ExtensionArray" of "Union[ndarray, ExtensionArray]" # has no attribute "reshape" return mgr.arrays[0].reshape(-1, 1) # type: ignore[union-attr] return ensure_wrapped_if_datetimelike(self.values) blocks = mgr.blocks if len(blocks) != 1: return ensure_wrapped_if_datetimelike(self.values) arr = blocks[0].values if arr.ndim == 1: # non-2D ExtensionArray return self.values # more generally, whatever we allow in NDArrayBackedExtensionBlock arr = cast("np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray", arr) return arr.T # ---------------------------------------------------------------------- # Rendering Methods def _repr_fits_vertical_(self) -> bool: """ Check length against max_rows. """ max_rows = get_option("display.max_rows") return len(self) <= max_rows def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool: """ Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns. In case of non-interactive session, no boundaries apply. 
`ignore_width` is here so ipynb+HTML output can behave the way users expect. display.max_columns remains in effect. GH3541, GH3573 """ width, height = console.get_console_size() max_columns = get_option("display.max_columns") nb_columns = len(self.columns) # exceed max columns if (max_columns and nb_columns > max_columns) or ( (not ignore_width) and width and nb_columns > (width // 2) ): return False # used by repr_html under IPython notebook or scripts ignore terminal # dims if ignore_width or width is None or not console.in_interactive_session(): return True if get_option("display.width") is not None or console.in_ipython_frontend(): # check at least the column row for excessive width max_rows = 1 else: max_rows = get_option("display.max_rows") # when auto-detecting, so width=None and not in ipython front end # check whether repr fits horizontal by actually checking # the width of the rendered repr buf = StringIO() # only care about the stuff we'll actually print out # and to_string on entire frame may be expensive d = self if max_rows is not None: # unlimited rows # min of two, where one may be None d = d.iloc[: min(max_rows, len(d))] else: return True d.to_string(buf=buf) value = buf.getvalue() repr_width = max(len(line) for line in value.split("\n")) return repr_width < width def _info_repr(self) -> bool: """ True if the repr should show the info view. """ info_repr_option = get_option("display.large_repr") == "info" return info_repr_option and not ( self._repr_fits_horizontal_() and self._repr_fits_vertical_() ) def __repr__(self) -> str: """ Return a string representation for a particular DataFrame. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) return buf.getvalue() repr_params = fmt.get_dataframe_repr_params() return self.to_string(**repr_params) def _repr_html_(self) -> str | None: """ Return a html representation for a particular DataFrame. Mainly for IPython notebook. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) # need to escape the <class>, should be the first line. val = buf.getvalue().replace("<", r"&lt;", 1) val = val.replace(">", r"&gt;", 1) return f"<pre>{val}</pre>" if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") min_rows = get_option("display.min_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") formatter = fmt.DataFrameFormatter( self, columns=None, col_space=None, na_rep="NaN", formatters=None, float_format=None, sparsify=None, justify=None, index_names=True, header=True, index=True, bold_rows=True, escape=True, max_rows=max_rows, min_rows=min_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=".", ) return fmt.DataFrameRenderer(formatter).to_html(notebook=True) else: return None def to_string( self, buf: None = ..., columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> str: ... 
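    # Editorial note on the overload pair here (an observation from the stub
    # return types, not new behavior): with buf=None, to_string() returns the
    # rendered string; with a path or writable handle it writes in place and
    # returns None. A hedged usage sketch:
    #
    #   text = df.to_string()              # buf omitted -> str
    #   with open("out.txt", "w") as fh:
    #       df.to_string(buf=fh)           # writes to fh, returns None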
@overload
def to_string(
    self,
    buf: FilePath | WriteBuffer[str],
    columns: Sequence[str] | None = ...,
    col_space: int | list[int] | dict[Hashable, int] | None = ...,
    header: bool | Sequence[str] = ...,
    index: bool = ...,
    na_rep: str = ...,
    formatters: fmt.FormattersType | None = ...,
    float_format: fmt.FloatFormatType | None = ...,
    sparsify: bool | None = ...,
    index_names: bool = ...,
    justify: str | None = ...,
    max_rows: int | None = ...,
    max_cols: int | None = ...,
    show_dimensions: bool = ...,
    decimal: str = ...,
    line_width: int | None = ...,
    min_rows: int | None = ...,
    max_colwidth: int | None = ...,
    encoding: str | None = ...,
) -> None:
    ...

@Substitution(
    header_type="bool or sequence of str",
    header="Write out the column names. If a list of strings "
    "is given, it is assumed to be aliases for the "
    "column names",
    col_space_type="int, list or dict of int",
    col_space="The minimum width of each column. If a list of ints is given "
    "each integer corresponds with one column. If a dict is given, the key "
    "references the column, while the value defines the space to use.",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_string(
    self,
    buf: FilePath | WriteBuffer[str] | None = None,
    columns: Sequence[str] | None = None,
    col_space: int | list[int] | dict[Hashable, int] | None = None,
    header: bool | Sequence[str] = True,
    index: bool = True,
    na_rep: str = "NaN",
    formatters: fmt.FormattersType | None = None,
    float_format: fmt.FloatFormatType | None = None,
    sparsify: bool | None = None,
    index_names: bool = True,
    justify: str | None = None,
    max_rows: int | None = None,
    max_cols: int | None = None,
    show_dimensions: bool = False,
    decimal: str = ".",
    line_width: int | None = None,
    min_rows: int | None = None,
    max_colwidth: int | None = None,
    encoding: str | None = None,
) -> str | None:
    """
    Render a DataFrame to a console-friendly tabular output.
    %(shared_params)s
    line_width : int, optional
        Width to wrap a line in characters.
    min_rows : int, optional
        The number of rows to display in the console in a truncated repr
        (when number of rows is above `max_rows`).
    max_colwidth : int, optional
        Max width to truncate each column in characters. By default, no limit.
    encoding : str, default "utf-8"
        Set character encoding.
    %(returns)s
    See Also
    --------
    to_html : Convert DataFrame to HTML.

    Examples
    --------
    >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]}
    >>> df = pd.DataFrame(d)
    >>> print(df.to_string())
       col1  col2
    0     1     4
    1     2     5
    2     3     6
    """
    from pandas import option_context

    with option_context("display.max_colwidth", max_colwidth):
        formatter = fmt.DataFrameFormatter(
            self,
            columns=columns,
            col_space=col_space,
            na_rep=na_rep,
            formatters=formatters,
            float_format=float_format,
            sparsify=sparsify,
            justify=justify,
            index_names=index_names,
            header=header,
            index=index,
            min_rows=min_rows,
            max_rows=max_rows,
            max_cols=max_cols,
            show_dimensions=show_dimensions,
            decimal=decimal,
        )
        return fmt.DataFrameRenderer(formatter).to_string(
            buf=buf,
            encoding=encoding,
            line_width=line_width,
        )

# ----------------------------------------------------------------------

@property
def style(self) -> Styler:
    """
    Returns a Styler object.

    Contains methods for building a styled HTML representation of the
    DataFrame.

    See Also
    --------
    io.formats.style.Styler : Helps style a DataFrame or Series according to the
        data with HTML and CSS.
    """
    from pandas.io.formats.style import Styler

    return Styler(self)

_shared_docs[
    "items"
] = r"""
    Iterate over (column name, Series) pairs.

    Iterates over the DataFrame columns, returning a tuple with
    the column name and the content as a Series.
Yields ------ label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. Examples -------- >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.items(): ... print(f'label: {label}') ... print(f'content: {content}', sep='\n') ... label: species content: panda bear polar bear koala marsupial Name: species, dtype: object label: population content: panda 1864 polar 22000 koala 80000 Name: population, dtype: int64 """ def items(self) -> Iterable[tuple[Hashable, Series]]: if self.columns.is_unique and hasattr(self, "_item_cache"): for k in self.columns: yield k, self._get_item_cache(k) else: for i, k in enumerate(self.columns): yield k, self._ixs(i, axis=1) def iterrows(self) -> Iterable[tuple[Hashable, Series]]: """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : Series The data of the row as a Series. See Also -------- DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. """ columns = self.columns klass = self._constructor_sliced using_cow = using_copy_on_write() for k, v in zip(self.index, self.values): s = klass(v, index=columns, name=k).__finalize__(self) if using_cow and self._mgr.is_single_block: s._mgr.add_references(self._mgr) # type: ignore[arg-type] yield k, s def itertuples( self, index: bool = True, name: str | None = "Pandas" ) -> Iterable[tuple[Any, ...]]: """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "Pandas" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. 
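    For example, the replacement is a positional name reflecting the
    field's position in the tuple (a minimal illustration):

    >>> df = pd.DataFrame([[1, 2]], columns=['good', 'bad col'])
    >>> next(df.itertuples())
    Pandas(Index=0, good=1, _2=2)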
Examples -------- >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... Pandas(Index='dog', num_legs=4, num_wings=0) Pandas(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... Pandas(num_legs=4, num_wings=0) Pandas(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2) """ arrays = [] fields = list(self.columns) if index: arrays.append(self.index) fields.insert(0, "Index") # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) if name is not None: # https://github.com/python/mypy/issues/9046 # error: namedtuple() expects a string literal as the first argument itertuple = collections.namedtuple( # type: ignore[misc] name, fields, rename=True ) return map(itertuple._make, zip(*arrays)) # fallback to regular tuples return zip(*arrays) def __len__(self) -> int: """ Returns length of info axis, but here we use the index. """ return len(self.index) def dot(self, other: Series) -> Series: ... def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame: ... def dot(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of an other Series, DataFrame or a numpy array. It can also be called using ``self @ other`` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the matrix product with. Returns ------- Series or DataFrame If other is a Series, return the matrix product between self and other as a Series. If other is a DataFrame or a numpy.array, return the matrix product of self and other in a DataFrame of a np.array. See Also -------- Series.dot: Similar method for Series. Notes ----- The dimensions of DataFrame and other must be compatible in order to compute the matrix multiplication. In addition, the column names of DataFrame and the index of other must contain the same values, as they will be aligned prior to the multiplication. The dot method for Series computes the inner product, instead of the matrix product here. Examples -------- Here we multiply a DataFrame with a Series. >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) >>> s = pd.Series([1, 1, 2, 1]) >>> df.dot(s) 0 -4 1 5 dtype: int64 Here we multiply a DataFrame with another DataFrame. >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(other) 0 1 0 1 4 1 2 2 Note that the dot method give the same result as @ >>> df @ other 0 1 0 1 4 1 2 2 The dot method works also if other is an np.array. >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(arr) 0 1 0 1 4 1 2 2 Note how shuffling of the objects does not change the result. 
>>> s2 = s.reindex([1, 0, 2, 3]) >>> df.dot(s2) 0 -4 1 5 dtype: int64 """ if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) if len(common) > len(self.columns) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(columns=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right._values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: raise ValueError( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, DataFrame): return self._constructor( np.dot(lvals, rvals), index=left.index, columns=other.columns, copy=False, ) elif isinstance(other, Series): return self._constructor_sliced( np.dot(lvals, rvals), index=left.index, copy=False ) elif isinstance(rvals, (np.ndarray, Index)): result = np.dot(lvals, rvals) if result.ndim == 2: return self._constructor(result, index=left.index, copy=False) else: return self._constructor_sliced(result, index=left.index, copy=False) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other: Series) -> Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(other) def __rmatmul__(self, other) -> DataFrame: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ try: return self.T.dot(np.transpose(other)).T except ValueError as err: if "shape mismatch" not in str(err): raise # GH#21581 give exception message for original shapes msg = f"shapes {np.shape(other)} and {self.shape} not aligned" raise ValueError(msg) from err # ---------------------------------------------------------------------- # IO methods (to / from other formats) def from_dict( cls, data: dict, orient: str = "columns", dtype: Dtype | None = None, columns: Axes | None = None, ) -> DataFrame: """ Construct DataFrame from dict of array-like or dicts. Creates DataFrame object from dictionary by columns or by index allowing dtype specification. Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. orient : {'columns', 'index', 'tight'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. If 'tight', assume a dict with keys ['index', 'columns', 'data', 'index_names', 'column_names']. .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument dtype : dtype, default None Data type to force after DataFrame construction, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'`` or ``orient='tight'``. Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from structured ndarray, sequence of tuples or dicts, or DataFrame. DataFrame : DataFrame object creation using constructor. DataFrame.to_dict : Convert the DataFrame to a dictionary. 
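    Notes
    -----
    A dictionary in the 'tight' format is exactly what
    :meth:`DataFrame.to_dict` produces with ``orient='tight'``, so the
    two methods round-trip (a minimal illustration):

    >>> df = pd.DataFrame([[1, 2]], columns=['a', 'b'])
    >>> pd.DataFrame.from_dict(df.to_dict(orient='tight'), orient='tight')
       a  b
    0  1  2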
Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data, orient='index') 0 1 2 3 row_1 3 2 1 0 row_2 a b c d When using the 'index' orientation, the column names can be specified manually: >>> pd.DataFrame.from_dict(data, orient='index', ... columns=['A', 'B', 'C', 'D']) A B C D row_1 3 2 1 0 row_2 a b c d Specify ``orient='tight'`` to create the DataFrame using a 'tight' format: >>> data = {'index': [('a', 'b'), ('a', 'c')], ... 'columns': [('x', 1), ('y', 2)], ... 'data': [[1, 3], [2, 4]], ... 'index_names': ['n1', 'n2'], ... 'column_names': ['z1', 'z2']} >>> pd.DataFrame.from_dict(data, orient='tight') z1 x y z2 1 2 n1 n2 a b 1 3 c 2 4 """ index = None orient = orient.lower() if orient == "index": if len(data) > 0: # TODO speed up Series case if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: index = list(data.keys()) # error: Incompatible types in assignment (expression has type # "List[Any]", variable has type "Dict[Any, Any]") data = list(data.values()) # type: ignore[assignment] elif orient in ("columns", "tight"): if columns is not None: raise ValueError(f"cannot use columns parameter with orient='{orient}'") else: # pragma: no cover raise ValueError( f"Expected 'index', 'columns' or 'tight' for orient parameter. " f"Got '{orient}' instead" ) if orient != "tight": return cls(data, index=index, columns=columns, dtype=dtype) else: realdata = data["data"] def create_index(indexlist, namelist): index: Index if len(namelist) > 1: index = MultiIndex.from_tuples(indexlist, names=namelist) else: index = Index(indexlist, name=namelist[0]) return index index = create_index(data["index"], data["index_names"]) columns = create_index(data["columns"], data["column_names"]) return cls(realdata, index=index, columns=columns, dtype=dtype) def to_numpy( self, dtype: npt.DTypeLike | None = None, copy: bool = False, na_value: object = lib.no_default, ) -> np.ndarray: """ Convert the DataFrame to a NumPy array. By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the results dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray`. copy : bool, default False Whether to ensure that the returned value is not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. na_value : Any, optional The value to use for missing values. The default value depends on `dtype` and the dtypes of the DataFrame columns. .. versionadded:: 1.1.0 Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogeneous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. 
, 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) """ if dtype is not None: dtype = np.dtype(dtype) result = self._mgr.as_array(dtype=dtype, copy=copy, na_value=na_value) if result.dtype is not dtype: result = np.array(result, dtype=dtype, copy=False) return result def _create_data_for_split_and_tight_to_dict( self, are_all_object_dtype_cols: bool, object_dtype_indices: list[int] ) -> list: """ Simple helper method to create data for to ``to_dict(orient="split")`` and ``to_dict(orient="tight")`` to create the main output data """ if are_all_object_dtype_cols: data = [ list(map(maybe_box_native, t)) for t in self.itertuples(index=False, name=None) ] else: data = [list(t) for t in self.itertuples(index=False, name=None)] if object_dtype_indices: # If we have object_dtype_cols, apply maybe_box_naive after list # comprehension for perf for row in data: for i in object_dtype_indices: row[i] = maybe_box_native(row[i]) return data def to_dict( self, orient: Literal["dict", "list", "series", "split", "tight", "index"] = ..., into: type[dict] = ..., ) -> dict: ... def to_dict(self, orient: Literal["records"], into: type[dict] = ...) -> list[dict]: ... def to_dict( self, orient: Literal[ "dict", "list", "series", "split", "tight", "records", "index" ] = "dict", into: type[dict] = dict, index: bool = True, ) -> dict | list[dict]: """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'tight' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values], 'index_names' -> [index.names], 'column_names' -> [column.names]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. index : bool, default True Whether to include the index item (and index_names item if `orient` is 'tight') in the returned dictionary. Can only be ``False`` when `orient` is 'split' or 'tight'. .. versionadded:: 2.0.0 Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. See Also -------- DataFrame.from_dict: Create a DataFrame from a dictionary. DataFrame.to_json: Convert a DataFrame to JSON format. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df.to_dict() {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}} You can specify the return orientation. 
>>> df.to_dict('series') {'col1': row1 1 row2 2 Name: col1, dtype: int64, 'col2': row1 0.50 row2 0.75 Name: col2, dtype: float64} >>> df.to_dict('split') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]} >>> df.to_dict('records') [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}] >>> df.to_dict('index') {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} >>> df.to_dict('tight') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]], 'index_names': [None], 'column_names': [None]} You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}), defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})] """ from pandas.core.methods.to_dict import to_dict return to_dict(self, orient, into, index) def to_gbq( self, destination_table: str, project_id: str | None = None, chunksize: int | None = None, reauth: bool = False, if_exists: str = "fail", auth_local_webserver: bool = True, table_schema: list[dict[str, str]] | None = None, location: str | None = None, progress_bar: bool = True, credentials=None, ) -> None: """ Write a DataFrame to a Google BigQuery table. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. Parameters ---------- destination_table : str Name of table to be written, in the form ``dataset.tablename``. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. chunksize : int, optional Number of rows to be inserted in each chunk from the dataframe. Set to ``None`` to load the whole dataframe at once. reauth : bool, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. if_exists : str, default 'fail' Behavior when the destination table exists. Value can be one of: ``'fail'`` If table exists raise pandas_gbq.gbq.TableCreationError. ``'replace'`` If table exists, drop it, recreate it, and insert data. ``'append'`` If table exists, insert data. Create if does not exist. auth_local_webserver : bool, default True Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. _console flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. .. versionchanged:: 1.5.0 Default value is changed to ``True``. Google has deprecated the ``auth_local_webserver = False`` `"out of band" (copy-paste) flow <https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html?m=1#disallowed-oob>`_. table_schema : list of dicts, optional List of BigQuery table fields to which according DataFrame columns conform to, e.g. ``[{'name': 'col1', 'type': 'STRING'},...]``. 
If schema is not provided, it will be generated according to dtypes of DataFrame columns. See BigQuery API documentation on available names of a field. *New in version 0.3.1 of pandas-gbq*. location : str, optional Location where the load job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of the target dataset. *New in version 0.5.0 of pandas-gbq*. progress_bar : bool, default True Use the library `tqdm` to show the progress bar for the upload, chunk by chunk. *New in version 0.5.0 of pandas-gbq*. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. *New in version 0.8.0 of pandas-gbq*. See Also -------- pandas_gbq.to_gbq : This function in the pandas-gbq library. read_gbq : Read a DataFrame from Google BigQuery. """ from pandas.io import gbq gbq.to_gbq( self, destination_table, project_id=project_id, chunksize=chunksize, reauth=reauth, if_exists=if_exists, auth_local_webserver=auth_local_webserver, table_schema=table_schema, location=location, progress_bar=progress_bar, credentials=credentials, ) def from_records( cls, data, index=None, exclude=None, columns=None, coerce_float: bool = False, nrows: int | None = None, ) -> DataFrame: """ Convert structured or record ndarray to DataFrame. Creates a DataFrame object from a structured ndarray, sequence of tuples or dicts, or DataFrame. Parameters ---------- data : structured ndarray, sequence of tuples or dicts, or DataFrame Structured input data. index : str, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use. exclude : sequence, default None Columns or fields to exclude. columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns). coerce_float : bool, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. nrows : int, default None Number of rows to read if data is an iterator. Returns ------- DataFrame See Also -------- DataFrame.from_dict : DataFrame from dict of array-like or dicts. DataFrame : DataFrame object creation using constructor. Examples -------- Data can be provided as a structured ndarray: >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')], ... dtype=[('col_1', 'i4'), ('col_2', 'U1')]) >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of dicts: >>> data = [{'col_1': 3, 'col_2': 'a'}, ... {'col_1': 2, 'col_2': 'b'}, ... {'col_1': 1, 'col_2': 'c'}, ... 
{'col_1': 0, 'col_2': 'd'}] >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of tuples with corresponding columns: >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')] >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2']) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d """ if isinstance(data, DataFrame): if columns is not None: if is_scalar(columns): columns = [columns] data = data[columns] if index is not None: data = data.set_index(index) if exclude is not None: data = data.drop(columns=exclude) return data.copy(deep=False) result_index = None # Make a copy of the input columns so we can modify it if columns is not None: columns = ensure_index(columns) def maybe_reorder( arrays: list[ArrayLike], arr_columns: Index, columns: Index, index ) -> tuple[list[ArrayLike], Index, Index | None]: """ If our desired 'columns' do not match the data's pre-existing 'arr_columns', we re-order our arrays. This is like a pre-emptive (cheap) reindex. """ if len(arrays): length = len(arrays[0]) else: length = 0 result_index = None if len(arrays) == 0 and index is None and length == 0: result_index = default_index(0) arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, length) return arrays, arr_columns, result_index if is_iterator(data): if nrows == 0: return cls() try: first_row = next(data) except StopIteration: return cls(index=index, columns=columns) dtype = None if hasattr(first_row, "dtype") and first_row.dtype.names: dtype = first_row.dtype values = [first_row] if nrows is None: values += data else: values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) else: data = values if isinstance(data, dict): if columns is None: columns = arr_columns = ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] arr_columns_list = [] for k, v in data.items(): if k in columns: arr_columns_list.append(k) arrays.append(v) arr_columns = Index(arr_columns_list) arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = to_arrays(data, columns) arr_columns = columns else: arrays, arr_columns = to_arrays(data, columns) if coerce_float: for i, arr in enumerate(arrays): if arr.dtype == object: # error: Argument 1 to "maybe_convert_objects" has # incompatible type "Union[ExtensionArray, ndarray]"; # expected "ndarray" arrays[i] = lib.maybe_convert_objects( arr, # type: ignore[arg-type] try_float=True, ) arr_columns = ensure_index(arr_columns) if columns is None: columns = arr_columns else: arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) if exclude is None: exclude = set() else: exclude = set(exclude) if index is not None: if isinstance(index, str) or not hasattr(index, "__iter__"): i = columns.get_loc(index) exclude.add(index) if len(arrays) > 0: result_index = Index(arrays[i], name=index) else: result_index = Index([], name=index) else: try: index_data = [arrays[arr_columns.get_loc(field)] for field in index] except (KeyError, TypeError): # raised by get_loc, see GH#29258 result_index = index else: result_index = ensure_index_from_sequences(index_data, names=index) exclude.update(index) if any(exclude): arr_exclude = [x for x in exclude if x in arr_columns] to_remove = [arr_columns.get_loc(col) for col in arr_exclude] arrays = [v for i, v in enumerate(arrays) if i not in to_remove] columns = columns.drop(exclude) manager = 
get_option("mode.data_manager") mgr = arrays_to_mgr(arrays, columns, result_index, typ=manager) return cls(mgr) def to_records( self, index: bool = True, column_dtypes=None, index_dtypes=None ) -> np.recarray: """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. column_dtypes : str, type, dict, default None If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) If the DataFrame index has no label then the recarray field name is set to 'index'. If the index has a label then this is used as the field name: >>> df.index = df.index.rename("I") >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')]) As well as for the index: >>> df.to_records(index_dtypes="<S2") rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')]) >>> index_dtypes = f"<S{df.index.str.len().max()}" >>> df.to_records(index_dtypes=index_dtypes) rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) """ if index: ix_vals = [ np.asarray(self.index.get_level_values(i)) for i in range(self.index.nlevels) ] arrays = ix_vals + [ np.asarray(self.iloc[:, i]) for i in range(len(self.columns)) ] index_names = list(self.index.names) if isinstance(self.index, MultiIndex): index_names = com.fill_missing_names(index_names) elif index_names[0] is None: index_names = ["index"] names = [str(name) for name in itertools.chain(index_names, self.columns)] else: arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))] names = [str(c) for c in self.columns] index_names = [] index_len = len(index_names) formats = [] for i, v in enumerate(arrays): index_int = i # When the names and arrays are collected, we # first collect those in the DataFrame's index, # followed by those in its columns. # # Thus, the total length of the array is: # len(index_names) + len(DataFrame.columns). # # This check allows us to see whether we are # handling a name / array in the index or column. 
        if index_int < index_len:
            dtype_mapping = index_dtypes
            name = index_names[index_int]
        else:
            index_int -= index_len
            dtype_mapping = column_dtypes
            name = self.columns[index_int]

        # We have a dictionary, so we get the data type
        # associated with the index or column (which can
        # be denoted by its name in the DataFrame or its
        # position in the DataFrame's array of indices or
        # columns), whichever is applicable.
        if is_dict_like(dtype_mapping):
            if name in dtype_mapping:
                dtype_mapping = dtype_mapping[name]
            elif index_int in dtype_mapping:
                dtype_mapping = dtype_mapping[index_int]
            else:
                dtype_mapping = None

        # If no mapping can be found, use the array's
        # dtype attribute for formatting.
        #
        # A valid dtype must either be a type or
        # a string naming a type.
        if dtype_mapping is None:
            formats.append(v.dtype)
        elif isinstance(dtype_mapping, (type, np.dtype, str)):
            # error: Argument 1 to "append" of "list" has incompatible
            # type "Union[type, dtype[Any], str]"; expected "dtype[Any]"
            formats.append(dtype_mapping)  # type: ignore[arg-type]
        else:
            element = "row" if i < index_len else "column"
            msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}"
            raise ValueError(msg)

    return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats})

@classmethod
def _from_arrays(
    cls,
    arrays,
    columns,
    index,
    dtype: Dtype | None = None,
    verify_integrity: bool = True,
) -> DataFrame:
    """
    Create DataFrame from a list of arrays corresponding to the columns.

    Parameters
    ----------
    arrays : list-like of arrays
        Each array in the list corresponds to one column, in order.
    columns : list-like, Index
        The column names for the resulting DataFrame.
    index : list-like, Index
        The row labels for the resulting DataFrame.
    dtype : dtype, optional
        Optional dtype to enforce for all arrays.
    verify_integrity : bool, default True
        Validate and homogenize all input. If set to False, it is assumed
        that all elements of `arrays` are actual arrays as they will be
        stored in a block (numpy ndarray or ExtensionArray), have the same
        length as and are aligned with the index, and that `columns` and
        `index` are ensured to be an Index object.

    Returns
    -------
    DataFrame
    """
    if dtype is not None:
        dtype = pandas_dtype(dtype)

    manager = get_option("mode.data_manager")
    columns = ensure_index(columns)
    if len(columns) != len(arrays):
        raise ValueError("len(columns) must match len(arrays)")
    mgr = arrays_to_mgr(
        arrays,
        columns,
        index,
        dtype=dtype,
        verify_integrity=verify_integrity,
        typ=manager,
    )
    return cls(mgr)

@doc(
    storage_options=_shared_docs["storage_options"],
    compression_options=_shared_docs["compression_options"] % "path",
)
def to_stata(
    self,
    path: FilePath | WriteBuffer[bytes],
    *,
    convert_dates: dict[Hashable, str] | None = None,
    write_index: bool = True,
    byteorder: str | None = None,
    time_stamp: datetime.datetime | None = None,
    data_label: str | None = None,
    variable_labels: dict[Hashable, str] | None = None,
    version: int | None = 114,
    convert_strl: Sequence[Hashable] | None = None,
    compression: CompressionOptions = "infer",
    storage_options: StorageOptions = None,
    value_labels: dict[Hashable, dict[float, str]] | None = None,
) -> None:
    """
    Export DataFrame object to Stata dta format.

    Writes the DataFrame to a Stata dataset file.
    "dta" files contain a Stata dataset.

    Parameters
    ----------
    path : str, path object, or buffer
        String, path object (implementing ``os.PathLike[str]``), or file-like
        object implementing a binary ``write()`` function.
    convert_dates : dict
        Dictionary mapping columns containing datetime types to stata
        internal format to use when writing the dates.
Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information. write_index : bool Write the index to Stata dataset. byteorder : str Can be ">", "<", "little", or "big". default is `sys.byteorder`. time_stamp : datetime A datetime to use as file creation date. Default is the current time. data_label : str, optional A label for the data set. Must be 80 characters or smaller. variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. version : {{114, 117, 118, 119, None}}, default 114 Version to use in the output dta file. Set to None to let pandas decide between 118 or 119 formats depending on the number of columns in the frame. Version 114 can be read by Stata 10 and later. Version 117 can be read by Stata 13 or later. Version 118 is supported in Stata 14 and later. Version 119 is supported in Stata 15 and later. Version 114 limits string variables to 244 characters or fewer while versions 117 and later allow strings with lengths up to 2,000,000 characters. Versions 118 and 119 support Unicode characters, and version 119 supports more than 32,767 variables. Version 119 should usually only be used when the number of variables exceeds the capacity of dta format 118. Exporting smaller datasets in format 119 may have unintended consequences, and, as of November 2020, Stata SE cannot read version 119 files. convert_strl : list, optional List of column names to convert to string columns to Stata StrL format. Only available if version is 117. Storing strings in the StrL format can produce smaller dta files if strings have more than 8 characters and values are repeated. {compression_options} .. versionadded:: 1.1.0 .. versionchanged:: 1.4.0 Zstandard support. {storage_options} .. versionadded:: 1.2.0 value_labels : dict of dicts Dictionary containing columns as keys and dictionaries of column value to labels as values. Labels for a single variable must be 32,000 characters or smaller. .. versionadded:: 1.4.0 Raises ------ NotImplementedError * If datetimes contain timezone information * Column dtype is not representable in Stata ValueError * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters See Also -------- read_stata : Import Stata data files. io.stata.StataWriter : Low-level writer for Stata data files. io.stata.StataWriter117 : Low-level writer for version 117 files. Examples -------- >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], ... 
'speed': [350, 18, 361, 15]}})
    >>> df.to_stata('animals.dta')  # doctest: +SKIP
    """
    if version not in (114, 117, 118, 119, None):
        raise ValueError("Only formats 114, 117, 118 and 119 are supported.")
    if version == 114:
        if convert_strl is not None:
            raise ValueError("strl is not supported in format 114")
        from pandas.io.stata import StataWriter as statawriter
    elif version == 117:
        # Incompatible import of "statawriter" (imported name has type
        # "Type[StataWriter117]", local name has type "Type[StataWriter]")
        from pandas.io.stata import (  # type: ignore[assignment]
            StataWriter117 as statawriter,
        )
    else:  # versions 118 and 119
        # Incompatible import of "statawriter" (imported name has type
        # "Type[StataWriter117]", local name has type "Type[StataWriter]")
        from pandas.io.stata import (  # type: ignore[assignment]
            StataWriterUTF8 as statawriter,
        )

    kwargs: dict[str, Any] = {}
    if version is None or version >= 117:
        # strl conversion is only supported >= 117
        kwargs["convert_strl"] = convert_strl
    if version is None or version >= 118:
        # Specifying the version is only supported for UTF8 (118 or 119)
        kwargs["version"] = version

    writer = statawriter(
        path,
        self,
        convert_dates=convert_dates,
        byteorder=byteorder,
        time_stamp=time_stamp,
        data_label=data_label,
        write_index=write_index,
        variable_labels=variable_labels,
        compression=compression,
        storage_options=storage_options,
        value_labels=value_labels,
        **kwargs,
    )
    writer.write_file()

def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None:
    """
    Write a DataFrame to the binary Feather format.

    Parameters
    ----------
    path : str, path object, file-like object
        String, path object (implementing ``os.PathLike[str]``), or file-like
        object implementing a binary ``write()`` function. If a string or a
        path, it will be used as Root Directory path when writing a
        partitioned dataset.
    **kwargs :
        Additional keywords passed to :func:`pyarrow.feather.write_feather`.
        Starting with pyarrow 0.17, this includes the `compression`,
        `compression_level`, `chunksize` and `version` keywords.

        .. versionadded:: 1.1.0

    Notes
    -----
    This function writes the dataframe as a `feather file
    <https://arrow.apache.org/docs/python/feather.html>`_. Requires a default
    index. For saving the DataFrame with your custom index use a method that
    supports custom indices e.g. `to_parquet`.
    """
    from pandas.io.feather_format import to_feather

    to_feather(self, path, **kwargs)

@doc(
    Series.to_markdown,
    klass=_shared_doc_kwargs["klass"],
    storage_options=_shared_docs["storage_options"],
    examples="""Examples
    --------
    >>> df = pd.DataFrame(
    ...     data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
    ... )
    >>> print(df.to_markdown())
    |    | animal_1   | animal_2   |
    |---:|:-----------|:-----------|
    |  0 | elk        | dog        |
    |  1 | pig        | quetzal    |

    Output markdown with a tabulate option.
    >>> print(df.to_markdown(tablefmt="grid"))
    +----+------------+------------+
    |    | animal_1   | animal_2   |
    +====+============+============+
    |  0 | elk        | dog        |
    +----+------------+------------+
    |  1 | pig        | quetzal    |
    +----+------------+------------+""",
)
def to_markdown(
    self,
    buf: FilePath | WriteBuffer[str] | None = None,
    mode: str = "wt",
    index: bool = True,
    storage_options: StorageOptions = None,
    **kwargs,
) -> str | None:
    if "showindex" in kwargs:
        raise ValueError("Pass 'index' instead of 'showindex'")

    kwargs.setdefault("headers", "keys")
    kwargs.setdefault("tablefmt", "pipe")
    kwargs.setdefault("showindex", index)
    tabulate = import_optional_dependency("tabulate")
    result = tabulate.tabulate(self, **kwargs)
    if buf is None:
        return result

    with get_handle(buf, mode, storage_options=storage_options) as handles:
        handles.handle.write(result)
    return None

@overload
def to_parquet(
    self,
    path: None = ...,
    engine: str = ...,
    compression: str | None = ...,
    index: bool | None = ...,
    partition_cols: list[str] | None = ...,
    storage_options: StorageOptions = ...,
    **kwargs,
) -> bytes:
    ...

@overload
def to_parquet(
    self,
    path: FilePath | WriteBuffer[bytes],
    engine: str = ...,
    compression: str | None = ...,
    index: bool | None = ...,
    partition_cols: list[str] | None = ...,
    storage_options: StorageOptions = ...,
    **kwargs,
) -> None:
    ...

@doc(storage_options=_shared_docs["storage_options"])
def to_parquet(
    self,
    path: FilePath | WriteBuffer[bytes] | None = None,
    engine: str = "auto",
    compression: str | None = "snappy",
    index: bool | None = None,
    partition_cols: list[str] | None = None,
    storage_options: StorageOptions = None,
    **kwargs,
) -> bytes | None:
    """
    Write a DataFrame to the binary parquet format.

    This function writes the dataframe as a `parquet file
    <https://parquet.apache.org/>`_. You can choose different parquet
    backends, and have the option of compression. See
    :ref:`the user guide <io.parquet>` for more details.

    Parameters
    ----------
    path : str, path object, file-like object, or None, default None
        String, path object (implementing ``os.PathLike[str]``), or file-like
        object implementing a binary ``write()`` function. If None, the result
        is returned as bytes. If a string or path, it will be used as Root
        Directory path when writing a partitioned dataset.

        .. versionchanged:: 1.2.0

           Previously this was "fname"

    engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
        Parquet library to use. If 'auto', then the option
        ``io.parquet.engine`` is used. The default ``io.parquet.engine``
        behavior is to try 'pyarrow', falling back to 'fastparquet' if
        'pyarrow' is unavailable.
    compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy'
        Name of the compression to use. Use ``None`` for no compression.
    index : bool, default None
        If ``True``, include the dataframe's index(es) in the file output.
        If ``False``, they will not be written to the file.
        If ``None``, similar to ``True`` the dataframe's index(es)
        will be saved. However, instead of being saved as values,
        the RangeIndex will be stored as a range in the metadata so it
        doesn't require much space and is faster. Other indexes will
        be included as columns in the file output.
    partition_cols : list, optional, default None
        Column names by which to partition the dataset.
        Columns are partitioned in the order they are given.
        Must be None if path is not a string.
    {storage_options}

        .. versionadded:: 1.2.0

    **kwargs
        Additional arguments passed to the parquet library. See
        :ref:`pandas io <io.parquet>` for more details.

    Returns
    -------
    bytes if no path argument is provided else None

    See Also
    --------
    read_parquet : Read a parquet file.
DataFrame.to_orc : Write an orc file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- This function requires either the `fastparquet <https://pypi.org/project/fastparquet>`_ or `pyarrow <https://arrow.apache.org/docs/python/>`_ library. Examples -------- >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}}) >>> df.to_parquet('df.parquet.gzip', ... compression='gzip') # doctest: +SKIP >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP col1 col2 0 1 3 1 2 4 If you want to get a buffer to the parquet content you can use a io.BytesIO object, as long as you don't use partition_cols, which creates multiple files. >>> import io >>> f = io.BytesIO() >>> df.to_parquet(f) >>> f.seek(0) 0 >>> content = f.read() """ from pandas.io.parquet import to_parquet return to_parquet( self, path, engine, compression=compression, index=index, partition_cols=partition_cols, storage_options=storage_options, **kwargs, ) def to_orc( self, path: FilePath | WriteBuffer[bytes] | None = None, *, engine: Literal["pyarrow"] = "pyarrow", index: bool | None = None, engine_kwargs: dict[str, Any] | None = None, ) -> bytes | None: """ Write a DataFrame to the ORC format. .. versionadded:: 1.5.0 Parameters ---------- path : str, file-like object or None, default None If a string, it will be used as Root Directory path when writing a partitioned dataset. By file-like object, we refer to objects with a write() method, such as a file handle (e.g. via builtin open function). If path is None, a bytes object is returned. engine : str, default 'pyarrow' ORC library to use. Pyarrow must be >= 7.0.0. index : bool, optional If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``infer`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. engine_kwargs : dict[str, Any] or None, default None Additional keyword arguments passed to :func:`pyarrow.orc.write_table`. Returns ------- bytes if no path argument is provided else None Raises ------ NotImplementedError Dtype of one or more columns is category, unsigned integers, interval, period or sparse. ValueError engine is not pyarrow. See Also -------- read_orc : Read a ORC file. DataFrame.to_parquet : Write a parquet file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- * Before using this function you should read the :ref:`user guide about ORC <io.orc>` and :ref:`install optional dependencies <install.warn_orc>`. * This function requires `pyarrow <https://arrow.apache.org/docs/python/>`_ library. * For supported dtypes please refer to `supported ORC features in Arrow <https://arrow.apache.org/docs/cpp/orc.html#data-types>`__. * Currently timezones in datetime columns are not preserved when a dataframe is converted into ORC files. 
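    * ``engine_kwargs`` are forwarded verbatim to
      :func:`pyarrow.orc.write_table`; for example (assuming the installed
      pyarrow build supports it), ``engine_kwargs={'compression': 'zstd'}``
      selects the file-level compression.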
    Examples
    --------
    >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [4, 3]})
    >>> df.to_orc('df.orc')  # doctest: +SKIP
    >>> pd.read_orc('df.orc')  # doctest: +SKIP
       col1  col2
    0     1     4
    1     2     3

    If you want to get a buffer to the orc content you can write it to io.BytesIO

    >>> import io
    >>> b = io.BytesIO(df.to_orc())  # doctest: +SKIP
    >>> b.seek(0)  # doctest: +SKIP
    0
    >>> content = b.read()  # doctest: +SKIP
    """
    from pandas.io.orc import to_orc

    return to_orc(
        self, path, engine=engine, index=index, engine_kwargs=engine_kwargs
    )

@overload
def to_html(
    self,
    buf: FilePath | WriteBuffer[str],
    columns: Sequence[Level] | None = ...,
    col_space: ColspaceArgType | None = ...,
    header: bool | Sequence[str] = ...,
    index: bool = ...,
    na_rep: str = ...,
    formatters: FormattersType | None = ...,
    float_format: FloatFormatType | None = ...,
    sparsify: bool | None = ...,
    index_names: bool = ...,
    justify: str | None = ...,
    max_rows: int | None = ...,
    max_cols: int | None = ...,
    show_dimensions: bool | str = ...,
    decimal: str = ...,
    bold_rows: bool = ...,
    classes: str | list | tuple | None = ...,
    escape: bool = ...,
    notebook: bool = ...,
    border: int | bool | None = ...,
    table_id: str | None = ...,
    render_links: bool = ...,
    encoding: str | None = ...,
) -> None:
    ...

@overload
def to_html(
    self,
    buf: None = ...,
    columns: Sequence[Level] | None = ...,
    col_space: ColspaceArgType | None = ...,
    header: bool | Sequence[str] = ...,
    index: bool = ...,
    na_rep: str = ...,
    formatters: FormattersType | None = ...,
    float_format: FloatFormatType | None = ...,
    sparsify: bool | None = ...,
    index_names: bool = ...,
    justify: str | None = ...,
    max_rows: int | None = ...,
    max_cols: int | None = ...,
    show_dimensions: bool | str = ...,
    decimal: str = ...,
    bold_rows: bool = ...,
    classes: str | list | tuple | None = ...,
    escape: bool = ...,
    notebook: bool = ...,
    border: int | bool | None = ...,
    table_id: str | None = ...,
    render_links: bool = ...,
    encoding: str | None = ...,
) -> str:
    ...

@Substitution(
    header_type="bool",
    header="Whether to print column labels, default True",
    col_space_type="str or int, list or dict of int or str",
    col_space="The minimum width of each column in CSS length "
    "units. An int is assumed to be px units.",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_html(
    self,
    buf: FilePath | WriteBuffer[str] | None = None,
    columns: Sequence[Level] | None = None,
    col_space: ColspaceArgType | None = None,
    header: bool | Sequence[str] = True,
    index: bool = True,
    na_rep: str = "NaN",
    formatters: FormattersType | None = None,
    float_format: FloatFormatType | None = None,
    sparsify: bool | None = None,
    index_names: bool = True,
    justify: str | None = None,
    max_rows: int | None = None,
    max_cols: int | None = None,
    show_dimensions: bool | str = False,
    decimal: str = ".",
    bold_rows: bool = True,
    classes: str | list | tuple | None = None,
    escape: bool = True,
    notebook: bool = False,
    border: int | bool | None = None,
    table_id: str | None = None,
    render_links: bool = False,
    encoding: str | None = None,
) -> str | None:
    """
    Render a DataFrame as an HTML table.
    %(shared_params)s
    bold_rows : bool, default True
        Make the row labels bold in the output.
    classes : str or list or tuple, default None
        CSS class(es) to apply to the resulting html table.
    escape : bool, default True
        Convert the characters <, >, and & to HTML-safe sequences.
    notebook : {True, False}, default False
        Whether the generated HTML is for IPython Notebook.
    border : int
        A ``border=border`` attribute is included in the opening
        `<table>` tag. Default ``pd.options.display.html.border``.
    table_id : str, optional
        A css id is included in the opening `<table>` tag if specified.
    render_links : bool, default False
        Convert URLs to HTML links.
    encoding : str, default "utf-8"
        Set character encoding.

        .. versionadded:: 1.0
    %(returns)s
    See Also
    --------
    to_string : Convert DataFrame to a string.
    """
    if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS:
        raise ValueError("Invalid value for justify parameter")

    formatter = fmt.DataFrameFormatter(
        self,
        columns=columns,
        col_space=col_space,
        na_rep=na_rep,
        header=header,
        index=index,
        formatters=formatters,
        float_format=float_format,
        bold_rows=bold_rows,
        sparsify=sparsify,
        justify=justify,
        index_names=index_names,
        escape=escape,
        decimal=decimal,
        max_rows=max_rows,
        max_cols=max_cols,
        show_dimensions=show_dimensions,
    )
    # TODO: a generic formatter would be in DataFrameFormatter
    return fmt.DataFrameRenderer(formatter).to_html(
        buf=buf,
        classes=classes,
        notebook=notebook,
        border=border,
        encoding=encoding,
        table_id=table_id,
        render_links=render_links,
    )

@doc(
    storage_options=_shared_docs["storage_options"],
    compression_options=_shared_docs["compression_options"] % "path_or_buffer",
)
def to_xml(
    self,
    path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
    index: bool = True,
    root_name: str | None = "data",
    row_name: str | None = "row",
    na_rep: str | None = None,
    attr_cols: list[str] | None = None,
    elem_cols: list[str] | None = None,
    namespaces: dict[str | None, str] | None = None,
    prefix: str | None = None,
    encoding: str = "utf-8",
    xml_declaration: bool | None = True,
    pretty_print: bool | None = True,
    parser: str | None = "lxml",
    stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = None,
    compression: CompressionOptions = "infer",
    storage_options: StorageOptions = None,
) -> str | None:
    """
    Render a DataFrame to an XML document.

    .. versionadded:: 1.3.0

    Parameters
    ----------
    path_or_buffer : str, path object, file-like object, or None, default None
        String, path object (implementing ``os.PathLike[str]``), or file-like
        object implementing a ``write()`` function. If None, the result is
        returned as a string.
    index : bool, default True
        Whether to include index in XML document.
    root_name : str, default 'data'
        The name of root element in XML document.
    row_name : str, default 'row'
        The name of row element in XML document.
    na_rep : str, optional
        Missing data representation.
    attr_cols : list-like, optional
        List of columns to write as attributes in row element.
        Hierarchical columns will be flattened with underscore
        delimiting the different levels.
    elem_cols : list-like, optional
        List of columns to write as children in row element. By default,
        all columns output as children of row element. Hierarchical
        columns will be flattened with underscore delimiting the
        different levels.
    namespaces : dict, optional
        All namespaces to be defined in root element. Keys of dict
        should be prefix names and values of dict corresponding URIs.
        Default namespaces should be given empty string key. For
        example, ::

            namespaces = {{"": "https://example.com"}}

    prefix : str, optional
        Namespace prefix to be used for every element and/or attribute
        in document. This should be one of the keys in ``namespaces``
        dict.
    encoding : str, default 'utf-8'
        Encoding of the resulting document.
    xml_declaration : bool, default True
        Whether to include the XML declaration at start of document.
    pretty_print : bool, default True
        Whether output should be pretty printed with indentation and
        line breaks.
parser : {{'lxml','etree'}}, default 'lxml' Parser module to use for building of tree. Only 'lxml' and 'etree' are supported. With 'lxml', the ability to use XSLT stylesheet is supported. stylesheet : str, path object or file-like object, optional A URL, file-like object, or a raw string containing an XSLT script used to transform the raw XML output. Script should use layout of elements and attributes from original output. This argument requires ``lxml`` to be installed. Only XSLT 1.0 scripts and not later versions is currently supported. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. {storage_options} Returns ------- None or str If ``io`` is None, returns the resulting XML format as a string. Otherwise returns None. See Also -------- to_json : Convert the pandas object to a JSON string. to_html : Convert DataFrame to a html. Examples -------- >>> df = pd.DataFrame({{'shape': ['square', 'circle', 'triangle'], ... 'degrees': [360, 360, 180], ... 'sides': [4, np.nan, 3]}}) >>> df.to_xml() # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row> <index>0</index> <shape>square</shape> <degrees>360</degrees> <sides>4.0</sides> </row> <row> <index>1</index> <shape>circle</shape> <degrees>360</degrees> <sides/> </row> <row> <index>2</index> <shape>triangle</shape> <degrees>180</degrees> <sides>3.0</sides> </row> </data> >>> df.to_xml(attr_cols=[ ... 'index', 'shape', 'degrees', 'sides' ... ]) # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row index="0" shape="square" degrees="360" sides="4.0"/> <row index="1" shape="circle" degrees="360"/> <row index="2" shape="triangle" degrees="180" sides="3.0"/> </data> >>> df.to_xml(namespaces={{"doc": "https://example.com"}}, ... prefix="doc") # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <doc:data xmlns:doc="https://example.com"> <doc:row> <doc:index>0</doc:index> <doc:shape>square</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides>4.0</doc:sides> </doc:row> <doc:row> <doc:index>1</doc:index> <doc:shape>circle</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides/> </doc:row> <doc:row> <doc:index>2</doc:index> <doc:shape>triangle</doc:shape> <doc:degrees>180</doc:degrees> <doc:sides>3.0</doc:sides> </doc:row> </doc:data> """ from pandas.io.formats.xml import ( EtreeXMLFormatter, LxmlXMLFormatter, ) lxml = import_optional_dependency("lxml.etree", errors="ignore") TreeBuilder: type[EtreeXMLFormatter] | type[LxmlXMLFormatter] if parser == "lxml": if lxml is not None: TreeBuilder = LxmlXMLFormatter else: raise ImportError( "lxml not found, please install or use the etree parser." 
) elif parser == "etree": TreeBuilder = EtreeXMLFormatter else: raise ValueError("Values for parser can only be lxml or etree.") xml_formatter = TreeBuilder( self, path_or_buffer=path_or_buffer, index=index, root_name=root_name, row_name=row_name, na_rep=na_rep, attr_cols=attr_cols, elem_cols=elem_cols, namespaces=namespaces, prefix=prefix, encoding=encoding, xml_declaration=xml_declaration, pretty_print=pretty_print, stylesheet=stylesheet, compression=compression, storage_options=storage_options, ) return xml_formatter.write_output() # ---------------------------------------------------------------------- def info( self, verbose: bool | None = None, buf: WriteBuffer[str] | None = None, max_cols: int | None = None, memory_usage: bool | str | None = None, show_counts: bool | None = None, ) -> None: info = DataFrameInfo( data=self, memory_usage=memory_usage, ) info.render( buf=buf, max_cols=max_cols, verbose=verbose, show_counts=show_counts, ) def memory_usage(self, index: bool = True, deep: bool = False) -> Series: """ Return the memory usage of each column in bytes. The memory usage can optionally include the contribution of the index and elements of `object` dtype. This value is displayed in `DataFrame.info` by default. This can be suppressed by setting ``pandas.options.display.memory_usage`` to False. Parameters ---------- index : bool, default True Specifies whether to include the memory usage of the DataFrame's index in returned Series. If ``index=True``, the memory usage of the index is the first item in the output. deep : bool, default False If True, introspect the data deeply by interrogating `object` dtypes for system-level memory consumption, and include it in the returned values. Returns ------- Series A Series whose index is the original column names and whose values is the memory usage of each column in bytes. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of an ndarray. Series.memory_usage : Bytes consumed by a Series. Categorical : Memory-efficient array for string values with many repeated values. DataFrame.info : Concise summary of a DataFrame. Notes ----- See the :ref:`Frequently Asked Questions <df-memory-usage>` for more details. Examples -------- >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t)) ... for t in dtypes]) >>> df = pd.DataFrame(data) >>> df.head() int64 float64 complex128 object bool 0 1 1.0 1.0+0.0j 1 True 1 1 1.0 1.0+0.0j 1 True 2 1 1.0 1.0+0.0j 1 True 3 1 1.0 1.0+0.0j 1 True 4 1 1.0 1.0+0.0j 1 True >>> df.memory_usage() Index 128 int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=False) int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 The memory footprint of `object` dtype columns is ignored by default: >>> df.memory_usage(deep=True) Index 128 int64 40000 float64 40000 complex128 80000 object 180000 bool 5000 dtype: int64 Use a Categorical for efficient storage of an object-dtype column with many repeated values. 
>>> df['object'].astype('category').memory_usage(deep=True) 5244 """ result = self._constructor_sliced( [c.memory_usage(index=False, deep=deep) for col, c in self.items()], index=self.columns, dtype=np.intp, ) if index: index_memory_usage = self._constructor_sliced( self.index.memory_usage(deep=deep), index=["Index"] ) result = index_memory_usage._append(result) return result def transpose(self, *args, copy: bool = False) -> DataFrame: """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. Parameters ---------- *args : tuple, optional Accepted for compatibility with NumPy. copy : bool, default False Whether to copy the data after transposing, even for DataFrames with a single dtype. Note that a copy is always required for mixed dtype DataFrames, or for DataFrames with any extension types. Returns ------- DataFrame The transposed DataFrame. See Also -------- numpy.transpose : Permute the dimensions of a given array. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the `object` dtype. In such a case, a copy of the data is always made. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = pd.DataFrame(data=d1) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T # or df1.transpose() >>> df1_transposed 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'name': ['Alice', 'Bob'], ... 'score': [9.5, 8], ... 'employed': [False, True], ... 'kids': [0, 0]} >>> df2 = pd.DataFrame(data=d2) >>> df2 name score employed kids 0 Alice 9.5 False 0 1 Bob 8.0 True 0 >>> df2_transposed = df2.T # or df2.transpose() >>> df2_transposed 0 1 name Alice Bob score 9.5 8.0 employed False True kids 0 0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the `object` dtype: >>> df2.dtypes name object score float64 employed bool kids int64 dtype: object >>> df2_transposed.dtypes 0 object 1 object dtype: object """ nv.validate_transpose(args, {}) # construct the args dtypes = list(self.dtypes) if self._can_fast_transpose: # Note: tests pass without this, but this improves perf quite a bit. new_vals = self._values.T if copy and not using_copy_on_write(): new_vals = new_vals.copy() result = self._constructor( new_vals, index=self.columns, columns=self.index, copy=False ) if using_copy_on_write() and len(self) > 0: result._mgr.add_references(self._mgr) # type: ignore[arg-type] elif ( self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]) ): # We have EAs with the same dtype. We can preserve that dtype in transpose. dtype = dtypes[0] arr_type = dtype.construct_array_type() values = self.values new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values] result = type(self)._from_arrays( new_values, index=self.columns, columns=self.index ) else: new_arr = self.values.T if copy and not using_copy_on_write(): new_arr = new_arr.copy() result = self._constructor( new_arr, index=self.columns, columns=self.index, # We already made a copy (more than one block) copy=False, ) return result.__finalize__(self, method="transpose") def T(self) -> DataFrame: """ The transpose of the DataFrame. 
Returns ------- DataFrame The transposed DataFrame. See Also -------- DataFrame.transpose : Transpose index and columns. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.T 0 1 col1 1 2 col2 3 4 """ return self.transpose() # ---------------------------------------------------------------------- # Indexing Methods def _ixs(self, i: int, axis: AxisInt = 0) -> Series: """ Parameters ---------- i : int axis : int Returns ------- Series """ # irow if axis == 0: new_mgr = self._mgr.fast_xs(i) # if we are a copy, mark as such copy = isinstance(new_mgr.array, np.ndarray) and new_mgr.array.base is None result = self._constructor_sliced(new_mgr, name=self.index[i]).__finalize__( self ) result._set_is_copy(self, copy=copy) return result # icol else: label = self.columns[i] col_mgr = self._mgr.iget(i) result = self._box_col_values(col_mgr, i) # this is a cached value, mark it so result._set_as_cached(label, self) return result def _get_column_array(self, i: int) -> ArrayLike: """ Get the values of the i'th column (ndarray or ExtensionArray, as stored in the Block) Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ return self._mgr.iget_values(i) def _iter_column_arrays(self) -> Iterator[ArrayLike]: """ Iterate over the arrays of all columns in order. This returns the values as stored in the Block (ndarray or ExtensionArray). Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ for i in range(len(self.columns)): yield self._get_column_array(i) def _getitem_nocopy(self, key: list): """ Behaves like __getitem__, but returns a view in cases where __getitem__ would make a copy. """ # TODO(CoW): can be removed if/when we are always Copy-on-Write indexer = self.columns._get_indexer_strict(key, "columns")[1] new_axis = self.columns[indexer] new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=0, allow_dups=True, copy=False, only_slice=True, ) return self._constructor(new_mgr) def __getitem__(self, key): check_dict_or_set_indexers(key) key = lib.item_from_zerodim(key) key = com.apply_if_callable(key, self) if is_hashable(key) and not is_iterator(key): # is_iterator to exclude generator e.g. test_getitem_listlike # shortcut if the key is in columns is_mi = isinstance(self.columns, MultiIndex) # GH#45316 Return view if key is not duplicated # Only use drop_duplicates with duplicates for performance if not is_mi and ( self.columns.is_unique and key in self.columns or key in self.columns.drop_duplicates(keep=False) ): return self._get_item_cache(key) elif is_mi and self.columns.is_unique and key in self.columns: return self._getitem_multilevel(key) # Do we have a slicer (on rows)? if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") if isinstance(indexer, np.ndarray): # reachable with DatetimeIndex indexer = lib.maybe_indices_to_slice( indexer.astype(np.intp, copy=False), len(self) ) if isinstance(indexer, np.ndarray): # GH#43223 If we can not convert, use take return self.take(indexer, axis=0) return self._slice(indexer, axis=0) # Do we have a (boolean) DataFrame? if isinstance(key, DataFrame): return self.where(key) # Do we have a (boolean) 1d indexer? 
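# e.g. df[df["col"] > 0] lands here: a same-length boolean mask keeps only the rows where it is True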
if com.is_bool_indexer(key): return self._getitem_bool_array(key) # We are left with two options: a single key, and a collection of keys, # We interpret tuples as collections only for non-MultiIndex is_single_key = isinstance(key, tuple) or not is_list_like(key) if is_single_key: if self.columns.nlevels > 1: return self._getitem_multilevel(key) indexer = self.columns.get_loc(key) if is_integer(indexer): indexer = [indexer] else: if is_iterator(key): key = list(key) indexer = self.columns._get_indexer_strict(key, "columns")[1] # take() does not accept boolean indexers if getattr(indexer, "dtype", None) == bool: indexer = np.where(indexer)[0] data = self._take_with_is_copy(indexer, axis=1) if is_single_key: # What does looking for a single key in a non-unique index return? # The behavior is inconsistent. It returns a Series, except when # - the key itself is repeated (test on data.shape, #9519), or # - we have a MultiIndex on columns (test on self.columns, #21309) if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex): # GH#26490 using data[key] can cause RecursionError return data._get_item_cache(key) return data def _getitem_bool_array(self, key): # also raises Exception if object array with NA values # warning here just in case -- previously __setitem__ was # reindexing but __getitem__ was not; it seems more reasonable to # go with the __setitem__ behavior since that is more consistent # with all other indexing behavior if isinstance(key, Series) and not key.index.equals(self.index): warnings.warn( "Boolean Series key will be reindexed to match DataFrame index.", UserWarning, stacklevel=find_stack_level(), ) elif len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}." ) # check_bool_indexer will throw exception if Series key cannot # be reindexed to match DataFrame rows key = check_bool_indexer(self.index, key) if key.all(): return self.copy(deep=None) indexer = key.nonzero()[0] return self._take_with_is_copy(indexer, axis=0) def _getitem_multilevel(self, key): # self.columns is a MultiIndex loc = self.columns.get_loc(key) if isinstance(loc, (slice, np.ndarray)): new_columns = self.columns[loc] result_columns = maybe_droplevels(new_columns, key) if self._is_mixed_type: result = self.reindex(columns=new_columns) result.columns = result_columns else: new_values = self._values[:, loc] result = self._constructor( new_values, index=self.index, columns=result_columns, copy=False ) if using_copy_on_write() and isinstance(loc, slice): result._mgr.add_references(self._mgr) # type: ignore[arg-type] result = result.__finalize__(self) # If there is only one column being returned, and its name is # either an empty string, or a tuple with an empty string as its # first element, then treat the empty string as a placeholder # and return the column as if the user had provided that empty # string in the key. If the result is a Series, exclude the # implied empty string from its name. if len(result.columns) == 1: # e.g. 
test_frame_getitem_multicolumn_empty_level, # test_frame_mixed_depth_get, test_loc_setitem_single_column_slice top = result.columns[0] if isinstance(top, tuple): top = top[0] if top == "": result = result[""] if isinstance(result, Series): result = self._constructor_sliced( result, index=self.index, name=key ) result._set_is_copy(self) return result else: # loc is neither a slice nor ndarray, so must be an int return self._ixs(loc, axis=1) def _get_value(self, index, col, takeable: bool = False) -> Scalar: """ Quickly retrieve single value at passed column and index. Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- scalar Notes ----- Assumes that both `self.index._index_as_unique` and `self.columns._index_as_unique`; Caller is responsible for checking. """ if takeable: series = self._ixs(col, axis=1) return series._values[index] series = self._get_item_cache(col) engine = self.index._engine if not isinstance(self.index, MultiIndex): # CategoricalIndex: Trying to use the engine fastpath may give incorrect # results if our categories are integers that dont match our codes # IntervalIndex: IntervalTree has no get_loc row = self.index.get_loc(index) return series._values[row] # For MultiIndex going through engine effectively restricts us to # same-length tuples; see test_get_set_value_no_partial_indexing loc = engine.get_loc(index) return series._values[loc] def isetitem(self, loc, value) -> None: """ Set the given value in the column with position `loc`. This is a positional analogue to ``__setitem__``. Parameters ---------- loc : int or sequence of ints Index position for the column. value : scalar or arraylike Value(s) for the column. Notes ----- ``frame.isetitem(loc, value)`` is an in-place method as it will modify the DataFrame in place (not returning a new object). In contrast to ``frame.iloc[:, i] = value`` which will try to update the existing values in place, ``frame.isetitem(loc, value)`` will not update the values of the column itself in place, it will instead insert a new array. In cases where ``frame.columns`` is unique, this is equivalent to ``frame[frame.columns[i]] = value``. 
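For example, replacing the values of the first column positionally (a minimal illustration):

>>> df = pd.DataFrame({"a": [1, 2]})
>>> df.isetitem(0, [10, 20])
>>> df
    a
0  10
1  20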
""" if isinstance(value, DataFrame): if is_scalar(loc): loc = [loc] for i, idx in enumerate(loc): arraylike = self._sanitize_column(value.iloc[:, i]) self._iset_item_mgr(idx, arraylike, inplace=False) return arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=False) def __setitem__(self, key, value): if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) key = com.apply_if_callable(key, self) # see if we can slice the rows if isinstance(key, slice): slc = self.index._convert_slice_indexer(key, kind="getitem") return self._setitem_slice(slc, value) if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2: self._setitem_frame(key, value) elif isinstance(key, (Series, np.ndarray, list, Index)): self._setitem_array(key, value) elif isinstance(value, DataFrame): self._set_item_frame_value(key, value) elif ( is_list_like(value) and not self.columns.is_unique and 1 < len(self.columns.get_indexer_for([key])) == len(value) ): # Column to set is duplicated self._setitem_array([key], value) else: # set column self._set_item(key, value) def _setitem_slice(self, key: slice, value) -> None: # NB: we can't just use self.loc[key] = value because that # operates on labels and we need to operate positional for # backwards-compat, xref GH#31469 self._check_setitem_copy() self.iloc[key] = value def _setitem_array(self, key, value): # also raises Exception if object array with NA values if com.is_bool_indexer(key): # bool indexer is indexing along rows if len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}!" ) key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] self._check_setitem_copy() if isinstance(value, DataFrame): # GH#39931 reindex since iloc does not align value = value.reindex(self.index.take(indexer)) self.iloc[indexer] = value else: # Note: unlike self.iloc[:, indexer] = value, this will # never try to overwrite values inplace if isinstance(value, DataFrame): check_key_length(self.columns, key, value) for k1, k2 in zip(key, value.columns): self[k1] = value[k2] elif not is_list_like(value): for col in key: self[col] = value elif isinstance(value, np.ndarray) and value.ndim == 2: self._iset_not_inplace(key, value) elif np.ndim(value) > 1: # list of lists value = DataFrame(value).values return self._setitem_array(key, value) else: self._iset_not_inplace(key, value) def _iset_not_inplace(self, key, value): # GH#39510 when setting with df[key] = obj with a list-like key and # list-like value, we iterate over those listlikes and set columns # one at a time. This is different from dispatching to # `self.loc[:, key]= value` because loc.__setitem__ may overwrite # data inplace, whereas this will insert new arrays. def igetitem(obj, i: int): # Note: we catch DataFrame obj before getting here, but # hypothetically would return obj.iloc[:, i] if isinstance(obj, np.ndarray): return obj[..., i] else: return obj[i] if self.columns.is_unique: if np.shape(value)[-1] != len(key): raise ValueError("Columns must be same length as key") for i, col in enumerate(key): self[col] = igetitem(value, i) else: ilocs = self.columns.get_indexer_non_unique(key)[0] if (ilocs < 0).any(): # key entries not in self.columns raise NotImplementedError if np.shape(value)[-1] != len(ilocs): raise ValueError("Columns must be same length as key") assert np.ndim(value) <= 2 orig_columns = self.columns # Using self.iloc[:, i] = ... 
may set values inplace, which # by convention we do not do in __setitem__ try: self.columns = Index(range(len(self.columns))) for i, iloc in enumerate(ilocs): self[iloc] = igetitem(value, i) finally: self.columns = orig_columns def _setitem_frame(self, key, value): # support boolean setting with DataFrame input, e.g. # df[df > df2] = 0 if isinstance(key, np.ndarray): if key.shape != self.shape: raise ValueError("Array conditional must be same shape as self") key = self._constructor(key, **self._construct_axes_dict(), copy=False) if key.size and not all(is_bool_dtype(dtype) for dtype in key.dtypes): raise TypeError( "Must pass DataFrame or 2-d ndarray with boolean values only" ) self._check_inplace_setting(value) self._check_setitem_copy() self._where(-key, value, inplace=True) def _set_item_frame_value(self, key, value: DataFrame) -> None: self._ensure_valid_index(value) # align columns if key in self.columns: loc = self.columns.get_loc(key) cols = self.columns[loc] len_cols = 1 if is_scalar(cols) or isinstance(cols, tuple) else len(cols) if len_cols != len(value.columns): raise ValueError("Columns must be same length as key") # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame if isinstance(self.columns, MultiIndex) and isinstance( loc, (slice, Series, np.ndarray, Index) ): cols_droplevel = maybe_droplevels(cols, key) if len(cols_droplevel) and not cols_droplevel.equals(value.columns): value = value.reindex(cols_droplevel, axis=1) for col, col_droplevel in zip(cols, cols_droplevel): self[col] = value[col_droplevel] return if is_scalar(cols): self[cols] = value[value.columns[0]] return # now align rows arraylike = _reindex_for_setitem(value, self.index) self._set_item_mgr(key, arraylike) return if len(value.columns) != 1: raise ValueError( "Cannot set a DataFrame with multiple columns to the single " f"column {key}" ) self[key] = value[value.columns[0]] def _iset_item_mgr( self, loc: int | slice | np.ndarray, value, inplace: bool = False ) -> None: # when called from _set_item_mgr loc can be anything returned from get_loc self._mgr.iset(loc, value, inplace=inplace) self._clear_item_cache() def _set_item_mgr(self, key, value: ArrayLike) -> None: try: loc = self._info_axis.get_loc(key) except KeyError: # This item wasn't present, just insert at end self._mgr.insert(len(self._info_axis), key, value) else: self._iset_item_mgr(loc, value) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _iset_item(self, loc: int, value) -> None: arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=True) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _set_item(self, key, value) -> None: """ Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. 
""" value = self._sanitize_column(value) if ( key in self.columns and value.ndim == 1 and not is_extension_array_dtype(value) ): # broadcast across multiple columns if necessary if not self.columns.is_unique or isinstance(self.columns, MultiIndex): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)).T self._set_item_mgr(key, value) def _set_value( self, index: IndexLabel, col, value: Scalar, takeable: bool = False ) -> None: """ Put single value at passed column and index. Parameters ---------- index : Label row label col : Label column label value : scalar takeable : bool, default False Sets whether or not index/col interpreted as indexers """ try: if takeable: icol = col iindex = cast(int, index) else: icol = self.columns.get_loc(col) iindex = self.index.get_loc(index) self._mgr.column_setitem(icol, iindex, value, inplace_only=True) self._clear_item_cache() except (KeyError, TypeError, ValueError, LossySetitemError): # get_loc might raise a KeyError for missing labels (falling back # to (i)loc will do expansion of the index) # column_setitem will do validation that may raise TypeError, # ValueError, or LossySetitemError # set using a non-recursive method & reset the cache if takeable: self.iloc[index, col] = value else: self.loc[index, col] = value self._item_cache.pop(col, None) except InvalidIndexError as ii_err: # GH48729: Seems like you are trying to assign a value to a # row when only scalar options are permitted raise InvalidIndexError( f"You can only assign a scalar value not a {type(value)}" ) from ii_err def _ensure_valid_index(self, value) -> None: """ Ensure that if we don't have an index, that we can create one from the passed value. """ # GH5632, make sure that we are a Series convertible if not len(self.index) and is_list_like(value) and len(value): if not isinstance(value, DataFrame): try: value = Series(value) except (ValueError, NotImplementedError, TypeError) as err: raise ValueError( "Cannot set a frame with no defined index " "and a value that cannot be converted to a Series" ) from err # GH31368 preserve name of index index_copy = value.index.copy() if self.index.name is not None: index_copy.name = self.index.name self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan) def _box_col_values(self, values: SingleDataManager, loc: int) -> Series: """ Provide boxed values for a column. """ # Lookup in columns so that if e.g. a str datetime was passed # we attach the Timestamp object as the name. 
name = self.columns[loc] klass = self._constructor_sliced # We get index=self.index bc values is a SingleDataManager return klass(values, name=name, fastpath=True).__finalize__(self) # ---------------------------------------------------------------------- # Lookup Caching def _clear_item_cache(self) -> None: self._item_cache.clear() def _get_item_cache(self, item: Hashable) -> Series: """Return the cached item, item represents a label indexer.""" if using_copy_on_write(): loc = self.columns.get_loc(item) return self._ixs(loc, axis=1) cache = self._item_cache res = cache.get(item) if res is None: # All places that call _get_item_cache have unique columns, # pending resolution of GH#33047 loc = self.columns.get_loc(item) res = self._ixs(loc, axis=1) cache[item] = res # for a chain res._is_copy = self._is_copy return res def _reset_cacher(self) -> None: # no-op for DataFrame pass def _maybe_cache_changed(self, item, value: Series, inplace: bool) -> None: """ The object has called back to us saying maybe it has changed. """ loc = self._info_axis.get_loc(item) arraylike = value._values old = self._ixs(loc, axis=1) if old._values is value._values and inplace: # GH#46149 avoid making unnecessary copies/block-splitting return self._mgr.iset(loc, arraylike, inplace=inplace) # ---------------------------------------------------------------------- # Unsorted def query(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> DataFrame: ... def query(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def query(self, expr: str, *, inplace: bool = ..., **kwargs) -> DataFrame | None: ... def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | None: """ Query the columns of a DataFrame with a boolean expression. Parameters ---------- expr : str The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. You can refer to column names that are not valid Python variable names by surrounding them in backticks. Thus, column names containing spaces or punctuations (besides underscores) or starting with digits must be surrounded by backticks. (For example, a column named "Area (cm^2)" would be referenced as ```Area (cm^2)```). Column names which are Python keywords (like "list", "for", "import", etc) cannot be used. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether to modify the DataFrame rather than creating a new one. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. Returns ------- DataFrame or None DataFrame resulting from the provided query expression or None if ``inplace=True``. See Also -------- eval : Evaluate a string describing operations on DataFrame columns. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. 
This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. Please note that Python keywords may not be used as identifiers. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. *Backtick quoted variables* Backtick quoted variables are parsed as literal Python code and are converted internally to a Python valid identifier. This can lead to the following problems. During parsing a number of disallowed characters inside the backtick quoted string are replaced by strings that are allowed as a Python identifier. These characters include all operators in Python, the space character, the question mark, the exclamation mark, the dollar sign, and the euro sign. For other characters that fall outside the ASCII range (U+0001..U+007F) and those that are not further specified in PEP 3131, the query parser will raise an error. This excludes whitespace different than the space character, but also the hashtag (as it is used for comments) and the backtick itself (backtick can also not be escaped). In a special case, quotes that make a pair around a backtick can confuse the parser. For example, ```it's` > `that's``` will raise an error, as it forms a quoted string (``'s > `that'``) with a backtick inside. See also the Python documentation about lexical analysis (https://docs.python.org/3/reference/lexical_analysis.html) in combination with the source code in :mod:`pandas.core.computation.parsing`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ inplace = validate_bool_kwarg(inplace, "inplace") if not isinstance(expr, str): msg = f"expr must be a string to be evaluated, {type(expr)} given" raise ValueError(msg) kwargs["level"] = kwargs.pop("level", 0) + 1 kwargs["target"] = None res = self.eval(expr, **kwargs) try: result = self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query result = self[res] if inplace: self._update_inplace(result) return None else: return result def eval(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> Any: ... def eval(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def eval(self, expr: str, *, inplace: bool = False, **kwargs) -> Any | None: """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. 
This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, pandas object, or None The result of the evaluation or None if ``inplace=True``. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Multiple columns can be assigned to using multi-line expressions: >>> df.eval( ... ''' ... C = A + B ... D = A - B ... ''' ... ) A B C D 0 1 10 11 -9 1 2 8 10 -6 2 3 6 9 -3 3 4 4 8 0 4 5 2 7 3 """ from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, "inplace") kwargs["level"] = kwargs.pop("level", 0) + 1 index_resolvers = self._get_index_resolvers() column_resolvers = self._get_cleaned_column_resolvers() resolvers = column_resolvers, index_resolvers if "target" not in kwargs: kwargs["target"] = self kwargs["resolvers"] = tuple(kwargs.get("resolvers", ())) + resolvers return _eval(expr, inplace=inplace, **kwargs) def select_dtypes(self, include=None, exclude=None) -> DataFrame: """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. See Also -------- DataFrame.dtypes: Return Series with the data type of each column. Notes ----- * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <https://numpy.org/doc/stable/reference/arrays.scalars.html>`__ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ``'timedelta64'`` * To select Pandas categorical dtypes, use ``'category'`` * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 
'c': [1.0, 2.0] * 3}) >>> df a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int64']) b c 0 True 1.0 1 False 2.0 2 True 1.0 3 False 2.0 4 True 1.0 5 False 2.0 """ if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () selection = (frozenset(include), frozenset(exclude)) if not any(selection): raise ValueError("at least one of include or exclude must be nonempty") # convert the myriad valid dtypes object to a single representation def check_int_infer_dtype(dtypes): converted_dtypes: list[type] = [] for dtype in dtypes: # Numpy maps int to different types (int32, in64) on Windows and Linux # see https://github.com/numpy/numpy/issues/9464 if (isinstance(dtype, str) and dtype == "int") or (dtype is int): converted_dtypes.append(np.int32) converted_dtypes.append(np.int64) elif dtype == "float" or dtype is float: # GH#42452 : np.dtype("float") coerces to np.float64 from Numpy 1.20 converted_dtypes.extend([np.float64, np.float32]) else: converted_dtypes.append(infer_dtype_from_object(dtype)) return frozenset(converted_dtypes) include = check_int_infer_dtype(include) exclude = check_int_infer_dtype(exclude) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) # can't both include AND exclude! if not include.isdisjoint(exclude): raise ValueError(f"include and exclude overlap on {(include & exclude)}") def dtype_predicate(dtype: DtypeObj, dtypes_set) -> bool: # GH 46870: BooleanDtype._is_numeric == True but should be excluded return issubclass(dtype.type, tuple(dtypes_set)) or ( np.number in dtypes_set and getattr(dtype, "_is_numeric", False) and not is_bool_dtype(dtype) ) def predicate(arr: ArrayLike) -> bool: dtype = arr.dtype if include: if not dtype_predicate(dtype, include): return False if exclude: if dtype_predicate(dtype, exclude): return False return True mgr = self._mgr._get_data_subset(predicate).copy(deep=None) return type(self)(mgr).__finalize__(self) def insert( self, loc: int, column: Hashable, value: Scalar | AnyArrayLike, allow_duplicates: bool | lib.NoDefault = lib.no_default, ) -> None: """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns). column : str, number, or hashable object Label of the inserted column. value : Scalar, Series, or array-like allow_duplicates : bool, optional, default lib.no_default See Also -------- Index.insert : Insert new item by index. 
Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.insert(1, "newcol", [99, 99]) >>> df col1 newcol col2 0 1 99 3 1 2 99 4 >>> df.insert(0, "col1", [100, 100], allow_duplicates=True) >>> df col1 col1 newcol col2 0 100 1 99 3 1 100 2 99 4 Notice that pandas uses index alignment in case of `value` from type `Series`: >>> df.insert(0, "col0", pd.Series([5, 6], index=[1, 2])) >>> df col0 col1 col1 newcol col2 0 NaN 100 1 99 3 1 5.0 100 2 99 4 """ if allow_duplicates is lib.no_default: allow_duplicates = False if allow_duplicates and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'allow_duplicates=True' when " "'self.flags.allows_duplicate_labels' is False." ) if not allow_duplicates and column in self.columns: # Should this be a different kind of error?? raise ValueError(f"cannot insert {column}, already exists") if not isinstance(loc, int): raise TypeError("loc must be int") value = self._sanitize_column(value) self._mgr.insert(loc, column, value) def assign(self, **kwargs) -> DataFrame: r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. Later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 You can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy(deep=None) for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) return data def _sanitize_column(self, value) -> ArrayLike: """ Ensures new columns (which go into the BlockManager as new blocks) are always copied and converted into an array. 
Parameters ---------- value : scalar, Series, or array-like Returns ------- numpy.ndarray or ExtensionArray """ self._ensure_valid_index(value) # We can get there through isetitem with a DataFrame # or through loc single_block_path if isinstance(value, DataFrame): return _reindex_for_setitem(value, self.index) elif is_dict_like(value): return _reindex_for_setitem(Series(value), self.index) if is_list_like(value): com.require_length_match(value, self.index) return sanitize_array(value, self.index, copy=True, allow_2d=True) def _series(self): return { item: Series( self._mgr.iget(idx), index=self.index, name=item, fastpath=True ) for idx, item in enumerate(self.columns) } # ---------------------------------------------------------------------- # Reindexing and alignment def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): frame = self columns = axes["columns"] if columns is not None: frame = frame._reindex_columns( columns, method, copy, level, fill_value, limit, tolerance ) index = axes["index"] if index is not None: frame = frame._reindex_index( index, method, copy, level, fill_value, limit, tolerance ) return frame def _reindex_index( self, new_index, method, copy: bool, level: Level, fill_value=np.nan, limit=None, tolerance=None, ): new_index, indexer = self.index.reindex( new_index, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {0: [new_index, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_columns( self, new_columns, method, copy: bool, level: Level, fill_value=None, limit=None, tolerance=None, ): new_columns, indexer = self.columns.reindex( new_columns, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {1: [new_columns, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_multi( self, axes: dict[str, Index], copy: bool, fill_value ) -> DataFrame: """ We are guaranteed non-Nones in the axes. """ new_index, row_indexer = self.index.reindex(axes["index"]) new_columns, col_indexer = self.columns.reindex(axes["columns"]) if row_indexer is not None and col_indexer is not None: # Fastpath. By doing two 'take's at once we avoid making an # unnecessary copy. # We only get here with `not self._is_mixed_type`, which (almost) # ensures that self.values is cheap. It may be worth making this # condition more specific. indexer = row_indexer, col_indexer new_values = take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor( new_values, index=new_index, columns=new_columns, copy=False ) else: return self._reindex_with_indexers( {0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value, ) def align( self, other: DataFrame, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value=None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> DataFrame: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) """ Examples -------- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) Change the row labels. >>> df.set_axis(['a', 'b', 'c'], axis='index') A B a 1 4 b 2 5 c 3 6 Change the column labels. 
>>> df.set_axis(['I', 'II'], axis='columns') I II 0 1 4 1 2 5 2 3 6 """ ) **_shared_doc_kwargs, extended_summary_sub=" column or", axis_description_sub=", and 1 identifies the columns", see_also_sub=" or columns", ) ) # ---------------------------------------------------------------------- # Reindex-based selection methods # ---------------------------------------------------------------------- # Sorting # error: Signature of "sort_values" incompatible with supertype "NDFrame" # TODO: Just move the sort_values doc here. ) # ---------------------------------------------------------------------- # Arithmetic Methods ) ) ) # ---------------------------------------------------------------------- # Function application ) # error: Signature of "any" incompatible with supertype "NDFrame" [override] # error: Missing return statement ) # ---------------------------------------------------------------------- # Merging / joining methods # ---------------------------------------------------------------------- # Statistical methods, etc. # ---------------------------------------------------------------------- # ndarray-like stats methods # ---------------------------------------------------------------------- # Add index and columns # ---------------------------------------------------------------------- # Add plotting methods to DataFrame # ---------------------------------------------------------------------- # Internal Interface Methods DataFrame The provided code snippet includes necessary dependencies for implementing the `_cast_to_stata_types` function. Write a Python function `def _cast_to_stata_types(data: DataFrame) -> DataFrame` to solve the following problem: Checks the dtypes of the columns of a pandas DataFrame for compatibility with the data types and ranges supported by Stata, and converts if necessary. Parameters ---------- data : DataFrame The DataFrame to check and convert Notes ----- Numeric columns in Stata must be one of int8, int16, int32, float32 or float64, with some additional value restrictions. int8 and int16 columns are checked for violations of the value restrictions and upcast if needed. int64 data is not usable in Stata, and so it is downcast to int32 whenever the value are in the int32 range, and sidecast to float64 when larger than this range. If the int64 values are outside of the range of those perfectly representable as float64 values, a warning is raised. bool columns are cast to int8. uint columns are converted to int of the same size if there is no loss in precision, otherwise are upcast to a larger type. uint64 is currently not supported since it is concerted to object in a DataFrame. Here is the function: def _cast_to_stata_types(data: DataFrame) -> DataFrame: """ Checks the dtypes of the columns of a pandas DataFrame for compatibility with the data types and ranges supported by Stata, and converts if necessary. Parameters ---------- data : DataFrame The DataFrame to check and convert Notes ----- Numeric columns in Stata must be one of int8, int16, int32, float32 or float64, with some additional value restrictions. int8 and int16 columns are checked for violations of the value restrictions and upcast if needed. int64 data is not usable in Stata, and so it is downcast to int32 whenever the value are in the int32 range, and sidecast to float64 when larger than this range. If the int64 values are outside of the range of those perfectly representable as float64 values, a warning is raised. bool columns are cast to int8. 
    uint columns are converted to int of the same size if there is no loss in
    precision, otherwise are upcast to a larger type. uint64 is currently not
    supported since it is converted to object in a DataFrame.
    """
    ws = ""
    # original, if small, if large
    conversion_data: tuple[
        tuple[type, type, type],
        tuple[type, type, type],
        tuple[type, type, type],
        tuple[type, type, type],
        tuple[type, type, type],
    ] = (
        (np.bool_, np.int8, np.int8),
        (np.uint8, np.int8, np.int16),
        (np.uint16, np.int16, np.int32),
        (np.uint32, np.int32, np.int64),
        (np.uint64, np.int64, np.float64),
    )

    float32_max = struct.unpack("<f", b"\xff\xff\xff\x7e")[0]
    float64_max = struct.unpack("<d", b"\xff\xff\xff\xff\xff\xff\xdf\x7f")[0]

    for col in data:
        # Cast from unsupported types to supported types
        is_nullable_int = isinstance(data[col].dtype, (IntegerDtype, BooleanDtype))
        orig = data[col]
        # We need to find orig_missing before altering data below
        orig_missing = orig.isna()
        if is_nullable_int:
            missing_loc = data[col].isna()
            if missing_loc.any():
                # Replace with always safe value
                fv = 0 if isinstance(data[col].dtype, IntegerDtype) else False
                data.loc[missing_loc, col] = fv
            # Replace with NumPy-compatible column
            data[col] = data[col].astype(data[col].dtype.numpy_dtype)
        dtype = data[col].dtype
        for c_data in conversion_data:
            if dtype == c_data[0]:
                if data[col].max() <= np.iinfo(c_data[1]).max:
                    dtype = c_data[1]
                else:
                    dtype = c_data[2]
                    if c_data[2] == np.int64:  # Warn if necessary
                        if data[col].max() >= 2**53:
                            ws = precision_loss_doc.format("uint64", "float64")

                data[col] = data[col].astype(dtype)

        # Check values and upcast if necessary
        if dtype == np.int8:
            if data[col].max() > 100 or data[col].min() < -127:
                data[col] = data[col].astype(np.int16)
        elif dtype == np.int16:
            if data[col].max() > 32740 or data[col].min() < -32767:
                data[col] = data[col].astype(np.int32)
        elif dtype == np.int64:
            if data[col].max() <= 2147483620 and data[col].min() >= -2147483647:
                data[col] = data[col].astype(np.int32)
            else:
                data[col] = data[col].astype(np.float64)
                if data[col].max() >= 2**53 or data[col].min() <= -(2**53):
                    ws = precision_loss_doc.format("int64", "float64")
        elif dtype in (np.float32, np.float64):
            if np.isinf(data[col]).any():
                raise ValueError(
                    f"Column {col} contains infinity or -infinity, "
                    "which is outside the range supported by Stata."
                )
            value = data[col].max()
            if dtype == np.float32 and value > float32_max:
                data[col] = data[col].astype(np.float64)
            elif dtype == np.float64:
                if value > float64_max:
                    raise ValueError(
                        f"Column {col} has a maximum value ({value}) outside the range "
                        f"supported by Stata ({float64_max})"
                    )
        if is_nullable_int:
            if orig_missing.any():
                # Replace missing by Stata sentinel value
                sentinel = StataMissingValue.BASE_MISSING_VALUES[data[col].dtype.name]
                data.loc[orig_missing, col] = sentinel

    if ws:
        warnings.warn(
            ws,
            PossiblePrecisionLoss,
            stacklevel=find_stack_level(),
        )

    return data
Checks the dtypes of the columns of a pandas DataFrame for compatibility
with the data types and ranges supported by Stata, and converts if
necessary.

Parameters
----------
data : DataFrame
    The DataFrame to check and convert

Notes
-----
Numeric columns in Stata must be one of int8, int16, int32, float32 or
float64, with some additional value restrictions. int8 and int16 columns
are checked for violations of the value restrictions and upcast if needed.
int64 data is not usable in Stata, and so it is downcast to int32 whenever
the values are in the int32 range, and sidecast to float64 when larger than
this range. If the int64 values are outside of the range of those perfectly
representable as float64 values, a warning is raised.

bool columns are cast to int8. uint columns are converted to int of the
same size if there is no loss in precision, otherwise are upcast to a
larger type. uint64 is currently not supported since it is converted to
object in a DataFrame.
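A minimal sketch of how these coercions play out, assuming the private helper shown above is imported from ``pandas.io.stata`` (it is not public API; ``DataFrame.to_stata`` applies the same conversions internally):

import numpy as np
import pandas as pd
from pandas.io.stata import _cast_to_stata_types  # private helper, defined above

df = pd.DataFrame(
    {
        "flag": np.array([True, False], dtype=np.bool_),  # bool -> int8
        "small": np.array([1, 2], dtype=np.uint8),  # fits in int8 -> int8
        "ints": np.array([0, 10], dtype=np.int64),  # within int32 range -> int32
        "wide": np.array([0, 2**40], dtype=np.int64),  # beyond int32 -> float64
    }
)
converted = _cast_to_stata_types(df.copy())
print(converted.dtypes)  # flag int8, small int8, ints int32, wide float64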
173,535
from __future__ import annotations from collections import abc import datetime from io import BytesIO import os import struct import sys from types import TracebackType from typing import ( IO, TYPE_CHECKING, Any, AnyStr, Callable, Final, Hashable, Sequence, cast, ) import warnings from dateutil.relativedelta import relativedelta import numpy as np from pandas._libs.lib import infer_dtype from pandas._libs.writers import max_len_string_array from pandas._typing import ( CompressionOptions, FilePath, ReadBuffer, StorageOptions, WriteBuffer, ) from pandas.errors import ( CategoricalConversionWarning, InvalidColumnName, PossiblePrecisionLoss, ValueLabelTypeMismatch, ) from pandas.util._decorators import ( Appender, doc, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_categorical_dtype, is_datetime64_dtype, is_numeric_dtype, ) from pandas import ( Categorical, DatetimeIndex, NaT, Timestamp, isna, to_datetime, to_timedelta, ) from pandas.core.arrays.boolean import BooleanDtype from pandas.core.arrays.integer import IntegerDtype from pandas.core.frame import DataFrame from pandas.core.indexes.base import Index from pandas.core.series import Series from pandas.core.shared_docs import _shared_docs from pandas.io.common import get_handle class StataReader(StataParser, abc.Iterator): __doc__ = _stata_reader_doc _path_or_buf: IO[bytes] # Will be assigned by `_open_file`. def __init__( self, path_or_buf: FilePath | ReadBuffer[bytes], convert_dates: bool = True, convert_categoricals: bool = True, index_col: str | None = None, convert_missing: bool = False, preserve_dtypes: bool = True, columns: Sequence[str] | None = None, order_categoricals: bool = True, chunksize: int | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ) -> None: super().__init__() self._col_sizes: list[int] = [] # Arguments to the reader (can be temporarily overridden in # calls to read). self._convert_dates = convert_dates self._convert_categoricals = convert_categoricals self._index_col = index_col self._convert_missing = convert_missing self._preserve_dtypes = preserve_dtypes self._columns = columns self._order_categoricals = order_categoricals self._original_path_or_buf = path_or_buf self._compression = compression self._storage_options = storage_options self._encoding = "" self._chunksize = chunksize self._using_iterator = False self._entered = False if self._chunksize is None: self._chunksize = 1 elif not isinstance(chunksize, int) or chunksize <= 0: raise ValueError("chunksize must be a positive integer when set.") # State variables for the file self._close_file: Callable[[], None] | None = None self._has_string_data = False self._missing_values = False self._can_read_value_labels = False self._column_selector_set = False self._value_labels_read = False self._data_read = False self._dtype: np.dtype | None = None self._lines_read = 0 self._native_byteorder = _set_endianness(sys.byteorder) def _ensure_open(self) -> None: """ Ensure the file has been opened and its header data read. """ if not hasattr(self, "_path_or_buf"): self._open_file() def _open_file(self) -> None: """ Open the file (with compression options, etc.), and read header information. """ if not self._entered: warnings.warn( "StataReader is being used without using a context manager. 
" "Using StataReader as a context manager is the only supported method.", ResourceWarning, stacklevel=find_stack_level(), ) handles = get_handle( self._original_path_or_buf, "rb", storage_options=self._storage_options, is_text=False, compression=self._compression, ) if hasattr(handles.handle, "seekable") and handles.handle.seekable(): # If the handle is directly seekable, use it without an extra copy. self._path_or_buf = handles.handle self._close_file = handles.close else: # Copy to memory, and ensure no encoding. with handles: self._path_or_buf = BytesIO(handles.handle.read()) self._close_file = self._path_or_buf.close self._read_header() self._setup_dtype() def __enter__(self) -> StataReader: """enter context manager""" self._entered = True return self def __exit__( self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None, ) -> None: if self._close_file: self._close_file() def close(self) -> None: """Close the handle if its open. .. deprecated: 2.0.0 The close method is not part of the public API. The only supported way to use StataReader is to use it as a context manager. """ warnings.warn( "The StataReader.close() method is not part of the public API and " "will be removed in a future version without notice. " "Using StataReader as a context manager is the only supported method.", FutureWarning, stacklevel=find_stack_level(), ) if self._close_file: self._close_file() def _set_encoding(self) -> None: """ Set string encoding which depends on file version """ if self._format_version < 118: self._encoding = "latin-1" else: self._encoding = "utf-8" def _read_int8(self) -> int: return struct.unpack("b", self._path_or_buf.read(1))[0] def _read_uint8(self) -> int: return struct.unpack("B", self._path_or_buf.read(1))[0] def _read_uint16(self) -> int: return struct.unpack(f"{self._byteorder}H", self._path_or_buf.read(2))[0] def _read_uint32(self) -> int: return struct.unpack(f"{self._byteorder}I", self._path_or_buf.read(4))[0] def _read_uint64(self) -> int: return struct.unpack(f"{self._byteorder}Q", self._path_or_buf.read(8))[0] def _read_int16(self) -> int: return struct.unpack(f"{self._byteorder}h", self._path_or_buf.read(2))[0] def _read_int32(self) -> int: return struct.unpack(f"{self._byteorder}i", self._path_or_buf.read(4))[0] def _read_int64(self) -> int: return struct.unpack(f"{self._byteorder}q", self._path_or_buf.read(8))[0] def _read_char8(self) -> bytes: return struct.unpack("c", self._path_or_buf.read(1))[0] def _read_int16_count(self, count: int) -> tuple[int, ...]: return struct.unpack( f"{self._byteorder}{'h' * count}", self._path_or_buf.read(2 * count), ) def _read_header(self) -> None: first_char = self._read_char8() if first_char == b"<": self._read_new_header() else: self._read_old_header(first_char) self._has_string_data = len([x for x in self._typlist if type(x) is int]) > 0 # calculate size of a data record self._col_sizes = [self._calcsize(typ) for typ in self._typlist] def _read_new_header(self) -> None: # The first part of the header is common to 117 - 119. 
self._path_or_buf.read(27) # stata_dta><header><release> self._format_version = int(self._path_or_buf.read(3)) if self._format_version not in [117, 118, 119]: raise ValueError(_version_error.format(version=self._format_version)) self._set_encoding() self._path_or_buf.read(21) # </release><byteorder> self._byteorder = ">" if self._path_or_buf.read(3) == b"MSF" else "<" self._path_or_buf.read(15) # </byteorder><K> self._nvar = ( self._read_uint16() if self._format_version <= 118 else self._read_uint32() ) self._path_or_buf.read(7) # </K><N> self._nobs = self._get_nobs() self._path_or_buf.read(11) # </N><label> self._data_label = self._get_data_label() self._path_or_buf.read(19) # </label><timestamp> self._time_stamp = self._get_time_stamp() self._path_or_buf.read(26) # </timestamp></header><map> self._path_or_buf.read(8) # 0x0000000000000000 self._path_or_buf.read(8) # position of <map> self._seek_vartypes = self._read_int64() + 16 self._seek_varnames = self._read_int64() + 10 self._seek_sortlist = self._read_int64() + 10 self._seek_formats = self._read_int64() + 9 self._seek_value_label_names = self._read_int64() + 19 # Requires version-specific treatment self._seek_variable_labels = self._get_seek_variable_labels() self._path_or_buf.read(8) # <characteristics> self._data_location = self._read_int64() + 6 self._seek_strls = self._read_int64() + 7 self._seek_value_labels = self._read_int64() + 14 self._typlist, self._dtyplist = self._get_dtypes(self._seek_vartypes) self._path_or_buf.seek(self._seek_varnames) self._varlist = self._get_varlist() self._path_or_buf.seek(self._seek_sortlist) self._srtlist = self._read_int16_count(self._nvar + 1)[:-1] self._path_or_buf.seek(self._seek_formats) self._fmtlist = self._get_fmtlist() self._path_or_buf.seek(self._seek_value_label_names) self._lbllist = self._get_lbllist() self._path_or_buf.seek(self._seek_variable_labels) self._variable_labels = self._get_variable_labels() # Get data type information, works for versions 117-119. 
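# Hedged sketch of the 117+ type-code convention handled by _get_dtypes below:
# codes 1..2045 are fixed str widths, while larger codes select numeric/strL
# types. The mapping values follow the dta specification but are written here
# as an assumption, independent of the class's real TYPE_MAP_XML.
_TYPE_SKETCH = {32768: "Q", 65526: "d", 65527: "f", 65528: "l", 65529: "h", 65530: "b"}

def _typ_sketch(code: int):
    # small codes are string widths; everything else maps to a type letter
    return code if code <= 2045 else _TYPE_SKETCH[code]

assert _typ_sketch(10) == 10        # a str10 column
assert _typ_sketch(65527) == "f"    # a float column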
    def _get_dtypes(
        self, seek_vartypes: int
    ) -> tuple[list[int | str], list[str | np.dtype]]:
        self._path_or_buf.seek(seek_vartypes)
        raw_typlist = [self._read_uint16() for _ in range(self._nvar)]

        def f(typ: int) -> int | str:
            if typ <= 2045:
                return typ
            try:
                return self.TYPE_MAP_XML[typ]
            except KeyError as err:
                raise ValueError(f"cannot convert stata types [{typ}]") from err

        typlist = [f(x) for x in raw_typlist]

        def g(typ: int) -> str | np.dtype:
            if typ <= 2045:
                return str(typ)
            try:
                return self.DTYPE_MAP_XML[typ]
            except KeyError as err:
                raise ValueError(f"cannot convert stata dtype [{typ}]") from err

        dtyplist = [g(x) for x in raw_typlist]

        return typlist, dtyplist

    def _get_varlist(self) -> list[str]:
        # 33 in older formats, 129 in formats 118 and 119
        b = 33 if self._format_version < 118 else 129
        return [self._decode(self._path_or_buf.read(b)) for _ in range(self._nvar)]

    # Returns the format list
    def _get_fmtlist(self) -> list[str]:
        if self._format_version >= 118:
            b = 57
        elif self._format_version > 113:
            b = 49
        elif self._format_version > 104:
            b = 12
        else:
            b = 7

        return [self._decode(self._path_or_buf.read(b)) for _ in range(self._nvar)]

    # Returns the label list
    def _get_lbllist(self) -> list[str]:
        if self._format_version >= 118:
            b = 129
        elif self._format_version > 108:
            b = 33
        else:
            b = 9
        return [self._decode(self._path_or_buf.read(b)) for _ in range(self._nvar)]

    def _get_variable_labels(self) -> list[str]:
        if self._format_version >= 118:
            vlblist = [
                self._decode(self._path_or_buf.read(321)) for _ in range(self._nvar)
            ]
        elif self._format_version > 105:
            vlblist = [
                self._decode(self._path_or_buf.read(81)) for _ in range(self._nvar)
            ]
        else:
            vlblist = [
                self._decode(self._path_or_buf.read(32)) for _ in range(self._nvar)
            ]
        return vlblist

    def _get_nobs(self) -> int:
        if self._format_version >= 118:
            return self._read_uint64()
        else:
            return self._read_uint32()

    def _get_data_label(self) -> str:
        if self._format_version >= 118:
            strlen = self._read_uint16()
            return self._decode(self._path_or_buf.read(strlen))
        elif self._format_version == 117:
            strlen = self._read_int8()
            return self._decode(self._path_or_buf.read(strlen))
        elif self._format_version > 105:
            return self._decode(self._path_or_buf.read(81))
        else:
            return self._decode(self._path_or_buf.read(32))

    def _get_time_stamp(self) -> str:
        if self._format_version >= 118:
            strlen = self._read_int8()
            return self._path_or_buf.read(strlen).decode("utf-8")
        elif self._format_version == 117:
            strlen = self._read_int8()
            return self._decode(self._path_or_buf.read(strlen))
        elif self._format_version > 104:
            return self._decode(self._path_or_buf.read(18))
        else:
            raise ValueError()

    def _get_seek_variable_labels(self) -> int:
        if self._format_version == 117:
            self._path_or_buf.read(8)  # <variable_labels>, throw away
            # Stata 117 data files do not follow the described format. This is
            # a work around that uses the previous label, 33 bytes for each
            # variable, 20 for the closing tag and 17 for the opening tag
            return self._seek_value_label_names + (33 * self._nvar) + 20 + 17
        elif self._format_version >= 118:
            return self._read_int64() + 17
        else:
            raise ValueError()

    def _read_old_header(self, first_char: bytes) -> None:
        self._format_version = int(first_char[0])
        if self._format_version not in [104, 105, 108, 111, 113, 114, 115]:
            raise ValueError(_version_error.format(version=self._format_version))
        self._set_encoding()
        self._byteorder = ">" if self._read_int8() == 0x1 else "<"
        self._filetype = self._read_int8()
        self._path_or_buf.read(1)  # unused

        self._nvar = self._read_uint16()
        self._nobs = self._get_nobs()

        self._data_label = self._get_data_label()

        self._time_stamp = self._get_time_stamp()

        # descriptors
        if self._format_version > 108:
            typlist = [int(c) for c in self._path_or_buf.read(self._nvar)]
        else:
            buf = self._path_or_buf.read(self._nvar)
            typlistb = np.frombuffer(buf, dtype=np.uint8)
            typlist = []
            for tp in typlistb:
                if tp in self.OLD_TYPE_MAPPING:
                    typlist.append(self.OLD_TYPE_MAPPING[tp])
                else:
                    typlist.append(tp - 127)  # bytes

        try:
            self._typlist = [self.TYPE_MAP[typ] for typ in typlist]
        except ValueError as err:
            invalid_types = ",".join([str(x) for x in typlist])
            raise ValueError(f"cannot convert stata types [{invalid_types}]") from err
        try:
            self._dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
        except ValueError as err:
            invalid_dtypes = ",".join([str(x) for x in typlist])
            raise ValueError(f"cannot convert stata dtypes [{invalid_dtypes}]") from err

        if self._format_version > 108:
            self._varlist = [
                self._decode(self._path_or_buf.read(33)) for _ in range(self._nvar)
            ]
        else:
            self._varlist = [
                self._decode(self._path_or_buf.read(9)) for _ in range(self._nvar)
            ]
        self._srtlist = self._read_int16_count(self._nvar + 1)[:-1]

        self._fmtlist = self._get_fmtlist()

        self._lbllist = self._get_lbllist()

        self._variable_labels = self._get_variable_labels()

        # ignore expansion fields (Format 105 and later)
        # When reading, read five bytes; the last four bytes now tell you
        # the size of the next read, which you discard. You then continue
        # like this until you read 5 bytes of zeros.
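# Standalone hedged sketch of the skip loop implemented just below: each
# expansion field is a type byte plus a 2- or 4-byte length; a zero-typed,
# zero-length record terminates the list.
import struct
from io import BytesIO

buf = BytesIO(struct.pack("<bi3s", 1, 3, b"xyz") + struct.pack("<bi", 0, 0))
while True:
    data_type = struct.unpack("b", buf.read(1))[0]
    data_len = struct.unpack("<i", buf.read(4))[0]  # int16 in formats <= 108
    if data_type == 0:
        break
    buf.read(data_len)  # the payload is discarded, exactly as below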
if self._format_version > 104: while True: data_type = self._read_int8() if self._format_version > 108: data_len = self._read_int32() else: data_len = self._read_int16() if data_type == 0: break self._path_or_buf.read(data_len) # necessary data to continue parsing self._data_location = self._path_or_buf.tell() def _setup_dtype(self) -> np.dtype: """Map between numpy and state dtypes""" if self._dtype is not None: return self._dtype dtypes = [] # Convert struct data types to numpy data type for i, typ in enumerate(self._typlist): if typ in self.NUMPY_TYPE_MAP: typ = cast(str, typ) # only strs in NUMPY_TYPE_MAP dtypes.append((f"s{i}", f"{self._byteorder}{self.NUMPY_TYPE_MAP[typ]}")) else: dtypes.append((f"s{i}", f"S{typ}")) self._dtype = np.dtype(dtypes) return self._dtype def _calcsize(self, fmt: int | str) -> int: if isinstance(fmt, int): return fmt return struct.calcsize(self._byteorder + fmt) def _decode(self, s: bytes) -> str: # have bytes not strings, so must decode s = s.partition(b"\0")[0] try: return s.decode(self._encoding) except UnicodeDecodeError: # GH 25960, fallback to handle incorrect format produced when 117 # files are converted to 118 files in Stata encoding = self._encoding msg = f""" One or more strings in the dta file could not be decoded using {encoding}, and so the fallback encoding of latin-1 is being used. This can happen when a file has been incorrectly encoded by Stata or some other software. You should verify the string values returned are correct.""" warnings.warn( msg, UnicodeWarning, stacklevel=find_stack_level(), ) return s.decode("latin-1") def _read_value_labels(self) -> None: self._ensure_open() if self._value_labels_read: # Don't read twice return if self._format_version <= 108: # Value labels are not supported in version 108 and earlier. self._value_labels_read = True self._value_label_dict: dict[str, dict[float, str]] = {} return if self._format_version >= 117: self._path_or_buf.seek(self._seek_value_labels) else: assert self._dtype is not None offset = self._nobs * self._dtype.itemsize self._path_or_buf.seek(self._data_location + offset) self._value_labels_read = True self._value_label_dict = {} while True: if self._format_version >= 117: if self._path_or_buf.read(5) == b"</val": # <lbl> break # end of value label table slength = self._path_or_buf.read(4) if not slength: break # end of value label table (format < 117) if self._format_version <= 117: labname = self._decode(self._path_or_buf.read(33)) else: labname = self._decode(self._path_or_buf.read(129)) self._path_or_buf.read(3) # padding n = self._read_uint32() txtlen = self._read_uint32() off = np.frombuffer( self._path_or_buf.read(4 * n), dtype=f"{self._byteorder}i4", count=n ) val = np.frombuffer( self._path_or_buf.read(4 * n), dtype=f"{self._byteorder}i4", count=n ) ii = np.argsort(off) off = off[ii] val = val[ii] txt = self._path_or_buf.read(txtlen) self._value_label_dict[labname] = {} for i in range(n): end = off[i + 1] if i < n - 1 else txtlen self._value_label_dict[labname][val[i]] = self._decode( txt[off[i] : end] ) if self._format_version >= 117: self._path_or_buf.read(6) # </lbl> self._value_labels_read = True def _read_strls(self) -> None: self._path_or_buf.seek(self._seek_strls) # Wrap v_o in a string to allow uint64 values as keys on 32bit OS self.GSO = {"0": ""} while True: if self._path_or_buf.read(3) != b"GSO": break if self._format_version == 117: v_o = self._read_uint64() else: buf = self._path_or_buf.read(12) # Only tested on little endian file on little endian machine. 
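# A hedged standalone rendering of the value-label slicing in
# _read_value_labels above: offsets index one shared text blob, and each label
# is the NUL-terminated slice starting at its offset (sample values are
# hypothetical).
import numpy as np

off, val = np.array([0, 5]), np.array([1, 2])
txt, txtlen, n = b"male\x00female\x00", 12, 2
labels = {}
for i in range(n):
    end = off[i + 1] if i < n - 1 else txtlen
    labels[int(val[i])] = txt[off[i] : end].partition(b"\x00")[0].decode("utf-8")
assert labels == {1: "male", 2: "female"}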
v_size = 2 if self._format_version == 118 else 3 if self._byteorder == "<": buf = buf[0:v_size] + buf[4 : (12 - v_size)] else: # This path may not be correct, impossible to test buf = buf[0:v_size] + buf[(4 + v_size) :] v_o = struct.unpack("Q", buf)[0] typ = self._read_uint8() length = self._read_uint32() va = self._path_or_buf.read(length) if typ == 130: decoded_va = va[0:-1].decode(self._encoding) else: # Stata says typ 129 can be binary, so use str decoded_va = str(va) # Wrap v_o in a string to allow uint64 values as keys on 32bit OS self.GSO[str(v_o)] = decoded_va def __next__(self) -> DataFrame: self._using_iterator = True return self.read(nrows=self._chunksize) def get_chunk(self, size: int | None = None) -> DataFrame: """ Reads lines from Stata file and returns as dataframe Parameters ---------- size : int, defaults to None Number of lines to read. If None, reads whole file. Returns ------- DataFrame """ if size is None: size = self._chunksize return self.read(nrows=size) def read( self, nrows: int | None = None, convert_dates: bool | None = None, convert_categoricals: bool | None = None, index_col: str | None = None, convert_missing: bool | None = None, preserve_dtypes: bool | None = None, columns: Sequence[str] | None = None, order_categoricals: bool | None = None, ) -> DataFrame: self._ensure_open() # Handle empty file or chunk. If reading incrementally raise # StopIteration. If reading the whole thing return an empty # data frame. if (self._nobs == 0) and (nrows is None): self._can_read_value_labels = True self._data_read = True return DataFrame(columns=self._varlist) # Handle options if convert_dates is None: convert_dates = self._convert_dates if convert_categoricals is None: convert_categoricals = self._convert_categoricals if convert_missing is None: convert_missing = self._convert_missing if preserve_dtypes is None: preserve_dtypes = self._preserve_dtypes if columns is None: columns = self._columns if order_categoricals is None: order_categoricals = self._order_categoricals if index_col is None: index_col = self._index_col if nrows is None: nrows = self._nobs if (self._format_version >= 117) and (not self._value_labels_read): self._can_read_value_labels = True self._read_strls() # Read data assert self._dtype is not None dtype = self._dtype max_read_len = (self._nobs - self._lines_read) * dtype.itemsize read_len = nrows * dtype.itemsize read_len = min(read_len, max_read_len) if read_len <= 0: # Iterator has finished, should never be here unless # we are reading the file incrementally if convert_categoricals: self._read_value_labels() raise StopIteration offset = self._lines_read * dtype.itemsize self._path_or_buf.seek(self._data_location + offset) read_lines = min(nrows, self._nobs - self._lines_read) raw_data = np.frombuffer( self._path_or_buf.read(read_len), dtype=dtype, count=read_lines ) self._lines_read += read_lines if self._lines_read == self._nobs: self._can_read_value_labels = True self._data_read = True # if necessary, swap the byte order to native here if self._byteorder != self._native_byteorder: raw_data = raw_data.byteswap().newbyteorder() if convert_categoricals: self._read_value_labels() if len(raw_data) == 0: data = DataFrame(columns=self._varlist) else: data = DataFrame.from_records(raw_data) data.columns = Index(self._varlist) # If index is not specified, use actual row number rather than # restarting at 0 for each chunk. 
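# Hedged elaboration of the comment above, with hypothetical running state:
# chunk k of size m gets index range [k*m, (k+1)*m) instead of restarting at 0.
lines_read, read_lines = 3000, 1000  # state after three 1000-row chunks
rng = range(lines_read - read_lines, lines_read)  # -> range(2000, 3000)
assert list(rng)[:2] == [2000, 2001]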
if index_col is None: rng = range(self._lines_read - read_lines, self._lines_read) data.index = Index(rng) # set attr instead of set_index to avoid copy if columns is not None: data = self._do_select_columns(data, columns) # Decode strings for col, typ in zip(data, self._typlist): if type(typ) is int: data[col] = data[col].apply(self._decode, convert_dtype=True) data = self._insert_strls(data) cols_ = np.where([dtyp is not None for dtyp in self._dtyplist])[0] # Convert columns (if needed) to match input type ix = data.index requires_type_conversion = False data_formatted = [] for i in cols_: if self._dtyplist[i] is not None: col = data.columns[i] dtype = data[col].dtype if dtype != np.dtype(object) and dtype != self._dtyplist[i]: requires_type_conversion = True data_formatted.append( (col, Series(data[col], ix, self._dtyplist[i])) ) else: data_formatted.append((col, data[col])) if requires_type_conversion: data = DataFrame.from_dict(dict(data_formatted)) del data_formatted data = self._do_convert_missing(data, convert_missing) if convert_dates: def any_startswith(x: str) -> bool: return any(x.startswith(fmt) for fmt in _date_formats) cols = np.where([any_startswith(x) for x in self._fmtlist])[0] for i in cols: col = data.columns[i] data[col] = _stata_elapsed_date_to_datetime_vec( data[col], self._fmtlist[i] ) if convert_categoricals and self._format_version > 108: data = self._do_convert_categoricals( data, self._value_label_dict, self._lbllist, order_categoricals ) if not preserve_dtypes: retyped_data = [] convert = False for col in data: dtype = data[col].dtype if dtype in (np.dtype(np.float16), np.dtype(np.float32)): dtype = np.dtype(np.float64) convert = True elif dtype in ( np.dtype(np.int8), np.dtype(np.int16), np.dtype(np.int32), ): dtype = np.dtype(np.int64) convert = True retyped_data.append((col, data[col].astype(dtype))) if convert: data = DataFrame.from_dict(dict(retyped_data)) if index_col is not None: data = data.set_index(data.pop(index_col)) return data def _do_convert_missing(self, data: DataFrame, convert_missing: bool) -> DataFrame: # Check for missing values, and replace if found replacements = {} for i, colname in enumerate(data): fmt = self._typlist[i] if fmt not in self.VALID_RANGE: continue fmt = cast(str, fmt) # only strs in VALID_RANGE nmin, nmax = self.VALID_RANGE[fmt] series = data[colname] # appreciably faster to do this with ndarray instead of Series svals = series._values missing = (svals < nmin) | (svals > nmax) if not missing.any(): continue if convert_missing: # Replacement follows Stata notation missing_loc = np.nonzero(np.asarray(missing))[0] umissing, umissing_loc = np.unique(series[missing], return_inverse=True) replacement = Series(series, dtype=object) for j, um in enumerate(umissing): missing_value = StataMissingValue(um) loc = missing_loc[umissing_loc == j] replacement.iloc[loc] = missing_value else: # All replacements are identical dtype = series.dtype if dtype not in (np.float32, np.float64): dtype = np.float64 replacement = Series(series, dtype=dtype) if not replacement._values.flags["WRITEABLE"]: # only relevant for ArrayManager; construction # path for BlockManager ensures writeability replacement = replacement.copy() # Note: operating on ._values is much faster than directly # TODO: can we fix that? 
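# Hedged aside on the VALID_RANGE check in _do_convert_missing above: Stata
# encodes missing values as sentinels beyond each type's valid range, so
# detection is two vectorized comparisons. The range shown is the int8 ("b")
# case; values are illustrative.
import numpy as np

nmin, nmax = -127, 100
svals = np.array([5, 101, -2], dtype=np.int8)
missing = (svals < nmin) | (svals > nmax)
assert missing.tolist() == [False, True, False]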
replacement._values[missing] = np.nan replacements[colname] = replacement if replacements: for col, value in replacements.items(): data[col] = value return data def _insert_strls(self, data: DataFrame) -> DataFrame: if not hasattr(self, "GSO") or len(self.GSO) == 0: return data for i, typ in enumerate(self._typlist): if typ != "Q": continue # Wrap v_o in a string to allow uint64 values as keys on 32bit OS data.iloc[:, i] = [self.GSO[str(k)] for k in data.iloc[:, i]] return data def _do_select_columns(self, data: DataFrame, columns: Sequence[str]) -> DataFrame: if not self._column_selector_set: column_set = set(columns) if len(column_set) != len(columns): raise ValueError("columns contains duplicate entries") unmatched = column_set.difference(data.columns) if unmatched: joined = ", ".join(list(unmatched)) raise ValueError( "The following columns were not " f"found in the Stata data set: {joined}" ) # Copy information for retained columns for later processing dtyplist = [] typlist = [] fmtlist = [] lbllist = [] for col in columns: i = data.columns.get_loc(col) dtyplist.append(self._dtyplist[i]) typlist.append(self._typlist[i]) fmtlist.append(self._fmtlist[i]) lbllist.append(self._lbllist[i]) self._dtyplist = dtyplist self._typlist = typlist self._fmtlist = fmtlist self._lbllist = lbllist self._column_selector_set = True return data[columns] def _do_convert_categoricals( self, data: DataFrame, value_label_dict: dict[str, dict[float, str]], lbllist: Sequence[str], order_categoricals: bool, ) -> DataFrame: """ Converts categorical columns to Categorical type. """ value_labels = list(value_label_dict.keys()) cat_converted_data = [] for col, label in zip(data, lbllist): if label in value_labels: # Explicit call with ordered=True vl = value_label_dict[label] keys = np.array(list(vl.keys())) column = data[col] key_matches = column.isin(keys) if self._using_iterator and key_matches.all(): initial_categories: np.ndarray | None = keys # If all categories are in the keys and we are iterating, # use the same keys for all chunks. If some are missing # value labels, then we will fall back to the categories # varying across chunks. else: if self._using_iterator: # warn is using an iterator warnings.warn( categorical_conversion_warning, CategoricalConversionWarning, stacklevel=find_stack_level(), ) initial_categories = None cat_data = Categorical( column, categories=initial_categories, ordered=order_categoricals ) if initial_categories is None: # If None here, then we need to match the cats in the Categorical categories = [] for category in cat_data.categories: if category in vl: categories.append(vl[category]) else: categories.append(category) else: # If all cats are matched, we can use the values categories = list(vl.values()) try: # Try to catch duplicate categories # TODO: if we get a non-copying rename_categories, use that cat_data = cat_data.rename_categories(categories) except ValueError as err: vc = Series(categories, copy=False).value_counts() repeated_cats = list(vc.index[vc > 1]) repeats = "-" * 80 + "\n" + "\n".join(repeated_cats) # GH 25772 msg = f""" Value labels for column {col} are not unique. These cannot be converted to pandas categoricals. Either read the file with `convert_categoricals` set to False or use the low level interface in `StataReader` to separately read the values and the value_labels. The repeated labels are: {repeats} """ raise ValueError(msg) from err # TODO: is the next line needed above in the data(...) method? 
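# Condensed, hedged sketch of the conversion in _do_convert_categoricals above:
# integer codes become an ordered Categorical, then value labels replace the
# raw categories. The value-label dict here is hypothetical.
import pandas as pd

vl = {1: "male", 2: "female"}
cat = pd.Categorical([1, 2, 1], categories=list(vl), ordered=True)
cat = cat.rename_categories([vl[c] for c in cat.categories])
assert list(cat) == ["male", "female", "male"]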
cat_series = Series(cat_data, index=data.index, copy=False) cat_converted_data.append((col, cat_series)) else: cat_converted_data.append((col, data[col])) data = DataFrame(dict(cat_converted_data), copy=False) return data def data_label(self) -> str: """ Return data label of Stata file. """ self._ensure_open() return self._data_label def time_stamp(self) -> str: """ Return time stamp of Stata file. """ self._ensure_open() return self._time_stamp def variable_labels(self) -> dict[str, str]: """ Return a dict associating each variable name with corresponding label. Returns ------- dict """ self._ensure_open() return dict(zip(self._varlist, self._variable_labels)) def value_labels(self) -> dict[str, dict[float, str]]: """ Return a nested dict associating each variable name to its value and label. Returns ------- dict """ if not self._value_labels_read: self._read_value_labels() return self._value_label_dict class Sequence(_Collection[_T_co], Reversible[_T_co], Generic[_T_co]): def __getitem__(self, i: int) -> _T_co: ... def __getitem__(self, s: slice) -> Sequence[_T_co]: ... # Mixin methods def index(self, value: Any, start: int = ..., stop: int = ...) -> int: ... def count(self, value: Any) -> int: ... def __contains__(self, x: object) -> bool: ... def __iter__(self) -> Iterator[_T_co]: ... def __reversed__(self) -> Iterator[_T_co]: ... class ReadBuffer(BaseBuffer, Protocol[AnyStr_co]): def read(self, __n: int = ...) -> AnyStr_co: # for BytesIOWrapper, gzip.GzipFile, bz2.BZ2File ... FilePath = Union[str, "PathLike[str]"] StorageOptions = Optional[Dict[str, Any]] CompressionOptions = Optional[ Union[Literal["infer", "gzip", "bz2", "zip", "xz", "zstd", "tar"], CompressionDict] ] class DataFrame(NDFrame, OpsMixin): """ Two-dimensional, size-mutable, potentially heterogeneous tabular data. Data structure also contains labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure. Parameters ---------- data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame Dict can contain Series, arrays, constants, dataclass or list-like objects. If data is a dict, column order follows insertion-order. If a dict contains Series which have an index defined, it is aligned by its index. This alignment also occurs if data is a Series or a DataFrame itself. Alignment is done on Series/DataFrame inputs. If data is a list of dicts, column order follows insertion-order. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided. columns : Index or array-like Column labels to use for resulting frame when data does not have them, defaulting to RangeIndex(0, 1, 2, ..., n). If data contains column labels, will perform column selection instead. dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer. copy : bool or None, default None Copy data from inputs. For dict data, the default of None behaves like ``copy=True``. For DataFrame or 2d ndarray input, the default of None behaves like ``copy=False``. If data is a dict containing one or more Series (possibly of different dtypes), ``copy=False`` will ensure that these inputs are not copied. .. versionchanged:: 1.3.0 See Also -------- DataFrame.from_records : Constructor from tuples, also record arrays. DataFrame.from_dict : From dicts of Series, arrays, or dicts. 
read_csv : Read a comma-separated values (csv) file into DataFrame. read_table : Read general delimited file into DataFrame. read_clipboard : Read text from clipboard into DataFrame. Notes ----- Please reference the :ref:`User Guide <basics.dataframe>` for more information. Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. >>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = pd.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from a dictionary including Series: >>> d = {'col1': [0, 1, 2, 3], 'col2': pd.Series([2, 3], index=[2, 3])} >>> pd.DataFrame(data=d, index=[0, 1, 2, 3]) col1 col2 0 0 NaN 1 1 NaN 2 2 2.0 3 3 3.0 Constructing DataFrame from numpy ndarray: >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ... columns=['a', 'b', 'c']) >>> df2 a b c 0 1 2 3 1 4 5 6 2 7 8 9 Constructing DataFrame from a numpy ndarray that has labeled columns: >>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], ... dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")]) >>> df3 = pd.DataFrame(data, columns=['c', 'a']) ... >>> df3 c a 0 3 1 1 6 4 2 9 7 Constructing DataFrame from dataclass: >>> from dataclasses import make_dataclass >>> Point = make_dataclass("Point", [("x", int), ("y", int)]) >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)]) x y 0 0 0 1 0 3 2 2 3 Constructing DataFrame from Series/DataFrame: >>> ser = pd.Series([1, 2, 3], index=["a", "b", "c"]) >>> df = pd.DataFrame(data=ser, index=["a", "c"]) >>> df 0 a 1 c 3 >>> df1 = pd.DataFrame([1, 2, 3], index=["a", "b", "c"], columns=["x"]) >>> df2 = pd.DataFrame(data=df1, index=["a", "c"]) >>> df2 x a 1 c 3 """ _internal_names_set = {"columns", "index"} | NDFrame._internal_names_set _typ = "dataframe" _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray) _accessors: set[str] = {"sparse"} _hidden_attrs: frozenset[str] = NDFrame._hidden_attrs | frozenset([]) _mgr: BlockManager | ArrayManager def _constructor(self) -> Callable[..., DataFrame]: return DataFrame _constructor_sliced: Callable[..., Series] = Series # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index: Axes | None = None, columns: Axes | None = None, dtype: Dtype | None = None, copy: bool | None = None, ) -> None: if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._mgr if not copy: # if not copying data, ensure to still return a shallow copy # to avoid the result sharing the same Manager data = data.copy(deep=False) if isinstance(data, (BlockManager, ArrayManager)): if using_copy_on_write(): data = data.copy(deep=False) # first check if a Manager is passed without any other arguments # -> use fastpath (without checking Manager type) if index is None and columns is None and dtype is None and not copy: # GH#33357 fastpath NDFrame.__init__(self, data) return manager = get_option("mode.data_manager") # GH47215 if index is not None and isinstance(index, set): raise ValueError("index cannot be a set") if columns is not None and isinstance(columns, set): raise ValueError("columns cannot be a set") if copy is None: if isinstance(data, dict): # retain pre-GH#38939 default behavior copy = True elif ( manager == "array" and isinstance(data, (np.ndarray, ExtensionArray)) and data.ndim == 2 ): # INFO(ArrayManager) by 
default copy the 2D input array to get # contiguous 1D arrays copy = True elif using_copy_on_write() and not isinstance( data, (Index, DataFrame, Series) ): copy = True else: copy = False if data is None: index = index if index is not None else default_index(0) columns = columns if columns is not None else default_index(0) dtype = dtype if dtype is not None else pandas_dtype(object) data = [] if isinstance(data, (BlockManager, ArrayManager)): mgr = self._init_mgr( data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy ) elif isinstance(data, dict): # GH#38939 de facto copy defaults to False only in non-dict cases mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager) elif isinstance(data, ma.MaskedArray): from numpy.ma import mrecords # masked recarray if isinstance(data, mrecords.MaskedRecords): raise TypeError( "MaskedRecords are not supported. Pass " "{name: data[name] for name in data.dtype.names} " "instead" ) # a masked array data = sanitize_masked_array(data) mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) elif isinstance(data, (np.ndarray, Series, Index, ExtensionArray)): if data.dtype.names: # i.e. numpy structured array data = cast(np.ndarray, data) mgr = rec_array_to_mgr( data, index, columns, dtype, copy, typ=manager, ) elif getattr(data, "name", None) is not None: # i.e. Series/Index with non-None name _copy = copy if using_copy_on_write() else True mgr = dict_to_mgr( # error: Item "ndarray" of "Union[ndarray, Series, Index]" has no # attribute "name" {data.name: data}, # type: ignore[union-attr] index, columns, dtype=dtype, typ=manager, copy=_copy, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) # For data is list-like, or Iterable (will consume into list) elif is_list_like(data): if not isinstance(data, abc.Sequence): if hasattr(data, "__array__"): # GH#44616 big perf improvement for e.g. 
pytorch tensor data = np.asarray(data) else: data = list(data) if len(data) > 0: if is_dataclass(data[0]): data = dataclasses_to_dicts(data) if not isinstance(data, np.ndarray) and treat_as_nested(data): # exclude ndarray as we may have cast it a few lines above if columns is not None: columns = ensure_index(columns) arrays, columns, index = nested_data_to_arrays( # error: Argument 3 to "nested_data_to_arrays" has incompatible # type "Optional[Collection[Any]]"; expected "Optional[Index]" data, columns, index, # type: ignore[arg-type] dtype, ) mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, typ=manager, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) else: mgr = dict_to_mgr( {}, index, columns if columns is not None else default_index(0), dtype=dtype, typ=manager, ) # For data is scalar else: if index is None or columns is None: raise ValueError("DataFrame constructor not properly called!") index = ensure_index(index) columns = ensure_index(columns) if not dtype: dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True) # For data is a scalar extension dtype if isinstance(dtype, ExtensionDtype): # TODO(EA2D): special case not needed with 2D EAs values = [ construct_1d_arraylike_from_scalar(data, len(index), dtype) for _ in range(len(columns)) ] mgr = arrays_to_mgr(values, columns, index, dtype=None, typ=manager) else: arr2d = construct_2d_arraylike_from_scalar( data, len(index), len(columns), dtype, copy, ) mgr = ndarray_to_mgr( arr2d, index, columns, dtype=arr2d.dtype, copy=False, typ=manager, ) # ensure correct Manager type according to settings mgr = mgr_to_mgr(mgr, typ=manager) NDFrame.__init__(self, mgr) # ---------------------------------------------------------------------- def __dataframe__( self, nan_as_null: bool = False, allow_copy: bool = True ) -> DataFrameXchg: """ Return the dataframe interchange object implementing the interchange protocol. Parameters ---------- nan_as_null : bool, default False Whether to tell the DataFrame to overwrite null values in the data with ``NaN`` (or ``NaT``). allow_copy : bool, default True Whether to allow memory copying when exporting. If set to False it would cause non-zero-copy exports to fail. Returns ------- DataFrame interchange object The object which consuming library can use to ingress the dataframe. Notes ----- Details on the interchange protocol: https://data-apis.org/dataframe-protocol/latest/index.html `nan_as_null` currently has no effect; once support for nullable extension dtypes is added, this value should be propagated to columns. """ from pandas.core.interchange.dataframe import PandasDataFrameXchg return PandasDataFrameXchg(self, nan_as_null, allow_copy) # ---------------------------------------------------------------------- def axes(self) -> list[Index]: """ Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns] def shape(self) -> tuple[int, int]: """ Return a tuple representing the dimensionality of the DataFrame. See Also -------- ndarray.shape : Tuple of array dimensions. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 
'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self.index), len(self.columns) def _is_homogeneous_type(self) -> bool: """ Whether all the columns in a DataFrame have the same type. Returns ------- bool See Also -------- Index._is_homogeneous_type : Whether the object has a single dtype. MultiIndex._is_homogeneous_type : Whether all the levels of a MultiIndex have the same dtype. Examples -------- >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type True >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type False Items with the same type but different sizes are considered different types. >>> DataFrame({ ... "A": np.array([1, 2], dtype=np.int32), ... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type False """ if isinstance(self._mgr, ArrayManager): return len({arr.dtype for arr in self._mgr.arrays}) == 1 if self._mgr.any_extension_types: return len({block.dtype for block in self._mgr.blocks}) == 1 else: return not self._is_mixed_type def _can_fast_transpose(self) -> bool: """ Can we transpose this DataFrame without creating any new array objects. """ if isinstance(self._mgr, ArrayManager): return False blocks = self._mgr.blocks if len(blocks) != 1: return False dtype = blocks[0].dtype # TODO(EA2D) special case would be unnecessary with 2D EAs return not is_1d_only_ea_dtype(dtype) def _values(self) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray: """ Analogue to ._values that may return a 2D ExtensionArray. """ mgr = self._mgr if isinstance(mgr, ArrayManager): if len(mgr.arrays) == 1 and not is_1d_only_ea_dtype(mgr.arrays[0].dtype): # error: Item "ExtensionArray" of "Union[ndarray, ExtensionArray]" # has no attribute "reshape" return mgr.arrays[0].reshape(-1, 1) # type: ignore[union-attr] return ensure_wrapped_if_datetimelike(self.values) blocks = mgr.blocks if len(blocks) != 1: return ensure_wrapped_if_datetimelike(self.values) arr = blocks[0].values if arr.ndim == 1: # non-2D ExtensionArray return self.values # more generally, whatever we allow in NDArrayBackedExtensionBlock arr = cast("np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray", arr) return arr.T # ---------------------------------------------------------------------- # Rendering Methods def _repr_fits_vertical_(self) -> bool: """ Check length against max_rows. """ max_rows = get_option("display.max_rows") return len(self) <= max_rows def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool: """ Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns. In case of non-interactive session, no boundaries apply. `ignore_width` is here so ipynb+HTML output can behave the way users expect. display.max_columns remains in effect. 
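(A rough lower bound, presumably the reasoning behind the check below: with terminal width ``width``, more than ``width // 2`` columns can never fit, since each rendered column needs at least two characters.)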
GH3541, GH3573 """ width, height = console.get_console_size() max_columns = get_option("display.max_columns") nb_columns = len(self.columns) # exceed max columns if (max_columns and nb_columns > max_columns) or ( (not ignore_width) and width and nb_columns > (width // 2) ): return False # used by repr_html under IPython notebook or scripts ignore terminal # dims if ignore_width or width is None or not console.in_interactive_session(): return True if get_option("display.width") is not None or console.in_ipython_frontend(): # check at least the column row for excessive width max_rows = 1 else: max_rows = get_option("display.max_rows") # when auto-detecting, so width=None and not in ipython front end # check whether repr fits horizontal by actually checking # the width of the rendered repr buf = StringIO() # only care about the stuff we'll actually print out # and to_string on entire frame may be expensive d = self if max_rows is not None: # unlimited rows # min of two, where one may be None d = d.iloc[: min(max_rows, len(d))] else: return True d.to_string(buf=buf) value = buf.getvalue() repr_width = max(len(line) for line in value.split("\n")) return repr_width < width def _info_repr(self) -> bool: """ True if the repr should show the info view. """ info_repr_option = get_option("display.large_repr") == "info" return info_repr_option and not ( self._repr_fits_horizontal_() and self._repr_fits_vertical_() ) def __repr__(self) -> str: """ Return a string representation for a particular DataFrame. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) return buf.getvalue() repr_params = fmt.get_dataframe_repr_params() return self.to_string(**repr_params) def _repr_html_(self) -> str | None: """ Return a html representation for a particular DataFrame. Mainly for IPython notebook. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) # need to escape the <class>, should be the first line. val = buf.getvalue().replace("<", r"&lt;", 1) val = val.replace(">", r"&gt;", 1) return f"<pre>{val}</pre>" if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") min_rows = get_option("display.min_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") formatter = fmt.DataFrameFormatter( self, columns=None, col_space=None, na_rep="NaN", formatters=None, float_format=None, sparsify=None, justify=None, index_names=True, header=True, index=True, bold_rows=True, escape=True, max_rows=max_rows, min_rows=min_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=".", ) return fmt.DataFrameRenderer(formatter).to_html(notebook=True) else: return None def to_string( self, buf: None = ..., columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> str: ... 
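# Hedged usage sketch spanning the two overload stubs here: buf=None (above)
# returns the rendered str, while a buffer or path (below) writes in place and
# returns None.
import pandas as pd
from io import StringIO

df = pd.DataFrame({"a": [1, 2]})
text = df.to_string()  # buf omitted -> rendered str
sio = StringIO()
df.to_string(buf=sio)  # buf given -> None; text is now in sio.getvalue()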
def to_string( self, buf: FilePath | WriteBuffer[str], columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> None: ... header_type="bool or sequence of str", header="Write out the column names. If a list of strings " "is given, it is assumed to be aliases for the " "column names", col_space_type="int, list or dict of int", col_space="The minimum width of each column. If a list of ints is given " "every integers corresponds with one column. If a dict is given, the key " "references the column, while the value defines the space to use.", ) def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[str] | None = None, col_space: int | list[int] | dict[Hashable, int] | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: fmt.FormattersType | None = None, float_format: fmt.FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool = False, decimal: str = ".", line_width: int | None = None, min_rows: int | None = None, max_colwidth: int | None = None, encoding: str | None = None, ) -> str | None: """ Render a DataFrame to a console-friendly tabular output. %(shared_params)s line_width : int, optional Width to wrap a line in characters. min_rows : int, optional The number of rows to display in the console in a truncated repr (when number of rows is above `max_rows`). max_colwidth : int, optional Max width to truncate each column in characters. By default, no limit. encoding : str, default "utf-8" Set character encoding. %(returns)s See Also -------- to_html : Convert DataFrame to HTML. Examples -------- >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]} >>> df = pd.DataFrame(d) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 """ from pandas import option_context with option_context("display.max_colwidth", max_colwidth): formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, min_rows=min_rows, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal, ) return fmt.DataFrameRenderer(formatter).to_string( buf=buf, encoding=encoding, line_width=line_width, ) # ---------------------------------------------------------------------- def style(self) -> Styler: """ Returns a Styler object. Contains methods for building a styled HTML representation of the DataFrame. See Also -------- io.formats.style.Styler : Helps style a DataFrame or Series according to the data with HTML and CSS. """ from pandas.io.formats.style import Styler return Styler(self) _shared_docs[ "items" ] = r""" Iterate over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. 
Yields ------ label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. Examples -------- >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.items(): ... print(f'label: {label}') ... print(f'content: {content}', sep='\n') ... label: species content: panda bear polar bear koala marsupial Name: species, dtype: object label: population content: panda 1864 polar 22000 koala 80000 Name: population, dtype: int64 """ def items(self) -> Iterable[tuple[Hashable, Series]]: if self.columns.is_unique and hasattr(self, "_item_cache"): for k in self.columns: yield k, self._get_item_cache(k) else: for i, k in enumerate(self.columns): yield k, self._ixs(i, axis=1) def iterrows(self) -> Iterable[tuple[Hashable, Series]]: """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : Series The data of the row as a Series. See Also -------- DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. """ columns = self.columns klass = self._constructor_sliced using_cow = using_copy_on_write() for k, v in zip(self.index, self.values): s = klass(v, index=columns, name=k).__finalize__(self) if using_cow and self._mgr.is_single_block: s._mgr.add_references(self._mgr) # type: ignore[arg-type] yield k, s def itertuples( self, index: bool = True, name: str | None = "Pandas" ) -> Iterable[tuple[Any, ...]]: """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "Pandas" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. 
Examples -------- >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... Pandas(Index='dog', num_legs=4, num_wings=0) Pandas(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... Pandas(num_legs=4, num_wings=0) Pandas(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2) """ arrays = [] fields = list(self.columns) if index: arrays.append(self.index) fields.insert(0, "Index") # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) if name is not None: # https://github.com/python/mypy/issues/9046 # error: namedtuple() expects a string literal as the first argument itertuple = collections.namedtuple( # type: ignore[misc] name, fields, rename=True ) return map(itertuple._make, zip(*arrays)) # fallback to regular tuples return zip(*arrays) def __len__(self) -> int: """ Returns length of info axis, but here we use the index. """ return len(self.index) def dot(self, other: Series) -> Series: ... def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame: ... def dot(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of an other Series, DataFrame or a numpy array. It can also be called using ``self @ other`` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the matrix product with. Returns ------- Series or DataFrame If other is a Series, return the matrix product between self and other as a Series. If other is a DataFrame or a numpy.array, return the matrix product of self and other in a DataFrame of a np.array. See Also -------- Series.dot: Similar method for Series. Notes ----- The dimensions of DataFrame and other must be compatible in order to compute the matrix multiplication. In addition, the column names of DataFrame and the index of other must contain the same values, as they will be aligned prior to the multiplication. The dot method for Series computes the inner product, instead of the matrix product here. Examples -------- Here we multiply a DataFrame with a Series. >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) >>> s = pd.Series([1, 1, 2, 1]) >>> df.dot(s) 0 -4 1 5 dtype: int64 Here we multiply a DataFrame with another DataFrame. >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(other) 0 1 0 1 4 1 2 2 Note that the dot method give the same result as @ >>> df @ other 0 1 0 1 4 1 2 2 The dot method works also if other is an np.array. >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(arr) 0 1 0 1 4 1 2 2 Note how shuffling of the objects does not change the result. 
>>> s2 = s.reindex([1, 0, 2, 3]) >>> df.dot(s2) 0 -4 1 5 dtype: int64 """ if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) if len(common) > len(self.columns) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(columns=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right._values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: raise ValueError( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, DataFrame): return self._constructor( np.dot(lvals, rvals), index=left.index, columns=other.columns, copy=False, ) elif isinstance(other, Series): return self._constructor_sliced( np.dot(lvals, rvals), index=left.index, copy=False ) elif isinstance(rvals, (np.ndarray, Index)): result = np.dot(lvals, rvals) if result.ndim == 2: return self._constructor(result, index=left.index, copy=False) else: return self._constructor_sliced(result, index=left.index, copy=False) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other: Series) -> Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(other) def __rmatmul__(self, other) -> DataFrame: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ try: return self.T.dot(np.transpose(other)).T except ValueError as err: if "shape mismatch" not in str(err): raise # GH#21581 give exception message for original shapes msg = f"shapes {np.shape(other)} and {self.shape} not aligned" raise ValueError(msg) from err # ---------------------------------------------------------------------- # IO methods (to / from other formats) def from_dict( cls, data: dict, orient: str = "columns", dtype: Dtype | None = None, columns: Axes | None = None, ) -> DataFrame: """ Construct DataFrame from dict of array-like or dicts. Creates DataFrame object from dictionary by columns or by index allowing dtype specification. Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. orient : {'columns', 'index', 'tight'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. If 'tight', assume a dict with keys ['index', 'columns', 'data', 'index_names', 'column_names']. .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument dtype : dtype, default None Data type to force after DataFrame construction, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'`` or ``orient='tight'``. Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from structured ndarray, sequence of tuples or dicts, or DataFrame. DataFrame : DataFrame object creation using constructor. DataFrame.to_dict : Convert the DataFrame to a dictionary. 
Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data, orient='index') 0 1 2 3 row_1 3 2 1 0 row_2 a b c d When using the 'index' orientation, the column names can be specified manually: >>> pd.DataFrame.from_dict(data, orient='index', ... columns=['A', 'B', 'C', 'D']) A B C D row_1 3 2 1 0 row_2 a b c d Specify ``orient='tight'`` to create the DataFrame using a 'tight' format: >>> data = {'index': [('a', 'b'), ('a', 'c')], ... 'columns': [('x', 1), ('y', 2)], ... 'data': [[1, 3], [2, 4]], ... 'index_names': ['n1', 'n2'], ... 'column_names': ['z1', 'z2']} >>> pd.DataFrame.from_dict(data, orient='tight') z1 x y z2 1 2 n1 n2 a b 1 3 c 2 4 """ index = None orient = orient.lower() if orient == "index": if len(data) > 0: # TODO speed up Series case if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: index = list(data.keys()) # error: Incompatible types in assignment (expression has type # "List[Any]", variable has type "Dict[Any, Any]") data = list(data.values()) # type: ignore[assignment] elif orient in ("columns", "tight"): if columns is not None: raise ValueError(f"cannot use columns parameter with orient='{orient}'") else: # pragma: no cover raise ValueError( f"Expected 'index', 'columns' or 'tight' for orient parameter. " f"Got '{orient}' instead" ) if orient != "tight": return cls(data, index=index, columns=columns, dtype=dtype) else: realdata = data["data"] def create_index(indexlist, namelist): index: Index if len(namelist) > 1: index = MultiIndex.from_tuples(indexlist, names=namelist) else: index = Index(indexlist, name=namelist[0]) return index index = create_index(data["index"], data["index_names"]) columns = create_index(data["columns"], data["column_names"]) return cls(realdata, index=index, columns=columns, dtype=dtype) def to_numpy( self, dtype: npt.DTypeLike | None = None, copy: bool = False, na_value: object = lib.no_default, ) -> np.ndarray: """ Convert the DataFrame to a NumPy array. By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the results dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray`. copy : bool, default False Whether to ensure that the returned value is not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. na_value : Any, optional The value to use for missing values. The default value depends on `dtype` and the dtypes of the DataFrame columns. .. versionadded:: 1.1.0 Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogeneous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. 
, 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) """ if dtype is not None: dtype = np.dtype(dtype) result = self._mgr.as_array(dtype=dtype, copy=copy, na_value=na_value) if result.dtype is not dtype: result = np.array(result, dtype=dtype, copy=False) return result def _create_data_for_split_and_tight_to_dict( self, are_all_object_dtype_cols: bool, object_dtype_indices: list[int] ) -> list: """ Simple helper method to create data for to ``to_dict(orient="split")`` and ``to_dict(orient="tight")`` to create the main output data """ if are_all_object_dtype_cols: data = [ list(map(maybe_box_native, t)) for t in self.itertuples(index=False, name=None) ] else: data = [list(t) for t in self.itertuples(index=False, name=None)] if object_dtype_indices: # If we have object_dtype_cols, apply maybe_box_naive after list # comprehension for perf for row in data: for i in object_dtype_indices: row[i] = maybe_box_native(row[i]) return data def to_dict( self, orient: Literal["dict", "list", "series", "split", "tight", "index"] = ..., into: type[dict] = ..., ) -> dict: ... def to_dict(self, orient: Literal["records"], into: type[dict] = ...) -> list[dict]: ... def to_dict( self, orient: Literal[ "dict", "list", "series", "split", "tight", "records", "index" ] = "dict", into: type[dict] = dict, index: bool = True, ) -> dict | list[dict]: """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'tight' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values], 'index_names' -> [index.names], 'column_names' -> [column.names]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. index : bool, default True Whether to include the index item (and index_names item if `orient` is 'tight') in the returned dictionary. Can only be ``False`` when `orient` is 'split' or 'tight'. .. versionadded:: 2.0.0 Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. See Also -------- DataFrame.from_dict: Create a DataFrame from a dictionary. DataFrame.to_json: Convert a DataFrame to JSON format. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df.to_dict() {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}} You can specify the return orientation. 
>>> df.to_dict('series') {'col1': row1 1 row2 2 Name: col1, dtype: int64, 'col2': row1 0.50 row2 0.75 Name: col2, dtype: float64} >>> df.to_dict('split') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]} >>> df.to_dict('records') [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}] >>> df.to_dict('index') {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} >>> df.to_dict('tight') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]], 'index_names': [None], 'column_names': [None]} You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}), defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})] """ from pandas.core.methods.to_dict import to_dict return to_dict(self, orient, into, index) def to_gbq( self, destination_table: str, project_id: str | None = None, chunksize: int | None = None, reauth: bool = False, if_exists: str = "fail", auth_local_webserver: bool = True, table_schema: list[dict[str, str]] | None = None, location: str | None = None, progress_bar: bool = True, credentials=None, ) -> None: """ Write a DataFrame to a Google BigQuery table. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. Parameters ---------- destination_table : str Name of table to be written, in the form ``dataset.tablename``. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. chunksize : int, optional Number of rows to be inserted in each chunk from the dataframe. Set to ``None`` to load the whole dataframe at once. reauth : bool, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. if_exists : str, default 'fail' Behavior when the destination table exists. Value can be one of: ``'fail'`` If table exists raise pandas_gbq.gbq.TableCreationError. ``'replace'`` If table exists, drop it, recreate it, and insert data. ``'append'`` If table exists, insert data. Create if does not exist. auth_local_webserver : bool, default True Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. _console flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. .. versionchanged:: 1.5.0 Default value is changed to ``True``. Google has deprecated the ``auth_local_webserver = False`` `"out of band" (copy-paste) flow <https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html?m=1#disallowed-oob>`_. table_schema : list of dicts, optional List of BigQuery table fields to which according DataFrame columns conform to, e.g. ``[{'name': 'col1', 'type': 'STRING'},...]``. 
If schema is not provided, it will be generated according to dtypes of DataFrame columns. See BigQuery API documentation on available names of a field. *New in version 0.3.1 of pandas-gbq*. location : str, optional Location where the load job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of the target dataset. *New in version 0.5.0 of pandas-gbq*. progress_bar : bool, default True Use the library `tqdm` to show the progress bar for the upload, chunk by chunk. *New in version 0.5.0 of pandas-gbq*. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. *New in version 0.8.0 of pandas-gbq*. See Also -------- pandas_gbq.to_gbq : This function in the pandas-gbq library. read_gbq : Read a DataFrame from Google BigQuery. """ from pandas.io import gbq gbq.to_gbq( self, destination_table, project_id=project_id, chunksize=chunksize, reauth=reauth, if_exists=if_exists, auth_local_webserver=auth_local_webserver, table_schema=table_schema, location=location, progress_bar=progress_bar, credentials=credentials, ) def from_records( cls, data, index=None, exclude=None, columns=None, coerce_float: bool = False, nrows: int | None = None, ) -> DataFrame: """ Convert structured or record ndarray to DataFrame. Creates a DataFrame object from a structured ndarray, sequence of tuples or dicts, or DataFrame. Parameters ---------- data : structured ndarray, sequence of tuples or dicts, or DataFrame Structured input data. index : str, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use. exclude : sequence, default None Columns or fields to exclude. columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns). coerce_float : bool, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. nrows : int, default None Number of rows to read if data is an iterator. Returns ------- DataFrame See Also -------- DataFrame.from_dict : DataFrame from dict of array-like or dicts. DataFrame : DataFrame object creation using constructor. Examples -------- Data can be provided as a structured ndarray: >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')], ... dtype=[('col_1', 'i4'), ('col_2', 'U1')]) >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of dicts: >>> data = [{'col_1': 3, 'col_2': 'a'}, ... {'col_1': 2, 'col_2': 'b'}, ... {'col_1': 1, 'col_2': 'c'}, ... 
{'col_1': 0, 'col_2': 'd'}] >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of tuples with corresponding columns: >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')] >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2']) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d """ if isinstance(data, DataFrame): if columns is not None: if is_scalar(columns): columns = [columns] data = data[columns] if index is not None: data = data.set_index(index) if exclude is not None: data = data.drop(columns=exclude) return data.copy(deep=False) result_index = None # Make a copy of the input columns so we can modify it if columns is not None: columns = ensure_index(columns) def maybe_reorder( arrays: list[ArrayLike], arr_columns: Index, columns: Index, index ) -> tuple[list[ArrayLike], Index, Index | None]: """ If our desired 'columns' do not match the data's pre-existing 'arr_columns', we re-order our arrays. This is like a pre-emptive (cheap) reindex. """ if len(arrays): length = len(arrays[0]) else: length = 0 result_index = None if len(arrays) == 0 and index is None and length == 0: result_index = default_index(0) arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, length) return arrays, arr_columns, result_index if is_iterator(data): if nrows == 0: return cls() try: first_row = next(data) except StopIteration: return cls(index=index, columns=columns) dtype = None if hasattr(first_row, "dtype") and first_row.dtype.names: dtype = first_row.dtype values = [first_row] if nrows is None: values += data else: values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) else: data = values if isinstance(data, dict): if columns is None: columns = arr_columns = ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] arr_columns_list = [] for k, v in data.items(): if k in columns: arr_columns_list.append(k) arrays.append(v) arr_columns = Index(arr_columns_list) arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = to_arrays(data, columns) arr_columns = columns else: arrays, arr_columns = to_arrays(data, columns) if coerce_float: for i, arr in enumerate(arrays): if arr.dtype == object: # error: Argument 1 to "maybe_convert_objects" has # incompatible type "Union[ExtensionArray, ndarray]"; # expected "ndarray" arrays[i] = lib.maybe_convert_objects( arr, # type: ignore[arg-type] try_float=True, ) arr_columns = ensure_index(arr_columns) if columns is None: columns = arr_columns else: arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) if exclude is None: exclude = set() else: exclude = set(exclude) if index is not None: if isinstance(index, str) or not hasattr(index, "__iter__"): i = columns.get_loc(index) exclude.add(index) if len(arrays) > 0: result_index = Index(arrays[i], name=index) else: result_index = Index([], name=index) else: try: index_data = [arrays[arr_columns.get_loc(field)] for field in index] except (KeyError, TypeError): # raised by get_loc, see GH#29258 result_index = index else: result_index = ensure_index_from_sequences(index_data, names=index) exclude.update(index) if any(exclude): arr_exclude = [x for x in exclude if x in arr_columns] to_remove = [arr_columns.get_loc(col) for col in arr_exclude] arrays = [v for i, v in enumerate(arrays) if i not in to_remove] columns = columns.drop(exclude) manager = 
get_option("mode.data_manager") mgr = arrays_to_mgr(arrays, columns, result_index, typ=manager) return cls(mgr) def to_records( self, index: bool = True, column_dtypes=None, index_dtypes=None ) -> np.recarray: """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. column_dtypes : str, type, dict, default None If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) If the DataFrame index has no label then the recarray field name is set to 'index'. If the index has a label then this is used as the field name: >>> df.index = df.index.rename("I") >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')]) As well as for the index: >>> df.to_records(index_dtypes="<S2") rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')]) >>> index_dtypes = f"<S{df.index.str.len().max()}" >>> df.to_records(index_dtypes=index_dtypes) rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) """ if index: ix_vals = [ np.asarray(self.index.get_level_values(i)) for i in range(self.index.nlevels) ] arrays = ix_vals + [ np.asarray(self.iloc[:, i]) for i in range(len(self.columns)) ] index_names = list(self.index.names) if isinstance(self.index, MultiIndex): index_names = com.fill_missing_names(index_names) elif index_names[0] is None: index_names = ["index"] names = [str(name) for name in itertools.chain(index_names, self.columns)] else: arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))] names = [str(c) for c in self.columns] index_names = [] index_len = len(index_names) formats = [] for i, v in enumerate(arrays): index_int = i # When the names and arrays are collected, we # first collect those in the DataFrame's index, # followed by those in its columns. # # Thus, the total length of the array is: # len(index_names) + len(DataFrame.columns). # # This check allows us to see whether we are # handling a name / array in the index or column. 
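            #
            # For example (an illustrative call, not taken from the
            # docstring above):
            #
            #     df.to_records(column_dtypes={"A": "<i4", 1: "<f4"})
            #
            # resolves column "A" by name and the second column by its
            # position; any column matched by neither mapping falls back
            # to its array's own dtype below.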
if index_int < index_len: dtype_mapping = index_dtypes name = index_names[index_int] else: index_int -= index_len dtype_mapping = column_dtypes name = self.columns[index_int] # We have a dictionary, so we get the data type # associated with the index or column (which can # be denoted by its name in the DataFrame or its # position in DataFrame's array of indices or # columns, whichever is applicable. if is_dict_like(dtype_mapping): if name in dtype_mapping: dtype_mapping = dtype_mapping[name] elif index_int in dtype_mapping: dtype_mapping = dtype_mapping[index_int] else: dtype_mapping = None # If no mapping can be found, use the array's # dtype attribute for formatting. # # A valid dtype must either be a type or # string naming a type. if dtype_mapping is None: formats.append(v.dtype) elif isinstance(dtype_mapping, (type, np.dtype, str)): # error: Argument 1 to "append" of "list" has incompatible # type "Union[type, dtype[Any], str]"; expected "dtype[Any]" formats.append(dtype_mapping) # type: ignore[arg-type] else: element = "row" if i < index_len else "column" msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}" raise ValueError(msg) return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats}) def _from_arrays( cls, arrays, columns, index, dtype: Dtype | None = None, verify_integrity: bool = True, ) -> DataFrame: """ Create DataFrame from a list of arrays corresponding to the columns. Parameters ---------- arrays : list-like of arrays Each array in the list corresponds to one column, in order. columns : list-like, Index The column names for the resulting DataFrame. index : list-like, Index The rows labels for the resulting DataFrame. dtype : dtype, optional Optional dtype to enforce for all arrays. verify_integrity : bool, default True Validate and homogenize all input. If set to False, it is assumed that all elements of `arrays` are actual arrays how they will be stored in a block (numpy ndarray or ExtensionArray), have the same length as and are aligned with the index, and that `columns` and `index` are ensured to be an Index object. Returns ------- DataFrame """ if dtype is not None: dtype = pandas_dtype(dtype) manager = get_option("mode.data_manager") columns = ensure_index(columns) if len(columns) != len(arrays): raise ValueError("len(columns) must match len(arrays)") mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, verify_integrity=verify_integrity, typ=manager, ) return cls(mgr) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path", ) def to_stata( self, path: FilePath | WriteBuffer[bytes], *, convert_dates: dict[Hashable, str] | None = None, write_index: bool = True, byteorder: str | None = None, time_stamp: datetime.datetime | None = None, data_label: str | None = None, variable_labels: dict[Hashable, str] | None = None, version: int | None = 114, convert_strl: Sequence[Hashable] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, value_labels: dict[Hashable, dict[float, str]] | None = None, ) -> None: """ Export DataFrame object to Stata dta format. Writes the DataFrame to a Stata dataset file. "dta" files contain a Stata dataset. Parameters ---------- path : str, path object, or buffer String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. 
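            For example, ``{{'sale_date': 'td'}}`` would store ``sale_date``
            with Stata's daily-date type (``sale_date`` is an illustrative
            column name, not one from the examples below).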
Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information. write_index : bool Write the index to Stata dataset. byteorder : str Can be ">", "<", "little", or "big". default is `sys.byteorder`. time_stamp : datetime A datetime to use as file creation date. Default is the current time. data_label : str, optional A label for the data set. Must be 80 characters or smaller. variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. version : {{114, 117, 118, 119, None}}, default 114 Version to use in the output dta file. Set to None to let pandas decide between 118 or 119 formats depending on the number of columns in the frame. Version 114 can be read by Stata 10 and later. Version 117 can be read by Stata 13 or later. Version 118 is supported in Stata 14 and later. Version 119 is supported in Stata 15 and later. Version 114 limits string variables to 244 characters or fewer while versions 117 and later allow strings with lengths up to 2,000,000 characters. Versions 118 and 119 support Unicode characters, and version 119 supports more than 32,767 variables. Version 119 should usually only be used when the number of variables exceeds the capacity of dta format 118. Exporting smaller datasets in format 119 may have unintended consequences, and, as of November 2020, Stata SE cannot read version 119 files. convert_strl : list, optional List of column names to convert to string columns to Stata StrL format. Only available if version is 117. Storing strings in the StrL format can produce smaller dta files if strings have more than 8 characters and values are repeated. {compression_options} .. versionadded:: 1.1.0 .. versionchanged:: 1.4.0 Zstandard support. {storage_options} .. versionadded:: 1.2.0 value_labels : dict of dicts Dictionary containing columns as keys and dictionaries of column value to labels as values. Labels for a single variable must be 32,000 characters or smaller. .. versionadded:: 1.4.0 Raises ------ NotImplementedError * If datetimes contain timezone information * Column dtype is not representable in Stata ValueError * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters See Also -------- read_stata : Import Stata data files. io.stata.StataWriter : Low-level writer for Stata data files. io.stata.StataWriter117 : Low-level writer for version 117 files. Examples -------- >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], ... 
'speed': [350, 18, 361, 15]}}) >>> df.to_stata('animals.dta') # doctest: +SKIP """ if version not in (114, 117, 118, 119, None): raise ValueError("Only formats 114, 117, 118 and 119 are supported.") if version == 114: if convert_strl is not None: raise ValueError("strl is not supported in format 114") from pandas.io.stata import StataWriter as statawriter elif version == 117: # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriter117 as statawriter, ) else: # versions 118 and 119 # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriterUTF8 as statawriter, ) kwargs: dict[str, Any] = {} if version is None or version >= 117: # strl conversion is only supported >= 117 kwargs["convert_strl"] = convert_strl if version is None or version >= 118: # Specifying the version is only supported for UTF8 (118 or 119) kwargs["version"] = version writer = statawriter( path, self, convert_dates=convert_dates, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, write_index=write_index, variable_labels=variable_labels, compression=compression, storage_options=storage_options, value_labels=value_labels, **kwargs, ) writer.write_file() def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None: """ Write a DataFrame to the binary Feather format. Parameters ---------- path : str, path object, file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. If a string or a path, it will be used as Root Directory path when writing a partitioned dataset. **kwargs : Additional keywords passed to :func:`pyarrow.feather.write_feather`. Starting with pyarrow 0.17, this includes the `compression`, `compression_level`, `chunksize` and `version` keywords. .. versionadded:: 1.1.0 Notes ----- This function writes the dataframe as a `feather file <https://arrow.apache.org/docs/python/feather.html>`_. Requires a default index. For saving the DataFrame with your custom index use a method that supports custom indices e.g. `to_parquet`. """ from pandas.io.feather_format import to_feather to_feather(self, path, **kwargs) Series.to_markdown, klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples="""Examples -------- >>> df = pd.DataFrame( ... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]} ... ) >>> print(df.to_markdown()) | | animal_1 | animal_2 | |---:|:-----------|:-----------| | 0 | elk | dog | | 1 | pig | quetzal | Output markdown with a tabulate option. 
        >>> print(df.to_markdown(tablefmt="grid"))
        +----+------------+------------+
        |    | animal_1   | animal_2   |
        +====+============+============+
        |  0 | elk        | dog        |
        +----+------------+------------+
        |  1 | pig        | quetzal    |
        +----+------------+------------+""",
    )
    def to_markdown(
        self,
        buf: FilePath | WriteBuffer[str] | None = None,
        mode: str = "wt",
        index: bool = True,
        storage_options: StorageOptions = None,
        **kwargs,
    ) -> str | None:
        if "showindex" in kwargs:
            raise ValueError("Pass 'index' instead of 'showindex'")

        kwargs.setdefault("headers", "keys")
        kwargs.setdefault("tablefmt", "pipe")
        kwargs.setdefault("showindex", index)
        tabulate = import_optional_dependency("tabulate")
        result = tabulate.tabulate(self, **kwargs)
        if buf is None:
            return result

        with get_handle(buf, mode, storage_options=storage_options) as handles:
            handles.handle.write(result)
        return None

    @overload
    def to_parquet(
        self,
        path: None = ...,
        engine: str = ...,
        compression: str | None = ...,
        index: bool | None = ...,
        partition_cols: list[str] | None = ...,
        storage_options: StorageOptions = ...,
        **kwargs,
    ) -> bytes:
        ...

    @overload
    def to_parquet(
        self,
        path: FilePath | WriteBuffer[bytes],
        engine: str = ...,
        compression: str | None = ...,
        index: bool | None = ...,
        partition_cols: list[str] | None = ...,
        storage_options: StorageOptions = ...,
        **kwargs,
    ) -> None:
        ...

    @doc(storage_options=_shared_docs["storage_options"])
    def to_parquet(
        self,
        path: FilePath | WriteBuffer[bytes] | None = None,
        engine: str = "auto",
        compression: str | None = "snappy",
        index: bool | None = None,
        partition_cols: list[str] | None = None,
        storage_options: StorageOptions = None,
        **kwargs,
    ) -> bytes | None:
        """
        Write a DataFrame to the binary parquet format.

        This function writes the dataframe as a `parquet file
        <https://parquet.apache.org/>`_. You can choose different parquet
        backends, and have the option of compression. See
        :ref:`the user guide <io.parquet>` for more details.

        Parameters
        ----------
        path : str, path object, file-like object, or None, default None
            String, path object (implementing ``os.PathLike[str]``), or file-like
            object implementing a binary ``write()`` function. If None, the result
            is returned as bytes. If a string or path, it will be used as Root
            Directory path when writing a partitioned dataset.

            .. versionchanged:: 1.2.0

            Previously this was "fname"

        engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
            Parquet library to use. If 'auto', then the option
            ``io.parquet.engine`` is used. The default ``io.parquet.engine``
            behavior is to try 'pyarrow', falling back to 'fastparquet' if
            'pyarrow' is unavailable.
        compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy'
            Name of the compression to use. Use ``None`` for no compression.
        index : bool, default None
            If ``True``, include the dataframe's index(es) in the file output.
            If ``False``, they will not be written to the file.
            If ``None``, similar to ``True`` the dataframe's index(es)
            will be saved. However, instead of being saved as values,
            the RangeIndex will be stored as a range in the metadata so it
            doesn't require much space and is faster. Other indexes will
            be included as columns in the file output.
        partition_cols : list, optional, default None
            Column names by which to partition the dataset.
            Columns are partitioned in the order they are given.
            Must be None if path is not a string.
        {storage_options}

            .. versionadded:: 1.2.0

        **kwargs
            Additional arguments passed to the parquet library. See
            :ref:`pandas io <io.parquet>` for more details.

        Returns
        -------
        bytes if no path argument is provided else None

        See Also
        --------
        read_parquet : Read a parquet file.
DataFrame.to_orc : Write an orc file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- This function requires either the `fastparquet <https://pypi.org/project/fastparquet>`_ or `pyarrow <https://arrow.apache.org/docs/python/>`_ library. Examples -------- >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}}) >>> df.to_parquet('df.parquet.gzip', ... compression='gzip') # doctest: +SKIP >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP col1 col2 0 1 3 1 2 4 If you want to get a buffer to the parquet content you can use a io.BytesIO object, as long as you don't use partition_cols, which creates multiple files. >>> import io >>> f = io.BytesIO() >>> df.to_parquet(f) >>> f.seek(0) 0 >>> content = f.read() """ from pandas.io.parquet import to_parquet return to_parquet( self, path, engine, compression=compression, index=index, partition_cols=partition_cols, storage_options=storage_options, **kwargs, ) def to_orc( self, path: FilePath | WriteBuffer[bytes] | None = None, *, engine: Literal["pyarrow"] = "pyarrow", index: bool | None = None, engine_kwargs: dict[str, Any] | None = None, ) -> bytes | None: """ Write a DataFrame to the ORC format. .. versionadded:: 1.5.0 Parameters ---------- path : str, file-like object or None, default None If a string, it will be used as Root Directory path when writing a partitioned dataset. By file-like object, we refer to objects with a write() method, such as a file handle (e.g. via builtin open function). If path is None, a bytes object is returned. engine : str, default 'pyarrow' ORC library to use. Pyarrow must be >= 7.0.0. index : bool, optional If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``infer`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. engine_kwargs : dict[str, Any] or None, default None Additional keyword arguments passed to :func:`pyarrow.orc.write_table`. Returns ------- bytes if no path argument is provided else None Raises ------ NotImplementedError Dtype of one or more columns is category, unsigned integers, interval, period or sparse. ValueError engine is not pyarrow. See Also -------- read_orc : Read a ORC file. DataFrame.to_parquet : Write a parquet file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- * Before using this function you should read the :ref:`user guide about ORC <io.orc>` and :ref:`install optional dependencies <install.warn_orc>`. * This function requires `pyarrow <https://arrow.apache.org/docs/python/>`_ library. * For supported dtypes please refer to `supported ORC features in Arrow <https://arrow.apache.org/docs/cpp/orc.html#data-types>`__. * Currently timezones in datetime columns are not preserved when a dataframe is converted into ORC files. 
Examples -------- >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [4, 3]}) >>> df.to_orc('df.orc') # doctest: +SKIP >>> pd.read_orc('df.orc') # doctest: +SKIP col1 col2 0 1 4 1 2 3 If you want to get a buffer to the orc content you can write it to io.BytesIO >>> import io >>> b = io.BytesIO(df.to_orc()) # doctest: +SKIP >>> b.seek(0) # doctest: +SKIP 0 >>> content = b.read() # doctest: +SKIP """ from pandas.io.orc import to_orc return to_orc( self, path, engine=engine, index=index, engine_kwargs=engine_kwargs ) def to_html( self, buf: FilePath | WriteBuffer[str], columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> None: ... def to_html( self, buf: None = ..., columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> str: ... header_type="bool", header="Whether to print column labels, default True", col_space_type="str or int, list or dict of int or str", col_space="The minimum width of each column in CSS length " "units. An int is assumed to be px units.", ) def to_html( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[Level] | None = None, col_space: ColspaceArgType | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: FormattersType | None = None, float_format: FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool | str = False, decimal: str = ".", bold_rows: bool = True, classes: str | list | tuple | None = None, escape: bool = True, notebook: bool = False, border: int | bool | None = None, table_id: str | None = None, render_links: bool = False, encoding: str | None = None, ) -> str | None: """ Render a DataFrame as an HTML table. %(shared_params)s bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.display.html.border``. 
        table_id : str, optional
            A CSS id is included in the opening `<table>` tag if specified.
        render_links : bool, default False
            Convert URLs to HTML links.
        encoding : str, default "utf-8"
            Set character encoding.

            .. versionadded:: 1.0
        %(returns)s
        See Also
        --------
        to_string : Convert DataFrame to a string.
        """
        if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS:
            raise ValueError("Invalid value for justify parameter")

        formatter = fmt.DataFrameFormatter(
            self,
            columns=columns,
            col_space=col_space,
            na_rep=na_rep,
            header=header,
            index=index,
            formatters=formatters,
            float_format=float_format,
            bold_rows=bold_rows,
            sparsify=sparsify,
            justify=justify,
            index_names=index_names,
            escape=escape,
            decimal=decimal,
            max_rows=max_rows,
            max_cols=max_cols,
            show_dimensions=show_dimensions,
        )
        # TODO: a generic formatter would be in DataFrameFormatter
        return fmt.DataFrameRenderer(formatter).to_html(
            buf=buf,
            classes=classes,
            notebook=notebook,
            border=border,
            encoding=encoding,
            table_id=table_id,
            render_links=render_links,
        )

    @doc(
        storage_options=_shared_docs["storage_options"],
        compression_options=_shared_docs["compression_options"] % "path_or_buffer",
    )
    def to_xml(
        self,
        path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
        index: bool = True,
        root_name: str | None = "data",
        row_name: str | None = "row",
        na_rep: str | None = None,
        attr_cols: list[str] | None = None,
        elem_cols: list[str] | None = None,
        namespaces: dict[str | None, str] | None = None,
        prefix: str | None = None,
        encoding: str = "utf-8",
        xml_declaration: bool | None = True,
        pretty_print: bool | None = True,
        parser: str | None = "lxml",
        stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = None,
        compression: CompressionOptions = "infer",
        storage_options: StorageOptions = None,
    ) -> str | None:
        """
        Render a DataFrame to an XML document.

        .. versionadded:: 1.3.0

        Parameters
        ----------
        path_or_buffer : str, path object, file-like object, or None, default None
            String, path object (implementing ``os.PathLike[str]``), or file-like
            object implementing a ``write()`` function. If None, the result is
            returned as a string.
        index : bool, default True
            Whether to include index in XML document.
        root_name : str, default 'data'
            The name of the root element in the XML document.
        row_name : str, default 'row'
            The name of the row element in the XML document.
        na_rep : str, optional
            Missing data representation.
        attr_cols : list-like, optional
            List of columns to write as attributes in row element.
            Hierarchical columns will be flattened with underscore
            delimiting the different levels.
        elem_cols : list-like, optional
            List of columns to write as children in row element. By default,
            all columns output as children of row element. Hierarchical
            columns will be flattened with underscore delimiting the
            different levels.
        namespaces : dict, optional
            All namespaces to be defined in root element. Keys of dict
            should be prefix names and values of dict corresponding URIs.
            Default namespaces should be given empty string key. For
            example, ::

                namespaces = {{"": "https://example.com"}}

        prefix : str, optional
            Namespace prefix to be used for every element and/or attribute
            in document. This should be one of the keys in ``namespaces``
            dict.
        encoding : str, default 'utf-8'
            Encoding of the resulting document.
        xml_declaration : bool, default True
            Whether to include the XML declaration at the start of the
            document.
        pretty_print : bool, default True
            Whether output should be pretty printed with indentation and
            line breaks.
parser : {{'lxml','etree'}}, default 'lxml' Parser module to use for building of tree. Only 'lxml' and 'etree' are supported. With 'lxml', the ability to use XSLT stylesheet is supported. stylesheet : str, path object or file-like object, optional A URL, file-like object, or a raw string containing an XSLT script used to transform the raw XML output. Script should use layout of elements and attributes from original output. This argument requires ``lxml`` to be installed. Only XSLT 1.0 scripts and not later versions is currently supported. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. {storage_options} Returns ------- None or str If ``io`` is None, returns the resulting XML format as a string. Otherwise returns None. See Also -------- to_json : Convert the pandas object to a JSON string. to_html : Convert DataFrame to a html. Examples -------- >>> df = pd.DataFrame({{'shape': ['square', 'circle', 'triangle'], ... 'degrees': [360, 360, 180], ... 'sides': [4, np.nan, 3]}}) >>> df.to_xml() # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row> <index>0</index> <shape>square</shape> <degrees>360</degrees> <sides>4.0</sides> </row> <row> <index>1</index> <shape>circle</shape> <degrees>360</degrees> <sides/> </row> <row> <index>2</index> <shape>triangle</shape> <degrees>180</degrees> <sides>3.0</sides> </row> </data> >>> df.to_xml(attr_cols=[ ... 'index', 'shape', 'degrees', 'sides' ... ]) # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row index="0" shape="square" degrees="360" sides="4.0"/> <row index="1" shape="circle" degrees="360"/> <row index="2" shape="triangle" degrees="180" sides="3.0"/> </data> >>> df.to_xml(namespaces={{"doc": "https://example.com"}}, ... prefix="doc") # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <doc:data xmlns:doc="https://example.com"> <doc:row> <doc:index>0</doc:index> <doc:shape>square</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides>4.0</doc:sides> </doc:row> <doc:row> <doc:index>1</doc:index> <doc:shape>circle</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides/> </doc:row> <doc:row> <doc:index>2</doc:index> <doc:shape>triangle</doc:shape> <doc:degrees>180</doc:degrees> <doc:sides>3.0</doc:sides> </doc:row> </doc:data> """ from pandas.io.formats.xml import ( EtreeXMLFormatter, LxmlXMLFormatter, ) lxml = import_optional_dependency("lxml.etree", errors="ignore") TreeBuilder: type[EtreeXMLFormatter] | type[LxmlXMLFormatter] if parser == "lxml": if lxml is not None: TreeBuilder = LxmlXMLFormatter else: raise ImportError( "lxml not found, please install or use the etree parser." 
) elif parser == "etree": TreeBuilder = EtreeXMLFormatter else: raise ValueError("Values for parser can only be lxml or etree.") xml_formatter = TreeBuilder( self, path_or_buffer=path_or_buffer, index=index, root_name=root_name, row_name=row_name, na_rep=na_rep, attr_cols=attr_cols, elem_cols=elem_cols, namespaces=namespaces, prefix=prefix, encoding=encoding, xml_declaration=xml_declaration, pretty_print=pretty_print, stylesheet=stylesheet, compression=compression, storage_options=storage_options, ) return xml_formatter.write_output() # ---------------------------------------------------------------------- def info( self, verbose: bool | None = None, buf: WriteBuffer[str] | None = None, max_cols: int | None = None, memory_usage: bool | str | None = None, show_counts: bool | None = None, ) -> None: info = DataFrameInfo( data=self, memory_usage=memory_usage, ) info.render( buf=buf, max_cols=max_cols, verbose=verbose, show_counts=show_counts, ) def memory_usage(self, index: bool = True, deep: bool = False) -> Series: """ Return the memory usage of each column in bytes. The memory usage can optionally include the contribution of the index and elements of `object` dtype. This value is displayed in `DataFrame.info` by default. This can be suppressed by setting ``pandas.options.display.memory_usage`` to False. Parameters ---------- index : bool, default True Specifies whether to include the memory usage of the DataFrame's index in returned Series. If ``index=True``, the memory usage of the index is the first item in the output. deep : bool, default False If True, introspect the data deeply by interrogating `object` dtypes for system-level memory consumption, and include it in the returned values. Returns ------- Series A Series whose index is the original column names and whose values is the memory usage of each column in bytes. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of an ndarray. Series.memory_usage : Bytes consumed by a Series. Categorical : Memory-efficient array for string values with many repeated values. DataFrame.info : Concise summary of a DataFrame. Notes ----- See the :ref:`Frequently Asked Questions <df-memory-usage>` for more details. Examples -------- >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t)) ... for t in dtypes]) >>> df = pd.DataFrame(data) >>> df.head() int64 float64 complex128 object bool 0 1 1.0 1.0+0.0j 1 True 1 1 1.0 1.0+0.0j 1 True 2 1 1.0 1.0+0.0j 1 True 3 1 1.0 1.0+0.0j 1 True 4 1 1.0 1.0+0.0j 1 True >>> df.memory_usage() Index 128 int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=False) int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 The memory footprint of `object` dtype columns is ignored by default: >>> df.memory_usage(deep=True) Index 128 int64 40000 float64 40000 complex128 80000 object 180000 bool 5000 dtype: int64 Use a Categorical for efficient storage of an object-dtype column with many repeated values. 
>>> df['object'].astype('category').memory_usage(deep=True) 5244 """ result = self._constructor_sliced( [c.memory_usage(index=False, deep=deep) for col, c in self.items()], index=self.columns, dtype=np.intp, ) if index: index_memory_usage = self._constructor_sliced( self.index.memory_usage(deep=deep), index=["Index"] ) result = index_memory_usage._append(result) return result def transpose(self, *args, copy: bool = False) -> DataFrame: """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. Parameters ---------- *args : tuple, optional Accepted for compatibility with NumPy. copy : bool, default False Whether to copy the data after transposing, even for DataFrames with a single dtype. Note that a copy is always required for mixed dtype DataFrames, or for DataFrames with any extension types. Returns ------- DataFrame The transposed DataFrame. See Also -------- numpy.transpose : Permute the dimensions of a given array. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the `object` dtype. In such a case, a copy of the data is always made. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = pd.DataFrame(data=d1) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T # or df1.transpose() >>> df1_transposed 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'name': ['Alice', 'Bob'], ... 'score': [9.5, 8], ... 'employed': [False, True], ... 'kids': [0, 0]} >>> df2 = pd.DataFrame(data=d2) >>> df2 name score employed kids 0 Alice 9.5 False 0 1 Bob 8.0 True 0 >>> df2_transposed = df2.T # or df2.transpose() >>> df2_transposed 0 1 name Alice Bob score 9.5 8.0 employed False True kids 0 0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the `object` dtype: >>> df2.dtypes name object score float64 employed bool kids int64 dtype: object >>> df2_transposed.dtypes 0 object 1 object dtype: object """ nv.validate_transpose(args, {}) # construct the args dtypes = list(self.dtypes) if self._can_fast_transpose: # Note: tests pass without this, but this improves perf quite a bit. new_vals = self._values.T if copy and not using_copy_on_write(): new_vals = new_vals.copy() result = self._constructor( new_vals, index=self.columns, columns=self.index, copy=False ) if using_copy_on_write() and len(self) > 0: result._mgr.add_references(self._mgr) # type: ignore[arg-type] elif ( self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]) ): # We have EAs with the same dtype. We can preserve that dtype in transpose. dtype = dtypes[0] arr_type = dtype.construct_array_type() values = self.values new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values] result = type(self)._from_arrays( new_values, index=self.columns, columns=self.index ) else: new_arr = self.values.T if copy and not using_copy_on_write(): new_arr = new_arr.copy() result = self._constructor( new_arr, index=self.columns, columns=self.index, # We already made a copy (more than one block) copy=False, ) return result.__finalize__(self, method="transpose") def T(self) -> DataFrame: """ The transpose of the DataFrame. 
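
        This is an accessor for :meth:`DataFrame.transpose`; ``df.T`` is
        equivalent to calling ``df.transpose()`` with no arguments.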
Returns ------- DataFrame The transposed DataFrame. See Also -------- DataFrame.transpose : Transpose index and columns. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.T 0 1 col1 1 2 col2 3 4 """ return self.transpose() # ---------------------------------------------------------------------- # Indexing Methods def _ixs(self, i: int, axis: AxisInt = 0) -> Series: """ Parameters ---------- i : int axis : int Returns ------- Series """ # irow if axis == 0: new_mgr = self._mgr.fast_xs(i) # if we are a copy, mark as such copy = isinstance(new_mgr.array, np.ndarray) and new_mgr.array.base is None result = self._constructor_sliced(new_mgr, name=self.index[i]).__finalize__( self ) result._set_is_copy(self, copy=copy) return result # icol else: label = self.columns[i] col_mgr = self._mgr.iget(i) result = self._box_col_values(col_mgr, i) # this is a cached value, mark it so result._set_as_cached(label, self) return result def _get_column_array(self, i: int) -> ArrayLike: """ Get the values of the i'th column (ndarray or ExtensionArray, as stored in the Block) Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ return self._mgr.iget_values(i) def _iter_column_arrays(self) -> Iterator[ArrayLike]: """ Iterate over the arrays of all columns in order. This returns the values as stored in the Block (ndarray or ExtensionArray). Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ for i in range(len(self.columns)): yield self._get_column_array(i) def _getitem_nocopy(self, key: list): """ Behaves like __getitem__, but returns a view in cases where __getitem__ would make a copy. """ # TODO(CoW): can be removed if/when we are always Copy-on-Write indexer = self.columns._get_indexer_strict(key, "columns")[1] new_axis = self.columns[indexer] new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=0, allow_dups=True, copy=False, only_slice=True, ) return self._constructor(new_mgr) def __getitem__(self, key): check_dict_or_set_indexers(key) key = lib.item_from_zerodim(key) key = com.apply_if_callable(key, self) if is_hashable(key) and not is_iterator(key): # is_iterator to exclude generator e.g. test_getitem_listlike # shortcut if the key is in columns is_mi = isinstance(self.columns, MultiIndex) # GH#45316 Return view if key is not duplicated # Only use drop_duplicates with duplicates for performance if not is_mi and ( self.columns.is_unique and key in self.columns or key in self.columns.drop_duplicates(keep=False) ): return self._get_item_cache(key) elif is_mi and self.columns.is_unique and key in self.columns: return self._getitem_multilevel(key) # Do we have a slicer (on rows)? if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") if isinstance(indexer, np.ndarray): # reachable with DatetimeIndex indexer = lib.maybe_indices_to_slice( indexer.astype(np.intp, copy=False), len(self) ) if isinstance(indexer, np.ndarray): # GH#43223 If we can not convert, use take return self.take(indexer, axis=0) return self._slice(indexer, axis=0) # Do we have a (boolean) DataFrame? if isinstance(key, DataFrame): return self.where(key) # Do we have a (boolean) 1d indexer? 
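        # e.g. df[df["col"] > 0] with a boolean Series, or a plain list or
        # ndarray of booleans; Series keys are aligned to self.index inside
        # _getitem_bool_array before the mask is applied.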
if com.is_bool_indexer(key): return self._getitem_bool_array(key) # We are left with two options: a single key, and a collection of keys, # We interpret tuples as collections only for non-MultiIndex is_single_key = isinstance(key, tuple) or not is_list_like(key) if is_single_key: if self.columns.nlevels > 1: return self._getitem_multilevel(key) indexer = self.columns.get_loc(key) if is_integer(indexer): indexer = [indexer] else: if is_iterator(key): key = list(key) indexer = self.columns._get_indexer_strict(key, "columns")[1] # take() does not accept boolean indexers if getattr(indexer, "dtype", None) == bool: indexer = np.where(indexer)[0] data = self._take_with_is_copy(indexer, axis=1) if is_single_key: # What does looking for a single key in a non-unique index return? # The behavior is inconsistent. It returns a Series, except when # - the key itself is repeated (test on data.shape, #9519), or # - we have a MultiIndex on columns (test on self.columns, #21309) if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex): # GH#26490 using data[key] can cause RecursionError return data._get_item_cache(key) return data def _getitem_bool_array(self, key): # also raises Exception if object array with NA values # warning here just in case -- previously __setitem__ was # reindexing but __getitem__ was not; it seems more reasonable to # go with the __setitem__ behavior since that is more consistent # with all other indexing behavior if isinstance(key, Series) and not key.index.equals(self.index): warnings.warn( "Boolean Series key will be reindexed to match DataFrame index.", UserWarning, stacklevel=find_stack_level(), ) elif len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}." ) # check_bool_indexer will throw exception if Series key cannot # be reindexed to match DataFrame rows key = check_bool_indexer(self.index, key) if key.all(): return self.copy(deep=None) indexer = key.nonzero()[0] return self._take_with_is_copy(indexer, axis=0) def _getitem_multilevel(self, key): # self.columns is a MultiIndex loc = self.columns.get_loc(key) if isinstance(loc, (slice, np.ndarray)): new_columns = self.columns[loc] result_columns = maybe_droplevels(new_columns, key) if self._is_mixed_type: result = self.reindex(columns=new_columns) result.columns = result_columns else: new_values = self._values[:, loc] result = self._constructor( new_values, index=self.index, columns=result_columns, copy=False ) if using_copy_on_write() and isinstance(loc, slice): result._mgr.add_references(self._mgr) # type: ignore[arg-type] result = result.__finalize__(self) # If there is only one column being returned, and its name is # either an empty string, or a tuple with an empty string as its # first element, then treat the empty string as a placeholder # and return the column as if the user had provided that empty # string in the key. If the result is a Series, exclude the # implied empty string from its name. if len(result.columns) == 1: # e.g. 
test_frame_getitem_multicolumn_empty_level, # test_frame_mixed_depth_get, test_loc_setitem_single_column_slice top = result.columns[0] if isinstance(top, tuple): top = top[0] if top == "": result = result[""] if isinstance(result, Series): result = self._constructor_sliced( result, index=self.index, name=key ) result._set_is_copy(self) return result else: # loc is neither a slice nor ndarray, so must be an int return self._ixs(loc, axis=1) def _get_value(self, index, col, takeable: bool = False) -> Scalar: """ Quickly retrieve single value at passed column and index. Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- scalar Notes ----- Assumes that both `self.index._index_as_unique` and `self.columns._index_as_unique`; Caller is responsible for checking. """ if takeable: series = self._ixs(col, axis=1) return series._values[index] series = self._get_item_cache(col) engine = self.index._engine if not isinstance(self.index, MultiIndex): # CategoricalIndex: Trying to use the engine fastpath may give incorrect # results if our categories are integers that dont match our codes # IntervalIndex: IntervalTree has no get_loc row = self.index.get_loc(index) return series._values[row] # For MultiIndex going through engine effectively restricts us to # same-length tuples; see test_get_set_value_no_partial_indexing loc = engine.get_loc(index) return series._values[loc] def isetitem(self, loc, value) -> None: """ Set the given value in the column with position `loc`. This is a positional analogue to ``__setitem__``. Parameters ---------- loc : int or sequence of ints Index position for the column. value : scalar or arraylike Value(s) for the column. Notes ----- ``frame.isetitem(loc, value)`` is an in-place method as it will modify the DataFrame in place (not returning a new object). In contrast to ``frame.iloc[:, i] = value`` which will try to update the existing values in place, ``frame.isetitem(loc, value)`` will not update the values of the column itself in place, it will instead insert a new array. In cases where ``frame.columns`` is unique, this is equivalent to ``frame[frame.columns[i]] = value``. 
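
        Examples
        --------
        Replace the second column by position:

        >>> df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
        >>> df.isetitem(1, [9, 10])
        >>> df
           A   B
        0  1   9
        1  2  10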
""" if isinstance(value, DataFrame): if is_scalar(loc): loc = [loc] for i, idx in enumerate(loc): arraylike = self._sanitize_column(value.iloc[:, i]) self._iset_item_mgr(idx, arraylike, inplace=False) return arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=False) def __setitem__(self, key, value): if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) key = com.apply_if_callable(key, self) # see if we can slice the rows if isinstance(key, slice): slc = self.index._convert_slice_indexer(key, kind="getitem") return self._setitem_slice(slc, value) if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2: self._setitem_frame(key, value) elif isinstance(key, (Series, np.ndarray, list, Index)): self._setitem_array(key, value) elif isinstance(value, DataFrame): self._set_item_frame_value(key, value) elif ( is_list_like(value) and not self.columns.is_unique and 1 < len(self.columns.get_indexer_for([key])) == len(value) ): # Column to set is duplicated self._setitem_array([key], value) else: # set column self._set_item(key, value) def _setitem_slice(self, key: slice, value) -> None: # NB: we can't just use self.loc[key] = value because that # operates on labels and we need to operate positional for # backwards-compat, xref GH#31469 self._check_setitem_copy() self.iloc[key] = value def _setitem_array(self, key, value): # also raises Exception if object array with NA values if com.is_bool_indexer(key): # bool indexer is indexing along rows if len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}!" ) key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] self._check_setitem_copy() if isinstance(value, DataFrame): # GH#39931 reindex since iloc does not align value = value.reindex(self.index.take(indexer)) self.iloc[indexer] = value else: # Note: unlike self.iloc[:, indexer] = value, this will # never try to overwrite values inplace if isinstance(value, DataFrame): check_key_length(self.columns, key, value) for k1, k2 in zip(key, value.columns): self[k1] = value[k2] elif not is_list_like(value): for col in key: self[col] = value elif isinstance(value, np.ndarray) and value.ndim == 2: self._iset_not_inplace(key, value) elif np.ndim(value) > 1: # list of lists value = DataFrame(value).values return self._setitem_array(key, value) else: self._iset_not_inplace(key, value) def _iset_not_inplace(self, key, value): # GH#39510 when setting with df[key] = obj with a list-like key and # list-like value, we iterate over those listlikes and set columns # one at a time. This is different from dispatching to # `self.loc[:, key]= value` because loc.__setitem__ may overwrite # data inplace, whereas this will insert new arrays. def igetitem(obj, i: int): # Note: we catch DataFrame obj before getting here, but # hypothetically would return obj.iloc[:, i] if isinstance(obj, np.ndarray): return obj[..., i] else: return obj[i] if self.columns.is_unique: if np.shape(value)[-1] != len(key): raise ValueError("Columns must be same length as key") for i, col in enumerate(key): self[col] = igetitem(value, i) else: ilocs = self.columns.get_indexer_non_unique(key)[0] if (ilocs < 0).any(): # key entries not in self.columns raise NotImplementedError if np.shape(value)[-1] != len(ilocs): raise ValueError("Columns must be same length as key") assert np.ndim(value) <= 2 orig_columns = self.columns # Using self.iloc[:, i] = ... 
may set values inplace, which # by convention we do not do in __setitem__ try: self.columns = Index(range(len(self.columns))) for i, iloc in enumerate(ilocs): self[iloc] = igetitem(value, i) finally: self.columns = orig_columns def _setitem_frame(self, key, value): # support boolean setting with DataFrame input, e.g. # df[df > df2] = 0 if isinstance(key, np.ndarray): if key.shape != self.shape: raise ValueError("Array conditional must be same shape as self") key = self._constructor(key, **self._construct_axes_dict(), copy=False) if key.size and not all(is_bool_dtype(dtype) for dtype in key.dtypes): raise TypeError( "Must pass DataFrame or 2-d ndarray with boolean values only" ) self._check_inplace_setting(value) self._check_setitem_copy() self._where(-key, value, inplace=True) def _set_item_frame_value(self, key, value: DataFrame) -> None: self._ensure_valid_index(value) # align columns if key in self.columns: loc = self.columns.get_loc(key) cols = self.columns[loc] len_cols = 1 if is_scalar(cols) or isinstance(cols, tuple) else len(cols) if len_cols != len(value.columns): raise ValueError("Columns must be same length as key") # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame if isinstance(self.columns, MultiIndex) and isinstance( loc, (slice, Series, np.ndarray, Index) ): cols_droplevel = maybe_droplevels(cols, key) if len(cols_droplevel) and not cols_droplevel.equals(value.columns): value = value.reindex(cols_droplevel, axis=1) for col, col_droplevel in zip(cols, cols_droplevel): self[col] = value[col_droplevel] return if is_scalar(cols): self[cols] = value[value.columns[0]] return # now align rows arraylike = _reindex_for_setitem(value, self.index) self._set_item_mgr(key, arraylike) return if len(value.columns) != 1: raise ValueError( "Cannot set a DataFrame with multiple columns to the single " f"column {key}" ) self[key] = value[value.columns[0]] def _iset_item_mgr( self, loc: int | slice | np.ndarray, value, inplace: bool = False ) -> None: # when called from _set_item_mgr loc can be anything returned from get_loc self._mgr.iset(loc, value, inplace=inplace) self._clear_item_cache() def _set_item_mgr(self, key, value: ArrayLike) -> None: try: loc = self._info_axis.get_loc(key) except KeyError: # This item wasn't present, just insert at end self._mgr.insert(len(self._info_axis), key, value) else: self._iset_item_mgr(loc, value) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _iset_item(self, loc: int, value) -> None: arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=True) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _set_item(self, key, value) -> None: """ Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. 
""" value = self._sanitize_column(value) if ( key in self.columns and value.ndim == 1 and not is_extension_array_dtype(value) ): # broadcast across multiple columns if necessary if not self.columns.is_unique or isinstance(self.columns, MultiIndex): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)).T self._set_item_mgr(key, value) def _set_value( self, index: IndexLabel, col, value: Scalar, takeable: bool = False ) -> None: """ Put single value at passed column and index. Parameters ---------- index : Label row label col : Label column label value : scalar takeable : bool, default False Sets whether or not index/col interpreted as indexers """ try: if takeable: icol = col iindex = cast(int, index) else: icol = self.columns.get_loc(col) iindex = self.index.get_loc(index) self._mgr.column_setitem(icol, iindex, value, inplace_only=True) self._clear_item_cache() except (KeyError, TypeError, ValueError, LossySetitemError): # get_loc might raise a KeyError for missing labels (falling back # to (i)loc will do expansion of the index) # column_setitem will do validation that may raise TypeError, # ValueError, or LossySetitemError # set using a non-recursive method & reset the cache if takeable: self.iloc[index, col] = value else: self.loc[index, col] = value self._item_cache.pop(col, None) except InvalidIndexError as ii_err: # GH48729: Seems like you are trying to assign a value to a # row when only scalar options are permitted raise InvalidIndexError( f"You can only assign a scalar value not a {type(value)}" ) from ii_err def _ensure_valid_index(self, value) -> None: """ Ensure that if we don't have an index, that we can create one from the passed value. """ # GH5632, make sure that we are a Series convertible if not len(self.index) and is_list_like(value) and len(value): if not isinstance(value, DataFrame): try: value = Series(value) except (ValueError, NotImplementedError, TypeError) as err: raise ValueError( "Cannot set a frame with no defined index " "and a value that cannot be converted to a Series" ) from err # GH31368 preserve name of index index_copy = value.index.copy() if self.index.name is not None: index_copy.name = self.index.name self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan) def _box_col_values(self, values: SingleDataManager, loc: int) -> Series: """ Provide boxed values for a column. """ # Lookup in columns so that if e.g. a str datetime was passed # we attach the Timestamp object as the name. 
name = self.columns[loc] klass = self._constructor_sliced # We get index=self.index bc values is a SingleDataManager return klass(values, name=name, fastpath=True).__finalize__(self) # ---------------------------------------------------------------------- # Lookup Caching def _clear_item_cache(self) -> None: self._item_cache.clear() def _get_item_cache(self, item: Hashable) -> Series: """Return the cached item, item represents a label indexer.""" if using_copy_on_write(): loc = self.columns.get_loc(item) return self._ixs(loc, axis=1) cache = self._item_cache res = cache.get(item) if res is None: # All places that call _get_item_cache have unique columns, # pending resolution of GH#33047 loc = self.columns.get_loc(item) res = self._ixs(loc, axis=1) cache[item] = res # for a chain res._is_copy = self._is_copy return res def _reset_cacher(self) -> None: # no-op for DataFrame pass def _maybe_cache_changed(self, item, value: Series, inplace: bool) -> None: """ The object has called back to us saying maybe it has changed. """ loc = self._info_axis.get_loc(item) arraylike = value._values old = self._ixs(loc, axis=1) if old._values is value._values and inplace: # GH#46149 avoid making unnecessary copies/block-splitting return self._mgr.iset(loc, arraylike, inplace=inplace) # ---------------------------------------------------------------------- # Unsorted def query(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> DataFrame: ... def query(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def query(self, expr: str, *, inplace: bool = ..., **kwargs) -> DataFrame | None: ... def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | None: """ Query the columns of a DataFrame with a boolean expression. Parameters ---------- expr : str The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. You can refer to column names that are not valid Python variable names by surrounding them in backticks. Thus, column names containing spaces or punctuations (besides underscores) or starting with digits must be surrounded by backticks. (For example, a column named "Area (cm^2)" would be referenced as ```Area (cm^2)```). Column names which are Python keywords (like "list", "for", "import", etc) cannot be used. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether to modify the DataFrame rather than creating a new one. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. Returns ------- DataFrame or None DataFrame resulting from the provided query expression or None if ``inplace=True``. See Also -------- eval : Evaluate a string describing operations on DataFrame columns. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. 
This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. Please note that Python keywords may not be used as identifiers. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. *Backtick quoted variables* Backtick quoted variables are parsed as literal Python code and are converted internally to a Python valid identifier. This can lead to the following problems. During parsing a number of disallowed characters inside the backtick quoted string are replaced by strings that are allowed as a Python identifier. These characters include all operators in Python, the space character, the question mark, the exclamation mark, the dollar sign, and the euro sign. For other characters that fall outside the ASCII range (U+0001..U+007F) and those that are not further specified in PEP 3131, the query parser will raise an error. This excludes whitespace different than the space character, but also the hashtag (as it is used for comments) and the backtick itself (backtick can also not be escaped). In a special case, quotes that make a pair around a backtick can confuse the parser. For example, ```it's` > `that's``` will raise an error, as it forms a quoted string (``'s > `that'``) with a backtick inside. See also the Python documentation about lexical analysis (https://docs.python.org/3/reference/lexical_analysis.html) in combination with the source code in :mod:`pandas.core.computation.parsing`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ inplace = validate_bool_kwarg(inplace, "inplace") if not isinstance(expr, str): msg = f"expr must be a string to be evaluated, {type(expr)} given" raise ValueError(msg) kwargs["level"] = kwargs.pop("level", 0) + 1 kwargs["target"] = None res = self.eval(expr, **kwargs) try: result = self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query result = self[res] if inplace: self._update_inplace(result) return None else: return result def eval(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> Any: ... def eval(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def eval(self, expr: str, *, inplace: bool = False, **kwargs) -> Any | None: """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. 
This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, pandas object, or None The result of the evaluation or None if ``inplace=True``. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Multiple columns can be assigned to using multi-line expressions: >>> df.eval( ... ''' ... C = A + B ... D = A - B ... ''' ... ) A B C D 0 1 10 11 -9 1 2 8 10 -6 2 3 6 9 -3 3 4 4 8 0 4 5 2 7 3 """ from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, "inplace") kwargs["level"] = kwargs.pop("level", 0) + 1 index_resolvers = self._get_index_resolvers() column_resolvers = self._get_cleaned_column_resolvers() resolvers = column_resolvers, index_resolvers if "target" not in kwargs: kwargs["target"] = self kwargs["resolvers"] = tuple(kwargs.get("resolvers", ())) + resolvers return _eval(expr, inplace=inplace, **kwargs) def select_dtypes(self, include=None, exclude=None) -> DataFrame: """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. See Also -------- DataFrame.dtypes: Return Series with the data type of each column. Notes ----- * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <https://numpy.org/doc/stable/reference/arrays.scalars.html>`__ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ``'timedelta64'`` * To select Pandas categorical dtypes, use ``'category'`` * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 
'c': [1.0, 2.0] * 3}) >>> df a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int64']) b c 0 True 1.0 1 False 2.0 2 True 1.0 3 False 2.0 4 True 1.0 5 False 2.0 """ if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () selection = (frozenset(include), frozenset(exclude)) if not any(selection): raise ValueError("at least one of include or exclude must be nonempty") # convert the myriad valid dtypes object to a single representation def check_int_infer_dtype(dtypes): converted_dtypes: list[type] = [] for dtype in dtypes: # Numpy maps int to different types (int32, in64) on Windows and Linux # see https://github.com/numpy/numpy/issues/9464 if (isinstance(dtype, str) and dtype == "int") or (dtype is int): converted_dtypes.append(np.int32) converted_dtypes.append(np.int64) elif dtype == "float" or dtype is float: # GH#42452 : np.dtype("float") coerces to np.float64 from Numpy 1.20 converted_dtypes.extend([np.float64, np.float32]) else: converted_dtypes.append(infer_dtype_from_object(dtype)) return frozenset(converted_dtypes) include = check_int_infer_dtype(include) exclude = check_int_infer_dtype(exclude) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) # can't both include AND exclude! if not include.isdisjoint(exclude): raise ValueError(f"include and exclude overlap on {(include & exclude)}") def dtype_predicate(dtype: DtypeObj, dtypes_set) -> bool: # GH 46870: BooleanDtype._is_numeric == True but should be excluded return issubclass(dtype.type, tuple(dtypes_set)) or ( np.number in dtypes_set and getattr(dtype, "_is_numeric", False) and not is_bool_dtype(dtype) ) def predicate(arr: ArrayLike) -> bool: dtype = arr.dtype if include: if not dtype_predicate(dtype, include): return False if exclude: if dtype_predicate(dtype, exclude): return False return True mgr = self._mgr._get_data_subset(predicate).copy(deep=None) return type(self)(mgr).__finalize__(self) def insert( self, loc: int, column: Hashable, value: Scalar | AnyArrayLike, allow_duplicates: bool | lib.NoDefault = lib.no_default, ) -> None: """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns). column : str, number, or hashable object Label of the inserted column. value : Scalar, Series, or array-like allow_duplicates : bool, optional, default lib.no_default See Also -------- Index.insert : Insert new item by index. 
Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.insert(1, "newcol", [99, 99]) >>> df col1 newcol col2 0 1 99 3 1 2 99 4 >>> df.insert(0, "col1", [100, 100], allow_duplicates=True) >>> df col1 col1 newcol col2 0 100 1 99 3 1 100 2 99 4 Notice that pandas uses index alignment in case of `value` from type `Series`: >>> df.insert(0, "col0", pd.Series([5, 6], index=[1, 2])) >>> df col0 col1 col1 newcol col2 0 NaN 100 1 99 3 1 5.0 100 2 99 4 """ if allow_duplicates is lib.no_default: allow_duplicates = False if allow_duplicates and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'allow_duplicates=True' when " "'self.flags.allows_duplicate_labels' is False." ) if not allow_duplicates and column in self.columns: # Should this be a different kind of error?? raise ValueError(f"cannot insert {column}, already exists") if not isinstance(loc, int): raise TypeError("loc must be int") value = self._sanitize_column(value) self._mgr.insert(loc, column, value) def assign(self, **kwargs) -> DataFrame: r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. Later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 You can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy(deep=None) for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) return data def _sanitize_column(self, value) -> ArrayLike: """ Ensures new columns (which go into the BlockManager as new blocks) are always copied and converted into an array. 
Parameters ---------- value : scalar, Series, or array-like Returns ------- numpy.ndarray or ExtensionArray """ self._ensure_valid_index(value) # We can get there through isetitem with a DataFrame # or through loc single_block_path if isinstance(value, DataFrame): return _reindex_for_setitem(value, self.index) elif is_dict_like(value): return _reindex_for_setitem(Series(value), self.index) if is_list_like(value): com.require_length_match(value, self.index) return sanitize_array(value, self.index, copy=True, allow_2d=True) def _series(self): return { item: Series( self._mgr.iget(idx), index=self.index, name=item, fastpath=True ) for idx, item in enumerate(self.columns) } # ---------------------------------------------------------------------- # Reindexing and alignment def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): frame = self columns = axes["columns"] if columns is not None: frame = frame._reindex_columns( columns, method, copy, level, fill_value, limit, tolerance ) index = axes["index"] if index is not None: frame = frame._reindex_index( index, method, copy, level, fill_value, limit, tolerance ) return frame def _reindex_index( self, new_index, method, copy: bool, level: Level, fill_value=np.nan, limit=None, tolerance=None, ): new_index, indexer = self.index.reindex( new_index, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {0: [new_index, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_columns( self, new_columns, method, copy: bool, level: Level, fill_value=None, limit=None, tolerance=None, ): new_columns, indexer = self.columns.reindex( new_columns, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {1: [new_columns, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_multi( self, axes: dict[str, Index], copy: bool, fill_value ) -> DataFrame: """ We are guaranteed non-Nones in the axes. """ new_index, row_indexer = self.index.reindex(axes["index"]) new_columns, col_indexer = self.columns.reindex(axes["columns"]) if row_indexer is not None and col_indexer is not None: # Fastpath. By doing two 'take's at once we avoid making an # unnecessary copy. # We only get here with `not self._is_mixed_type`, which (almost) # ensures that self.values is cheap. It may be worth making this # condition more specific. indexer = row_indexer, col_indexer new_values = take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor( new_values, index=new_index, columns=new_columns, copy=False ) else: return self._reindex_with_indexers( {0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value, ) def align( self, other: DataFrame, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value=None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> DataFrame: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) """ Examples -------- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) Change the row labels. >>> df.set_axis(['a', 'b', 'c'], axis='index') A B a 1 4 b 2 5 c 3 6 Change the column labels. 
>>> df.set_axis(['I', 'II'], axis='columns') I II 0 1 4 1 2 5 2 3 6 """ # ---------------------------------------------------------------------- # Reindex-based selection methods # ---------------------------------------------------------------------- # Sorting # ---------------------------------------------------------------------- # Arithmetic Methods # ---------------------------------------------------------------------- # Function application # ---------------------------------------------------------------------- # Merging / joining methods # ---------------------------------------------------------------------- # Statistical methods, etc. # ---------------------------------------------------------------------- # ndarray-like stats methods # ---------------------------------------------------------------------- # Add index and columns # ---------------------------------------------------------------------- # Add plotting methods to DataFrame # ---------------------------------------------------------------------- # Internal Interface Methods def read_stata( filepath_or_buffer: FilePath | ReadBuffer[bytes], *, convert_dates: bool = True, convert_categoricals: bool = True, index_col: str | None = None, convert_missing: bool = False, preserve_dtypes: bool = True, columns: Sequence[str] | None = None, order_categoricals: bool = True, chunksize: int | None = None, iterator: bool = False, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ) -> DataFrame | StataReader: reader = StataReader( filepath_or_buffer, convert_dates=convert_dates, convert_categoricals=convert_categoricals, index_col=index_col, convert_missing=convert_missing, preserve_dtypes=preserve_dtypes, columns=columns, order_categoricals=order_categoricals, chunksize=chunksize, storage_options=storage_options, compression=compression, ) if iterator or chunksize: return reader with reader: return reader.read()
null
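The record above ends with the module-level `read_stata` entry point. A minimal round-trip sketch of how it is used, assuming pandas is importable as `pd` and the file name "example.dta" is illustrative (the `index_col` name matches the default column that `to_stata` writes for a RangeIndex):

import pandas as pd

df = pd.DataFrame({"x": [1, 2], "y": [3.5, 4.5]})
df.to_stata("example.dta")                              # writes the index as a column named "index"
back = pd.read_stata("example.dta", index_col="index")  # constructs a StataReader and reads the frame
# Per the source above, passing chunksize (or iterator=True) returns the StataReader itself,
# so the caller can stream the file in pieces instead of materializing it at once.
reader = pd.read_stata("example.dta", chunksize=1)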
173,536
from __future__ import annotations from collections import abc import datetime from io import BytesIO import os import struct import sys from types import TracebackType from typing import ( IO, TYPE_CHECKING, Any, AnyStr, Callable, Final, Hashable, Sequence, cast, ) import warnings from dateutil.relativedelta import relativedelta import numpy as np from pandas._libs.lib import infer_dtype from pandas._libs.writers import max_len_string_array from pandas._typing import ( CompressionOptions, FilePath, ReadBuffer, StorageOptions, WriteBuffer, ) from pandas.errors import ( CategoricalConversionWarning, InvalidColumnName, PossiblePrecisionLoss, ValueLabelTypeMismatch, ) from pandas.util._decorators import ( Appender, doc, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_categorical_dtype, is_datetime64_dtype, is_numeric_dtype, ) from pandas import ( Categorical, DatetimeIndex, NaT, Timestamp, isna, to_datetime, to_timedelta, ) from pandas.core.arrays.boolean import BooleanDtype from pandas.core.arrays.integer import IntegerDtype from pandas.core.frame import DataFrame from pandas.core.indexes.base import Index from pandas.core.series import Series from pandas.core.shared_docs import _shared_docs from pandas.io.common import get_handle def _set_endianness(endianness: str) -> str: if endianness.lower() in ["<", "little"]: return "<" elif endianness.lower() in [">", "big"]: return ">" else: # pragma : no cover raise ValueError(f"Endianness {endianness} not understood")
null
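`_set_endianness` normalizes a user-supplied endianness flag into a struct format prefix. A minimal sketch that copies the helper verbatim from the record above and feeds its result to `struct.pack`:

import struct

def _set_endianness(endianness: str) -> str:
    # copied verbatim from the prompt above
    if endianness.lower() in ["<", "little"]:
        return "<"
    elif endianness.lower() in [">", "big"]:
        return ">"
    else:
        raise ValueError(f"Endianness {endianness} not understood")

struct.pack(_set_endianness("little") + "i", 42)  # b'*\x00\x00\x00'
struct.pack(_set_endianness("big") + "i", 42)     # b'\x00\x00\x00*'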
173,537
from __future__ import annotations from collections import abc import datetime from io import BytesIO import os import struct import sys from types import TracebackType from typing import ( IO, TYPE_CHECKING, Any, AnyStr, Callable, Final, Hashable, Sequence, cast, ) import warnings from dateutil.relativedelta import relativedelta import numpy as np from pandas._libs.lib import infer_dtype from pandas._libs.writers import max_len_string_array from pandas._typing import ( CompressionOptions, FilePath, ReadBuffer, StorageOptions, WriteBuffer, ) from pandas.errors import ( CategoricalConversionWarning, InvalidColumnName, PossiblePrecisionLoss, ValueLabelTypeMismatch, ) from pandas.util._decorators import ( Appender, doc, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_categorical_dtype, is_datetime64_dtype, is_numeric_dtype, ) from pandas import ( Categorical, DatetimeIndex, NaT, Timestamp, isna, to_datetime, to_timedelta, ) from pandas.core.arrays.boolean import BooleanDtype from pandas.core.arrays.integer import IntegerDtype from pandas.core.frame import DataFrame from pandas.core.indexes.base import Index from pandas.core.series import Series from pandas.core.shared_docs import _shared_docs from pandas.io.common import get_handle AnyStr = TypeVar("AnyStr", str, bytes) The provided code snippet includes necessary dependencies for implementing the `_pad_bytes` function. Write a Python function `def _pad_bytes(name: AnyStr, length: int) -> AnyStr` to solve the following problem: Take a char string and pads it with null bytes until it's length chars. Here is the function: def _pad_bytes(name: AnyStr, length: int) -> AnyStr: """ Take a char string and pads it with null bytes until it's length chars. """ if isinstance(name, bytes): return name + b"\x00" * (length - len(name)) return name + "\x00" * (length - len(name))
Take a char string and pad it with null bytes until it is `length` chars long.
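A quick check of the padding behavior, assuming `_pad_bytes` is in scope as defined in the record above; it works symmetrically for str and bytes input:

>>> _pad_bytes("age", 6)
'age\x00\x00\x00'
>>> _pad_bytes(b"age", 6)
b'age\x00\x00\x00'
>>> len(_pad_bytes("age", 6))
6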
173,538
from __future__ import annotations from collections import abc import datetime from io import BytesIO import os import struct import sys from types import TracebackType from typing import ( IO, TYPE_CHECKING, Any, AnyStr, Callable, Final, Hashable, Sequence, cast, ) import warnings from dateutil.relativedelta import relativedelta import numpy as np from pandas._libs.lib import infer_dtype from pandas._libs.writers import max_len_string_array from pandas._typing import ( CompressionOptions, FilePath, ReadBuffer, StorageOptions, WriteBuffer, ) from pandas.errors import ( CategoricalConversionWarning, InvalidColumnName, PossiblePrecisionLoss, ValueLabelTypeMismatch, ) from pandas.util._decorators import ( Appender, doc, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_categorical_dtype, is_datetime64_dtype, is_numeric_dtype, ) from pandas import ( Categorical, DatetimeIndex, NaT, Timestamp, isna, to_datetime, to_timedelta, ) from pandas.core.arrays.boolean import BooleanDtype from pandas.core.arrays.integer import IntegerDtype from pandas.core.frame import DataFrame from pandas.core.indexes.base import Index from pandas.core.series import Series from pandas.core.shared_docs import _shared_docs from pandas.io.common import get_handle The provided code snippet includes necessary dependencies for implementing the `_convert_datetime_to_stata_type` function. Write a Python function `def _convert_datetime_to_stata_type(fmt: str) -> np.dtype` to solve the following problem: Convert from one of the stata date formats to a type in TYPE_MAP. Here is the function: def _convert_datetime_to_stata_type(fmt: str) -> np.dtype: """ Convert from one of the stata date formats to a type in TYPE_MAP. """ if fmt in [ "tc", "%tc", "td", "%td", "tw", "%tw", "tm", "%tm", "tq", "%tq", "th", "%th", "ty", "%ty", ]: return np.dtype(np.float64) # Stata expects doubles for SIFs else: raise NotImplementedError(f"Format {fmt} not implemented")
Convert from one of the Stata date formats to a type in TYPE_MAP.
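The mapping accepts both bare and %-prefixed Stata SIF display formats and always yields float64, since Stata stores these dates as doubles; a small demonstration, assuming `_convert_datetime_to_stata_type` is in scope as defined above:

>>> _convert_datetime_to_stata_type("%tc")
dtype('float64')
>>> _convert_datetime_to_stata_type("td")
dtype('float64')

Any other format string (for example "%d") raises NotImplementedError.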
173,539
from __future__ import annotations from collections import abc import datetime from io import BytesIO import os import struct import sys from types import TracebackType from typing import ( IO, TYPE_CHECKING, Any, AnyStr, Callable, Final, Hashable, Sequence, cast, ) import warnings from dateutil.relativedelta import relativedelta import numpy as np from pandas._libs.lib import infer_dtype from pandas._libs.writers import max_len_string_array from pandas._typing import ( CompressionOptions, FilePath, ReadBuffer, StorageOptions, WriteBuffer, ) from pandas.errors import ( CategoricalConversionWarning, InvalidColumnName, PossiblePrecisionLoss, ValueLabelTypeMismatch, ) from pandas.util._decorators import ( Appender, doc, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_categorical_dtype, is_datetime64_dtype, is_numeric_dtype, ) from pandas import ( Categorical, DatetimeIndex, NaT, Timestamp, isna, to_datetime, to_timedelta, ) from pandas.core.arrays.boolean import BooleanDtype from pandas.core.arrays.integer import IntegerDtype from pandas.core.frame import DataFrame from pandas.core.indexes.base import Index from pandas.core.series import Series from pandas.core.shared_docs import _shared_docs from pandas.io.common import get_handle class Hashable(Protocol, metaclass=ABCMeta): # TODO: This is special, in that a subclass of a hashable class may not be hashable # (for example, list vs. object). It's not obvious how to represent this. This class # is currently mostly useless for static checking. def __hash__(self) -> int: ... def _maybe_convert_to_int_keys(convert_dates: dict, varlist: list[Hashable]) -> dict: new_dict = {} for key in convert_dates: if not convert_dates[key].startswith("%"): # make sure proper fmts convert_dates[key] = "%" + convert_dates[key] if key in varlist: new_dict.update({varlist.index(key): convert_dates[key]}) else: if not isinstance(key, int): raise ValueError("convert_dates key must be a column or an integer") new_dict.update({key: convert_dates[key]}) return new_dict
null
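A worked example of the key normalization, assuming `_maybe_convert_to_int_keys` is in scope as defined above. Column-name keys are mapped to their position in `varlist`, integer keys pass through, and the "%" prefix is added where missing (note the input dict is also rewritten in place):

>>> convert_dates = {"dob": "tc", 2: "%td"}
>>> varlist = ["id", "dob", "visit"]
>>> _maybe_convert_to_int_keys(convert_dates, varlist)
{1: '%tc', 2: '%td'}

A key that is neither a column in `varlist` nor an integer raises ValueError.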
173,540
from __future__ import annotations from collections import abc import datetime from io import BytesIO import os import struct import sys from types import TracebackType from typing import ( IO, TYPE_CHECKING, Any, AnyStr, Callable, Final, Hashable, Sequence, cast, ) import warnings from dateutil.relativedelta import relativedelta import numpy as np from pandas._libs.lib import infer_dtype from pandas._libs.writers import max_len_string_array from pandas._typing import ( CompressionOptions, FilePath, ReadBuffer, StorageOptions, WriteBuffer, ) from pandas.errors import ( CategoricalConversionWarning, InvalidColumnName, PossiblePrecisionLoss, ValueLabelTypeMismatch, ) from pandas.util._decorators import ( Appender, doc, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_categorical_dtype, is_datetime64_dtype, is_numeric_dtype, ) from pandas import ( Categorical, DatetimeIndex, NaT, Timestamp, isna, to_datetime, to_timedelta, ) from pandas.core.arrays.boolean import BooleanDtype from pandas.core.arrays.integer import IntegerDtype from pandas.core.frame import DataFrame from pandas.core.indexes.base import Index from pandas.core.series import Series from pandas.core.shared_docs import _shared_docs from pandas.io.common import get_handle ensure_object = algos.ensure_object class Series(base.IndexOpsMixin, NDFrame): # type: ignore[misc] """ One-dimensional ndarray with axis labels (including time series). Labels need not be unique but must be a hashable type. The object supports both integer- and label-based indexing and provides a host of methods for performing operations involving the index. Statistical methods from ndarray have been overridden to automatically exclude missing data (currently represented as NaN). Operations between Series (+, -, /, \\*, \\*\\*) align values based on their associated index values-- they need not be the same length. The result index will be the sorted union of the two indexes. Parameters ---------- data : array-like, Iterable, dict, or scalar value Contains data stored in Series. If data is a dict, argument order is maintained. index : array-like or Index (1d) Values must be hashable and have the same length as `data`. Non-unique index values are allowed. Will default to RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like and index is None, then the keys in the data are used as the index. If the index is not None, the resulting Series is reindexed with the index values. dtype : str, numpy.dtype, or ExtensionDtype, optional Data type for the output Series. If not specified, this will be inferred from `data`. See the :ref:`user guide <basics.dtypes>` for more usages. name : Hashable, default None The name to give to the Series. copy : bool, default False Copy input data. Only affects Series or 1d ndarray input. See examples. Notes ----- Please reference the :ref:`User Guide <basics.series>` for more information. Examples -------- Constructing Series from a dictionary with an Index specified >>> d = {'a': 1, 'b': 2, 'c': 3} >>> ser = pd.Series(data=d, index=['a', 'b', 'c']) >>> ser a 1 b 2 c 3 dtype: int64 The keys of the dictionary match with the Index values, hence the Index values have no effect. >>> d = {'a': 1, 'b': 2, 'c': 3} >>> ser = pd.Series(data=d, index=['x', 'y', 'z']) >>> ser x NaN y NaN z NaN dtype: float64 Note that the Index is first build with the keys from the dictionary. After this the Series is reindexed with the given Index values, hence we get all NaN as a result. 
Constructing Series from a list with `copy=False`. >>> r = [1, 2] >>> ser = pd.Series(r, copy=False) >>> ser.iloc[0] = 999 >>> r [1, 2] >>> ser 0 999 1 2 dtype: int64 Due to input data type the Series has a `copy` of the original data even though `copy=False`, so the data is unchanged. Constructing Series from a 1d ndarray with `copy=False`. >>> r = np.array([1, 2]) >>> ser = pd.Series(r, copy=False) >>> ser.iloc[0] = 999 >>> r array([999, 2]) >>> ser 0 999 1 2 dtype: int64 Due to input data type the Series has a `view` on the original data, so the data is changed as well. """ _typ = "series" _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) _name: Hashable _metadata: list[str] = ["name"] _internal_names_set = {"index"} | NDFrame._internal_names_set _accessors = {"dt", "cat", "str", "sparse"} _hidden_attrs = ( base.IndexOpsMixin._hidden_attrs | NDFrame._hidden_attrs | frozenset([]) ) # Override cache_readonly bc Series is mutable # error: Incompatible types in assignment (expression has type "property", # base class "IndexOpsMixin" defined the type as "Callable[[IndexOpsMixin], bool]") hasnans = property( # type: ignore[assignment] # error: "Callable[[IndexOpsMixin], bool]" has no attribute "fget" base.IndexOpsMixin.hasnans.fget, # type: ignore[attr-defined] doc=base.IndexOpsMixin.hasnans.__doc__, ) _mgr: SingleManager div: Callable[[Series, Any], Series] rdiv: Callable[[Series, Any], Series] # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index=None, dtype: Dtype | None = None, name=None, copy: bool | None = None, fastpath: bool = False, ) -> None: if ( isinstance(data, (SingleBlockManager, SingleArrayManager)) and index is None and dtype is None and (copy is False or copy is None) ): if using_copy_on_write(): data = data.copy(deep=False) # GH#33357 called with just the SingleBlockManager NDFrame.__init__(self, data) if fastpath: # e.g. 
from _box_col_values, skip validation of name object.__setattr__(self, "_name", name) else: self.name = name return if isinstance(data, (ExtensionArray, np.ndarray)): if copy is not False and using_copy_on_write(): if dtype is None or astype_is_view(data.dtype, pandas_dtype(dtype)): data = data.copy() if copy is None: copy = False # we are called internally, so short-circuit if fastpath: # data is a ndarray, index is defined if not isinstance(data, (SingleBlockManager, SingleArrayManager)): manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index) elif manager == "array": data = SingleArrayManager.from_array(data, index) elif using_copy_on_write() and not copy: data = data.copy(deep=False) if copy: data = data.copy() # skips validation of the name object.__setattr__(self, "_name", name) NDFrame.__init__(self, data) return if isinstance(data, SingleBlockManager) and using_copy_on_write() and not copy: data = data.copy(deep=False) name = ibase.maybe_extract_name(name, data, type(self)) if index is not None: index = ensure_index(index) if dtype is not None: dtype = self._validate_dtype(dtype) if data is None: index = index if index is not None else default_index(0) if len(index) or dtype is not None: data = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: data = [] if isinstance(data, MultiIndex): raise NotImplementedError( "initializing a Series from a MultiIndex is not supported" ) refs = None if isinstance(data, Index): if dtype is not None: data = data.astype(dtype, copy=False) if using_copy_on_write(): refs = data._references data = data._values else: # GH#24096 we need to ensure the index remains immutable data = data._values.copy() copy = False elif isinstance(data, np.ndarray): if len(data.dtype): # GH#13296 we are dealing with a compound dtype, which # should be treated as 2D raise ValueError( "Cannot construct a Series from an ndarray with " "compound dtype. Use DataFrame instead." ) elif isinstance(data, Series): if index is None: index = data.index data = data._mgr.copy(deep=False) else: data = data.reindex(index, copy=copy) copy = False data = data._mgr elif is_dict_like(data): data, index = self._init_dict(data, index, dtype) dtype = None copy = False elif isinstance(data, (SingleBlockManager, SingleArrayManager)): if index is None: index = data.index elif not data.index.equals(index) or copy: # GH#19275 SingleBlockManager input should only be called # internally raise AssertionError( "Cannot pass both SingleBlockManager " "`data` argument and a different " "`index` argument. `copy` must be False." ) elif isinstance(data, ExtensionArray): pass else: data = com.maybe_iterable_to_list(data) if is_list_like(data) and not len(data) and dtype is None: # GH 29405: Pre-2.0, this defaulted to float. 
dtype = np.dtype(object) if index is None: if not is_list_like(data): data = [data] index = default_index(len(data)) elif is_list_like(data): com.require_length_match(data, index) # create/copy the manager if isinstance(data, (SingleBlockManager, SingleArrayManager)): if dtype is not None: data = data.astype(dtype=dtype, errors="ignore", copy=copy) elif copy: data = data.copy() else: data = sanitize_array(data, index, dtype, copy) manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index, refs=refs) elif manager == "array": data = SingleArrayManager.from_array(data, index) NDFrame.__init__(self, data) self.name = name self._set_axis(0, index) def _init_dict( self, data, index: Index | None = None, dtype: DtypeObj | None = None ): """ Derive the "_mgr" and "index" attributes of a new Series from a dictionary input. Parameters ---------- data : dict or dict-like Data used to populate the new Series. index : Index or None, default None Index for the new Series: if None, use dict keys. dtype : np.dtype, ExtensionDtype, or None, default None The dtype for the new Series: if None, infer from data. Returns ------- _data : BlockManager for the new Series index : index for the new Series """ keys: Index | tuple # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')] # raises KeyError), so we iterate the entire dict, and align if data: # GH:34717, issue was using zip to extract key and values from data. # using generators in effects the performance. # Below is the new way of extracting the keys and values keys = tuple(data.keys()) values = list(data.values()) # Generating list of values- faster way elif index is not None: # fastpath for Series(data=None). Just use broadcasting a scalar # instead of reindexing. if len(index) or dtype is not None: values = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: values = [] keys = index else: keys, values = (), [] # Input is now list-like, so rely on "standard" construction: s = self._constructor( values, index=keys, dtype=dtype, ) # Now we just make sure the order is respected, if any if data and index is not None: s = s.reindex(index, copy=False) return s._mgr, s.index # ---------------------------------------------------------------------- def _constructor(self) -> Callable[..., Series]: return Series def _constructor_expanddim(self) -> Callable[..., DataFrame]: """ Used when a manipulation result has one higher dimension as the original, such as Series.to_frame() """ from pandas.core.frame import DataFrame return DataFrame # types def _can_hold_na(self) -> bool: return self._mgr._can_hold_na # ndarray compatibility def dtype(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtype dtype('int64') """ return self._mgr.dtype def dtypes(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtypes dtype('int64') """ # DataFrame compatibility return self.dtype def name(self) -> Hashable: """ Return the name of the Series. The name of a Series becomes its index or column name if it is used to form a DataFrame. It is also used whenever displaying the Series using the interpreter. Returns ------- label (hashable object) The name of the Series, also the column name if part of a DataFrame. See Also -------- Series.rename : Sets the Series name when given a scalar input. Index.name : Corresponding Index property. 
Examples -------- The Series name can be set initially when calling the constructor. >>> s = pd.Series([1, 2, 3], dtype=np.int64, name='Numbers') >>> s 0 1 1 2 2 3 Name: Numbers, dtype: int64 >>> s.name = "Integers" >>> s 0 1 1 2 2 3 Name: Integers, dtype: int64 The name of a Series within a DataFrame is its column name. >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], ... columns=["Odd Numbers", "Even Numbers"]) >>> df Odd Numbers Even Numbers 0 1 2 1 3 4 2 5 6 >>> df["Even Numbers"].name 'Even Numbers' """ return self._name def name(self, value: Hashable) -> None: validate_all_hashable(value, error_name=f"{type(self).__name__}.name") object.__setattr__(self, "_name", value) def values(self): """ Return Series as ndarray or ndarray-like depending on the dtype. .. warning:: We recommend using :attr:`Series.array` or :meth:`Series.to_numpy`, depending on whether you need a reference to the underlying data or a NumPy array. Returns ------- numpy.ndarray or ndarray-like See Also -------- Series.array : Reference to the underlying data. Series.to_numpy : A NumPy array representing the underlying data. Examples -------- >>> pd.Series([1, 2, 3]).values array([1, 2, 3]) >>> pd.Series(list('aabc')).values array(['a', 'a', 'b', 'c'], dtype=object) >>> pd.Series(list('aabc')).astype('category').values ['a', 'a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] Timezone aware datetime data is converted to UTC: >>> pd.Series(pd.date_range('20130101', periods=3, ... tz='US/Eastern')).values array(['2013-01-01T05:00:00.000000000', '2013-01-02T05:00:00.000000000', '2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]') """ return self._mgr.external_values() def _values(self): """ Return the internal repr of this data (defined by Block.interval_values). This are the values as stored in the Block (ndarray or ExtensionArray depending on the Block class), with datetime64[ns] and timedelta64[ns] wrapped in ExtensionArrays to match Index._values behavior. Differs from the public ``.values`` for certain data types, because of historical backwards compatibility of the public attribute (e.g. period returns object ndarray and datetimetz a datetime64[ns] ndarray for ``.values`` while it returns an ExtensionArray for ``._values`` in those cases). Differs from ``.array`` in that this still returns the numpy array if the Block is backed by a numpy array (except for datetime64 and timedelta64 dtypes), while ``.array`` ensures to always return an ExtensionArray. Overview: dtype | values | _values | array | ----------- | ------------- | ------------- | ------------- | Numeric | ndarray | ndarray | PandasArray | Category | Categorical | Categorical | Categorical | dt64[ns] | ndarray[M8ns] | DatetimeArray | DatetimeArray | dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray | td64[ns] | ndarray[m8ns] | TimedeltaArray| ndarray[m8ns] | Period | ndarray[obj] | PeriodArray | PeriodArray | Nullable | EA | EA | EA | """ return self._mgr.internal_values() def _references(self) -> BlockValuesRefs | None: if isinstance(self._mgr, SingleArrayManager): return None return self._mgr._block.refs # error: Decorated property not supported def array(self) -> ExtensionArray: return self._mgr.array_values() # ops def ravel(self, order: str = "C") -> ArrayLike: """ Return the flattened underlying data as an ndarray or ExtensionArray. Returns ------- numpy.ndarray or ExtensionArray Flattened data of the Series. See Also -------- numpy.ndarray.ravel : Return a flattened array. 
""" arr = self._values.ravel(order=order) if isinstance(arr, np.ndarray) and using_copy_on_write(): arr.flags.writeable = False return arr def __len__(self) -> int: """ Return the length of the Series. """ return len(self._mgr) def view(self, dtype: Dtype | None = None) -> Series: """ Create a new view of the Series. This function will return a new Series with a view of the same underlying values in memory, optionally reinterpreted with a new data type. The new data type must preserve the same size in bytes as to not cause index misalignment. Parameters ---------- dtype : data type Data type object or one of their string representations. Returns ------- Series A new Series object as a view of the same data in memory. See Also -------- numpy.ndarray.view : Equivalent numpy function to create a new view of the same data in memory. Notes ----- Series are instantiated with ``dtype=float64`` by default. While ``numpy.ndarray.view()`` will return a view with the same data type as the original array, ``Series.view()`` (without specified dtype) will try using ``float64`` and may fail if the original data type size in bytes is not the same. Examples -------- >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8') >>> s 0 -2 1 -1 2 0 3 1 4 2 dtype: int8 The 8 bit signed integer representation of `-1` is `0b11111111`, but the same bytes represent 255 if read as an 8 bit unsigned integer: >>> us = s.view('uint8') >>> us 0 254 1 255 2 0 3 1 4 2 dtype: uint8 The views share the same underlying values: >>> us[0] = 128 >>> s 0 -128 1 -1 2 0 3 1 4 2 dtype: int8 """ # self.array instead of self._values so we piggyback on PandasArray # implementation res_values = self.array.view(dtype) res_ser = self._constructor(res_values, index=self.index, copy=False) if isinstance(res_ser._mgr, SingleBlockManager) and using_copy_on_write(): blk = res_ser._mgr._block blk.refs = cast("BlockValuesRefs", self._references) blk.refs.add_reference(blk) # type: ignore[arg-type] return res_ser.__finalize__(self, method="view") # ---------------------------------------------------------------------- # NDArray Compat _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: """ Return the values as a NumPy array. Users should not call this directly. Rather, it is invoked by :func:`numpy.array` and :func:`numpy.asarray`. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to use for the resulting NumPy array. By default, the dtype is inferred from the data. Returns ------- numpy.ndarray The values in the series converted to a :class:`numpy.ndarray` with the specified `dtype`. See Also -------- array : Create a new array from data. Series.array : Zero-copy view to the array backing the Series. Series.to_numpy : Series method for similar behavior. 
Examples -------- >>> ser = pd.Series([1, 2, 3]) >>> np.asarray(ser) array([1, 2, 3]) For timezone-aware data, the timezones may be retained with ``dtype='object'`` >>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) >>> np.asarray(tzser, dtype="object") array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'), Timestamp('2000-01-02 00:00:00+0100', tz='CET')], dtype=object) Or the values may be localized to UTC and the tzinfo discarded with ``dtype='datetime64[ns]'`` >>> np.asarray(tzser, dtype="datetime64[ns]") # doctest: +ELLIPSIS array(['1999-12-31T23:00:00.000000000', ...], dtype='datetime64[ns]') """ values = self._values arr = np.asarray(values, dtype=dtype) if using_copy_on_write() and astype_is_view(values.dtype, arr.dtype): arr = arr.view() arr.flags.writeable = False return arr # ---------------------------------------------------------------------- # Unary Methods # coercion __float__ = _coerce_method(float) __int__ = _coerce_method(int) # ---------------------------------------------------------------------- # indexers def axes(self) -> list[Index]: """ Return a list of the row axis labels. """ return [self.index] # ---------------------------------------------------------------------- # Indexing Methods def take(self, indices, axis: Axis = 0, **kwargs) -> Series: nv.validate_take((), kwargs) indices = ensure_platform_int(indices) if ( indices.ndim == 1 and using_copy_on_write() and is_range_indexer(indices, len(self)) ): return self.copy(deep=None) new_index = self.index.take(indices) new_values = self._values.take(indices) result = self._constructor(new_values, index=new_index, fastpath=True) return result.__finalize__(self, method="take") def _take_with_is_copy(self, indices, axis: Axis = 0) -> Series: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing for the SettingWithCopyWarning). For Series this does the same as the public take (it never sets `_is_copy`). See the docstring of `take` for full explanation of the parameters. """ return self.take(indices=indices, axis=axis) def _ixs(self, i: int, axis: AxisInt = 0) -> Any: """ Return the i-th value or values in the Series by location. Parameters ---------- i : int Returns ------- scalar (int) or Series (slice, sequence) """ return self._values[i] def _slice(self, slobj: slice | np.ndarray, axis: Axis = 0) -> Series: # axis kwarg is retained for compat with NDFrame method # _slice is *always* positional return self._get_values(slobj) def __getitem__(self, key): check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) if key is Ellipsis: return self key_is_scalar = is_scalar(key) if isinstance(key, (list, tuple)): key = unpack_1tuple(key) if is_integer(key) and self.index._should_fallback_to_positional: return self._values[key] elif key_is_scalar: return self._get_value(key) if is_hashable(key): # Otherwise index.get_value will raise InvalidIndexError try: # For labels that don't resolve as scalars like tuples and frozensets result = self._get_value(key) return result except (KeyError, TypeError, InvalidIndexError): # InvalidIndexError for e.g. 
generator # see test_series_getitem_corner_generator if isinstance(key, tuple) and isinstance(self.index, MultiIndex): # We still have the corner case where a tuple is a key # in the first level of our MultiIndex return self._get_values_tuple(key) if is_iterator(key): key = list(key) if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) return self._get_values(key) return self._get_with(key) def _get_with(self, key): # other: fancy integer or otherwise if isinstance(key, slice): # _convert_slice_indexer to determine if this slice is positional # or label based, and if the latter, convert to positional slobj = self.index._convert_slice_indexer(key, kind="getitem") return self._slice(slobj) elif isinstance(key, ABCDataFrame): raise TypeError( "Indexing a Series with DataFrame is not " "supported, use the appropriate DataFrame column" ) elif isinstance(key, tuple): return self._get_values_tuple(key) elif not is_list_like(key): # e.g. scalars that aren't recognized by lib.is_scalar, GH#32684 return self.loc[key] if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)): key = list(key) if isinstance(key, Index): key_type = key.inferred_type else: key_type = lib.infer_dtype(key, skipna=False) # Note: The key_type == "boolean" case should be caught by the # com.is_bool_indexer check in __getitem__ if key_type == "integer": # We need to decide whether to treat this as a positional indexer # (i.e. self.iloc) or label-based (i.e. self.loc) if not self.index._should_fallback_to_positional: return self.loc[key] else: return self.iloc[key] # handle the dup indexing case GH#4246 return self.loc[key] def _get_values_tuple(self, key: tuple): # mpl hackaround if com.any_none(*key): # mpl compat if we look up e.g. ser[:, np.newaxis]; # see tests.series.timeseries.test_mpl_compat_hack # the asarray is needed to avoid returning a 2D DatetimeArray result = np.asarray(self._values[key]) disallow_ndim_indexing(result) return result if not isinstance(self.index, MultiIndex): raise KeyError("key of type tuple not found and not a MultiIndex") # If key is contained, would have returned by now indexer, new_index = self.index.get_loc_level(key) new_ser = self._constructor(self._values[indexer], index=new_index, copy=False) if using_copy_on_write() and isinstance(indexer, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) def _get_values(self, indexer: slice | npt.NDArray[np.bool_]) -> Series: new_mgr = self._mgr.getitem_mgr(indexer) return self._constructor(new_mgr).__finalize__(self) def _get_value(self, label, takeable: bool = False): """ Quickly retrieve single value at passed index label. 
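        If ``takeable`` is True, ``label`` is interpreted as a positional
        index into the underlying values rather than an index label.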
Parameters ---------- label : object takeable : interpret the index as indexers, default False Returns ------- scalar value """ if takeable: return self._values[label] # Similar to Index.get_value, but we do not fall back to positional loc = self.index.get_loc(label) if is_integer(loc): return self._values[loc] if isinstance(self.index, MultiIndex): mi = self.index new_values = self._values[loc] if len(new_values) == 1 and mi.nlevels == 1: # If more than one level left, we can not return a scalar return new_values[0] new_index = mi[loc] new_index = maybe_droplevels(new_index, label) new_ser = self._constructor( new_values, index=new_index, name=self.name, copy=False ) if using_copy_on_write() and isinstance(loc, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) else: return self.iloc[loc] def __setitem__(self, key, value) -> None: if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) cacher_needs_updating = self._check_is_chained_assignment_possible() if key is Ellipsis: key = slice(None) if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") return self._set_values(indexer, value) try: self._set_with_engine(key, value) except KeyError: # We have a scalar (or for MultiIndex or object-dtype, scalar-like) # key that is not present in self.index. if is_integer(key): if not self.index._should_fallback_to_positional: # GH#33469 self.loc[key] = value else: # positional setter # can't use _mgr.setitem_inplace yet bc could have *both* # KeyError and then ValueError, xref GH#45070 self._set_values(key, value) else: # GH#12862 adding a new key to the Series self.loc[key] = value except (TypeError, ValueError, LossySetitemError): # The key was OK, but we cannot set the value losslessly indexer = self.index.get_loc(key) self._set_values(indexer, value) except InvalidIndexError as err: if isinstance(key, tuple) and not isinstance(self.index, MultiIndex): # cases with MultiIndex don't get here bc they raise KeyError # e.g. test_basic_getitem_setitem_corner raise KeyError( "key of type tuple not found and not a MultiIndex" ) from err if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) if ( is_list_like(value) and len(value) != len(self) and not isinstance(value, Series) and not is_object_dtype(self.dtype) ): # Series will be reindexed to have matching length inside # _where call below # GH#44265 indexer = key.nonzero()[0] self._set_values(indexer, value) return # otherwise with listlike other we interpret series[mask] = other # as series[mask] = other[mask] try: self._where(~key, value, inplace=True) except InvalidIndexError: # test_where_dups self.iloc[key] = value return else: self._set_with(key, value) if cacher_needs_updating: self._maybe_update_cacher(inplace=True) def _set_with_engine(self, key, value) -> None: loc = self.index.get_loc(key) # this is equivalent to self._values[key] = value self._mgr.setitem_inplace(loc, value) def _set_with(self, key, value) -> None: # We got here via exception-handling off of InvalidIndexError, so # key should always be listlike at this point. 
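        # tuple keys were already dispatched (or rejected with KeyError) in
        # __setitem__, so by construction they cannot reach this helper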
assert not isinstance(key, tuple) if is_iterator(key): # Without this, the call to infer_dtype will consume the generator key = list(key) if not self.index._should_fallback_to_positional: # Regardless of the key type, we're treating it as labels self._set_labels(key, value) else: # Note: key_type == "boolean" should not occur because that # should be caught by the is_bool_indexer check in __setitem__ key_type = lib.infer_dtype(key, skipna=False) if key_type == "integer": self._set_values(key, value) else: self._set_labels(key, value) def _set_labels(self, key, value) -> None: key = com.asarray_tuplesafe(key) indexer: np.ndarray = self.index.get_indexer(key) mask = indexer == -1 if mask.any(): raise KeyError(f"{key[mask]} not in index") self._set_values(indexer, value) def _set_values(self, key, value) -> None: if isinstance(key, (Index, Series)): key = key._values self._mgr = self._mgr.setitem(indexer=key, value=value) self._maybe_update_cacher() def _set_value(self, label, value, takeable: bool = False) -> None: """ Quickly set single value at passed label. If label is not contained, a new object is created with the label placed at the end of the result index. Parameters ---------- label : object Partial indexing with MultiIndex not allowed. value : object Scalar value. takeable : interpret the index as indexers, default False """ if not takeable: try: loc = self.index.get_loc(label) except KeyError: # set using a non-recursive method self.loc[label] = value return else: loc = label self._set_values(loc, value) # ---------------------------------------------------------------------- # Lookup Caching def _is_cached(self) -> bool: """Return boolean indicating if self is cached or not.""" return getattr(self, "_cacher", None) is not None def _get_cacher(self): """return my cacher or None""" cacher = getattr(self, "_cacher", None) if cacher is not None: cacher = cacher[1]() return cacher def _reset_cacher(self) -> None: """ Reset the cacher. """ if hasattr(self, "_cacher"): del self._cacher def _set_as_cached(self, item, cacher) -> None: """ Set the _cacher attribute on the calling object with a weakref to cacher. 
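        Parameters
        ----------
        item : Hashable
            Key under which ``cacher`` caches this Series, e.g. a column
            label in the parent DataFrame.
        cacher : object
            Parent object to keep a weak reference to.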
""" if using_copy_on_write(): return self._cacher = (item, weakref.ref(cacher)) def _clear_item_cache(self) -> None: # no-op for Series pass def _check_is_chained_assignment_possible(self) -> bool: """ See NDFrame._check_is_chained_assignment_possible.__doc__ """ if self._is_view and self._is_cached: ref = self._get_cacher() if ref is not None and ref._is_mixed_type: self._check_setitem_copy(t="referent", force=True) return True return super()._check_is_chained_assignment_possible() def _maybe_update_cacher( self, clear: bool = False, verify_is_copy: bool = True, inplace: bool = False ) -> None: """ See NDFrame._maybe_update_cacher.__doc__ """ # for CoW, we never want to update the parent DataFrame cache # if the Series changed, but don't keep track of any cacher if using_copy_on_write(): return cacher = getattr(self, "_cacher", None) if cacher is not None: assert self.ndim == 1 ref: DataFrame = cacher[1]() # we are trying to reference a dead referent, hence # a copy if ref is None: del self._cacher elif len(self) == len(ref) and self.name in ref.columns: # GH#42530 self.name must be in ref.columns # to ensure column still in dataframe # otherwise, either self or ref has swapped in new arrays ref._maybe_cache_changed(cacher[0], self, inplace=inplace) else: # GH#33675 we have swapped in a new array, so parent # reference to self is now invalid ref._item_cache.pop(cacher[0], None) super()._maybe_update_cacher( clear=clear, verify_is_copy=verify_is_copy, inplace=inplace ) # ---------------------------------------------------------------------- # Unsorted def _is_mixed_type(self) -> bool: return False def repeat(self, repeats: int | Sequence[int], axis: None = None) -> Series: """ Repeat elements of a Series. Returns a new Series where each element of the current Series is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty Series. axis : None Unused. Parameter needed for compatibility with DataFrame. Returns ------- Series Newly created Series with repeated elements. See Also -------- Index.repeat : Equivalent function for Index. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> s = pd.Series(['a', 'b', 'c']) >>> s 0 a 1 b 2 c dtype: object >>> s.repeat(2) 0 a 0 a 1 b 1 b 2 c 2 c dtype: object >>> s.repeat([1, 2, 3]) 0 a 1 b 1 b 2 c 2 c 2 c dtype: object """ nv.validate_repeat((), {"axis": axis}) new_index = self.index.repeat(repeats) new_values = self._values.repeat(repeats) return self._constructor(new_values, index=new_index, copy=False).__finalize__( self, method="repeat" ) def reset_index( self, level: IndexLabel = ..., *, drop: Literal[False] = ..., name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> DataFrame: ... def reset_index( self, level: IndexLabel = ..., *, drop: Literal[True], name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> Series: ... def reset_index( self, level: IndexLabel = ..., *, drop: bool = ..., name: Level = ..., inplace: Literal[True], allow_duplicates: bool = ..., ) -> None: ... def reset_index( self, level: IndexLabel = None, *, drop: bool = False, name: Level = lib.no_default, inplace: bool = False, allow_duplicates: bool = False, ) -> DataFrame | Series | None: """ Generate a new DataFrame or Series with the index reset. 
This is useful when the index needs to be treated as a column, or when the index is meaningless and needs to be reset to the default before another operation. Parameters ---------- level : int, str, tuple, or list, default optional For a Series with a MultiIndex, only remove the specified levels from the index. Removes all levels by default. drop : bool, default False Just reset the index, without inserting it as a column in the new DataFrame. name : object, optional The name to use for the column containing the original Series values. Uses ``self.name`` by default. This argument is ignored when `drop` is True. inplace : bool, default False Modify the Series in place (do not create a new object). allow_duplicates : bool, default False Allow duplicate column labels to be created. .. versionadded:: 1.5.0 Returns ------- Series or DataFrame or None When `drop` is False (the default), a DataFrame is returned. The newly created columns will come first in the DataFrame, followed by the original Series values. When `drop` is True, a `Series` is returned. In either case, if ``inplace=True``, no value is returned. See Also -------- DataFrame.reset_index: Analogous function for DataFrame. Examples -------- >>> s = pd.Series([1, 2, 3, 4], name='foo', ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx')) Generate a DataFrame with default index. >>> s.reset_index() idx foo 0 a 1 1 b 2 2 c 3 3 d 4 To specify the name of the new column use `name`. >>> s.reset_index(name='values') idx values 0 a 1 1 b 2 2 c 3 3 d 4 To generate a new Series with the default set `drop` to True. >>> s.reset_index(drop=True) 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 The `level` parameter is interesting for Series with a multi-level index. >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']), ... np.array(['one', 'two', 'one', 'two'])] >>> s2 = pd.Series( ... range(4), name='foo', ... index=pd.MultiIndex.from_arrays(arrays, ... names=['a', 'b'])) To remove a specific level from the Index, use `level`. >>> s2.reset_index(level='a') a foo b one bar 0 two bar 1 one baz 2 two baz 3 If `level` is not set, all levels are removed from the Index. >>> s2.reset_index() a b foo 0 bar one 0 1 bar two 1 2 baz one 2 3 baz two 3 """ inplace = validate_bool_kwarg(inplace, "inplace") if drop: new_index = default_index(len(self)) if level is not None: level_list: Sequence[Hashable] if not isinstance(level, (tuple, list)): level_list = [level] else: level_list = level level_list = [self.index._get_level_number(lev) for lev in level_list] if len(level_list) < self.index.nlevels: new_index = self.index.droplevel(level_list) if inplace: self.index = new_index elif using_copy_on_write(): new_ser = self.copy(deep=False) new_ser.index = new_index return new_ser.__finalize__(self, method="reset_index") else: return self._constructor( self._values.copy(), index=new_index, copy=False ).__finalize__(self, method="reset_index") elif inplace: raise TypeError( "Cannot reset_index inplace on a Series to create a DataFrame" ) else: if name is lib.no_default: # For backwards compatibility, keep columns as [0] instead of # [None] when self.name is None if self.name is None: name = 0 else: name = self.name df = self.to_frame(name) return df.reset_index( level=level, drop=drop, allow_duplicates=allow_duplicates ) return None # ---------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str: """ Return a string representation for a particular Series. 
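        The rendering respects the relevant ``display.*`` options, such as
        ``display.max_rows``.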
""" # pylint: disable=invalid-repr-returned repr_params = fmt.get_series_repr_params() return self.to_string(**repr_params) def to_string( self, buf: None = ..., na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> str: ... def to_string( self, buf: FilePath | WriteBuffer[str], na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> None: ... def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, na_rep: str = "NaN", float_format: str | None = None, header: bool = True, index: bool = True, length: bool = False, dtype: bool = False, name: bool = False, max_rows: int | None = None, min_rows: int | None = None, ) -> str | None: """ Render a string representation of the Series. Parameters ---------- buf : StringIO-like, optional Buffer to write to. na_rep : str, optional String representation of NaN to use, default 'NaN'. float_format : one-parameter function, optional Formatter function to apply to columns' elements if they are floats, default None. header : bool, default True Add the Series header (index name). index : bool, optional Add index (row) labels, default True. length : bool, default False Add the Series length. dtype : bool, default False Add the Series dtype. name : bool, default False Add the Series name if not None. max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. min_rows : int, optional The number of rows to display in a truncated repr (when number of rows is above `max_rows`). Returns ------- str or None String representation of Series if ``buf=None``, otherwise None. """ formatter = fmt.SeriesFormatter( self, name=name, length=length, header=header, index=index, dtype=dtype, na_rep=na_rep, float_format=float_format, min_rows=min_rows, max_rows=max_rows, ) result = formatter.to_string() # catch contract violations if not isinstance(result, str): raise AssertionError( "result must be of type str, type " f"of result is {repr(type(result).__name__)}" ) if buf is None: return result else: if hasattr(buf, "write"): buf.write(result) else: with open(buf, "w") as f: f.write(result) return None klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples=dedent( """Examples -------- >>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal") >>> print(s.to_markdown()) | | animal | |---:|:---------| | 0 | elk | | 1 | pig | | 2 | dog | | 3 | quetzal | Output markdown with a tabulate option. >>> print(s.to_markdown(tablefmt="grid")) +----+----------+ | | animal | +====+==========+ | 0 | elk | +----+----------+ | 1 | pig | +----+----------+ | 2 | dog | +----+----------+ | 3 | quetzal | +----+----------+""" ), ) def to_markdown( self, buf: IO[str] | None = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions = None, **kwargs, ) -> str | None: """ Print {klass} in Markdown-friendly format. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. mode : str, optional Mode in which file is opened, "wt" by default. index : bool, optional, default True Add index (row) labels. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 **kwargs These parameters will be passed to `tabulate \ <https://pypi.org/project/tabulate>`_. 
        Returns
        -------
        str
            {klass} in Markdown-friendly format.

        Notes
        -----
        Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.

        {examples}
        """
        return self.to_frame().to_markdown(
            buf, mode, index, storage_options=storage_options, **kwargs
        )

    # ----------------------------------------------------------------------

    def items(self) -> Iterable[tuple[Hashable, Any]]:
        """
        Lazily iterate over (index, value) tuples.

        This method returns an iterable tuple (index, value). This is
        convenient if you want to create a lazy iterator.

        Returns
        -------
        iterable
            Iterable of tuples containing the (index, value) pairs from a
            Series.

        See Also
        --------
        DataFrame.items : Iterate over (column name, Series) pairs.
        DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs.

        Examples
        --------
        >>> s = pd.Series(['A', 'B', 'C'])
        >>> for index, value in s.items():
        ...     print(f"Index : {index}, Value : {value}")
        Index : 0, Value : A
        Index : 1, Value : B
        Index : 2, Value : C
        """
        return zip(iter(self.index), iter(self))

    # ----------------------------------------------------------------------
    # Misc public methods

    def keys(self) -> Index:
        """
        Return alias for index.

        Returns
        -------
        Index
            Index of the Series.
        """
        return self.index

    def to_dict(self, into: type[dict] = dict) -> dict:
        """
        Convert Series to {label -> value} dict or dict-like object.

        Parameters
        ----------
        into : class, default dict
            The collections.abc.Mapping subclass to use as the return
            object. Can be the actual class or an empty instance of the
            mapping type you want.  If you want a collections.defaultdict,
            you must pass it initialized.

        Returns
        -------
        collections.abc.Mapping
            Key-value representation of Series.

        Examples
        --------
        >>> s = pd.Series([1, 2, 3, 4])
        >>> s.to_dict()
        {0: 1, 1: 2, 2: 3, 3: 4}
        >>> from collections import OrderedDict, defaultdict
        >>> s.to_dict(OrderedDict)
        OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
        >>> dd = defaultdict(list)
        >>> s.to_dict(dd)
        defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4})
        """
        # GH16122
        into_c = com.standardize_mapping(into)

        if is_object_dtype(self) or is_extension_array_dtype(self):
            return into_c((k, maybe_box_native(v)) for k, v in self.items())
        else:
            # Not an object dtype => all types will be the same so let the default
            # indexer return native python type
            return into_c(self.items())

    def to_frame(self, name: Hashable = lib.no_default) -> DataFrame:
        """
        Convert Series to DataFrame.

        Parameters
        ----------
        name : object, optional
            The passed name should substitute for the series name (if it has
            one).

        Returns
        -------
        DataFrame
            DataFrame representation of Series.

        Examples
        --------
        >>> s = pd.Series(["a", "b", "c"],
        ...               name="vals")
        >>> s.to_frame()
          vals
        0    a
        1    b
        2    c
        """
        columns: Index
        if name is lib.no_default:
            name = self.name
            if name is None:
                # default to [0], same as we would get with DataFrame(self)
                columns = default_index(1)
            else:
                columns = Index([name])
        else:
            columns = Index([name])

        mgr = self._mgr.to_2d_mgr(columns)
        df = self._constructor_expanddim(mgr)
        return df.__finalize__(self, method="to_frame")

    def _set_name(self, name, inplace: bool = False) -> Series:
        """
        Set the Series name.

        Parameters
        ----------
        name : str
        inplace : bool
            Whether to modify `self` directly or return a copy.
        """
        inplace = validate_bool_kwarg(inplace, "inplace")
        ser = self if inplace else self.copy()
        ser.name = name
        return ser

    @Appender(
        """
        Examples
        --------
        >>> ser = pd.Series([390., 350., 30., 20.],
        ...
index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], name="Max Speed") >>> ser Falcon 390.0 Falcon 350.0 Parrot 30.0 Parrot 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", "b"]).mean() a 210.0 b 185.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(ser > 100).mean() Max Speed False 25.0 True 370.0 Name: Max Speed, dtype: float64 **Grouping by Indexes** We can groupby different levels of a hierarchical index using the `level` parameter: >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], ... ['Captive', 'Wild', 'Captive', 'Wild']] >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) >>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed") >>> ser Animal Type Falcon Captive 390.0 Wild 350.0 Parrot Captive 30.0 Wild 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Animal Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level="Type").mean() Type Captive 210.0 Wild 185.0 Name: Max Speed, dtype: float64 We can also choose to include `NA` in group keys or not by defining `dropna` parameter, the default setting is `True`. >>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan]) >>> ser.groupby(level=0).sum() a 3 b 3 dtype: int64 >>> ser.groupby(level=0, dropna=False).sum() a 3 b 3 NaN 3 dtype: int64 >>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot'] >>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed") >>> ser.groupby(["a", "b", "a", np.nan]).mean() a 210.0 b 350.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean() a 210.0 b 350.0 NaN 20.0 Name: Max Speed, dtype: float64 """ ) def groupby( self, by=None, axis: Axis = 0, level: IndexLabel = None, as_index: bool = True, sort: bool = True, group_keys: bool = True, observed: bool = False, dropna: bool = True, ) -> SeriesGroupBy: from pandas.core.groupby.generic import SeriesGroupBy if level is None and by is None: raise TypeError("You have to supply one of 'by' and 'level'") if not as_index: raise TypeError("as_index=False only valid with DataFrame") axis = self._get_axis_number(axis) return SeriesGroupBy( obj=self, keys=by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, observed=observed, dropna=dropna, ) # ---------------------------------------------------------------------- # Statistics, overridden ndarray methods # TODO: integrate bottleneck def count(self): """ Return number of non-NA/null observations in the Series. Returns ------- int or Series (if level specified) Number of non-null values in the Series. See Also -------- DataFrame.count : Count non-NA cells for each column or row. Examples -------- >>> s = pd.Series([0.0, 1.0, np.nan]) >>> s.count() 2 """ return notna(self._values).sum().astype("int64") def mode(self, dropna: bool = True) -> Series: """ Return the mode(s) of the Series. The mode is the value that appears most often. There can be multiple modes. Always returns Series even if only one value is returned. Parameters ---------- dropna : bool, default True Don't consider counts of NaN/NaT. Returns ------- Series Modes of the Series in sorted order. 
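
        Examples
        --------
        >>> s = pd.Series([2, 4, 2, 2, 4, None])
        >>> s.mode()
        0    2.0
        dtype: float64

        With ``dropna=False``, missing values are counted like any other
        value:

        >>> pd.Series([2, 4, None, None]).mode(dropna=False)
        0   NaN
        dtype: float64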
""" # TODO: Add option for bins like value_counts() values = self._values if isinstance(values, np.ndarray): res_values = algorithms.mode(values, dropna=dropna) else: res_values = values._mode(dropna=dropna) # Ensure index is type stable (should always use int index) return self._constructor( res_values, index=range(len(res_values)), name=self.name, copy=False ) def unique(self) -> ArrayLike: # pylint: disable=useless-parent-delegation """ Return unique values of Series object. Uniques are returned in order of appearance. Hash table-based unique, therefore does NOT sort. Returns ------- ndarray or ExtensionArray The unique values returned as a NumPy array. See Notes. See Also -------- Series.drop_duplicates : Return Series with duplicate values removed. unique : Top-level unique method for any 1-d array-like object. Index.unique : Return Index with unique values from an Index object. Notes ----- Returns the unique values as a NumPy array. In case of an extension-array backed Series, a new :class:`~api.extensions.ExtensionArray` of that type with just the unique values is returned. This includes * Categorical * Period * Datetime with Timezone * Datetime without Timezone * Timedelta * Interval * Sparse * IntegerNA See Examples section. Examples -------- >>> pd.Series([2, 1, 3, 3], name='A').unique() array([2, 1, 3]) >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00'] Length: 1, dtype: datetime64[ns] >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern') ... for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00-05:00'] Length: 1, dtype: datetime64[ns, US/Eastern] An Categorical will return categories in the order of appearance and with the same dtype. >>> pd.Series(pd.Categorical(list('baabc'))).unique() ['b', 'a', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'), ... ordered=True)).unique() ['b', 'a', 'c'] Categories (3, object): ['a' < 'b' < 'c'] """ return super().unique() def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[False] = ..., ignore_index: bool = ..., ) -> Series: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[True], ignore_index: bool = ... ) -> None: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: bool = ..., ignore_index: bool = ... ) -> Series | None: ... def drop_duplicates( self, *, keep: DropKeep = "first", inplace: bool = False, ignore_index: bool = False, ) -> Series | None: """ Return Series with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' Method to handle dropping duplicates: - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. inplace : bool, default ``False`` If ``True``, performs operation inplace and returns None. ignore_index : bool, default ``False`` If ``True``, the resulting axis will be labeled 0, 1, …, n - 1. .. versionadded:: 2.0.0 Returns ------- Series or None Series with duplicates dropped or None if ``inplace=True``. See Also -------- Index.drop_duplicates : Equivalent method on Index. DataFrame.drop_duplicates : Equivalent method on DataFrame. Series.duplicated : Related method on Series, indicating duplicate Series values. Series.unique : Return unique values as an array. Examples -------- Generate a Series with duplicated entries. 
>>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], ... name='animal') >>> s 0 lama 1 cow 2 lama 3 beetle 4 lama 5 hippo Name: animal, dtype: object With the 'keep' parameter, the selection behaviour of duplicated values can be changed. The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> s.drop_duplicates() 0 lama 1 cow 3 beetle 5 hippo Name: animal, dtype: object The value 'last' for parameter 'keep' keeps the last occurrence for each set of duplicated entries. >>> s.drop_duplicates(keep='last') 1 cow 3 beetle 4 lama 5 hippo Name: animal, dtype: object The value ``False`` for parameter 'keep' discards all sets of duplicated entries. >>> s.drop_duplicates(keep=False) 1 cow 3 beetle 5 hippo Name: animal, dtype: object """ inplace = validate_bool_kwarg(inplace, "inplace") result = super().drop_duplicates(keep=keep) if ignore_index: result.index = default_index(len(result)) if inplace: self._update_inplace(result) return None else: return result def duplicated(self, keep: DropKeep = "first") -> Series: """ Indicate duplicate Series values. Duplicated values are indicated as ``True`` values in the resulting Series. Either all duplicates, all except the first or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' Method to handle dropping duplicates: - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- Series[bool] Series indicating whether each value has occurred in the preceding values. See Also -------- Index.duplicated : Equivalent method on pandas.Index. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Series.drop_duplicates : Remove duplicate values from Series. Examples -------- By default, for each set of duplicated values, the first occurrence is set on False and all others on True: >>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> animals.duplicated() 0 False 1 False 2 True 3 False 4 True dtype: bool which is equivalent to >>> animals.duplicated(keep='first') 0 False 1 False 2 True 3 False 4 True dtype: bool By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True: >>> animals.duplicated(keep='last') 0 True 1 False 2 True 3 False 4 False dtype: bool By setting keep on ``False``, all duplicates are True: >>> animals.duplicated(keep=False) 0 True 1 False 2 True 3 False 4 True dtype: bool """ res = self._duplicated(keep=keep) result = self._constructor(res, index=self.index, copy=False) return result.__finalize__(self, method="duplicated") def idxmin(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the minimum value. If multiple values equal the minimum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the minimum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmin : Return indices of the minimum values along the given axis. 
DataFrame.idxmin : Return index of first occurrence of minimum over requested axis. Series.idxmax : Return index *label* of the first occurrence of maximum of values. Notes ----- This method is the Series version of ``ndarray.argmin``. This method returns the label of the minimum, while ``ndarray.argmin`` returns the position. To get the position, use ``series.values.argmin()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 1], ... index=['A', 'B', 'C', 'D']) >>> s A 1.0 B NaN C 4.0 D 1.0 dtype: float64 >>> s.idxmin() 'A' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmin(skipna=False) nan """ # error: Argument 1 to "argmin" of "IndexOpsMixin" has incompatible type "Union # [int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmin(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def idxmax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the maximum value. If multiple values equal the maximum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the maximum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmax : Return indices of the maximum values along the given axis. DataFrame.idxmax : Return index of first occurrence of maximum over requested axis. Series.idxmin : Return index *label* of the first occurrence of minimum of values. Notes ----- This method is the Series version of ``ndarray.argmax``. This method returns the label of the maximum, while ``ndarray.argmax`` returns the position. To get the position, use ``series.values.argmax()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 3, 4], ... index=['A', 'B', 'C', 'D', 'E']) >>> s A 1.0 B NaN C 4.0 D 3.0 E 4.0 dtype: float64 >>> s.idxmax() 'C' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmax(skipna=False) nan """ # error: Argument 1 to "argmax" of "IndexOpsMixin" has incompatible type # "Union[int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmax(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def round(self, decimals: int = 0, *args, **kwargs) -> Series: """ Round each value in a Series to the given number of decimals. Parameters ---------- decimals : int, default 0 Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Series Rounded values of the Series. See Also -------- numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. Examples -------- >>> s = pd.Series([0.1, 1.3, 2.7]) >>> s.round() 0 0.0 1 1.0 2 3.0 dtype: float64 """ nv.validate_round(args, kwargs) result = self._values.round(decimals) result = self._constructor(result, index=self.index, copy=False).__finalize__( self, method="round" ) return result def quantile( self, q: float = ..., interpolation: QuantileInterpolation = ... 
) -> float: ... def quantile( self, q: Sequence[float] | AnyArrayLike, interpolation: QuantileInterpolation = ..., ) -> Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = ..., interpolation: QuantileInterpolation = ..., ) -> float | Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = 0.5, interpolation: QuantileInterpolation = "linear", ) -> float | Series: """ Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) The quantile(s) to compute, which can lie in range: 0 <= q <= 1. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- float or Series If ``q`` is an array, a Series will be returned where the index is ``q`` and the values are the quantiles, otherwise a float will be returned. See Also -------- core.window.Rolling.quantile : Calculate the rolling quantile. numpy.percentile : Returns the q-th percentile(s) of the array elements. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.quantile(.5) 2.5 >>> s.quantile([.25, .5, .75]) 0.25 1.75 0.50 2.50 0.75 3.25 dtype: float64 """ validate_percentile(q) # We dispatch to DataFrame so that core.internals only has to worry # about 2D cases. df = self.to_frame() result = df.quantile(q=q, interpolation=interpolation, numeric_only=False) if result.ndim == 2: result = result.iloc[:, 0] if is_list_like(q): result.name = self.name idx = Index(q, dtype=np.float64) return self._constructor(result, index=idx, name=self.name) else: # scalar return result.iloc[0] def corr( self, other: Series, method: CorrelationMethod = "pearson", min_periods: int | None = None, ) -> float: """ Compute correlation with `other` Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the correlation function is applied. Parameters ---------- other : Series Series with which to compute the correlation. method : {'pearson', 'kendall', 'spearman'} or callable Method used to compute correlation: - pearson : Standard correlation coefficient - kendall : Kendall Tau correlation coefficient - spearman : Spearman rank correlation - callable: Callable with input two 1d ndarrays and returning a float. .. warning:: Note that the returned matrix from corr will have 1 along the diagonals and will be symmetric regardless of the callable's behavior. min_periods : int, optional Minimum number of observations needed to have a valid result. Returns ------- float Correlation with other. See Also -------- DataFrame.corr : Compute pairwise correlation between columns. DataFrame.corrwith : Compute pairwise correlation with another DataFrame or Series. Notes ----- Pearson, Kendall and Spearman correlation are currently computed using pairwise complete observations. 
        * `Pearson correlation coefficient
          <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_
        * `Kendall rank correlation coefficient
          <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_
        * `Spearman's rank correlation coefficient
          <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_

        Examples
        --------
        >>> def histogram_intersection(a, b):
        ...     v = np.minimum(a, b).sum().round(decimals=1)
        ...     return v
        >>> s1 = pd.Series([.2, .0, .6, .2])
        >>> s2 = pd.Series([.3, .6, .0, .1])
        >>> s1.corr(s2, method=histogram_intersection)
        0.3
        """  # noqa:E501
        this, other = self.align(other, join="inner", copy=False)
        if len(this) == 0:
            return np.nan

        if method in ["pearson", "spearman", "kendall"] or callable(method):
            return nanops.nancorr(
                this.values, other.values, method=method, min_periods=min_periods
            )

        raise ValueError(
            "method must be either 'pearson', "
            "'spearman', 'kendall', or a callable, "
            f"'{method}' was supplied"
        )

    def cov(
        self,
        other: Series,
        min_periods: int | None = None,
        ddof: int | None = 1,
    ) -> float:
        """
        Compute covariance with Series, excluding missing values.

        The two `Series` objects are not required to be the same length and
        will be aligned internally before the covariance is calculated.

        Parameters
        ----------
        other : Series
            Series with which to compute the covariance.
        min_periods : int, optional
            Minimum number of observations needed to have a valid result.
        ddof : int, default 1
            Delta degrees of freedom.  The divisor used in calculations
            is ``N - ddof``, where ``N`` represents the number of elements.

            .. versionadded:: 1.1.0

        Returns
        -------
        float
            Covariance between Series and other normalized by N-1
            (unbiased estimator).

        See Also
        --------
        DataFrame.cov : Compute pairwise covariance of columns.

        Examples
        --------
        >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035])
        >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198])
        >>> s1.cov(s2)
        -0.01685762652715874
        """
        this, other = self.align(other, join="inner", copy=False)
        if len(this) == 0:
            return np.nan
        return nanops.nancov(
            this.values, other.values, min_periods=min_periods, ddof=ddof
        )

    @doc(
        klass="Series",
        extra_params="",
        other_klass="DataFrame",
        examples=dedent(
            """
        Difference with previous row

        >>> s = pd.Series([1, 1, 2, 3, 5, 8])
        >>> s.diff()
        0    NaN
        1    0.0
        2    1.0
        3    1.0
        4    2.0
        5    3.0
        dtype: float64

        Difference with 3rd previous row

        >>> s.diff(periods=3)
        0    NaN
        1    NaN
        2    NaN
        3    2.0
        4    4.0
        5    6.0
        dtype: float64

        Difference with following row

        >>> s.diff(periods=-1)
        0    0.0
        1   -1.0
        2   -1.0
        3   -2.0
        4   -3.0
        5    NaN
        dtype: float64

        Overflow in input dtype

        >>> s = pd.Series([1, 0], dtype=np.uint8)
        >>> s.diff()
        0      NaN
        1    255.0
        dtype: float64"""
        ),
    )
    def diff(self, periods: int = 1) -> Series:
        """
        First discrete difference of element.

        Calculates the difference of a {klass} element compared with another
        element in the {klass} (default is element in previous row).

        Parameters
        ----------
        periods : int, default 1
            Periods to shift for calculating difference, accepts negative
            values.
        {extra_params}
        Returns
        -------
        {klass}
            First differences of the Series.

        See Also
        --------
        {klass}.pct_change: Percent change over given number of periods.
        {klass}.shift: Shift index by desired number of periods with an
            optional time freq.
        {other_klass}.diff: First discrete difference of object.

        Notes
        -----
        For boolean dtypes, this uses :meth:`operator.xor` rather than
        :meth:`operator.sub`.
        The result is calculated according to current dtype in {klass},
        however dtype of the result is always float64.
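        For non-boolean dtypes, ``diff(periods)`` is equivalent to
        ``self - self.shift(periods)``.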
Examples -------- {examples} """ result = algorithms.diff(self._values, periods) return self._constructor(result, index=self.index, copy=False).__finalize__( self, method="diff" ) def autocorr(self, lag: int = 1) -> float: """ Compute the lag-N autocorrelation. This method computes the Pearson correlation between the Series and its shifted self. Parameters ---------- lag : int, default 1 Number of lags to apply before performing autocorrelation. Returns ------- float The Pearson correlation between self and self.shift(lag). See Also -------- Series.corr : Compute the correlation between two Series. Series.shift : Shift index by desired number of periods. DataFrame.corr : Compute pairwise correlation of columns. DataFrame.corrwith : Compute pairwise correlation between rows or columns of two DataFrame objects. Notes ----- If the Pearson correlation is not well defined return 'NaN'. Examples -------- >>> s = pd.Series([0.25, 0.5, 0.2, -0.05]) >>> s.autocorr() # doctest: +ELLIPSIS 0.10355... >>> s.autocorr(lag=2) # doctest: +ELLIPSIS -0.99999... If the Pearson correlation is not well defined, then 'NaN' is returned. >>> s = pd.Series([1, 0, 0, 0]) >>> s.autocorr() nan """ return self.corr(self.shift(lag)) def dot(self, other: AnyArrayLike) -> Series | np.ndarray: """ Compute the dot product between the Series and the columns of other. This method computes the dot product between the Series and another one, or the Series and each columns of a DataFrame, or the Series and each columns of an array. It can also be called using `self @ other` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the dot product with its columns. Returns ------- scalar, Series or numpy.ndarray Return the dot product of the Series and other if other is a Series, the Series of the dot product of Series and each rows of other if other is a DataFrame or a numpy.ndarray between the Series and each columns of the numpy array. See Also -------- DataFrame.dot: Compute the matrix product with the DataFrame. Series.mul: Multiplication of series and other, element-wise. Notes ----- The Series and other has to share the same index if other is a Series or a DataFrame. Examples -------- >>> s = pd.Series([0, 1, 2, 3]) >>> other = pd.Series([-1, 2, -3, 4]) >>> s.dot(other) 8 >>> s @ other 8 >>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(df) 0 24 1 14 dtype: int64 >>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(arr) array([24, 14]) """ if isinstance(other, (Series, ABCDataFrame)): common = self.index.union(other.index) if len(common) > len(self.index) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(index=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right.values else: lvals = self.values rvals = np.asarray(other) if lvals.shape[0] != rvals.shape[0]: raise Exception( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, ABCDataFrame): return self._constructor( np.dot(lvals, rvals), index=other.columns, copy=False ).__finalize__(self, method="dot") elif isinstance(other, Series): return np.dot(lvals, rvals) elif isinstance(rvals, np.ndarray): return np.dot(lvals, rvals) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. 
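        This is equivalent to calling :meth:`Series.dot`.

        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s @ s
        14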
""" return self.dot(other) def __rmatmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(np.transpose(other)) # Signature of "searchsorted" incompatible with supertype "IndexOpsMixin" def searchsorted( # type: ignore[override] self, value: NumpyValueArrayLike | ExtensionArray, side: Literal["left", "right"] = "left", sorter: NumpySorter = None, ) -> npt.NDArray[np.intp] | np.intp: return base.IndexOpsMixin.searchsorted(self, value, side=side, sorter=sorter) # ------------------------------------------------------------------- # Combination def _append( self, to_append, ignore_index: bool = False, verify_integrity: bool = False ): from pandas.core.reshape.concat import concat if isinstance(to_append, (list, tuple)): to_concat = [self] to_concat.extend(to_append) else: to_concat = [self, to_append] if any(isinstance(x, (ABCDataFrame,)) for x in to_concat[1:]): msg = "to_append should be a Series or list/tuple of Series, got DataFrame" raise TypeError(msg) return concat( to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity ) def _binop(self, other: Series, func, level=None, fill_value=None): """ Perform generic binary operation with optional fill value. Parameters ---------- other : Series func : binary operator fill_value : float or object Value to substitute for NA/null values. If both Series are NA in a location, the result will be NA regardless of the passed fill value. level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. Returns ------- Series """ if not isinstance(other, Series): raise AssertionError("Other operand must be Series") this = self if not self.index.equals(other.index): this, other = self.align(other, level=level, join="outer", copy=False) this_vals, other_vals = ops.fill_binop(this._values, other._values, fill_value) with np.errstate(all="ignore"): result = func(this_vals, other_vals) name = ops.get_op_result_name(self, other) return this._construct_result(result, name) def _construct_result( self, result: ArrayLike | tuple[ArrayLike, ArrayLike], name: Hashable ) -> Series | tuple[Series, Series]: """ Construct an appropriately-labelled Series from the result of an op. Parameters ---------- result : ndarray or ExtensionArray name : Label Returns ------- Series In the case of __divmod__ or __rdivmod__, a 2-tuple of Series. """ if isinstance(result, tuple): # produced by divmod or rdivmod res1 = self._construct_result(result[0], name=name) res2 = self._construct_result(result[1], name=name) # GH#33427 assertions to keep mypy happy assert isinstance(res1, Series) assert isinstance(res2, Series) return (res1, res2) # TODO: result should always be ArrayLike, but this fails for some # JSONArray tests dtype = getattr(result, "dtype", None) out = self._constructor(result, index=self.index, dtype=dtype) out = out.__finalize__(self) # Set the result's name after __finalize__ is called because __finalize__ # would set it back to self.name out.name = name return out _shared_docs["compare"], """ Returns ------- Series or DataFrame If axis is 0 or 'index' the result will be a Series. The resulting index will be a MultiIndex with 'self' and 'other' stacked alternately at the inner level. If axis is 1 or 'columns' the result will be a DataFrame. It will have two columns namely 'self' and 'other'. See Also -------- DataFrame.compare : Compare with another DataFrame and show differences. Notes ----- Matching NaNs will not appear as a difference. 
Examples -------- >>> s1 = pd.Series(["a", "b", "c", "d", "e"]) >>> s2 = pd.Series(["a", "a", "c", "b", "e"]) Align the differences on columns >>> s1.compare(s2) self other 1 b a 3 d b Stack the differences on indices >>> s1.compare(s2, align_axis=0) 1 self b other a 3 self d other b dtype: object Keep all original rows >>> s1.compare(s2, keep_shape=True) self other 0 NaN NaN 1 b a 2 NaN NaN 3 d b 4 NaN NaN Keep all original rows and also all original values >>> s1.compare(s2, keep_shape=True, keep_equal=True) self other 0 a a 1 b a 2 c c 3 d b 4 e e """, klass=_shared_doc_kwargs["klass"], ) def compare( self, other: Series, align_axis: Axis = 1, keep_shape: bool = False, keep_equal: bool = False, result_names: Suffixes = ("self", "other"), ) -> DataFrame | Series: return super().compare( other=other, align_axis=align_axis, keep_shape=keep_shape, keep_equal=keep_equal, result_names=result_names, ) def combine( self, other: Series | Hashable, func: Callable[[Hashable, Hashable], Hashable], fill_value: Hashable = None, ) -> Series: """ Combine the Series with a Series or scalar according to `func`. Combine the Series and `other` using `func` to perform elementwise selection for combined Series. `fill_value` is assumed when value is missing at some index from one of the two objects being combined. Parameters ---------- other : Series or scalar The value(s) to be combined with the `Series`. func : function Function that takes two scalars as inputs and returns an element. fill_value : scalar, optional The value to assume when an index is missing from one Series or the other. The default specifies to use the appropriate NaN value for the underlying dtype of the Series. Returns ------- Series The result of combining the Series with the other object. See Also -------- Series.combine_first : Combine Series values, choosing the calling Series' values first. Examples -------- Consider 2 Datasets ``s1`` and ``s2`` containing highest clocked speeds of different birds. >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0}) >>> s1 falcon 330.0 eagle 160.0 dtype: float64 >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0}) >>> s2 falcon 345.0 eagle 200.0 duck 30.0 dtype: float64 Now, to combine the two datasets and view the highest speeds of the birds across the two datasets >>> s1.combine(s2, max) duck NaN eagle 200.0 falcon 345.0 dtype: float64 In the previous example, the resulting value for duck is missing, because the maximum of a NaN and a float is a NaN. So, in the example, we set ``fill_value=0``, so the maximum value returned will be the value from some dataset. 
>>> s1.combine(s2, max, fill_value=0) duck 30.0 eagle 200.0 falcon 345.0 dtype: float64 """ if fill_value is None: fill_value = na_value_for_dtype(self.dtype, compat=False) if isinstance(other, Series): # If other is a Series, result is based on union of Series, # so do this element by element new_index = self.index.union(other.index) new_name = ops.get_op_result_name(self, other) new_values = np.empty(len(new_index), dtype=object) for i, idx in enumerate(new_index): lv = self.get(idx, fill_value) rv = other.get(idx, fill_value) with np.errstate(all="ignore"): new_values[i] = func(lv, rv) else: # Assume that other is a scalar, so apply the function for # each element in the Series new_index = self.index new_values = np.empty(len(new_index), dtype=object) with np.errstate(all="ignore"): new_values[:] = [func(lv, other) for lv in self._values] new_name = self.name # try_float=False is to match agg_series npvalues = lib.maybe_convert_objects(new_values, try_float=False) res_values = maybe_cast_pointwise_result(npvalues, self.dtype, same_dtype=False) return self._constructor(res_values, index=new_index, name=new_name, copy=False) def combine_first(self, other) -> Series: """ Update null elements with value in the same location in 'other'. Combine two Series objects by filling null values in one Series with non-null values from the other Series. Result index will be the union of the two indexes. Parameters ---------- other : Series The value(s) to be used for filling null values. Returns ------- Series The result of combining the provided Series with the other object. See Also -------- Series.combine : Perform element-wise operation on two Series using a given function. Examples -------- >>> s1 = pd.Series([1, np.nan]) >>> s2 = pd.Series([3, 4, 5]) >>> s1.combine_first(s2) 0 1.0 1 4.0 2 5.0 dtype: float64 Null values still persist if the location of that null value does not exist in `other` >>> s1 = pd.Series({'falcon': np.nan, 'eagle': 160.0}) >>> s2 = pd.Series({'eagle': 200.0, 'duck': 30.0}) >>> s1.combine_first(s2) duck 30.0 eagle 160.0 falcon NaN dtype: float64 """ new_index = self.index.union(other.index) this = self.reindex(new_index, copy=False) other = other.reindex(new_index, copy=False) if this.dtype.kind == "M" and other.dtype.kind != "M": other = to_datetime(other) return this.where(notna(this), other) def update(self, other: Series | Sequence | Mapping) -> None: """ Modify Series in place using values from passed Series. Uses non-NA values from passed Series to make updates. Aligns on index. Parameters ---------- other : Series, or object coercible into Series Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6])) >>> s 0 4 1 5 2 6 dtype: int64 >>> s = pd.Series(['a', 'b', 'c']) >>> s.update(pd.Series(['d', 'e'], index=[0, 2])) >>> s 0 d 1 b 2 e dtype: object >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6, 7, 8])) >>> s 0 4 1 5 2 6 dtype: int64 If ``other`` contains NaNs the corresponding values are not updated in the original Series. 
>>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, np.nan, 6])) >>> s 0 4 1 2 2 6 dtype: int64 ``other`` can also be a non-Series object type that is coercible into a Series >>> s = pd.Series([1, 2, 3]) >>> s.update([4, np.nan, 6]) >>> s 0 4 1 2 2 6 dtype: int64 >>> s = pd.Series([1, 2, 3]) >>> s.update({1: 9}) >>> s 0 1 1 9 2 3 dtype: int64 """ if not isinstance(other, Series): other = Series(other) other = other.reindex_like(self) mask = notna(other) self._mgr = self._mgr.putmask(mask=mask, new=other) self._maybe_update_cacher() # ---------------------------------------------------------------------- # Reindexing, sorting def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[False] = ..., kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> Series: ... def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[True], kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> None: ... def sort_values( self, *, axis: Axis = 0, ascending: bool | int | Sequence[bool] | Sequence[int] = True, inplace: bool = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool = False, key: ValueKeyFunc = None, ) -> Series | None: """ Sort by the values. Sort a Series in ascending or descending order by some criterion. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. ascending : bool or list of bools, default True If True, sort values in ascending order, otherwise descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the series values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return an array-like. .. versionadded:: 1.1.0 Returns ------- Series or None Series ordered by values or None if ``inplace=True``. See Also -------- Series.sort_index : Sort by the Series indices. DataFrame.sort_values : Sort DataFrame by the values along either axis. DataFrame.sort_index : Sort DataFrame by indices. Examples -------- >>> s = pd.Series([np.nan, 1, 3, 10, 5]) >>> s 0 NaN 1 1.0 2 3.0 3 10.0 4 5.0 dtype: float64 Sort values ascending order (default behaviour) >>> s.sort_values(ascending=True) 1 1.0 2 3.0 4 5.0 3 10.0 0 NaN dtype: float64 Sort values descending order >>> s.sort_values(ascending=False) 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN dtype: float64 Sort values putting NAs first >>> s.sort_values(na_position='first') 0 NaN 1 1.0 2 3.0 4 5.0 3 10.0 dtype: float64 Sort a series of strings >>> s = pd.Series(['z', 'b', 'd', 'a', 'c']) >>> s 0 z 1 b 2 d 3 a 4 c dtype: object >>> s.sort_values() 3 a 1 b 4 c 2 d 0 z dtype: object Sort using a key function. Your `key` function will be given the ``Series`` of values and should return an array-like. 
>>> s = pd.Series(['a', 'B', 'c', 'D', 'e']) >>> s.sort_values() 1 B 3 D 0 a 2 c 4 e dtype: object >>> s.sort_values(key=lambda x: x.str.lower()) 0 a 1 B 2 c 3 D 4 e dtype: object NumPy ufuncs work well here. For example, we can sort by the ``sin`` of the value >>> s = pd.Series([-4, -2, 0, 2, 4]) >>> s.sort_values(key=np.sin) 1 -2 4 4 2 0 0 -4 3 2 dtype: int64 More complicated user-defined functions can be used, as long as they expect a Series and return an array-like >>> s.sort_values(key=lambda x: (np.tan(x.cumsum()))) 0 -4 3 2 4 4 1 -2 2 0 dtype: int64 """ inplace = validate_bool_kwarg(inplace, "inplace") # Validate the axis parameter self._get_axis_number(axis) # GH 5856/5853 if inplace and self._is_cached: raise ValueError( "This Series is a view of some other array, to " "sort in-place you must create a copy" ) if is_list_like(ascending): ascending = cast(Sequence[Union[bool, int]], ascending) if len(ascending) != 1: raise ValueError( f"Length of ascending ({len(ascending)}) must be 1 for Series" ) ascending = ascending[0] ascending = validate_ascending(ascending) if na_position not in ["first", "last"]: raise ValueError(f"invalid na_position: {na_position}") # GH 35922. Make sorting stable by leveraging nargsort values_to_sort = ensure_key_mapped(self, key)._values if key else self._values sorted_index = nargsort(values_to_sort, kind, bool(ascending), na_position) if is_range_indexer(sorted_index, len(sorted_index)): if inplace: return self._update_inplace(self) return self.copy(deep=None) result = self._constructor( self._values[sorted_index], index=self.index[sorted_index], copy=False ) if ignore_index: result.index = default_index(len(sorted_index)) if not inplace: return result.__finalize__(self, method="sort_values") self._update_inplace(result) return None def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[True], kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> None: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[False] = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: bool = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series | None: ... def sort_index( self, *, axis: Axis = 0, level: IndexLabel = None, ascending: bool | Sequence[bool] = True, inplace: bool = False, kind: SortKind = "quicksort", na_position: NaPosition = "last", sort_remaining: bool = True, ignore_index: bool = False, key: IndexKeyFunc = None, ) -> Series | None: """ Sort Series by index labels. Returns a new Series sorted by label if `inplace` argument is ``False``, otherwise updates the original series and returns None. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. level : int, optional If not None, sort on values in specified index level(s). ascending : bool or list-like of bools, default True Sort ascending vs. descending. When the index is a MultiIndex the sort direction can be controlled for each level individually. inplace : bool, default False If True, perform operation in-place. 
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- Series or None The original Series sorted by the labels or None if ``inplace=True``. See Also -------- DataFrame.sort_index: Sort DataFrame by the index. DataFrame.sort_values: Sort DataFrame by the value. Series.sort_values : Sort Series by the value. Examples -------- >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4]) >>> s.sort_index() 1 c 2 b 3 a 4 d dtype: object Sort Descending >>> s.sort_index(ascending=False) 4 d 3 a 2 b 1 c dtype: object By default NaNs are put at the end, but use `na_position` to place them at the beginning >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan]) >>> s.sort_index(na_position='first') NaN d 1.0 c 2.0 b 3.0 a dtype: object Specify index level to sort >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo', ... 'baz', 'baz', 'bar', 'bar']), ... np.array(['two', 'one', 'two', 'one', ... 'two', 'one', 'two', 'one'])] >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays) >>> s.sort_index(level=1) bar one 8 baz one 6 foo one 4 qux one 2 bar two 7 baz two 5 foo two 3 qux two 1 dtype: int64 Does not sort by remaining levels when sorting by levels >>> s.sort_index(level=1, sort_remaining=False) qux one 2 foo one 4 baz one 6 bar one 8 qux two 1 foo two 3 baz two 5 bar two 7 dtype: int64 Apply a key function before sorting >>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd']) >>> s.sort_index(key=lambda x : x.str.lower()) A 1 b 2 C 3 d 4 dtype: int64 """ return super().sort_index( axis=axis, level=level, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position, sort_remaining=sort_remaining, ignore_index=ignore_index, key=key, ) def argsort( self, axis: Axis = 0, kind: SortKind = "quicksort", order: None = None, ) -> Series: """ Return the integer indices that would sort the Series values. Override ndarray.argsort. Argsorts the value, omitting NA/null values, and places the result in the same locations as the non-NA values. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. order : None Has no effect but is accepted for compatibility with numpy. Returns ------- Series[np.intp] Positions of values within the sort order with -1 indicating nan values. 
See Also -------- numpy.ndarray.argsort : Returns the indices that would sort this array. """ values = self._values mask = isna(values) if mask.any(): result = np.full(len(self), -1, dtype=np.intp) notmask = ~mask result[notmask] = np.argsort(values[notmask], kind=kind) else: result = np.argsort(values, kind=kind) res = self._constructor( result, index=self.index, name=self.name, dtype=np.intp, copy=False ) return res.__finalize__(self, method="argsort") def nlargest( self, n: int = 5, keep: Literal["first", "last", "all"] = "first" ) -> Series: """ Return the largest `n` elements. Parameters ---------- n : int, default 5 Return this many descending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` largest values in the Series, sorted in decreasing order. See Also -------- Series.nsmallest: Get the `n` smallest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. Notes ----- Faster than ``.sort_values(ascending=False).head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Malta": 434000, "Maldives": 434000, ... "Brunei": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Malta 434000 Maldives 434000 Brunei 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` largest elements where ``n=5`` by default. >>> s.nlargest() France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3``. Default `keep` value is 'first' so Malta will be kept. >>> s.nlargest(3) France 65000000 Italy 59000000 Malta 434000 dtype: int64 The `n` largest elements where ``n=3`` and keeping the last duplicates. Brunei will be kept since it is the last with value 434000 based on the index order. >>> s.nlargest(3, keep='last') France 65000000 Italy 59000000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3`` with all duplicates kept. Note that the returned Series has five elements due to the three duplicates. >>> s.nlargest(3, keep='all') France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nlargest() def nsmallest(self, n: int = 5, keep: str = "first") -> Series: """ Return the smallest `n` elements. Parameters ---------- n : int, default 5 Return this many ascending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` smallest values in the Series, sorted in increasing order. See Also -------- Series.nlargest: Get the `n` largest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. 
Notes
        -----
        Faster than ``.sort_values().head(n)`` for small `n` relative to
        the size of the ``Series`` object.

        Examples
        --------
        >>> countries_population = {"Italy": 59000000, "France": 65000000,
        ...                         "Brunei": 434000, "Malta": 434000,
        ...                         "Maldives": 434000, "Iceland": 337000,
        ...                         "Nauru": 11300, "Tuvalu": 11300,
        ...                         "Anguilla": 11300, "Montserrat": 5200}
        >>> s = pd.Series(countries_population)
        >>> s
        Italy         59000000
        France        65000000
        Brunei          434000
        Malta           434000
        Maldives        434000
        Iceland         337000
        Nauru            11300
        Tuvalu           11300
        Anguilla         11300
        Montserrat        5200
        dtype: int64

        The `n` smallest elements where ``n=5`` by default.

        >>> s.nsmallest()
        Montserrat      5200
        Nauru          11300
        Tuvalu         11300
        Anguilla       11300
        Iceland       337000
        dtype: int64

        The `n` smallest elements where ``n=3``. Default `keep` value is
        'first' so Nauru and Tuvalu will be kept.

        >>> s.nsmallest(3)
        Montserrat     5200
        Nauru         11300
        Tuvalu        11300
        dtype: int64

        The `n` smallest elements where ``n=3`` and keeping the last
        duplicates. Anguilla and Tuvalu will be kept since they are the last
        with value 11300 based on the index order.

        >>> s.nsmallest(3, keep='last')
        Montserrat     5200
        Anguilla      11300
        Tuvalu        11300
        dtype: int64

        The `n` smallest elements where ``n=3`` with all duplicates kept. Note
        that the returned Series has four elements due to the three duplicates.

        >>> s.nsmallest(3, keep='all')
        Montserrat     5200
        Nauru         11300
        Tuvalu        11300
        Anguilla      11300
        dtype: int64
        """
        return selectn.SelectNSeries(self, n=n, keep=keep).nsmallest()

    @doc(
        klass=_shared_doc_kwargs["klass"],
        extra_params=dedent(
            """copy : bool, default True
            Whether to copy underlying data."""
        ),
        examples=dedent(
            """\
            Examples
            --------
            >>> s = pd.Series(
            ...     ["A", "B", "A", "C"],
            ...     index=[
            ...         ["Final exam", "Final exam", "Coursework", "Coursework"],
            ...         ["History", "Geography", "History", "Geography"],
            ...         ["January", "February", "March", "April"],
            ...     ],
            ... )
            >>> s
            Final exam  History     January      A
                        Geography   February     B
            Coursework  History     March        A
                        Geography   April        C
            dtype: object

            In the following example, we will swap the levels of the indices.
            Here, we will swap the levels column-wise, but levels can be swapped row-wise
            in a similar manner. Note that column-wise is the default behaviour.
            By not supplying any arguments for i and j, we swap the last and second to
            last indices.

            >>> s.swaplevel()
            Final exam  January     History      A
                        February    Geography    B
            Coursework  March       History      A
                        April       Geography    C
            dtype: object

            By supplying one argument, we can choose which index to swap the last
            index with. We can for example swap the first index with the last one as
            follows.

            >>> s.swaplevel(0)
            January     History     Final exam    A
            February    Geography   Final exam    B
            March       History     Coursework    A
            April       Geography   Coursework    C
            dtype: object

            We can also define explicitly which indices we want to swap by supplying values
            for both i and j. Here, we for example swap the first and second indices.

            >>> s.swaplevel(0, 1)
            History     Final exam  January      A
            Geography   Final exam  February     B
            History     Coursework  March        A
            Geography   Coursework  April        C
            dtype: object"""
        ),
    )
    def swaplevel(
        self, i: Level = -2, j: Level = -1, copy: bool | None = None
    ) -> Series:
        """
        Swap levels i and j in a :class:`MultiIndex`.

        Default is to swap the two innermost levels of the index.

        Parameters
        ----------
        i, j : int or str
            Levels of the indices to be swapped. Can pass level name as string.
        {extra_params}

        Returns
        -------
        {klass}
            {klass} with levels swapped in MultiIndex.
{examples} """ assert isinstance(self.index, MultiIndex) result = self.copy(deep=copy and not using_copy_on_write()) result.index = self.index.swaplevel(i, j) return result def reorder_levels(self, order: Sequence[Level]) -> Series: """ Rearrange index levels using input order. May not drop or duplicate levels. Parameters ---------- order : list of int representing new level order Reference level by number or key. Returns ------- type of caller (new object) """ if not isinstance(self.index, MultiIndex): # pragma: no cover raise Exception("Can only reorder levels on a hierarchical axis.") result = self.copy(deep=None) assert isinstance(result.index, MultiIndex) result.index = result.index.reorder_levels(order) return result def explode(self, ignore_index: bool = False) -> Series: """ Transform each element of a list-like to a row. Parameters ---------- ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.1.0 Returns ------- Series Exploded lists to rows; index will be duplicated for these rows. See Also -------- Series.str.split : Split string values on specified separator. Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame. DataFrame.melt : Unpivot a DataFrame from wide format to long format. DataFrame.explode : Explode a DataFrame from list-like columns to long format. Notes ----- This routine will explode list-likes including lists, tuples, sets, Series, and np.ndarray. The result dtype of the subset rows will be object. Scalars will be returned unchanged, and empty list-likes will result in a np.nan for that row. In addition, the ordering of elements in the output will be non-deterministic when exploding sets. Reference :ref:`the user guide <reshaping.explode>` for more examples. Examples -------- >>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]]) >>> s 0 [1, 2, 3] 1 foo 2 [] 3 [3, 4] dtype: object >>> s.explode() 0 1 0 2 0 3 1 foo 2 NaN 3 3 3 4 dtype: object """ if not len(self) or not is_object_dtype(self): result = self.copy() return result.reset_index(drop=True) if ignore_index else result values, counts = reshape.explode(np.asarray(self._values)) if ignore_index: index = default_index(len(values)) else: index = self.index.repeat(counts) return self._constructor(values, index=index, name=self.name, copy=False) def unstack(self, level: IndexLabel = -1, fill_value: Hashable = None) -> DataFrame: """ Unstack, also known as pivot, Series with MultiIndex to produce DataFrame. Parameters ---------- level : int, str, or list of these, default last level Level(s) to unstack, can pass level name. fill_value : scalar value, default None Value to use when replacing NaN values. Returns ------- DataFrame Unstacked Series. Notes ----- Reference :ref:`the user guide <reshaping.stacking>` for more examples. Examples -------- >>> s = pd.Series([1, 2, 3, 4], ... index=pd.MultiIndex.from_product([['one', 'two'], ... ['a', 'b']])) >>> s one a 1 b 2 two a 3 b 4 dtype: int64 >>> s.unstack(level=-1) a b one 1 2 two 3 4 >>> s.unstack(level=0) one two a 1 3 b 2 4 """ from pandas.core.reshape.reshape import unstack return unstack(self, level, fill_value) # ---------------------------------------------------------------------- # function application def map( self, arg: Callable | Mapping | Series, na_action: Literal["ignore"] | None = None, ) -> Series: """ Map values of Series according to an input mapping or function. 
Used for substituting each value in a Series with another value,
        that may be derived from a function, a ``dict`` or
        a :class:`Series`.

        Parameters
        ----------
        arg : function, collections.abc.Mapping subclass or Series
            Mapping correspondence.
        na_action : {None, 'ignore'}, default None
            If 'ignore', propagate NaN values, without passing them to the
            mapping correspondence.

        Returns
        -------
        Series
            Same index as caller.

        See Also
        --------
        Series.apply : For applying more complex functions on a Series.
        DataFrame.apply : Apply a function row-/column-wise.
        DataFrame.applymap : Apply a function elementwise on a whole DataFrame.

        Notes
        -----
        When ``arg`` is a dictionary, values in Series that are not in the
        dictionary (as keys) are converted to ``NaN``. However, if the
        dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.
        provides a method for default values), then this default is used
        rather than ``NaN``.

        Examples
        --------
        >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit'])
        >>> s
        0       cat
        1       dog
        2       NaN
        3    rabbit
        dtype: object

        ``map`` accepts a ``dict`` or a ``Series``. Values that are not found
        in the ``dict`` are converted to ``NaN``, unless the dict has a default
        value (e.g. ``defaultdict``):

        >>> s.map({'cat': 'kitten', 'dog': 'puppy'})
        0    kitten
        1     puppy
        2       NaN
        3       NaN
        dtype: object

        It also accepts a function:

        >>> s.map('I am a {}'.format)
        0       I am a cat
        1       I am a dog
        2       I am a nan
        3    I am a rabbit
        dtype: object

        To avoid applying the function to missing values (and keep them as
        ``NaN``) ``na_action='ignore'`` can be used:

        >>> s.map('I am a {}'.format, na_action='ignore')
        0       I am a cat
        1       I am a dog
        2              NaN
        3    I am a rabbit
        dtype: object
        """
        new_values = self._map_values(arg, na_action=na_action)
        return self._constructor(new_values, index=self.index, copy=False).__finalize__(
            self, method="map"
        )

    def _gotitem(self, key, ndim, subset=None) -> Series:
        """
        Sub-classes to define. Return a sliced object.

        Parameters
        ----------
        key : string / list of selections
        ndim : {1, 2}
            Requested ndim of result.
        subset : object, default None
            Subset to act on.
        """
        return self

    _agg_see_also_doc = dedent(
        """
    See Also
    --------
    Series.apply : Invoke function on a Series.
    Series.transform : Transform function producing a Series with like indexes.
    """
    )

    _agg_examples_doc = dedent(
        """
    Examples
    --------
    >>> s = pd.Series([1, 2, 3, 4])
    >>> s
    0    1
    1    2
    2    3
    3    4
    dtype: int64

    >>> s.agg('min')
    1

    >>> s.agg(['min', 'max'])
    min   1
    max   4
    dtype: int64
    """
    )

    @doc(
        _shared_docs["aggregate"],
        klass=_shared_doc_kwargs["klass"],
        axis=_shared_doc_kwargs["axis"],
        see_also=_agg_see_also_doc,
        examples=_agg_examples_doc,
    )
    def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs):
        # Validate the axis parameter
        self._get_axis_number(axis)

        # if func is None, will switch to user-provided "named aggregation" kwargs
        if func is None:
            func = dict(kwargs.items())

        op = SeriesApply(self, func, convert_dtype=False, args=args, kwargs=kwargs)
        result = op.agg()
        return result

    agg = aggregate

    # error: Signature of "any" incompatible with supertype "NDFrame"  [override]
    @overload
    def any(
        self,
        *,
        axis: Axis = ...,
        bool_only: bool | None = ...,
        skipna: bool = ...,
        level: None = ...,
        **kwargs,
    ) -> bool:
        ...

    @overload
    def any(
        self,
        *,
        axis: Axis = ...,
        bool_only: bool | None = ...,
        skipna: bool = ...,
        level: Level,
        **kwargs,
    ) -> Series | bool:
        ...

    # error: Missing return statement
    def any(  # type: ignore[empty-body]
        self,
        axis: Axis = 0,
        bool_only: bool | None = None,
        skipna: bool = True,
        level: Level | None = None,
        **kwargs,
    ) -> Series | bool:
        ...
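    # A brief usage sketch (added for clarity; the runtime implementation of
    # ``any`` is attached elsewhere, so the defs above exist for typing only):
    #
    #   >>> pd.Series([False, True]).any()
    #   True
    #   >>> pd.Series([False, False]).any()
    #   False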
    @doc(
        _shared_docs["transform"],
        klass=_shared_doc_kwargs["klass"],
        axis=_shared_doc_kwargs["axis"],
    )
    def transform(
        self, func: AggFuncType, axis: Axis = 0, *args, **kwargs
    ) -> DataFrame | Series:
        # Validate axis argument
        self._get_axis_number(axis)
        result = SeriesApply(
            self, func=func, convert_dtype=True, args=args, kwargs=kwargs
        ).transform()
        return result

    def apply(
        self,
        func: AggFuncType,
        convert_dtype: bool = True,
        args: tuple[Any, ...] = (),
        **kwargs,
    ) -> DataFrame | Series:
        """
        Invoke function on values of Series.

        Can be ufunc (a NumPy function that applies to the entire Series)
        or a Python function that only works on single values.

        Parameters
        ----------
        func : function
            Python function or NumPy ufunc to apply.
        convert_dtype : bool, default True
            Try to find better dtype for elementwise function results. If
            False, leave as dtype=object. Note that the dtype is always
            preserved for some extension array dtypes, such as Categorical.
        args : tuple
            Positional arguments passed to func after the series value.
        **kwargs
            Additional keyword arguments passed to func.

        Returns
        -------
        Series or DataFrame
            If func returns a Series object the result will be a DataFrame.

        See Also
        --------
        Series.map: For element-wise operations.
        Series.agg: Only perform aggregating type operations.
        Series.transform: Only perform transforming type operations.

        Notes
        -----
        Functions that mutate the passed object can produce unexpected
        behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
        for more details.

        Examples
        --------
        Create a series with typical summer temperatures for each city.

        >>> s = pd.Series([20, 21, 12],
        ...               index=['London', 'New York', 'Helsinki'])
        >>> s
        London      20
        New York    21
        Helsinki    12
        dtype: int64

        Square the values by defining a function and passing it as an
        argument to ``apply()``.

        >>> def square(x):
        ...     return x ** 2
        >>> s.apply(square)
        London      400
        New York    441
        Helsinki    144
        dtype: int64

        Square the values by passing an anonymous function as an
        argument to ``apply()``.

        >>> s.apply(lambda x: x ** 2)
        London      400
        New York    441
        Helsinki    144
        dtype: int64

        Define a custom function that needs additional positional
        arguments and pass these additional arguments using the
        ``args`` keyword.

        >>> def subtract_custom_value(x, custom_value):
        ...     return x - custom_value
        >>> s.apply(subtract_custom_value, args=(5,))
        London      15
        New York    16
        Helsinki     7
        dtype: int64

        Define a custom function that takes keyword arguments
        and pass these arguments to ``apply``.

        >>> def add_custom_values(x, **kwargs):
        ...     for month in kwargs:
        ...         x += kwargs[month]
        ...     return x
        >>> s.apply(add_custom_values, june=30, july=20, august=25)
        London      95
        New York    96
        Helsinki    87
        dtype: int64

        Use a function from the Numpy library.

        >>> s.apply(np.log)
        London      2.995732
        New York    3.044522
        Helsinki    2.484907
        dtype: float64
        """
        return SeriesApply(self, func, convert_dtype, args, kwargs).apply()

    def _reduce(
        self,
        op,
        name: str,
        *,
        axis: Axis = 0,
        skipna: bool = True,
        numeric_only: bool = False,
        filter_type=None,
        **kwds,
    ):
        """
        Perform a reduction operation.

        If we have an ndarray as a value, then simply perform the operation,
        otherwise delegate to the object: ExtensionArray-backed values
        dispatch to ``ExtensionArray._reduce``.
        """
        delegate = self._values

        if axis is not None:
            self._get_axis_number(axis)

        if isinstance(delegate, ExtensionArray):
            # dispatch to ExtensionArray interface
            return delegate._reduce(name, skipna=skipna, **kwds)

        else:
            # dispatch to numpy arrays
            if numeric_only and not is_numeric_dtype(self.dtype):
                kwd_name = "numeric_only"
                if name in ["any", "all"]:
                    kwd_name = "bool_only"
                # GH#47500 - change to TypeError to match other methods
                raise TypeError(
                    f"Series.{name} does not allow {kwd_name}={numeric_only} "
                    "with non-numeric dtypes."
                )
            with np.errstate(all="ignore"):
                return op(delegate, skipna=skipna, **kwds)

    def _reindex_indexer(
        self,
        new_index: Index | None,
        indexer: npt.NDArray[np.intp] | None,
        copy: bool | None,
    ) -> Series:
        # Note: new_index is None iff indexer is None
        # if not None, indexer is np.intp
        if indexer is None and (
            new_index is None or new_index.names == self.index.names
        ):
            if using_copy_on_write():
                return self.copy(deep=copy)
            if copy or copy is None:
                return self.copy(deep=copy)
            return self

        new_values = algorithms.take_nd(
            self._values, indexer, allow_fill=True, fill_value=None
        )
        return self._constructor(new_values, index=new_index, copy=False)

    def _needs_reindex_multi(self, axes, method, level) -> bool:
        """
        Check if we do need a multi reindex; this is for compat with
        higher dims.
        """
        return False

    # error: Cannot determine type of 'align'
    @doc(
        NDFrame.align,  # type: ignore[has-type]
        klass=_shared_doc_kwargs["klass"],
        axes_single_arg=_shared_doc_kwargs["axes_single_arg"],
    )
    def align(
        self,
        other: Series,
        join: AlignJoin = "outer",
        axis: Axis | None = None,
        level: Level = None,
        copy: bool | None = None,
        fill_value: Hashable = None,
        method: FillnaOptions | None = None,
        limit: int | None = None,
        fill_axis: Axis = 0,
        broadcast_axis: Axis | None = None,
    ) -> Series:
        return super().align(
            other,
            join=join,
            axis=axis,
            level=level,
            copy=copy,
            fill_value=fill_value,
            method=method,
            limit=limit,
            fill_axis=fill_axis,
            broadcast_axis=broadcast_axis,
        )

    @overload
    def rename(
        self,
        index: Renamer | Hashable | None = ...,
        *,
        axis: Axis | None = ...,
        copy: bool = ...,
        inplace: Literal[True],
        level: Level | None = ...,
        errors: IgnoreRaise = ...,
    ) -> None:
        ...

    @overload
    def rename(
        self,
        index: Renamer | Hashable | None = ...,
        *,
        axis: Axis | None = ...,
        copy: bool = ...,
        inplace: Literal[False] = ...,
        level: Level | None = ...,
        errors: IgnoreRaise = ...,
    ) -> Series:
        ...

    @overload
    def rename(
        self,
        index: Renamer | Hashable | None = ...,
        *,
        axis: Axis | None = ...,
        copy: bool = ...,
        inplace: bool = ...,
        level: Level | None = ...,
        errors: IgnoreRaise = ...,
    ) -> Series | None:
        ...

    def rename(
        self,
        index: Renamer | Hashable | None = None,
        *,
        axis: Axis | None = None,
        copy: bool = True,
        inplace: bool = False,
        level: Level | None = None,
        errors: IgnoreRaise = "ignore",
    ) -> Series | None:
        """
        Alter Series index labels or name.

        Function / dict values must be unique (1-to-1). Labels not contained in
        a dict / Series will be left as-is. Extra labels listed don't throw an
        error.

        Alternatively, change ``Series.name`` with a scalar value.

        See the :ref:`user guide <basics.rename>` for more.

        Parameters
        ----------
        index : scalar, hashable sequence, dict-like or function, optional
            Functions or dict-like are transformations to apply to
            the index.
            Scalar or hashable sequence-like will alter the ``Series.name``
            attribute.
        axis : {0 or 'index'}
            Unused. Parameter needed for compatibility with DataFrame.
        copy : bool, default True
            Also copy underlying data.
        inplace : bool, default False
            Whether to return a new Series.
            If True the value of copy is ignored.
        level : int or level name, default None
            In case of MultiIndex, only rename labels in the specified level.
        errors : {'ignore', 'raise'}, default 'ignore'
            If 'raise', raise `KeyError` when a `dict-like mapper` or
            `index` contains labels that are not present in the index
            being transformed.
            If 'ignore', existing keys will be renamed and extra keys will be
            ignored.

        Returns
        -------
        Series or None
            Series with index labels or name altered or None if ``inplace=True``.

        See Also
        --------
        DataFrame.rename : Corresponding DataFrame method.
        Series.rename_axis : Set the name of the axis.

        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s
        0    1
        1    2
        2    3
        dtype: int64
        >>> s.rename("my_name")  # scalar, changes Series.name
        0    1
        1    2
        2    3
        Name: my_name, dtype: int64
        >>> s.rename(lambda x: x ** 2)  # function, changes labels
        0    1
        1    2
        4    3
        dtype: int64
        >>> s.rename({1: 3, 2: 5})  # mapping, changes labels
        0    1
        3    2
        5    3
        dtype: int64
        """
        if axis is not None:
            # Make sure we raise if an invalid 'axis' is passed.
            axis = self._get_axis_number(axis)

        if callable(index) or is_dict_like(index):
            # error: Argument 1 to "_rename" of "NDFrame" has incompatible
            # type "Union[Union[Mapping[Any, Hashable], Callable[[Any],
            # Hashable]], Hashable, None]"; expected "Union[Mapping[Any,
            # Hashable], Callable[[Any], Hashable], None]"
            return super()._rename(
                index,  # type: ignore[arg-type]
                copy=copy,
                inplace=inplace,
                level=level,
                errors=errors,
            )
        else:
            return self._set_name(index, inplace=inplace)

    @Appender(
        """
        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s
        0    1
        1    2
        2    3
        dtype: int64

        >>> s.set_axis(['a', 'b', 'c'], axis=0)
        a    1
        b    2
        c    3
        dtype: int64
        """
    )
    @Substitution(
        **_shared_doc_kwargs,
        extended_summary_sub="",
        axis_description_sub="",
        see_also_sub="",
    )
    @Appender(NDFrame.set_axis.__doc__)
    def set_axis(self, labels, *, axis: Axis = 0, copy: bool | None = None) -> Series:
        return super().set_axis(labels, axis=axis, copy=copy)

    # error: Cannot determine type of 'shift'

    # ----------------------------------------------------------------------
    # Convert to types that support pd.NA

    # error: Cannot determine type of 'isna'
    # error: Return type "Series" of "isna" incompatible with return type "ndarray
    # [Any, dtype[bool_]]" in supertype "IndexOpsMixin"
    # error: Cannot determine type of 'isna'
    # error: Cannot determine type of 'notna'
    # error: Cannot determine type of 'notna'

    # ----------------------------------------------------------------------
    # Time series-oriented methods

    # error: Cannot determine type of 'asfreq'
    # error: Cannot determine type of 'resample'

    # ----------------------------------------------------------------------
    # Add index
    # ----------------------------------------------------------------------
    # Accessor Methods
    # ----------------------------------------------------------------------

    # ----------------------------------------------------------------------
    # Add plotting methods to Series
    # ----------------------------------------------------------------------
    # Template-Based Arithmetic/Comparison Methods

Series
The provided code snippet includes necessary dependencies for implementing the `_dtype_to_stata_type` function. Write a Python function `def _dtype_to_stata_type(dtype: np.dtype, column: Series) -> int` to solve the following problem:

Convert dtype types to stata types. Returns the byte of the given ordinal.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.

1 - 244 are strings of this length

Pandas              Stata
251 - for int8      byte
252 - for int16     int
253 - for int32     long
254 - for float32   float
255 - for double    double

If there are dates to convert, then dtype will already have the correct type inserted.
Here is the function:

def _dtype_to_stata_type(dtype: np.dtype, column: Series) -> int:
    """
    Convert dtype types to stata types. Returns the byte of the given ordinal.
    See TYPE_MAP and comments for an explanation. This is also explained in
    the dta spec.

    1 - 244 are strings of this length

    Pandas              Stata
    251 - for int8      byte
    252 - for int16     int
    253 - for int32     long
    254 - for float32   float
    255 - for double    double

    If there are dates to convert, then dtype will already have the correct
    type inserted.
    """
    # TODO: expand to handle datetime to integer conversion
    if dtype.type is np.object_:
        # try to coerce it to the biggest string
        # not memory efficient, what else could we
        # do?
        itemsize = max_len_string_array(ensure_object(column._values))
        return max(itemsize, 1)
    elif dtype.type is np.float64:
        return 255
    elif dtype.type is np.float32:
        return 254
    elif dtype.type is np.int32:
        return 253
    elif dtype.type is np.int16:
        return 252
    elif dtype.type is np.int8:
        return 251
    else:  # pragma : no cover
        raise NotImplementedError(f"Data type {dtype} not supported.")
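For illustration, a minimal usage sketch (the two columns below are hypothetical; assumes the imports listed with this record):

import numpy as np
import pandas as pd

byte_col = pd.Series(np.array([1, 2], dtype=np.int8))
str_col = pd.Series(["ab", "cde"])

_dtype_to_stata_type(byte_col.dtype, byte_col)  # 251, i.e. Stata "byte"
_dtype_to_stata_type(str_col.dtype, str_col)    # 3, the longest string in the column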
Convert dtype types to stata types. Returns the byte of the given ordinal.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.

1 - 244 are strings of this length

Pandas              Stata
251 - for int8      byte
252 - for int16     int
253 - for int32     long
254 - for float32   float
255 - for double    double

If there are dates to convert, then dtype will already have the correct
type inserted.
173,541
from __future__ import annotations from collections import abc import datetime from io import BytesIO import os import struct import sys from types import TracebackType from typing import ( IO, TYPE_CHECKING, Any, AnyStr, Callable, Final, Hashable, Sequence, cast, ) import warnings from dateutil.relativedelta import relativedelta import numpy as np from pandas._libs.lib import infer_dtype from pandas._libs.writers import max_len_string_array from pandas._typing import ( CompressionOptions, FilePath, ReadBuffer, StorageOptions, WriteBuffer, ) from pandas.errors import ( CategoricalConversionWarning, InvalidColumnName, PossiblePrecisionLoss, ValueLabelTypeMismatch, ) from pandas.util._decorators import ( Appender, doc, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_categorical_dtype, is_datetime64_dtype, is_numeric_dtype, ) from pandas import ( Categorical, DatetimeIndex, NaT, Timestamp, isna, to_datetime, to_timedelta, ) from pandas.core.arrays.boolean import BooleanDtype from pandas.core.arrays.integer import IntegerDtype from pandas.core.frame import DataFrame from pandas.core.indexes.base import Index from pandas.core.series import Series from pandas.core.shared_docs import _shared_docs from pandas.io.common import get_handle excessive_string_length_error: Final = """ Fixed width strings in Stata .dta files are limited to 244 (or fewer) characters. Column '{0}' does not satisfy this restriction. Use the 'version=117' parameter to write the newer (Stata 13 and later) format. """ ensure_object = algos.ensure_object class Series(base.IndexOpsMixin, NDFrame): # type: ignore[misc] """ One-dimensional ndarray with axis labels (including time series). Labels need not be unique but must be a hashable type. The object supports both integer- and label-based indexing and provides a host of methods for performing operations involving the index. Statistical methods from ndarray have been overridden to automatically exclude missing data (currently represented as NaN). Operations between Series (+, -, /, \\*, \\*\\*) align values based on their associated index values-- they need not be the same length. The result index will be the sorted union of the two indexes. Parameters ---------- data : array-like, Iterable, dict, or scalar value Contains data stored in Series. If data is a dict, argument order is maintained. index : array-like or Index (1d) Values must be hashable and have the same length as `data`. Non-unique index values are allowed. Will default to RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like and index is None, then the keys in the data are used as the index. If the index is not None, the resulting Series is reindexed with the index values. dtype : str, numpy.dtype, or ExtensionDtype, optional Data type for the output Series. If not specified, this will be inferred from `data`. See the :ref:`user guide <basics.dtypes>` for more usages. name : Hashable, default None The name to give to the Series. copy : bool, default False Copy input data. Only affects Series or 1d ndarray input. See examples. Notes ----- Please reference the :ref:`User Guide <basics.series>` for more information. Examples -------- Constructing Series from a dictionary with an Index specified >>> d = {'a': 1, 'b': 2, 'c': 3} >>> ser = pd.Series(data=d, index=['a', 'b', 'c']) >>> ser a 1 b 2 c 3 dtype: int64 The keys of the dictionary match with the Index values, hence the Index values have no effect. 
>>> d = {'a': 1, 'b': 2, 'c': 3}
    >>> ser = pd.Series(data=d, index=['x', 'y', 'z'])
    >>> ser
    x   NaN
    y   NaN
    z   NaN
    dtype: float64

    Note that the Index is first built with the keys from the dictionary.
    After this the Series is reindexed with the given Index values, hence we
    get all NaN as a result.

    Constructing Series from a list with `copy=False`.

    >>> r = [1, 2]
    >>> ser = pd.Series(r, copy=False)
    >>> ser.iloc[0] = 999
    >>> r
    [1, 2]
    >>> ser
    0    999
    1      2
    dtype: int64

    Due to input data type the Series has a `copy` of
    the original data even though `copy=False`, so
    the data is unchanged.

    Constructing Series from a 1d ndarray with `copy=False`.

    >>> r = np.array([1, 2])
    >>> ser = pd.Series(r, copy=False)
    >>> ser.iloc[0] = 999
    >>> r
    array([999,   2])
    >>> ser
    0    999
    1      2
    dtype: int64

    Due to input data type the Series has a `view` on
    the original data, so
    the data is changed as well.
    """

    _typ = "series"
    _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray)

    _name: Hashable
    _metadata: list[str] = ["name"]
    _internal_names_set = {"index"} | NDFrame._internal_names_set
    _accessors = {"dt", "cat", "str", "sparse"}
    _hidden_attrs = (
        base.IndexOpsMixin._hidden_attrs | NDFrame._hidden_attrs | frozenset([])
    )

    # Override cache_readonly bc Series is mutable
    # error: Incompatible types in assignment (expression has type "property",
    # base class "IndexOpsMixin" defined the type as "Callable[[IndexOpsMixin], bool]")
    hasnans = property(  # type: ignore[assignment]
        # error: "Callable[[IndexOpsMixin], bool]" has no attribute "fget"
        base.IndexOpsMixin.hasnans.fget,  # type: ignore[attr-defined]
        doc=base.IndexOpsMixin.hasnans.__doc__,
    )
    _mgr: SingleManager
    div: Callable[[Series, Any], Series]
    rdiv: Callable[[Series, Any], Series]

    # ----------------------------------------------------------------------
    # Constructors

    def __init__(
        self,
        data=None,
        index=None,
        dtype: Dtype | None = None,
        name=None,
        copy: bool | None = None,
        fastpath: bool = False,
    ) -> None:
        if (
            isinstance(data, (SingleBlockManager, SingleArrayManager))
            and index is None
            and dtype is None
            and (copy is False or copy is None)
        ):
            if using_copy_on_write():
                data = data.copy(deep=False)
            # GH#33357 called with just the SingleBlockManager
            NDFrame.__init__(self, data)
            if fastpath:
                # e.g.
from _box_col_values, skip validation of name object.__setattr__(self, "_name", name) else: self.name = name return if isinstance(data, (ExtensionArray, np.ndarray)): if copy is not False and using_copy_on_write(): if dtype is None or astype_is_view(data.dtype, pandas_dtype(dtype)): data = data.copy() if copy is None: copy = False # we are called internally, so short-circuit if fastpath: # data is a ndarray, index is defined if not isinstance(data, (SingleBlockManager, SingleArrayManager)): manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index) elif manager == "array": data = SingleArrayManager.from_array(data, index) elif using_copy_on_write() and not copy: data = data.copy(deep=False) if copy: data = data.copy() # skips validation of the name object.__setattr__(self, "_name", name) NDFrame.__init__(self, data) return if isinstance(data, SingleBlockManager) and using_copy_on_write() and not copy: data = data.copy(deep=False) name = ibase.maybe_extract_name(name, data, type(self)) if index is not None: index = ensure_index(index) if dtype is not None: dtype = self._validate_dtype(dtype) if data is None: index = index if index is not None else default_index(0) if len(index) or dtype is not None: data = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: data = [] if isinstance(data, MultiIndex): raise NotImplementedError( "initializing a Series from a MultiIndex is not supported" ) refs = None if isinstance(data, Index): if dtype is not None: data = data.astype(dtype, copy=False) if using_copy_on_write(): refs = data._references data = data._values else: # GH#24096 we need to ensure the index remains immutable data = data._values.copy() copy = False elif isinstance(data, np.ndarray): if len(data.dtype): # GH#13296 we are dealing with a compound dtype, which # should be treated as 2D raise ValueError( "Cannot construct a Series from an ndarray with " "compound dtype. Use DataFrame instead." ) elif isinstance(data, Series): if index is None: index = data.index data = data._mgr.copy(deep=False) else: data = data.reindex(index, copy=copy) copy = False data = data._mgr elif is_dict_like(data): data, index = self._init_dict(data, index, dtype) dtype = None copy = False elif isinstance(data, (SingleBlockManager, SingleArrayManager)): if index is None: index = data.index elif not data.index.equals(index) or copy: # GH#19275 SingleBlockManager input should only be called # internally raise AssertionError( "Cannot pass both SingleBlockManager " "`data` argument and a different " "`index` argument. `copy` must be False." ) elif isinstance(data, ExtensionArray): pass else: data = com.maybe_iterable_to_list(data) if is_list_like(data) and not len(data) and dtype is None: # GH 29405: Pre-2.0, this defaulted to float. 
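                # For example, ``pd.Series([])`` therefore gives
                # ``dtype('object')`` rather than float64.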
dtype = np.dtype(object)

        if index is None:
            if not is_list_like(data):
                data = [data]
            index = default_index(len(data))
        elif is_list_like(data):
            com.require_length_match(data, index)

        # create/copy the manager
        if isinstance(data, (SingleBlockManager, SingleArrayManager)):
            if dtype is not None:
                data = data.astype(dtype=dtype, errors="ignore", copy=copy)
            elif copy:
                data = data.copy()
        else:
            data = sanitize_array(data, index, dtype, copy)

            manager = get_option("mode.data_manager")
            if manager == "block":
                data = SingleBlockManager.from_array(data, index, refs=refs)
            elif manager == "array":
                data = SingleArrayManager.from_array(data, index)

        NDFrame.__init__(self, data)
        self.name = name
        self._set_axis(0, index)

    def _init_dict(
        self, data, index: Index | None = None, dtype: DtypeObj | None = None
    ):
        """
        Derive the "_mgr" and "index" attributes of a new Series from a
        dictionary input.

        Parameters
        ----------
        data : dict or dict-like
            Data used to populate the new Series.
        index : Index or None, default None
            Index for the new Series: if None, use dict keys.
        dtype : np.dtype, ExtensionDtype, or None, default None
            The dtype for the new Series: if None, infer from data.

        Returns
        -------
        _data : BlockManager for the new Series
        index : index for the new Series
        """
        keys: Index | tuple

        # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')]
        # raises KeyError), so we iterate the entire dict, and align
        if data:
            # GH:34717, issue was using zip to extract key and values from data.
            # using generators here degrades the performance.
            # Below is the new way of extracting the keys and values

            keys = tuple(data.keys())
            values = list(data.values())  # Generating list of values- faster way
        elif index is not None:
            # fastpath for Series(data=None). Just use broadcasting a scalar
            # instead of reindexing.
            if len(index) or dtype is not None:
                values = na_value_for_dtype(pandas_dtype(dtype), compat=False)
            else:
                values = []
            keys = index
        else:
            keys, values = (), []

        # Input is now list-like, so rely on "standard" construction:
        s = self._constructor(
            values,
            index=keys,
            dtype=dtype,
        )

        # Now we just make sure the order is respected, if any
        if data and index is not None:
            s = s.reindex(index, copy=False)

        return s._mgr, s.index

    # ----------------------------------------------------------------------

    @property
    def _constructor(self) -> Callable[..., Series]:
        return Series

    @property
    def _constructor_expanddim(self) -> Callable[..., DataFrame]:
        """
        Used when a manipulation result has one higher dimension as the
        original, such as Series.to_frame()
        """
        from pandas.core.frame import DataFrame

        return DataFrame

    # types
    @property
    def _can_hold_na(self) -> bool:
        return self._mgr._can_hold_na

    # ndarray compatibility
    @property
    def dtype(self) -> DtypeObj:
        """
        Return the dtype object of the underlying data.

        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s.dtype
        dtype('int64')
        """
        return self._mgr.dtype

    @property
    def dtypes(self) -> DtypeObj:
        """
        Return the dtype object of the underlying data.

        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s.dtypes
        dtype('int64')
        """
        # DataFrame compatibility
        return self.dtype

    @property
    def name(self) -> Hashable:
        """
        Return the name of the Series.

        The name of a Series becomes its index or column name if it is used
        to form a DataFrame. It is also used whenever displaying the Series
        using the interpreter.

        Returns
        -------
        label (hashable object)
            The name of the Series, also the column name if part of a DataFrame.

        See Also
        --------
        Series.rename : Sets the Series name when given a scalar input.
        Index.name : Corresponding Index property.
Examples
        --------
        The Series name can be set initially when calling the constructor.

        >>> s = pd.Series([1, 2, 3], dtype=np.int64, name='Numbers')
        >>> s
        0    1
        1    2
        2    3
        Name: Numbers, dtype: int64
        >>> s.name = "Integers"
        >>> s
        0    1
        1    2
        2    3
        Name: Integers, dtype: int64

        The name of a Series within a DataFrame is its column name.

        >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]],
        ...                   columns=["Odd Numbers", "Even Numbers"])
        >>> df
           Odd Numbers  Even Numbers
        0            1             2
        1            3             4
        2            5             6
        >>> df["Even Numbers"].name
        'Even Numbers'
        """
        return self._name

    @name.setter
    def name(self, value: Hashable) -> None:
        validate_all_hashable(value, error_name=f"{type(self).__name__}.name")
        object.__setattr__(self, "_name", value)

    @property
    def values(self):
        """
        Return Series as ndarray or ndarray-like depending on the dtype.

        .. warning::

           We recommend using :attr:`Series.array` or
           :meth:`Series.to_numpy`, depending on whether you need
           a reference to the underlying data or a NumPy array.

        Returns
        -------
        numpy.ndarray or ndarray-like

        See Also
        --------
        Series.array : Reference to the underlying data.
        Series.to_numpy : A NumPy array representing the underlying data.

        Examples
        --------
        >>> pd.Series([1, 2, 3]).values
        array([1, 2, 3])

        >>> pd.Series(list('aabc')).values
        array(['a', 'a', 'b', 'c'], dtype=object)

        >>> pd.Series(list('aabc')).astype('category').values
        ['a', 'a', 'b', 'c']
        Categories (3, object): ['a', 'b', 'c']

        Timezone aware datetime data is converted to UTC:

        >>> pd.Series(pd.date_range('20130101', periods=3,
        ...                         tz='US/Eastern')).values
        array(['2013-01-01T05:00:00.000000000',
               '2013-01-02T05:00:00.000000000',
               '2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]')
        """
        return self._mgr.external_values()

    @property
    def _values(self):
        """
        Return the internal repr of this data (defined by Block.interval_values).
        These are the values as stored in the Block (ndarray or ExtensionArray
        depending on the Block class), with datetime64[ns] and timedelta64[ns]
        wrapped in ExtensionArrays to match Index._values behavior.

        Differs from the public ``.values`` for certain data types, because of
        historical backwards compatibility of the public attribute (e.g. period
        returns object ndarray and datetimetz a datetime64[ns] ndarray for
        ``.values`` while it returns an ExtensionArray for ``._values`` in those
        cases).

        Differs from ``.array`` in that this still returns the numpy array if
        the Block is backed by a numpy array (except for datetime64 and
        timedelta64 dtypes), while ``.array`` ensures to always return an
        ExtensionArray.

        Overview:

        dtype       | values        | _values        | array          |
        ----------- | ------------- | -------------- | -------------- |
        Numeric     | ndarray       | ndarray        | PandasArray    |
        Category    | Categorical   | Categorical    | Categorical    |
        dt64[ns]    | ndarray[M8ns] | DatetimeArray  | DatetimeArray  |
        dt64[ns tz] | ndarray[M8ns] | DatetimeArray  | DatetimeArray  |
        td64[ns]    | ndarray[m8ns] | TimedeltaArray | ndarray[m8ns]  |
        Period      | ndarray[obj]  | PeriodArray    | PeriodArray    |
        Nullable    | EA            | EA             | EA             |

        """
        return self._mgr.internal_values()

    @property
    def _references(self) -> BlockValuesRefs | None:
        if isinstance(self._mgr, SingleArrayManager):
            return None
        return self._mgr._block.refs

    # error: Decorated property not supported
    @property  # type: ignore[misc]
    def array(self) -> ExtensionArray:
        return self._mgr.array_values()

    # ops
    def ravel(self, order: str = "C") -> ArrayLike:
        """
        Return the flattened underlying data as an ndarray or ExtensionArray.

        Returns
        -------
        numpy.ndarray or ExtensionArray
            Flattened data of the Series.

        See Also
        --------
        numpy.ndarray.ravel : Return a flattened array.
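
        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s.ravel()
        array([1, 2, 3])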
""" arr = self._values.ravel(order=order) if isinstance(arr, np.ndarray) and using_copy_on_write(): arr.flags.writeable = False return arr def __len__(self) -> int: """ Return the length of the Series. """ return len(self._mgr) def view(self, dtype: Dtype | None = None) -> Series: """ Create a new view of the Series. This function will return a new Series with a view of the same underlying values in memory, optionally reinterpreted with a new data type. The new data type must preserve the same size in bytes as to not cause index misalignment. Parameters ---------- dtype : data type Data type object or one of their string representations. Returns ------- Series A new Series object as a view of the same data in memory. See Also -------- numpy.ndarray.view : Equivalent numpy function to create a new view of the same data in memory. Notes ----- Series are instantiated with ``dtype=float64`` by default. While ``numpy.ndarray.view()`` will return a view with the same data type as the original array, ``Series.view()`` (without specified dtype) will try using ``float64`` and may fail if the original data type size in bytes is not the same. Examples -------- >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8') >>> s 0 -2 1 -1 2 0 3 1 4 2 dtype: int8 The 8 bit signed integer representation of `-1` is `0b11111111`, but the same bytes represent 255 if read as an 8 bit unsigned integer: >>> us = s.view('uint8') >>> us 0 254 1 255 2 0 3 1 4 2 dtype: uint8 The views share the same underlying values: >>> us[0] = 128 >>> s 0 -128 1 -1 2 0 3 1 4 2 dtype: int8 """ # self.array instead of self._values so we piggyback on PandasArray # implementation res_values = self.array.view(dtype) res_ser = self._constructor(res_values, index=self.index, copy=False) if isinstance(res_ser._mgr, SingleBlockManager) and using_copy_on_write(): blk = res_ser._mgr._block blk.refs = cast("BlockValuesRefs", self._references) blk.refs.add_reference(blk) # type: ignore[arg-type] return res_ser.__finalize__(self, method="view") # ---------------------------------------------------------------------- # NDArray Compat _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: """ Return the values as a NumPy array. Users should not call this directly. Rather, it is invoked by :func:`numpy.array` and :func:`numpy.asarray`. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to use for the resulting NumPy array. By default, the dtype is inferred from the data. Returns ------- numpy.ndarray The values in the series converted to a :class:`numpy.ndarray` with the specified `dtype`. See Also -------- array : Create a new array from data. Series.array : Zero-copy view to the array backing the Series. Series.to_numpy : Series method for similar behavior. 
Examples -------- >>> ser = pd.Series([1, 2, 3]) >>> np.asarray(ser) array([1, 2, 3]) For timezone-aware data, the timezones may be retained with ``dtype='object'`` >>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) >>> np.asarray(tzser, dtype="object") array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'), Timestamp('2000-01-02 00:00:00+0100', tz='CET')], dtype=object) Or the values may be localized to UTC and the tzinfo discarded with ``dtype='datetime64[ns]'`` >>> np.asarray(tzser, dtype="datetime64[ns]") # doctest: +ELLIPSIS array(['1999-12-31T23:00:00.000000000', ...], dtype='datetime64[ns]') """ values = self._values arr = np.asarray(values, dtype=dtype) if using_copy_on_write() and astype_is_view(values.dtype, arr.dtype): arr = arr.view() arr.flags.writeable = False return arr # ---------------------------------------------------------------------- # Unary Methods # coercion __float__ = _coerce_method(float) __int__ = _coerce_method(int) # ---------------------------------------------------------------------- # indexers def axes(self) -> list[Index]: """ Return a list of the row axis labels. """ return [self.index] # ---------------------------------------------------------------------- # Indexing Methods def take(self, indices, axis: Axis = 0, **kwargs) -> Series: nv.validate_take((), kwargs) indices = ensure_platform_int(indices) if ( indices.ndim == 1 and using_copy_on_write() and is_range_indexer(indices, len(self)) ): return self.copy(deep=None) new_index = self.index.take(indices) new_values = self._values.take(indices) result = self._constructor(new_values, index=new_index, fastpath=True) return result.__finalize__(self, method="take") def _take_with_is_copy(self, indices, axis: Axis = 0) -> Series: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing for the SettingWithCopyWarning). For Series this does the same as the public take (it never sets `_is_copy`). See the docstring of `take` for full explanation of the parameters. """ return self.take(indices=indices, axis=axis) def _ixs(self, i: int, axis: AxisInt = 0) -> Any: """ Return the i-th value or values in the Series by location. Parameters ---------- i : int Returns ------- scalar (int) or Series (slice, sequence) """ return self._values[i] def _slice(self, slobj: slice | np.ndarray, axis: Axis = 0) -> Series: # axis kwarg is retained for compat with NDFrame method # _slice is *always* positional return self._get_values(slobj) def __getitem__(self, key): check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) if key is Ellipsis: return self key_is_scalar = is_scalar(key) if isinstance(key, (list, tuple)): key = unpack_1tuple(key) if is_integer(key) and self.index._should_fallback_to_positional: return self._values[key] elif key_is_scalar: return self._get_value(key) if is_hashable(key): # Otherwise index.get_value will raise InvalidIndexError try: # For labels that don't resolve as scalars like tuples and frozensets result = self._get_value(key) return result except (KeyError, TypeError, InvalidIndexError): # InvalidIndexError for e.g. 
generator # see test_series_getitem_corner_generator if isinstance(key, tuple) and isinstance(self.index, MultiIndex): # We still have the corner case where a tuple is a key # in the first level of our MultiIndex return self._get_values_tuple(key) if is_iterator(key): key = list(key) if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) return self._get_values(key) return self._get_with(key) def _get_with(self, key): # other: fancy integer or otherwise if isinstance(key, slice): # _convert_slice_indexer to determine if this slice is positional # or label based, and if the latter, convert to positional slobj = self.index._convert_slice_indexer(key, kind="getitem") return self._slice(slobj) elif isinstance(key, ABCDataFrame): raise TypeError( "Indexing a Series with DataFrame is not " "supported, use the appropriate DataFrame column" ) elif isinstance(key, tuple): return self._get_values_tuple(key) elif not is_list_like(key): # e.g. scalars that aren't recognized by lib.is_scalar, GH#32684 return self.loc[key] if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)): key = list(key) if isinstance(key, Index): key_type = key.inferred_type else: key_type = lib.infer_dtype(key, skipna=False) # Note: The key_type == "boolean" case should be caught by the # com.is_bool_indexer check in __getitem__ if key_type == "integer": # We need to decide whether to treat this as a positional indexer # (i.e. self.iloc) or label-based (i.e. self.loc) if not self.index._should_fallback_to_positional: return self.loc[key] else: return self.iloc[key] # handle the dup indexing case GH#4246 return self.loc[key] def _get_values_tuple(self, key: tuple): # mpl hackaround if com.any_none(*key): # mpl compat if we look up e.g. ser[:, np.newaxis]; # see tests.series.timeseries.test_mpl_compat_hack # the asarray is needed to avoid returning a 2D DatetimeArray result = np.asarray(self._values[key]) disallow_ndim_indexing(result) return result if not isinstance(self.index, MultiIndex): raise KeyError("key of type tuple not found and not a MultiIndex") # If key is contained, would have returned by now indexer, new_index = self.index.get_loc_level(key) new_ser = self._constructor(self._values[indexer], index=new_index, copy=False) if using_copy_on_write() and isinstance(indexer, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) def _get_values(self, indexer: slice | npt.NDArray[np.bool_]) -> Series: new_mgr = self._mgr.getitem_mgr(indexer) return self._constructor(new_mgr).__finalize__(self) def _get_value(self, label, takeable: bool = False): """ Quickly retrieve single value at passed index label. 
Parameters ---------- label : object takeable : interpret the index as indexers, default False Returns ------- scalar value """ if takeable: return self._values[label] # Similar to Index.get_value, but we do not fall back to positional loc = self.index.get_loc(label) if is_integer(loc): return self._values[loc] if isinstance(self.index, MultiIndex): mi = self.index new_values = self._values[loc] if len(new_values) == 1 and mi.nlevels == 1: # If more than one level left, we can not return a scalar return new_values[0] new_index = mi[loc] new_index = maybe_droplevels(new_index, label) new_ser = self._constructor( new_values, index=new_index, name=self.name, copy=False ) if using_copy_on_write() and isinstance(loc, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) else: return self.iloc[loc] def __setitem__(self, key, value) -> None: if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) cacher_needs_updating = self._check_is_chained_assignment_possible() if key is Ellipsis: key = slice(None) if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") return self._set_values(indexer, value) try: self._set_with_engine(key, value) except KeyError: # We have a scalar (or for MultiIndex or object-dtype, scalar-like) # key that is not present in self.index. if is_integer(key): if not self.index._should_fallback_to_positional: # GH#33469 self.loc[key] = value else: # positional setter # can't use _mgr.setitem_inplace yet bc could have *both* # KeyError and then ValueError, xref GH#45070 self._set_values(key, value) else: # GH#12862 adding a new key to the Series self.loc[key] = value except (TypeError, ValueError, LossySetitemError): # The key was OK, but we cannot set the value losslessly indexer = self.index.get_loc(key) self._set_values(indexer, value) except InvalidIndexError as err: if isinstance(key, tuple) and not isinstance(self.index, MultiIndex): # cases with MultiIndex don't get here bc they raise KeyError # e.g. test_basic_getitem_setitem_corner raise KeyError( "key of type tuple not found and not a MultiIndex" ) from err if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) if ( is_list_like(value) and len(value) != len(self) and not isinstance(value, Series) and not is_object_dtype(self.dtype) ): # Series will be reindexed to have matching length inside # _where call below # GH#44265 indexer = key.nonzero()[0] self._set_values(indexer, value) return # otherwise with listlike other we interpret series[mask] = other # as series[mask] = other[mask] try: self._where(~key, value, inplace=True) except InvalidIndexError: # test_where_dups self.iloc[key] = value return else: self._set_with(key, value) if cacher_needs_updating: self._maybe_update_cacher(inplace=True) def _set_with_engine(self, key, value) -> None: loc = self.index.get_loc(key) # this is equivalent to self._values[key] = value self._mgr.setitem_inplace(loc, value) def _set_with(self, key, value) -> None: # We got here via exception-handling off of InvalidIndexError, so # key should always be listlike at this point. 
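        # Illustrative dispatch, assuming a non-integer (e.g. string) index:
        #   ser = pd.Series([10, 20], index=["a", "b"])
        #   ser[[0, 1]] = 0        # integer keys -> positional, via _set_values
        #   ser[["a", "b"]] = 0    # other keys   -> label-based, via _set_labels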
assert not isinstance(key, tuple) if is_iterator(key): # Without this, the call to infer_dtype will consume the generator key = list(key) if not self.index._should_fallback_to_positional: # Regardless of the key type, we're treating it as labels self._set_labels(key, value) else: # Note: key_type == "boolean" should not occur because that # should be caught by the is_bool_indexer check in __setitem__ key_type = lib.infer_dtype(key, skipna=False) if key_type == "integer": self._set_values(key, value) else: self._set_labels(key, value) def _set_labels(self, key, value) -> None: key = com.asarray_tuplesafe(key) indexer: np.ndarray = self.index.get_indexer(key) mask = indexer == -1 if mask.any(): raise KeyError(f"{key[mask]} not in index") self._set_values(indexer, value) def _set_values(self, key, value) -> None: if isinstance(key, (Index, Series)): key = key._values self._mgr = self._mgr.setitem(indexer=key, value=value) self._maybe_update_cacher() def _set_value(self, label, value, takeable: bool = False) -> None: """ Quickly set single value at passed label. If label is not contained, a new object is created with the label placed at the end of the result index. Parameters ---------- label : object Partial indexing with MultiIndex not allowed. value : object Scalar value. takeable : interpret the index as indexers, default False """ if not takeable: try: loc = self.index.get_loc(label) except KeyError: # set using a non-recursive method self.loc[label] = value return else: loc = label self._set_values(loc, value) # ---------------------------------------------------------------------- # Lookup Caching def _is_cached(self) -> bool: """Return boolean indicating if self is cached or not.""" return getattr(self, "_cacher", None) is not None def _get_cacher(self): """return my cacher or None""" cacher = getattr(self, "_cacher", None) if cacher is not None: cacher = cacher[1]() return cacher def _reset_cacher(self) -> None: """ Reset the cacher. """ if hasattr(self, "_cacher"): del self._cacher def _set_as_cached(self, item, cacher) -> None: """ Set the _cacher attribute on the calling object with a weakref to cacher. 
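        Under copy-on-write this is a no-op, since a child Series never
        keeps a reference back to its parent.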
""" if using_copy_on_write(): return self._cacher = (item, weakref.ref(cacher)) def _clear_item_cache(self) -> None: # no-op for Series pass def _check_is_chained_assignment_possible(self) -> bool: """ See NDFrame._check_is_chained_assignment_possible.__doc__ """ if self._is_view and self._is_cached: ref = self._get_cacher() if ref is not None and ref._is_mixed_type: self._check_setitem_copy(t="referent", force=True) return True return super()._check_is_chained_assignment_possible() def _maybe_update_cacher( self, clear: bool = False, verify_is_copy: bool = True, inplace: bool = False ) -> None: """ See NDFrame._maybe_update_cacher.__doc__ """ # for CoW, we never want to update the parent DataFrame cache # if the Series changed, but don't keep track of any cacher if using_copy_on_write(): return cacher = getattr(self, "_cacher", None) if cacher is not None: assert self.ndim == 1 ref: DataFrame = cacher[1]() # we are trying to reference a dead referent, hence # a copy if ref is None: del self._cacher elif len(self) == len(ref) and self.name in ref.columns: # GH#42530 self.name must be in ref.columns # to ensure column still in dataframe # otherwise, either self or ref has swapped in new arrays ref._maybe_cache_changed(cacher[0], self, inplace=inplace) else: # GH#33675 we have swapped in a new array, so parent # reference to self is now invalid ref._item_cache.pop(cacher[0], None) super()._maybe_update_cacher( clear=clear, verify_is_copy=verify_is_copy, inplace=inplace ) # ---------------------------------------------------------------------- # Unsorted def _is_mixed_type(self) -> bool: return False def repeat(self, repeats: int | Sequence[int], axis: None = None) -> Series: """ Repeat elements of a Series. Returns a new Series where each element of the current Series is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty Series. axis : None Unused. Parameter needed for compatibility with DataFrame. Returns ------- Series Newly created Series with repeated elements. See Also -------- Index.repeat : Equivalent function for Index. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> s = pd.Series(['a', 'b', 'c']) >>> s 0 a 1 b 2 c dtype: object >>> s.repeat(2) 0 a 0 a 1 b 1 b 2 c 2 c dtype: object >>> s.repeat([1, 2, 3]) 0 a 1 b 1 b 2 c 2 c 2 c dtype: object """ nv.validate_repeat((), {"axis": axis}) new_index = self.index.repeat(repeats) new_values = self._values.repeat(repeats) return self._constructor(new_values, index=new_index, copy=False).__finalize__( self, method="repeat" ) def reset_index( self, level: IndexLabel = ..., *, drop: Literal[False] = ..., name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> DataFrame: ... def reset_index( self, level: IndexLabel = ..., *, drop: Literal[True], name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> Series: ... def reset_index( self, level: IndexLabel = ..., *, drop: bool = ..., name: Level = ..., inplace: Literal[True], allow_duplicates: bool = ..., ) -> None: ... def reset_index( self, level: IndexLabel = None, *, drop: bool = False, name: Level = lib.no_default, inplace: bool = False, allow_duplicates: bool = False, ) -> DataFrame | Series | None: """ Generate a new DataFrame or Series with the index reset. 
This is useful when the index needs to be treated as a column, or when the index is meaningless and needs to be reset to the default before another operation. Parameters ---------- level : int, str, tuple, or list, default optional For a Series with a MultiIndex, only remove the specified levels from the index. Removes all levels by default. drop : bool, default False Just reset the index, without inserting it as a column in the new DataFrame. name : object, optional The name to use for the column containing the original Series values. Uses ``self.name`` by default. This argument is ignored when `drop` is True. inplace : bool, default False Modify the Series in place (do not create a new object). allow_duplicates : bool, default False Allow duplicate column labels to be created. .. versionadded:: 1.5.0 Returns ------- Series or DataFrame or None When `drop` is False (the default), a DataFrame is returned. The newly created columns will come first in the DataFrame, followed by the original Series values. When `drop` is True, a `Series` is returned. In either case, if ``inplace=True``, no value is returned. See Also -------- DataFrame.reset_index: Analogous function for DataFrame. Examples -------- >>> s = pd.Series([1, 2, 3, 4], name='foo', ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx')) Generate a DataFrame with default index. >>> s.reset_index() idx foo 0 a 1 1 b 2 2 c 3 3 d 4 To specify the name of the new column use `name`. >>> s.reset_index(name='values') idx values 0 a 1 1 b 2 2 c 3 3 d 4 To generate a new Series with the default set `drop` to True. >>> s.reset_index(drop=True) 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 The `level` parameter is interesting for Series with a multi-level index. >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']), ... np.array(['one', 'two', 'one', 'two'])] >>> s2 = pd.Series( ... range(4), name='foo', ... index=pd.MultiIndex.from_arrays(arrays, ... names=['a', 'b'])) To remove a specific level from the Index, use `level`. >>> s2.reset_index(level='a') a foo b one bar 0 two bar 1 one baz 2 two baz 3 If `level` is not set, all levels are removed from the Index. >>> s2.reset_index() a b foo 0 bar one 0 1 bar two 1 2 baz one 2 3 baz two 3 """ inplace = validate_bool_kwarg(inplace, "inplace") if drop: new_index = default_index(len(self)) if level is not None: level_list: Sequence[Hashable] if not isinstance(level, (tuple, list)): level_list = [level] else: level_list = level level_list = [self.index._get_level_number(lev) for lev in level_list] if len(level_list) < self.index.nlevels: new_index = self.index.droplevel(level_list) if inplace: self.index = new_index elif using_copy_on_write(): new_ser = self.copy(deep=False) new_ser.index = new_index return new_ser.__finalize__(self, method="reset_index") else: return self._constructor( self._values.copy(), index=new_index, copy=False ).__finalize__(self, method="reset_index") elif inplace: raise TypeError( "Cannot reset_index inplace on a Series to create a DataFrame" ) else: if name is lib.no_default: # For backwards compatibility, keep columns as [0] instead of # [None] when self.name is None if self.name is None: name = 0 else: name = self.name df = self.to_frame(name) return df.reset_index( level=level, drop=drop, allow_duplicates=allow_duplicates ) return None # ---------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str: """ Return a string representation for a particular Series. 
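        The output respects the active display options (for example
        ``display.max_rows``) by rendering through :meth:`Series.to_string`.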
""" # pylint: disable=invalid-repr-returned repr_params = fmt.get_series_repr_params() return self.to_string(**repr_params) def to_string( self, buf: None = ..., na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> str: ... def to_string( self, buf: FilePath | WriteBuffer[str], na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> None: ... def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, na_rep: str = "NaN", float_format: str | None = None, header: bool = True, index: bool = True, length: bool = False, dtype: bool = False, name: bool = False, max_rows: int | None = None, min_rows: int | None = None, ) -> str | None: """ Render a string representation of the Series. Parameters ---------- buf : StringIO-like, optional Buffer to write to. na_rep : str, optional String representation of NaN to use, default 'NaN'. float_format : one-parameter function, optional Formatter function to apply to columns' elements if they are floats, default None. header : bool, default True Add the Series header (index name). index : bool, optional Add index (row) labels, default True. length : bool, default False Add the Series length. dtype : bool, default False Add the Series dtype. name : bool, default False Add the Series name if not None. max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. min_rows : int, optional The number of rows to display in a truncated repr (when number of rows is above `max_rows`). Returns ------- str or None String representation of Series if ``buf=None``, otherwise None. """ formatter = fmt.SeriesFormatter( self, name=name, length=length, header=header, index=index, dtype=dtype, na_rep=na_rep, float_format=float_format, min_rows=min_rows, max_rows=max_rows, ) result = formatter.to_string() # catch contract violations if not isinstance(result, str): raise AssertionError( "result must be of type str, type " f"of result is {repr(type(result).__name__)}" ) if buf is None: return result else: if hasattr(buf, "write"): buf.write(result) else: with open(buf, "w") as f: f.write(result) return None klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples=dedent( """Examples -------- >>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal") >>> print(s.to_markdown()) | | animal | |---:|:---------| | 0 | elk | | 1 | pig | | 2 | dog | | 3 | quetzal | Output markdown with a tabulate option. >>> print(s.to_markdown(tablefmt="grid")) +----+----------+ | | animal | +====+==========+ | 0 | elk | +----+----------+ | 1 | pig | +----+----------+ | 2 | dog | +----+----------+ | 3 | quetzal | +----+----------+""" ), ) def to_markdown( self, buf: IO[str] | None = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions = None, **kwargs, ) -> str | None: """ Print {klass} in Markdown-friendly format. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. mode : str, optional Mode in which file is opened, "wt" by default. index : bool, optional, default True Add index (row) labels. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 **kwargs These parameters will be passed to `tabulate \ <https://pypi.org/project/tabulate>`_. 
Returns ------- str {klass} in Markdown-friendly format. Notes ----- Requires the `tabulate <https://pypi.org/project/tabulate>`_ package. {examples} """ return self.to_frame().to_markdown( buf, mode, index, storage_options=storage_options, **kwargs ) # ---------------------------------------------------------------------- def items(self) -> Iterable[tuple[Hashable, Any]]: """ Lazily iterate over (index, value) tuples. This method returns an iterable tuple (index, value). This is convenient if you want to create a lazy iterator. Returns ------- iterable Iterable of tuples containing the (index, value) pairs from a Series. See Also -------- DataFrame.items : Iterate over (column name, Series) pairs. DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. Examples -------- >>> s = pd.Series(['A', 'B', 'C']) >>> for index, value in s.items(): ... print(f"Index : {index}, Value : {value}") Index : 0, Value : A Index : 1, Value : B Index : 2, Value : C """ return zip(iter(self.index), iter(self)) # ---------------------------------------------------------------------- # Misc public methods def keys(self) -> Index: """ Return alias for index. Returns ------- Index Index of the Series. """ return self.index def to_dict(self, into: type[dict] = dict) -> dict: """ Convert Series to {label -> value} dict or dict-like object. Parameters ---------- into : class, default dict The collections.abc.Mapping subclass to use as the return object. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- collections.abc.Mapping Key-value representation of Series. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.to_dict() {0: 1, 1: 2, 2: 3, 3: 4} >>> from collections import OrderedDict, defaultdict >>> s.to_dict(OrderedDict) OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> dd = defaultdict(list) >>> s.to_dict(dd) defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4}) """ # GH16122 into_c = com.standardize_mapping(into) if is_object_dtype(self) or is_extension_array_dtype(self): return into_c((k, maybe_box_native(v)) for k, v in self.items()) else: # Not an object dtype => all types will be the same so let the default # indexer return native python type return into_c(self.items()) def to_frame(self, name: Hashable = lib.no_default) -> DataFrame: """ Convert Series to DataFrame. Parameters ---------- name : object, optional The passed name should substitute for the series name (if it has one). Returns ------- DataFrame DataFrame representation of Series. Examples -------- >>> s = pd.Series(["a", "b", "c"], ... name="vals") >>> s.to_frame() vals 0 a 1 b 2 c """ columns: Index if name is lib.no_default: name = self.name if name is None: # default to [0], same as we would get with DataFrame(self) columns = default_index(1) else: columns = Index([name]) else: columns = Index([name]) mgr = self._mgr.to_2d_mgr(columns) df = self._constructor_expanddim(mgr) return df.__finalize__(self, method="to_frame") def _set_name(self, name, inplace: bool = False) -> Series: """ Set the Series name. Parameters ---------- name : str inplace : bool Whether to modify `self` directly or return a copy. """ inplace = validate_bool_kwarg(inplace, "inplace") ser = self if inplace else self.copy() ser.name = name return ser """ Examples -------- >>> ser = pd.Series([390., 350., 30., 20.], ... 
index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], name="Max Speed") >>> ser Falcon 390.0 Falcon 350.0 Parrot 30.0 Parrot 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", "b"]).mean() a 210.0 b 185.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(ser > 100).mean() Max Speed False 25.0 True 370.0 Name: Max Speed, dtype: float64 **Grouping by Indexes** We can groupby different levels of a hierarchical index using the `level` parameter: >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], ... ['Captive', 'Wild', 'Captive', 'Wild']] >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) >>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed") >>> ser Animal Type Falcon Captive 390.0 Wild 350.0 Parrot Captive 30.0 Wild 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Animal Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level="Type").mean() Type Captive 210.0 Wild 185.0 Name: Max Speed, dtype: float64 We can also choose to include `NA` in group keys or not by defining `dropna` parameter, the default setting is `True`. >>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan]) >>> ser.groupby(level=0).sum() a 3 b 3 dtype: int64 >>> ser.groupby(level=0, dropna=False).sum() a 3 b 3 NaN 3 dtype: int64 >>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot'] >>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed") >>> ser.groupby(["a", "b", "a", np.nan]).mean() a 210.0 b 350.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean() a 210.0 b 350.0 NaN 20.0 Name: Max Speed, dtype: float64 """ ) def groupby( self, by=None, axis: Axis = 0, level: IndexLabel = None, as_index: bool = True, sort: bool = True, group_keys: bool = True, observed: bool = False, dropna: bool = True, ) -> SeriesGroupBy: from pandas.core.groupby.generic import SeriesGroupBy if level is None and by is None: raise TypeError("You have to supply one of 'by' and 'level'") if not as_index: raise TypeError("as_index=False only valid with DataFrame") axis = self._get_axis_number(axis) return SeriesGroupBy( obj=self, keys=by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, observed=observed, dropna=dropna, ) # ---------------------------------------------------------------------- # Statistics, overridden ndarray methods # TODO: integrate bottleneck def count(self): """ Return number of non-NA/null observations in the Series. Returns ------- int or Series (if level specified) Number of non-null values in the Series. See Also -------- DataFrame.count : Count non-NA cells for each column or row. Examples -------- >>> s = pd.Series([0.0, 1.0, np.nan]) >>> s.count() 2 """ return notna(self._values).sum().astype("int64") def mode(self, dropna: bool = True) -> Series: """ Return the mode(s) of the Series. The mode is the value that appears most often. There can be multiple modes. Always returns Series even if only one value is returned. Parameters ---------- dropna : bool, default True Don't consider counts of NaN/NaT. Returns ------- Series Modes of the Series in sorted order. 
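        Examples
        --------
        >>> s = pd.Series([2, 4, 2, 2, 4])
        >>> s.mode()
        0    2
        dtype: int64

        When several values are tied for most frequent, all of them are
        returned, sorted:

        >>> pd.Series([1, 2, 2, 1]).mode()
        0    1
        1    2
        dtype: int64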
""" # TODO: Add option for bins like value_counts() values = self._values if isinstance(values, np.ndarray): res_values = algorithms.mode(values, dropna=dropna) else: res_values = values._mode(dropna=dropna) # Ensure index is type stable (should always use int index) return self._constructor( res_values, index=range(len(res_values)), name=self.name, copy=False ) def unique(self) -> ArrayLike: # pylint: disable=useless-parent-delegation """ Return unique values of Series object. Uniques are returned in order of appearance. Hash table-based unique, therefore does NOT sort. Returns ------- ndarray or ExtensionArray The unique values returned as a NumPy array. See Notes. See Also -------- Series.drop_duplicates : Return Series with duplicate values removed. unique : Top-level unique method for any 1-d array-like object. Index.unique : Return Index with unique values from an Index object. Notes ----- Returns the unique values as a NumPy array. In case of an extension-array backed Series, a new :class:`~api.extensions.ExtensionArray` of that type with just the unique values is returned. This includes * Categorical * Period * Datetime with Timezone * Datetime without Timezone * Timedelta * Interval * Sparse * IntegerNA See Examples section. Examples -------- >>> pd.Series([2, 1, 3, 3], name='A').unique() array([2, 1, 3]) >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00'] Length: 1, dtype: datetime64[ns] >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern') ... for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00-05:00'] Length: 1, dtype: datetime64[ns, US/Eastern] An Categorical will return categories in the order of appearance and with the same dtype. >>> pd.Series(pd.Categorical(list('baabc'))).unique() ['b', 'a', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'), ... ordered=True)).unique() ['b', 'a', 'c'] Categories (3, object): ['a' < 'b' < 'c'] """ return super().unique() def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[False] = ..., ignore_index: bool = ..., ) -> Series: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[True], ignore_index: bool = ... ) -> None: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: bool = ..., ignore_index: bool = ... ) -> Series | None: ... def drop_duplicates( self, *, keep: DropKeep = "first", inplace: bool = False, ignore_index: bool = False, ) -> Series | None: """ Return Series with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' Method to handle dropping duplicates: - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. inplace : bool, default ``False`` If ``True``, performs operation inplace and returns None. ignore_index : bool, default ``False`` If ``True``, the resulting axis will be labeled 0, 1, …, n - 1. .. versionadded:: 2.0.0 Returns ------- Series or None Series with duplicates dropped or None if ``inplace=True``. See Also -------- Index.drop_duplicates : Equivalent method on Index. DataFrame.drop_duplicates : Equivalent method on DataFrame. Series.duplicated : Related method on Series, indicating duplicate Series values. Series.unique : Return unique values as an array. Examples -------- Generate a Series with duplicated entries. 
>>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], ... name='animal') >>> s 0 lama 1 cow 2 lama 3 beetle 4 lama 5 hippo Name: animal, dtype: object With the 'keep' parameter, the selection behaviour of duplicated values can be changed. The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> s.drop_duplicates() 0 lama 1 cow 3 beetle 5 hippo Name: animal, dtype: object The value 'last' for parameter 'keep' keeps the last occurrence for each set of duplicated entries. >>> s.drop_duplicates(keep='last') 1 cow 3 beetle 4 lama 5 hippo Name: animal, dtype: object The value ``False`` for parameter 'keep' discards all sets of duplicated entries. >>> s.drop_duplicates(keep=False) 1 cow 3 beetle 5 hippo Name: animal, dtype: object """ inplace = validate_bool_kwarg(inplace, "inplace") result = super().drop_duplicates(keep=keep) if ignore_index: result.index = default_index(len(result)) if inplace: self._update_inplace(result) return None else: return result def duplicated(self, keep: DropKeep = "first") -> Series: """ Indicate duplicate Series values. Duplicated values are indicated as ``True`` values in the resulting Series. Either all duplicates, all except the first or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' Method to handle dropping duplicates: - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- Series[bool] Series indicating whether each value has occurred in the preceding values. See Also -------- Index.duplicated : Equivalent method on pandas.Index. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Series.drop_duplicates : Remove duplicate values from Series. Examples -------- By default, for each set of duplicated values, the first occurrence is set on False and all others on True: >>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> animals.duplicated() 0 False 1 False 2 True 3 False 4 True dtype: bool which is equivalent to >>> animals.duplicated(keep='first') 0 False 1 False 2 True 3 False 4 True dtype: bool By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True: >>> animals.duplicated(keep='last') 0 True 1 False 2 True 3 False 4 False dtype: bool By setting keep on ``False``, all duplicates are True: >>> animals.duplicated(keep=False) 0 True 1 False 2 True 3 False 4 True dtype: bool """ res = self._duplicated(keep=keep) result = self._constructor(res, index=self.index, copy=False) return result.__finalize__(self, method="duplicated") def idxmin(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the minimum value. If multiple values equal the minimum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the minimum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmin : Return indices of the minimum values along the given axis. 
DataFrame.idxmin : Return index of first occurrence of minimum over requested axis. Series.idxmax : Return index *label* of the first occurrence of maximum of values. Notes ----- This method is the Series version of ``ndarray.argmin``. This method returns the label of the minimum, while ``ndarray.argmin`` returns the position. To get the position, use ``series.values.argmin()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 1], ... index=['A', 'B', 'C', 'D']) >>> s A 1.0 B NaN C 4.0 D 1.0 dtype: float64 >>> s.idxmin() 'A' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmin(skipna=False) nan """ # error: Argument 1 to "argmin" of "IndexOpsMixin" has incompatible type "Union # [int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmin(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def idxmax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the maximum value. If multiple values equal the maximum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the maximum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmax : Return indices of the maximum values along the given axis. DataFrame.idxmax : Return index of first occurrence of maximum over requested axis. Series.idxmin : Return index *label* of the first occurrence of minimum of values. Notes ----- This method is the Series version of ``ndarray.argmax``. This method returns the label of the maximum, while ``ndarray.argmax`` returns the position. To get the position, use ``series.values.argmax()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 3, 4], ... index=['A', 'B', 'C', 'D', 'E']) >>> s A 1.0 B NaN C 4.0 D 3.0 E 4.0 dtype: float64 >>> s.idxmax() 'C' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmax(skipna=False) nan """ # error: Argument 1 to "argmax" of "IndexOpsMixin" has incompatible type # "Union[int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmax(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def round(self, decimals: int = 0, *args, **kwargs) -> Series: """ Round each value in a Series to the given number of decimals. Parameters ---------- decimals : int, default 0 Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Series Rounded values of the Series. See Also -------- numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. Examples -------- >>> s = pd.Series([0.1, 1.3, 2.7]) >>> s.round() 0 0.0 1 1.0 2 3.0 dtype: float64 """ nv.validate_round(args, kwargs) result = self._values.round(decimals) result = self._constructor(result, index=self.index, copy=False).__finalize__( self, method="round" ) return result def quantile( self, q: float = ..., interpolation: QuantileInterpolation = ... 
) -> float: ... def quantile( self, q: Sequence[float] | AnyArrayLike, interpolation: QuantileInterpolation = ..., ) -> Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = ..., interpolation: QuantileInterpolation = ..., ) -> float | Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = 0.5, interpolation: QuantileInterpolation = "linear", ) -> float | Series: """ Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) The quantile(s) to compute, which can lie in range: 0 <= q <= 1. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- float or Series If ``q`` is an array, a Series will be returned where the index is ``q`` and the values are the quantiles, otherwise a float will be returned. See Also -------- core.window.Rolling.quantile : Calculate the rolling quantile. numpy.percentile : Returns the q-th percentile(s) of the array elements. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.quantile(.5) 2.5 >>> s.quantile([.25, .5, .75]) 0.25 1.75 0.50 2.50 0.75 3.25 dtype: float64 """ validate_percentile(q) # We dispatch to DataFrame so that core.internals only has to worry # about 2D cases. df = self.to_frame() result = df.quantile(q=q, interpolation=interpolation, numeric_only=False) if result.ndim == 2: result = result.iloc[:, 0] if is_list_like(q): result.name = self.name idx = Index(q, dtype=np.float64) return self._constructor(result, index=idx, name=self.name) else: # scalar return result.iloc[0] def corr( self, other: Series, method: CorrelationMethod = "pearson", min_periods: int | None = None, ) -> float: """ Compute correlation with `other` Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the correlation function is applied. Parameters ---------- other : Series Series with which to compute the correlation. method : {'pearson', 'kendall', 'spearman'} or callable Method used to compute correlation: - pearson : Standard correlation coefficient - kendall : Kendall Tau correlation coefficient - spearman : Spearman rank correlation - callable: Callable with input two 1d ndarrays and returning a float. .. warning:: Note that the returned matrix from corr will have 1 along the diagonals and will be symmetric regardless of the callable's behavior. min_periods : int, optional Minimum number of observations needed to have a valid result. Returns ------- float Correlation with other. See Also -------- DataFrame.corr : Compute pairwise correlation between columns. DataFrame.corrwith : Compute pairwise correlation with another DataFrame or Series. Notes ----- Pearson, Kendall and Spearman correlation are currently computed using pairwise complete observations. 
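        The two Series are first aligned on their index with an inner join, so
        only labels present in both inputs contribute to the result.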
* `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_ * `Kendall rank correlation coefficient <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_ * `Spearman's rank correlation coefficient <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_ Examples -------- >>> def histogram_intersection(a, b): ... v = np.minimum(a, b).sum().round(decimals=1) ... return v >>> s1 = pd.Series([.2, .0, .6, .2]) >>> s2 = pd.Series([.3, .6, .0, .1]) >>> s1.corr(s2, method=histogram_intersection) 0.3 """ # noqa:E501 this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan if method in ["pearson", "spearman", "kendall"] or callable(method): return nanops.nancorr( this.values, other.values, method=method, min_periods=min_periods ) raise ValueError( "method must be either 'pearson', " "'spearman', 'kendall', or a callable, " f"'{method}' was supplied" ) def cov( self, other: Series, min_periods: int | None = None, ddof: int | None = 1, ) -> float: """ Compute covariance with Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the covariance is calculated. Parameters ---------- other : Series Series with which to compute the covariance. min_periods : int, optional Minimum number of observations needed to have a valid result. ddof : int, default 1 Delta degrees of freedom. The divisor used in calculations is ``N - ddof``, where ``N`` represents the number of elements. .. versionadded:: 1.1.0 Returns ------- float Covariance between Series and other normalized by N-1 (unbiased estimator). See Also -------- DataFrame.cov : Compute pairwise covariance of columns. Examples -------- >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035]) >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198]) >>> s1.cov(s2) -0.01685762652715874 """ this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan return nanops.nancov( this.values, other.values, min_periods=min_periods, ddof=ddof ) klass="Series", extra_params="", other_klass="DataFrame", examples=dedent( """ Difference with previous row >>> s = pd.Series([1, 1, 2, 3, 5, 8]) >>> s.diff() 0 NaN 1 0.0 2 1.0 3 1.0 4 2.0 5 3.0 dtype: float64 Difference with 3rd previous row >>> s.diff(periods=3) 0 NaN 1 NaN 2 NaN 3 2.0 4 4.0 5 6.0 dtype: float64 Difference with following row >>> s.diff(periods=-1) 0 0.0 1 -1.0 2 -1.0 3 -2.0 4 -3.0 5 NaN dtype: float64 Overflow in input dtype >>> s = pd.Series([1, 0], dtype=np.uint8) >>> s.diff() 0 NaN 1 255.0 dtype: float64""" ), ) def diff(self, periods: int = 1) -> Series: """ First discrete difference of element. Calculates the difference of a {klass} element compared with another element in the {klass} (default is element in previous row). Parameters ---------- periods : int, default 1 Periods to shift for calculating difference, accepts negative values. {extra_params} Returns ------- {klass} First differences of the Series. See Also -------- {klass}.pct_change: Percent change over given number of periods. {klass}.shift: Shift index by desired number of periods with an optional time freq. {other_klass}.diff: First discrete difference of object. Notes ----- For boolean dtypes, this uses :meth:`operator.xor` rather than :meth:`operator.sub`. The result is calculated according to current dtype in {klass}, however dtype of the result is always float64. 
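        In particular, differencing unsigned integer data can wrap around
        before the cast to float64, as the overflow example illustrates.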
Examples -------- {examples} """ result = algorithms.diff(self._values, periods) return self._constructor(result, index=self.index, copy=False).__finalize__( self, method="diff" ) def autocorr(self, lag: int = 1) -> float: """ Compute the lag-N autocorrelation. This method computes the Pearson correlation between the Series and its shifted self. Parameters ---------- lag : int, default 1 Number of lags to apply before performing autocorrelation. Returns ------- float The Pearson correlation between self and self.shift(lag). See Also -------- Series.corr : Compute the correlation between two Series. Series.shift : Shift index by desired number of periods. DataFrame.corr : Compute pairwise correlation of columns. DataFrame.corrwith : Compute pairwise correlation between rows or columns of two DataFrame objects. Notes ----- If the Pearson correlation is not well defined return 'NaN'. Examples -------- >>> s = pd.Series([0.25, 0.5, 0.2, -0.05]) >>> s.autocorr() # doctest: +ELLIPSIS 0.10355... >>> s.autocorr(lag=2) # doctest: +ELLIPSIS -0.99999... If the Pearson correlation is not well defined, then 'NaN' is returned. >>> s = pd.Series([1, 0, 0, 0]) >>> s.autocorr() nan """ return self.corr(self.shift(lag)) def dot(self, other: AnyArrayLike) -> Series | np.ndarray: """ Compute the dot product between the Series and the columns of other. This method computes the dot product between the Series and another one, or the Series and each columns of a DataFrame, or the Series and each columns of an array. It can also be called using `self @ other` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the dot product with its columns. Returns ------- scalar, Series or numpy.ndarray Return the dot product of the Series and other if other is a Series, the Series of the dot product of Series and each rows of other if other is a DataFrame or a numpy.ndarray between the Series and each columns of the numpy array. See Also -------- DataFrame.dot: Compute the matrix product with the DataFrame. Series.mul: Multiplication of series and other, element-wise. Notes ----- The Series and other has to share the same index if other is a Series or a DataFrame. Examples -------- >>> s = pd.Series([0, 1, 2, 3]) >>> other = pd.Series([-1, 2, -3, 4]) >>> s.dot(other) 8 >>> s @ other 8 >>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(df) 0 24 1 14 dtype: int64 >>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(arr) array([24, 14]) """ if isinstance(other, (Series, ABCDataFrame)): common = self.index.union(other.index) if len(common) > len(self.index) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(index=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right.values else: lvals = self.values rvals = np.asarray(other) if lvals.shape[0] != rvals.shape[0]: raise Exception( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, ABCDataFrame): return self._constructor( np.dot(lvals, rvals), index=other.columns, copy=False ).__finalize__(self, method="dot") elif isinstance(other, Series): return np.dot(lvals, rvals) elif isinstance(rvals, np.ndarray): return np.dot(lvals, rvals) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. 
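        Equivalent to ``self.dot(other)``.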
""" return self.dot(other) def __rmatmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(np.transpose(other)) # Signature of "searchsorted" incompatible with supertype "IndexOpsMixin" def searchsorted( # type: ignore[override] self, value: NumpyValueArrayLike | ExtensionArray, side: Literal["left", "right"] = "left", sorter: NumpySorter = None, ) -> npt.NDArray[np.intp] | np.intp: return base.IndexOpsMixin.searchsorted(self, value, side=side, sorter=sorter) # ------------------------------------------------------------------- # Combination def _append( self, to_append, ignore_index: bool = False, verify_integrity: bool = False ): from pandas.core.reshape.concat import concat if isinstance(to_append, (list, tuple)): to_concat = [self] to_concat.extend(to_append) else: to_concat = [self, to_append] if any(isinstance(x, (ABCDataFrame,)) for x in to_concat[1:]): msg = "to_append should be a Series or list/tuple of Series, got DataFrame" raise TypeError(msg) return concat( to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity ) def _binop(self, other: Series, func, level=None, fill_value=None): """ Perform generic binary operation with optional fill value. Parameters ---------- other : Series func : binary operator fill_value : float or object Value to substitute for NA/null values. If both Series are NA in a location, the result will be NA regardless of the passed fill value. level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. Returns ------- Series """ if not isinstance(other, Series): raise AssertionError("Other operand must be Series") this = self if not self.index.equals(other.index): this, other = self.align(other, level=level, join="outer", copy=False) this_vals, other_vals = ops.fill_binop(this._values, other._values, fill_value) with np.errstate(all="ignore"): result = func(this_vals, other_vals) name = ops.get_op_result_name(self, other) return this._construct_result(result, name) def _construct_result( self, result: ArrayLike | tuple[ArrayLike, ArrayLike], name: Hashable ) -> Series | tuple[Series, Series]: """ Construct an appropriately-labelled Series from the result of an op. Parameters ---------- result : ndarray or ExtensionArray name : Label Returns ------- Series In the case of __divmod__ or __rdivmod__, a 2-tuple of Series. """ if isinstance(result, tuple): # produced by divmod or rdivmod res1 = self._construct_result(result[0], name=name) res2 = self._construct_result(result[1], name=name) # GH#33427 assertions to keep mypy happy assert isinstance(res1, Series) assert isinstance(res2, Series) return (res1, res2) # TODO: result should always be ArrayLike, but this fails for some # JSONArray tests dtype = getattr(result, "dtype", None) out = self._constructor(result, index=self.index, dtype=dtype) out = out.__finalize__(self) # Set the result's name after __finalize__ is called because __finalize__ # would set it back to self.name out.name = name return out _shared_docs["compare"], """ Returns ------- Series or DataFrame If axis is 0 or 'index' the result will be a Series. The resulting index will be a MultiIndex with 'self' and 'other' stacked alternately at the inner level. If axis is 1 or 'columns' the result will be a DataFrame. It will have two columns namely 'self' and 'other'. See Also -------- DataFrame.compare : Compare with another DataFrame and show differences. Notes ----- Matching NaNs will not appear as a difference. 
Examples -------- >>> s1 = pd.Series(["a", "b", "c", "d", "e"]) >>> s2 = pd.Series(["a", "a", "c", "b", "e"]) Align the differences on columns >>> s1.compare(s2) self other 1 b a 3 d b Stack the differences on indices >>> s1.compare(s2, align_axis=0) 1 self b other a 3 self d other b dtype: object Keep all original rows >>> s1.compare(s2, keep_shape=True) self other 0 NaN NaN 1 b a 2 NaN NaN 3 d b 4 NaN NaN Keep all original rows and also all original values >>> s1.compare(s2, keep_shape=True, keep_equal=True) self other 0 a a 1 b a 2 c c 3 d b 4 e e """, klass=_shared_doc_kwargs["klass"], ) def compare( self, other: Series, align_axis: Axis = 1, keep_shape: bool = False, keep_equal: bool = False, result_names: Suffixes = ("self", "other"), ) -> DataFrame | Series: return super().compare( other=other, align_axis=align_axis, keep_shape=keep_shape, keep_equal=keep_equal, result_names=result_names, ) def combine( self, other: Series | Hashable, func: Callable[[Hashable, Hashable], Hashable], fill_value: Hashable = None, ) -> Series: """ Combine the Series with a Series or scalar according to `func`. Combine the Series and `other` using `func` to perform elementwise selection for combined Series. `fill_value` is assumed when value is missing at some index from one of the two objects being combined. Parameters ---------- other : Series or scalar The value(s) to be combined with the `Series`. func : function Function that takes two scalars as inputs and returns an element. fill_value : scalar, optional The value to assume when an index is missing from one Series or the other. The default specifies to use the appropriate NaN value for the underlying dtype of the Series. Returns ------- Series The result of combining the Series with the other object. See Also -------- Series.combine_first : Combine Series values, choosing the calling Series' values first. Examples -------- Consider 2 Datasets ``s1`` and ``s2`` containing highest clocked speeds of different birds. >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0}) >>> s1 falcon 330.0 eagle 160.0 dtype: float64 >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0}) >>> s2 falcon 345.0 eagle 200.0 duck 30.0 dtype: float64 Now, to combine the two datasets and view the highest speeds of the birds across the two datasets >>> s1.combine(s2, max) duck NaN eagle 200.0 falcon 345.0 dtype: float64 In the previous example, the resulting value for duck is missing, because the maximum of a NaN and a float is a NaN. So, in the example, we set ``fill_value=0``, so the maximum value returned will be the value from some dataset. 
>>> s1.combine(s2, max, fill_value=0) duck 30.0 eagle 200.0 falcon 345.0 dtype: float64 """ if fill_value is None: fill_value = na_value_for_dtype(self.dtype, compat=False) if isinstance(other, Series): # If other is a Series, result is based on union of Series, # so do this element by element new_index = self.index.union(other.index) new_name = ops.get_op_result_name(self, other) new_values = np.empty(len(new_index), dtype=object) for i, idx in enumerate(new_index): lv = self.get(idx, fill_value) rv = other.get(idx, fill_value) with np.errstate(all="ignore"): new_values[i] = func(lv, rv) else: # Assume that other is a scalar, so apply the function for # each element in the Series new_index = self.index new_values = np.empty(len(new_index), dtype=object) with np.errstate(all="ignore"): new_values[:] = [func(lv, other) for lv in self._values] new_name = self.name # try_float=False is to match agg_series npvalues = lib.maybe_convert_objects(new_values, try_float=False) res_values = maybe_cast_pointwise_result(npvalues, self.dtype, same_dtype=False) return self._constructor(res_values, index=new_index, name=new_name, copy=False) def combine_first(self, other) -> Series: """ Update null elements with value in the same location in 'other'. Combine two Series objects by filling null values in one Series with non-null values from the other Series. Result index will be the union of the two indexes. Parameters ---------- other : Series The value(s) to be used for filling null values. Returns ------- Series The result of combining the provided Series with the other object. See Also -------- Series.combine : Perform element-wise operation on two Series using a given function. Examples -------- >>> s1 = pd.Series([1, np.nan]) >>> s2 = pd.Series([3, 4, 5]) >>> s1.combine_first(s2) 0 1.0 1 4.0 2 5.0 dtype: float64 Null values still persist if the location of that null value does not exist in `other` >>> s1 = pd.Series({'falcon': np.nan, 'eagle': 160.0}) >>> s2 = pd.Series({'eagle': 200.0, 'duck': 30.0}) >>> s1.combine_first(s2) duck 30.0 eagle 160.0 falcon NaN dtype: float64 """ new_index = self.index.union(other.index) this = self.reindex(new_index, copy=False) other = other.reindex(new_index, copy=False) if this.dtype.kind == "M" and other.dtype.kind != "M": other = to_datetime(other) return this.where(notna(this), other) def update(self, other: Series | Sequence | Mapping) -> None: """ Modify Series in place using values from passed Series. Uses non-NA values from passed Series to make updates. Aligns on index. Parameters ---------- other : Series, or object coercible into Series Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6])) >>> s 0 4 1 5 2 6 dtype: int64 >>> s = pd.Series(['a', 'b', 'c']) >>> s.update(pd.Series(['d', 'e'], index=[0, 2])) >>> s 0 d 1 b 2 e dtype: object >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6, 7, 8])) >>> s 0 4 1 5 2 6 dtype: int64 If ``other`` contains NaNs the corresponding values are not updated in the original Series. 
>>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, np.nan, 6])) >>> s 0 4 1 2 2 6 dtype: int64 ``other`` can also be a non-Series object type that is coercible into a Series >>> s = pd.Series([1, 2, 3]) >>> s.update([4, np.nan, 6]) >>> s 0 4 1 2 2 6 dtype: int64 >>> s = pd.Series([1, 2, 3]) >>> s.update({1: 9}) >>> s 0 1 1 9 2 3 dtype: int64 """ if not isinstance(other, Series): other = Series(other) other = other.reindex_like(self) mask = notna(other) self._mgr = self._mgr.putmask(mask=mask, new=other) self._maybe_update_cacher() # ---------------------------------------------------------------------- # Reindexing, sorting def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[False] = ..., kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> Series: ... def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[True], kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> None: ... def sort_values( self, *, axis: Axis = 0, ascending: bool | int | Sequence[bool] | Sequence[int] = True, inplace: bool = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool = False, key: ValueKeyFunc = None, ) -> Series | None: """ Sort by the values. Sort a Series in ascending or descending order by some criterion. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. ascending : bool or list of bools, default True If True, sort values in ascending order, otherwise descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the series values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return an array-like. .. versionadded:: 1.1.0 Returns ------- Series or None Series ordered by values or None if ``inplace=True``. See Also -------- Series.sort_index : Sort by the Series indices. DataFrame.sort_values : Sort DataFrame by the values along either axis. DataFrame.sort_index : Sort DataFrame by indices. Examples -------- >>> s = pd.Series([np.nan, 1, 3, 10, 5]) >>> s 0 NaN 1 1.0 2 3.0 3 10.0 4 5.0 dtype: float64 Sort values ascending order (default behaviour) >>> s.sort_values(ascending=True) 1 1.0 2 3.0 4 5.0 3 10.0 0 NaN dtype: float64 Sort values descending order >>> s.sort_values(ascending=False) 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN dtype: float64 Sort values putting NAs first >>> s.sort_values(na_position='first') 0 NaN 1 1.0 2 3.0 4 5.0 3 10.0 dtype: float64 Sort a series of strings >>> s = pd.Series(['z', 'b', 'd', 'a', 'c']) >>> s 0 z 1 b 2 d 3 a 4 c dtype: object >>> s.sort_values() 3 a 1 b 4 c 2 d 0 z dtype: object Sort using a key function. Your `key` function will be given the ``Series`` of values and should return an array-like. 
>>> s = pd.Series(['a', 'B', 'c', 'D', 'e']) >>> s.sort_values() 1 B 3 D 0 a 2 c 4 e dtype: object >>> s.sort_values(key=lambda x: x.str.lower()) 0 a 1 B 2 c 3 D 4 e dtype: object NumPy ufuncs work well here. For example, we can sort by the ``sin`` of the value >>> s = pd.Series([-4, -2, 0, 2, 4]) >>> s.sort_values(key=np.sin) 1 -2 4 4 2 0 0 -4 3 2 dtype: int64 More complicated user-defined functions can be used, as long as they expect a Series and return an array-like >>> s.sort_values(key=lambda x: (np.tan(x.cumsum()))) 0 -4 3 2 4 4 1 -2 2 0 dtype: int64 """ inplace = validate_bool_kwarg(inplace, "inplace") # Validate the axis parameter self._get_axis_number(axis) # GH 5856/5853 if inplace and self._is_cached: raise ValueError( "This Series is a view of some other array, to " "sort in-place you must create a copy" ) if is_list_like(ascending): ascending = cast(Sequence[Union[bool, int]], ascending) if len(ascending) != 1: raise ValueError( f"Length of ascending ({len(ascending)}) must be 1 for Series" ) ascending = ascending[0] ascending = validate_ascending(ascending) if na_position not in ["first", "last"]: raise ValueError(f"invalid na_position: {na_position}") # GH 35922. Make sorting stable by leveraging nargsort values_to_sort = ensure_key_mapped(self, key)._values if key else self._values sorted_index = nargsort(values_to_sort, kind, bool(ascending), na_position) if is_range_indexer(sorted_index, len(sorted_index)): if inplace: return self._update_inplace(self) return self.copy(deep=None) result = self._constructor( self._values[sorted_index], index=self.index[sorted_index], copy=False ) if ignore_index: result.index = default_index(len(sorted_index)) if not inplace: return result.__finalize__(self, method="sort_values") self._update_inplace(result) return None def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[True], kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> None: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[False] = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: bool = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series | None: ... def sort_index( self, *, axis: Axis = 0, level: IndexLabel = None, ascending: bool | Sequence[bool] = True, inplace: bool = False, kind: SortKind = "quicksort", na_position: NaPosition = "last", sort_remaining: bool = True, ignore_index: bool = False, key: IndexKeyFunc = None, ) -> Series | None: """ Sort Series by index labels. Returns a new Series sorted by label if `inplace` argument is ``False``, otherwise updates the original series and returns None. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. level : int, optional If not None, sort on values in specified index level(s). ascending : bool or list-like of bools, default True Sort ascending vs. descending. When the index is a MultiIndex the sort direction can be controlled for each level individually. inplace : bool, default False If True, perform operation in-place. 
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- Series or None The original Series sorted by the labels or None if ``inplace=True``. See Also -------- DataFrame.sort_index: Sort DataFrame by the index. DataFrame.sort_values: Sort DataFrame by the value. Series.sort_values : Sort Series by the value. Examples -------- >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4]) >>> s.sort_index() 1 c 2 b 3 a 4 d dtype: object Sort Descending >>> s.sort_index(ascending=False) 4 d 3 a 2 b 1 c dtype: object By default NaNs are put at the end, but use `na_position` to place them at the beginning >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan]) >>> s.sort_index(na_position='first') NaN d 1.0 c 2.0 b 3.0 a dtype: object Specify index level to sort >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo', ... 'baz', 'baz', 'bar', 'bar']), ... np.array(['two', 'one', 'two', 'one', ... 'two', 'one', 'two', 'one'])] >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays) >>> s.sort_index(level=1) bar one 8 baz one 6 foo one 4 qux one 2 bar two 7 baz two 5 foo two 3 qux two 1 dtype: int64 Does not sort by remaining levels when sorting by levels >>> s.sort_index(level=1, sort_remaining=False) qux one 2 foo one 4 baz one 6 bar one 8 qux two 1 foo two 3 baz two 5 bar two 7 dtype: int64 Apply a key function before sorting >>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd']) >>> s.sort_index(key=lambda x : x.str.lower()) A 1 b 2 C 3 d 4 dtype: int64 """ return super().sort_index( axis=axis, level=level, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position, sort_remaining=sort_remaining, ignore_index=ignore_index, key=key, ) def argsort( self, axis: Axis = 0, kind: SortKind = "quicksort", order: None = None, ) -> Series: """ Return the integer indices that would sort the Series values. Override ndarray.argsort. Argsorts the value, omitting NA/null values, and places the result in the same locations as the non-NA values. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. order : None Has no effect but is accepted for compatibility with numpy. Returns ------- Series[np.intp] Positions of values within the sort order with -1 indicating nan values. 
See Also -------- numpy.ndarray.argsort : Returns the indices that would sort this array. """ values = self._values mask = isna(values) if mask.any(): result = np.full(len(self), -1, dtype=np.intp) notmask = ~mask result[notmask] = np.argsort(values[notmask], kind=kind) else: result = np.argsort(values, kind=kind) res = self._constructor( result, index=self.index, name=self.name, dtype=np.intp, copy=False ) return res.__finalize__(self, method="argsort") def nlargest( self, n: int = 5, keep: Literal["first", "last", "all"] = "first" ) -> Series: """ Return the largest `n` elements. Parameters ---------- n : int, default 5 Return this many descending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` largest values in the Series, sorted in decreasing order. See Also -------- Series.nsmallest: Get the `n` smallest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. Notes ----- Faster than ``.sort_values(ascending=False).head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Malta": 434000, "Maldives": 434000, ... "Brunei": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Malta 434000 Maldives 434000 Brunei 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` largest elements where ``n=5`` by default. >>> s.nlargest() France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3``. Default `keep` value is 'first' so Malta will be kept. >>> s.nlargest(3) France 65000000 Italy 59000000 Malta 434000 dtype: int64 The `n` largest elements where ``n=3`` and keeping the last duplicates. Brunei will be kept since it is the last with value 434000 based on the index order. >>> s.nlargest(3, keep='last') France 65000000 Italy 59000000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3`` with all duplicates kept. Note that the returned Series has five elements due to the three duplicates. >>> s.nlargest(3, keep='all') France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nlargest() def nsmallest(self, n: int = 5, keep: str = "first") -> Series: """ Return the smallest `n` elements. Parameters ---------- n : int, default 5 Return this many ascending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` smallest values in the Series, sorted in increasing order. See Also -------- Series.nlargest: Get the `n` largest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. 
Notes ----- Faster than ``.sort_values().head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Brunei": 434000, "Malta": 434000, ... "Maldives": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Brunei 434000 Malta 434000 Maldives 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` smallest elements where ``n=5`` by default. >>> s.nsmallest() Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 Iceland 337000 dtype: int64 The `n` smallest elements where ``n=3``. Default `keep` value is 'first' so Nauru and Tuvalu will be kept. >>> s.nsmallest(3) Montserrat 5200 Nauru 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` and keeping the last duplicates. Anguilla and Tuvalu will be kept since they are the last with value 11300 based on the index order. >>> s.nsmallest(3, keep='last') Montserrat 5200 Anguilla 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` with all duplicates kept. Note that the returned Series has four elements due to the three duplicates. >>> s.nsmallest(3, keep='all') Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nsmallest() klass=_shared_doc_kwargs["klass"], extra_params=dedent( """copy : bool, default True Whether to copy underlying data.""" ), examples=dedent( """\ Examples -------- >>> s = pd.Series( ... ["A", "B", "A", "C"], ... index=[ ... ["Final exam", "Final exam", "Coursework", "Coursework"], ... ["History", "Geography", "History", "Geography"], ... ["January", "February", "March", "April"], ... ], ... ) >>> s Final exam History January A Geography February B Coursework History March A Geography April C dtype: object In the following example, we will swap the levels of the indices. Here, we will swap the levels column-wise, but levels can be swapped row-wise in a similar manner. Note that column-wise is the default behaviour. By not supplying any arguments for i and j, we swap the last and second to last indices. >>> s.swaplevel() Final exam January History A February Geography B Coursework March History A April Geography C dtype: object By supplying one argument, we can choose which index to swap the last index with. We can for example swap the first index with the last one as follows. >>> s.swaplevel(0) January History Final exam A February Geography Final exam B March History Coursework A April Geography Coursework C dtype: object We can also define explicitly which indices we want to swap by supplying values for both i and j. Here, we for example swap the first and second indices. >>> s.swaplevel(0, 1) History Final exam January A Geography Final exam February B History Coursework March A Geography Coursework April C dtype: object""" ), ) def swaplevel( self, i: Level = -2, j: Level = -1, copy: bool | None = None ) -> Series: """ Swap levels i and j in a :class:`MultiIndex`. Default is to swap the two innermost levels of the index. Parameters ---------- i, j : int or str Levels of the indices to be swapped. Can pass level name as string. {extra_params} Returns ------- {klass} {klass} with levels swapped in MultiIndex. 
{examples} """ assert isinstance(self.index, MultiIndex) result = self.copy(deep=copy and not using_copy_on_write()) result.index = self.index.swaplevel(i, j) return result def reorder_levels(self, order: Sequence[Level]) -> Series: """ Rearrange index levels using input order. May not drop or duplicate levels. Parameters ---------- order : list of int representing new level order Reference level by number or key. Returns ------- type of caller (new object) """ if not isinstance(self.index, MultiIndex): # pragma: no cover raise Exception("Can only reorder levels on a hierarchical axis.") result = self.copy(deep=None) assert isinstance(result.index, MultiIndex) result.index = result.index.reorder_levels(order) return result def explode(self, ignore_index: bool = False) -> Series: """ Transform each element of a list-like to a row. Parameters ---------- ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.1.0 Returns ------- Series Exploded lists to rows; index will be duplicated for these rows. See Also -------- Series.str.split : Split string values on specified separator. Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame. DataFrame.melt : Unpivot a DataFrame from wide format to long format. DataFrame.explode : Explode a DataFrame from list-like columns to long format. Notes ----- This routine will explode list-likes including lists, tuples, sets, Series, and np.ndarray. The result dtype of the subset rows will be object. Scalars will be returned unchanged, and empty list-likes will result in a np.nan for that row. In addition, the ordering of elements in the output will be non-deterministic when exploding sets. Reference :ref:`the user guide <reshaping.explode>` for more examples. Examples -------- >>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]]) >>> s 0 [1, 2, 3] 1 foo 2 [] 3 [3, 4] dtype: object >>> s.explode() 0 1 0 2 0 3 1 foo 2 NaN 3 3 3 4 dtype: object """ if not len(self) or not is_object_dtype(self): result = self.copy() return result.reset_index(drop=True) if ignore_index else result values, counts = reshape.explode(np.asarray(self._values)) if ignore_index: index = default_index(len(values)) else: index = self.index.repeat(counts) return self._constructor(values, index=index, name=self.name, copy=False) def unstack(self, level: IndexLabel = -1, fill_value: Hashable = None) -> DataFrame: """ Unstack, also known as pivot, Series with MultiIndex to produce DataFrame. Parameters ---------- level : int, str, or list of these, default last level Level(s) to unstack, can pass level name. fill_value : scalar value, default None Value to use when replacing NaN values. Returns ------- DataFrame Unstacked Series. Notes ----- Reference :ref:`the user guide <reshaping.stacking>` for more examples. Examples -------- >>> s = pd.Series([1, 2, 3, 4], ... index=pd.MultiIndex.from_product([['one', 'two'], ... ['a', 'b']])) >>> s one a 1 b 2 two a 3 b 4 dtype: int64 >>> s.unstack(level=-1) a b one 1 2 two 3 4 >>> s.unstack(level=0) one two a 1 3 b 2 4 """ from pandas.core.reshape.reshape import unstack return unstack(self, level, fill_value) # ---------------------------------------------------------------------- # function application def map( self, arg: Callable | Mapping | Series, na_action: Literal["ignore"] | None = None, ) -> Series: """ Map values of Series according to an input mapping or function. 
Used for substituting each value in a Series with another value, that may be derived from a function, a ``dict`` or a :class:`Series`. Parameters ---------- arg : function, collections.abc.Mapping subclass or Series Mapping correspondence. na_action : {None, 'ignore'}, default None If 'ignore', propagate NaN values, without passing them to the mapping correspondence. Returns ------- Series Same index as caller. See Also -------- Series.apply : For applying more complex functions on a Series. DataFrame.apply : Apply a function row-/column-wise. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. Notes ----- When ``arg`` is a dictionary, values in Series that are not in the dictionary (as keys) are converted to ``NaN``. However, if the dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e. provides a method for default values), then this default is used rather than ``NaN``. Examples -------- >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit']) >>> s 0 cat 1 dog 2 NaN 3 rabbit dtype: object ``map`` accepts a ``dict`` or a ``Series``. Values that are not found in the ``dict`` are converted to ``NaN``, unless the dict has a default value (e.g. ``defaultdict``): >>> s.map({'cat': 'kitten', 'dog': 'puppy'}) 0 kitten 1 puppy 2 NaN 3 NaN dtype: object It also accepts a function: >>> s.map('I am a {}'.format) 0 I am a cat 1 I am a dog 2 I am a nan 3 I am a rabbit dtype: object To avoid applying the function to missing values (and keep them as ``NaN``) ``na_action='ignore'`` can be used: >>> s.map('I am a {}'.format, na_action='ignore') 0 I am a cat 1 I am a dog 2 NaN 3 I am a rabbit dtype: object """ new_values = self._map_values(arg, na_action=na_action) return self._constructor(new_values, index=self.index, copy=False).__finalize__( self, method="map" ) def _gotitem(self, key, ndim, subset=None) -> Series: """ Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : {1, 2} Requested ndim of result. subset : object, default None Subset to act on. """ return self _agg_see_also_doc = dedent( """ See Also -------- Series.apply : Invoke function on a Series. Series.transform : Transform function producing a Series with like indexes. """ ) _agg_examples_doc = dedent( """ Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.agg('min') 1 >>> s.agg(['min', 'max']) min 1 max 4 dtype: int64 """ ) _shared_docs["aggregate"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, ) def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs): # Validate the axis parameter self._get_axis_number(axis) # if func is None, will switch to user-provided "named aggregation" kwargs if func is None: func = dict(kwargs.items()) op = SeriesApply(self, func, convert_dtype=False, args=args, kwargs=kwargs) result = op.agg() return result agg = aggregate # error: Signature of "any" incompatible with supertype "NDFrame" [override] def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: None = ..., **kwargs, ) -> bool: ... def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: Level, **kwargs, ) -> Series | bool: ... # error: Missing return statement def any( # type: ignore[empty-body] self, axis: Axis = 0, bool_only: bool | None = None, skipna: bool = True, level: Level | None = None, **kwargs, ) -> Series | bool: ... 
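    # A minimal usage sketch of the ``aggregate`` dispatch defined above
    # (assumed context: an ordinary pandas Series ``s``). A single func, a
    # list of funcs, or keyword arguments are all accepted; the ``func is
    # None`` branch converts keywords into a dict for named aggregation:
    #
    #   s = pd.Series([1, 2, 3, 4])
    #   s.agg("min")                         # -> 1
    #   s.agg(["min", "max"])                # -> Series indexed by ["min", "max"]
    #   s.agg(minimum="min", maximum="max")  # func=None path: dict built from kwargs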
_shared_docs["transform"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], ) def transform( self, func: AggFuncType, axis: Axis = 0, *args, **kwargs ) -> DataFrame | Series: # Validate axis argument self._get_axis_number(axis) result = SeriesApply( self, func=func, convert_dtype=True, args=args, kwargs=kwargs ).transform() return result def apply( self, func: AggFuncType, convert_dtype: bool = True, args: tuple[Any, ...] = (), **kwargs, ) -> DataFrame | Series: """ Invoke function on values of Series. Can be ufunc (a NumPy function that applies to the entire Series) or a Python function that only works on single values. Parameters ---------- func : function Python function or NumPy ufunc to apply. convert_dtype : bool, default True Try to find better dtype for elementwise function results. If False, leave as dtype=object. Note that the dtype is always preserved for some extension array dtypes, such as Categorical. args : tuple Positional arguments passed to func after the series value. **kwargs Additional keyword arguments passed to func. Returns ------- Series or DataFrame If func returns a Series object the result will be a DataFrame. See Also -------- Series.map: For element-wise operations. Series.agg: Only perform aggregating type operations. Series.transform: Only perform transforming type operations. Notes ----- Functions that mutate the passed object can produce unexpected behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` for more details. Examples -------- Create a series with typical summer temperatures for each city. >>> s = pd.Series([20, 21, 12], ... index=['London', 'New York', 'Helsinki']) >>> s London 20 New York 21 Helsinki 12 dtype: int64 Square the values by defining a function and passing it as an argument to ``apply()``. >>> def square(x): ... return x ** 2 >>> s.apply(square) London 400 New York 441 Helsinki 144 dtype: int64 Square the values by passing an anonymous function as an argument to ``apply()``. >>> s.apply(lambda x: x ** 2) London 400 New York 441 Helsinki 144 dtype: int64 Define a custom function that needs additional positional arguments and pass these additional arguments using the ``args`` keyword. >>> def subtract_custom_value(x, custom_value): ... return x - custom_value >>> s.apply(subtract_custom_value, args=(5,)) London 15 New York 16 Helsinki 7 dtype: int64 Define a custom function that takes keyword arguments and pass these arguments to ``apply``. >>> def add_custom_values(x, **kwargs): ... for month in kwargs: ... x += kwargs[month] ... return x >>> s.apply(add_custom_values, june=30, july=20, august=25) London 95 New York 96 Helsinki 87 dtype: int64 Use a function from the Numpy library. >>> s.apply(np.log) London 2.995732 New York 3.044522 Helsinki 2.484907 dtype: float64 """ return SeriesApply(self, func, convert_dtype, args, kwargs).apply() def _reduce( self, op, name: str, *, axis: Axis = 0, skipna: bool = True, numeric_only: bool = False, filter_type=None, **kwds, ): """ Perform a reduction operation. If we have an ndarray as a value, then simply perform the operation, otherwise delegate to the object. 
""" delegate = self._values if axis is not None: self._get_axis_number(axis) if isinstance(delegate, ExtensionArray): # dispatch to ExtensionArray interface return delegate._reduce(name, skipna=skipna, **kwds) else: # dispatch to numpy arrays if numeric_only and not is_numeric_dtype(self.dtype): kwd_name = "numeric_only" if name in ["any", "all"]: kwd_name = "bool_only" # GH#47500 - change to TypeError to match other methods raise TypeError( f"Series.{name} does not allow {kwd_name}={numeric_only} " "with non-numeric dtypes." ) with np.errstate(all="ignore"): return op(delegate, skipna=skipna, **kwds) def _reindex_indexer( self, new_index: Index | None, indexer: npt.NDArray[np.intp] | None, copy: bool | None, ) -> Series: # Note: new_index is None iff indexer is None # if not None, indexer is np.intp if indexer is None and ( new_index is None or new_index.names == self.index.names ): if using_copy_on_write(): return self.copy(deep=copy) if copy or copy is None: return self.copy(deep=copy) return self new_values = algorithms.take_nd( self._values, indexer, allow_fill=True, fill_value=None ) return self._constructor(new_values, index=new_index, copy=False) def _needs_reindex_multi(self, axes, method, level) -> bool: """ Check if we do need a multi reindex; this is for compat with higher dims. """ return False # error: Cannot determine type of 'align' NDFrame.align, # type: ignore[has-type] klass=_shared_doc_kwargs["klass"], axes_single_arg=_shared_doc_kwargs["axes_single_arg"], ) def align( self, other: Series, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value: Hashable = None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> Series: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[True], level: Level | None = ..., errors: IgnoreRaise = ..., ) -> None: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[False] = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: bool = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series | None: ... def rename( self, index: Renamer | Hashable | None = None, *, axis: Axis | None = None, copy: bool = True, inplace: bool = False, level: Level | None = None, errors: IgnoreRaise = "ignore", ) -> Series | None: """ Alter Series index labels or name. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. Alternatively, change ``Series.name`` with a scalar value. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- index : scalar, hashable sequence, dict-like or function optional Functions or dict-like are transformations to apply to the index. Scalar or hashable sequence-like will alter the ``Series.name`` attribute. axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new Series. 
            If True the value of copy is ignored.
        level : int or level name, default None
            In case of MultiIndex, only rename labels in the specified level.
        errors : {'ignore', 'raise'}, default 'ignore'
            If 'raise', raise `KeyError` when a `dict-like mapper` or
            `index` contains labels that are not present in the index being
            transformed. If 'ignore', existing keys will be renamed and extra
            keys will be ignored.

        Returns
        -------
        Series or None
            Series with index labels or name altered or None if ``inplace=True``.

        See Also
        --------
        DataFrame.rename : Corresponding DataFrame method.
        Series.rename_axis : Set the name of the axis.

        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s
        0    1
        1    2
        2    3
        dtype: int64
        >>> s.rename("my_name")  # scalar, changes Series.name
        0    1
        1    2
        2    3
        Name: my_name, dtype: int64
        >>> s.rename(lambda x: x ** 2)  # function, changes labels
        0    1
        1    2
        4    3
        dtype: int64
        >>> s.rename({1: 3, 2: 5})  # mapping, changes labels
        0    1
        3    2
        5    3
        dtype: int64
        """
        if axis is not None:
            # Make sure we raise if an invalid 'axis' is passed.
            axis = self._get_axis_number(axis)

        if callable(index) or is_dict_like(index):
            # error: Argument 1 to "_rename" of "NDFrame" has incompatible
            # type "Union[Union[Mapping[Any, Hashable], Callable[[Any],
            # Hashable]], Hashable, None]"; expected "Union[Mapping[Any,
            # Hashable], Callable[[Any], Hashable], None]"
            return super()._rename(
                index,  # type: ignore[arg-type]
                copy=copy,
                inplace=inplace,
                level=level,
                errors=errors,
            )
        else:
            return self._set_name(index, inplace=inplace)

    def set_axis(
        self,
        labels,
        *,
        axis: Axis = 0,
        copy: bool | None = None,
    ) -> Series:
        """
        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s
        0    1
        1    2
        2    3
        dtype: int64

        >>> s.set_axis(['a', 'b', 'c'], axis=0)
        a    1
        b    2
        c    3
        dtype: int64
        """
        return super().set_axis(labels, axis=axis, copy=copy)

    # error: Cannot determine type of 'shift'
    # ----------------------------------------------------------------------
    # Convert to types that support pd.NA
    # error: Cannot determine type of 'isna'
    # error: Return type "Series" of "isna" incompatible with return type "ndarray
    # [Any, dtype[bool_]]" in supertype "IndexOpsMixin"
    # error: Cannot determine type of 'isna'
    # error: Cannot determine type of 'notna'
    # error: Cannot determine type of 'notna'
    # ----------------------------------------------------------------------
    # Time series-oriented methods
    # error: Cannot determine type of 'asfreq'
    # error: Cannot determine type of 'resample'
    # ----------------------------------------------------------------------
    # Add index
    # ----------------------------------------------------------------------
    # Accessor Methods
    # ----------------------------------------------------------------------
    # ----------------------------------------------------------------------
    # Add plotting methods to Series
    # ----------------------------------------------------------------------
    # Template-Based Arithmetic/Comparison Methods
Series
The provided code snippet includes necessary dependencies for implementing the `_dtype_to_default_stata_fmt` function. Write a Python function `def _dtype_to_default_stata_fmt( dtype, column: Series, dta_version: int = 114, force_strl: bool = False ) -> str` to solve the following problem:
Map numpy dtype to stata's default format for this type. Not terribly important since users can change this in Stata. Semantics are
object -> "%DDs" where DD is the length of the string.
If not a string, raise ValueError
float64 -> "%10.0g"
float32 -> "%9.0g"
int64 -> "%9.0g"
int32 -> "%12.0g"
int16 -> "%8.0g"
int8 -> "%8.0g"
strl -> "%9s"
Here is the function:
def _dtype_to_default_stata_fmt(
    dtype, column: Series, dta_version: int = 114, force_strl: bool = False
) -> str:
    """
    Map numpy dtype to stata's default format for this type. Not terribly
    important since users can change this in Stata. Semantics are

    object  -> "%DDs" where DD is the length of the string. If not a string,
                raise ValueError
    float64 -> "%10.0g"
    float32 -> "%9.0g"
    int64   -> "%9.0g"
    int32   -> "%12.0g"
    int16   -> "%8.0g"
    int8    -> "%8.0g"
    strl    -> "%9s"
    """
    # TODO: Refactor to combine type with format
    # TODO: expand this to handle a default datetime format?
    if dta_version < 117:
        max_str_len = 244
    else:
        max_str_len = 2045
    if force_strl:
        return "%9s"
    if dtype.type is np.object_:
        itemsize = max_len_string_array(ensure_object(column._values))
        if itemsize > max_str_len:
            if dta_version >= 117:
                return "%9s"
            else:
                raise ValueError(excessive_string_length_error.format(column.name))
        return "%" + str(max(itemsize, 1)) + "s"
    elif dtype == np.float64:
        return "%10.0g"
    elif dtype == np.float32:
        return "%9.0g"
    elif dtype == np.int64:
        return "%9.0g"
    elif dtype == np.int32:
        return "%12.0g"
    elif dtype in (np.int8, np.int16):
        return "%8.0g"
    else:  # pragma : no cover
        raise NotImplementedError(f"Data type {dtype} not supported.")
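For illustration, a minimal usage sketch (assuming the function above and its dependencies are in scope; the expected results follow directly from the branches shown):

import numpy as np
import pandas as pd

col = pd.Series(["ab", "cdef"])  # object dtype; longest string has 4 characters
_dtype_to_default_stata_fmt(np.dtype(object), col)                   # "%4s"
_dtype_to_default_stata_fmt(np.dtype("float64"), col)                # "%10.0g"
_dtype_to_default_stata_fmt(np.dtype("int32"), col)                  # "%12.0g"
_dtype_to_default_stata_fmt(np.dtype(object), col, force_strl=True)  # "%9s"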
Map numpy dtype to stata's default format for this type. Not terribly important since users can change this in Stata. Semantics are object -> "%DDs" where DD is the length of the string. If not a string, raise ValueError float64 -> "%10.0g" float32 -> "%9.0g" int64 -> "%9.0g" int32 -> "%12.0g" int16 -> "%8.0g" int8 -> "%8.0g" strl -> "%9s"
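In context, this helper sits inside pandas' Stata writer; end users normally reach it indirectly through ``DataFrame.to_stata``, which assigns each column a display format consistent with the mapping above. A short sketch (the file name is illustrative):

import pandas as pd

df = pd.DataFrame({"x": [1.5, 2.5], "label": ["a", "bb"]})
# float64 column -> "%10.0g"; object column with longest string of 2 -> "%2s"
df.to_stata("example.dta", write_index=False)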
173,542
from __future__ import annotations from collections import abc import datetime from io import BytesIO import os import struct import sys from types import TracebackType from typing import ( IO, TYPE_CHECKING, Any, AnyStr, Callable, Final, Hashable, Sequence, cast, ) import warnings from dateutil.relativedelta import relativedelta import numpy as np from pandas._libs.lib import infer_dtype from pandas._libs.writers import max_len_string_array from pandas._typing import ( CompressionOptions, FilePath, ReadBuffer, StorageOptions, WriteBuffer, ) from pandas.errors import ( CategoricalConversionWarning, InvalidColumnName, PossiblePrecisionLoss, ValueLabelTypeMismatch, ) from pandas.util._decorators import ( Appender, doc, ) from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ( ensure_object, is_categorical_dtype, is_datetime64_dtype, is_numeric_dtype, ) from pandas import ( Categorical, DatetimeIndex, NaT, Timestamp, isna, to_datetime, to_timedelta, ) from pandas.core.arrays.boolean import BooleanDtype from pandas.core.arrays.integer import IntegerDtype from pandas.core.frame import DataFrame from pandas.core.indexes.base import Index from pandas.core.series import Series from pandas.core.shared_docs import _shared_docs from pandas.io.common import get_handle ensure_object = algos.ensure_object class Series(base.IndexOpsMixin, NDFrame): # type: ignore[misc] """ One-dimensional ndarray with axis labels (including time series). Labels need not be unique but must be a hashable type. The object supports both integer- and label-based indexing and provides a host of methods for performing operations involving the index. Statistical methods from ndarray have been overridden to automatically exclude missing data (currently represented as NaN). Operations between Series (+, -, /, \\*, \\*\\*) align values based on their associated index values-- they need not be the same length. The result index will be the sorted union of the two indexes. Parameters ---------- data : array-like, Iterable, dict, or scalar value Contains data stored in Series. If data is a dict, argument order is maintained. index : array-like or Index (1d) Values must be hashable and have the same length as `data`. Non-unique index values are allowed. Will default to RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like and index is None, then the keys in the data are used as the index. If the index is not None, the resulting Series is reindexed with the index values. dtype : str, numpy.dtype, or ExtensionDtype, optional Data type for the output Series. If not specified, this will be inferred from `data`. See the :ref:`user guide <basics.dtypes>` for more usages. name : Hashable, default None The name to give to the Series. copy : bool, default False Copy input data. Only affects Series or 1d ndarray input. See examples. Notes ----- Please reference the :ref:`User Guide <basics.series>` for more information. Examples -------- Constructing Series from a dictionary with an Index specified >>> d = {'a': 1, 'b': 2, 'c': 3} >>> ser = pd.Series(data=d, index=['a', 'b', 'c']) >>> ser a 1 b 2 c 3 dtype: int64 The keys of the dictionary match with the Index values, hence the Index values have no effect. >>> d = {'a': 1, 'b': 2, 'c': 3} >>> ser = pd.Series(data=d, index=['x', 'y', 'z']) >>> ser x NaN y NaN z NaN dtype: float64 Note that the Index is first build with the keys from the dictionary. After this the Series is reindexed with the given Index values, hence we get all NaN as a result. 
Constructing Series from a list with `copy=False`. >>> r = [1, 2] >>> ser = pd.Series(r, copy=False) >>> ser.iloc[0] = 999 >>> r [1, 2] >>> ser 0 999 1 2 dtype: int64 Due to input data type the Series has a `copy` of the original data even though `copy=False`, so the data is unchanged. Constructing Series from a 1d ndarray with `copy=False`. >>> r = np.array([1, 2]) >>> ser = pd.Series(r, copy=False) >>> ser.iloc[0] = 999 >>> r array([999, 2]) >>> ser 0 999 1 2 dtype: int64 Due to input data type the Series has a `view` on the original data, so the data is changed as well. """ _typ = "series" _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) _name: Hashable _metadata: list[str] = ["name"] _internal_names_set = {"index"} | NDFrame._internal_names_set _accessors = {"dt", "cat", "str", "sparse"} _hidden_attrs = ( base.IndexOpsMixin._hidden_attrs | NDFrame._hidden_attrs | frozenset([]) ) # Override cache_readonly bc Series is mutable # error: Incompatible types in assignment (expression has type "property", # base class "IndexOpsMixin" defined the type as "Callable[[IndexOpsMixin], bool]") hasnans = property( # type: ignore[assignment] # error: "Callable[[IndexOpsMixin], bool]" has no attribute "fget" base.IndexOpsMixin.hasnans.fget, # type: ignore[attr-defined] doc=base.IndexOpsMixin.hasnans.__doc__, ) _mgr: SingleManager div: Callable[[Series, Any], Series] rdiv: Callable[[Series, Any], Series] # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index=None, dtype: Dtype | None = None, name=None, copy: bool | None = None, fastpath: bool = False, ) -> None: if ( isinstance(data, (SingleBlockManager, SingleArrayManager)) and index is None and dtype is None and (copy is False or copy is None) ): if using_copy_on_write(): data = data.copy(deep=False) # GH#33357 called with just the SingleBlockManager NDFrame.__init__(self, data) if fastpath: # e.g. 
from _box_col_values, skip validation of name object.__setattr__(self, "_name", name) else: self.name = name return if isinstance(data, (ExtensionArray, np.ndarray)): if copy is not False and using_copy_on_write(): if dtype is None or astype_is_view(data.dtype, pandas_dtype(dtype)): data = data.copy() if copy is None: copy = False # we are called internally, so short-circuit if fastpath: # data is a ndarray, index is defined if not isinstance(data, (SingleBlockManager, SingleArrayManager)): manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index) elif manager == "array": data = SingleArrayManager.from_array(data, index) elif using_copy_on_write() and not copy: data = data.copy(deep=False) if copy: data = data.copy() # skips validation of the name object.__setattr__(self, "_name", name) NDFrame.__init__(self, data) return if isinstance(data, SingleBlockManager) and using_copy_on_write() and not copy: data = data.copy(deep=False) name = ibase.maybe_extract_name(name, data, type(self)) if index is not None: index = ensure_index(index) if dtype is not None: dtype = self._validate_dtype(dtype) if data is None: index = index if index is not None else default_index(0) if len(index) or dtype is not None: data = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: data = [] if isinstance(data, MultiIndex): raise NotImplementedError( "initializing a Series from a MultiIndex is not supported" ) refs = None if isinstance(data, Index): if dtype is not None: data = data.astype(dtype, copy=False) if using_copy_on_write(): refs = data._references data = data._values else: # GH#24096 we need to ensure the index remains immutable data = data._values.copy() copy = False elif isinstance(data, np.ndarray): if len(data.dtype): # GH#13296 we are dealing with a compound dtype, which # should be treated as 2D raise ValueError( "Cannot construct a Series from an ndarray with " "compound dtype. Use DataFrame instead." ) elif isinstance(data, Series): if index is None: index = data.index data = data._mgr.copy(deep=False) else: data = data.reindex(index, copy=copy) copy = False data = data._mgr elif is_dict_like(data): data, index = self._init_dict(data, index, dtype) dtype = None copy = False elif isinstance(data, (SingleBlockManager, SingleArrayManager)): if index is None: index = data.index elif not data.index.equals(index) or copy: # GH#19275 SingleBlockManager input should only be called # internally raise AssertionError( "Cannot pass both SingleBlockManager " "`data` argument and a different " "`index` argument. `copy` must be False." ) elif isinstance(data, ExtensionArray): pass else: data = com.maybe_iterable_to_list(data) if is_list_like(data) and not len(data) and dtype is None: # GH 29405: Pre-2.0, this defaulted to float. 
dtype = np.dtype(object) if index is None: if not is_list_like(data): data = [data] index = default_index(len(data)) elif is_list_like(data): com.require_length_match(data, index) # create/copy the manager if isinstance(data, (SingleBlockManager, SingleArrayManager)): if dtype is not None: data = data.astype(dtype=dtype, errors="ignore", copy=copy) elif copy: data = data.copy() else: data = sanitize_array(data, index, dtype, copy) manager = get_option("mode.data_manager") if manager == "block": data = SingleBlockManager.from_array(data, index, refs=refs) elif manager == "array": data = SingleArrayManager.from_array(data, index) NDFrame.__init__(self, data) self.name = name self._set_axis(0, index) def _init_dict( self, data, index: Index | None = None, dtype: DtypeObj | None = None ): """ Derive the "_mgr" and "index" attributes of a new Series from a dictionary input. Parameters ---------- data : dict or dict-like Data used to populate the new Series. index : Index or None, default None Index for the new Series: if None, use dict keys. dtype : np.dtype, ExtensionDtype, or None, default None The dtype for the new Series: if None, infer from data. Returns ------- _data : BlockManager for the new Series index : index for the new Series """ keys: Index | tuple # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')] # raises KeyError), so we iterate the entire dict, and align if data: # GH:34717, issue was using zip to extract key and values from data. # using generators in effects the performance. # Below is the new way of extracting the keys and values keys = tuple(data.keys()) values = list(data.values()) # Generating list of values- faster way elif index is not None: # fastpath for Series(data=None). Just use broadcasting a scalar # instead of reindexing. if len(index) or dtype is not None: values = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: values = [] keys = index else: keys, values = (), [] # Input is now list-like, so rely on "standard" construction: s = self._constructor( values, index=keys, dtype=dtype, ) # Now we just make sure the order is respected, if any if data and index is not None: s = s.reindex(index, copy=False) return s._mgr, s.index # ---------------------------------------------------------------------- def _constructor(self) -> Callable[..., Series]: return Series def _constructor_expanddim(self) -> Callable[..., DataFrame]: """ Used when a manipulation result has one higher dimension as the original, such as Series.to_frame() """ from pandas.core.frame import DataFrame return DataFrame # types def _can_hold_na(self) -> bool: return self._mgr._can_hold_na # ndarray compatibility def dtype(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtype dtype('int64') """ return self._mgr.dtype def dtypes(self) -> DtypeObj: """ Return the dtype object of the underlying data. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.dtypes dtype('int64') """ # DataFrame compatibility return self.dtype def name(self) -> Hashable: """ Return the name of the Series. The name of a Series becomes its index or column name if it is used to form a DataFrame. It is also used whenever displaying the Series using the interpreter. Returns ------- label (hashable object) The name of the Series, also the column name if part of a DataFrame. See Also -------- Series.rename : Sets the Series name when given a scalar input. Index.name : Corresponding Index property. 
Examples -------- The Series name can be set initially when calling the constructor. >>> s = pd.Series([1, 2, 3], dtype=np.int64, name='Numbers') >>> s 0 1 1 2 2 3 Name: Numbers, dtype: int64 >>> s.name = "Integers" >>> s 0 1 1 2 2 3 Name: Integers, dtype: int64 The name of a Series within a DataFrame is its column name. >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], ... columns=["Odd Numbers", "Even Numbers"]) >>> df Odd Numbers Even Numbers 0 1 2 1 3 4 2 5 6 >>> df["Even Numbers"].name 'Even Numbers' """ return self._name def name(self, value: Hashable) -> None: validate_all_hashable(value, error_name=f"{type(self).__name__}.name") object.__setattr__(self, "_name", value) def values(self): """ Return Series as ndarray or ndarray-like depending on the dtype. .. warning:: We recommend using :attr:`Series.array` or :meth:`Series.to_numpy`, depending on whether you need a reference to the underlying data or a NumPy array. Returns ------- numpy.ndarray or ndarray-like See Also -------- Series.array : Reference to the underlying data. Series.to_numpy : A NumPy array representing the underlying data. Examples -------- >>> pd.Series([1, 2, 3]).values array([1, 2, 3]) >>> pd.Series(list('aabc')).values array(['a', 'a', 'b', 'c'], dtype=object) >>> pd.Series(list('aabc')).astype('category').values ['a', 'a', 'b', 'c'] Categories (3, object): ['a', 'b', 'c'] Timezone aware datetime data is converted to UTC: >>> pd.Series(pd.date_range('20130101', periods=3, ... tz='US/Eastern')).values array(['2013-01-01T05:00:00.000000000', '2013-01-02T05:00:00.000000000', '2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]') """ return self._mgr.external_values() def _values(self): """ Return the internal repr of this data (defined by Block.interval_values). This are the values as stored in the Block (ndarray or ExtensionArray depending on the Block class), with datetime64[ns] and timedelta64[ns] wrapped in ExtensionArrays to match Index._values behavior. Differs from the public ``.values`` for certain data types, because of historical backwards compatibility of the public attribute (e.g. period returns object ndarray and datetimetz a datetime64[ns] ndarray for ``.values`` while it returns an ExtensionArray for ``._values`` in those cases). Differs from ``.array`` in that this still returns the numpy array if the Block is backed by a numpy array (except for datetime64 and timedelta64 dtypes), while ``.array`` ensures to always return an ExtensionArray. Overview: dtype | values | _values | array | ----------- | ------------- | ------------- | ------------- | Numeric | ndarray | ndarray | PandasArray | Category | Categorical | Categorical | Categorical | dt64[ns] | ndarray[M8ns] | DatetimeArray | DatetimeArray | dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray | td64[ns] | ndarray[m8ns] | TimedeltaArray| ndarray[m8ns] | Period | ndarray[obj] | PeriodArray | PeriodArray | Nullable | EA | EA | EA | """ return self._mgr.internal_values() def _references(self) -> BlockValuesRefs | None: if isinstance(self._mgr, SingleArrayManager): return None return self._mgr._block.refs # error: Decorated property not supported def array(self) -> ExtensionArray: return self._mgr.array_values() # ops def ravel(self, order: str = "C") -> ArrayLike: """ Return the flattened underlying data as an ndarray or ExtensionArray. Returns ------- numpy.ndarray or ExtensionArray Flattened data of the Series. See Also -------- numpy.ndarray.ravel : Return a flattened array. 
""" arr = self._values.ravel(order=order) if isinstance(arr, np.ndarray) and using_copy_on_write(): arr.flags.writeable = False return arr def __len__(self) -> int: """ Return the length of the Series. """ return len(self._mgr) def view(self, dtype: Dtype | None = None) -> Series: """ Create a new view of the Series. This function will return a new Series with a view of the same underlying values in memory, optionally reinterpreted with a new data type. The new data type must preserve the same size in bytes as to not cause index misalignment. Parameters ---------- dtype : data type Data type object or one of their string representations. Returns ------- Series A new Series object as a view of the same data in memory. See Also -------- numpy.ndarray.view : Equivalent numpy function to create a new view of the same data in memory. Notes ----- Series are instantiated with ``dtype=float64`` by default. While ``numpy.ndarray.view()`` will return a view with the same data type as the original array, ``Series.view()`` (without specified dtype) will try using ``float64`` and may fail if the original data type size in bytes is not the same. Examples -------- >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8') >>> s 0 -2 1 -1 2 0 3 1 4 2 dtype: int8 The 8 bit signed integer representation of `-1` is `0b11111111`, but the same bytes represent 255 if read as an 8 bit unsigned integer: >>> us = s.view('uint8') >>> us 0 254 1 255 2 0 3 1 4 2 dtype: uint8 The views share the same underlying values: >>> us[0] = 128 >>> s 0 -128 1 -1 2 0 3 1 4 2 dtype: int8 """ # self.array instead of self._values so we piggyback on PandasArray # implementation res_values = self.array.view(dtype) res_ser = self._constructor(res_values, index=self.index, copy=False) if isinstance(res_ser._mgr, SingleBlockManager) and using_copy_on_write(): blk = res_ser._mgr._block blk.refs = cast("BlockValuesRefs", self._references) blk.refs.add_reference(blk) # type: ignore[arg-type] return res_ser.__finalize__(self, method="view") # ---------------------------------------------------------------------- # NDArray Compat _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray: """ Return the values as a NumPy array. Users should not call this directly. Rather, it is invoked by :func:`numpy.array` and :func:`numpy.asarray`. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to use for the resulting NumPy array. By default, the dtype is inferred from the data. Returns ------- numpy.ndarray The values in the series converted to a :class:`numpy.ndarray` with the specified `dtype`. See Also -------- array : Create a new array from data. Series.array : Zero-copy view to the array backing the Series. Series.to_numpy : Series method for similar behavior. 
Examples -------- >>> ser = pd.Series([1, 2, 3]) >>> np.asarray(ser) array([1, 2, 3]) For timezone-aware data, the timezones may be retained with ``dtype='object'`` >>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) >>> np.asarray(tzser, dtype="object") array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'), Timestamp('2000-01-02 00:00:00+0100', tz='CET')], dtype=object) Or the values may be localized to UTC and the tzinfo discarded with ``dtype='datetime64[ns]'`` >>> np.asarray(tzser, dtype="datetime64[ns]") # doctest: +ELLIPSIS array(['1999-12-31T23:00:00.000000000', ...], dtype='datetime64[ns]') """ values = self._values arr = np.asarray(values, dtype=dtype) if using_copy_on_write() and astype_is_view(values.dtype, arr.dtype): arr = arr.view() arr.flags.writeable = False return arr # ---------------------------------------------------------------------- # Unary Methods # coercion __float__ = _coerce_method(float) __int__ = _coerce_method(int) # ---------------------------------------------------------------------- # indexers def axes(self) -> list[Index]: """ Return a list of the row axis labels. """ return [self.index] # ---------------------------------------------------------------------- # Indexing Methods def take(self, indices, axis: Axis = 0, **kwargs) -> Series: nv.validate_take((), kwargs) indices = ensure_platform_int(indices) if ( indices.ndim == 1 and using_copy_on_write() and is_range_indexer(indices, len(self)) ): return self.copy(deep=None) new_index = self.index.take(indices) new_values = self._values.take(indices) result = self._constructor(new_values, index=new_index, fastpath=True) return result.__finalize__(self, method="take") def _take_with_is_copy(self, indices, axis: Axis = 0) -> Series: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing for the SettingWithCopyWarning). For Series this does the same as the public take (it never sets `_is_copy`). See the docstring of `take` for full explanation of the parameters. """ return self.take(indices=indices, axis=axis) def _ixs(self, i: int, axis: AxisInt = 0) -> Any: """ Return the i-th value or values in the Series by location. Parameters ---------- i : int Returns ------- scalar (int) or Series (slice, sequence) """ return self._values[i] def _slice(self, slobj: slice | np.ndarray, axis: Axis = 0) -> Series: # axis kwarg is retained for compat with NDFrame method # _slice is *always* positional return self._get_values(slobj) def __getitem__(self, key): check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) if key is Ellipsis: return self key_is_scalar = is_scalar(key) if isinstance(key, (list, tuple)): key = unpack_1tuple(key) if is_integer(key) and self.index._should_fallback_to_positional: return self._values[key] elif key_is_scalar: return self._get_value(key) if is_hashable(key): # Otherwise index.get_value will raise InvalidIndexError try: # For labels that don't resolve as scalars like tuples and frozensets result = self._get_value(key) return result except (KeyError, TypeError, InvalidIndexError): # InvalidIndexError for e.g. 
generator # see test_series_getitem_corner_generator if isinstance(key, tuple) and isinstance(self.index, MultiIndex): # We still have the corner case where a tuple is a key # in the first level of our MultiIndex return self._get_values_tuple(key) if is_iterator(key): key = list(key) if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) return self._get_values(key) return self._get_with(key) def _get_with(self, key): # other: fancy integer or otherwise if isinstance(key, slice): # _convert_slice_indexer to determine if this slice is positional # or label based, and if the latter, convert to positional slobj = self.index._convert_slice_indexer(key, kind="getitem") return self._slice(slobj) elif isinstance(key, ABCDataFrame): raise TypeError( "Indexing a Series with DataFrame is not " "supported, use the appropriate DataFrame column" ) elif isinstance(key, tuple): return self._get_values_tuple(key) elif not is_list_like(key): # e.g. scalars that aren't recognized by lib.is_scalar, GH#32684 return self.loc[key] if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)): key = list(key) if isinstance(key, Index): key_type = key.inferred_type else: key_type = lib.infer_dtype(key, skipna=False) # Note: The key_type == "boolean" case should be caught by the # com.is_bool_indexer check in __getitem__ if key_type == "integer": # We need to decide whether to treat this as a positional indexer # (i.e. self.iloc) or label-based (i.e. self.loc) if not self.index._should_fallback_to_positional: return self.loc[key] else: return self.iloc[key] # handle the dup indexing case GH#4246 return self.loc[key] def _get_values_tuple(self, key: tuple): # mpl hackaround if com.any_none(*key): # mpl compat if we look up e.g. ser[:, np.newaxis]; # see tests.series.timeseries.test_mpl_compat_hack # the asarray is needed to avoid returning a 2D DatetimeArray result = np.asarray(self._values[key]) disallow_ndim_indexing(result) return result if not isinstance(self.index, MultiIndex): raise KeyError("key of type tuple not found and not a MultiIndex") # If key is contained, would have returned by now indexer, new_index = self.index.get_loc_level(key) new_ser = self._constructor(self._values[indexer], index=new_index, copy=False) if using_copy_on_write() and isinstance(indexer, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) def _get_values(self, indexer: slice | npt.NDArray[np.bool_]) -> Series: new_mgr = self._mgr.getitem_mgr(indexer) return self._constructor(new_mgr).__finalize__(self) def _get_value(self, label, takeable: bool = False): """ Quickly retrieve single value at passed index label. 
Parameters ---------- label : object takeable : interpret the index as indexers, default False Returns ------- scalar value """ if takeable: return self._values[label] # Similar to Index.get_value, but we do not fall back to positional loc = self.index.get_loc(label) if is_integer(loc): return self._values[loc] if isinstance(self.index, MultiIndex): mi = self.index new_values = self._values[loc] if len(new_values) == 1 and mi.nlevels == 1: # If more than one level left, we can not return a scalar return new_values[0] new_index = mi[loc] new_index = maybe_droplevels(new_index, label) new_ser = self._constructor( new_values, index=new_index, name=self.name, copy=False ) if using_copy_on_write() and isinstance(loc, slice): new_ser._mgr.add_references(self._mgr) # type: ignore[arg-type] return new_ser.__finalize__(self) else: return self.iloc[loc] def __setitem__(self, key, value) -> None: if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) cacher_needs_updating = self._check_is_chained_assignment_possible() if key is Ellipsis: key = slice(None) if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") return self._set_values(indexer, value) try: self._set_with_engine(key, value) except KeyError: # We have a scalar (or for MultiIndex or object-dtype, scalar-like) # key that is not present in self.index. if is_integer(key): if not self.index._should_fallback_to_positional: # GH#33469 self.loc[key] = value else: # positional setter # can't use _mgr.setitem_inplace yet bc could have *both* # KeyError and then ValueError, xref GH#45070 self._set_values(key, value) else: # GH#12862 adding a new key to the Series self.loc[key] = value except (TypeError, ValueError, LossySetitemError): # The key was OK, but we cannot set the value losslessly indexer = self.index.get_loc(key) self._set_values(indexer, value) except InvalidIndexError as err: if isinstance(key, tuple) and not isinstance(self.index, MultiIndex): # cases with MultiIndex don't get here bc they raise KeyError # e.g. test_basic_getitem_setitem_corner raise KeyError( "key of type tuple not found and not a MultiIndex" ) from err if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) if ( is_list_like(value) and len(value) != len(self) and not isinstance(value, Series) and not is_object_dtype(self.dtype) ): # Series will be reindexed to have matching length inside # _where call below # GH#44265 indexer = key.nonzero()[0] self._set_values(indexer, value) return # otherwise with listlike other we interpret series[mask] = other # as series[mask] = other[mask] try: self._where(~key, value, inplace=True) except InvalidIndexError: # test_where_dups self.iloc[key] = value return else: self._set_with(key, value) if cacher_needs_updating: self._maybe_update_cacher(inplace=True) def _set_with_engine(self, key, value) -> None: loc = self.index.get_loc(key) # this is equivalent to self._values[key] = value self._mgr.setitem_inplace(loc, value) def _set_with(self, key, value) -> None: # We got here via exception-handling off of InvalidIndexError, so # key should always be listlike at this point. 
assert not isinstance(key, tuple) if is_iterator(key): # Without this, the call to infer_dtype will consume the generator key = list(key) if not self.index._should_fallback_to_positional: # Regardless of the key type, we're treating it as labels self._set_labels(key, value) else: # Note: key_type == "boolean" should not occur because that # should be caught by the is_bool_indexer check in __setitem__ key_type = lib.infer_dtype(key, skipna=False) if key_type == "integer": self._set_values(key, value) else: self._set_labels(key, value) def _set_labels(self, key, value) -> None: key = com.asarray_tuplesafe(key) indexer: np.ndarray = self.index.get_indexer(key) mask = indexer == -1 if mask.any(): raise KeyError(f"{key[mask]} not in index") self._set_values(indexer, value) def _set_values(self, key, value) -> None: if isinstance(key, (Index, Series)): key = key._values self._mgr = self._mgr.setitem(indexer=key, value=value) self._maybe_update_cacher() def _set_value(self, label, value, takeable: bool = False) -> None: """ Quickly set single value at passed label. If label is not contained, a new object is created with the label placed at the end of the result index. Parameters ---------- label : object Partial indexing with MultiIndex not allowed. value : object Scalar value. takeable : interpret the index as indexers, default False """ if not takeable: try: loc = self.index.get_loc(label) except KeyError: # set using a non-recursive method self.loc[label] = value return else: loc = label self._set_values(loc, value) # ---------------------------------------------------------------------- # Lookup Caching def _is_cached(self) -> bool: """Return boolean indicating if self is cached or not.""" return getattr(self, "_cacher", None) is not None def _get_cacher(self): """return my cacher or None""" cacher = getattr(self, "_cacher", None) if cacher is not None: cacher = cacher[1]() return cacher def _reset_cacher(self) -> None: """ Reset the cacher. """ if hasattr(self, "_cacher"): del self._cacher def _set_as_cached(self, item, cacher) -> None: """ Set the _cacher attribute on the calling object with a weakref to cacher. 
""" if using_copy_on_write(): return self._cacher = (item, weakref.ref(cacher)) def _clear_item_cache(self) -> None: # no-op for Series pass def _check_is_chained_assignment_possible(self) -> bool: """ See NDFrame._check_is_chained_assignment_possible.__doc__ """ if self._is_view and self._is_cached: ref = self._get_cacher() if ref is not None and ref._is_mixed_type: self._check_setitem_copy(t="referent", force=True) return True return super()._check_is_chained_assignment_possible() def _maybe_update_cacher( self, clear: bool = False, verify_is_copy: bool = True, inplace: bool = False ) -> None: """ See NDFrame._maybe_update_cacher.__doc__ """ # for CoW, we never want to update the parent DataFrame cache # if the Series changed, but don't keep track of any cacher if using_copy_on_write(): return cacher = getattr(self, "_cacher", None) if cacher is not None: assert self.ndim == 1 ref: DataFrame = cacher[1]() # we are trying to reference a dead referent, hence # a copy if ref is None: del self._cacher elif len(self) == len(ref) and self.name in ref.columns: # GH#42530 self.name must be in ref.columns # to ensure column still in dataframe # otherwise, either self or ref has swapped in new arrays ref._maybe_cache_changed(cacher[0], self, inplace=inplace) else: # GH#33675 we have swapped in a new array, so parent # reference to self is now invalid ref._item_cache.pop(cacher[0], None) super()._maybe_update_cacher( clear=clear, verify_is_copy=verify_is_copy, inplace=inplace ) # ---------------------------------------------------------------------- # Unsorted def _is_mixed_type(self) -> bool: return False def repeat(self, repeats: int | Sequence[int], axis: None = None) -> Series: """ Repeat elements of a Series. Returns a new Series where each element of the current Series is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty Series. axis : None Unused. Parameter needed for compatibility with DataFrame. Returns ------- Series Newly created Series with repeated elements. See Also -------- Index.repeat : Equivalent function for Index. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> s = pd.Series(['a', 'b', 'c']) >>> s 0 a 1 b 2 c dtype: object >>> s.repeat(2) 0 a 0 a 1 b 1 b 2 c 2 c dtype: object >>> s.repeat([1, 2, 3]) 0 a 1 b 1 b 2 c 2 c 2 c dtype: object """ nv.validate_repeat((), {"axis": axis}) new_index = self.index.repeat(repeats) new_values = self._values.repeat(repeats) return self._constructor(new_values, index=new_index, copy=False).__finalize__( self, method="repeat" ) def reset_index( self, level: IndexLabel = ..., *, drop: Literal[False] = ..., name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> DataFrame: ... def reset_index( self, level: IndexLabel = ..., *, drop: Literal[True], name: Level = ..., inplace: Literal[False] = ..., allow_duplicates: bool = ..., ) -> Series: ... def reset_index( self, level: IndexLabel = ..., *, drop: bool = ..., name: Level = ..., inplace: Literal[True], allow_duplicates: bool = ..., ) -> None: ... def reset_index( self, level: IndexLabel = None, *, drop: bool = False, name: Level = lib.no_default, inplace: bool = False, allow_duplicates: bool = False, ) -> DataFrame | Series | None: """ Generate a new DataFrame or Series with the index reset. 
This is useful when the index needs to be treated as a column, or when the index is meaningless and needs to be reset to the default before another operation. Parameters ---------- level : int, str, tuple, or list, default optional For a Series with a MultiIndex, only remove the specified levels from the index. Removes all levels by default. drop : bool, default False Just reset the index, without inserting it as a column in the new DataFrame. name : object, optional The name to use for the column containing the original Series values. Uses ``self.name`` by default. This argument is ignored when `drop` is True. inplace : bool, default False Modify the Series in place (do not create a new object). allow_duplicates : bool, default False Allow duplicate column labels to be created. .. versionadded:: 1.5.0 Returns ------- Series or DataFrame or None When `drop` is False (the default), a DataFrame is returned. The newly created columns will come first in the DataFrame, followed by the original Series values. When `drop` is True, a `Series` is returned. In either case, if ``inplace=True``, no value is returned. See Also -------- DataFrame.reset_index: Analogous function for DataFrame. Examples -------- >>> s = pd.Series([1, 2, 3, 4], name='foo', ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx')) Generate a DataFrame with default index. >>> s.reset_index() idx foo 0 a 1 1 b 2 2 c 3 3 d 4 To specify the name of the new column use `name`. >>> s.reset_index(name='values') idx values 0 a 1 1 b 2 2 c 3 3 d 4 To generate a new Series with the default set `drop` to True. >>> s.reset_index(drop=True) 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 The `level` parameter is interesting for Series with a multi-level index. >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']), ... np.array(['one', 'two', 'one', 'two'])] >>> s2 = pd.Series( ... range(4), name='foo', ... index=pd.MultiIndex.from_arrays(arrays, ... names=['a', 'b'])) To remove a specific level from the Index, use `level`. >>> s2.reset_index(level='a') a foo b one bar 0 two bar 1 one baz 2 two baz 3 If `level` is not set, all levels are removed from the Index. >>> s2.reset_index() a b foo 0 bar one 0 1 bar two 1 2 baz one 2 3 baz two 3 """ inplace = validate_bool_kwarg(inplace, "inplace") if drop: new_index = default_index(len(self)) if level is not None: level_list: Sequence[Hashable] if not isinstance(level, (tuple, list)): level_list = [level] else: level_list = level level_list = [self.index._get_level_number(lev) for lev in level_list] if len(level_list) < self.index.nlevels: new_index = self.index.droplevel(level_list) if inplace: self.index = new_index elif using_copy_on_write(): new_ser = self.copy(deep=False) new_ser.index = new_index return new_ser.__finalize__(self, method="reset_index") else: return self._constructor( self._values.copy(), index=new_index, copy=False ).__finalize__(self, method="reset_index") elif inplace: raise TypeError( "Cannot reset_index inplace on a Series to create a DataFrame" ) else: if name is lib.no_default: # For backwards compatibility, keep columns as [0] instead of # [None] when self.name is None if self.name is None: name = 0 else: name = self.name df = self.to_frame(name) return df.reset_index( level=level, drop=drop, allow_duplicates=allow_duplicates ) return None # ---------------------------------------------------------------------- # Rendering Methods def __repr__(self) -> str: """ Return a string representation for a particular Series. 
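The rendering respects the relevant ``display.*`` options, such as ``display.max_rows``.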
""" # pylint: disable=invalid-repr-returned repr_params = fmt.get_series_repr_params() return self.to_string(**repr_params) def to_string( self, buf: None = ..., na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> str: ... def to_string( self, buf: FilePath | WriteBuffer[str], na_rep: str = ..., float_format: str | None = ..., header: bool = ..., index: bool = ..., length=..., dtype=..., name=..., max_rows: int | None = ..., min_rows: int | None = ..., ) -> None: ... def to_string( self, buf: FilePath | WriteBuffer[str] | None = None, na_rep: str = "NaN", float_format: str | None = None, header: bool = True, index: bool = True, length: bool = False, dtype: bool = False, name: bool = False, max_rows: int | None = None, min_rows: int | None = None, ) -> str | None: """ Render a string representation of the Series. Parameters ---------- buf : StringIO-like, optional Buffer to write to. na_rep : str, optional String representation of NaN to use, default 'NaN'. float_format : one-parameter function, optional Formatter function to apply to columns' elements if they are floats, default None. header : bool, default True Add the Series header (index name). index : bool, optional Add index (row) labels, default True. length : bool, default False Add the Series length. dtype : bool, default False Add the Series dtype. name : bool, default False Add the Series name if not None. max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. min_rows : int, optional The number of rows to display in a truncated repr (when number of rows is above `max_rows`). Returns ------- str or None String representation of Series if ``buf=None``, otherwise None. """ formatter = fmt.SeriesFormatter( self, name=name, length=length, header=header, index=index, dtype=dtype, na_rep=na_rep, float_format=float_format, min_rows=min_rows, max_rows=max_rows, ) result = formatter.to_string() # catch contract violations if not isinstance(result, str): raise AssertionError( "result must be of type str, type " f"of result is {repr(type(result).__name__)}" ) if buf is None: return result else: if hasattr(buf, "write"): buf.write(result) else: with open(buf, "w") as f: f.write(result) return None klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples=dedent( """Examples -------- >>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal") >>> print(s.to_markdown()) | | animal | |---:|:---------| | 0 | elk | | 1 | pig | | 2 | dog | | 3 | quetzal | Output markdown with a tabulate option. >>> print(s.to_markdown(tablefmt="grid")) +----+----------+ | | animal | +====+==========+ | 0 | elk | +----+----------+ | 1 | pig | +----+----------+ | 2 | dog | +----+----------+ | 3 | quetzal | +----+----------+""" ), ) def to_markdown( self, buf: IO[str] | None = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions = None, **kwargs, ) -> str | None: """ Print {klass} in Markdown-friendly format. Parameters ---------- buf : str, Path or StringIO-like, optional, default None Buffer to write to. If None, the output is returned as a string. mode : str, optional Mode in which file is opened, "wt" by default. index : bool, optional, default True Add index (row) labels. .. versionadded:: 1.1.0 {storage_options} .. versionadded:: 1.2.0 **kwargs These parameters will be passed to `tabulate \ <https://pypi.org/project/tabulate>`_. 
Returns ------- str {klass} in Markdown-friendly format. Notes ----- Requires the `tabulate <https://pypi.org/project/tabulate>`_ package. {examples} """ return self.to_frame().to_markdown( buf, mode, index, storage_options=storage_options, **kwargs ) # ---------------------------------------------------------------------- def items(self) -> Iterable[tuple[Hashable, Any]]: """ Lazily iterate over (index, value) tuples. This method returns an iterable tuple (index, value). This is convenient if you want to create a lazy iterator. Returns ------- iterable Iterable of tuples containing the (index, value) pairs from a Series. See Also -------- DataFrame.items : Iterate over (column name, Series) pairs. DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. Examples -------- >>> s = pd.Series(['A', 'B', 'C']) >>> for index, value in s.items(): ... print(f"Index : {index}, Value : {value}") Index : 0, Value : A Index : 1, Value : B Index : 2, Value : C """ return zip(iter(self.index), iter(self)) # ---------------------------------------------------------------------- # Misc public methods def keys(self) -> Index: """ Return alias for index. Returns ------- Index Index of the Series. """ return self.index def to_dict(self, into: type[dict] = dict) -> dict: """ Convert Series to {label -> value} dict or dict-like object. Parameters ---------- into : class, default dict The collections.abc.Mapping subclass to use as the return object. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- collections.abc.Mapping Key-value representation of Series. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.to_dict() {0: 1, 1: 2, 2: 3, 3: 4} >>> from collections import OrderedDict, defaultdict >>> s.to_dict(OrderedDict) OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> dd = defaultdict(list) >>> s.to_dict(dd) defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4}) """ # GH16122 into_c = com.standardize_mapping(into) if is_object_dtype(self) or is_extension_array_dtype(self): return into_c((k, maybe_box_native(v)) for k, v in self.items()) else: # Not an object dtype => all types will be the same so let the default # indexer return native python type return into_c(self.items()) def to_frame(self, name: Hashable = lib.no_default) -> DataFrame: """ Convert Series to DataFrame. Parameters ---------- name : object, optional The passed name should substitute for the series name (if it has one). Returns ------- DataFrame DataFrame representation of Series. Examples -------- >>> s = pd.Series(["a", "b", "c"], ... name="vals") >>> s.to_frame() vals 0 a 1 b 2 c """ columns: Index if name is lib.no_default: name = self.name if name is None: # default to [0], same as we would get with DataFrame(self) columns = default_index(1) else: columns = Index([name]) else: columns = Index([name]) mgr = self._mgr.to_2d_mgr(columns) df = self._constructor_expanddim(mgr) return df.__finalize__(self, method="to_frame") def _set_name(self, name, inplace: bool = False) -> Series: """ Set the Series name. Parameters ---------- name : str inplace : bool Whether to modify `self` directly or return a copy. """ inplace = validate_bool_kwarg(inplace, "inplace") ser = self if inplace else self.copy() ser.name = name return ser """ Examples -------- >>> ser = pd.Series([390., 350., 30., 20.], ... 
index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], name="Max Speed") >>> ser Falcon 390.0 Falcon 350.0 Parrot 30.0 Parrot 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", "b"]).mean() a 210.0 b 185.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(ser > 100).mean() Max Speed False 25.0 True 370.0 Name: Max Speed, dtype: float64 **Grouping by Indexes** We can groupby different levels of a hierarchical index using the `level` parameter: >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], ... ['Captive', 'Wild', 'Captive', 'Wild']] >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) >>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed") >>> ser Animal Type Falcon Captive 390.0 Wild 350.0 Parrot Captive 30.0 Wild 20.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level=0).mean() Animal Falcon 370.0 Parrot 25.0 Name: Max Speed, dtype: float64 >>> ser.groupby(level="Type").mean() Type Captive 210.0 Wild 185.0 Name: Max Speed, dtype: float64 We can also choose to include `NA` in group keys or not by defining `dropna` parameter, the default setting is `True`. >>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan]) >>> ser.groupby(level=0).sum() a 3 b 3 dtype: int64 >>> ser.groupby(level=0, dropna=False).sum() a 3 b 3 NaN 3 dtype: int64 >>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot'] >>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed") >>> ser.groupby(["a", "b", "a", np.nan]).mean() a 210.0 b 350.0 Name: Max Speed, dtype: float64 >>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean() a 210.0 b 350.0 NaN 20.0 Name: Max Speed, dtype: float64 """ ) def groupby( self, by=None, axis: Axis = 0, level: IndexLabel = None, as_index: bool = True, sort: bool = True, group_keys: bool = True, observed: bool = False, dropna: bool = True, ) -> SeriesGroupBy: from pandas.core.groupby.generic import SeriesGroupBy if level is None and by is None: raise TypeError("You have to supply one of 'by' and 'level'") if not as_index: raise TypeError("as_index=False only valid with DataFrame") axis = self._get_axis_number(axis) return SeriesGroupBy( obj=self, keys=by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, observed=observed, dropna=dropna, ) # ---------------------------------------------------------------------- # Statistics, overridden ndarray methods # TODO: integrate bottleneck def count(self): """ Return number of non-NA/null observations in the Series. Returns ------- int or Series (if level specified) Number of non-null values in the Series. See Also -------- DataFrame.count : Count non-NA cells for each column or row. Examples -------- >>> s = pd.Series([0.0, 1.0, np.nan]) >>> s.count() 2 """ return notna(self._values).sum().astype("int64") def mode(self, dropna: bool = True) -> Series: """ Return the mode(s) of the Series. The mode is the value that appears most often. There can be multiple modes. Always returns Series even if only one value is returned. Parameters ---------- dropna : bool, default True Don't consider counts of NaN/NaT. Returns ------- Series Modes of the Series in sorted order. 
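Examples
--------
A minimal illustration (with the default ``dropna=True``, missing
values are ignored):

>>> s = pd.Series([2, 4, 2, 2, np.nan])
>>> s.mode()
0    2.0
dtype: float64

Ties are returned in sorted order:

>>> pd.Series([1, 2, 2, 1]).mode()
0    1
1    2
dtype: int64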
""" # TODO: Add option for bins like value_counts() values = self._values if isinstance(values, np.ndarray): res_values = algorithms.mode(values, dropna=dropna) else: res_values = values._mode(dropna=dropna) # Ensure index is type stable (should always use int index) return self._constructor( res_values, index=range(len(res_values)), name=self.name, copy=False ) def unique(self) -> ArrayLike: # pylint: disable=useless-parent-delegation """ Return unique values of Series object. Uniques are returned in order of appearance. Hash table-based unique, therefore does NOT sort. Returns ------- ndarray or ExtensionArray The unique values returned as a NumPy array. See Notes. See Also -------- Series.drop_duplicates : Return Series with duplicate values removed. unique : Top-level unique method for any 1-d array-like object. Index.unique : Return Index with unique values from an Index object. Notes ----- Returns the unique values as a NumPy array. In case of an extension-array backed Series, a new :class:`~api.extensions.ExtensionArray` of that type with just the unique values is returned. This includes * Categorical * Period * Datetime with Timezone * Datetime without Timezone * Timedelta * Interval * Sparse * IntegerNA See Examples section. Examples -------- >>> pd.Series([2, 1, 3, 3], name='A').unique() array([2, 1, 3]) >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00'] Length: 1, dtype: datetime64[ns] >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern') ... for _ in range(3)]).unique() <DatetimeArray> ['2016-01-01 00:00:00-05:00'] Length: 1, dtype: datetime64[ns, US/Eastern] An Categorical will return categories in the order of appearance and with the same dtype. >>> pd.Series(pd.Categorical(list('baabc'))).unique() ['b', 'a', 'c'] Categories (3, object): ['a', 'b', 'c'] >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'), ... ordered=True)).unique() ['b', 'a', 'c'] Categories (3, object): ['a' < 'b' < 'c'] """ return super().unique() def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[False] = ..., ignore_index: bool = ..., ) -> Series: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: Literal[True], ignore_index: bool = ... ) -> None: ... def drop_duplicates( self, *, keep: DropKeep = ..., inplace: bool = ..., ignore_index: bool = ... ) -> Series | None: ... def drop_duplicates( self, *, keep: DropKeep = "first", inplace: bool = False, ignore_index: bool = False, ) -> Series | None: """ Return Series with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' Method to handle dropping duplicates: - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. inplace : bool, default ``False`` If ``True``, performs operation inplace and returns None. ignore_index : bool, default ``False`` If ``True``, the resulting axis will be labeled 0, 1, …, n - 1. .. versionadded:: 2.0.0 Returns ------- Series or None Series with duplicates dropped or None if ``inplace=True``. See Also -------- Index.drop_duplicates : Equivalent method on Index. DataFrame.drop_duplicates : Equivalent method on DataFrame. Series.duplicated : Related method on Series, indicating duplicate Series values. Series.unique : Return unique values as an array. Examples -------- Generate a Series with duplicated entries. 
>>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], ... name='animal') >>> s 0 lama 1 cow 2 lama 3 beetle 4 lama 5 hippo Name: animal, dtype: object With the 'keep' parameter, the selection behaviour of duplicated values can be changed. The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> s.drop_duplicates() 0 lama 1 cow 3 beetle 5 hippo Name: animal, dtype: object The value 'last' for parameter 'keep' keeps the last occurrence for each set of duplicated entries. >>> s.drop_duplicates(keep='last') 1 cow 3 beetle 4 lama 5 hippo Name: animal, dtype: object The value ``False`` for parameter 'keep' discards all sets of duplicated entries. >>> s.drop_duplicates(keep=False) 1 cow 3 beetle 5 hippo Name: animal, dtype: object """ inplace = validate_bool_kwarg(inplace, "inplace") result = super().drop_duplicates(keep=keep) if ignore_index: result.index = default_index(len(result)) if inplace: self._update_inplace(result) return None else: return result def duplicated(self, keep: DropKeep = "first") -> Series: """ Indicate duplicate Series values. Duplicated values are indicated as ``True`` values in the resulting Series. Either all duplicates, all except the first or all except the last occurrence of duplicates can be indicated. Parameters ---------- keep : {'first', 'last', False}, default 'first' Method to handle dropping duplicates: - 'first' : Mark duplicates as ``True`` except for the first occurrence. - 'last' : Mark duplicates as ``True`` except for the last occurrence. - ``False`` : Mark all duplicates as ``True``. Returns ------- Series[bool] Series indicating whether each value has occurred in the preceding values. See Also -------- Index.duplicated : Equivalent method on pandas.Index. DataFrame.duplicated : Equivalent method on pandas.DataFrame. Series.drop_duplicates : Remove duplicate values from Series. Examples -------- By default, for each set of duplicated values, the first occurrence is set on False and all others on True: >>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama']) >>> animals.duplicated() 0 False 1 False 2 True 3 False 4 True dtype: bool which is equivalent to >>> animals.duplicated(keep='first') 0 False 1 False 2 True 3 False 4 True dtype: bool By using 'last', the last occurrence of each set of duplicated values is set on False and all others on True: >>> animals.duplicated(keep='last') 0 True 1 False 2 True 3 False 4 False dtype: bool By setting keep on ``False``, all duplicates are True: >>> animals.duplicated(keep=False) 0 True 1 False 2 True 3 False 4 True dtype: bool """ res = self._duplicated(keep=keep) result = self._constructor(res, index=self.index, copy=False) return result.__finalize__(self, method="duplicated") def idxmin(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the minimum value. If multiple values equal the minimum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the minimum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmin : Return indices of the minimum values along the given axis. 
DataFrame.idxmin : Return index of first occurrence of minimum over requested axis. Series.idxmax : Return index *label* of the first occurrence of maximum of values. Notes ----- This method is the Series version of ``ndarray.argmin``. This method returns the label of the minimum, while ``ndarray.argmin`` returns the position. To get the position, use ``series.values.argmin()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 1], ... index=['A', 'B', 'C', 'D']) >>> s A 1.0 B NaN C 4.0 D 1.0 dtype: float64 >>> s.idxmin() 'A' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmin(skipna=False) nan """ # error: Argument 1 to "argmin" of "IndexOpsMixin" has incompatible type "Union # [int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmin(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def idxmax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: """ Return the row label of the maximum value. If multiple values equal the maximum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the maximum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmax : Return indices of the maximum values along the given axis. DataFrame.idxmax : Return index of first occurrence of maximum over requested axis. Series.idxmin : Return index *label* of the first occurrence of minimum of values. Notes ----- This method is the Series version of ``ndarray.argmax``. This method returns the label of the maximum, while ``ndarray.argmax`` returns the position. To get the position, use ``series.values.argmax()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 3, 4], ... index=['A', 'B', 'C', 'D', 'E']) >>> s A 1.0 B NaN C 4.0 D 3.0 E 4.0 dtype: float64 >>> s.idxmax() 'C' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmax(skipna=False) nan """ # error: Argument 1 to "argmax" of "IndexOpsMixin" has incompatible type # "Union[int, Literal['index', 'columns']]"; expected "Optional[int]" i = self.argmax(axis, skipna, *args, **kwargs) # type: ignore[arg-type] if i == -1: return np.nan return self.index[i] def round(self, decimals: int = 0, *args, **kwargs) -> Series: """ Round each value in a Series to the given number of decimals. Parameters ---------- decimals : int, default 0 Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. *args, **kwargs Additional arguments and keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Series Rounded values of the Series. See Also -------- numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. Examples -------- >>> s = pd.Series([0.1, 1.3, 2.7]) >>> s.round() 0 0.0 1 1.0 2 3.0 dtype: float64 """ nv.validate_round(args, kwargs) result = self._values.round(decimals) result = self._constructor(result, index=self.index, copy=False).__finalize__( self, method="round" ) return result def quantile( self, q: float = ..., interpolation: QuantileInterpolation = ... 
) -> float: ... def quantile( self, q: Sequence[float] | AnyArrayLike, interpolation: QuantileInterpolation = ..., ) -> Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = ..., interpolation: QuantileInterpolation = ..., ) -> float | Series: ... def quantile( self, q: float | Sequence[float] | AnyArrayLike = 0.5, interpolation: QuantileInterpolation = "linear", ) -> float | Series: """ Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) The quantile(s) to compute, which can lie in range: 0 <= q <= 1. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- float or Series If ``q`` is an array, a Series will be returned where the index is ``q`` and the values are the quantiles, otherwise a float will be returned. See Also -------- core.window.Rolling.quantile : Calculate the rolling quantile. numpy.percentile : Returns the q-th percentile(s) of the array elements. Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.quantile(.5) 2.5 >>> s.quantile([.25, .5, .75]) 0.25 1.75 0.50 2.50 0.75 3.25 dtype: float64 """ validate_percentile(q) # We dispatch to DataFrame so that core.internals only has to worry # about 2D cases. df = self.to_frame() result = df.quantile(q=q, interpolation=interpolation, numeric_only=False) if result.ndim == 2: result = result.iloc[:, 0] if is_list_like(q): result.name = self.name idx = Index(q, dtype=np.float64) return self._constructor(result, index=idx, name=self.name) else: # scalar return result.iloc[0] def corr( self, other: Series, method: CorrelationMethod = "pearson", min_periods: int | None = None, ) -> float: """ Compute correlation with `other` Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the correlation function is applied. Parameters ---------- other : Series Series with which to compute the correlation. method : {'pearson', 'kendall', 'spearman'} or callable Method used to compute correlation: - pearson : Standard correlation coefficient - kendall : Kendall Tau correlation coefficient - spearman : Spearman rank correlation - callable: Callable with input two 1d ndarrays and returning a float. .. warning:: Note that the returned matrix from corr will have 1 along the diagonals and will be symmetric regardless of the callable's behavior. min_periods : int, optional Minimum number of observations needed to have a valid result. Returns ------- float Correlation with other. See Also -------- DataFrame.corr : Compute pairwise correlation between columns. DataFrame.corrwith : Compute pairwise correlation with another DataFrame or Series. Notes ----- Pearson, Kendall and Spearman correlation are currently computed using pairwise complete observations. 
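The two Series are first aligned on the intersection of their indexes; if the intersection is empty, ``NaN`` is returned.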
* `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_ * `Kendall rank correlation coefficient <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_ * `Spearman's rank correlation coefficient <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_ Examples -------- >>> def histogram_intersection(a, b): ... v = np.minimum(a, b).sum().round(decimals=1) ... return v >>> s1 = pd.Series([.2, .0, .6, .2]) >>> s2 = pd.Series([.3, .6, .0, .1]) >>> s1.corr(s2, method=histogram_intersection) 0.3 """ # noqa:E501 this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan if method in ["pearson", "spearman", "kendall"] or callable(method): return nanops.nancorr( this.values, other.values, method=method, min_periods=min_periods ) raise ValueError( "method must be either 'pearson', " "'spearman', 'kendall', or a callable, " f"'{method}' was supplied" ) def cov( self, other: Series, min_periods: int | None = None, ddof: int | None = 1, ) -> float: """ Compute covariance with Series, excluding missing values. The two `Series` objects are not required to be the same length and will be aligned internally before the covariance is calculated. Parameters ---------- other : Series Series with which to compute the covariance. min_periods : int, optional Minimum number of observations needed to have a valid result. ddof : int, default 1 Delta degrees of freedom. The divisor used in calculations is ``N - ddof``, where ``N`` represents the number of elements. .. versionadded:: 1.1.0 Returns ------- float Covariance between Series and other normalized by N-1 (unbiased estimator). See Also -------- DataFrame.cov : Compute pairwise covariance of columns. Examples -------- >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035]) >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198]) >>> s1.cov(s2) -0.01685762652715874 """ this, other = self.align(other, join="inner", copy=False) if len(this) == 0: return np.nan return nanops.nancov( this.values, other.values, min_periods=min_periods, ddof=ddof ) klass="Series", extra_params="", other_klass="DataFrame", examples=dedent( """ Difference with previous row >>> s = pd.Series([1, 1, 2, 3, 5, 8]) >>> s.diff() 0 NaN 1 0.0 2 1.0 3 1.0 4 2.0 5 3.0 dtype: float64 Difference with 3rd previous row >>> s.diff(periods=3) 0 NaN 1 NaN 2 NaN 3 2.0 4 4.0 5 6.0 dtype: float64 Difference with following row >>> s.diff(periods=-1) 0 0.0 1 -1.0 2 -1.0 3 -2.0 4 -3.0 5 NaN dtype: float64 Overflow in input dtype >>> s = pd.Series([1, 0], dtype=np.uint8) >>> s.diff() 0 NaN 1 255.0 dtype: float64""" ), ) def diff(self, periods: int = 1) -> Series: """ First discrete difference of element. Calculates the difference of a {klass} element compared with another element in the {klass} (default is element in previous row). Parameters ---------- periods : int, default 1 Periods to shift for calculating difference, accepts negative values. {extra_params} Returns ------- {klass} First differences of the Series. See Also -------- {klass}.pct_change: Percent change over given number of periods. {klass}.shift: Shift index by desired number of periods with an optional time freq. {other_klass}.diff: First discrete difference of object. Notes ----- For boolean dtypes, this uses :meth:`operator.xor` rather than :meth:`operator.sub`. The result is calculated according to current dtype in {klass}, however dtype of the result is always float64. 
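For most numeric inputs the result matches ``self - self.shift(periods)``; note that the subtraction itself is performed in the original dtype, which is why the unsigned-integer example below overflows.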
Examples -------- {examples} """ result = algorithms.diff(self._values, periods) return self._constructor(result, index=self.index, copy=False).__finalize__( self, method="diff" ) def autocorr(self, lag: int = 1) -> float: """ Compute the lag-N autocorrelation. This method computes the Pearson correlation between the Series and its shifted self. Parameters ---------- lag : int, default 1 Number of lags to apply before performing autocorrelation. Returns ------- float The Pearson correlation between self and self.shift(lag). See Also -------- Series.corr : Compute the correlation between two Series. Series.shift : Shift index by desired number of periods. DataFrame.corr : Compute pairwise correlation of columns. DataFrame.corrwith : Compute pairwise correlation between rows or columns of two DataFrame objects. Notes ----- If the Pearson correlation is not well defined return 'NaN'. Examples -------- >>> s = pd.Series([0.25, 0.5, 0.2, -0.05]) >>> s.autocorr() # doctest: +ELLIPSIS 0.10355... >>> s.autocorr(lag=2) # doctest: +ELLIPSIS -0.99999... If the Pearson correlation is not well defined, then 'NaN' is returned. >>> s = pd.Series([1, 0, 0, 0]) >>> s.autocorr() nan """ return self.corr(self.shift(lag)) def dot(self, other: AnyArrayLike) -> Series | np.ndarray: """ Compute the dot product between the Series and the columns of other. This method computes the dot product between the Series and another one, or the Series and each columns of a DataFrame, or the Series and each columns of an array. It can also be called using `self @ other` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the dot product with its columns. Returns ------- scalar, Series or numpy.ndarray Return the dot product of the Series and other if other is a Series, the Series of the dot product of Series and each rows of other if other is a DataFrame or a numpy.ndarray between the Series and each columns of the numpy array. See Also -------- DataFrame.dot: Compute the matrix product with the DataFrame. Series.mul: Multiplication of series and other, element-wise. Notes ----- The Series and other has to share the same index if other is a Series or a DataFrame. Examples -------- >>> s = pd.Series([0, 1, 2, 3]) >>> other = pd.Series([-1, 2, -3, 4]) >>> s.dot(other) 8 >>> s @ other 8 >>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(df) 0 24 1 14 dtype: int64 >>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(arr) array([24, 14]) """ if isinstance(other, (Series, ABCDataFrame)): common = self.index.union(other.index) if len(common) > len(self.index) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(index=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right.values else: lvals = self.values rvals = np.asarray(other) if lvals.shape[0] != rvals.shape[0]: raise Exception( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, ABCDataFrame): return self._constructor( np.dot(lvals, rvals), index=other.columns, copy=False ).__finalize__(self, method="dot") elif isinstance(other, Series): return np.dot(lvals, rvals) elif isinstance(rvals, np.ndarray): return np.dot(lvals, rvals) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. 
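A minimal illustration (``@`` simply delegates to :meth:`Series.dot`):

>>> s = pd.Series([1, 2, 3])
>>> s @ s
14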
""" return self.dot(other) def __rmatmul__(self, other): """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(np.transpose(other)) # Signature of "searchsorted" incompatible with supertype "IndexOpsMixin" def searchsorted( # type: ignore[override] self, value: NumpyValueArrayLike | ExtensionArray, side: Literal["left", "right"] = "left", sorter: NumpySorter = None, ) -> npt.NDArray[np.intp] | np.intp: return base.IndexOpsMixin.searchsorted(self, value, side=side, sorter=sorter) # ------------------------------------------------------------------- # Combination def _append( self, to_append, ignore_index: bool = False, verify_integrity: bool = False ): from pandas.core.reshape.concat import concat if isinstance(to_append, (list, tuple)): to_concat = [self] to_concat.extend(to_append) else: to_concat = [self, to_append] if any(isinstance(x, (ABCDataFrame,)) for x in to_concat[1:]): msg = "to_append should be a Series or list/tuple of Series, got DataFrame" raise TypeError(msg) return concat( to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity ) def _binop(self, other: Series, func, level=None, fill_value=None): """ Perform generic binary operation with optional fill value. Parameters ---------- other : Series func : binary operator fill_value : float or object Value to substitute for NA/null values. If both Series are NA in a location, the result will be NA regardless of the passed fill value. level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level. Returns ------- Series """ if not isinstance(other, Series): raise AssertionError("Other operand must be Series") this = self if not self.index.equals(other.index): this, other = self.align(other, level=level, join="outer", copy=False) this_vals, other_vals = ops.fill_binop(this._values, other._values, fill_value) with np.errstate(all="ignore"): result = func(this_vals, other_vals) name = ops.get_op_result_name(self, other) return this._construct_result(result, name) def _construct_result( self, result: ArrayLike | tuple[ArrayLike, ArrayLike], name: Hashable ) -> Series | tuple[Series, Series]: """ Construct an appropriately-labelled Series from the result of an op. Parameters ---------- result : ndarray or ExtensionArray name : Label Returns ------- Series In the case of __divmod__ or __rdivmod__, a 2-tuple of Series. """ if isinstance(result, tuple): # produced by divmod or rdivmod res1 = self._construct_result(result[0], name=name) res2 = self._construct_result(result[1], name=name) # GH#33427 assertions to keep mypy happy assert isinstance(res1, Series) assert isinstance(res2, Series) return (res1, res2) # TODO: result should always be ArrayLike, but this fails for some # JSONArray tests dtype = getattr(result, "dtype", None) out = self._constructor(result, index=self.index, dtype=dtype) out = out.__finalize__(self) # Set the result's name after __finalize__ is called because __finalize__ # would set it back to self.name out.name = name return out _shared_docs["compare"], """ Returns ------- Series or DataFrame If axis is 0 or 'index' the result will be a Series. The resulting index will be a MultiIndex with 'self' and 'other' stacked alternately at the inner level. If axis is 1 or 'columns' the result will be a DataFrame. It will have two columns namely 'self' and 'other'. See Also -------- DataFrame.compare : Compare with another DataFrame and show differences. Notes ----- Matching NaNs will not appear as a difference. 
Examples -------- >>> s1 = pd.Series(["a", "b", "c", "d", "e"]) >>> s2 = pd.Series(["a", "a", "c", "b", "e"]) Align the differences on columns >>> s1.compare(s2) self other 1 b a 3 d b Stack the differences on indices >>> s1.compare(s2, align_axis=0) 1 self b other a 3 self d other b dtype: object Keep all original rows >>> s1.compare(s2, keep_shape=True) self other 0 NaN NaN 1 b a 2 NaN NaN 3 d b 4 NaN NaN Keep all original rows and also all original values >>> s1.compare(s2, keep_shape=True, keep_equal=True) self other 0 a a 1 b a 2 c c 3 d b 4 e e """, klass=_shared_doc_kwargs["klass"], ) def compare( self, other: Series, align_axis: Axis = 1, keep_shape: bool = False, keep_equal: bool = False, result_names: Suffixes = ("self", "other"), ) -> DataFrame | Series: return super().compare( other=other, align_axis=align_axis, keep_shape=keep_shape, keep_equal=keep_equal, result_names=result_names, ) def combine( self, other: Series | Hashable, func: Callable[[Hashable, Hashable], Hashable], fill_value: Hashable = None, ) -> Series: """ Combine the Series with a Series or scalar according to `func`. Combine the Series and `other` using `func` to perform elementwise selection for combined Series. `fill_value` is assumed when value is missing at some index from one of the two objects being combined. Parameters ---------- other : Series or scalar The value(s) to be combined with the `Series`. func : function Function that takes two scalars as inputs and returns an element. fill_value : scalar, optional The value to assume when an index is missing from one Series or the other. The default specifies to use the appropriate NaN value for the underlying dtype of the Series. Returns ------- Series The result of combining the Series with the other object. See Also -------- Series.combine_first : Combine Series values, choosing the calling Series' values first. Examples -------- Consider 2 Datasets ``s1`` and ``s2`` containing highest clocked speeds of different birds. >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0}) >>> s1 falcon 330.0 eagle 160.0 dtype: float64 >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0}) >>> s2 falcon 345.0 eagle 200.0 duck 30.0 dtype: float64 Now, to combine the two datasets and view the highest speeds of the birds across the two datasets >>> s1.combine(s2, max) duck NaN eagle 200.0 falcon 345.0 dtype: float64 In the previous example, the resulting value for duck is missing, because the maximum of a NaN and a float is a NaN. So, in the example, we set ``fill_value=0``, so the maximum value returned will be the value from some dataset. 
>>> s1.combine(s2, max, fill_value=0) duck 30.0 eagle 200.0 falcon 345.0 dtype: float64 """ if fill_value is None: fill_value = na_value_for_dtype(self.dtype, compat=False) if isinstance(other, Series): # If other is a Series, result is based on union of Series, # so do this element by element new_index = self.index.union(other.index) new_name = ops.get_op_result_name(self, other) new_values = np.empty(len(new_index), dtype=object) for i, idx in enumerate(new_index): lv = self.get(idx, fill_value) rv = other.get(idx, fill_value) with np.errstate(all="ignore"): new_values[i] = func(lv, rv) else: # Assume that other is a scalar, so apply the function for # each element in the Series new_index = self.index new_values = np.empty(len(new_index), dtype=object) with np.errstate(all="ignore"): new_values[:] = [func(lv, other) for lv in self._values] new_name = self.name # try_float=False is to match agg_series npvalues = lib.maybe_convert_objects(new_values, try_float=False) res_values = maybe_cast_pointwise_result(npvalues, self.dtype, same_dtype=False) return self._constructor(res_values, index=new_index, name=new_name, copy=False) def combine_first(self, other) -> Series: """ Update null elements with value in the same location in 'other'. Combine two Series objects by filling null values in one Series with non-null values from the other Series. Result index will be the union of the two indexes. Parameters ---------- other : Series The value(s) to be used for filling null values. Returns ------- Series The result of combining the provided Series with the other object. See Also -------- Series.combine : Perform element-wise operation on two Series using a given function. Examples -------- >>> s1 = pd.Series([1, np.nan]) >>> s2 = pd.Series([3, 4, 5]) >>> s1.combine_first(s2) 0 1.0 1 4.0 2 5.0 dtype: float64 Null values still persist if the location of that null value does not exist in `other` >>> s1 = pd.Series({'falcon': np.nan, 'eagle': 160.0}) >>> s2 = pd.Series({'eagle': 200.0, 'duck': 30.0}) >>> s1.combine_first(s2) duck 30.0 eagle 160.0 falcon NaN dtype: float64 """ new_index = self.index.union(other.index) this = self.reindex(new_index, copy=False) other = other.reindex(new_index, copy=False) if this.dtype.kind == "M" and other.dtype.kind != "M": other = to_datetime(other) return this.where(notna(this), other) def update(self, other: Series | Sequence | Mapping) -> None: """ Modify Series in place using values from passed Series. Uses non-NA values from passed Series to make updates. Aligns on index. Parameters ---------- other : Series, or object coercible into Series Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6])) >>> s 0 4 1 5 2 6 dtype: int64 >>> s = pd.Series(['a', 'b', 'c']) >>> s.update(pd.Series(['d', 'e'], index=[0, 2])) >>> s 0 d 1 b 2 e dtype: object >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6, 7, 8])) >>> s 0 4 1 5 2 6 dtype: int64 If ``other`` contains NaNs the corresponding values are not updated in the original Series. 
>>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, np.nan, 6])) >>> s 0 4 1 2 2 6 dtype: int64 ``other`` can also be a non-Series object type that is coercible into a Series >>> s = pd.Series([1, 2, 3]) >>> s.update([4, np.nan, 6]) >>> s 0 4 1 2 2 6 dtype: int64 >>> s = pd.Series([1, 2, 3]) >>> s.update({1: 9}) >>> s 0 1 1 9 2 3 dtype: int64 """ if not isinstance(other, Series): other = Series(other) other = other.reindex_like(self) mask = notna(other) self._mgr = self._mgr.putmask(mask=mask, new=other) self._maybe_update_cacher() # ---------------------------------------------------------------------- # Reindexing, sorting def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[False] = ..., kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> Series: ... def sort_values( self, *, axis: Axis = ..., ascending: bool | int | Sequence[bool] | Sequence[int] = ..., inplace: Literal[True], kind: str = ..., na_position: str = ..., ignore_index: bool = ..., key: ValueKeyFunc = ..., ) -> None: ... def sort_values( self, *, axis: Axis = 0, ascending: bool | int | Sequence[bool] | Sequence[int] = True, inplace: bool = False, kind: str = "quicksort", na_position: str = "last", ignore_index: bool = False, key: ValueKeyFunc = None, ) -> Series | None: """ Sort by the values. Sort a Series in ascending or descending order by some criterion. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. ascending : bool or list of bools, default True If True, sort values in ascending order, otherwise descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the series values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect a ``Series`` and return an array-like. .. versionadded:: 1.1.0 Returns ------- Series or None Series ordered by values or None if ``inplace=True``. See Also -------- Series.sort_index : Sort by the Series indices. DataFrame.sort_values : Sort DataFrame by the values along either axis. DataFrame.sort_index : Sort DataFrame by indices. Examples -------- >>> s = pd.Series([np.nan, 1, 3, 10, 5]) >>> s 0 NaN 1 1.0 2 3.0 3 10.0 4 5.0 dtype: float64 Sort values ascending order (default behaviour) >>> s.sort_values(ascending=True) 1 1.0 2 3.0 4 5.0 3 10.0 0 NaN dtype: float64 Sort values descending order >>> s.sort_values(ascending=False) 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN dtype: float64 Sort values putting NAs first >>> s.sort_values(na_position='first') 0 NaN 1 1.0 2 3.0 4 5.0 3 10.0 dtype: float64 Sort a series of strings >>> s = pd.Series(['z', 'b', 'd', 'a', 'c']) >>> s 0 z 1 b 2 d 3 a 4 c dtype: object >>> s.sort_values() 3 a 1 b 4 c 2 d 0 z dtype: object Sort using a key function. Your `key` function will be given the ``Series`` of values and should return an array-like. 
>>> s = pd.Series(['a', 'B', 'c', 'D', 'e']) >>> s.sort_values() 1 B 3 D 0 a 2 c 4 e dtype: object >>> s.sort_values(key=lambda x: x.str.lower()) 0 a 1 B 2 c 3 D 4 e dtype: object NumPy ufuncs work well here. For example, we can sort by the ``sin`` of the value >>> s = pd.Series([-4, -2, 0, 2, 4]) >>> s.sort_values(key=np.sin) 1 -2 4 4 2 0 0 -4 3 2 dtype: int64 More complicated user-defined functions can be used, as long as they expect a Series and return an array-like >>> s.sort_values(key=lambda x: (np.tan(x.cumsum()))) 0 -4 3 2 4 4 1 -2 2 0 dtype: int64 """ inplace = validate_bool_kwarg(inplace, "inplace") # Validate the axis parameter self._get_axis_number(axis) # GH 5856/5853 if inplace and self._is_cached: raise ValueError( "This Series is a view of some other array, to " "sort in-place you must create a copy" ) if is_list_like(ascending): ascending = cast(Sequence[Union[bool, int]], ascending) if len(ascending) != 1: raise ValueError( f"Length of ascending ({len(ascending)}) must be 1 for Series" ) ascending = ascending[0] ascending = validate_ascending(ascending) if na_position not in ["first", "last"]: raise ValueError(f"invalid na_position: {na_position}") # GH 35922. Make sorting stable by leveraging nargsort values_to_sort = ensure_key_mapped(self, key)._values if key else self._values sorted_index = nargsort(values_to_sort, kind, bool(ascending), na_position) if is_range_indexer(sorted_index, len(sorted_index)): if inplace: return self._update_inplace(self) return self.copy(deep=None) result = self._constructor( self._values[sorted_index], index=self.index[sorted_index], copy=False ) if ignore_index: result.index = default_index(len(sorted_index)) if not inplace: return result.__finalize__(self, method="sort_values") self._update_inplace(result) return None def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[True], kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> None: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: Literal[False] = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series: ... def sort_index( self, *, axis: Axis = ..., level: IndexLabel = ..., ascending: bool | Sequence[bool] = ..., inplace: bool = ..., kind: SortKind = ..., na_position: NaPosition = ..., sort_remaining: bool = ..., ignore_index: bool = ..., key: IndexKeyFunc = ..., ) -> Series | None: ... def sort_index( self, *, axis: Axis = 0, level: IndexLabel = None, ascending: bool | Sequence[bool] = True, inplace: bool = False, kind: SortKind = "quicksort", na_position: NaPosition = "last", sort_remaining: bool = True, ignore_index: bool = False, key: IndexKeyFunc = None, ) -> Series | None: """ Sort Series by index labels. Returns a new Series sorted by label if `inplace` argument is ``False``, otherwise updates the original series and returns None. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. level : int, optional If not None, sort on values in specified index level(s). ascending : bool or list-like of bools, default True Sort ascending vs. descending. When the index is a MultiIndex the sort direction can be controlled for each level individually. inplace : bool, default False If True, perform operation in-place. 
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. ignore_index : bool, default False If True, the resulting axis will be labeled 0, 1, …, n - 1. key : callable, optional If not None, apply the key function to the index values before sorting. This is similar to the `key` argument in the builtin :meth:`sorted` function, with the notable difference that this `key` function should be *vectorized*. It should expect an ``Index`` and return an ``Index`` of the same shape. .. versionadded:: 1.1.0 Returns ------- Series or None The original Series sorted by the labels or None if ``inplace=True``. See Also -------- DataFrame.sort_index: Sort DataFrame by the index. DataFrame.sort_values: Sort DataFrame by the value. Series.sort_values : Sort Series by the value. Examples -------- >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4]) >>> s.sort_index() 1 c 2 b 3 a 4 d dtype: object Sort Descending >>> s.sort_index(ascending=False) 4 d 3 a 2 b 1 c dtype: object By default NaNs are put at the end, but use `na_position` to place them at the beginning >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan]) >>> s.sort_index(na_position='first') NaN d 1.0 c 2.0 b 3.0 a dtype: object Specify index level to sort >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo', ... 'baz', 'baz', 'bar', 'bar']), ... np.array(['two', 'one', 'two', 'one', ... 'two', 'one', 'two', 'one'])] >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays) >>> s.sort_index(level=1) bar one 8 baz one 6 foo one 4 qux one 2 bar two 7 baz two 5 foo two 3 qux two 1 dtype: int64 Does not sort by remaining levels when sorting by levels >>> s.sort_index(level=1, sort_remaining=False) qux one 2 foo one 4 baz one 6 bar one 8 qux two 1 foo two 3 baz two 5 bar two 7 dtype: int64 Apply a key function before sorting >>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd']) >>> s.sort_index(key=lambda x : x.str.lower()) A 1 b 2 C 3 d 4 dtype: int64 """ return super().sort_index( axis=axis, level=level, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position, sort_remaining=sort_remaining, ignore_index=ignore_index, key=key, ) def argsort( self, axis: Axis = 0, kind: SortKind = "quicksort", order: None = None, ) -> Series: """ Return the integer indices that would sort the Series values. Override ndarray.argsort. Argsorts the value, omitting NA/null values, and places the result in the same locations as the non-NA values. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort' Choice of sorting algorithm. See :func:`numpy.sort` for more information. 'mergesort' and 'stable' are the only stable algorithms. order : None Has no effect but is accepted for compatibility with numpy. Returns ------- Series[np.intp] Positions of values within the sort order with -1 indicating nan values. 
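The returned Series keeps the original index; its values are integer positions into the sorted order, not index labels.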
See Also -------- numpy.ndarray.argsort : Returns the indices that would sort this array. """ values = self._values mask = isna(values) if mask.any(): result = np.full(len(self), -1, dtype=np.intp) notmask = ~mask result[notmask] = np.argsort(values[notmask], kind=kind) else: result = np.argsort(values, kind=kind) res = self._constructor( result, index=self.index, name=self.name, dtype=np.intp, copy=False ) return res.__finalize__(self, method="argsort") def nlargest( self, n: int = 5, keep: Literal["first", "last", "all"] = "first" ) -> Series: """ Return the largest `n` elements. Parameters ---------- n : int, default 5 Return this many descending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` largest values in the Series, sorted in decreasing order. See Also -------- Series.nsmallest: Get the `n` smallest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. Notes ----- Faster than ``.sort_values(ascending=False).head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Malta": 434000, "Maldives": 434000, ... "Brunei": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Malta 434000 Maldives 434000 Brunei 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` largest elements where ``n=5`` by default. >>> s.nlargest() France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3``. Default `keep` value is 'first' so Malta will be kept. >>> s.nlargest(3) France 65000000 Italy 59000000 Malta 434000 dtype: int64 The `n` largest elements where ``n=3`` and keeping the last duplicates. Brunei will be kept since it is the last with value 434000 based on the index order. >>> s.nlargest(3, keep='last') France 65000000 Italy 59000000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3`` with all duplicates kept. Note that the returned Series has five elements due to the three duplicates. >>> s.nlargest(3, keep='all') France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nlargest() def nsmallest(self, n: int = 5, keep: str = "first") -> Series: """ Return the smallest `n` elements. Parameters ---------- n : int, default 5 Return this many ascending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` smallest values in the Series, sorted in increasing order. See Also -------- Series.nlargest: Get the `n` largest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. 
Notes ----- Faster than ``.sort_values().head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Brunei": 434000, "Malta": 434000, ... "Maldives": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Montserrat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Brunei 434000 Malta 434000 Maldives 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Montserrat 5200 dtype: int64 The `n` smallest elements where ``n=5`` by default. >>> s.nsmallest() Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 Iceland 337000 dtype: int64 The `n` smallest elements where ``n=3``. Default `keep` value is 'first' so Nauru and Tuvalu will be kept. >>> s.nsmallest(3) Montserrat 5200 Nauru 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` and keeping the last duplicates. Anguilla and Tuvalu will be kept since they are the last with value 11300 based on the index order. >>> s.nsmallest(3, keep='last') Montserrat 5200 Anguilla 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` with all duplicates kept. Note that the returned Series has four elements due to the three duplicates. >>> s.nsmallest(3, keep='all') Montserrat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 dtype: int64 """ return selectn.SelectNSeries(self, n=n, keep=keep).nsmallest() klass=_shared_doc_kwargs["klass"], extra_params=dedent( """copy : bool, default True Whether to copy underlying data.""" ), examples=dedent( """\ Examples -------- >>> s = pd.Series( ... ["A", "B", "A", "C"], ... index=[ ... ["Final exam", "Final exam", "Coursework", "Coursework"], ... ["History", "Geography", "History", "Geography"], ... ["January", "February", "March", "April"], ... ], ... ) >>> s Final exam History January A Geography February B Coursework History March A Geography April C dtype: object In the following example, we will swap the levels of the indices. Here, we will swap the levels column-wise, but levels can be swapped row-wise in a similar manner. Note that column-wise is the default behaviour. By not supplying any arguments for i and j, we swap the last and second to last indices. >>> s.swaplevel() Final exam January History A February Geography B Coursework March History A April Geography C dtype: object By supplying one argument, we can choose which index to swap the last index with. We can for example swap the first index with the last one as follows. >>> s.swaplevel(0) January History Final exam A February Geography Final exam B March History Coursework A April Geography Coursework C dtype: object We can also define explicitly which indices we want to swap by supplying values for both i and j. Here, we for example swap the first and second indices. >>> s.swaplevel(0, 1) History Final exam January A Geography Final exam February B History Coursework March A Geography Coursework April C dtype: object""" ), ) def swaplevel( self, i: Level = -2, j: Level = -1, copy: bool | None = None ) -> Series: """ Swap levels i and j in a :class:`MultiIndex`. Default is to swap the two innermost levels of the index. Parameters ---------- i, j : int or str Levels of the indices to be swapped. Can pass level name as string. {extra_params} Returns ------- {klass} {klass} with levels swapped in MultiIndex. 
{examples} """ assert isinstance(self.index, MultiIndex) result = self.copy(deep=copy and not using_copy_on_write()) result.index = self.index.swaplevel(i, j) return result def reorder_levels(self, order: Sequence[Level]) -> Series: """ Rearrange index levels using input order. May not drop or duplicate levels. Parameters ---------- order : list of int representing new level order Reference level by number or key. Returns ------- type of caller (new object) """ if not isinstance(self.index, MultiIndex): # pragma: no cover raise Exception("Can only reorder levels on a hierarchical axis.") result = self.copy(deep=None) assert isinstance(result.index, MultiIndex) result.index = result.index.reorder_levels(order) return result def explode(self, ignore_index: bool = False) -> Series: """ Transform each element of a list-like to a row. Parameters ---------- ignore_index : bool, default False If True, the resulting index will be labeled 0, 1, …, n - 1. .. versionadded:: 1.1.0 Returns ------- Series Exploded lists to rows; index will be duplicated for these rows. See Also -------- Series.str.split : Split string values on specified separator. Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame. DataFrame.melt : Unpivot a DataFrame from wide format to long format. DataFrame.explode : Explode a DataFrame from list-like columns to long format. Notes ----- This routine will explode list-likes including lists, tuples, sets, Series, and np.ndarray. The result dtype of the subset rows will be object. Scalars will be returned unchanged, and empty list-likes will result in a np.nan for that row. In addition, the ordering of elements in the output will be non-deterministic when exploding sets. Reference :ref:`the user guide <reshaping.explode>` for more examples. Examples -------- >>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]]) >>> s 0 [1, 2, 3] 1 foo 2 [] 3 [3, 4] dtype: object >>> s.explode() 0 1 0 2 0 3 1 foo 2 NaN 3 3 3 4 dtype: object """ if not len(self) or not is_object_dtype(self): result = self.copy() return result.reset_index(drop=True) if ignore_index else result values, counts = reshape.explode(np.asarray(self._values)) if ignore_index: index = default_index(len(values)) else: index = self.index.repeat(counts) return self._constructor(values, index=index, name=self.name, copy=False) def unstack(self, level: IndexLabel = -1, fill_value: Hashable = None) -> DataFrame: """ Unstack, also known as pivot, Series with MultiIndex to produce DataFrame. Parameters ---------- level : int, str, or list of these, default last level Level(s) to unstack, can pass level name. fill_value : scalar value, default None Value to use when replacing NaN values. Returns ------- DataFrame Unstacked Series. Notes ----- Reference :ref:`the user guide <reshaping.stacking>` for more examples. Examples -------- >>> s = pd.Series([1, 2, 3, 4], ... index=pd.MultiIndex.from_product([['one', 'two'], ... ['a', 'b']])) >>> s one a 1 b 2 two a 3 b 4 dtype: int64 >>> s.unstack(level=-1) a b one 1 2 two 3 4 >>> s.unstack(level=0) one two a 1 3 b 2 4 """ from pandas.core.reshape.reshape import unstack return unstack(self, level, fill_value) # ---------------------------------------------------------------------- # function application def map( self, arg: Callable | Mapping | Series, na_action: Literal["ignore"] | None = None, ) -> Series: """ Map values of Series according to an input mapping or function. 
Used for substituting each value in a Series with another value, that may be derived from a function, a ``dict`` or a :class:`Series`. Parameters ---------- arg : function, collections.abc.Mapping subclass or Series Mapping correspondence. na_action : {None, 'ignore'}, default None If 'ignore', propagate NaN values, without passing them to the mapping correspondence. Returns ------- Series Same index as caller. See Also -------- Series.apply : For applying more complex functions on a Series. DataFrame.apply : Apply a function row-/column-wise. DataFrame.applymap : Apply a function elementwise on a whole DataFrame. Notes ----- When ``arg`` is a dictionary, values in Series that are not in the dictionary (as keys) are converted to ``NaN``. However, if the dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e. provides a method for default values), then this default is used rather than ``NaN``. Examples -------- >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit']) >>> s 0 cat 1 dog 2 NaN 3 rabbit dtype: object ``map`` accepts a ``dict`` or a ``Series``. Values that are not found in the ``dict`` are converted to ``NaN``, unless the dict has a default value (e.g. ``defaultdict``): >>> s.map({'cat': 'kitten', 'dog': 'puppy'}) 0 kitten 1 puppy 2 NaN 3 NaN dtype: object It also accepts a function: >>> s.map('I am a {}'.format) 0 I am a cat 1 I am a dog 2 I am a nan 3 I am a rabbit dtype: object To avoid applying the function to missing values (and keep them as ``NaN``) ``na_action='ignore'`` can be used: >>> s.map('I am a {}'.format, na_action='ignore') 0 I am a cat 1 I am a dog 2 NaN 3 I am a rabbit dtype: object """ new_values = self._map_values(arg, na_action=na_action) return self._constructor(new_values, index=self.index, copy=False).__finalize__( self, method="map" ) def _gotitem(self, key, ndim, subset=None) -> Series: """ Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : {1, 2} Requested ndim of result. subset : object, default None Subset to act on. """ return self _agg_see_also_doc = dedent( """ See Also -------- Series.apply : Invoke function on a Series. Series.transform : Transform function producing a Series with like indexes. """ ) _agg_examples_doc = dedent( """ Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s 0 1 1 2 2 3 3 4 dtype: int64 >>> s.agg('min') 1 >>> s.agg(['min', 'max']) min 1 max 4 dtype: int64 """ ) _shared_docs["aggregate"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, ) def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs): # Validate the axis parameter self._get_axis_number(axis) # if func is None, will switch to user-provided "named aggregation" kwargs if func is None: func = dict(kwargs.items()) op = SeriesApply(self, func, convert_dtype=False, args=args, kwargs=kwargs) result = op.agg() return result agg = aggregate # error: Signature of "any" incompatible with supertype "NDFrame" [override] def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: None = ..., **kwargs, ) -> bool: ... def any( self, *, axis: Axis = ..., bool_only: bool | None = ..., skipna: bool = ..., level: Level, **kwargs, ) -> Series | bool: ... # error: Missing return statement def any( # type: ignore[empty-body] self, axis: Axis = 0, bool_only: bool | None = None, skipna: bool = True, level: Level | None = None, **kwargs, ) -> Series | bool: ... 
_shared_docs["transform"], klass=_shared_doc_kwargs["klass"], axis=_shared_doc_kwargs["axis"], ) def transform( self, func: AggFuncType, axis: Axis = 0, *args, **kwargs ) -> DataFrame | Series: # Validate axis argument self._get_axis_number(axis) result = SeriesApply( self, func=func, convert_dtype=True, args=args, kwargs=kwargs ).transform() return result def apply( self, func: AggFuncType, convert_dtype: bool = True, args: tuple[Any, ...] = (), **kwargs, ) -> DataFrame | Series: """ Invoke function on values of Series. Can be ufunc (a NumPy function that applies to the entire Series) or a Python function that only works on single values. Parameters ---------- func : function Python function or NumPy ufunc to apply. convert_dtype : bool, default True Try to find better dtype for elementwise function results. If False, leave as dtype=object. Note that the dtype is always preserved for some extension array dtypes, such as Categorical. args : tuple Positional arguments passed to func after the series value. **kwargs Additional keyword arguments passed to func. Returns ------- Series or DataFrame If func returns a Series object the result will be a DataFrame. See Also -------- Series.map: For element-wise operations. Series.agg: Only perform aggregating type operations. Series.transform: Only perform transforming type operations. Notes ----- Functions that mutate the passed object can produce unexpected behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` for more details. Examples -------- Create a series with typical summer temperatures for each city. >>> s = pd.Series([20, 21, 12], ... index=['London', 'New York', 'Helsinki']) >>> s London 20 New York 21 Helsinki 12 dtype: int64 Square the values by defining a function and passing it as an argument to ``apply()``. >>> def square(x): ... return x ** 2 >>> s.apply(square) London 400 New York 441 Helsinki 144 dtype: int64 Square the values by passing an anonymous function as an argument to ``apply()``. >>> s.apply(lambda x: x ** 2) London 400 New York 441 Helsinki 144 dtype: int64 Define a custom function that needs additional positional arguments and pass these additional arguments using the ``args`` keyword. >>> def subtract_custom_value(x, custom_value): ... return x - custom_value >>> s.apply(subtract_custom_value, args=(5,)) London 15 New York 16 Helsinki 7 dtype: int64 Define a custom function that takes keyword arguments and pass these arguments to ``apply``. >>> def add_custom_values(x, **kwargs): ... for month in kwargs: ... x += kwargs[month] ... return x >>> s.apply(add_custom_values, june=30, july=20, august=25) London 95 New York 96 Helsinki 87 dtype: int64 Use a function from the Numpy library. >>> s.apply(np.log) London 2.995732 New York 3.044522 Helsinki 2.484907 dtype: float64 """ return SeriesApply(self, func, convert_dtype, args, kwargs).apply() def _reduce( self, op, name: str, *, axis: Axis = 0, skipna: bool = True, numeric_only: bool = False, filter_type=None, **kwds, ): """ Perform a reduction operation. If we have an ndarray as a value, then simply perform the operation, otherwise delegate to the object. 
""" delegate = self._values if axis is not None: self._get_axis_number(axis) if isinstance(delegate, ExtensionArray): # dispatch to ExtensionArray interface return delegate._reduce(name, skipna=skipna, **kwds) else: # dispatch to numpy arrays if numeric_only and not is_numeric_dtype(self.dtype): kwd_name = "numeric_only" if name in ["any", "all"]: kwd_name = "bool_only" # GH#47500 - change to TypeError to match other methods raise TypeError( f"Series.{name} does not allow {kwd_name}={numeric_only} " "with non-numeric dtypes." ) with np.errstate(all="ignore"): return op(delegate, skipna=skipna, **kwds) def _reindex_indexer( self, new_index: Index | None, indexer: npt.NDArray[np.intp] | None, copy: bool | None, ) -> Series: # Note: new_index is None iff indexer is None # if not None, indexer is np.intp if indexer is None and ( new_index is None or new_index.names == self.index.names ): if using_copy_on_write(): return self.copy(deep=copy) if copy or copy is None: return self.copy(deep=copy) return self new_values = algorithms.take_nd( self._values, indexer, allow_fill=True, fill_value=None ) return self._constructor(new_values, index=new_index, copy=False) def _needs_reindex_multi(self, axes, method, level) -> bool: """ Check if we do need a multi reindex; this is for compat with higher dims. """ return False # error: Cannot determine type of 'align' NDFrame.align, # type: ignore[has-type] klass=_shared_doc_kwargs["klass"], axes_single_arg=_shared_doc_kwargs["axes_single_arg"], ) def align( self, other: Series, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value: Hashable = None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> Series: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[True], level: Level | None = ..., errors: IgnoreRaise = ..., ) -> None: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: Literal[False] = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series: ... def rename( self, index: Renamer | Hashable | None = ..., *, axis: Axis | None = ..., copy: bool = ..., inplace: bool = ..., level: Level | None = ..., errors: IgnoreRaise = ..., ) -> Series | None: ... def rename( self, index: Renamer | Hashable | None = None, *, axis: Axis | None = None, copy: bool = True, inplace: bool = False, level: Level | None = None, errors: IgnoreRaise = "ignore", ) -> Series | None: """ Alter Series index labels or name. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. Alternatively, change ``Series.name`` with a scalar value. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- index : scalar, hashable sequence, dict-like or function optional Functions or dict-like are transformations to apply to the index. Scalar or hashable sequence-like will alter the ``Series.name`` attribute. axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new Series. 
If True the value of copy is ignored. level : int or level name, default None In case of MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise `KeyError` when a `dict-like mapper` or `index` contains labels that are not present in the index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- Series or None Series with index labels or name altered or None if ``inplace=True``. See Also -------- DataFrame.rename : Corresponding DataFrame method. Series.rename_axis : Set the name of the axis. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.rename("my_name") # scalar, changes Series.name 0 1 1 2 2 3 Name: my_name, dtype: int64 >>> s.rename(lambda x: x ** 2) # function, changes labels 0 1 1 2 4 3 dtype: int64 >>> s.rename({1: 3, 2: 5}) # mapping, changes labels 0 1 3 2 5 3 dtype: int64 """ if axis is not None: # Make sure we raise if an invalid 'axis' is passed. axis = self._get_axis_number(axis) if callable(index) or is_dict_like(index): # error: Argument 1 to "_rename" of "NDFrame" has incompatible # type "Union[Union[Mapping[Any, Hashable], Callable[[Any], # Hashable]], Hashable, None]"; expected "Union[Mapping[Any, # Hashable], Callable[[Any], Hashable], None]" return super()._rename( index, # type: ignore[arg-type] copy=copy, inplace=inplace, level=level, errors=errors, ) else: return self._set_name(index, inplace=inplace) """ Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s 0 1 1 2 2 3 dtype: int64 >>> s.set_axis(['a', 'b', 'c'], axis=0) a 1 b 2 c 3 dtype: int64 """ ) **_shared_doc_kwargs, extended_summary_sub="", axis_description_sub="", see_also_sub="", ) ) ) # error: Cannot determine type of 'shift' # ---------------------------------------------------------------------- # Convert to types that support pd.NA # error: Cannot determine type of 'isna' # error: Return type "Series" of "isna" incompatible with return type "ndarray # [Any, dtype[bool_]]" in supertype "IndexOpsMixin" # error: Cannot determine type of 'isna' # error: Cannot determine type of 'notna' # error: Cannot determine type of 'notna' # ---------------------------------------------------------------------- # Time series-oriented methods # error: Cannot determine type of 'asfreq' # error: Cannot determine type of 'resample' # ---------------------------------------------------------------------- # Add index # ---------------------------------------------------------------------- # Accessor Methods # ---------------------------------------------------------------------- # ---------------------------------------------------------------------- # Add plotting methods to Series # ---------------------------------------------------------------------- # Template-Based Arithmetic/Comparison Methods Series The provided code snippet includes necessary dependencies for implementing the `_dtype_to_stata_type_117` function. Write a Python function `def _dtype_to_stata_type_117(dtype: np.dtype, column: Series, force_strl: bool) -> int` to solve the following problem: Converts dtype types to stata types. Returns the byte of the given ordinal. See TYPE_MAP and comments for an explanation. This is also explained in the dta spec. 
1 - 2045 are strings of this length
            Pandas    Stata
32768 - for object    strL
65526 - for float64   double
65527 - for float32   float
65528 - for int32     long
65529 - for int16     int
65530 - for int8      byte
If there are dates to convert, then dtype will already have the correct type inserted. Here is the function: def _dtype_to_stata_type_117(dtype: np.dtype, column: Series, force_strl: bool) -> int:
    """
    Converts dtype types to stata types. Returns the byte of the given ordinal.
    See TYPE_MAP and comments for an explanation. This is also explained in
    the dta spec.
    1 - 2045 are strings of this length
                Pandas    Stata
    32768 - for object    strL
    65526 - for float64   double
    65527 - for float32   float
    65528 - for int32     long
    65529 - for int16     int
    65530 - for int8      byte

    If there are dates to convert, then dtype will already have the correct
    type inserted.
    """
    # TODO: expand to handle datetime to integer conversion
    if force_strl:
        return 32768
    if dtype.type is np.object_:
        # try to coerce it to the biggest string
        # not memory efficient, what else could we
        # do?
        itemsize = max_len_string_array(ensure_object(column._values))
        itemsize = max(itemsize, 1)
        if itemsize <= 2045:
            return itemsize
        return 32768
    elif dtype.type is np.float64:
        return 65526
    elif dtype.type is np.float32:
        return 65527
    elif dtype.type is np.int32:
        return 65528
    elif dtype.type is np.int16:
        return 65529
    elif dtype.type is np.int8:
        return 65530
    else:  # pragma: no cover
        raise NotImplementedError(f"Data type {dtype} not supported.")
Converts dtype types to stata types. Returns the byte of the given ordinal. See TYPE_MAP and comments for an explanation. This is also explained in the dta spec. 1 - 2045 are strings of this length Pandas Stata 32768 - for object strL 65526 - for float64 double 65527 - for float32 float 65528 - for int32 long 65529 - for int16 int 65530 - for int8 byte If there are dates to convert, then dtype will already have the correct type inserted.
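A minimal usage sketch of the mapping above. `_dtype_to_stata_type_117` is a private helper inside pandas.io.stata, so the import below relies on pandas internals and may break across versions:

import numpy as np
import pandas as pd
from pandas.io.stata import _dtype_to_stata_type_117  # private API: assumed importable

floats = pd.Series([1.0, 2.0], dtype=np.float64)
text = pd.Series(["spam", "eggs"], dtype=object)

print(_dtype_to_stata_type_117(floats.dtype, floats, False))  # 65526 (double)
print(_dtype_to_stata_type_117(text.dtype, text, False))      # 4 (str of the longest value)
print(_dtype_to_stata_type_117(text.dtype, text, True))       # 32768 (strL forced)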
173,543
from __future__ import annotations
from collections import abc
import datetime
from io import BytesIO
import os
import struct
import sys
from types import TracebackType
from typing import (
    IO,
    TYPE_CHECKING,
    Any,
    AnyStr,
    Callable,
    Final,
    Hashable,
    Sequence,
    cast,
)
import warnings
from dateutil.relativedelta import relativedelta
import numpy as np
from pandas._libs.lib import infer_dtype
from pandas._libs.writers import max_len_string_array
from pandas._typing import (
    CompressionOptions,
    FilePath,
    ReadBuffer,
    StorageOptions,
    WriteBuffer,
)
from pandas.errors import (
    CategoricalConversionWarning,
    InvalidColumnName,
    PossiblePrecisionLoss,
    ValueLabelTypeMismatch,
)
from pandas.util._decorators import (
    Appender,
    doc,
)
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
    ensure_object,
    is_categorical_dtype,
    is_datetime64_dtype,
    is_numeric_dtype,
)
from pandas import (
    Categorical,
    DatetimeIndex,
    NaT,
    Timestamp,
    isna,
    to_datetime,
    to_timedelta,
)
from pandas.core.arrays.boolean import BooleanDtype
from pandas.core.arrays.integer import IntegerDtype
from pandas.core.frame import DataFrame
from pandas.core.indexes.base import Index
from pandas.core.series import Series
from pandas.core.shared_docs import _shared_docs
from pandas.io.common import get_handle The provided code snippet includes necessary dependencies for implementing the `_pad_bytes_new` function. Write a Python function `def _pad_bytes_new(name: str | bytes, length: int) -> bytes` to solve the following problem: Take a bytes or str instance and pad it with null bytes until it is `length` bytes long. Here is the function: def _pad_bytes_new(name: str | bytes, length: int) -> bytes:
    """
    Take a bytes or str instance and pad it with null bytes until it is
    `length` bytes long.
    """
    if isinstance(name, str):
        name = bytes(name, "utf-8")
    return name + b"\x00" * (length - len(name))
Take a bytes or str instance and pad it with null bytes until it is `length` bytes long.
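A quick, self-contained illustration of the padding behaviour (the helper itself is module-private, so the same logic is inlined here rather than imported):

def pad_bytes_new(name, length):
    # same logic as the private helper above
    if isinstance(name, str):
        name = bytes(name, "utf-8")
    return name + b"\x00" * (length - len(name))

assert pad_bytes_new("abc", 6) == b"abc\x00\x00\x00"
assert pad_bytes_new(b"abcd", 4) == b"abcd"  # already at the target length
# note: input longer than `length` is returned unpadded, not truncated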
173,544
from __future__ import annotations
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Hashable,
    Iterable,
    Literal,
    MutableMapping,
    Sequence,
    TypeVar,
    overload,
)
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.common import (
    is_integer,
    is_list_like,
)
_writers: MutableMapping[str, ExcelWriter_t] = {}
The provided code snippet includes necessary dependencies for implementing the `register_writer` function. Write a Python function `def register_writer(klass: ExcelWriter_t) -> None` to solve the following problem: Add an engine to the ``io.excel`` writer registry. You must use this method to integrate with ``to_excel``. Parameters ---------- klass : ExcelWriter Here is the function: def register_writer(klass: ExcelWriter_t) -> None:
    """
    Add an engine to the ``io.excel`` writer registry.

    You must use this method to integrate with ``to_excel``.

    Parameters
    ----------
    klass : ExcelWriter
    """
    if not callable(klass):
        raise ValueError("Can only register callables as engines")
    engine_name = klass._engine
    _writers[engine_name] = klass
Add an engine to the ``io.excel`` writer registry. You must use this method to integrate with ``to_excel``. Parameters ---------- klass : ExcelWriter
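A minimal registration sketch. A real engine must subclass pandas.ExcelWriter and implement its abstract interface; the stub class below is hypothetical and only demonstrates the registration call against the private pandas.io.excel._util module:

from pandas.io.excel._util import register_writer  # private module: assumed stable here

class DummyWriter:  # hypothetical stand-in; real engines subclass pandas.ExcelWriter
    _engine = "dummy"
    _supported_extensions = (".xlsx",)

register_writer(DummyWriter)  # afterwards, get_writer("dummy") resolves to DummyWriter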
173,545
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, Callable, Hashable, Iterable, Literal, MutableMapping, Sequence, TypeVar, overload, ) from pandas.compat._optional import import_optional_dependency from pandas.core.dtypes.common import ( is_integer, is_list_like, ) Literal: _SpecialForm = ... def import_optional_dependency( name: str, extra: str = "", errors: str = "raise", min_version: str | None = None, ): """ Import an optional dependency. By default, if a dependency is missing an ImportError with a nice message will be raised. If a dependency is present, but too old, we raise. Parameters ---------- name : str The module name. extra : str Additional text to include in the ImportError message. errors : str {'raise', 'warn', 'ignore'} What to do when a dependency is not found or its version is too old. * raise : Raise an ImportError * warn : Only applicable when a module's version is to old. Warns that the version is too old and returns None * ignore: If the module is not installed, return None, otherwise, return the module, even if the version is too old. It's expected that users validate the version locally when using ``errors="ignore"`` (see. ``io/html.py``) min_version : str, default None Specify a minimum version that is different from the global pandas minimum version required. Returns ------- maybe_module : Optional[ModuleType] The imported module, when found and the version is correct. None is returned when the package is not found and `errors` is False, or when the package's version is too old and `errors` is ``'warn'``. """ assert errors in {"warn", "raise", "ignore"} package_name = INSTALL_MAPPING.get(name) install_name = package_name if package_name is not None else name msg = ( f"Missing optional dependency '{install_name}'. {extra} " f"Use pip or conda to install {install_name}." ) try: module = importlib.import_module(name) except ImportError: if errors == "raise": raise ImportError(msg) return None # Handle submodules: if we have submodule, grab parent module from sys.modules parent = name.split(".")[0] if parent != name: install_name = parent module_to_get = sys.modules[install_name] else: module_to_get = module minimum_version = min_version if min_version is not None else VERSIONS.get(parent) if minimum_version: version = get_version(module_to_get) if version and Version(version) < Version(minimum_version): msg = ( f"Pandas requires version '{minimum_version}' or newer of '{parent}' " f"(version '{version}' currently installed)." ) if errors == "warn": warnings.warn( msg, UserWarning, stacklevel=find_stack_level(), ) return None elif errors == "raise": raise ImportError(msg) return module The provided code snippet includes necessary dependencies for implementing the `get_default_engine` function. Write a Python function `def get_default_engine(ext: str, mode: Literal["reader", "writer"] = "reader") -> str` to solve the following problem: Return the default reader/writer for the given extension. Parameters ---------- ext : str The excel file extension for which to get the default engine. mode : str {'reader', 'writer'} Whether to get the default engine for reading or writing. Either 'reader' or 'writer' Returns ------- str The default engine for the extension. Here is the function: def get_default_engine(ext: str, mode: Literal["reader", "writer"] = "reader") -> str: """ Return the default reader/writer for the given extension. Parameters ---------- ext : str The excel file extension for which to get the default engine. 
mode : str {'reader', 'writer'} Whether to get the default engine for reading or writing. Either 'reader' or 'writer' Returns ------- str The default engine for the extension. """ _default_readers = { "xlsx": "openpyxl", "xlsm": "openpyxl", "xlsb": "pyxlsb", "xls": "xlrd", "ods": "odf", } _default_writers = { "xlsx": "openpyxl", "xlsm": "openpyxl", "xlsb": "pyxlsb", "ods": "odf", } assert mode in ["reader", "writer"] if mode == "writer": # Prefer xlsxwriter over openpyxl if installed xlsxwriter = import_optional_dependency("xlsxwriter", errors="warn") if xlsxwriter: _default_writers["xlsx"] = "xlsxwriter" return _default_writers[ext] else: return _default_readers[ext]
Return the default reader/writer for the given extension. Parameters ---------- ext : str The excel file extension for which to get the default engine. mode : str {'reader', 'writer'} Whether to get the default engine for reading or writing. Either 'reader' or 'writer' Returns ------- str The default engine for the extension.
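A short usage sketch, importing from the private pandas.io.excel._util module (an internal path, assumed importable here). Which writer "xlsx" resolves to depends on whether xlsxwriter is installed:

from pandas.io.excel._util import get_default_engine

print(get_default_engine("xls"))                  # 'xlrd'
print(get_default_engine("ods", mode="writer"))   # 'odf'
print(get_default_engine("xlsx", mode="writer"))  # 'xlsxwriter' if installed, else 'openpyxl'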
173,546
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, Callable, Hashable, Iterable, Literal, MutableMapping, Sequence, TypeVar, overload, ) from pandas.compat._optional import import_optional_dependency from pandas.core.dtypes.common import ( is_integer, is_list_like, ) _writers: MutableMapping[str, ExcelWriter_t] = {} def get_writer(engine_name: str) -> ExcelWriter_t: try: return _writers[engine_name] except KeyError as err: raise ValueError(f"No Excel writer '{engine_name}'") from err
null
173,547
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, Callable, Hashable, Iterable, Literal, MutableMapping, Sequence, TypeVar, overload, ) from pandas.compat._optional import import_optional_dependency from pandas.core.dtypes.common import ( is_integer, is_list_like, ) def maybe_convert_usecols(usecols: str | list[int]) -> list[int]: ...
null
173,548
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, Callable, Hashable, Iterable, Literal, MutableMapping, Sequence, TypeVar, overload, ) from pandas.compat._optional import import_optional_dependency from pandas.core.dtypes.common import ( is_integer, is_list_like, ) def maybe_convert_usecols(usecols: list[str]) -> list[str]: ...
null
173,549
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, Callable, Hashable, Iterable, Literal, MutableMapping, Sequence, TypeVar, overload, ) from pandas.compat._optional import import_optional_dependency from pandas.core.dtypes.common import ( is_integer, is_list_like, ) def maybe_convert_usecols(usecols: usecols_func) -> usecols_func: ...
null
173,550
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, Callable, Hashable, Iterable, Literal, MutableMapping, Sequence, TypeVar, overload, ) from pandas.compat._optional import import_optional_dependency from pandas.core.dtypes.common import ( is_integer, is_list_like, ) def maybe_convert_usecols(usecols: None) -> None: ...
null
173,551
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, Callable, Hashable, Iterable, Literal, MutableMapping, Sequence, TypeVar, overload, ) from pandas.compat._optional import import_optional_dependency from pandas.core.dtypes.common import ( is_integer, is_list_like, ) def _range2cols(areas: str) -> list[int]: """ Convert comma separated list of column names and ranges to indices. Parameters ---------- areas : str A string containing a sequence of column ranges (or areas). Returns ------- cols : list A list of 0-based column indices. Examples -------- >>> _range2cols('A:E') [0, 1, 2, 3, 4] >>> _range2cols('A,C,Z:AB') [0, 2, 25, 26, 27] """ cols: list[int] = [] for rng in areas.split(","): if ":" in rng: rngs = rng.split(":") cols.extend(range(_excel2num(rngs[0]), _excel2num(rngs[1]) + 1)) else: cols.append(_excel2num(rng)) return cols The provided code snippet includes necessary dependencies for implementing the `maybe_convert_usecols` function. Write a Python function `def maybe_convert_usecols( usecols: str | list[int] | list[str] | usecols_func | None, ) -> None | list[int] | list[str] | usecols_func` to solve the following problem: Convert `usecols` into a compatible format for parsing in `parsers.py`. Parameters ---------- usecols : object The use-columns object to potentially convert. Returns ------- converted : object The compatible format of `usecols`. Here is the function: def maybe_convert_usecols( usecols: str | list[int] | list[str] | usecols_func | None, ) -> None | list[int] | list[str] | usecols_func: """ Convert `usecols` into a compatible format for parsing in `parsers.py`. Parameters ---------- usecols : object The use-columns object to potentially convert. Returns ------- converted : object The compatible format of `usecols`. """ if usecols is None: return usecols if is_integer(usecols): raise ValueError( "Passing an integer for `usecols` is no longer supported. " "Please pass in a list of int from 0 to `usecols` inclusive instead." ) if isinstance(usecols, str): return _range2cols(usecols) return usecols
Convert `usecols` into a compatible format for parsing in `parsers.py`. Parameters ---------- usecols : object The use-columns object to potentially convert. Returns ------- converted : object The compatible format of `usecols`.
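A short sketch of the three accepted spellings being normalised, again importing from the private pandas.io.excel._util module:

from pandas.io.excel._util import maybe_convert_usecols

print(maybe_convert_usecols("A,C,Z:AB"))  # [0, 2, 25, 26, 27] via _range2cols
print(maybe_convert_usecols(["a", "b"]))  # ['a', 'b'] -- lists pass through unchanged
print(maybe_convert_usecols(None))        # None
# maybe_convert_usecols(3) raises ValueError: bare integers are no longer supported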
173,552
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, Callable, Hashable, Iterable, Literal, MutableMapping, Sequence, TypeVar, overload, ) from pandas.compat._optional import import_optional_dependency from pandas.core.dtypes.common import ( is_integer, is_list_like, ) Literal: _SpecialForm = ... def validate_freeze_panes(freeze_panes: tuple[int, int]) -> Literal[True]: ...
null
173,553
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, Callable, Hashable, Iterable, Literal, MutableMapping, Sequence, TypeVar, overload, ) from pandas.compat._optional import import_optional_dependency from pandas.core.dtypes.common import ( is_integer, is_list_like, ) Literal: _SpecialForm = ... def validate_freeze_panes(freeze_panes: None) -> Literal[False]: ...
null
173,554
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, Callable, Hashable, Iterable, Literal, MutableMapping, Sequence, TypeVar, overload, ) from pandas.compat._optional import import_optional_dependency from pandas.core.dtypes.common import ( is_integer, is_list_like, ) def validate_freeze_panes(freeze_panes: tuple[int, int] | None) -> bool: if freeze_panes is not None: if len(freeze_panes) == 2 and all( isinstance(item, int) for item in freeze_panes ): return True raise ValueError( "freeze_panes must be of form (row, column) " "where row and column are integers" ) # freeze_panes wasn't specified, return False so it won't be applied # to output sheet return False
null
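A short usage sketch for the validator above (private pandas.io.excel._util import assumed):

from pandas.io.excel._util import validate_freeze_panes

assert validate_freeze_panes((1, 0)) is True  # freeze the first row
assert validate_freeze_panes(None) is False   # nothing to apply to the sheet
# validate_freeze_panes((1,)) raises ValueError: must be a (row, column) pair of ints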
173,555
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, Callable, Hashable, Iterable, Literal, MutableMapping, Sequence, TypeVar, overload, ) from pandas.compat._optional import import_optional_dependency from pandas.core.dtypes.common import ( is_integer, is_list_like, ) class Hashable(Protocol, metaclass=ABCMeta): # TODO: This is special, in that a subclass of a hashable class may not be hashable # (for example, list vs. object). It's not obvious how to represent this. This class # is currently mostly useless for static checking. def __hash__(self) -> int: ... The provided code snippet includes necessary dependencies for implementing the `fill_mi_header` function. Write a Python function `def fill_mi_header( row: list[Hashable], control_row: list[bool] ) -> tuple[list[Hashable], list[bool]]` to solve the following problem: Forward fill blank entries in row but only inside the same parent index. Used for creating headers in Multiindex. Parameters ---------- row : list List of items in a single row. control_row : list of bool Helps to determine if particular column is in same parent index as the previous value. Used to stop propagation of empty cells between different indexes. Returns ------- Returns changed row and control_row Here is the function: def fill_mi_header( row: list[Hashable], control_row: list[bool] ) -> tuple[list[Hashable], list[bool]]: """ Forward fill blank entries in row but only inside the same parent index. Used for creating headers in Multiindex. Parameters ---------- row : list List of items in a single row. control_row : list of bool Helps to determine if particular column is in same parent index as the previous value. Used to stop propagation of empty cells between different indexes. Returns ------- Returns changed row and control_row """ last = row[0] for i in range(1, len(row)): if not control_row[i]: last = row[i] if row[i] == "" or row[i] is None: row[i] = last else: control_row[i] = False last = row[i] return row, control_row
Forward fill blank entries in row, but only inside the same parent index. Used for creating headers in a MultiIndex. Parameters ---------- row : list List of items in a single row. control_row : list of bool Helps to determine whether a particular column is in the same parent index as the previous value. Used to stop propagation of empty cells between different indexes. Returns ------- Returns the changed row and control_row
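A small sketch of the forward-fill semantics: blanks inherit the value to their left, and a fresh value marks the column as belonging to a new parent index (private pandas.io.excel._util import assumed):

from pandas.io.excel._util import fill_mi_header

row, control = fill_mi_header(["a", "", "b", ""], [True, True, True, True])
print(row)      # ['a', 'a', 'b', 'b'] -- each blank filled from its parent
print(control)  # [True, True, False, True] -- 'b' started a new parent index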
173,556
from __future__ import annotations from typing import ( TYPE_CHECKING, Any, Callable, Hashable, Iterable, Literal, MutableMapping, Sequence, TypeVar, overload, ) from pandas.compat._optional import import_optional_dependency from pandas.core.dtypes.common import ( is_integer, is_list_like, ) class Hashable(Protocol, metaclass=ABCMeta): # TODO: This is special, in that a subclass of a hashable class may not be hashable # (for example, list vs. object). It's not obvious how to represent this. This class # is currently mostly useless for static checking. def __hash__(self) -> int: ... class Iterable(Protocol[_T_co]): def __iter__(self) -> Iterator[_T_co]: ... class Sequence(_Collection[_T_co], Reversible[_T_co], Generic[_T_co]): def __getitem__(self, i: int) -> _T_co: ... def __getitem__(self, s: slice) -> Sequence[_T_co]: ... # Mixin methods def index(self, value: Any, start: int = ..., stop: int = ...) -> int: ... def count(self, value: Any) -> int: ... def __contains__(self, x: object) -> bool: ... def __iter__(self) -> Iterator[_T_co]: ... def __reversed__(self) -> Iterator[_T_co]: ... The provided code snippet includes necessary dependencies for implementing the `pop_header_name` function. Write a Python function `def pop_header_name( row: list[Hashable], index_col: int | Sequence[int] ) -> tuple[Hashable | None, list[Hashable]]` to solve the following problem: Pop the header name for MultiIndex parsing. Parameters ---------- row : list The data row to parse for the header name. index_col : int, list The index columns for our data. Assumed to be non-null. Returns ------- header_name : str The extracted header name. trimmed_row : list The original data row with the header name removed. Here is the function: def pop_header_name( row: list[Hashable], index_col: int | Sequence[int] ) -> tuple[Hashable | None, list[Hashable]]: """ Pop the header name for MultiIndex parsing. Parameters ---------- row : list The data row to parse for the header name. index_col : int, list The index columns for our data. Assumed to be non-null. Returns ------- header_name : str The extracted header name. trimmed_row : list The original data row with the header name removed. """ # Pop out header name and fill w/blank. if is_list_like(index_col): assert isinstance(index_col, Iterable) i = max(index_col) else: assert not isinstance(index_col, Iterable) i = index_col header_name = row[i] header_name = None if header_name == "" else header_name return header_name, row[:i] + [""] + row[i + 1 :]
Pop the header name for MultiIndex parsing. Parameters ---------- row : list The data row to parse for the header name. index_col : int, list The index columns for our data. Assumed to be non-null. Returns ------- header_name : str The extracted header name. trimmed_row : list The original data row with the header name removed.
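A small sketch of the popping behaviour: the name is taken from the last index column (max of `index_col` when a list is given) and that slot is blanked out (private pandas.io.excel._util import assumed):

from pandas.io.excel._util import pop_header_name

name, row = pop_header_name(["idx", "col1", "col2"], index_col=0)
print(name)  # 'idx'
print(row)   # ['', 'col1', 'col2']

name, row = pop_header_name(["a", "b", "c", "d"], index_col=[0, 1])
print(name)  # 'b' -- taken from position max(index_col)
print(row)   # ['a', '', 'c', 'd']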
173,557
from __future__ import annotations
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Hashable,
    Iterable,
    Literal,
    MutableMapping,
    Sequence,
    TypeVar,
    overload,
)
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.common import (
    is_integer,
    is_list_like,
)
Any = object()
The provided code snippet includes necessary dependencies for implementing the `combine_kwargs` function. Write a Python function `def combine_kwargs(engine_kwargs: dict[str, Any] | None, kwargs: dict) -> dict` to solve the following problem: Used to combine two sources of kwargs for the backend engine. Use of kwargs is deprecated; this function is solely for use in 1.3 and should be removed in 1.4/2.0. Also, _base.ExcelWriter.__new__ ensures that at most one of engine_kwargs and kwargs is non-empty. Parameters ---------- engine_kwargs: dict kwargs to be passed through to the engine. kwargs: dict kwargs to be passed through to the engine (deprecated) Returns ------- engine_kwargs combined with kwargs Here is the function: def combine_kwargs(engine_kwargs: dict[str, Any] | None, kwargs: dict) -> dict:
    """
    Used to combine two sources of kwargs for the backend engine.

    Use of kwargs is deprecated; this function is solely for use in 1.3 and
    should be removed in 1.4/2.0. Also, _base.ExcelWriter.__new__ ensures
    that at most one of engine_kwargs and kwargs is non-empty.

    Parameters
    ----------
    engine_kwargs: dict
        kwargs to be passed through to the engine.
    kwargs: dict
        kwargs to be passed through to the engine (deprecated)

    Returns
    -------
    engine_kwargs combined with kwargs
    """
    if engine_kwargs is None:
        result = {}
    else:
        result = engine_kwargs.copy()
    result.update(kwargs)
    return result
Used to combine two sources of kwargs for the backend engine. Use of kwargs is deprecated; this function is solely for use in 1.3 and should be removed in 1.4/2.0. Also, _base.ExcelWriter.__new__ ensures that at most one of engine_kwargs and kwargs is non-empty. Parameters ---------- engine_kwargs: dict kwargs to be passed through to the engine. kwargs: dict kwargs to be passed through to the engine (deprecated) Returns ------- engine_kwargs combined with kwargs
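A usage sketch of the merge order: on key collisions the deprecated **kwargs win, because they are applied last via dict.update (assumes a pandas version that still ships this 1.3-era helper in the private pandas.io.excel._util module):

from pandas.io.excel._util import combine_kwargs

print(combine_kwargs({"mode": "a"}, {"datetime_format": "YYYY"}))
# {'mode': 'a', 'datetime_format': 'YYYY'}
print(combine_kwargs(None, {"mode": "w"}))
# {'mode': 'w'} -- None is treated as an empty engine_kwargs dict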
173,558
from __future__ import annotations import abc import datetime from functools import partial from io import BytesIO import os from textwrap import fill from types import TracebackType from typing import ( IO, Any, Callable, Hashable, Iterable, List, Literal, Mapping, Sequence, Union, cast, overload, ) import zipfile from pandas._config import config from pandas._libs import lib from pandas._libs.parsers import STR_NA_VALUES from pandas._typing import ( DtypeArg, DtypeBackend, FilePath, IntStrT, ReadBuffer, StorageOptions, WriteExcelBuffer, ) from pandas.compat._optional import ( get_version, import_optional_dependency, ) from pandas.errors import EmptyDataError from pandas.util._decorators import ( Appender, doc, ) from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( is_bool, is_float, is_integer, is_list_like, ) from pandas.core.frame import DataFrame from pandas.core.shared_docs import _shared_docs from pandas.util.version import Version from pandas.io.common import ( IOHandles, get_handle, stringify_path, validate_header_arg, ) from pandas.io.excel._util import ( fill_mi_header, get_default_engine, get_writer, maybe_convert_usecols, pop_header_name, ) from pandas.io.parsers import TextParser from pandas.io.parsers.readers import validate_integer class Callable(BaseTypingInstance): def py__call__(self, arguments): """ def x() -> Callable[[Callable[..., _T]], _T]: ... """ # The 0th index are the arguments. try: param_values = self._generics_manager[0] result_values = self._generics_manager[1] except IndexError: debug.warning('Callable[...] defined without two arguments') return NO_VALUES else: from jedi.inference.gradual.annotation import infer_return_for_callable return infer_return_for_callable(arguments, param_values, result_values) def py__get__(self, instance, class_value): return ValueSet([self]) class Hashable(Protocol, metaclass=ABCMeta): # TODO: This is special, in that a subclass of a hashable class may not be hashable # (for example, list vs. object). It's not obvious how to represent this. This class # is currently mostly useless for static checking. def __hash__(self) -> int: ... class Iterable(Protocol[_T_co]): def __iter__(self) -> Iterator[_T_co]: ... class Sequence(_Collection[_T_co], Reversible[_T_co], Generic[_T_co]): def __getitem__(self, i: int) -> _T_co: ... def __getitem__(self, s: slice) -> Sequence[_T_co]: ... # Mixin methods def index(self, value: Any, start: int = ..., stop: int = ...) -> int: ... def count(self, value: Any) -> int: ... def __contains__(self, x: object) -> bool: ... def __iter__(self) -> Iterator[_T_co]: ... def __reversed__(self) -> Iterator[_T_co]: ... Literal: _SpecialForm = ... DtypeArg = Union[Dtype, Dict[Hashable, Dtype]] StorageOptions = Optional[Dict[str, Any]] DtypeBackend = Literal["pyarrow", "numpy_nullable"] class DataFrame(NDFrame, OpsMixin): """ Two-dimensional, size-mutable, potentially heterogeneous tabular data. Data structure also contains labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure. Parameters ---------- data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame Dict can contain Series, arrays, constants, dataclass or list-like objects. If data is a dict, column order follows insertion-order. If a dict contains Series which have an index defined, it is aligned by its index. 
This alignment also occurs if data is a Series or a DataFrame itself. Alignment is done on Series/DataFrame inputs. If data is a list of dicts, column order follows insertion-order. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided. columns : Index or array-like Column labels to use for resulting frame when data does not have them, defaulting to RangeIndex(0, 1, 2, ..., n). If data contains column labels, will perform column selection instead. dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer. copy : bool or None, default None Copy data from inputs. For dict data, the default of None behaves like ``copy=True``. For DataFrame or 2d ndarray input, the default of None behaves like ``copy=False``. If data is a dict containing one or more Series (possibly of different dtypes), ``copy=False`` will ensure that these inputs are not copied. .. versionchanged:: 1.3.0 See Also -------- DataFrame.from_records : Constructor from tuples, also record arrays. DataFrame.from_dict : From dicts of Series, arrays, or dicts. read_csv : Read a comma-separated values (csv) file into DataFrame. read_table : Read general delimited file into DataFrame. read_clipboard : Read text from clipboard into DataFrame. Notes ----- Please reference the :ref:`User Guide <basics.dataframe>` for more information. Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. >>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = pd.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from a dictionary including Series: >>> d = {'col1': [0, 1, 2, 3], 'col2': pd.Series([2, 3], index=[2, 3])} >>> pd.DataFrame(data=d, index=[0, 1, 2, 3]) col1 col2 0 0 NaN 1 1 NaN 2 2 2.0 3 3 3.0 Constructing DataFrame from numpy ndarray: >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ... columns=['a', 'b', 'c']) >>> df2 a b c 0 1 2 3 1 4 5 6 2 7 8 9 Constructing DataFrame from a numpy ndarray that has labeled columns: >>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], ... dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")]) >>> df3 = pd.DataFrame(data, columns=['c', 'a']) ... 
>>> df3 c a 0 3 1 1 6 4 2 9 7 Constructing DataFrame from dataclass: >>> from dataclasses import make_dataclass >>> Point = make_dataclass("Point", [("x", int), ("y", int)]) >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)]) x y 0 0 0 1 0 3 2 2 3 Constructing DataFrame from Series/DataFrame: >>> ser = pd.Series([1, 2, 3], index=["a", "b", "c"]) >>> df = pd.DataFrame(data=ser, index=["a", "c"]) >>> df 0 a 1 c 3 >>> df1 = pd.DataFrame([1, 2, 3], index=["a", "b", "c"], columns=["x"]) >>> df2 = pd.DataFrame(data=df1, index=["a", "c"]) >>> df2 x a 1 c 3 """ _internal_names_set = {"columns", "index"} | NDFrame._internal_names_set _typ = "dataframe" _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray) _accessors: set[str] = {"sparse"} _hidden_attrs: frozenset[str] = NDFrame._hidden_attrs | frozenset([]) _mgr: BlockManager | ArrayManager def _constructor(self) -> Callable[..., DataFrame]: return DataFrame _constructor_sliced: Callable[..., Series] = Series # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index: Axes | None = None, columns: Axes | None = None, dtype: Dtype | None = None, copy: bool | None = None, ) -> None: if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._mgr if not copy: # if not copying data, ensure to still return a shallow copy # to avoid the result sharing the same Manager data = data.copy(deep=False) if isinstance(data, (BlockManager, ArrayManager)): if using_copy_on_write(): data = data.copy(deep=False) # first check if a Manager is passed without any other arguments # -> use fastpath (without checking Manager type) if index is None and columns is None and dtype is None and not copy: # GH#33357 fastpath NDFrame.__init__(self, data) return manager = get_option("mode.data_manager") # GH47215 if index is not None and isinstance(index, set): raise ValueError("index cannot be a set") if columns is not None and isinstance(columns, set): raise ValueError("columns cannot be a set") if copy is None: if isinstance(data, dict): # retain pre-GH#38939 default behavior copy = True elif ( manager == "array" and isinstance(data, (np.ndarray, ExtensionArray)) and data.ndim == 2 ): # INFO(ArrayManager) by default copy the 2D input array to get # contiguous 1D arrays copy = True elif using_copy_on_write() and not isinstance( data, (Index, DataFrame, Series) ): copy = True else: copy = False if data is None: index = index if index is not None else default_index(0) columns = columns if columns is not None else default_index(0) dtype = dtype if dtype is not None else pandas_dtype(object) data = [] if isinstance(data, (BlockManager, ArrayManager)): mgr = self._init_mgr( data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy ) elif isinstance(data, dict): # GH#38939 de facto copy defaults to False only in non-dict cases mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager) elif isinstance(data, ma.MaskedArray): from numpy.ma import mrecords # masked recarray if isinstance(data, mrecords.MaskedRecords): raise TypeError( "MaskedRecords are not supported. Pass " "{name: data[name] for name in data.dtype.names} " "instead" ) # a masked array data = sanitize_masked_array(data) mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) elif isinstance(data, (np.ndarray, Series, Index, ExtensionArray)): if data.dtype.names: # i.e. 
numpy structured array data = cast(np.ndarray, data) mgr = rec_array_to_mgr( data, index, columns, dtype, copy, typ=manager, ) elif getattr(data, "name", None) is not None: # i.e. Series/Index with non-None name _copy = copy if using_copy_on_write() else True mgr = dict_to_mgr( # error: Item "ndarray" of "Union[ndarray, Series, Index]" has no # attribute "name" {data.name: data}, # type: ignore[union-attr] index, columns, dtype=dtype, typ=manager, copy=_copy, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) # For data is list-like, or Iterable (will consume into list) elif is_list_like(data): if not isinstance(data, abc.Sequence): if hasattr(data, "__array__"): # GH#44616 big perf improvement for e.g. pytorch tensor data = np.asarray(data) else: data = list(data) if len(data) > 0: if is_dataclass(data[0]): data = dataclasses_to_dicts(data) if not isinstance(data, np.ndarray) and treat_as_nested(data): # exclude ndarray as we may have cast it a few lines above if columns is not None: columns = ensure_index(columns) arrays, columns, index = nested_data_to_arrays( # error: Argument 3 to "nested_data_to_arrays" has incompatible # type "Optional[Collection[Any]]"; expected "Optional[Index]" data, columns, index, # type: ignore[arg-type] dtype, ) mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, typ=manager, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) else: mgr = dict_to_mgr( {}, index, columns if columns is not None else default_index(0), dtype=dtype, typ=manager, ) # For data is scalar else: if index is None or columns is None: raise ValueError("DataFrame constructor not properly called!") index = ensure_index(index) columns = ensure_index(columns) if not dtype: dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True) # For data is a scalar extension dtype if isinstance(dtype, ExtensionDtype): # TODO(EA2D): special case not needed with 2D EAs values = [ construct_1d_arraylike_from_scalar(data, len(index), dtype) for _ in range(len(columns)) ] mgr = arrays_to_mgr(values, columns, index, dtype=None, typ=manager) else: arr2d = construct_2d_arraylike_from_scalar( data, len(index), len(columns), dtype, copy, ) mgr = ndarray_to_mgr( arr2d, index, columns, dtype=arr2d.dtype, copy=False, typ=manager, ) # ensure correct Manager type according to settings mgr = mgr_to_mgr(mgr, typ=manager) NDFrame.__init__(self, mgr) # ---------------------------------------------------------------------- def __dataframe__( self, nan_as_null: bool = False, allow_copy: bool = True ) -> DataFrameXchg: """ Return the dataframe interchange object implementing the interchange protocol. Parameters ---------- nan_as_null : bool, default False Whether to tell the DataFrame to overwrite null values in the data with ``NaN`` (or ``NaT``). allow_copy : bool, default True Whether to allow memory copying when exporting. If set to False it would cause non-zero-copy exports to fail. Returns ------- DataFrame interchange object The object which consuming library can use to ingress the dataframe. Notes ----- Details on the interchange protocol: https://data-apis.org/dataframe-protocol/latest/index.html `nan_as_null` currently has no effect; once support for nullable extension dtypes is added, this value should be propagated to columns. 
""" from pandas.core.interchange.dataframe import PandasDataFrameXchg return PandasDataFrameXchg(self, nan_as_null, allow_copy) # ---------------------------------------------------------------------- def axes(self) -> list[Index]: """ Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns] def shape(self) -> tuple[int, int]: """ Return a tuple representing the dimensionality of the DataFrame. See Also -------- ndarray.shape : Tuple of array dimensions. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self.index), len(self.columns) def _is_homogeneous_type(self) -> bool: """ Whether all the columns in a DataFrame have the same type. Returns ------- bool See Also -------- Index._is_homogeneous_type : Whether the object has a single dtype. MultiIndex._is_homogeneous_type : Whether all the levels of a MultiIndex have the same dtype. Examples -------- >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type True >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type False Items with the same type but different sizes are considered different types. >>> DataFrame({ ... "A": np.array([1, 2], dtype=np.int32), ... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type False """ if isinstance(self._mgr, ArrayManager): return len({arr.dtype for arr in self._mgr.arrays}) == 1 if self._mgr.any_extension_types: return len({block.dtype for block in self._mgr.blocks}) == 1 else: return not self._is_mixed_type def _can_fast_transpose(self) -> bool: """ Can we transpose this DataFrame without creating any new array objects. """ if isinstance(self._mgr, ArrayManager): return False blocks = self._mgr.blocks if len(blocks) != 1: return False dtype = blocks[0].dtype # TODO(EA2D) special case would be unnecessary with 2D EAs return not is_1d_only_ea_dtype(dtype) def _values(self) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray: """ Analogue to ._values that may return a 2D ExtensionArray. """ mgr = self._mgr if isinstance(mgr, ArrayManager): if len(mgr.arrays) == 1 and not is_1d_only_ea_dtype(mgr.arrays[0].dtype): # error: Item "ExtensionArray" of "Union[ndarray, ExtensionArray]" # has no attribute "reshape" return mgr.arrays[0].reshape(-1, 1) # type: ignore[union-attr] return ensure_wrapped_if_datetimelike(self.values) blocks = mgr.blocks if len(blocks) != 1: return ensure_wrapped_if_datetimelike(self.values) arr = blocks[0].values if arr.ndim == 1: # non-2D ExtensionArray return self.values # more generally, whatever we allow in NDArrayBackedExtensionBlock arr = cast("np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray", arr) return arr.T # ---------------------------------------------------------------------- # Rendering Methods def _repr_fits_vertical_(self) -> bool: """ Check length against max_rows. """ max_rows = get_option("display.max_rows") return len(self) <= max_rows def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool: """ Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns. In case of non-interactive session, no boundaries apply. 
`ignore_width` is here so ipynb+HTML output can behave the way users expect. display.max_columns remains in effect. GH3541, GH3573 """ width, height = console.get_console_size() max_columns = get_option("display.max_columns") nb_columns = len(self.columns) # exceed max columns if (max_columns and nb_columns > max_columns) or ( (not ignore_width) and width and nb_columns > (width // 2) ): return False # used by repr_html under IPython notebook or scripts ignore terminal # dims if ignore_width or width is None or not console.in_interactive_session(): return True if get_option("display.width") is not None or console.in_ipython_frontend(): # check at least the column row for excessive width max_rows = 1 else: max_rows = get_option("display.max_rows") # when auto-detecting, so width=None and not in ipython front end # check whether repr fits horizontal by actually checking # the width of the rendered repr buf = StringIO() # only care about the stuff we'll actually print out # and to_string on entire frame may be expensive d = self if max_rows is not None: # unlimited rows # min of two, where one may be None d = d.iloc[: min(max_rows, len(d))] else: return True d.to_string(buf=buf) value = buf.getvalue() repr_width = max(len(line) for line in value.split("\n")) return repr_width < width def _info_repr(self) -> bool: """ True if the repr should show the info view. """ info_repr_option = get_option("display.large_repr") == "info" return info_repr_option and not ( self._repr_fits_horizontal_() and self._repr_fits_vertical_() ) def __repr__(self) -> str: """ Return a string representation for a particular DataFrame. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) return buf.getvalue() repr_params = fmt.get_dataframe_repr_params() return self.to_string(**repr_params) def _repr_html_(self) -> str | None: """ Return a html representation for a particular DataFrame. Mainly for IPython notebook. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) # need to escape the <class>, should be the first line. val = buf.getvalue().replace("<", r"&lt;", 1) val = val.replace(">", r"&gt;", 1) return f"<pre>{val}</pre>" if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") min_rows = get_option("display.min_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") formatter = fmt.DataFrameFormatter( self, columns=None, col_space=None, na_rep="NaN", formatters=None, float_format=None, sparsify=None, justify=None, index_names=True, header=True, index=True, bold_rows=True, escape=True, max_rows=max_rows, min_rows=min_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=".", ) return fmt.DataFrameRenderer(formatter).to_html(notebook=True) else: return None def to_string( self, buf: None = ..., columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> str: ... 
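    # Typing overload stubs: the ``to_string`` signature above, called with
    # ``buf=None``, returns the rendered table as a ``str``; the signature
    # below, given an explicit path or writable buffer, writes to it and
    # returns ``None``.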
    def to_string(
        self,
        buf: FilePath | WriteBuffer[str],
        columns: Sequence[str] | None = ...,
        col_space: int | list[int] | dict[Hashable, int] | None = ...,
        header: bool | Sequence[str] = ...,
        index: bool = ...,
        na_rep: str = ...,
        formatters: fmt.FormattersType | None = ...,
        float_format: fmt.FloatFormatType | None = ...,
        sparsify: bool | None = ...,
        index_names: bool = ...,
        justify: str | None = ...,
        max_rows: int | None = ...,
        max_cols: int | None = ...,
        show_dimensions: bool = ...,
        decimal: str = ...,
        line_width: int | None = ...,
        min_rows: int | None = ...,
        max_colwidth: int | None = ...,
        encoding: str | None = ...,
    ) -> None:
        ...

    @Substitution(
        header_type="bool or sequence of str",
        header="Write out the column names. If a list of strings "
        "is given, it is assumed to be aliases for the "
        "column names",
        col_space_type="int, list or dict of int",
        col_space="The minimum width of each column. If a list of ints is given "
        "each integer corresponds to one column. If a dict is given, the key "
        "references the column, while the value defines the space to use.",
    )
    def to_string(
        self,
        buf: FilePath | WriteBuffer[str] | None = None,
        columns: Sequence[str] | None = None,
        col_space: int | list[int] | dict[Hashable, int] | None = None,
        header: bool | Sequence[str] = True,
        index: bool = True,
        na_rep: str = "NaN",
        formatters: fmt.FormattersType | None = None,
        float_format: fmt.FloatFormatType | None = None,
        sparsify: bool | None = None,
        index_names: bool = True,
        justify: str | None = None,
        max_rows: int | None = None,
        max_cols: int | None = None,
        show_dimensions: bool = False,
        decimal: str = ".",
        line_width: int | None = None,
        min_rows: int | None = None,
        max_colwidth: int | None = None,
        encoding: str | None = None,
    ) -> str | None:
        """
        Render a DataFrame to a console-friendly tabular output.
        %(shared_params)s
        line_width : int, optional
            Width to wrap a line in characters.
        min_rows : int, optional
            The number of rows to display in the console in a truncated repr
            (when number of rows is above `max_rows`).
        max_colwidth : int, optional
            Max width to truncate each column in characters. By default, no limit.
        encoding : str, default "utf-8"
            Set character encoding.
        %(returns)s
        See Also
        --------
        to_html : Convert DataFrame to HTML.

        Examples
        --------
        >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]}
        >>> df = pd.DataFrame(d)
        >>> print(df.to_string())
           col1  col2
        0     1     4
        1     2     5
        2     3     6
        """
        from pandas import option_context

        with option_context("display.max_colwidth", max_colwidth):
            formatter = fmt.DataFrameFormatter(
                self,
                columns=columns,
                col_space=col_space,
                na_rep=na_rep,
                formatters=formatters,
                float_format=float_format,
                sparsify=sparsify,
                justify=justify,
                index_names=index_names,
                header=header,
                index=index,
                min_rows=min_rows,
                max_rows=max_rows,
                max_cols=max_cols,
                show_dimensions=show_dimensions,
                decimal=decimal,
            )
            return fmt.DataFrameRenderer(formatter).to_string(
                buf=buf,
                encoding=encoding,
                line_width=line_width,
            )

    # ----------------------------------------------------------------------

    def style(self) -> Styler:
        """
        Returns a Styler object.

        Contains methods for building a styled HTML representation of the
        DataFrame.

        See Also
        --------
        io.formats.style.Styler : Helps style a DataFrame or Series according
            to the data with HTML and CSS.
        """
        from pandas.io.formats.style import Styler

        return Styler(self)

    _shared_docs[
        "items"
    ] = r"""
    Iterate over (column name, Series) pairs.

    Iterates over the DataFrame columns, returning a tuple with
    the column name and the content as a Series.
Yields ------ label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. Examples -------- >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.items(): ... print(f'label: {label}') ... print(f'content: {content}', sep='\n') ... label: species content: panda bear polar bear koala marsupial Name: species, dtype: object label: population content: panda 1864 polar 22000 koala 80000 Name: population, dtype: int64 """ def items(self) -> Iterable[tuple[Hashable, Series]]: if self.columns.is_unique and hasattr(self, "_item_cache"): for k in self.columns: yield k, self._get_item_cache(k) else: for i, k in enumerate(self.columns): yield k, self._ixs(i, axis=1) def iterrows(self) -> Iterable[tuple[Hashable, Series]]: """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : Series The data of the row as a Series. See Also -------- DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. """ columns = self.columns klass = self._constructor_sliced using_cow = using_copy_on_write() for k, v in zip(self.index, self.values): s = klass(v, index=columns, name=k).__finalize__(self) if using_cow and self._mgr.is_single_block: s._mgr.add_references(self._mgr) # type: ignore[arg-type] yield k, s def itertuples( self, index: bool = True, name: str | None = "Pandas" ) -> Iterable[tuple[Any, ...]]: """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "Pandas" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. 
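        For example, a frame with duplicate columns ``['a', 'a']`` yields
        fields named ``Index``, ``a`` and ``_2``, the underscore name marking
        the renamed second occurrence.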
Examples -------- >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... Pandas(Index='dog', num_legs=4, num_wings=0) Pandas(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... Pandas(num_legs=4, num_wings=0) Pandas(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2) """ arrays = [] fields = list(self.columns) if index: arrays.append(self.index) fields.insert(0, "Index") # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) if name is not None: # https://github.com/python/mypy/issues/9046 # error: namedtuple() expects a string literal as the first argument itertuple = collections.namedtuple( # type: ignore[misc] name, fields, rename=True ) return map(itertuple._make, zip(*arrays)) # fallback to regular tuples return zip(*arrays) def __len__(self) -> int: """ Returns length of info axis, but here we use the index. """ return len(self.index) def dot(self, other: Series) -> Series: ... def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame: ... def dot(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of an other Series, DataFrame or a numpy array. It can also be called using ``self @ other`` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the matrix product with. Returns ------- Series or DataFrame If other is a Series, return the matrix product between self and other as a Series. If other is a DataFrame or a numpy.array, return the matrix product of self and other in a DataFrame of a np.array. See Also -------- Series.dot: Similar method for Series. Notes ----- The dimensions of DataFrame and other must be compatible in order to compute the matrix multiplication. In addition, the column names of DataFrame and the index of other must contain the same values, as they will be aligned prior to the multiplication. The dot method for Series computes the inner product, instead of the matrix product here. Examples -------- Here we multiply a DataFrame with a Series. >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) >>> s = pd.Series([1, 1, 2, 1]) >>> df.dot(s) 0 -4 1 5 dtype: int64 Here we multiply a DataFrame with another DataFrame. >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(other) 0 1 0 1 4 1 2 2 Note that the dot method give the same result as @ >>> df @ other 0 1 0 1 4 1 2 2 The dot method works also if other is an np.array. >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(arr) 0 1 0 1 4 1 2 2 Note how shuffling of the objects does not change the result. 
>>> s2 = s.reindex([1, 0, 2, 3]) >>> df.dot(s2) 0 -4 1 5 dtype: int64 """ if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) if len(common) > len(self.columns) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(columns=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right._values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: raise ValueError( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, DataFrame): return self._constructor( np.dot(lvals, rvals), index=left.index, columns=other.columns, copy=False, ) elif isinstance(other, Series): return self._constructor_sliced( np.dot(lvals, rvals), index=left.index, copy=False ) elif isinstance(rvals, (np.ndarray, Index)): result = np.dot(lvals, rvals) if result.ndim == 2: return self._constructor(result, index=left.index, copy=False) else: return self._constructor_sliced(result, index=left.index, copy=False) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other: Series) -> Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(other) def __rmatmul__(self, other) -> DataFrame: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ try: return self.T.dot(np.transpose(other)).T except ValueError as err: if "shape mismatch" not in str(err): raise # GH#21581 give exception message for original shapes msg = f"shapes {np.shape(other)} and {self.shape} not aligned" raise ValueError(msg) from err # ---------------------------------------------------------------------- # IO methods (to / from other formats) def from_dict( cls, data: dict, orient: str = "columns", dtype: Dtype | None = None, columns: Axes | None = None, ) -> DataFrame: """ Construct DataFrame from dict of array-like or dicts. Creates DataFrame object from dictionary by columns or by index allowing dtype specification. Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. orient : {'columns', 'index', 'tight'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. If 'tight', assume a dict with keys ['index', 'columns', 'data', 'index_names', 'column_names']. .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument dtype : dtype, default None Data type to force after DataFrame construction, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'`` or ``orient='tight'``. Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from structured ndarray, sequence of tuples or dicts, or DataFrame. DataFrame : DataFrame object creation using constructor. DataFrame.to_dict : Convert the DataFrame to a dictionary. 
Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data, orient='index') 0 1 2 3 row_1 3 2 1 0 row_2 a b c d When using the 'index' orientation, the column names can be specified manually: >>> pd.DataFrame.from_dict(data, orient='index', ... columns=['A', 'B', 'C', 'D']) A B C D row_1 3 2 1 0 row_2 a b c d Specify ``orient='tight'`` to create the DataFrame using a 'tight' format: >>> data = {'index': [('a', 'b'), ('a', 'c')], ... 'columns': [('x', 1), ('y', 2)], ... 'data': [[1, 3], [2, 4]], ... 'index_names': ['n1', 'n2'], ... 'column_names': ['z1', 'z2']} >>> pd.DataFrame.from_dict(data, orient='tight') z1 x y z2 1 2 n1 n2 a b 1 3 c 2 4 """ index = None orient = orient.lower() if orient == "index": if len(data) > 0: # TODO speed up Series case if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: index = list(data.keys()) # error: Incompatible types in assignment (expression has type # "List[Any]", variable has type "Dict[Any, Any]") data = list(data.values()) # type: ignore[assignment] elif orient in ("columns", "tight"): if columns is not None: raise ValueError(f"cannot use columns parameter with orient='{orient}'") else: # pragma: no cover raise ValueError( f"Expected 'index', 'columns' or 'tight' for orient parameter. " f"Got '{orient}' instead" ) if orient != "tight": return cls(data, index=index, columns=columns, dtype=dtype) else: realdata = data["data"] def create_index(indexlist, namelist): index: Index if len(namelist) > 1: index = MultiIndex.from_tuples(indexlist, names=namelist) else: index = Index(indexlist, name=namelist[0]) return index index = create_index(data["index"], data["index_names"]) columns = create_index(data["columns"], data["column_names"]) return cls(realdata, index=index, columns=columns, dtype=dtype) def to_numpy( self, dtype: npt.DTypeLike | None = None, copy: bool = False, na_value: object = lib.no_default, ) -> np.ndarray: """ Convert the DataFrame to a NumPy array. By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the results dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray`. copy : bool, default False Whether to ensure that the returned value is not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. na_value : Any, optional The value to use for missing values. The default value depends on `dtype` and the dtypes of the DataFrame columns. .. versionadded:: 1.1.0 Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogeneous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. 
, 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) """ if dtype is not None: dtype = np.dtype(dtype) result = self._mgr.as_array(dtype=dtype, copy=copy, na_value=na_value) if result.dtype is not dtype: result = np.array(result, dtype=dtype, copy=False) return result def _create_data_for_split_and_tight_to_dict( self, are_all_object_dtype_cols: bool, object_dtype_indices: list[int] ) -> list: """ Simple helper method to create data for to ``to_dict(orient="split")`` and ``to_dict(orient="tight")`` to create the main output data """ if are_all_object_dtype_cols: data = [ list(map(maybe_box_native, t)) for t in self.itertuples(index=False, name=None) ] else: data = [list(t) for t in self.itertuples(index=False, name=None)] if object_dtype_indices: # If we have object_dtype_cols, apply maybe_box_naive after list # comprehension for perf for row in data: for i in object_dtype_indices: row[i] = maybe_box_native(row[i]) return data def to_dict( self, orient: Literal["dict", "list", "series", "split", "tight", "index"] = ..., into: type[dict] = ..., ) -> dict: ... def to_dict(self, orient: Literal["records"], into: type[dict] = ...) -> list[dict]: ... def to_dict( self, orient: Literal[ "dict", "list", "series", "split", "tight", "records", "index" ] = "dict", into: type[dict] = dict, index: bool = True, ) -> dict | list[dict]: """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'tight' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values], 'index_names' -> [index.names], 'column_names' -> [column.names]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. index : bool, default True Whether to include the index item (and index_names item if `orient` is 'tight') in the returned dictionary. Can only be ``False`` when `orient` is 'split' or 'tight'. .. versionadded:: 2.0.0 Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. See Also -------- DataFrame.from_dict: Create a DataFrame from a dictionary. DataFrame.to_json: Convert a DataFrame to JSON format. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df.to_dict() {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}} You can specify the return orientation. 
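        For example, ``'list'`` maps each column to a plain list of its values:

        >>> df.to_dict('list')
        {'col1': [1, 2], 'col2': [0.5, 0.75]}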
>>> df.to_dict('series') {'col1': row1 1 row2 2 Name: col1, dtype: int64, 'col2': row1 0.50 row2 0.75 Name: col2, dtype: float64} >>> df.to_dict('split') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]} >>> df.to_dict('records') [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}] >>> df.to_dict('index') {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} >>> df.to_dict('tight') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]], 'index_names': [None], 'column_names': [None]} You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}), defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})] """ from pandas.core.methods.to_dict import to_dict return to_dict(self, orient, into, index) def to_gbq( self, destination_table: str, project_id: str | None = None, chunksize: int | None = None, reauth: bool = False, if_exists: str = "fail", auth_local_webserver: bool = True, table_schema: list[dict[str, str]] | None = None, location: str | None = None, progress_bar: bool = True, credentials=None, ) -> None: """ Write a DataFrame to a Google BigQuery table. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. Parameters ---------- destination_table : str Name of table to be written, in the form ``dataset.tablename``. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. chunksize : int, optional Number of rows to be inserted in each chunk from the dataframe. Set to ``None`` to load the whole dataframe at once. reauth : bool, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. if_exists : str, default 'fail' Behavior when the destination table exists. Value can be one of: ``'fail'`` If table exists raise pandas_gbq.gbq.TableCreationError. ``'replace'`` If table exists, drop it, recreate it, and insert data. ``'append'`` If table exists, insert data. Create if does not exist. auth_local_webserver : bool, default True Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. _console flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. .. versionchanged:: 1.5.0 Default value is changed to ``True``. Google has deprecated the ``auth_local_webserver = False`` `"out of band" (copy-paste) flow <https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html?m=1#disallowed-oob>`_. table_schema : list of dicts, optional List of BigQuery table fields to which according DataFrame columns conform to, e.g. ``[{'name': 'col1', 'type': 'STRING'},...]``. 
If schema is not provided, it will be generated according to dtypes of DataFrame columns. See BigQuery API documentation on available names of a field. *New in version 0.3.1 of pandas-gbq*. location : str, optional Location where the load job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of the target dataset. *New in version 0.5.0 of pandas-gbq*. progress_bar : bool, default True Use the library `tqdm` to show the progress bar for the upload, chunk by chunk. *New in version 0.5.0 of pandas-gbq*. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. *New in version 0.8.0 of pandas-gbq*. See Also -------- pandas_gbq.to_gbq : This function in the pandas-gbq library. read_gbq : Read a DataFrame from Google BigQuery. """ from pandas.io import gbq gbq.to_gbq( self, destination_table, project_id=project_id, chunksize=chunksize, reauth=reauth, if_exists=if_exists, auth_local_webserver=auth_local_webserver, table_schema=table_schema, location=location, progress_bar=progress_bar, credentials=credentials, ) def from_records( cls, data, index=None, exclude=None, columns=None, coerce_float: bool = False, nrows: int | None = None, ) -> DataFrame: """ Convert structured or record ndarray to DataFrame. Creates a DataFrame object from a structured ndarray, sequence of tuples or dicts, or DataFrame. Parameters ---------- data : structured ndarray, sequence of tuples or dicts, or DataFrame Structured input data. index : str, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use. exclude : sequence, default None Columns or fields to exclude. columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns). coerce_float : bool, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. nrows : int, default None Number of rows to read if data is an iterator. Returns ------- DataFrame See Also -------- DataFrame.from_dict : DataFrame from dict of array-like or dicts. DataFrame : DataFrame object creation using constructor. Examples -------- Data can be provided as a structured ndarray: >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')], ... dtype=[('col_1', 'i4'), ('col_2', 'U1')]) >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of dicts: >>> data = [{'col_1': 3, 'col_2': 'a'}, ... {'col_1': 2, 'col_2': 'b'}, ... {'col_1': 1, 'col_2': 'c'}, ... 
{'col_1': 0, 'col_2': 'd'}] >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of tuples with corresponding columns: >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')] >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2']) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d """ if isinstance(data, DataFrame): if columns is not None: if is_scalar(columns): columns = [columns] data = data[columns] if index is not None: data = data.set_index(index) if exclude is not None: data = data.drop(columns=exclude) return data.copy(deep=False) result_index = None # Make a copy of the input columns so we can modify it if columns is not None: columns = ensure_index(columns) def maybe_reorder( arrays: list[ArrayLike], arr_columns: Index, columns: Index, index ) -> tuple[list[ArrayLike], Index, Index | None]: """ If our desired 'columns' do not match the data's pre-existing 'arr_columns', we re-order our arrays. This is like a pre-emptive (cheap) reindex. """ if len(arrays): length = len(arrays[0]) else: length = 0 result_index = None if len(arrays) == 0 and index is None and length == 0: result_index = default_index(0) arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, length) return arrays, arr_columns, result_index if is_iterator(data): if nrows == 0: return cls() try: first_row = next(data) except StopIteration: return cls(index=index, columns=columns) dtype = None if hasattr(first_row, "dtype") and first_row.dtype.names: dtype = first_row.dtype values = [first_row] if nrows is None: values += data else: values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) else: data = values if isinstance(data, dict): if columns is None: columns = arr_columns = ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] arr_columns_list = [] for k, v in data.items(): if k in columns: arr_columns_list.append(k) arrays.append(v) arr_columns = Index(arr_columns_list) arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = to_arrays(data, columns) arr_columns = columns else: arrays, arr_columns = to_arrays(data, columns) if coerce_float: for i, arr in enumerate(arrays): if arr.dtype == object: # error: Argument 1 to "maybe_convert_objects" has # incompatible type "Union[ExtensionArray, ndarray]"; # expected "ndarray" arrays[i] = lib.maybe_convert_objects( arr, # type: ignore[arg-type] try_float=True, ) arr_columns = ensure_index(arr_columns) if columns is None: columns = arr_columns else: arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) if exclude is None: exclude = set() else: exclude = set(exclude) if index is not None: if isinstance(index, str) or not hasattr(index, "__iter__"): i = columns.get_loc(index) exclude.add(index) if len(arrays) > 0: result_index = Index(arrays[i], name=index) else: result_index = Index([], name=index) else: try: index_data = [arrays[arr_columns.get_loc(field)] for field in index] except (KeyError, TypeError): # raised by get_loc, see GH#29258 result_index = index else: result_index = ensure_index_from_sequences(index_data, names=index) exclude.update(index) if any(exclude): arr_exclude = [x for x in exclude if x in arr_columns] to_remove = [arr_columns.get_loc(col) for col in arr_exclude] arrays = [v for i, v in enumerate(arrays) if i not in to_remove] columns = columns.drop(exclude) manager = 
get_option("mode.data_manager") mgr = arrays_to_mgr(arrays, columns, result_index, typ=manager) return cls(mgr) def to_records( self, index: bool = True, column_dtypes=None, index_dtypes=None ) -> np.recarray: """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. column_dtypes : str, type, dict, default None If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) If the DataFrame index has no label then the recarray field name is set to 'index'. If the index has a label then this is used as the field name: >>> df.index = df.index.rename("I") >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')]) As well as for the index: >>> df.to_records(index_dtypes="<S2") rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')]) >>> index_dtypes = f"<S{df.index.str.len().max()}" >>> df.to_records(index_dtypes=index_dtypes) rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) """ if index: ix_vals = [ np.asarray(self.index.get_level_values(i)) for i in range(self.index.nlevels) ] arrays = ix_vals + [ np.asarray(self.iloc[:, i]) for i in range(len(self.columns)) ] index_names = list(self.index.names) if isinstance(self.index, MultiIndex): index_names = com.fill_missing_names(index_names) elif index_names[0] is None: index_names = ["index"] names = [str(name) for name in itertools.chain(index_names, self.columns)] else: arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))] names = [str(c) for c in self.columns] index_names = [] index_len = len(index_names) formats = [] for i, v in enumerate(arrays): index_int = i # When the names and arrays are collected, we # first collect those in the DataFrame's index, # followed by those in its columns. # # Thus, the total length of the array is: # len(index_names) + len(DataFrame.columns). # # This check allows us to see whether we are # handling a name / array in the index or column. 
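            # For example, with ``index=True`` and a single-level index over
            # columns ['A', 'B'], i == 0 refers to the index array while
            # i == 1 and i == 2 refer to the 'A' and 'B' columns respectively.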
if index_int < index_len: dtype_mapping = index_dtypes name = index_names[index_int] else: index_int -= index_len dtype_mapping = column_dtypes name = self.columns[index_int] # We have a dictionary, so we get the data type # associated with the index or column (which can # be denoted by its name in the DataFrame or its # position in DataFrame's array of indices or # columns, whichever is applicable. if is_dict_like(dtype_mapping): if name in dtype_mapping: dtype_mapping = dtype_mapping[name] elif index_int in dtype_mapping: dtype_mapping = dtype_mapping[index_int] else: dtype_mapping = None # If no mapping can be found, use the array's # dtype attribute for formatting. # # A valid dtype must either be a type or # string naming a type. if dtype_mapping is None: formats.append(v.dtype) elif isinstance(dtype_mapping, (type, np.dtype, str)): # error: Argument 1 to "append" of "list" has incompatible # type "Union[type, dtype[Any], str]"; expected "dtype[Any]" formats.append(dtype_mapping) # type: ignore[arg-type] else: element = "row" if i < index_len else "column" msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}" raise ValueError(msg) return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats}) def _from_arrays( cls, arrays, columns, index, dtype: Dtype | None = None, verify_integrity: bool = True, ) -> DataFrame: """ Create DataFrame from a list of arrays corresponding to the columns. Parameters ---------- arrays : list-like of arrays Each array in the list corresponds to one column, in order. columns : list-like, Index The column names for the resulting DataFrame. index : list-like, Index The rows labels for the resulting DataFrame. dtype : dtype, optional Optional dtype to enforce for all arrays. verify_integrity : bool, default True Validate and homogenize all input. If set to False, it is assumed that all elements of `arrays` are actual arrays how they will be stored in a block (numpy ndarray or ExtensionArray), have the same length as and are aligned with the index, and that `columns` and `index` are ensured to be an Index object. Returns ------- DataFrame """ if dtype is not None: dtype = pandas_dtype(dtype) manager = get_option("mode.data_manager") columns = ensure_index(columns) if len(columns) != len(arrays): raise ValueError("len(columns) must match len(arrays)") mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, verify_integrity=verify_integrity, typ=manager, ) return cls(mgr) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path", ) def to_stata( self, path: FilePath | WriteBuffer[bytes], *, convert_dates: dict[Hashable, str] | None = None, write_index: bool = True, byteorder: str | None = None, time_stamp: datetime.datetime | None = None, data_label: str | None = None, variable_labels: dict[Hashable, str] | None = None, version: int | None = 114, convert_strl: Sequence[Hashable] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, value_labels: dict[Hashable, dict[float, str]] | None = None, ) -> None: """ Export DataFrame object to Stata dta format. Writes the DataFrame to a Stata dataset file. "dta" files contain a Stata dataset. Parameters ---------- path : str, path object, or buffer String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. 
Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information. write_index : bool Write the index to Stata dataset. byteorder : str Can be ">", "<", "little", or "big". default is `sys.byteorder`. time_stamp : datetime A datetime to use as file creation date. Default is the current time. data_label : str, optional A label for the data set. Must be 80 characters or smaller. variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. version : {{114, 117, 118, 119, None}}, default 114 Version to use in the output dta file. Set to None to let pandas decide between 118 or 119 formats depending on the number of columns in the frame. Version 114 can be read by Stata 10 and later. Version 117 can be read by Stata 13 or later. Version 118 is supported in Stata 14 and later. Version 119 is supported in Stata 15 and later. Version 114 limits string variables to 244 characters or fewer while versions 117 and later allow strings with lengths up to 2,000,000 characters. Versions 118 and 119 support Unicode characters, and version 119 supports more than 32,767 variables. Version 119 should usually only be used when the number of variables exceeds the capacity of dta format 118. Exporting smaller datasets in format 119 may have unintended consequences, and, as of November 2020, Stata SE cannot read version 119 files. convert_strl : list, optional List of column names to convert to string columns to Stata StrL format. Only available if version is 117. Storing strings in the StrL format can produce smaller dta files if strings have more than 8 characters and values are repeated. {compression_options} .. versionadded:: 1.1.0 .. versionchanged:: 1.4.0 Zstandard support. {storage_options} .. versionadded:: 1.2.0 value_labels : dict of dicts Dictionary containing columns as keys and dictionaries of column value to labels as values. Labels for a single variable must be 32,000 characters or smaller. .. versionadded:: 1.4.0 Raises ------ NotImplementedError * If datetimes contain timezone information * Column dtype is not representable in Stata ValueError * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters See Also -------- read_stata : Import Stata data files. io.stata.StataWriter : Low-level writer for Stata data files. io.stata.StataWriter117 : Low-level writer for version 117 files. Examples -------- >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], ... 
'speed': [350, 18, 361, 15]}}) >>> df.to_stata('animals.dta') # doctest: +SKIP """ if version not in (114, 117, 118, 119, None): raise ValueError("Only formats 114, 117, 118 and 119 are supported.") if version == 114: if convert_strl is not None: raise ValueError("strl is not supported in format 114") from pandas.io.stata import StataWriter as statawriter elif version == 117: # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriter117 as statawriter, ) else: # versions 118 and 119 # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriterUTF8 as statawriter, ) kwargs: dict[str, Any] = {} if version is None or version >= 117: # strl conversion is only supported >= 117 kwargs["convert_strl"] = convert_strl if version is None or version >= 118: # Specifying the version is only supported for UTF8 (118 or 119) kwargs["version"] = version writer = statawriter( path, self, convert_dates=convert_dates, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, write_index=write_index, variable_labels=variable_labels, compression=compression, storage_options=storage_options, value_labels=value_labels, **kwargs, ) writer.write_file() def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None: """ Write a DataFrame to the binary Feather format. Parameters ---------- path : str, path object, file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. If a string or a path, it will be used as Root Directory path when writing a partitioned dataset. **kwargs : Additional keywords passed to :func:`pyarrow.feather.write_feather`. Starting with pyarrow 0.17, this includes the `compression`, `compression_level`, `chunksize` and `version` keywords. .. versionadded:: 1.1.0 Notes ----- This function writes the dataframe as a `feather file <https://arrow.apache.org/docs/python/feather.html>`_. Requires a default index. For saving the DataFrame with your custom index use a method that supports custom indices e.g. `to_parquet`. """ from pandas.io.feather_format import to_feather to_feather(self, path, **kwargs) Series.to_markdown, klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples="""Examples -------- >>> df = pd.DataFrame( ... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]} ... ) >>> print(df.to_markdown()) | | animal_1 | animal_2 | |---:|:-----------|:-----------| | 0 | elk | dog | | 1 | pig | quetzal | Output markdown with a tabulate option. 
            >>> print(df.to_markdown(tablefmt="grid"))
            +----+------------+------------+
            |    | animal_1   | animal_2   |
            +====+============+============+
            |  0 | elk        | dog        |
            +----+------------+------------+
            |  1 | pig        | quetzal    |
            +----+------------+------------+""",
    )
    def to_markdown(
        self,
        buf: FilePath | WriteBuffer[str] | None = None,
        mode: str = "wt",
        index: bool = True,
        storage_options: StorageOptions = None,
        **kwargs,
    ) -> str | None:
        if "showindex" in kwargs:
            raise ValueError("Pass 'index' instead of 'showindex'")

        kwargs.setdefault("headers", "keys")
        kwargs.setdefault("tablefmt", "pipe")
        kwargs.setdefault("showindex", index)
        tabulate = import_optional_dependency("tabulate")
        result = tabulate.tabulate(self, **kwargs)
        if buf is None:
            return result

        with get_handle(buf, mode, storage_options=storage_options) as handles:
            handles.handle.write(result)
        return None

    def to_parquet(
        self,
        path: None = ...,
        engine: str = ...,
        compression: str | None = ...,
        index: bool | None = ...,
        partition_cols: list[str] | None = ...,
        storage_options: StorageOptions = ...,
        **kwargs,
    ) -> bytes:
        ...

    def to_parquet(
        self,
        path: FilePath | WriteBuffer[bytes],
        engine: str = ...,
        compression: str | None = ...,
        index: bool | None = ...,
        partition_cols: list[str] | None = ...,
        storage_options: StorageOptions = ...,
        **kwargs,
    ) -> None:
        ...

    def to_parquet(
        self,
        path: FilePath | WriteBuffer[bytes] | None = None,
        engine: str = "auto",
        compression: str | None = "snappy",
        index: bool | None = None,
        partition_cols: list[str] | None = None,
        storage_options: StorageOptions = None,
        **kwargs,
    ) -> bytes | None:
        """
        Write a DataFrame to the binary parquet format.

        This function writes the dataframe as a `parquet file
        <https://parquet.apache.org/>`_. You can choose different parquet
        backends, and have the option of compression. See
        :ref:`the user guide <io.parquet>` for more details.

        Parameters
        ----------
        path : str, path object, file-like object, or None, default None
            String, path object (implementing ``os.PathLike[str]``), or file-like
            object implementing a binary ``write()`` function. If None, the result is
            returned as bytes. If a string or path, it will be used as Root Directory
            path when writing a partitioned dataset.

            .. versionchanged:: 1.2.0

            Previously this was "fname"

        engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
            Parquet library to use. If 'auto', then the option
            ``io.parquet.engine`` is used. The default ``io.parquet.engine``
            behavior is to try 'pyarrow', falling back to 'fastparquet' if
            'pyarrow' is unavailable.
        compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy'
            Name of the compression to use. Use ``None`` for no compression.
        index : bool, default None
            If ``True``, include the dataframe's index(es) in the file output.
            If ``False``, they will not be written to the file.
            If ``None``, similar to ``True`` the dataframe's index(es)
            will be saved. However, instead of being saved as values,
            the RangeIndex will be stored as a range in the metadata so it
            doesn't require much space and is faster. Other indexes will
            be included as columns in the file output.
        partition_cols : list, optional, default None
            Column names by which to partition the dataset.
            Columns are partitioned in the order they are given.
            Must be None if path is not a string.
        {storage_options}

            .. versionadded:: 1.2.0

        **kwargs
            Additional arguments passed to the parquet library. See
            :ref:`pandas io <io.parquet>` for more details.

        Returns
        -------
        bytes if no path argument is provided else None

        See Also
        --------
        read_parquet : Read a parquet file.
DataFrame.to_orc : Write an orc file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- This function requires either the `fastparquet <https://pypi.org/project/fastparquet>`_ or `pyarrow <https://arrow.apache.org/docs/python/>`_ library. Examples -------- >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}}) >>> df.to_parquet('df.parquet.gzip', ... compression='gzip') # doctest: +SKIP >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP col1 col2 0 1 3 1 2 4 If you want to get a buffer to the parquet content you can use a io.BytesIO object, as long as you don't use partition_cols, which creates multiple files. >>> import io >>> f = io.BytesIO() >>> df.to_parquet(f) >>> f.seek(0) 0 >>> content = f.read() """ from pandas.io.parquet import to_parquet return to_parquet( self, path, engine, compression=compression, index=index, partition_cols=partition_cols, storage_options=storage_options, **kwargs, ) def to_orc( self, path: FilePath | WriteBuffer[bytes] | None = None, *, engine: Literal["pyarrow"] = "pyarrow", index: bool | None = None, engine_kwargs: dict[str, Any] | None = None, ) -> bytes | None: """ Write a DataFrame to the ORC format. .. versionadded:: 1.5.0 Parameters ---------- path : str, file-like object or None, default None If a string, it will be used as Root Directory path when writing a partitioned dataset. By file-like object, we refer to objects with a write() method, such as a file handle (e.g. via builtin open function). If path is None, a bytes object is returned. engine : str, default 'pyarrow' ORC library to use. Pyarrow must be >= 7.0.0. index : bool, optional If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``infer`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. engine_kwargs : dict[str, Any] or None, default None Additional keyword arguments passed to :func:`pyarrow.orc.write_table`. Returns ------- bytes if no path argument is provided else None Raises ------ NotImplementedError Dtype of one or more columns is category, unsigned integers, interval, period or sparse. ValueError engine is not pyarrow. See Also -------- read_orc : Read a ORC file. DataFrame.to_parquet : Write a parquet file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- * Before using this function you should read the :ref:`user guide about ORC <io.orc>` and :ref:`install optional dependencies <install.warn_orc>`. * This function requires `pyarrow <https://arrow.apache.org/docs/python/>`_ library. * For supported dtypes please refer to `supported ORC features in Arrow <https://arrow.apache.org/docs/cpp/orc.html#data-types>`__. * Currently timezones in datetime columns are not preserved when a dataframe is converted into ORC files. 
Examples -------- >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [4, 3]}) >>> df.to_orc('df.orc') # doctest: +SKIP >>> pd.read_orc('df.orc') # doctest: +SKIP col1 col2 0 1 4 1 2 3 If you want to get a buffer to the orc content you can write it to io.BytesIO >>> import io >>> b = io.BytesIO(df.to_orc()) # doctest: +SKIP >>> b.seek(0) # doctest: +SKIP 0 >>> content = b.read() # doctest: +SKIP """ from pandas.io.orc import to_orc return to_orc( self, path, engine=engine, index=index, engine_kwargs=engine_kwargs ) def to_html( self, buf: FilePath | WriteBuffer[str], columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> None: ... def to_html( self, buf: None = ..., columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> str: ... header_type="bool", header="Whether to print column labels, default True", col_space_type="str or int, list or dict of int or str", col_space="The minimum width of each column in CSS length " "units. An int is assumed to be px units.", ) def to_html( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[Level] | None = None, col_space: ColspaceArgType | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: FormattersType | None = None, float_format: FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool | str = False, decimal: str = ".", bold_rows: bool = True, classes: str | list | tuple | None = None, escape: bool = True, notebook: bool = False, border: int | bool | None = None, table_id: str | None = None, render_links: bool = False, encoding: str | None = None, ) -> str | None: """ Render a DataFrame as an HTML table. %(shared_params)s bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.display.html.border``. 
        table_id : str, optional
            A CSS id is included in the opening `<table>` tag if specified.
        render_links : bool, default False
            Convert URLs to HTML links.
        encoding : str, default "utf-8"
            Set character encoding.

            .. versionadded:: 1.0
        %(returns)s
        See Also
        --------
        to_string : Convert DataFrame to a string.
        """
        if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS:
            raise ValueError("Invalid value for justify parameter")

        formatter = fmt.DataFrameFormatter(
            self,
            columns=columns,
            col_space=col_space,
            na_rep=na_rep,
            header=header,
            index=index,
            formatters=formatters,
            float_format=float_format,
            bold_rows=bold_rows,
            sparsify=sparsify,
            justify=justify,
            index_names=index_names,
            escape=escape,
            decimal=decimal,
            max_rows=max_rows,
            max_cols=max_cols,
            show_dimensions=show_dimensions,
        )
        # TODO: a generic formatter would be in DataFrameFormatter
        return fmt.DataFrameRenderer(formatter).to_html(
            buf=buf,
            classes=classes,
            notebook=notebook,
            border=border,
            encoding=encoding,
            table_id=table_id,
            render_links=render_links,
        )

    @doc(
        storage_options=_shared_docs["storage_options"],
        compression_options=_shared_docs["compression_options"] % "path_or_buffer",
    )
    def to_xml(
        self,
        path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
        index: bool = True,
        root_name: str | None = "data",
        row_name: str | None = "row",
        na_rep: str | None = None,
        attr_cols: list[str] | None = None,
        elem_cols: list[str] | None = None,
        namespaces: dict[str | None, str] | None = None,
        prefix: str | None = None,
        encoding: str = "utf-8",
        xml_declaration: bool | None = True,
        pretty_print: bool | None = True,
        parser: str | None = "lxml",
        stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = None,
        compression: CompressionOptions = "infer",
        storage_options: StorageOptions = None,
    ) -> str | None:
        """
        Render a DataFrame to an XML document.

        .. versionadded:: 1.3.0

        Parameters
        ----------
        path_or_buffer : str, path object, file-like object, or None, default None
            String, path object (implementing ``os.PathLike[str]``), or file-like
            object implementing a ``write()`` function. If None, the result is
            returned as a string.
        index : bool, default True
            Whether to include index in XML document.
        root_name : str, default 'data'
            The name of root element in XML document.
        row_name : str, default 'row'
            The name of row element in XML document.
        na_rep : str, optional
            Missing data representation.
        attr_cols : list-like, optional
            List of columns to write as attributes in row element.
            Hierarchical columns will be flattened with underscore
            delimiting the different levels.
        elem_cols : list-like, optional
            List of columns to write as children in row element. By default,
            all columns output as children of row element. Hierarchical
            columns will be flattened with underscore delimiting the
            different levels.
        namespaces : dict, optional
            All namespaces to be defined in root element. Keys of dict
            should be prefix names and values of dict corresponding URIs.
            Default namespaces should be given empty string key. For
            example, ::

                namespaces = {{"": "https://example.com"}}

        prefix : str, optional
            Namespace prefix to be used for every element and/or attribute
            in document. This should be one of the keys in ``namespaces``
            dict.
        encoding : str, default 'utf-8'
            Encoding of the resulting document.
        xml_declaration : bool, default True
            Whether to include the XML declaration at start of document.
        pretty_print : bool, default True
            Whether output should be pretty printed with indentation and
            line breaks.
        parser : {{'lxml','etree'}}, default 'lxml'
            Parser module to use for building of tree. Only 'lxml' and
            'etree' are supported. With 'lxml', the ability to use an XSLT
            stylesheet is supported.
        stylesheet : str, path object or file-like object, optional
            A URL, file-like object, or a raw string containing an XSLT
            script used to transform the raw XML output. Script should use
            layout of elements and attributes from original output. This
            argument requires ``lxml`` to be installed. Only XSLT 1.0
            scripts, and not later versions, are currently supported.
        {compression_options}

            .. versionchanged:: 1.4.0
                Zstandard support.

        {storage_options}

        Returns
        -------
        None or str
            If ``io`` is None, returns the resulting XML format as a
            string. Otherwise returns None.

        See Also
        --------
        to_json : Convert the pandas object to a JSON string.
        to_html : Convert DataFrame to HTML.

        Examples
        --------
        >>> df = pd.DataFrame({{'shape': ['square', 'circle', 'triangle'],
        ...                    'degrees': [360, 360, 180],
        ...                    'sides': [4, np.nan, 3]}})

        >>> df.to_xml()  # doctest: +SKIP
        <?xml version='1.0' encoding='utf-8'?>
        <data>
          <row>
            <index>0</index>
            <shape>square</shape>
            <degrees>360</degrees>
            <sides>4.0</sides>
          </row>
          <row>
            <index>1</index>
            <shape>circle</shape>
            <degrees>360</degrees>
            <sides/>
          </row>
          <row>
            <index>2</index>
            <shape>triangle</shape>
            <degrees>180</degrees>
            <sides>3.0</sides>
          </row>
        </data>

        >>> df.to_xml(attr_cols=[
        ...           'index', 'shape', 'degrees', 'sides'
        ...           ])  # doctest: +SKIP
        <?xml version='1.0' encoding='utf-8'?>
        <data>
          <row index="0" shape="square" degrees="360" sides="4.0"/>
          <row index="1" shape="circle" degrees="360"/>
          <row index="2" shape="triangle" degrees="180" sides="3.0"/>
        </data>

        >>> df.to_xml(namespaces={{"doc": "https://example.com"}},
        ...           prefix="doc")  # doctest: +SKIP
        <?xml version='1.0' encoding='utf-8'?>
        <doc:data xmlns:doc="https://example.com">
          <doc:row>
            <doc:index>0</doc:index>
            <doc:shape>square</doc:shape>
            <doc:degrees>360</doc:degrees>
            <doc:sides>4.0</doc:sides>
          </doc:row>
          <doc:row>
            <doc:index>1</doc:index>
            <doc:shape>circle</doc:shape>
            <doc:degrees>360</doc:degrees>
            <doc:sides/>
          </doc:row>
          <doc:row>
            <doc:index>2</doc:index>
            <doc:shape>triangle</doc:shape>
            <doc:degrees>180</doc:degrees>
            <doc:sides>3.0</doc:sides>
          </doc:row>
        </doc:data>
        """
        from pandas.io.formats.xml import (
            EtreeXMLFormatter,
            LxmlXMLFormatter,
        )

        lxml = import_optional_dependency("lxml.etree", errors="ignore")

        TreeBuilder: type[EtreeXMLFormatter] | type[LxmlXMLFormatter]

        if parser == "lxml":
            if lxml is not None:
                TreeBuilder = LxmlXMLFormatter
            else:
                raise ImportError(
                    "lxml not found, please install or use the etree parser."
                )
        elif parser == "etree":
            TreeBuilder = EtreeXMLFormatter
        else:
            raise ValueError("Values for parser can only be lxml or etree.")

        xml_formatter = TreeBuilder(
            self,
            path_or_buffer=path_or_buffer,
            index=index,
            root_name=root_name,
            row_name=row_name,
            na_rep=na_rep,
            attr_cols=attr_cols,
            elem_cols=elem_cols,
            namespaces=namespaces,
            prefix=prefix,
            encoding=encoding,
            xml_declaration=xml_declaration,
            pretty_print=pretty_print,
            stylesheet=stylesheet,
            compression=compression,
            storage_options=storage_options,
        )

        return xml_formatter.write_output()

    # ----------------------------------------------------------------------
    def info(
        self,
        verbose: bool | None = None,
        buf: WriteBuffer[str] | None = None,
        max_cols: int | None = None,
        memory_usage: bool | str | None = None,
        show_counts: bool | None = None,
    ) -> None:
        info = DataFrameInfo(
            data=self,
            memory_usage=memory_usage,
        )
        info.render(
            buf=buf,
            max_cols=max_cols,
            verbose=verbose,
            show_counts=show_counts,
        )

    def memory_usage(self, index: bool = True, deep: bool = False) -> Series:
        """
        Return the memory usage of each column in bytes.

        The memory usage can optionally include the contribution of
        the index and elements of `object` dtype.

        This value is displayed in `DataFrame.info` by default. This can be
        suppressed by setting ``pandas.options.display.memory_usage`` to False.

        Parameters
        ----------
        index : bool, default True
            Specifies whether to include the memory usage of the DataFrame's
            index in returned Series. If ``index=True``, the memory usage of
            the index is the first item in the output.
        deep : bool, default False
            If True, introspect the data deeply by interrogating
            `object` dtypes for system-level memory consumption, and include
            it in the returned values.

        Returns
        -------
        Series
            A Series whose index is the original column names and whose values
            are the memory usage of each column in bytes.

        See Also
        --------
        numpy.ndarray.nbytes : Total bytes consumed by the elements of an
            ndarray.
        Series.memory_usage : Bytes consumed by a Series.
        Categorical : Memory-efficient array for string values with
            many repeated values.
        DataFrame.info : Concise summary of a DataFrame.

        Notes
        -----
        See the :ref:`Frequently Asked Questions <df-memory-usage>` for more
        details.

        Examples
        --------
        >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
        >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t))
        ...              for t in dtypes])
        >>> df = pd.DataFrame(data)
        >>> df.head()
           int64  float64  complex128  object  bool
        0      1      1.0    1.0+0.0j       1  True
        1      1      1.0    1.0+0.0j       1  True
        2      1      1.0    1.0+0.0j       1  True
        3      1      1.0    1.0+0.0j       1  True
        4      1      1.0    1.0+0.0j       1  True

        >>> df.memory_usage()
        Index           128
        int64         40000
        float64       40000
        complex128    80000
        object        40000
        bool           5000
        dtype: int64

        >>> df.memory_usage(index=False)
        int64         40000
        float64       40000
        complex128    80000
        object        40000
        bool           5000
        dtype: int64

        The memory footprint of `object` dtype columns is ignored by default:

        >>> df.memory_usage(deep=True)
        Index            128
        int64          40000
        float64        40000
        complex128     80000
        object        180000
        bool            5000
        dtype: int64

        Use a Categorical for efficient storage of an object-dtype column with
        many repeated values.
>>> df['object'].astype('category').memory_usage(deep=True) 5244 """ result = self._constructor_sliced( [c.memory_usage(index=False, deep=deep) for col, c in self.items()], index=self.columns, dtype=np.intp, ) if index: index_memory_usage = self._constructor_sliced( self.index.memory_usage(deep=deep), index=["Index"] ) result = index_memory_usage._append(result) return result def transpose(self, *args, copy: bool = False) -> DataFrame: """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. Parameters ---------- *args : tuple, optional Accepted for compatibility with NumPy. copy : bool, default False Whether to copy the data after transposing, even for DataFrames with a single dtype. Note that a copy is always required for mixed dtype DataFrames, or for DataFrames with any extension types. Returns ------- DataFrame The transposed DataFrame. See Also -------- numpy.transpose : Permute the dimensions of a given array. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the `object` dtype. In such a case, a copy of the data is always made. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = pd.DataFrame(data=d1) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T # or df1.transpose() >>> df1_transposed 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'name': ['Alice', 'Bob'], ... 'score': [9.5, 8], ... 'employed': [False, True], ... 'kids': [0, 0]} >>> df2 = pd.DataFrame(data=d2) >>> df2 name score employed kids 0 Alice 9.5 False 0 1 Bob 8.0 True 0 >>> df2_transposed = df2.T # or df2.transpose() >>> df2_transposed 0 1 name Alice Bob score 9.5 8.0 employed False True kids 0 0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the `object` dtype: >>> df2.dtypes name object score float64 employed bool kids int64 dtype: object >>> df2_transposed.dtypes 0 object 1 object dtype: object """ nv.validate_transpose(args, {}) # construct the args dtypes = list(self.dtypes) if self._can_fast_transpose: # Note: tests pass without this, but this improves perf quite a bit. new_vals = self._values.T if copy and not using_copy_on_write(): new_vals = new_vals.copy() result = self._constructor( new_vals, index=self.columns, columns=self.index, copy=False ) if using_copy_on_write() and len(self) > 0: result._mgr.add_references(self._mgr) # type: ignore[arg-type] elif ( self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]) ): # We have EAs with the same dtype. We can preserve that dtype in transpose. dtype = dtypes[0] arr_type = dtype.construct_array_type() values = self.values new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values] result = type(self)._from_arrays( new_values, index=self.columns, columns=self.index ) else: new_arr = self.values.T if copy and not using_copy_on_write(): new_arr = new_arr.copy() result = self._constructor( new_arr, index=self.columns, columns=self.index, # We already made a copy (more than one block) copy=False, ) return result.__finalize__(self, method="transpose") def T(self) -> DataFrame: """ The transpose of the DataFrame. 
Returns ------- DataFrame The transposed DataFrame. See Also -------- DataFrame.transpose : Transpose index and columns. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.T 0 1 col1 1 2 col2 3 4 """ return self.transpose() # ---------------------------------------------------------------------- # Indexing Methods def _ixs(self, i: int, axis: AxisInt = 0) -> Series: """ Parameters ---------- i : int axis : int Returns ------- Series """ # irow if axis == 0: new_mgr = self._mgr.fast_xs(i) # if we are a copy, mark as such copy = isinstance(new_mgr.array, np.ndarray) and new_mgr.array.base is None result = self._constructor_sliced(new_mgr, name=self.index[i]).__finalize__( self ) result._set_is_copy(self, copy=copy) return result # icol else: label = self.columns[i] col_mgr = self._mgr.iget(i) result = self._box_col_values(col_mgr, i) # this is a cached value, mark it so result._set_as_cached(label, self) return result def _get_column_array(self, i: int) -> ArrayLike: """ Get the values of the i'th column (ndarray or ExtensionArray, as stored in the Block) Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ return self._mgr.iget_values(i) def _iter_column_arrays(self) -> Iterator[ArrayLike]: """ Iterate over the arrays of all columns in order. This returns the values as stored in the Block (ndarray or ExtensionArray). Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ for i in range(len(self.columns)): yield self._get_column_array(i) def _getitem_nocopy(self, key: list): """ Behaves like __getitem__, but returns a view in cases where __getitem__ would make a copy. """ # TODO(CoW): can be removed if/when we are always Copy-on-Write indexer = self.columns._get_indexer_strict(key, "columns")[1] new_axis = self.columns[indexer] new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=0, allow_dups=True, copy=False, only_slice=True, ) return self._constructor(new_mgr) def __getitem__(self, key): check_dict_or_set_indexers(key) key = lib.item_from_zerodim(key) key = com.apply_if_callable(key, self) if is_hashable(key) and not is_iterator(key): # is_iterator to exclude generator e.g. test_getitem_listlike # shortcut if the key is in columns is_mi = isinstance(self.columns, MultiIndex) # GH#45316 Return view if key is not duplicated # Only use drop_duplicates with duplicates for performance if not is_mi and ( self.columns.is_unique and key in self.columns or key in self.columns.drop_duplicates(keep=False) ): return self._get_item_cache(key) elif is_mi and self.columns.is_unique and key in self.columns: return self._getitem_multilevel(key) # Do we have a slicer (on rows)? if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") if isinstance(indexer, np.ndarray): # reachable with DatetimeIndex indexer = lib.maybe_indices_to_slice( indexer.astype(np.intp, copy=False), len(self) ) if isinstance(indexer, np.ndarray): # GH#43223 If we can not convert, use take return self.take(indexer, axis=0) return self._slice(indexer, axis=0) # Do we have a (boolean) DataFrame? if isinstance(key, DataFrame): return self.where(key) # Do we have a (boolean) 1d indexer? 
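        # e.g. df[df["a"] > 0] or df[[True, False, True]]; a 1d boolean mask
        # (list, ndarray, or Series of bools) selects rows, not columns, and
        # is routed to _getitem_bool_array below.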
if com.is_bool_indexer(key): return self._getitem_bool_array(key) # We are left with two options: a single key, and a collection of keys, # We interpret tuples as collections only for non-MultiIndex is_single_key = isinstance(key, tuple) or not is_list_like(key) if is_single_key: if self.columns.nlevels > 1: return self._getitem_multilevel(key) indexer = self.columns.get_loc(key) if is_integer(indexer): indexer = [indexer] else: if is_iterator(key): key = list(key) indexer = self.columns._get_indexer_strict(key, "columns")[1] # take() does not accept boolean indexers if getattr(indexer, "dtype", None) == bool: indexer = np.where(indexer)[0] data = self._take_with_is_copy(indexer, axis=1) if is_single_key: # What does looking for a single key in a non-unique index return? # The behavior is inconsistent. It returns a Series, except when # - the key itself is repeated (test on data.shape, #9519), or # - we have a MultiIndex on columns (test on self.columns, #21309) if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex): # GH#26490 using data[key] can cause RecursionError return data._get_item_cache(key) return data def _getitem_bool_array(self, key): # also raises Exception if object array with NA values # warning here just in case -- previously __setitem__ was # reindexing but __getitem__ was not; it seems more reasonable to # go with the __setitem__ behavior since that is more consistent # with all other indexing behavior if isinstance(key, Series) and not key.index.equals(self.index): warnings.warn( "Boolean Series key will be reindexed to match DataFrame index.", UserWarning, stacklevel=find_stack_level(), ) elif len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}." ) # check_bool_indexer will throw exception if Series key cannot # be reindexed to match DataFrame rows key = check_bool_indexer(self.index, key) if key.all(): return self.copy(deep=None) indexer = key.nonzero()[0] return self._take_with_is_copy(indexer, axis=0) def _getitem_multilevel(self, key): # self.columns is a MultiIndex loc = self.columns.get_loc(key) if isinstance(loc, (slice, np.ndarray)): new_columns = self.columns[loc] result_columns = maybe_droplevels(new_columns, key) if self._is_mixed_type: result = self.reindex(columns=new_columns) result.columns = result_columns else: new_values = self._values[:, loc] result = self._constructor( new_values, index=self.index, columns=result_columns, copy=False ) if using_copy_on_write() and isinstance(loc, slice): result._mgr.add_references(self._mgr) # type: ignore[arg-type] result = result.__finalize__(self) # If there is only one column being returned, and its name is # either an empty string, or a tuple with an empty string as its # first element, then treat the empty string as a placeholder # and return the column as if the user had provided that empty # string in the key. If the result is a Series, exclude the # implied empty string from its name. if len(result.columns) == 1: # e.g. 
test_frame_getitem_multicolumn_empty_level, # test_frame_mixed_depth_get, test_loc_setitem_single_column_slice top = result.columns[0] if isinstance(top, tuple): top = top[0] if top == "": result = result[""] if isinstance(result, Series): result = self._constructor_sliced( result, index=self.index, name=key ) result._set_is_copy(self) return result else: # loc is neither a slice nor ndarray, so must be an int return self._ixs(loc, axis=1) def _get_value(self, index, col, takeable: bool = False) -> Scalar: """ Quickly retrieve single value at passed column and index. Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- scalar Notes ----- Assumes that both `self.index._index_as_unique` and `self.columns._index_as_unique`; Caller is responsible for checking. """ if takeable: series = self._ixs(col, axis=1) return series._values[index] series = self._get_item_cache(col) engine = self.index._engine if not isinstance(self.index, MultiIndex): # CategoricalIndex: Trying to use the engine fastpath may give incorrect # results if our categories are integers that dont match our codes # IntervalIndex: IntervalTree has no get_loc row = self.index.get_loc(index) return series._values[row] # For MultiIndex going through engine effectively restricts us to # same-length tuples; see test_get_set_value_no_partial_indexing loc = engine.get_loc(index) return series._values[loc] def isetitem(self, loc, value) -> None: """ Set the given value in the column with position `loc`. This is a positional analogue to ``__setitem__``. Parameters ---------- loc : int or sequence of ints Index position for the column. value : scalar or arraylike Value(s) for the column. Notes ----- ``frame.isetitem(loc, value)`` is an in-place method as it will modify the DataFrame in place (not returning a new object). In contrast to ``frame.iloc[:, i] = value`` which will try to update the existing values in place, ``frame.isetitem(loc, value)`` will not update the values of the column itself in place, it will instead insert a new array. In cases where ``frame.columns`` is unique, this is equivalent to ``frame[frame.columns[i]] = value``. 
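
        Examples
        --------
        A minimal sketch of setting the second column by position:

        >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
        >>> df.isetitem(1, [10, 11])
        >>> df
           a   b
        0  1  10
        1  2  11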
""" if isinstance(value, DataFrame): if is_scalar(loc): loc = [loc] for i, idx in enumerate(loc): arraylike = self._sanitize_column(value.iloc[:, i]) self._iset_item_mgr(idx, arraylike, inplace=False) return arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=False) def __setitem__(self, key, value): if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) key = com.apply_if_callable(key, self) # see if we can slice the rows if isinstance(key, slice): slc = self.index._convert_slice_indexer(key, kind="getitem") return self._setitem_slice(slc, value) if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2: self._setitem_frame(key, value) elif isinstance(key, (Series, np.ndarray, list, Index)): self._setitem_array(key, value) elif isinstance(value, DataFrame): self._set_item_frame_value(key, value) elif ( is_list_like(value) and not self.columns.is_unique and 1 < len(self.columns.get_indexer_for([key])) == len(value) ): # Column to set is duplicated self._setitem_array([key], value) else: # set column self._set_item(key, value) def _setitem_slice(self, key: slice, value) -> None: # NB: we can't just use self.loc[key] = value because that # operates on labels and we need to operate positional for # backwards-compat, xref GH#31469 self._check_setitem_copy() self.iloc[key] = value def _setitem_array(self, key, value): # also raises Exception if object array with NA values if com.is_bool_indexer(key): # bool indexer is indexing along rows if len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}!" ) key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] self._check_setitem_copy() if isinstance(value, DataFrame): # GH#39931 reindex since iloc does not align value = value.reindex(self.index.take(indexer)) self.iloc[indexer] = value else: # Note: unlike self.iloc[:, indexer] = value, this will # never try to overwrite values inplace if isinstance(value, DataFrame): check_key_length(self.columns, key, value) for k1, k2 in zip(key, value.columns): self[k1] = value[k2] elif not is_list_like(value): for col in key: self[col] = value elif isinstance(value, np.ndarray) and value.ndim == 2: self._iset_not_inplace(key, value) elif np.ndim(value) > 1: # list of lists value = DataFrame(value).values return self._setitem_array(key, value) else: self._iset_not_inplace(key, value) def _iset_not_inplace(self, key, value): # GH#39510 when setting with df[key] = obj with a list-like key and # list-like value, we iterate over those listlikes and set columns # one at a time. This is different from dispatching to # `self.loc[:, key]= value` because loc.__setitem__ may overwrite # data inplace, whereas this will insert new arrays. def igetitem(obj, i: int): # Note: we catch DataFrame obj before getting here, but # hypothetically would return obj.iloc[:, i] if isinstance(obj, np.ndarray): return obj[..., i] else: return obj[i] if self.columns.is_unique: if np.shape(value)[-1] != len(key): raise ValueError("Columns must be same length as key") for i, col in enumerate(key): self[col] = igetitem(value, i) else: ilocs = self.columns.get_indexer_non_unique(key)[0] if (ilocs < 0).any(): # key entries not in self.columns raise NotImplementedError if np.shape(value)[-1] != len(ilocs): raise ValueError("Columns must be same length as key") assert np.ndim(value) <= 2 orig_columns = self.columns # Using self.iloc[:, i] = ... 
may set values inplace, which # by convention we do not do in __setitem__ try: self.columns = Index(range(len(self.columns))) for i, iloc in enumerate(ilocs): self[iloc] = igetitem(value, i) finally: self.columns = orig_columns def _setitem_frame(self, key, value): # support boolean setting with DataFrame input, e.g. # df[df > df2] = 0 if isinstance(key, np.ndarray): if key.shape != self.shape: raise ValueError("Array conditional must be same shape as self") key = self._constructor(key, **self._construct_axes_dict(), copy=False) if key.size and not all(is_bool_dtype(dtype) for dtype in key.dtypes): raise TypeError( "Must pass DataFrame or 2-d ndarray with boolean values only" ) self._check_inplace_setting(value) self._check_setitem_copy() self._where(-key, value, inplace=True) def _set_item_frame_value(self, key, value: DataFrame) -> None: self._ensure_valid_index(value) # align columns if key in self.columns: loc = self.columns.get_loc(key) cols = self.columns[loc] len_cols = 1 if is_scalar(cols) or isinstance(cols, tuple) else len(cols) if len_cols != len(value.columns): raise ValueError("Columns must be same length as key") # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame if isinstance(self.columns, MultiIndex) and isinstance( loc, (slice, Series, np.ndarray, Index) ): cols_droplevel = maybe_droplevels(cols, key) if len(cols_droplevel) and not cols_droplevel.equals(value.columns): value = value.reindex(cols_droplevel, axis=1) for col, col_droplevel in zip(cols, cols_droplevel): self[col] = value[col_droplevel] return if is_scalar(cols): self[cols] = value[value.columns[0]] return # now align rows arraylike = _reindex_for_setitem(value, self.index) self._set_item_mgr(key, arraylike) return if len(value.columns) != 1: raise ValueError( "Cannot set a DataFrame with multiple columns to the single " f"column {key}" ) self[key] = value[value.columns[0]] def _iset_item_mgr( self, loc: int | slice | np.ndarray, value, inplace: bool = False ) -> None: # when called from _set_item_mgr loc can be anything returned from get_loc self._mgr.iset(loc, value, inplace=inplace) self._clear_item_cache() def _set_item_mgr(self, key, value: ArrayLike) -> None: try: loc = self._info_axis.get_loc(key) except KeyError: # This item wasn't present, just insert at end self._mgr.insert(len(self._info_axis), key, value) else: self._iset_item_mgr(loc, value) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _iset_item(self, loc: int, value) -> None: arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=True) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _set_item(self, key, value) -> None: """ Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. 
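
        For example, assigning a Series with a partially matching index
        introduces NaN for the missing labels:

        >>> df = pd.DataFrame({"a": [1, 2]}, index=["x", "y"])
        >>> df["b"] = pd.Series([10.0], index=["y"])
        >>> df
           a     b
        x  1   NaN
        y  2  10.0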
""" value = self._sanitize_column(value) if ( key in self.columns and value.ndim == 1 and not is_extension_array_dtype(value) ): # broadcast across multiple columns if necessary if not self.columns.is_unique or isinstance(self.columns, MultiIndex): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)).T self._set_item_mgr(key, value) def _set_value( self, index: IndexLabel, col, value: Scalar, takeable: bool = False ) -> None: """ Put single value at passed column and index. Parameters ---------- index : Label row label col : Label column label value : scalar takeable : bool, default False Sets whether or not index/col interpreted as indexers """ try: if takeable: icol = col iindex = cast(int, index) else: icol = self.columns.get_loc(col) iindex = self.index.get_loc(index) self._mgr.column_setitem(icol, iindex, value, inplace_only=True) self._clear_item_cache() except (KeyError, TypeError, ValueError, LossySetitemError): # get_loc might raise a KeyError for missing labels (falling back # to (i)loc will do expansion of the index) # column_setitem will do validation that may raise TypeError, # ValueError, or LossySetitemError # set using a non-recursive method & reset the cache if takeable: self.iloc[index, col] = value else: self.loc[index, col] = value self._item_cache.pop(col, None) except InvalidIndexError as ii_err: # GH48729: Seems like you are trying to assign a value to a # row when only scalar options are permitted raise InvalidIndexError( f"You can only assign a scalar value not a {type(value)}" ) from ii_err def _ensure_valid_index(self, value) -> None: """ Ensure that if we don't have an index, that we can create one from the passed value. """ # GH5632, make sure that we are a Series convertible if not len(self.index) and is_list_like(value) and len(value): if not isinstance(value, DataFrame): try: value = Series(value) except (ValueError, NotImplementedError, TypeError) as err: raise ValueError( "Cannot set a frame with no defined index " "and a value that cannot be converted to a Series" ) from err # GH31368 preserve name of index index_copy = value.index.copy() if self.index.name is not None: index_copy.name = self.index.name self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan) def _box_col_values(self, values: SingleDataManager, loc: int) -> Series: """ Provide boxed values for a column. """ # Lookup in columns so that if e.g. a str datetime was passed # we attach the Timestamp object as the name. 
name = self.columns[loc] klass = self._constructor_sliced # We get index=self.index bc values is a SingleDataManager return klass(values, name=name, fastpath=True).__finalize__(self) # ---------------------------------------------------------------------- # Lookup Caching def _clear_item_cache(self) -> None: self._item_cache.clear() def _get_item_cache(self, item: Hashable) -> Series: """Return the cached item, item represents a label indexer.""" if using_copy_on_write(): loc = self.columns.get_loc(item) return self._ixs(loc, axis=1) cache = self._item_cache res = cache.get(item) if res is None: # All places that call _get_item_cache have unique columns, # pending resolution of GH#33047 loc = self.columns.get_loc(item) res = self._ixs(loc, axis=1) cache[item] = res # for a chain res._is_copy = self._is_copy return res def _reset_cacher(self) -> None: # no-op for DataFrame pass def _maybe_cache_changed(self, item, value: Series, inplace: bool) -> None: """ The object has called back to us saying maybe it has changed. """ loc = self._info_axis.get_loc(item) arraylike = value._values old = self._ixs(loc, axis=1) if old._values is value._values and inplace: # GH#46149 avoid making unnecessary copies/block-splitting return self._mgr.iset(loc, arraylike, inplace=inplace) # ---------------------------------------------------------------------- # Unsorted def query(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> DataFrame: ... def query(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def query(self, expr: str, *, inplace: bool = ..., **kwargs) -> DataFrame | None: ... def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | None: """ Query the columns of a DataFrame with a boolean expression. Parameters ---------- expr : str The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. You can refer to column names that are not valid Python variable names by surrounding them in backticks. Thus, column names containing spaces or punctuations (besides underscores) or starting with digits must be surrounded by backticks. (For example, a column named "Area (cm^2)" would be referenced as ```Area (cm^2)```). Column names which are Python keywords (like "list", "for", "import", etc) cannot be used. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether to modify the DataFrame rather than creating a new one. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. Returns ------- DataFrame or None DataFrame resulting from the provided query expression or None if ``inplace=True``. See Also -------- eval : Evaluate a string describing operations on DataFrame columns. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. 
This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. Please note that Python keywords may not be used as identifiers. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. *Backtick quoted variables* Backtick quoted variables are parsed as literal Python code and are converted internally to a Python valid identifier. This can lead to the following problems. During parsing a number of disallowed characters inside the backtick quoted string are replaced by strings that are allowed as a Python identifier. These characters include all operators in Python, the space character, the question mark, the exclamation mark, the dollar sign, and the euro sign. For other characters that fall outside the ASCII range (U+0001..U+007F) and those that are not further specified in PEP 3131, the query parser will raise an error. This excludes whitespace different than the space character, but also the hashtag (as it is used for comments) and the backtick itself (backtick can also not be escaped). In a special case, quotes that make a pair around a backtick can confuse the parser. For example, ```it's` > `that's``` will raise an error, as it forms a quoted string (``'s > `that'``) with a backtick inside. See also the Python documentation about lexical analysis (https://docs.python.org/3/reference/lexical_analysis.html) in combination with the source code in :mod:`pandas.core.computation.parsing`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ inplace = validate_bool_kwarg(inplace, "inplace") if not isinstance(expr, str): msg = f"expr must be a string to be evaluated, {type(expr)} given" raise ValueError(msg) kwargs["level"] = kwargs.pop("level", 0) + 1 kwargs["target"] = None res = self.eval(expr, **kwargs) try: result = self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query result = self[res] if inplace: self._update_inplace(result) return None else: return result def eval(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> Any: ... def eval(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def eval(self, expr: str, *, inplace: bool = False, **kwargs) -> Any | None: """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. 
This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, pandas object, or None The result of the evaluation or None if ``inplace=True``. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Multiple columns can be assigned to using multi-line expressions: >>> df.eval( ... ''' ... C = A + B ... D = A - B ... ''' ... ) A B C D 0 1 10 11 -9 1 2 8 10 -6 2 3 6 9 -3 3 4 4 8 0 4 5 2 7 3 """ from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, "inplace") kwargs["level"] = kwargs.pop("level", 0) + 1 index_resolvers = self._get_index_resolvers() column_resolvers = self._get_cleaned_column_resolvers() resolvers = column_resolvers, index_resolvers if "target" not in kwargs: kwargs["target"] = self kwargs["resolvers"] = tuple(kwargs.get("resolvers", ())) + resolvers return _eval(expr, inplace=inplace, **kwargs) def select_dtypes(self, include=None, exclude=None) -> DataFrame: """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. See Also -------- DataFrame.dtypes: Return Series with the data type of each column. Notes ----- * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <https://numpy.org/doc/stable/reference/arrays.scalars.html>`__ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ``'timedelta64'`` * To select Pandas categorical dtypes, use ``'category'`` * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 
'c': [1.0, 2.0] * 3}) >>> df a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int64']) b c 0 True 1.0 1 False 2.0 2 True 1.0 3 False 2.0 4 True 1.0 5 False 2.0 """ if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () selection = (frozenset(include), frozenset(exclude)) if not any(selection): raise ValueError("at least one of include or exclude must be nonempty") # convert the myriad valid dtypes object to a single representation def check_int_infer_dtype(dtypes): converted_dtypes: list[type] = [] for dtype in dtypes: # Numpy maps int to different types (int32, in64) on Windows and Linux # see https://github.com/numpy/numpy/issues/9464 if (isinstance(dtype, str) and dtype == "int") or (dtype is int): converted_dtypes.append(np.int32) converted_dtypes.append(np.int64) elif dtype == "float" or dtype is float: # GH#42452 : np.dtype("float") coerces to np.float64 from Numpy 1.20 converted_dtypes.extend([np.float64, np.float32]) else: converted_dtypes.append(infer_dtype_from_object(dtype)) return frozenset(converted_dtypes) include = check_int_infer_dtype(include) exclude = check_int_infer_dtype(exclude) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) # can't both include AND exclude! if not include.isdisjoint(exclude): raise ValueError(f"include and exclude overlap on {(include & exclude)}") def dtype_predicate(dtype: DtypeObj, dtypes_set) -> bool: # GH 46870: BooleanDtype._is_numeric == True but should be excluded return issubclass(dtype.type, tuple(dtypes_set)) or ( np.number in dtypes_set and getattr(dtype, "_is_numeric", False) and not is_bool_dtype(dtype) ) def predicate(arr: ArrayLike) -> bool: dtype = arr.dtype if include: if not dtype_predicate(dtype, include): return False if exclude: if dtype_predicate(dtype, exclude): return False return True mgr = self._mgr._get_data_subset(predicate).copy(deep=None) return type(self)(mgr).__finalize__(self) def insert( self, loc: int, column: Hashable, value: Scalar | AnyArrayLike, allow_duplicates: bool | lib.NoDefault = lib.no_default, ) -> None: """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns). column : str, number, or hashable object Label of the inserted column. value : Scalar, Series, or array-like allow_duplicates : bool, optional, default lib.no_default See Also -------- Index.insert : Insert new item by index. 
Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.insert(1, "newcol", [99, 99]) >>> df col1 newcol col2 0 1 99 3 1 2 99 4 >>> df.insert(0, "col1", [100, 100], allow_duplicates=True) >>> df col1 col1 newcol col2 0 100 1 99 3 1 100 2 99 4 Notice that pandas uses index alignment in case of `value` from type `Series`: >>> df.insert(0, "col0", pd.Series([5, 6], index=[1, 2])) >>> df col0 col1 col1 newcol col2 0 NaN 100 1 99 3 1 5.0 100 2 99 4 """ if allow_duplicates is lib.no_default: allow_duplicates = False if allow_duplicates and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'allow_duplicates=True' when " "'self.flags.allows_duplicate_labels' is False." ) if not allow_duplicates and column in self.columns: # Should this be a different kind of error?? raise ValueError(f"cannot insert {column}, already exists") if not isinstance(loc, int): raise TypeError("loc must be int") value = self._sanitize_column(value) self._mgr.insert(loc, column, value) def assign(self, **kwargs) -> DataFrame: r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. Later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 You can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy(deep=None) for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) return data def _sanitize_column(self, value) -> ArrayLike: """ Ensures new columns (which go into the BlockManager as new blocks) are always copied and converted into an array. 
Parameters ---------- value : scalar, Series, or array-like Returns ------- numpy.ndarray or ExtensionArray """ self._ensure_valid_index(value) # We can get there through isetitem with a DataFrame # or through loc single_block_path if isinstance(value, DataFrame): return _reindex_for_setitem(value, self.index) elif is_dict_like(value): return _reindex_for_setitem(Series(value), self.index) if is_list_like(value): com.require_length_match(value, self.index) return sanitize_array(value, self.index, copy=True, allow_2d=True) def _series(self): return { item: Series( self._mgr.iget(idx), index=self.index, name=item, fastpath=True ) for idx, item in enumerate(self.columns) } # ---------------------------------------------------------------------- # Reindexing and alignment def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): frame = self columns = axes["columns"] if columns is not None: frame = frame._reindex_columns( columns, method, copy, level, fill_value, limit, tolerance ) index = axes["index"] if index is not None: frame = frame._reindex_index( index, method, copy, level, fill_value, limit, tolerance ) return frame def _reindex_index( self, new_index, method, copy: bool, level: Level, fill_value=np.nan, limit=None, tolerance=None, ): new_index, indexer = self.index.reindex( new_index, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {0: [new_index, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_columns( self, new_columns, method, copy: bool, level: Level, fill_value=None, limit=None, tolerance=None, ): new_columns, indexer = self.columns.reindex( new_columns, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {1: [new_columns, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_multi( self, axes: dict[str, Index], copy: bool, fill_value ) -> DataFrame: """ We are guaranteed non-Nones in the axes. """ new_index, row_indexer = self.index.reindex(axes["index"]) new_columns, col_indexer = self.columns.reindex(axes["columns"]) if row_indexer is not None and col_indexer is not None: # Fastpath. By doing two 'take's at once we avoid making an # unnecessary copy. # We only get here with `not self._is_mixed_type`, which (almost) # ensures that self.values is cheap. It may be worth making this # condition more specific. indexer = row_indexer, col_indexer new_values = take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor( new_values, index=new_index, columns=new_columns, copy=False ) else: return self._reindex_with_indexers( {0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value, ) def align( self, other: DataFrame, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value=None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> DataFrame: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) """ Examples -------- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) Change the row labels. >>> df.set_axis(['a', 'b', 'c'], axis='index') A B a 1 4 b 2 5 c 3 6 Change the column labels. 
        >>> df.set_axis(['I', 'II'], axis='columns')
           I  II
        0  1   4
        1  2   5
        2  3   6
        """

    # ----------------------------------------------------------------------
    # Reindex-based selection methods

    # ----------------------------------------------------------------------
    # Sorting

    # error: Signature of "sort_values" incompatible with supertype "NDFrame"
    # TODO: Just move the sort_values doc here.

    # ----------------------------------------------------------------------
    # Arithmetic Methods

    # ----------------------------------------------------------------------
    # Function application

    # error: Signature of "any" incompatible with supertype "NDFrame"  [override]
    # error: Missing return statement

    # ----------------------------------------------------------------------
    # Merging / joining methods

    # ----------------------------------------------------------------------
    # Statistical methods, etc.

    # ----------------------------------------------------------------------
    # ndarray-like stats methods

    # ----------------------------------------------------------------------
    # Add index and columns

    # ----------------------------------------------------------------------
    # Add plotting methods to DataFrame

    # ----------------------------------------------------------------------
    # Internal Interface Methods


DataFrame

def read_excel(
    io,
    # sheet name is str or int -> DataFrame
    sheet_name: str | int = ...,
    *,
    header: int | Sequence[int] | None = ...,
    names: list[str] | None = ...,
    index_col: int | Sequence[int] | None = ...,
    usecols: int
    | str
    | Sequence[int]
    | Sequence[str]
    | Callable[[str], bool]
    | None = ...,
    dtype: DtypeArg | None = ...,
    engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb"] | None = ...,
    converters: dict[str, Callable] | dict[int, Callable] | None = ...,
    true_values: Iterable[Hashable] | None = ...,
    false_values: Iterable[Hashable] | None = ...,
    skiprows: Sequence[int] | int | Callable[[int], object] | None = ...,
    nrows: int | None = ...,
    na_values=...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    parse_dates: list | dict | bool = ...,
    date_parser: Callable | lib.NoDefault = ...,
    date_format: dict[Hashable, str] | str | None = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    comment: str | None = ...,
    skipfooter: int = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
) -> DataFrame:
    ...
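The stub above covers the scalar ``sheet_name`` case, where a single DataFrame
is returned. A minimal usage sketch (``report.xlsx`` and the ``id`` column are
hypothetical; reading ``.xlsx`` files additionally requires ``openpyxl``):

import pandas as pd

# Read the first worksheet, keeping only spreadsheet columns A through C
# and forcing the "id" column to a plain int64 dtype.
df = pd.read_excel(
    "report.xlsx",          # hypothetical file path
    sheet_name=0,           # scalar -> a single DataFrame, per the stub above
    usecols="A:C",
    dtype={"id": "int64"},  # hypothetical column name
)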
null
173,559
from __future__ import annotations import abc import datetime from functools import partial from io import BytesIO import os from textwrap import fill from types import TracebackType from typing import ( IO, Any, Callable, Hashable, Iterable, List, Literal, Mapping, Sequence, Union, cast, overload, ) import zipfile from pandas._config import config from pandas._libs import lib from pandas._libs.parsers import STR_NA_VALUES from pandas._typing import ( DtypeArg, DtypeBackend, FilePath, IntStrT, ReadBuffer, StorageOptions, WriteExcelBuffer, ) from pandas.compat._optional import ( get_version, import_optional_dependency, ) from pandas.errors import EmptyDataError from pandas.util._decorators import ( Appender, doc, ) from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( is_bool, is_float, is_integer, is_list_like, ) from pandas.core.frame import DataFrame from pandas.core.shared_docs import _shared_docs from pandas.util.version import Version from pandas.io.common import ( IOHandles, get_handle, stringify_path, validate_header_arg, ) from pandas.io.excel._util import ( fill_mi_header, get_default_engine, get_writer, maybe_convert_usecols, pop_header_name, ) from pandas.io.parsers import TextParser from pandas.io.parsers.readers import validate_integer class Callable(BaseTypingInstance): def py__call__(self, arguments): """ def x() -> Callable[[Callable[..., _T]], _T]: ... """ # The 0th index are the arguments. try: param_values = self._generics_manager[0] result_values = self._generics_manager[1] except IndexError: debug.warning('Callable[...] defined without two arguments') return NO_VALUES else: from jedi.inference.gradual.annotation import infer_return_for_callable return infer_return_for_callable(arguments, param_values, result_values) def py__get__(self, instance, class_value): return ValueSet([self]) class Hashable(Protocol, metaclass=ABCMeta): # TODO: This is special, in that a subclass of a hashable class may not be hashable # (for example, list vs. object). It's not obvious how to represent this. This class # is currently mostly useless for static checking. def __hash__(self) -> int: ... class Iterable(Protocol[_T_co]): def __iter__(self) -> Iterator[_T_co]: ... class Sequence(_Collection[_T_co], Reversible[_T_co], Generic[_T_co]): def __getitem__(self, i: int) -> _T_co: ... def __getitem__(self, s: slice) -> Sequence[_T_co]: ... # Mixin methods def index(self, value: Any, start: int = ..., stop: int = ...) -> int: ... def count(self, value: Any) -> int: ... def __contains__(self, x: object) -> bool: ... def __iter__(self) -> Iterator[_T_co]: ... def __reversed__(self) -> Iterator[_T_co]: ... Literal: _SpecialForm = ... IntStrT = TypeVar("IntStrT", int, str) DtypeArg = Union[Dtype, Dict[Hashable, Dtype]] StorageOptions = Optional[Dict[str, Any]] DtypeBackend = Literal["pyarrow", "numpy_nullable"] class DataFrame(NDFrame, OpsMixin): """ Two-dimensional, size-mutable, potentially heterogeneous tabular data. Data structure also contains labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure. Parameters ---------- data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame Dict can contain Series, arrays, constants, dataclass or list-like objects. If data is a dict, column order follows insertion-order. If a dict contains Series which have an index defined, it is aligned by its index. 
This alignment also occurs if data is a Series or a DataFrame itself. Alignment is done on Series/DataFrame inputs. If data is a list of dicts, column order follows insertion-order. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided. columns : Index or array-like Column labels to use for resulting frame when data does not have them, defaulting to RangeIndex(0, 1, 2, ..., n). If data contains column labels, will perform column selection instead. dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer. copy : bool or None, default None Copy data from inputs. For dict data, the default of None behaves like ``copy=True``. For DataFrame or 2d ndarray input, the default of None behaves like ``copy=False``. If data is a dict containing one or more Series (possibly of different dtypes), ``copy=False`` will ensure that these inputs are not copied. .. versionchanged:: 1.3.0 See Also -------- DataFrame.from_records : Constructor from tuples, also record arrays. DataFrame.from_dict : From dicts of Series, arrays, or dicts. read_csv : Read a comma-separated values (csv) file into DataFrame. read_table : Read general delimited file into DataFrame. read_clipboard : Read text from clipboard into DataFrame. Notes ----- Please reference the :ref:`User Guide <basics.dataframe>` for more information. Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. >>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = pd.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from a dictionary including Series: >>> d = {'col1': [0, 1, 2, 3], 'col2': pd.Series([2, 3], index=[2, 3])} >>> pd.DataFrame(data=d, index=[0, 1, 2, 3]) col1 col2 0 0 NaN 1 1 NaN 2 2 2.0 3 3 3.0 Constructing DataFrame from numpy ndarray: >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ... columns=['a', 'b', 'c']) >>> df2 a b c 0 1 2 3 1 4 5 6 2 7 8 9 Constructing DataFrame from a numpy ndarray that has labeled columns: >>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], ... dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")]) >>> df3 = pd.DataFrame(data, columns=['c', 'a']) ... 
>>> df3 c a 0 3 1 1 6 4 2 9 7 Constructing DataFrame from dataclass: >>> from dataclasses import make_dataclass >>> Point = make_dataclass("Point", [("x", int), ("y", int)]) >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)]) x y 0 0 0 1 0 3 2 2 3 Constructing DataFrame from Series/DataFrame: >>> ser = pd.Series([1, 2, 3], index=["a", "b", "c"]) >>> df = pd.DataFrame(data=ser, index=["a", "c"]) >>> df 0 a 1 c 3 >>> df1 = pd.DataFrame([1, 2, 3], index=["a", "b", "c"], columns=["x"]) >>> df2 = pd.DataFrame(data=df1, index=["a", "c"]) >>> df2 x a 1 c 3 """ _internal_names_set = {"columns", "index"} | NDFrame._internal_names_set _typ = "dataframe" _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray) _accessors: set[str] = {"sparse"} _hidden_attrs: frozenset[str] = NDFrame._hidden_attrs | frozenset([]) _mgr: BlockManager | ArrayManager def _constructor(self) -> Callable[..., DataFrame]: return DataFrame _constructor_sliced: Callable[..., Series] = Series # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index: Axes | None = None, columns: Axes | None = None, dtype: Dtype | None = None, copy: bool | None = None, ) -> None: if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._mgr if not copy: # if not copying data, ensure to still return a shallow copy # to avoid the result sharing the same Manager data = data.copy(deep=False) if isinstance(data, (BlockManager, ArrayManager)): if using_copy_on_write(): data = data.copy(deep=False) # first check if a Manager is passed without any other arguments # -> use fastpath (without checking Manager type) if index is None and columns is None and dtype is None and not copy: # GH#33357 fastpath NDFrame.__init__(self, data) return manager = get_option("mode.data_manager") # GH47215 if index is not None and isinstance(index, set): raise ValueError("index cannot be a set") if columns is not None and isinstance(columns, set): raise ValueError("columns cannot be a set") if copy is None: if isinstance(data, dict): # retain pre-GH#38939 default behavior copy = True elif ( manager == "array" and isinstance(data, (np.ndarray, ExtensionArray)) and data.ndim == 2 ): # INFO(ArrayManager) by default copy the 2D input array to get # contiguous 1D arrays copy = True elif using_copy_on_write() and not isinstance( data, (Index, DataFrame, Series) ): copy = True else: copy = False if data is None: index = index if index is not None else default_index(0) columns = columns if columns is not None else default_index(0) dtype = dtype if dtype is not None else pandas_dtype(object) data = [] if isinstance(data, (BlockManager, ArrayManager)): mgr = self._init_mgr( data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy ) elif isinstance(data, dict): # GH#38939 de facto copy defaults to False only in non-dict cases mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager) elif isinstance(data, ma.MaskedArray): from numpy.ma import mrecords # masked recarray if isinstance(data, mrecords.MaskedRecords): raise TypeError( "MaskedRecords are not supported. Pass " "{name: data[name] for name in data.dtype.names} " "instead" ) # a masked array data = sanitize_masked_array(data) mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) elif isinstance(data, (np.ndarray, Series, Index, ExtensionArray)): if data.dtype.names: # i.e. 
numpy structured array data = cast(np.ndarray, data) mgr = rec_array_to_mgr( data, index, columns, dtype, copy, typ=manager, ) elif getattr(data, "name", None) is not None: # i.e. Series/Index with non-None name _copy = copy if using_copy_on_write() else True mgr = dict_to_mgr( # error: Item "ndarray" of "Union[ndarray, Series, Index]" has no # attribute "name" {data.name: data}, # type: ignore[union-attr] index, columns, dtype=dtype, typ=manager, copy=_copy, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) # For data is list-like, or Iterable (will consume into list) elif is_list_like(data): if not isinstance(data, abc.Sequence): if hasattr(data, "__array__"): # GH#44616 big perf improvement for e.g. pytorch tensor data = np.asarray(data) else: data = list(data) if len(data) > 0: if is_dataclass(data[0]): data = dataclasses_to_dicts(data) if not isinstance(data, np.ndarray) and treat_as_nested(data): # exclude ndarray as we may have cast it a few lines above if columns is not None: columns = ensure_index(columns) arrays, columns, index = nested_data_to_arrays( # error: Argument 3 to "nested_data_to_arrays" has incompatible # type "Optional[Collection[Any]]"; expected "Optional[Index]" data, columns, index, # type: ignore[arg-type] dtype, ) mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, typ=manager, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) else: mgr = dict_to_mgr( {}, index, columns if columns is not None else default_index(0), dtype=dtype, typ=manager, ) # For data is scalar else: if index is None or columns is None: raise ValueError("DataFrame constructor not properly called!") index = ensure_index(index) columns = ensure_index(columns) if not dtype: dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True) # For data is a scalar extension dtype if isinstance(dtype, ExtensionDtype): # TODO(EA2D): special case not needed with 2D EAs values = [ construct_1d_arraylike_from_scalar(data, len(index), dtype) for _ in range(len(columns)) ] mgr = arrays_to_mgr(values, columns, index, dtype=None, typ=manager) else: arr2d = construct_2d_arraylike_from_scalar( data, len(index), len(columns), dtype, copy, ) mgr = ndarray_to_mgr( arr2d, index, columns, dtype=arr2d.dtype, copy=False, typ=manager, ) # ensure correct Manager type according to settings mgr = mgr_to_mgr(mgr, typ=manager) NDFrame.__init__(self, mgr) # ---------------------------------------------------------------------- def __dataframe__( self, nan_as_null: bool = False, allow_copy: bool = True ) -> DataFrameXchg: """ Return the dataframe interchange object implementing the interchange protocol. Parameters ---------- nan_as_null : bool, default False Whether to tell the DataFrame to overwrite null values in the data with ``NaN`` (or ``NaT``). allow_copy : bool, default True Whether to allow memory copying when exporting. If set to False it would cause non-zero-copy exports to fail. Returns ------- DataFrame interchange object The object which consuming library can use to ingress the dataframe. Notes ----- Details on the interchange protocol: https://data-apis.org/dataframe-protocol/latest/index.html `nan_as_null` currently has no effect; once support for nullable extension dtypes is added, this value should be propagated to columns. 
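        Examples
        --------
        A minimal round-trip sketch (the frame here is illustrative); pandas can
        itself consume the interchange object via
        :func:`pandas.api.interchange.from_dataframe`:

        >>> df = pd.DataFrame({'A': [1, 2], 'B': [3.0, 4.0]})
        >>> interchange_object = df.__dataframe__()
        >>> interchange_object.num_columns()
        2
        >>> pd.api.interchange.from_dataframe(interchange_object)
           A    B
        0  1  3.0
        1  2  4.0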
""" from pandas.core.interchange.dataframe import PandasDataFrameXchg return PandasDataFrameXchg(self, nan_as_null, allow_copy) # ---------------------------------------------------------------------- def axes(self) -> list[Index]: """ Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns] def shape(self) -> tuple[int, int]: """ Return a tuple representing the dimensionality of the DataFrame. See Also -------- ndarray.shape : Tuple of array dimensions. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self.index), len(self.columns) def _is_homogeneous_type(self) -> bool: """ Whether all the columns in a DataFrame have the same type. Returns ------- bool See Also -------- Index._is_homogeneous_type : Whether the object has a single dtype. MultiIndex._is_homogeneous_type : Whether all the levels of a MultiIndex have the same dtype. Examples -------- >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type True >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type False Items with the same type but different sizes are considered different types. >>> DataFrame({ ... "A": np.array([1, 2], dtype=np.int32), ... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type False """ if isinstance(self._mgr, ArrayManager): return len({arr.dtype for arr in self._mgr.arrays}) == 1 if self._mgr.any_extension_types: return len({block.dtype for block in self._mgr.blocks}) == 1 else: return not self._is_mixed_type def _can_fast_transpose(self) -> bool: """ Can we transpose this DataFrame without creating any new array objects. """ if isinstance(self._mgr, ArrayManager): return False blocks = self._mgr.blocks if len(blocks) != 1: return False dtype = blocks[0].dtype # TODO(EA2D) special case would be unnecessary with 2D EAs return not is_1d_only_ea_dtype(dtype) def _values(self) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray: """ Analogue to ._values that may return a 2D ExtensionArray. """ mgr = self._mgr if isinstance(mgr, ArrayManager): if len(mgr.arrays) == 1 and not is_1d_only_ea_dtype(mgr.arrays[0].dtype): # error: Item "ExtensionArray" of "Union[ndarray, ExtensionArray]" # has no attribute "reshape" return mgr.arrays[0].reshape(-1, 1) # type: ignore[union-attr] return ensure_wrapped_if_datetimelike(self.values) blocks = mgr.blocks if len(blocks) != 1: return ensure_wrapped_if_datetimelike(self.values) arr = blocks[0].values if arr.ndim == 1: # non-2D ExtensionArray return self.values # more generally, whatever we allow in NDArrayBackedExtensionBlock arr = cast("np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray", arr) return arr.T # ---------------------------------------------------------------------- # Rendering Methods def _repr_fits_vertical_(self) -> bool: """ Check length against max_rows. """ max_rows = get_option("display.max_rows") return len(self) <= max_rows def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool: """ Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns. In case of non-interactive session, no boundaries apply. 
`ignore_width` is here so ipynb+HTML output can behave the way users expect. display.max_columns remains in effect. GH3541, GH3573 """ width, height = console.get_console_size() max_columns = get_option("display.max_columns") nb_columns = len(self.columns) # exceed max columns if (max_columns and nb_columns > max_columns) or ( (not ignore_width) and width and nb_columns > (width // 2) ): return False # used by repr_html under IPython notebook or scripts ignore terminal # dims if ignore_width or width is None or not console.in_interactive_session(): return True if get_option("display.width") is not None or console.in_ipython_frontend(): # check at least the column row for excessive width max_rows = 1 else: max_rows = get_option("display.max_rows") # when auto-detecting, so width=None and not in ipython front end # check whether repr fits horizontal by actually checking # the width of the rendered repr buf = StringIO() # only care about the stuff we'll actually print out # and to_string on entire frame may be expensive d = self if max_rows is not None: # unlimited rows # min of two, where one may be None d = d.iloc[: min(max_rows, len(d))] else: return True d.to_string(buf=buf) value = buf.getvalue() repr_width = max(len(line) for line in value.split("\n")) return repr_width < width def _info_repr(self) -> bool: """ True if the repr should show the info view. """ info_repr_option = get_option("display.large_repr") == "info" return info_repr_option and not ( self._repr_fits_horizontal_() and self._repr_fits_vertical_() ) def __repr__(self) -> str: """ Return a string representation for a particular DataFrame. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) return buf.getvalue() repr_params = fmt.get_dataframe_repr_params() return self.to_string(**repr_params) def _repr_html_(self) -> str | None: """ Return a html representation for a particular DataFrame. Mainly for IPython notebook. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) # need to escape the <class>, should be the first line. val = buf.getvalue().replace("<", r"&lt;", 1) val = val.replace(">", r"&gt;", 1) return f"<pre>{val}</pre>" if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") min_rows = get_option("display.min_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") formatter = fmt.DataFrameFormatter( self, columns=None, col_space=None, na_rep="NaN", formatters=None, float_format=None, sparsify=None, justify=None, index_names=True, header=True, index=True, bold_rows=True, escape=True, max_rows=max_rows, min_rows=min_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=".", ) return fmt.DataFrameRenderer(formatter).to_html(notebook=True) else: return None def to_string( self, buf: None = ..., columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> str: ... 
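    # The two ``to_string`` stubs above and below differ only in the type of
    # ``buf`` and in the return type: with ``buf=None`` the rendered text is
    # returned as a str, while passing a path or writable buffer writes the
    # text and returns None. A quick sketch (``df`` stands for any DataFrame;
    # the filename is illustrative):
    #
    #   text = df.to_string()            # buf omitted -> returns str
    #   df.to_string(buf="frame.txt")    # path given  -> writes file, returns None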
    def to_string(
        self,
        buf: FilePath | WriteBuffer[str],
        columns: Sequence[str] | None = ...,
        col_space: int | list[int] | dict[Hashable, int] | None = ...,
        header: bool | Sequence[str] = ...,
        index: bool = ...,
        na_rep: str = ...,
        formatters: fmt.FormattersType | None = ...,
        float_format: fmt.FloatFormatType | None = ...,
        sparsify: bool | None = ...,
        index_names: bool = ...,
        justify: str | None = ...,
        max_rows: int | None = ...,
        max_cols: int | None = ...,
        show_dimensions: bool = ...,
        decimal: str = ...,
        line_width: int | None = ...,
        min_rows: int | None = ...,
        max_colwidth: int | None = ...,
        encoding: str | None = ...,
    ) -> None:
        ...

    @Substitution(
        header_type="bool or sequence of str",
        header="Write out the column names. If a list of strings "
        "is given, it is assumed to be aliases for the "
        "column names",
        col_space_type="int, list or dict of int",
        col_space="The minimum width of each column. If a list of ints is given "
        "every integer corresponds to one column. If a dict is given, the key "
        "references the column, while the value defines the space to use.",
    )
    @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
    def to_string(
        self,
        buf: FilePath | WriteBuffer[str] | None = None,
        columns: Sequence[str] | None = None,
        col_space: int | list[int] | dict[Hashable, int] | None = None,
        header: bool | Sequence[str] = True,
        index: bool = True,
        na_rep: str = "NaN",
        formatters: fmt.FormattersType | None = None,
        float_format: fmt.FloatFormatType | None = None,
        sparsify: bool | None = None,
        index_names: bool = True,
        justify: str | None = None,
        max_rows: int | None = None,
        max_cols: int | None = None,
        show_dimensions: bool = False,
        decimal: str = ".",
        line_width: int | None = None,
        min_rows: int | None = None,
        max_colwidth: int | None = None,
        encoding: str | None = None,
    ) -> str | None:
        """
        Render a DataFrame to a console-friendly tabular output.
        %(shared_params)s
        line_width : int, optional
            Width to wrap a line in characters.
        min_rows : int, optional
            The number of rows to display in the console in a truncated repr
            (when number of rows is above `max_rows`).
        max_colwidth : int, optional
            Max width to truncate each column in characters. By default, no limit.
        encoding : str, default "utf-8"
            Set character encoding.
        %(returns)s
        See Also
        --------
        to_html : Convert DataFrame to HTML.

        Examples
        --------
        >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]}
        >>> df = pd.DataFrame(d)
        >>> print(df.to_string())
           col1  col2
        0     1     4
        1     2     5
        2     3     6
        """
        from pandas import option_context

        with option_context("display.max_colwidth", max_colwidth):
            formatter = fmt.DataFrameFormatter(
                self,
                columns=columns,
                col_space=col_space,
                na_rep=na_rep,
                formatters=formatters,
                float_format=float_format,
                sparsify=sparsify,
                justify=justify,
                index_names=index_names,
                header=header,
                index=index,
                min_rows=min_rows,
                max_rows=max_rows,
                max_cols=max_cols,
                show_dimensions=show_dimensions,
                decimal=decimal,
            )
            return fmt.DataFrameRenderer(formatter).to_string(
                buf=buf,
                encoding=encoding,
                line_width=line_width,
            )

    # ----------------------------------------------------------------------

    def style(self) -> Styler:
        """
        Returns a Styler object.

        Contains methods for building a styled HTML representation of the
        DataFrame.

        See Also
        --------
        io.formats.style.Styler : Helps style a DataFrame or Series according to the
            data with HTML and CSS.
        """
        from pandas.io.formats.style import Styler

        return Styler(self)

    _shared_docs[
        "items"
    ] = r"""
        Iterate over (column name, Series) pairs.

        Iterates over the DataFrame columns, returning a tuple with
        the column name and the content as a Series.
Yields ------ label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. Examples -------- >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.items(): ... print(f'label: {label}') ... print(f'content: {content}', sep='\n') ... label: species content: panda bear polar bear koala marsupial Name: species, dtype: object label: population content: panda 1864 polar 22000 koala 80000 Name: population, dtype: int64 """ def items(self) -> Iterable[tuple[Hashable, Series]]: if self.columns.is_unique and hasattr(self, "_item_cache"): for k in self.columns: yield k, self._get_item_cache(k) else: for i, k in enumerate(self.columns): yield k, self._ixs(i, axis=1) def iterrows(self) -> Iterable[tuple[Hashable, Series]]: """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : Series The data of the row as a Series. See Also -------- DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. """ columns = self.columns klass = self._constructor_sliced using_cow = using_copy_on_write() for k, v in zip(self.index, self.values): s = klass(v, index=columns, name=k).__finalize__(self) if using_cow and self._mgr.is_single_block: s._mgr.add_references(self._mgr) # type: ignore[arg-type] yield k, s def itertuples( self, index: bool = True, name: str | None = "Pandas" ) -> Iterable[tuple[Any, ...]]: """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "Pandas" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. 
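        For example, with duplicate column names the fields are renamed
        positionally, so the values remain accessible (the frame below is
        illustrative):

        >>> df = pd.DataFrame([[1, 2]], columns=['my col', 'my col'])
        >>> next(df.itertuples())
        Pandas(Index=0, _1=1, _2=2)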
        Examples
        --------
        >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
        ...                   index=['dog', 'hawk'])
        >>> df
              num_legs  num_wings
        dog          4          0
        hawk         2          2
        >>> for row in df.itertuples():
        ...     print(row)
        ...
        Pandas(Index='dog', num_legs=4, num_wings=0)
        Pandas(Index='hawk', num_legs=2, num_wings=2)

        By setting the `index` parameter to False we can remove the index
        as the first element of the tuple:

        >>> for row in df.itertuples(index=False):
        ...     print(row)
        ...
        Pandas(num_legs=4, num_wings=0)
        Pandas(num_legs=2, num_wings=2)

        With the `name` parameter set we set a custom name for the yielded
        namedtuples:

        >>> for row in df.itertuples(name='Animal'):
        ...     print(row)
        ...
        Animal(Index='dog', num_legs=4, num_wings=0)
        Animal(Index='hawk', num_legs=2, num_wings=2)
        """
        arrays = []
        fields = list(self.columns)
        if index:
            arrays.append(self.index)
            fields.insert(0, "Index")

        # use integer indexing because of possible duplicate column names
        arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))

        if name is not None:
            # https://github.com/python/mypy/issues/9046
            # error: namedtuple() expects a string literal as the first argument
            itertuple = collections.namedtuple(  # type: ignore[misc]
                name, fields, rename=True
            )
            return map(itertuple._make, zip(*arrays))

        # fallback to regular tuples
        return zip(*arrays)

    def __len__(self) -> int:
        """
        Returns length of info axis, but here we use the index.
        """
        return len(self.index)

    def dot(self, other: Series) -> Series:
        ...

    def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame:
        ...

    def dot(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series:
        """
        Compute the matrix multiplication between the DataFrame and other.

        This method computes the matrix product between the DataFrame and the
        values of another Series, DataFrame or a numpy array.

        It can also be called using ``self @ other`` in Python >= 3.5.

        Parameters
        ----------
        other : Series, DataFrame or array-like
            The other object to compute the matrix product with.

        Returns
        -------
        Series or DataFrame
            If other is a Series, return the matrix product between self and
            other as a Series. If other is a DataFrame or a numpy.array, return
            the matrix product of self and other in a DataFrame or a np.array.

        See Also
        --------
        Series.dot: Similar method for Series.

        Notes
        -----
        The dimensions of DataFrame and other must be compatible in order to
        compute the matrix multiplication. In addition, the column names of
        DataFrame and the index of other must contain the same values, as they
        will be aligned prior to the multiplication.

        The dot method for Series computes the inner product, instead of the
        matrix product here.

        Examples
        --------
        Here we multiply a DataFrame with a Series.

        >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
        >>> s = pd.Series([1, 1, 2, 1])
        >>> df.dot(s)
        0    -4
        1     5
        dtype: int64

        Here we multiply a DataFrame with another DataFrame.

        >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])
        >>> df.dot(other)
           0  1
        0  1  4
        1  2  2

        Note that the dot method gives the same result as @

        >>> df @ other
           0  1
        0  1  4
        1  2  2

        The dot method works also if other is an np.array.

        >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]])
        >>> df.dot(arr)
           0  1
        0  1  4
        1  2  2

        Note how shuffling of the objects does not change the result.
>>> s2 = s.reindex([1, 0, 2, 3]) >>> df.dot(s2) 0 -4 1 5 dtype: int64 """ if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) if len(common) > len(self.columns) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(columns=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right._values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: raise ValueError( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, DataFrame): return self._constructor( np.dot(lvals, rvals), index=left.index, columns=other.columns, copy=False, ) elif isinstance(other, Series): return self._constructor_sliced( np.dot(lvals, rvals), index=left.index, copy=False ) elif isinstance(rvals, (np.ndarray, Index)): result = np.dot(lvals, rvals) if result.ndim == 2: return self._constructor(result, index=left.index, copy=False) else: return self._constructor_sliced(result, index=left.index, copy=False) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other: Series) -> Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(other) def __rmatmul__(self, other) -> DataFrame: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ try: return self.T.dot(np.transpose(other)).T except ValueError as err: if "shape mismatch" not in str(err): raise # GH#21581 give exception message for original shapes msg = f"shapes {np.shape(other)} and {self.shape} not aligned" raise ValueError(msg) from err # ---------------------------------------------------------------------- # IO methods (to / from other formats) def from_dict( cls, data: dict, orient: str = "columns", dtype: Dtype | None = None, columns: Axes | None = None, ) -> DataFrame: """ Construct DataFrame from dict of array-like or dicts. Creates DataFrame object from dictionary by columns or by index allowing dtype specification. Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. orient : {'columns', 'index', 'tight'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. If 'tight', assume a dict with keys ['index', 'columns', 'data', 'index_names', 'column_names']. .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument dtype : dtype, default None Data type to force after DataFrame construction, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'`` or ``orient='tight'``. Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from structured ndarray, sequence of tuples or dicts, or DataFrame. DataFrame : DataFrame object creation using constructor. DataFrame.to_dict : Convert the DataFrame to a dictionary. 
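        Notes
        -----
        For the 'index' and 'tight' orientations, ``from_dict`` inverts
        :meth:`DataFrame.to_dict` called with the same ``orient`` value;
        a small illustrative sketch:

        >>> df = pd.DataFrame({'col_1': [3, 2], 'col_2': ['a', 'b']})
        >>> d = df.to_dict(orient='tight')
        >>> pd.DataFrame.from_dict(d, orient='tight')
           col_1 col_2
        0      3     a
        1      2     b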
Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data, orient='index') 0 1 2 3 row_1 3 2 1 0 row_2 a b c d When using the 'index' orientation, the column names can be specified manually: >>> pd.DataFrame.from_dict(data, orient='index', ... columns=['A', 'B', 'C', 'D']) A B C D row_1 3 2 1 0 row_2 a b c d Specify ``orient='tight'`` to create the DataFrame using a 'tight' format: >>> data = {'index': [('a', 'b'), ('a', 'c')], ... 'columns': [('x', 1), ('y', 2)], ... 'data': [[1, 3], [2, 4]], ... 'index_names': ['n1', 'n2'], ... 'column_names': ['z1', 'z2']} >>> pd.DataFrame.from_dict(data, orient='tight') z1 x y z2 1 2 n1 n2 a b 1 3 c 2 4 """ index = None orient = orient.lower() if orient == "index": if len(data) > 0: # TODO speed up Series case if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: index = list(data.keys()) # error: Incompatible types in assignment (expression has type # "List[Any]", variable has type "Dict[Any, Any]") data = list(data.values()) # type: ignore[assignment] elif orient in ("columns", "tight"): if columns is not None: raise ValueError(f"cannot use columns parameter with orient='{orient}'") else: # pragma: no cover raise ValueError( f"Expected 'index', 'columns' or 'tight' for orient parameter. " f"Got '{orient}' instead" ) if orient != "tight": return cls(data, index=index, columns=columns, dtype=dtype) else: realdata = data["data"] def create_index(indexlist, namelist): index: Index if len(namelist) > 1: index = MultiIndex.from_tuples(indexlist, names=namelist) else: index = Index(indexlist, name=namelist[0]) return index index = create_index(data["index"], data["index_names"]) columns = create_index(data["columns"], data["column_names"]) return cls(realdata, index=index, columns=columns, dtype=dtype) def to_numpy( self, dtype: npt.DTypeLike | None = None, copy: bool = False, na_value: object = lib.no_default, ) -> np.ndarray: """ Convert the DataFrame to a NumPy array. By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the results dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray`. copy : bool, default False Whether to ensure that the returned value is not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. na_value : Any, optional The value to use for missing values. The default value depends on `dtype` and the dtypes of the DataFrame columns. .. versionadded:: 1.1.0 Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogeneous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. 
, 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) """ if dtype is not None: dtype = np.dtype(dtype) result = self._mgr.as_array(dtype=dtype, copy=copy, na_value=na_value) if result.dtype is not dtype: result = np.array(result, dtype=dtype, copy=False) return result def _create_data_for_split_and_tight_to_dict( self, are_all_object_dtype_cols: bool, object_dtype_indices: list[int] ) -> list: """ Simple helper method to create data for to ``to_dict(orient="split")`` and ``to_dict(orient="tight")`` to create the main output data """ if are_all_object_dtype_cols: data = [ list(map(maybe_box_native, t)) for t in self.itertuples(index=False, name=None) ] else: data = [list(t) for t in self.itertuples(index=False, name=None)] if object_dtype_indices: # If we have object_dtype_cols, apply maybe_box_naive after list # comprehension for perf for row in data: for i in object_dtype_indices: row[i] = maybe_box_native(row[i]) return data def to_dict( self, orient: Literal["dict", "list", "series", "split", "tight", "index"] = ..., into: type[dict] = ..., ) -> dict: ... def to_dict(self, orient: Literal["records"], into: type[dict] = ...) -> list[dict]: ... def to_dict( self, orient: Literal[ "dict", "list", "series", "split", "tight", "records", "index" ] = "dict", into: type[dict] = dict, index: bool = True, ) -> dict | list[dict]: """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'tight' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values], 'index_names' -> [index.names], 'column_names' -> [column.names]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. index : bool, default True Whether to include the index item (and index_names item if `orient` is 'tight') in the returned dictionary. Can only be ``False`` when `orient` is 'split' or 'tight'. .. versionadded:: 2.0.0 Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. See Also -------- DataFrame.from_dict: Create a DataFrame from a dictionary. DataFrame.to_json: Convert a DataFrame to JSON format. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df.to_dict() {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}} You can specify the return orientation. 
>>> df.to_dict('series') {'col1': row1 1 row2 2 Name: col1, dtype: int64, 'col2': row1 0.50 row2 0.75 Name: col2, dtype: float64} >>> df.to_dict('split') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]} >>> df.to_dict('records') [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}] >>> df.to_dict('index') {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} >>> df.to_dict('tight') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]], 'index_names': [None], 'column_names': [None]} You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}), defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})] """ from pandas.core.methods.to_dict import to_dict return to_dict(self, orient, into, index) def to_gbq( self, destination_table: str, project_id: str | None = None, chunksize: int | None = None, reauth: bool = False, if_exists: str = "fail", auth_local_webserver: bool = True, table_schema: list[dict[str, str]] | None = None, location: str | None = None, progress_bar: bool = True, credentials=None, ) -> None: """ Write a DataFrame to a Google BigQuery table. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. Parameters ---------- destination_table : str Name of table to be written, in the form ``dataset.tablename``. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. chunksize : int, optional Number of rows to be inserted in each chunk from the dataframe. Set to ``None`` to load the whole dataframe at once. reauth : bool, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. if_exists : str, default 'fail' Behavior when the destination table exists. Value can be one of: ``'fail'`` If table exists raise pandas_gbq.gbq.TableCreationError. ``'replace'`` If table exists, drop it, recreate it, and insert data. ``'append'`` If table exists, insert data. Create if does not exist. auth_local_webserver : bool, default True Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. _console flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. .. versionchanged:: 1.5.0 Default value is changed to ``True``. Google has deprecated the ``auth_local_webserver = False`` `"out of band" (copy-paste) flow <https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html?m=1#disallowed-oob>`_. table_schema : list of dicts, optional List of BigQuery table fields to which according DataFrame columns conform to, e.g. ``[{'name': 'col1', 'type': 'STRING'},...]``. 
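            For instance, a frame with an integer and a string column might use
            ``[{'name': 'id', 'type': 'INTEGER'}, {'name': 'name', 'type': 'STRING'}]``
            (the column names here are illustrative).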
If schema is not provided, it will be generated according to dtypes of DataFrame columns. See BigQuery API documentation on available names of a field. *New in version 0.3.1 of pandas-gbq*. location : str, optional Location where the load job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of the target dataset. *New in version 0.5.0 of pandas-gbq*. progress_bar : bool, default True Use the library `tqdm` to show the progress bar for the upload, chunk by chunk. *New in version 0.5.0 of pandas-gbq*. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. *New in version 0.8.0 of pandas-gbq*. See Also -------- pandas_gbq.to_gbq : This function in the pandas-gbq library. read_gbq : Read a DataFrame from Google BigQuery. """ from pandas.io import gbq gbq.to_gbq( self, destination_table, project_id=project_id, chunksize=chunksize, reauth=reauth, if_exists=if_exists, auth_local_webserver=auth_local_webserver, table_schema=table_schema, location=location, progress_bar=progress_bar, credentials=credentials, ) def from_records( cls, data, index=None, exclude=None, columns=None, coerce_float: bool = False, nrows: int | None = None, ) -> DataFrame: """ Convert structured or record ndarray to DataFrame. Creates a DataFrame object from a structured ndarray, sequence of tuples or dicts, or DataFrame. Parameters ---------- data : structured ndarray, sequence of tuples or dicts, or DataFrame Structured input data. index : str, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use. exclude : sequence, default None Columns or fields to exclude. columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns). coerce_float : bool, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. nrows : int, default None Number of rows to read if data is an iterator. Returns ------- DataFrame See Also -------- DataFrame.from_dict : DataFrame from dict of array-like or dicts. DataFrame : DataFrame object creation using constructor. Examples -------- Data can be provided as a structured ndarray: >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')], ... dtype=[('col_1', 'i4'), ('col_2', 'U1')]) >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of dicts: >>> data = [{'col_1': 3, 'col_2': 'a'}, ... {'col_1': 2, 'col_2': 'b'}, ... {'col_1': 1, 'col_2': 'c'}, ... 
{'col_1': 0, 'col_2': 'd'}] >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of tuples with corresponding columns: >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')] >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2']) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d """ if isinstance(data, DataFrame): if columns is not None: if is_scalar(columns): columns = [columns] data = data[columns] if index is not None: data = data.set_index(index) if exclude is not None: data = data.drop(columns=exclude) return data.copy(deep=False) result_index = None # Make a copy of the input columns so we can modify it if columns is not None: columns = ensure_index(columns) def maybe_reorder( arrays: list[ArrayLike], arr_columns: Index, columns: Index, index ) -> tuple[list[ArrayLike], Index, Index | None]: """ If our desired 'columns' do not match the data's pre-existing 'arr_columns', we re-order our arrays. This is like a pre-emptive (cheap) reindex. """ if len(arrays): length = len(arrays[0]) else: length = 0 result_index = None if len(arrays) == 0 and index is None and length == 0: result_index = default_index(0) arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, length) return arrays, arr_columns, result_index if is_iterator(data): if nrows == 0: return cls() try: first_row = next(data) except StopIteration: return cls(index=index, columns=columns) dtype = None if hasattr(first_row, "dtype") and first_row.dtype.names: dtype = first_row.dtype values = [first_row] if nrows is None: values += data else: values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) else: data = values if isinstance(data, dict): if columns is None: columns = arr_columns = ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] arr_columns_list = [] for k, v in data.items(): if k in columns: arr_columns_list.append(k) arrays.append(v) arr_columns = Index(arr_columns_list) arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = to_arrays(data, columns) arr_columns = columns else: arrays, arr_columns = to_arrays(data, columns) if coerce_float: for i, arr in enumerate(arrays): if arr.dtype == object: # error: Argument 1 to "maybe_convert_objects" has # incompatible type "Union[ExtensionArray, ndarray]"; # expected "ndarray" arrays[i] = lib.maybe_convert_objects( arr, # type: ignore[arg-type] try_float=True, ) arr_columns = ensure_index(arr_columns) if columns is None: columns = arr_columns else: arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) if exclude is None: exclude = set() else: exclude = set(exclude) if index is not None: if isinstance(index, str) or not hasattr(index, "__iter__"): i = columns.get_loc(index) exclude.add(index) if len(arrays) > 0: result_index = Index(arrays[i], name=index) else: result_index = Index([], name=index) else: try: index_data = [arrays[arr_columns.get_loc(field)] for field in index] except (KeyError, TypeError): # raised by get_loc, see GH#29258 result_index = index else: result_index = ensure_index_from_sequences(index_data, names=index) exclude.update(index) if any(exclude): arr_exclude = [x for x in exclude if x in arr_columns] to_remove = [arr_columns.get_loc(col) for col in arr_exclude] arrays = [v for i, v in enumerate(arrays) if i not in to_remove] columns = columns.drop(exclude) manager = 
get_option("mode.data_manager") mgr = arrays_to_mgr(arrays, columns, result_index, typ=manager) return cls(mgr) def to_records( self, index: bool = True, column_dtypes=None, index_dtypes=None ) -> np.recarray: """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. column_dtypes : str, type, dict, default None If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) If the DataFrame index has no label then the recarray field name is set to 'index'. If the index has a label then this is used as the field name: >>> df.index = df.index.rename("I") >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')]) As well as for the index: >>> df.to_records(index_dtypes="<S2") rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')]) >>> index_dtypes = f"<S{df.index.str.len().max()}" >>> df.to_records(index_dtypes=index_dtypes) rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) """ if index: ix_vals = [ np.asarray(self.index.get_level_values(i)) for i in range(self.index.nlevels) ] arrays = ix_vals + [ np.asarray(self.iloc[:, i]) for i in range(len(self.columns)) ] index_names = list(self.index.names) if isinstance(self.index, MultiIndex): index_names = com.fill_missing_names(index_names) elif index_names[0] is None: index_names = ["index"] names = [str(name) for name in itertools.chain(index_names, self.columns)] else: arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))] names = [str(c) for c in self.columns] index_names = [] index_len = len(index_names) formats = [] for i, v in enumerate(arrays): index_int = i # When the names and arrays are collected, we # first collect those in the DataFrame's index, # followed by those in its columns. # # Thus, the total length of the array is: # len(index_names) + len(DataFrame.columns). # # This check allows us to see whether we are # handling a name / array in the index or column. 
            if index_int < index_len:
                dtype_mapping = index_dtypes
                name = index_names[index_int]
            else:
                index_int -= index_len
                dtype_mapping = column_dtypes
                name = self.columns[index_int]

            # We have a dictionary, so we get the data type
            # associated with the index or column (which can
            # be denoted by its name in the DataFrame or its
            # position in DataFrame's array of indices or
            # columns, whichever is applicable).
            if is_dict_like(dtype_mapping):
                if name in dtype_mapping:
                    dtype_mapping = dtype_mapping[name]
                elif index_int in dtype_mapping:
                    dtype_mapping = dtype_mapping[index_int]
                else:
                    dtype_mapping = None

            # If no mapping can be found, use the array's
            # dtype attribute for formatting.
            #
            # A valid dtype must either be a type or
            # string naming a type.
            if dtype_mapping is None:
                formats.append(v.dtype)
            elif isinstance(dtype_mapping, (type, np.dtype, str)):
                # error: Argument 1 to "append" of "list" has incompatible
                # type "Union[type, dtype[Any], str]"; expected "dtype[Any]"
                formats.append(dtype_mapping)  # type: ignore[arg-type]
            else:
                element = "row" if i < index_len else "column"
                msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}"
                raise ValueError(msg)

        return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats})

    def _from_arrays(
        cls,
        arrays,
        columns,
        index,
        dtype: Dtype | None = None,
        verify_integrity: bool = True,
    ) -> DataFrame:
        """
        Create DataFrame from a list of arrays corresponding to the columns.

        Parameters
        ----------
        arrays : list-like of arrays
            Each array in the list corresponds to one column, in order.
        columns : list-like, Index
            The column names for the resulting DataFrame.
        index : list-like, Index
            The rows labels for the resulting DataFrame.
        dtype : dtype, optional
            Optional dtype to enforce for all arrays.
        verify_integrity : bool, default True
            Validate and homogenize all input. If set to False, it is assumed
            that all elements of `arrays` are actual arrays as they will be
            stored in a block (numpy ndarray or ExtensionArray), have the same
            length as and are aligned with the index, and that `columns` and
            `index` are ensured to be an Index object.

        Returns
        -------
        DataFrame
        """
        if dtype is not None:
            dtype = pandas_dtype(dtype)

        manager = get_option("mode.data_manager")
        columns = ensure_index(columns)
        if len(columns) != len(arrays):
            raise ValueError("len(columns) must match len(arrays)")
        mgr = arrays_to_mgr(
            arrays,
            columns,
            index,
            dtype=dtype,
            verify_integrity=verify_integrity,
            typ=manager,
        )
        return cls(mgr)

    @doc(
        storage_options=_shared_docs["storage_options"],
        compression_options=_shared_docs["compression_options"] % "path",
    )
    def to_stata(
        self,
        path: FilePath | WriteBuffer[bytes],
        *,
        convert_dates: dict[Hashable, str] | None = None,
        write_index: bool = True,
        byteorder: str | None = None,
        time_stamp: datetime.datetime | None = None,
        data_label: str | None = None,
        variable_labels: dict[Hashable, str] | None = None,
        version: int | None = 114,
        convert_strl: Sequence[Hashable] | None = None,
        compression: CompressionOptions = "infer",
        storage_options: StorageOptions = None,
        value_labels: dict[Hashable, dict[float, str]] | None = None,
    ) -> None:
        """
        Export DataFrame object to Stata dta format.

        Writes the DataFrame to a Stata dataset file.
        "dta" files contain a Stata dataset.

        Parameters
        ----------
        path : str, path object, or buffer
            String, path object (implementing ``os.PathLike[str]``), or file-like
            object implementing a binary ``write()`` function.

        convert_dates : dict
            Dictionary mapping columns containing datetime types to stata
            internal format to use when writing the dates.
            Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be
            either an integer or a name. Datetime columns that do not have a
            conversion type specified will be converted to 'tc'. Raises
            NotImplementedError if a datetime column has timezone information.
        write_index : bool
            Write the index to Stata dataset.
        byteorder : str
            Can be ">", "<", "little", or "big". The default is `sys.byteorder`.
        time_stamp : datetime
            A datetime to use as file creation date. Default is the current time.
        data_label : str, optional
            A label for the data set. Must be 80 characters or smaller.
        variable_labels : dict
            Dictionary containing columns as keys and variable labels as values.
            Each label must be 80 characters or smaller.
        version : {{114, 117, 118, 119, None}}, default 114
            Version to use in the output dta file. Set to None to let pandas
            decide between 118 or 119 formats depending on the number of
            columns in the frame. Version 114 can be read by Stata 10 and
            later. Version 117 can be read by Stata 13 or later. Version 118
            is supported in Stata 14 and later. Version 119 is supported in
            Stata 15 and later. Version 114 limits string variables to 244
            characters or fewer while versions 117 and later allow strings
            with lengths up to 2,000,000 characters. Versions 118 and 119
            support Unicode characters, and version 119 supports more than
            32,767 variables.

            Version 119 should usually only be used when the number of
            variables exceeds the capacity of dta format 118. Exporting
            smaller datasets in format 119 may have unintended consequences,
            and, as of November 2020, Stata SE cannot read version 119 files.

        convert_strl : list, optional
            List of column names to convert to string columns in Stata StrL
            format. Only available if version is 117. Storing strings in the
            StrL format can produce smaller dta files if strings have more than
            8 characters and values are repeated.
        {compression_options}

            .. versionadded:: 1.1.0

            .. versionchanged:: 1.4.0
                Zstandard support.

        {storage_options}

            .. versionadded:: 1.2.0

        value_labels : dict of dicts
            Dictionary containing columns as keys and dictionaries of column value
            to labels as values. Labels for a single variable must be 32,000
            characters or smaller.

            .. versionadded:: 1.4.0

        Raises
        ------
        NotImplementedError
            * If datetimes contain timezone information
            * Column dtype is not representable in Stata
        ValueError
            * Columns listed in convert_dates are neither datetime64[ns]
              nor datetime.datetime
            * Column listed in convert_dates is not in DataFrame
            * Categorical label contains more than 32,000 characters

        See Also
        --------
        read_stata : Import Stata data files.
        io.stata.StataWriter : Low-level writer for Stata data files.
        io.stata.StataWriter117 : Low-level writer for version 117 files.

        Examples
        --------
        >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon',
        ...                                'parrot'],
        ...                     'speed': [350, 18, 361, 15]}})
        >>> df.to_stata('animals.dta')  # doctest: +SKIP
        """
        if version not in (114, 117, 118, 119, None):
            raise ValueError("Only formats 114, 117, 118 and 119 are supported.")
        if version == 114:
            if convert_strl is not None:
                raise ValueError("strl is not supported in format 114")
            from pandas.io.stata import StataWriter as statawriter
        elif version == 117:
            # Incompatible import of "statawriter" (imported name has type
            # "Type[StataWriter117]", local name has type "Type[StataWriter]")
            from pandas.io.stata import (  # type: ignore[assignment]
                StataWriter117 as statawriter,
            )
        else:  # versions 118 and 119
            # Incompatible import of "statawriter" (imported name has type
            # "Type[StataWriter117]", local name has type "Type[StataWriter]")
            from pandas.io.stata import (  # type: ignore[assignment]
                StataWriterUTF8 as statawriter,
            )

        kwargs: dict[str, Any] = {}
        if version is None or version >= 117:
            # strl conversion is only supported >= 117
            kwargs["convert_strl"] = convert_strl
        if version is None or version >= 118:
            # Specifying the version is only supported for UTF8 (118 or 119)
            kwargs["version"] = version

        writer = statawriter(
            path,
            self,
            convert_dates=convert_dates,
            byteorder=byteorder,
            time_stamp=time_stamp,
            data_label=data_label,
            write_index=write_index,
            variable_labels=variable_labels,
            compression=compression,
            storage_options=storage_options,
            value_labels=value_labels,
            **kwargs,
        )
        writer.write_file()

    def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None:
        """
        Write a DataFrame to the binary Feather format.

        Parameters
        ----------
        path : str, path object, file-like object
            String, path object (implementing ``os.PathLike[str]``), or file-like
            object implementing a binary ``write()`` function. If a string or a path,
            it will be used as Root Directory path when writing a partitioned dataset.
        **kwargs :
            Additional keywords passed to :func:`pyarrow.feather.write_feather`.
            Starting with pyarrow 0.17, this includes the `compression`,
            `compression_level`, `chunksize` and `version` keywords.

            .. versionadded:: 1.1.0

        Notes
        -----
        This function writes the dataframe as a `feather file
        <https://arrow.apache.org/docs/python/feather.html>`_. Requires a default
        index. For saving the DataFrame with your custom index use a method that
        supports custom indices e.g. `to_parquet`.
        """
        from pandas.io.feather_format import to_feather

        to_feather(self, path, **kwargs)

    @doc(
        Series.to_markdown,
        klass=_shared_doc_kwargs["klass"],
        storage_options=_shared_docs["storage_options"],
        examples="""Examples
        --------
        >>> df = pd.DataFrame(
        ...     data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
        ... )
        >>> print(df.to_markdown())
        |    | animal_1   | animal_2   |
        |---:|:-----------|:-----------|
        |  0 | elk        | dog        |
        |  1 | pig        | quetzal    |

        Output markdown with a tabulate option.
>>> print(df.to_markdown(tablefmt="grid")) +----+------------+------------+ | | animal_1 | animal_2 | +====+============+============+ | 0 | elk | dog | +----+------------+------------+ | 1 | pig | quetzal | +----+------------+------------+""", ) def to_markdown( self, buf: FilePath | WriteBuffer[str] | None = None, mode: str = "wt", index: bool = True, storage_options: StorageOptions = None, **kwargs, ) -> str | None: if "showindex" in kwargs: raise ValueError("Pass 'index' instead of 'showindex") kwargs.setdefault("headers", "keys") kwargs.setdefault("tablefmt", "pipe") kwargs.setdefault("showindex", index) tabulate = import_optional_dependency("tabulate") result = tabulate.tabulate(self, **kwargs) if buf is None: return result with get_handle(buf, mode, storage_options=storage_options) as handles: handles.handle.write(result) return None def to_parquet( self, path: None = ..., engine: str = ..., compression: str | None = ..., index: bool | None = ..., partition_cols: list[str] | None = ..., storage_options: StorageOptions = ..., **kwargs, ) -> bytes: ... def to_parquet( self, path: FilePath | WriteBuffer[bytes], engine: str = ..., compression: str | None = ..., index: bool | None = ..., partition_cols: list[str] | None = ..., storage_options: StorageOptions = ..., **kwargs, ) -> None: ... def to_parquet( self, path: FilePath | WriteBuffer[bytes] | None = None, engine: str = "auto", compression: str | None = "snappy", index: bool | None = None, partition_cols: list[str] | None = None, storage_options: StorageOptions = None, **kwargs, ) -> bytes | None: """ Write a DataFrame to the binary parquet format. This function writes the dataframe as a `parquet file <https://parquet.apache.org/>`_. You can choose different parquet backends, and have the option of compression. See :ref:`the user guide <io.parquet>` for more details. Parameters ---------- path : str, path object, file-like object, or None, default None String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. If None, the result is returned as bytes. If a string or path, it will be used as Root Directory path when writing a partitioned dataset. .. versionchanged:: 1.2.0 Previously this was "fname" engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. index : bool, default None If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``True`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. partition_cols : list, optional, default None Column names by which to partition the dataset. Columns are partitioned in the order they are given. Must be None if path is not a string. {storage_options} .. versionadded:: 1.2.0 **kwargs Additional arguments passed to the parquet library. See :ref:`pandas io <io.parquet>` for more details. Returns ------- bytes if no path argument is provided else None See Also -------- read_parquet : Read a parquet file. 
DataFrame.to_orc : Write an orc file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- This function requires either the `fastparquet <https://pypi.org/project/fastparquet>`_ or `pyarrow <https://arrow.apache.org/docs/python/>`_ library. Examples -------- >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}}) >>> df.to_parquet('df.parquet.gzip', ... compression='gzip') # doctest: +SKIP >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP col1 col2 0 1 3 1 2 4 If you want to get a buffer to the parquet content you can use a io.BytesIO object, as long as you don't use partition_cols, which creates multiple files. >>> import io >>> f = io.BytesIO() >>> df.to_parquet(f) >>> f.seek(0) 0 >>> content = f.read() """ from pandas.io.parquet import to_parquet return to_parquet( self, path, engine, compression=compression, index=index, partition_cols=partition_cols, storage_options=storage_options, **kwargs, ) def to_orc( self, path: FilePath | WriteBuffer[bytes] | None = None, *, engine: Literal["pyarrow"] = "pyarrow", index: bool | None = None, engine_kwargs: dict[str, Any] | None = None, ) -> bytes | None: """ Write a DataFrame to the ORC format. .. versionadded:: 1.5.0 Parameters ---------- path : str, file-like object or None, default None If a string, it will be used as Root Directory path when writing a partitioned dataset. By file-like object, we refer to objects with a write() method, such as a file handle (e.g. via builtin open function). If path is None, a bytes object is returned. engine : str, default 'pyarrow' ORC library to use. Pyarrow must be >= 7.0.0. index : bool, optional If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``infer`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. engine_kwargs : dict[str, Any] or None, default None Additional keyword arguments passed to :func:`pyarrow.orc.write_table`. Returns ------- bytes if no path argument is provided else None Raises ------ NotImplementedError Dtype of one or more columns is category, unsigned integers, interval, period or sparse. ValueError engine is not pyarrow. See Also -------- read_orc : Read a ORC file. DataFrame.to_parquet : Write a parquet file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- * Before using this function you should read the :ref:`user guide about ORC <io.orc>` and :ref:`install optional dependencies <install.warn_orc>`. * This function requires `pyarrow <https://arrow.apache.org/docs/python/>`_ library. * For supported dtypes please refer to `supported ORC features in Arrow <https://arrow.apache.org/docs/cpp/orc.html#data-types>`__. * Currently timezones in datetime columns are not preserved when a dataframe is converted into ORC files. 
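* A sketch of forwarding writer options (assuming a pyarrow build with the
  relevant codec compiled in): a compression choice can be passed through
  ``engine_kwargs``, e.g.
  ``df.to_orc("out.orc", engine_kwargs={"compression": "zstd"})``.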
Examples -------- >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [4, 3]}) >>> df.to_orc('df.orc') # doctest: +SKIP >>> pd.read_orc('df.orc') # doctest: +SKIP col1 col2 0 1 4 1 2 3 If you want to get a buffer to the orc content you can write it to io.BytesIO >>> import io >>> b = io.BytesIO(df.to_orc()) # doctest: +SKIP >>> b.seek(0) # doctest: +SKIP 0 >>> content = b.read() # doctest: +SKIP """ from pandas.io.orc import to_orc return to_orc( self, path, engine=engine, index=index, engine_kwargs=engine_kwargs ) def to_html( self, buf: FilePath | WriteBuffer[str], columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> None: ... def to_html( self, buf: None = ..., columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> str: ... header_type="bool", header="Whether to print column labels, default True", col_space_type="str or int, list or dict of int or str", col_space="The minimum width of each column in CSS length " "units. An int is assumed to be px units.", ) def to_html( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[Level] | None = None, col_space: ColspaceArgType | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: FormattersType | None = None, float_format: FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool | str = False, decimal: str = ".", bold_rows: bool = True, classes: str | list | tuple | None = None, escape: bool = True, notebook: bool = False, border: int | bool | None = None, table_id: str | None = None, render_links: bool = False, encoding: str | None = None, ) -> str | None: """ Render a DataFrame as an HTML table. %(shared_params)s bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.display.html.border``. 
table_id : str, optional A css id is included in the opening `<table>` tag if specified. render_links : bool, default False Convert URLs to HTML links. encoding : str, default "utf-8" Set character encoding. .. versionadded:: 1.0 %(returns)s See Also -------- to_string : Convert DataFrame to a string. """ if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS: raise ValueError("Invalid value for justify parameter") formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, header=header, index=index, formatters=formatters, float_format=float_format, bold_rows=bold_rows, sparsify=sparsify, justify=justify, index_names=index_names, escape=escape, decimal=decimal, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, ) # TODO: a generic formatter wld b in DataFrameFormatter return fmt.DataFrameRenderer(formatter).to_html( buf=buf, classes=classes, notebook=notebook, border=border, encoding=encoding, table_id=table_id, render_links=render_links, ) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buffer", ) def to_xml( self, path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, index: bool = True, root_name: str | None = "data", row_name: str | None = "row", na_rep: str | None = None, attr_cols: list[str] | None = None, elem_cols: list[str] | None = None, namespaces: dict[str | None, str] | None = None, prefix: str | None = None, encoding: str = "utf-8", xml_declaration: bool | None = True, pretty_print: bool | None = True, parser: str | None = "lxml", stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ) -> str | None: """ Render a DataFrame to an XML document. .. versionadded:: 1.3.0 Parameters ---------- path_or_buffer : str, path object, file-like object, or None, default None String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a ``write()`` function. If None, the result is returned as a string. index : bool, default True Whether to include index in XML document. root_name : str, default 'data' The name of root element in XML document. row_name : str, default 'row' The name of row element in XML document. na_rep : str, optional Missing data representation. attr_cols : list-like, optional List of columns to write as attributes in row element. Hierarchical columns will be flattened with underscore delimiting the different levels. elem_cols : list-like, optional List of columns to write as children in row element. By default, all columns output as children of row element. Hierarchical columns will be flattened with underscore delimiting the different levels. namespaces : dict, optional All namespaces to be defined in root element. Keys of dict should be prefix names and values of dict corresponding URIs. Default namespaces should be given empty string key. For example, :: namespaces = {{"": "https://example.com"}} prefix : str, optional Namespace prefix to be used for every element and/or attribute in document. This should be one of the keys in ``namespaces`` dict. encoding : str, default 'utf-8' Encoding of the resulting document. xml_declaration : bool, default True Whether to include the XML declaration at start of document. pretty_print : bool, default True Whether output should be pretty printed with indentation and line breaks. 
parser : {{'lxml','etree'}}, default 'lxml' Parser module to use for building of tree. Only 'lxml' and 'etree' are supported. With 'lxml', the ability to use XSLT stylesheet is supported. stylesheet : str, path object or file-like object, optional A URL, file-like object, or a raw string containing an XSLT script used to transform the raw XML output. Script should use layout of elements and attributes from original output. This argument requires ``lxml`` to be installed. Only XSLT 1.0 scripts and not later versions is currently supported. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. {storage_options} Returns ------- None or str If ``io`` is None, returns the resulting XML format as a string. Otherwise returns None. See Also -------- to_json : Convert the pandas object to a JSON string. to_html : Convert DataFrame to a html. Examples -------- >>> df = pd.DataFrame({{'shape': ['square', 'circle', 'triangle'], ... 'degrees': [360, 360, 180], ... 'sides': [4, np.nan, 3]}}) >>> df.to_xml() # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row> <index>0</index> <shape>square</shape> <degrees>360</degrees> <sides>4.0</sides> </row> <row> <index>1</index> <shape>circle</shape> <degrees>360</degrees> <sides/> </row> <row> <index>2</index> <shape>triangle</shape> <degrees>180</degrees> <sides>3.0</sides> </row> </data> >>> df.to_xml(attr_cols=[ ... 'index', 'shape', 'degrees', 'sides' ... ]) # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row index="0" shape="square" degrees="360" sides="4.0"/> <row index="1" shape="circle" degrees="360"/> <row index="2" shape="triangle" degrees="180" sides="3.0"/> </data> >>> df.to_xml(namespaces={{"doc": "https://example.com"}}, ... prefix="doc") # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <doc:data xmlns:doc="https://example.com"> <doc:row> <doc:index>0</doc:index> <doc:shape>square</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides>4.0</doc:sides> </doc:row> <doc:row> <doc:index>1</doc:index> <doc:shape>circle</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides/> </doc:row> <doc:row> <doc:index>2</doc:index> <doc:shape>triangle</doc:shape> <doc:degrees>180</doc:degrees> <doc:sides>3.0</doc:sides> </doc:row> </doc:data> """ from pandas.io.formats.xml import ( EtreeXMLFormatter, LxmlXMLFormatter, ) lxml = import_optional_dependency("lxml.etree", errors="ignore") TreeBuilder: type[EtreeXMLFormatter] | type[LxmlXMLFormatter] if parser == "lxml": if lxml is not None: TreeBuilder = LxmlXMLFormatter else: raise ImportError( "lxml not found, please install or use the etree parser." 
) elif parser == "etree": TreeBuilder = EtreeXMLFormatter else: raise ValueError("Values for parser can only be lxml or etree.") xml_formatter = TreeBuilder( self, path_or_buffer=path_or_buffer, index=index, root_name=root_name, row_name=row_name, na_rep=na_rep, attr_cols=attr_cols, elem_cols=elem_cols, namespaces=namespaces, prefix=prefix, encoding=encoding, xml_declaration=xml_declaration, pretty_print=pretty_print, stylesheet=stylesheet, compression=compression, storage_options=storage_options, ) return xml_formatter.write_output() # ---------------------------------------------------------------------- def info( self, verbose: bool | None = None, buf: WriteBuffer[str] | None = None, max_cols: int | None = None, memory_usage: bool | str | None = None, show_counts: bool | None = None, ) -> None: info = DataFrameInfo( data=self, memory_usage=memory_usage, ) info.render( buf=buf, max_cols=max_cols, verbose=verbose, show_counts=show_counts, ) def memory_usage(self, index: bool = True, deep: bool = False) -> Series: """ Return the memory usage of each column in bytes. The memory usage can optionally include the contribution of the index and elements of `object` dtype. This value is displayed in `DataFrame.info` by default. This can be suppressed by setting ``pandas.options.display.memory_usage`` to False. Parameters ---------- index : bool, default True Specifies whether to include the memory usage of the DataFrame's index in returned Series. If ``index=True``, the memory usage of the index is the first item in the output. deep : bool, default False If True, introspect the data deeply by interrogating `object` dtypes for system-level memory consumption, and include it in the returned values. Returns ------- Series A Series whose index is the original column names and whose values is the memory usage of each column in bytes. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of an ndarray. Series.memory_usage : Bytes consumed by a Series. Categorical : Memory-efficient array for string values with many repeated values. DataFrame.info : Concise summary of a DataFrame. Notes ----- See the :ref:`Frequently Asked Questions <df-memory-usage>` for more details. Examples -------- >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t)) ... for t in dtypes]) >>> df = pd.DataFrame(data) >>> df.head() int64 float64 complex128 object bool 0 1 1.0 1.0+0.0j 1 True 1 1 1.0 1.0+0.0j 1 True 2 1 1.0 1.0+0.0j 1 True 3 1 1.0 1.0+0.0j 1 True 4 1 1.0 1.0+0.0j 1 True >>> df.memory_usage() Index 128 int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=False) int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 The memory footprint of `object` dtype columns is ignored by default: >>> df.memory_usage(deep=True) Index 128 int64 40000 float64 40000 complex128 80000 object 180000 bool 5000 dtype: int64 Use a Categorical for efficient storage of an object-dtype column with many repeated values. 
>>> df['object'].astype('category').memory_usage(deep=True) 5244 """ result = self._constructor_sliced( [c.memory_usage(index=False, deep=deep) for col, c in self.items()], index=self.columns, dtype=np.intp, ) if index: index_memory_usage = self._constructor_sliced( self.index.memory_usage(deep=deep), index=["Index"] ) result = index_memory_usage._append(result) return result def transpose(self, *args, copy: bool = False) -> DataFrame: """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. Parameters ---------- *args : tuple, optional Accepted for compatibility with NumPy. copy : bool, default False Whether to copy the data after transposing, even for DataFrames with a single dtype. Note that a copy is always required for mixed dtype DataFrames, or for DataFrames with any extension types. Returns ------- DataFrame The transposed DataFrame. See Also -------- numpy.transpose : Permute the dimensions of a given array. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the `object` dtype. In such a case, a copy of the data is always made. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = pd.DataFrame(data=d1) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T # or df1.transpose() >>> df1_transposed 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'name': ['Alice', 'Bob'], ... 'score': [9.5, 8], ... 'employed': [False, True], ... 'kids': [0, 0]} >>> df2 = pd.DataFrame(data=d2) >>> df2 name score employed kids 0 Alice 9.5 False 0 1 Bob 8.0 True 0 >>> df2_transposed = df2.T # or df2.transpose() >>> df2_transposed 0 1 name Alice Bob score 9.5 8.0 employed False True kids 0 0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the `object` dtype: >>> df2.dtypes name object score float64 employed bool kids int64 dtype: object >>> df2_transposed.dtypes 0 object 1 object dtype: object """ nv.validate_transpose(args, {}) # construct the args dtypes = list(self.dtypes) if self._can_fast_transpose: # Note: tests pass without this, but this improves perf quite a bit. new_vals = self._values.T if copy and not using_copy_on_write(): new_vals = new_vals.copy() result = self._constructor( new_vals, index=self.columns, columns=self.index, copy=False ) if using_copy_on_write() and len(self) > 0: result._mgr.add_references(self._mgr) # type: ignore[arg-type] elif ( self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]) ): # We have EAs with the same dtype. We can preserve that dtype in transpose. dtype = dtypes[0] arr_type = dtype.construct_array_type() values = self.values new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values] result = type(self)._from_arrays( new_values, index=self.columns, columns=self.index ) else: new_arr = self.values.T if copy and not using_copy_on_write(): new_arr = new_arr.copy() result = self._constructor( new_arr, index=self.columns, columns=self.index, # We already made a copy (more than one block) copy=False, ) return result.__finalize__(self, method="transpose") def T(self) -> DataFrame: """ The transpose of the DataFrame. 
Returns ------- DataFrame The transposed DataFrame. See Also -------- DataFrame.transpose : Transpose index and columns. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.T 0 1 col1 1 2 col2 3 4 """ return self.transpose() # ---------------------------------------------------------------------- # Indexing Methods def _ixs(self, i: int, axis: AxisInt = 0) -> Series: """ Parameters ---------- i : int axis : int Returns ------- Series """ # irow if axis == 0: new_mgr = self._mgr.fast_xs(i) # if we are a copy, mark as such copy = isinstance(new_mgr.array, np.ndarray) and new_mgr.array.base is None result = self._constructor_sliced(new_mgr, name=self.index[i]).__finalize__( self ) result._set_is_copy(self, copy=copy) return result # icol else: label = self.columns[i] col_mgr = self._mgr.iget(i) result = self._box_col_values(col_mgr, i) # this is a cached value, mark it so result._set_as_cached(label, self) return result def _get_column_array(self, i: int) -> ArrayLike: """ Get the values of the i'th column (ndarray or ExtensionArray, as stored in the Block) Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ return self._mgr.iget_values(i) def _iter_column_arrays(self) -> Iterator[ArrayLike]: """ Iterate over the arrays of all columns in order. This returns the values as stored in the Block (ndarray or ExtensionArray). Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ for i in range(len(self.columns)): yield self._get_column_array(i) def _getitem_nocopy(self, key: list): """ Behaves like __getitem__, but returns a view in cases where __getitem__ would make a copy. """ # TODO(CoW): can be removed if/when we are always Copy-on-Write indexer = self.columns._get_indexer_strict(key, "columns")[1] new_axis = self.columns[indexer] new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=0, allow_dups=True, copy=False, only_slice=True, ) return self._constructor(new_mgr) def __getitem__(self, key): check_dict_or_set_indexers(key) key = lib.item_from_zerodim(key) key = com.apply_if_callable(key, self) if is_hashable(key) and not is_iterator(key): # is_iterator to exclude generator e.g. test_getitem_listlike # shortcut if the key is in columns is_mi = isinstance(self.columns, MultiIndex) # GH#45316 Return view if key is not duplicated # Only use drop_duplicates with duplicates for performance if not is_mi and ( self.columns.is_unique and key in self.columns or key in self.columns.drop_duplicates(keep=False) ): return self._get_item_cache(key) elif is_mi and self.columns.is_unique and key in self.columns: return self._getitem_multilevel(key) # Do we have a slicer (on rows)? if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") if isinstance(indexer, np.ndarray): # reachable with DatetimeIndex indexer = lib.maybe_indices_to_slice( indexer.astype(np.intp, copy=False), len(self) ) if isinstance(indexer, np.ndarray): # GH#43223 If we can not convert, use take return self.take(indexer, axis=0) return self._slice(indexer, axis=0) # Do we have a (boolean) DataFrame? if isinstance(key, DataFrame): return self.where(key) # Do we have a (boolean) 1d indexer? 
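        # (Descriptive note: i.e. a boolean Series, ndarray, or list whose
        #  length matches the index; unlike the column lookups above, this
        #  selects *rows* and dispatches to _getitem_bool_array.)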
if com.is_bool_indexer(key): return self._getitem_bool_array(key) # We are left with two options: a single key, and a collection of keys, # We interpret tuples as collections only for non-MultiIndex is_single_key = isinstance(key, tuple) or not is_list_like(key) if is_single_key: if self.columns.nlevels > 1: return self._getitem_multilevel(key) indexer = self.columns.get_loc(key) if is_integer(indexer): indexer = [indexer] else: if is_iterator(key): key = list(key) indexer = self.columns._get_indexer_strict(key, "columns")[1] # take() does not accept boolean indexers if getattr(indexer, "dtype", None) == bool: indexer = np.where(indexer)[0] data = self._take_with_is_copy(indexer, axis=1) if is_single_key: # What does looking for a single key in a non-unique index return? # The behavior is inconsistent. It returns a Series, except when # - the key itself is repeated (test on data.shape, #9519), or # - we have a MultiIndex on columns (test on self.columns, #21309) if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex): # GH#26490 using data[key] can cause RecursionError return data._get_item_cache(key) return data def _getitem_bool_array(self, key): # also raises Exception if object array with NA values # warning here just in case -- previously __setitem__ was # reindexing but __getitem__ was not; it seems more reasonable to # go with the __setitem__ behavior since that is more consistent # with all other indexing behavior if isinstance(key, Series) and not key.index.equals(self.index): warnings.warn( "Boolean Series key will be reindexed to match DataFrame index.", UserWarning, stacklevel=find_stack_level(), ) elif len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}." ) # check_bool_indexer will throw exception if Series key cannot # be reindexed to match DataFrame rows key = check_bool_indexer(self.index, key) if key.all(): return self.copy(deep=None) indexer = key.nonzero()[0] return self._take_with_is_copy(indexer, axis=0) def _getitem_multilevel(self, key): # self.columns is a MultiIndex loc = self.columns.get_loc(key) if isinstance(loc, (slice, np.ndarray)): new_columns = self.columns[loc] result_columns = maybe_droplevels(new_columns, key) if self._is_mixed_type: result = self.reindex(columns=new_columns) result.columns = result_columns else: new_values = self._values[:, loc] result = self._constructor( new_values, index=self.index, columns=result_columns, copy=False ) if using_copy_on_write() and isinstance(loc, slice): result._mgr.add_references(self._mgr) # type: ignore[arg-type] result = result.__finalize__(self) # If there is only one column being returned, and its name is # either an empty string, or a tuple with an empty string as its # first element, then treat the empty string as a placeholder # and return the column as if the user had provided that empty # string in the key. If the result is a Series, exclude the # implied empty string from its name. if len(result.columns) == 1: # e.g. 
test_frame_getitem_multicolumn_empty_level, # test_frame_mixed_depth_get, test_loc_setitem_single_column_slice top = result.columns[0] if isinstance(top, tuple): top = top[0] if top == "": result = result[""] if isinstance(result, Series): result = self._constructor_sliced( result, index=self.index, name=key ) result._set_is_copy(self) return result else: # loc is neither a slice nor ndarray, so must be an int return self._ixs(loc, axis=1) def _get_value(self, index, col, takeable: bool = False) -> Scalar: """ Quickly retrieve single value at passed column and index. Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- scalar Notes ----- Assumes that both `self.index._index_as_unique` and `self.columns._index_as_unique`; Caller is responsible for checking. """ if takeable: series = self._ixs(col, axis=1) return series._values[index] series = self._get_item_cache(col) engine = self.index._engine if not isinstance(self.index, MultiIndex): # CategoricalIndex: Trying to use the engine fastpath may give incorrect # results if our categories are integers that dont match our codes # IntervalIndex: IntervalTree has no get_loc row = self.index.get_loc(index) return series._values[row] # For MultiIndex going through engine effectively restricts us to # same-length tuples; see test_get_set_value_no_partial_indexing loc = engine.get_loc(index) return series._values[loc] def isetitem(self, loc, value) -> None: """ Set the given value in the column with position `loc`. This is a positional analogue to ``__setitem__``. Parameters ---------- loc : int or sequence of ints Index position for the column. value : scalar or arraylike Value(s) for the column. Notes ----- ``frame.isetitem(loc, value)`` is an in-place method as it will modify the DataFrame in place (not returning a new object). In contrast to ``frame.iloc[:, i] = value`` which will try to update the existing values in place, ``frame.isetitem(loc, value)`` will not update the values of the column itself in place, it will instead insert a new array. In cases where ``frame.columns`` is unique, this is equivalent to ``frame[frame.columns[i]] = value``. 
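        A minimal illustration (hypothetical frame):

        >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
        >>> df.isetitem(0, [10, 11])
        >>> df
            a  b
        0  10  3
        1  11  4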
""" if isinstance(value, DataFrame): if is_scalar(loc): loc = [loc] for i, idx in enumerate(loc): arraylike = self._sanitize_column(value.iloc[:, i]) self._iset_item_mgr(idx, arraylike, inplace=False) return arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=False) def __setitem__(self, key, value): if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) key = com.apply_if_callable(key, self) # see if we can slice the rows if isinstance(key, slice): slc = self.index._convert_slice_indexer(key, kind="getitem") return self._setitem_slice(slc, value) if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2: self._setitem_frame(key, value) elif isinstance(key, (Series, np.ndarray, list, Index)): self._setitem_array(key, value) elif isinstance(value, DataFrame): self._set_item_frame_value(key, value) elif ( is_list_like(value) and not self.columns.is_unique and 1 < len(self.columns.get_indexer_for([key])) == len(value) ): # Column to set is duplicated self._setitem_array([key], value) else: # set column self._set_item(key, value) def _setitem_slice(self, key: slice, value) -> None: # NB: we can't just use self.loc[key] = value because that # operates on labels and we need to operate positional for # backwards-compat, xref GH#31469 self._check_setitem_copy() self.iloc[key] = value def _setitem_array(self, key, value): # also raises Exception if object array with NA values if com.is_bool_indexer(key): # bool indexer is indexing along rows if len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}!" ) key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] self._check_setitem_copy() if isinstance(value, DataFrame): # GH#39931 reindex since iloc does not align value = value.reindex(self.index.take(indexer)) self.iloc[indexer] = value else: # Note: unlike self.iloc[:, indexer] = value, this will # never try to overwrite values inplace if isinstance(value, DataFrame): check_key_length(self.columns, key, value) for k1, k2 in zip(key, value.columns): self[k1] = value[k2] elif not is_list_like(value): for col in key: self[col] = value elif isinstance(value, np.ndarray) and value.ndim == 2: self._iset_not_inplace(key, value) elif np.ndim(value) > 1: # list of lists value = DataFrame(value).values return self._setitem_array(key, value) else: self._iset_not_inplace(key, value) def _iset_not_inplace(self, key, value): # GH#39510 when setting with df[key] = obj with a list-like key and # list-like value, we iterate over those listlikes and set columns # one at a time. This is different from dispatching to # `self.loc[:, key]= value` because loc.__setitem__ may overwrite # data inplace, whereas this will insert new arrays. def igetitem(obj, i: int): # Note: we catch DataFrame obj before getting here, but # hypothetically would return obj.iloc[:, i] if isinstance(obj, np.ndarray): return obj[..., i] else: return obj[i] if self.columns.is_unique: if np.shape(value)[-1] != len(key): raise ValueError("Columns must be same length as key") for i, col in enumerate(key): self[col] = igetitem(value, i) else: ilocs = self.columns.get_indexer_non_unique(key)[0] if (ilocs < 0).any(): # key entries not in self.columns raise NotImplementedError if np.shape(value)[-1] != len(ilocs): raise ValueError("Columns must be same length as key") assert np.ndim(value) <= 2 orig_columns = self.columns # Using self.iloc[:, i] = ... 
may set values inplace, which # by convention we do not do in __setitem__ try: self.columns = Index(range(len(self.columns))) for i, iloc in enumerate(ilocs): self[iloc] = igetitem(value, i) finally: self.columns = orig_columns def _setitem_frame(self, key, value): # support boolean setting with DataFrame input, e.g. # df[df > df2] = 0 if isinstance(key, np.ndarray): if key.shape != self.shape: raise ValueError("Array conditional must be same shape as self") key = self._constructor(key, **self._construct_axes_dict(), copy=False) if key.size and not all(is_bool_dtype(dtype) for dtype in key.dtypes): raise TypeError( "Must pass DataFrame or 2-d ndarray with boolean values only" ) self._check_inplace_setting(value) self._check_setitem_copy() self._where(-key, value, inplace=True) def _set_item_frame_value(self, key, value: DataFrame) -> None: self._ensure_valid_index(value) # align columns if key in self.columns: loc = self.columns.get_loc(key) cols = self.columns[loc] len_cols = 1 if is_scalar(cols) or isinstance(cols, tuple) else len(cols) if len_cols != len(value.columns): raise ValueError("Columns must be same length as key") # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame if isinstance(self.columns, MultiIndex) and isinstance( loc, (slice, Series, np.ndarray, Index) ): cols_droplevel = maybe_droplevels(cols, key) if len(cols_droplevel) and not cols_droplevel.equals(value.columns): value = value.reindex(cols_droplevel, axis=1) for col, col_droplevel in zip(cols, cols_droplevel): self[col] = value[col_droplevel] return if is_scalar(cols): self[cols] = value[value.columns[0]] return # now align rows arraylike = _reindex_for_setitem(value, self.index) self._set_item_mgr(key, arraylike) return if len(value.columns) != 1: raise ValueError( "Cannot set a DataFrame with multiple columns to the single " f"column {key}" ) self[key] = value[value.columns[0]] def _iset_item_mgr( self, loc: int | slice | np.ndarray, value, inplace: bool = False ) -> None: # when called from _set_item_mgr loc can be anything returned from get_loc self._mgr.iset(loc, value, inplace=inplace) self._clear_item_cache() def _set_item_mgr(self, key, value: ArrayLike) -> None: try: loc = self._info_axis.get_loc(key) except KeyError: # This item wasn't present, just insert at end self._mgr.insert(len(self._info_axis), key, value) else: self._iset_item_mgr(loc, value) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _iset_item(self, loc: int, value) -> None: arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=True) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _set_item(self, key, value) -> None: """ Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. 
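        For illustration (a sketch via the public ``__setitem__`` path), a 1-D
        value assigned to a duplicated column label is broadcast across all
        matching columns:

        >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], columns=["a", "a"])
        >>> df["a"] = [10, 20, 30]
        >>> list(df.iloc[:, 0]) == list(df.iloc[:, 1]) == [10, 20, 30]
        True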
""" value = self._sanitize_column(value) if ( key in self.columns and value.ndim == 1 and not is_extension_array_dtype(value) ): # broadcast across multiple columns if necessary if not self.columns.is_unique or isinstance(self.columns, MultiIndex): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)).T self._set_item_mgr(key, value) def _set_value( self, index: IndexLabel, col, value: Scalar, takeable: bool = False ) -> None: """ Put single value at passed column and index. Parameters ---------- index : Label row label col : Label column label value : scalar takeable : bool, default False Sets whether or not index/col interpreted as indexers """ try: if takeable: icol = col iindex = cast(int, index) else: icol = self.columns.get_loc(col) iindex = self.index.get_loc(index) self._mgr.column_setitem(icol, iindex, value, inplace_only=True) self._clear_item_cache() except (KeyError, TypeError, ValueError, LossySetitemError): # get_loc might raise a KeyError for missing labels (falling back # to (i)loc will do expansion of the index) # column_setitem will do validation that may raise TypeError, # ValueError, or LossySetitemError # set using a non-recursive method & reset the cache if takeable: self.iloc[index, col] = value else: self.loc[index, col] = value self._item_cache.pop(col, None) except InvalidIndexError as ii_err: # GH48729: Seems like you are trying to assign a value to a # row when only scalar options are permitted raise InvalidIndexError( f"You can only assign a scalar value not a {type(value)}" ) from ii_err def _ensure_valid_index(self, value) -> None: """ Ensure that if we don't have an index, that we can create one from the passed value. """ # GH5632, make sure that we are a Series convertible if not len(self.index) and is_list_like(value) and len(value): if not isinstance(value, DataFrame): try: value = Series(value) except (ValueError, NotImplementedError, TypeError) as err: raise ValueError( "Cannot set a frame with no defined index " "and a value that cannot be converted to a Series" ) from err # GH31368 preserve name of index index_copy = value.index.copy() if self.index.name is not None: index_copy.name = self.index.name self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan) def _box_col_values(self, values: SingleDataManager, loc: int) -> Series: """ Provide boxed values for a column. """ # Lookup in columns so that if e.g. a str datetime was passed # we attach the Timestamp object as the name. 
name = self.columns[loc] klass = self._constructor_sliced # We get index=self.index bc values is a SingleDataManager return klass(values, name=name, fastpath=True).__finalize__(self) # ---------------------------------------------------------------------- # Lookup Caching def _clear_item_cache(self) -> None: self._item_cache.clear() def _get_item_cache(self, item: Hashable) -> Series: """Return the cached item, item represents a label indexer.""" if using_copy_on_write(): loc = self.columns.get_loc(item) return self._ixs(loc, axis=1) cache = self._item_cache res = cache.get(item) if res is None: # All places that call _get_item_cache have unique columns, # pending resolution of GH#33047 loc = self.columns.get_loc(item) res = self._ixs(loc, axis=1) cache[item] = res # for a chain res._is_copy = self._is_copy return res def _reset_cacher(self) -> None: # no-op for DataFrame pass def _maybe_cache_changed(self, item, value: Series, inplace: bool) -> None: """ The object has called back to us saying maybe it has changed. """ loc = self._info_axis.get_loc(item) arraylike = value._values old = self._ixs(loc, axis=1) if old._values is value._values and inplace: # GH#46149 avoid making unnecessary copies/block-splitting return self._mgr.iset(loc, arraylike, inplace=inplace) # ---------------------------------------------------------------------- # Unsorted def query(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> DataFrame: ... def query(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def query(self, expr: str, *, inplace: bool = ..., **kwargs) -> DataFrame | None: ... def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | None: """ Query the columns of a DataFrame with a boolean expression. Parameters ---------- expr : str The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. You can refer to column names that are not valid Python variable names by surrounding them in backticks. Thus, column names containing spaces or punctuations (besides underscores) or starting with digits must be surrounded by backticks. (For example, a column named "Area (cm^2)" would be referenced as ```Area (cm^2)```). Column names which are Python keywords (like "list", "for", "import", etc) cannot be used. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether to modify the DataFrame rather than creating a new one. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. Returns ------- DataFrame or None DataFrame resulting from the provided query expression or None if ``inplace=True``. See Also -------- eval : Evaluate a string describing operations on DataFrame columns. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. 
This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. Please note that Python keywords may not be used as identifiers. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. *Backtick quoted variables* Backtick quoted variables are parsed as literal Python code and are converted internally to a Python valid identifier. This can lead to the following problems. During parsing a number of disallowed characters inside the backtick quoted string are replaced by strings that are allowed as a Python identifier. These characters include all operators in Python, the space character, the question mark, the exclamation mark, the dollar sign, and the euro sign. For other characters that fall outside the ASCII range (U+0001..U+007F) and those that are not further specified in PEP 3131, the query parser will raise an error. This excludes whitespace different than the space character, but also the hashtag (as it is used for comments) and the backtick itself (backtick can also not be escaped). In a special case, quotes that make a pair around a backtick can confuse the parser. For example, ```it's` > `that's``` will raise an error, as it forms a quoted string (``'s > `that'``) with a backtick inside. See also the Python documentation about lexical analysis (https://docs.python.org/3/reference/lexical_analysis.html) in combination with the source code in :mod:`pandas.core.computation.parsing`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ inplace = validate_bool_kwarg(inplace, "inplace") if not isinstance(expr, str): msg = f"expr must be a string to be evaluated, {type(expr)} given" raise ValueError(msg) kwargs["level"] = kwargs.pop("level", 0) + 1 kwargs["target"] = None res = self.eval(expr, **kwargs) try: result = self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query result = self[res] if inplace: self._update_inplace(result) return None else: return result def eval(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> Any: ... def eval(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def eval(self, expr: str, *, inplace: bool = False, **kwargs) -> Any | None: """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. 
This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, pandas object, or None The result of the evaluation or None if ``inplace=True``. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Multiple columns can be assigned to using multi-line expressions: >>> df.eval( ... ''' ... C = A + B ... D = A - B ... ''' ... ) A B C D 0 1 10 11 -9 1 2 8 10 -6 2 3 6 9 -3 3 4 4 8 0 4 5 2 7 3 """ from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, "inplace") kwargs["level"] = kwargs.pop("level", 0) + 1 index_resolvers = self._get_index_resolvers() column_resolvers = self._get_cleaned_column_resolvers() resolvers = column_resolvers, index_resolvers if "target" not in kwargs: kwargs["target"] = self kwargs["resolvers"] = tuple(kwargs.get("resolvers", ())) + resolvers return _eval(expr, inplace=inplace, **kwargs) def select_dtypes(self, include=None, exclude=None) -> DataFrame: """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. See Also -------- DataFrame.dtypes: Return Series with the data type of each column. Notes ----- * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <https://numpy.org/doc/stable/reference/arrays.scalars.html>`__ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ``'timedelta64'`` * To select Pandas categorical dtypes, use ``'category'`` * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 
'c': [1.0, 2.0] * 3}) >>> df a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int64']) b c 0 True 1.0 1 False 2.0 2 True 1.0 3 False 2.0 4 True 1.0 5 False 2.0 """ if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () selection = (frozenset(include), frozenset(exclude)) if not any(selection): raise ValueError("at least one of include or exclude must be nonempty") # convert the myriad valid dtypes object to a single representation def check_int_infer_dtype(dtypes): converted_dtypes: list[type] = [] for dtype in dtypes: # Numpy maps int to different types (int32, in64) on Windows and Linux # see https://github.com/numpy/numpy/issues/9464 if (isinstance(dtype, str) and dtype == "int") or (dtype is int): converted_dtypes.append(np.int32) converted_dtypes.append(np.int64) elif dtype == "float" or dtype is float: # GH#42452 : np.dtype("float") coerces to np.float64 from Numpy 1.20 converted_dtypes.extend([np.float64, np.float32]) else: converted_dtypes.append(infer_dtype_from_object(dtype)) return frozenset(converted_dtypes) include = check_int_infer_dtype(include) exclude = check_int_infer_dtype(exclude) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) # can't both include AND exclude! if not include.isdisjoint(exclude): raise ValueError(f"include and exclude overlap on {(include & exclude)}") def dtype_predicate(dtype: DtypeObj, dtypes_set) -> bool: # GH 46870: BooleanDtype._is_numeric == True but should be excluded return issubclass(dtype.type, tuple(dtypes_set)) or ( np.number in dtypes_set and getattr(dtype, "_is_numeric", False) and not is_bool_dtype(dtype) ) def predicate(arr: ArrayLike) -> bool: dtype = arr.dtype if include: if not dtype_predicate(dtype, include): return False if exclude: if dtype_predicate(dtype, exclude): return False return True mgr = self._mgr._get_data_subset(predicate).copy(deep=None) return type(self)(mgr).__finalize__(self) def insert( self, loc: int, column: Hashable, value: Scalar | AnyArrayLike, allow_duplicates: bool | lib.NoDefault = lib.no_default, ) -> None: """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns). column : str, number, or hashable object Label of the inserted column. value : Scalar, Series, or array-like allow_duplicates : bool, optional, default lib.no_default See Also -------- Index.insert : Insert new item by index. 
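        A common pattern (a sketch; ``before_col2`` is a hypothetical label,
        and unique columns are assumed so that ``get_loc`` returns an integer)
        is to compute ``loc`` relative to an existing label. Given a frame
        with a ``col2`` column:

        >>> df.insert(df.columns.get_loc("col2"),
        ...           "before_col2", [99, 99])  # doctest: +SKIP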
Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.insert(1, "newcol", [99, 99]) >>> df col1 newcol col2 0 1 99 3 1 2 99 4 >>> df.insert(0, "col1", [100, 100], allow_duplicates=True) >>> df col1 col1 newcol col2 0 100 1 99 3 1 100 2 99 4 Notice that pandas uses index alignment in case of `value` from type `Series`: >>> df.insert(0, "col0", pd.Series([5, 6], index=[1, 2])) >>> df col0 col1 col1 newcol col2 0 NaN 100 1 99 3 1 5.0 100 2 99 4 """ if allow_duplicates is lib.no_default: allow_duplicates = False if allow_duplicates and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'allow_duplicates=True' when " "'self.flags.allows_duplicate_labels' is False." ) if not allow_duplicates and column in self.columns: # Should this be a different kind of error?? raise ValueError(f"cannot insert {column}, already exists") if not isinstance(loc, int): raise TypeError("loc must be int") value = self._sanitize_column(value) self._mgr.insert(loc, column, value) def assign(self, **kwargs) -> DataFrame: r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. Later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 You can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy(deep=None) for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) return data def _sanitize_column(self, value) -> ArrayLike: """ Ensures new columns (which go into the BlockManager as new blocks) are always copied and converted into an array. 
Parameters ---------- value : scalar, Series, or array-like Returns ------- numpy.ndarray or ExtensionArray """ self._ensure_valid_index(value) # We can get there through isetitem with a DataFrame # or through loc single_block_path if isinstance(value, DataFrame): return _reindex_for_setitem(value, self.index) elif is_dict_like(value): return _reindex_for_setitem(Series(value), self.index) if is_list_like(value): com.require_length_match(value, self.index) return sanitize_array(value, self.index, copy=True, allow_2d=True) def _series(self): return { item: Series( self._mgr.iget(idx), index=self.index, name=item, fastpath=True ) for idx, item in enumerate(self.columns) } # ---------------------------------------------------------------------- # Reindexing and alignment def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): frame = self columns = axes["columns"] if columns is not None: frame = frame._reindex_columns( columns, method, copy, level, fill_value, limit, tolerance ) index = axes["index"] if index is not None: frame = frame._reindex_index( index, method, copy, level, fill_value, limit, tolerance ) return frame def _reindex_index( self, new_index, method, copy: bool, level: Level, fill_value=np.nan, limit=None, tolerance=None, ): new_index, indexer = self.index.reindex( new_index, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {0: [new_index, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_columns( self, new_columns, method, copy: bool, level: Level, fill_value=None, limit=None, tolerance=None, ): new_columns, indexer = self.columns.reindex( new_columns, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {1: [new_columns, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_multi( self, axes: dict[str, Index], copy: bool, fill_value ) -> DataFrame: """ We are guaranteed non-Nones in the axes. """ new_index, row_indexer = self.index.reindex(axes["index"]) new_columns, col_indexer = self.columns.reindex(axes["columns"]) if row_indexer is not None and col_indexer is not None: # Fastpath. By doing two 'take's at once we avoid making an # unnecessary copy. # We only get here with `not self._is_mixed_type`, which (almost) # ensures that self.values is cheap. It may be worth making this # condition more specific. indexer = row_indexer, col_indexer new_values = take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor( new_values, index=new_index, columns=new_columns, copy=False ) else: return self._reindex_with_indexers( {0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value, ) def align( self, other: DataFrame, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value=None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> DataFrame: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) """ Examples -------- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) Change the row labels. >>> df.set_axis(['a', 'b', 'c'], axis='index') A B a 1 4 b 2 5 c 3 6 Change the column labels. 
>>> df.set_axis(['I', 'II'], axis='columns')
   I  II
0  1   4
1  2   5
2  3   6
"""
    )
    @Substitution(
        **_shared_doc_kwargs,
        extended_summary_sub=" column or",
        axis_description_sub=", and 1 identifies the columns",
        see_also_sub=" or columns",
    )
    @Appender(NDFrame.set_axis.__doc__)
    def set_axis(
        self,
        labels,
        *,
        axis: Axis = 0,
        copy: bool | None = None,
    ) -> DataFrame:
        return super().set_axis(labels, axis=axis, copy=copy)

    # ----------------------------------------------------------------------
    # Reindex-based selection methods

    # ----------------------------------------------------------------------
    # Sorting
    # error: Signature of "sort_values" incompatible with supertype "NDFrame"
    # TODO: Just move the sort_values doc here.

    # ----------------------------------------------------------------------
    # Arithmetic Methods

    # ----------------------------------------------------------------------
    # Function application
    # error: Signature of "any" incompatible with supertype "NDFrame" [override]
    # error: Missing return statement

    # ----------------------------------------------------------------------
    # Merging / joining methods

    # ----------------------------------------------------------------------
    # Statistical methods, etc.

    # ----------------------------------------------------------------------
    # ndarray-like stats methods

    # ----------------------------------------------------------------------
    # Add index and columns

    # ----------------------------------------------------------------------
    # Add plotting methods to DataFrame

    # ----------------------------------------------------------------------
    # Internal Interface Methods


DataFrame

def read_excel(
    io,
    # sheet name is list or None -> dict[IntStrT, DataFrame]
    sheet_name: list[IntStrT] | None,
    *,
    header: int | Sequence[int] | None = ...,
    names: list[str] | None = ...,
    index_col: int | Sequence[int] | None = ...,
    usecols: int
    | str
    | Sequence[int]
    | Sequence[str]
    | Callable[[str], bool]
    | None = ...,
    dtype: DtypeArg | None = ...,
    engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb"] | None = ...,
    converters: dict[str, Callable] | dict[int, Callable] | None = ...,
    true_values: Iterable[Hashable] | None = ...,
    false_values: Iterable[Hashable] | None = ...,
    skiprows: Sequence[int] | int | Callable[[int], object] | None = ...,
    nrows: int | None = ...,
    na_values=...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    parse_dates: list | dict | bool = ...,
    date_parser: Callable | lib.NoDefault = ...,
    date_format: dict[Hashable, str] | str | None = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    comment: str | None = ...,
    skipfooter: int = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
) -> dict[IntStrT, DataFrame]:
    ...
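The overload stub above carries no docstring of its own, so here is a brief, hedged usage sketch of the behavior it encodes: passing a list (or ``None``) for ``sheet_name`` makes ``read_excel`` return a dict of DataFrames instead of a single frame. The workbook and sheet names below are invented for illustration:

import pandas as pd

# A list of sheet identifiers selects several sheets at once; the result is
# a dict mapping each requested identifier to its parsed DataFrame.
frames = pd.read_excel("workbook.xlsx", sheet_name=["Sheet1", "Sheet2"])
first = frames["Sheet1"]

# sheet_name=None reads every sheet in the workbook, again keyed by name.
all_frames = pd.read_excel("workbook.xlsx", sheet_name=None)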
null
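To round out the row above, a short combined usage sketch of the DataFrame methods its dependency snippet documents (``select_dtypes``, ``insert``, ``assign``); the column names and values are invented for illustration:

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [True, False, True], "c": [1.0, 2.0, 3.0]})

# select_dtypes requires at least one of include/exclude, and the two sets
# must be disjoint; either violation raises ValueError.
numeric = df.select_dtypes(include=[np.number])   # keeps 'a' and 'c'
no_bool = df.select_dtypes(exclude=["bool"])      # drops 'b'

# insert is positional and in-place; re-inserting an existing label raises
# unless allow_duplicates=True is passed.
df.insert(1, "a_squared", df["a"] ** 2)

# assign returns a new frame; later keyword arguments may refer to columns
# created earlier in the same call.
out = df.assign(total=lambda x: x["a"] + x["c"],
                half_total=lambda x: x["total"] / 2)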
173,560
from __future__ import annotations import abc import datetime from functools import partial from io import BytesIO import os from textwrap import fill from types import TracebackType from typing import ( IO, Any, Callable, Hashable, Iterable, List, Literal, Mapping, Sequence, Union, cast, overload, ) import zipfile from pandas._config import config from pandas._libs import lib from pandas._libs.parsers import STR_NA_VALUES from pandas._typing import ( DtypeArg, DtypeBackend, FilePath, IntStrT, ReadBuffer, StorageOptions, WriteExcelBuffer, ) from pandas.compat._optional import ( get_version, import_optional_dependency, ) from pandas.errors import EmptyDataError from pandas.util._decorators import ( Appender, doc, ) from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( is_bool, is_float, is_integer, is_list_like, ) from pandas.core.frame import DataFrame from pandas.core.shared_docs import _shared_docs from pandas.util.version import Version from pandas.io.common import ( IOHandles, get_handle, stringify_path, validate_header_arg, ) from pandas.io.excel._util import ( fill_mi_header, get_default_engine, get_writer, maybe_convert_usecols, pop_header_name, ) from pandas.io.parsers import TextParser from pandas.io.parsers.readers import validate_integer class ExcelFile: """ Class for parsing tabular Excel sheets into DataFrame objects. See read_excel for more documentation. Parameters ---------- path_or_buffer : str, bytes, path object (pathlib.Path or py._path.local.LocalPath), A file-like object, xlrd workbook or openpyxl workbook. If a string or path object, expected to be a path to a .xls, .xlsx, .xlsb, .xlsm, .odf, .ods, or .odt file. engine : str, default None If io is not a buffer or path, this must be set to identify io. Supported engines: ``xlrd``, ``openpyxl``, ``odf``, ``pyxlsb`` Engine compatibility : - ``xlrd`` supports old-style Excel files (.xls). - ``openpyxl`` supports newer Excel file formats. - ``odf`` supports OpenDocument file formats (.odf, .ods, .odt). - ``pyxlsb`` supports Binary Excel files. .. versionchanged:: 1.2.0 The engine `xlrd <https://xlrd.readthedocs.io/en/latest/>`_ now only supports old-style ``.xls`` files. When ``engine=None``, the following logic will be used to determine the engine: - If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt), then `odf <https://pypi.org/project/odfpy/>`_ will be used. - Otherwise if ``path_or_buffer`` is an xls format, ``xlrd`` will be used. - Otherwise if ``path_or_buffer`` is in xlsb format, `pyxlsb <https://pypi.org/project/pyxlsb/>`_ will be used. .. versionadded:: 1.3.0 - Otherwise if `openpyxl <https://pypi.org/project/openpyxl/>`_ is installed, then ``openpyxl`` will be used. - Otherwise if ``xlrd >= 2.0`` is installed, a ``ValueError`` will be raised. .. warning:: Please do not report issues when using ``xlrd`` to read ``.xlsx`` files. This is not supported, switch to using ``openpyxl`` instead. 
""" from pandas.io.excel._odfreader import ODFReader from pandas.io.excel._openpyxl import OpenpyxlReader from pandas.io.excel._pyxlsb import PyxlsbReader from pandas.io.excel._xlrd import XlrdReader _engines: Mapping[str, Any] = { "xlrd": XlrdReader, "openpyxl": OpenpyxlReader, "odf": ODFReader, "pyxlsb": PyxlsbReader, } def __init__( self, path_or_buffer, engine: str | None = None, storage_options: StorageOptions = None, ) -> None: if engine is not None and engine not in self._engines: raise ValueError(f"Unknown engine: {engine}") # First argument can also be bytes, so create a buffer if isinstance(path_or_buffer, bytes): path_or_buffer = BytesIO(path_or_buffer) # Could be a str, ExcelFile, Book, etc. self.io = path_or_buffer # Always a string self._io = stringify_path(path_or_buffer) # Determine xlrd version if installed if import_optional_dependency("xlrd", errors="ignore") is None: xlrd_version = None else: import xlrd xlrd_version = Version(get_version(xlrd)) if engine is None: # Only determine ext if it is needed ext: str | None if xlrd_version is not None and isinstance(path_or_buffer, xlrd.Book): ext = "xls" else: ext = inspect_excel_format( content_or_path=path_or_buffer, storage_options=storage_options ) if ext is None: raise ValueError( "Excel file format cannot be determined, you must specify " "an engine manually." ) engine = config.get_option(f"io.excel.{ext}.reader", silent=True) if engine == "auto": engine = get_default_engine(ext, mode="reader") assert engine is not None self.engine = engine self.storage_options = storage_options self._reader = self._engines[engine](self._io, storage_options=storage_options) def __fspath__(self): return self._io def parse( self, sheet_name: str | int | list[int] | list[str] | None = 0, header: int | Sequence[int] | None = 0, names=None, index_col: int | Sequence[int] | None = None, usecols=None, converters=None, true_values: Iterable[Hashable] | None = None, false_values: Iterable[Hashable] | None = None, skiprows: Sequence[int] | int | Callable[[int], object] | None = None, nrows: int | None = None, na_values=None, parse_dates: list | dict | bool = False, date_parser: Callable | lib.NoDefault = lib.no_default, date_format: str | dict[Hashable, str] | None = None, thousands: str | None = None, comment: str | None = None, skipfooter: int = 0, dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, **kwds, ) -> DataFrame | dict[str, DataFrame] | dict[int, DataFrame]: """ Parse specified sheet(s) into a DataFrame. Equivalent to read_excel(ExcelFile, ...) See the read_excel docstring for more info on accepted parameters. Returns ------- DataFrame or dict of DataFrames DataFrame from the passed in Excel file. 
""" return self._reader.parse( sheet_name=sheet_name, header=header, names=names, index_col=index_col, usecols=usecols, converters=converters, true_values=true_values, false_values=false_values, skiprows=skiprows, nrows=nrows, na_values=na_values, parse_dates=parse_dates, date_parser=date_parser, date_format=date_format, thousands=thousands, comment=comment, skipfooter=skipfooter, dtype_backend=dtype_backend, **kwds, ) def book(self): return self._reader.book def sheet_names(self): return self._reader.sheet_names def close(self) -> None: """close io if necessary""" self._reader.close() def __enter__(self) -> ExcelFile: return self def __exit__( self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None, ) -> None: self.close() class Callable(BaseTypingInstance): def py__call__(self, arguments): """ def x() -> Callable[[Callable[..., _T]], _T]: ... """ # The 0th index are the arguments. try: param_values = self._generics_manager[0] result_values = self._generics_manager[1] except IndexError: debug.warning('Callable[...] defined without two arguments') return NO_VALUES else: from jedi.inference.gradual.annotation import infer_return_for_callable return infer_return_for_callable(arguments, param_values, result_values) def py__get__(self, instance, class_value): return ValueSet([self]) class Hashable(Protocol, metaclass=ABCMeta): # TODO: This is special, in that a subclass of a hashable class may not be hashable # (for example, list vs. object). It's not obvious how to represent this. This class # is currently mostly useless for static checking. def __hash__(self) -> int: ... class Iterable(Protocol[_T_co]): def __iter__(self) -> Iterator[_T_co]: ... class Sequence(_Collection[_T_co], Reversible[_T_co], Generic[_T_co]): def __getitem__(self, i: int) -> _T_co: ... def __getitem__(self, s: slice) -> Sequence[_T_co]: ... # Mixin methods def index(self, value: Any, start: int = ..., stop: int = ...) -> int: ... def count(self, value: Any) -> int: ... def __contains__(self, x: object) -> bool: ... def __iter__(self) -> Iterator[_T_co]: ... def __reversed__(self) -> Iterator[_T_co]: ... Literal: _SpecialForm = ... IntStrT = TypeVar("IntStrT", int, str) DtypeArg = Union[Dtype, Dict[Hashable, Dtype]] StorageOptions = Optional[Dict[str, Any]] DtypeBackend = Literal["pyarrow", "numpy_nullable"] def check_dtype_backend(dtype_backend) -> None: if dtype_backend is not lib.no_default: if dtype_backend not in ["numpy_nullable", "pyarrow"]: raise ValueError( f"dtype_backend {dtype_backend} is invalid, only 'numpy_nullable' and " f"'pyarrow' are allowed.", ) class DataFrame(NDFrame, OpsMixin): """ Two-dimensional, size-mutable, potentially heterogeneous tabular data. Data structure also contains labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure. Parameters ---------- data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame Dict can contain Series, arrays, constants, dataclass or list-like objects. If data is a dict, column order follows insertion-order. If a dict contains Series which have an index defined, it is aligned by its index. This alignment also occurs if data is a Series or a DataFrame itself. Alignment is done on Series/DataFrame inputs. If data is a list of dicts, column order follows insertion-order. index : Index or array-like Index to use for resulting frame. 
Will default to RangeIndex if no indexing information part of input data and no index provided. columns : Index or array-like Column labels to use for resulting frame when data does not have them, defaulting to RangeIndex(0, 1, 2, ..., n). If data contains column labels, will perform column selection instead. dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer. copy : bool or None, default None Copy data from inputs. For dict data, the default of None behaves like ``copy=True``. For DataFrame or 2d ndarray input, the default of None behaves like ``copy=False``. If data is a dict containing one or more Series (possibly of different dtypes), ``copy=False`` will ensure that these inputs are not copied. .. versionchanged:: 1.3.0 See Also -------- DataFrame.from_records : Constructor from tuples, also record arrays. DataFrame.from_dict : From dicts of Series, arrays, or dicts. read_csv : Read a comma-separated values (csv) file into DataFrame. read_table : Read general delimited file into DataFrame. read_clipboard : Read text from clipboard into DataFrame. Notes ----- Please reference the :ref:`User Guide <basics.dataframe>` for more information. Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = pd.DataFrame(data=d) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. >>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = pd.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from a dictionary including Series: >>> d = {'col1': [0, 1, 2, 3], 'col2': pd.Series([2, 3], index=[2, 3])} >>> pd.DataFrame(data=d, index=[0, 1, 2, 3]) col1 col2 0 0 NaN 1 1 NaN 2 2 2.0 3 3 3.0 Constructing DataFrame from numpy ndarray: >>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), ... columns=['a', 'b', 'c']) >>> df2 a b c 0 1 2 3 1 4 5 6 2 7 8 9 Constructing DataFrame from a numpy ndarray that has labeled columns: >>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], ... dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")]) >>> df3 = pd.DataFrame(data, columns=['c', 'a']) ... 
>>> df3 c a 0 3 1 1 6 4 2 9 7 Constructing DataFrame from dataclass: >>> from dataclasses import make_dataclass >>> Point = make_dataclass("Point", [("x", int), ("y", int)]) >>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)]) x y 0 0 0 1 0 3 2 2 3 Constructing DataFrame from Series/DataFrame: >>> ser = pd.Series([1, 2, 3], index=["a", "b", "c"]) >>> df = pd.DataFrame(data=ser, index=["a", "c"]) >>> df 0 a 1 c 3 >>> df1 = pd.DataFrame([1, 2, 3], index=["a", "b", "c"], columns=["x"]) >>> df2 = pd.DataFrame(data=df1, index=["a", "c"]) >>> df2 x a 1 c 3 """ _internal_names_set = {"columns", "index"} | NDFrame._internal_names_set _typ = "dataframe" _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray) _accessors: set[str] = {"sparse"} _hidden_attrs: frozenset[str] = NDFrame._hidden_attrs | frozenset([]) _mgr: BlockManager | ArrayManager def _constructor(self) -> Callable[..., DataFrame]: return DataFrame _constructor_sliced: Callable[..., Series] = Series # ---------------------------------------------------------------------- # Constructors def __init__( self, data=None, index: Axes | None = None, columns: Axes | None = None, dtype: Dtype | None = None, copy: bool | None = None, ) -> None: if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._mgr if not copy: # if not copying data, ensure to still return a shallow copy # to avoid the result sharing the same Manager data = data.copy(deep=False) if isinstance(data, (BlockManager, ArrayManager)): if using_copy_on_write(): data = data.copy(deep=False) # first check if a Manager is passed without any other arguments # -> use fastpath (without checking Manager type) if index is None and columns is None and dtype is None and not copy: # GH#33357 fastpath NDFrame.__init__(self, data) return manager = get_option("mode.data_manager") # GH47215 if index is not None and isinstance(index, set): raise ValueError("index cannot be a set") if columns is not None and isinstance(columns, set): raise ValueError("columns cannot be a set") if copy is None: if isinstance(data, dict): # retain pre-GH#38939 default behavior copy = True elif ( manager == "array" and isinstance(data, (np.ndarray, ExtensionArray)) and data.ndim == 2 ): # INFO(ArrayManager) by default copy the 2D input array to get # contiguous 1D arrays copy = True elif using_copy_on_write() and not isinstance( data, (Index, DataFrame, Series) ): copy = True else: copy = False if data is None: index = index if index is not None else default_index(0) columns = columns if columns is not None else default_index(0) dtype = dtype if dtype is not None else pandas_dtype(object) data = [] if isinstance(data, (BlockManager, ArrayManager)): mgr = self._init_mgr( data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy ) elif isinstance(data, dict): # GH#38939 de facto copy defaults to False only in non-dict cases mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager) elif isinstance(data, ma.MaskedArray): from numpy.ma import mrecords # masked recarray if isinstance(data, mrecords.MaskedRecords): raise TypeError( "MaskedRecords are not supported. Pass " "{name: data[name] for name in data.dtype.names} " "instead" ) # a masked array data = sanitize_masked_array(data) mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) elif isinstance(data, (np.ndarray, Series, Index, ExtensionArray)): if data.dtype.names: # i.e. 
numpy structured array data = cast(np.ndarray, data) mgr = rec_array_to_mgr( data, index, columns, dtype, copy, typ=manager, ) elif getattr(data, "name", None) is not None: # i.e. Series/Index with non-None name _copy = copy if using_copy_on_write() else True mgr = dict_to_mgr( # error: Item "ndarray" of "Union[ndarray, Series, Index]" has no # attribute "name" {data.name: data}, # type: ignore[union-attr] index, columns, dtype=dtype, typ=manager, copy=_copy, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) # For data is list-like, or Iterable (will consume into list) elif is_list_like(data): if not isinstance(data, abc.Sequence): if hasattr(data, "__array__"): # GH#44616 big perf improvement for e.g. pytorch tensor data = np.asarray(data) else: data = list(data) if len(data) > 0: if is_dataclass(data[0]): data = dataclasses_to_dicts(data) if not isinstance(data, np.ndarray) and treat_as_nested(data): # exclude ndarray as we may have cast it a few lines above if columns is not None: columns = ensure_index(columns) arrays, columns, index = nested_data_to_arrays( # error: Argument 3 to "nested_data_to_arrays" has incompatible # type "Optional[Collection[Any]]"; expected "Optional[Index]" data, columns, index, # type: ignore[arg-type] dtype, ) mgr = arrays_to_mgr( arrays, columns, index, dtype=dtype, typ=manager, ) else: mgr = ndarray_to_mgr( data, index, columns, dtype=dtype, copy=copy, typ=manager, ) else: mgr = dict_to_mgr( {}, index, columns if columns is not None else default_index(0), dtype=dtype, typ=manager, ) # For data is scalar else: if index is None or columns is None: raise ValueError("DataFrame constructor not properly called!") index = ensure_index(index) columns = ensure_index(columns) if not dtype: dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True) # For data is a scalar extension dtype if isinstance(dtype, ExtensionDtype): # TODO(EA2D): special case not needed with 2D EAs values = [ construct_1d_arraylike_from_scalar(data, len(index), dtype) for _ in range(len(columns)) ] mgr = arrays_to_mgr(values, columns, index, dtype=None, typ=manager) else: arr2d = construct_2d_arraylike_from_scalar( data, len(index), len(columns), dtype, copy, ) mgr = ndarray_to_mgr( arr2d, index, columns, dtype=arr2d.dtype, copy=False, typ=manager, ) # ensure correct Manager type according to settings mgr = mgr_to_mgr(mgr, typ=manager) NDFrame.__init__(self, mgr) # ---------------------------------------------------------------------- def __dataframe__( self, nan_as_null: bool = False, allow_copy: bool = True ) -> DataFrameXchg: """ Return the dataframe interchange object implementing the interchange protocol. Parameters ---------- nan_as_null : bool, default False Whether to tell the DataFrame to overwrite null values in the data with ``NaN`` (or ``NaT``). allow_copy : bool, default True Whether to allow memory copying when exporting. If set to False it would cause non-zero-copy exports to fail. Returns ------- DataFrame interchange object The object which consuming library can use to ingress the dataframe. Notes ----- Details on the interchange protocol: https://data-apis.org/dataframe-protocol/latest/index.html `nan_as_null` currently has no effect; once support for nullable extension dtypes is added, this value should be propagated to columns. 
""" from pandas.core.interchange.dataframe import PandasDataFrameXchg return PandasDataFrameXchg(self, nan_as_null, allow_copy) # ---------------------------------------------------------------------- def axes(self) -> list[Index]: """ Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns] def shape(self) -> tuple[int, int]: """ Return a tuple representing the dimensionality of the DataFrame. See Also -------- ndarray.shape : Tuple of array dimensions. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self.index), len(self.columns) def _is_homogeneous_type(self) -> bool: """ Whether all the columns in a DataFrame have the same type. Returns ------- bool See Also -------- Index._is_homogeneous_type : Whether the object has a single dtype. MultiIndex._is_homogeneous_type : Whether all the levels of a MultiIndex have the same dtype. Examples -------- >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type True >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type False Items with the same type but different sizes are considered different types. >>> DataFrame({ ... "A": np.array([1, 2], dtype=np.int32), ... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type False """ if isinstance(self._mgr, ArrayManager): return len({arr.dtype for arr in self._mgr.arrays}) == 1 if self._mgr.any_extension_types: return len({block.dtype for block in self._mgr.blocks}) == 1 else: return not self._is_mixed_type def _can_fast_transpose(self) -> bool: """ Can we transpose this DataFrame without creating any new array objects. """ if isinstance(self._mgr, ArrayManager): return False blocks = self._mgr.blocks if len(blocks) != 1: return False dtype = blocks[0].dtype # TODO(EA2D) special case would be unnecessary with 2D EAs return not is_1d_only_ea_dtype(dtype) def _values(self) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray: """ Analogue to ._values that may return a 2D ExtensionArray. """ mgr = self._mgr if isinstance(mgr, ArrayManager): if len(mgr.arrays) == 1 and not is_1d_only_ea_dtype(mgr.arrays[0].dtype): # error: Item "ExtensionArray" of "Union[ndarray, ExtensionArray]" # has no attribute "reshape" return mgr.arrays[0].reshape(-1, 1) # type: ignore[union-attr] return ensure_wrapped_if_datetimelike(self.values) blocks = mgr.blocks if len(blocks) != 1: return ensure_wrapped_if_datetimelike(self.values) arr = blocks[0].values if arr.ndim == 1: # non-2D ExtensionArray return self.values # more generally, whatever we allow in NDArrayBackedExtensionBlock arr = cast("np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray", arr) return arr.T # ---------------------------------------------------------------------- # Rendering Methods def _repr_fits_vertical_(self) -> bool: """ Check length against max_rows. """ max_rows = get_option("display.max_rows") return len(self) <= max_rows def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool: """ Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns. In case of non-interactive session, no boundaries apply. 
`ignore_width` is here so ipynb+HTML output can behave the way users expect. display.max_columns remains in effect. GH3541, GH3573 """ width, height = console.get_console_size() max_columns = get_option("display.max_columns") nb_columns = len(self.columns) # exceed max columns if (max_columns and nb_columns > max_columns) or ( (not ignore_width) and width and nb_columns > (width // 2) ): return False # used by repr_html under IPython notebook or scripts ignore terminal # dims if ignore_width or width is None or not console.in_interactive_session(): return True if get_option("display.width") is not None or console.in_ipython_frontend(): # check at least the column row for excessive width max_rows = 1 else: max_rows = get_option("display.max_rows") # when auto-detecting, so width=None and not in ipython front end # check whether repr fits horizontal by actually checking # the width of the rendered repr buf = StringIO() # only care about the stuff we'll actually print out # and to_string on entire frame may be expensive d = self if max_rows is not None: # unlimited rows # min of two, where one may be None d = d.iloc[: min(max_rows, len(d))] else: return True d.to_string(buf=buf) value = buf.getvalue() repr_width = max(len(line) for line in value.split("\n")) return repr_width < width def _info_repr(self) -> bool: """ True if the repr should show the info view. """ info_repr_option = get_option("display.large_repr") == "info" return info_repr_option and not ( self._repr_fits_horizontal_() and self._repr_fits_vertical_() ) def __repr__(self) -> str: """ Return a string representation for a particular DataFrame. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) return buf.getvalue() repr_params = fmt.get_dataframe_repr_params() return self.to_string(**repr_params) def _repr_html_(self) -> str | None: """ Return a html representation for a particular DataFrame. Mainly for IPython notebook. """ if self._info_repr(): buf = StringIO() self.info(buf=buf) # need to escape the <class>, should be the first line. val = buf.getvalue().replace("<", r"&lt;", 1) val = val.replace(">", r"&gt;", 1) return f"<pre>{val}</pre>" if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") min_rows = get_option("display.min_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") formatter = fmt.DataFrameFormatter( self, columns=None, col_space=None, na_rep="NaN", formatters=None, float_format=None, sparsify=None, justify=None, index_names=True, header=True, index=True, bold_rows=True, escape=True, max_rows=max_rows, min_rows=min_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=".", ) return fmt.DataFrameRenderer(formatter).to_html(notebook=True) else: return None def to_string( self, buf: None = ..., columns: Sequence[str] | None = ..., col_space: int | list[int] | dict[Hashable, int] | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: fmt.FormattersType | None = ..., float_format: fmt.FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool = ..., decimal: str = ..., line_width: int | None = ..., min_rows: int | None = ..., max_colwidth: int | None = ..., encoding: str | None = ..., ) -> str: ... 
    def to_string(
        self,
        buf: FilePath | WriteBuffer[str],
        columns: Sequence[str] | None = ...,
        col_space: int | list[int] | dict[Hashable, int] | None = ...,
        header: bool | Sequence[str] = ...,
        index: bool = ...,
        na_rep: str = ...,
        formatters: fmt.FormattersType | None = ...,
        float_format: fmt.FloatFormatType | None = ...,
        sparsify: bool | None = ...,
        index_names: bool = ...,
        justify: str | None = ...,
        max_rows: int | None = ...,
        max_cols: int | None = ...,
        show_dimensions: bool = ...,
        decimal: str = ...,
        line_width: int | None = ...,
        min_rows: int | None = ...,
        max_colwidth: int | None = ...,
        encoding: str | None = ...,
    ) -> None:
        ...

    @Substitution(
        header_type="bool or sequence of str",
        header="Write out the column names. If a list of strings "
        "is given, it is assumed to be aliases for the "
        "column names",
        col_space_type="int, list or dict of int",
        col_space="The minimum width of each column. If a list of ints is given "
        "each integer corresponds to one column. If a dict is given, the key "
        "references the column, while the value defines the space to use.",
    )
    def to_string(
        self,
        buf: FilePath | WriteBuffer[str] | None = None,
        columns: Sequence[str] | None = None,
        col_space: int | list[int] | dict[Hashable, int] | None = None,
        header: bool | Sequence[str] = True,
        index: bool = True,
        na_rep: str = "NaN",
        formatters: fmt.FormattersType | None = None,
        float_format: fmt.FloatFormatType | None = None,
        sparsify: bool | None = None,
        index_names: bool = True,
        justify: str | None = None,
        max_rows: int | None = None,
        max_cols: int | None = None,
        show_dimensions: bool = False,
        decimal: str = ".",
        line_width: int | None = None,
        min_rows: int | None = None,
        max_colwidth: int | None = None,
        encoding: str | None = None,
    ) -> str | None:
        """
        Render a DataFrame to a console-friendly tabular output.
        %(shared_params)s
        line_width : int, optional
            Width to wrap a line in characters.
        min_rows : int, optional
            The number of rows to display in the console in a truncated repr
            (when number of rows is above `max_rows`).
        max_colwidth : int, optional
            Max width to truncate each column in characters. By default, no limit.
        encoding : str, default "utf-8"
            Set character encoding.
        %(returns)s
        See Also
        --------
        to_html : Convert DataFrame to HTML.

        Examples
        --------
        >>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]}
        >>> df = pd.DataFrame(d)
        >>> print(df.to_string())
           col1  col2
        0     1     4
        1     2     5
        2     3     6
        """
        from pandas import option_context

        with option_context("display.max_colwidth", max_colwidth):
            formatter = fmt.DataFrameFormatter(
                self,
                columns=columns,
                col_space=col_space,
                na_rep=na_rep,
                formatters=formatters,
                float_format=float_format,
                sparsify=sparsify,
                justify=justify,
                index_names=index_names,
                header=header,
                index=index,
                min_rows=min_rows,
                max_rows=max_rows,
                max_cols=max_cols,
                show_dimensions=show_dimensions,
                decimal=decimal,
            )
            return fmt.DataFrameRenderer(formatter).to_string(
                buf=buf,
                encoding=encoding,
                line_width=line_width,
            )

    # ----------------------------------------------------------------------

    def style(self) -> Styler:
        """
        Returns a Styler object.

        Contains methods for building a styled HTML representation of the
        DataFrame.

        See Also
        --------
        io.formats.style.Styler : Helps style a DataFrame or Series according
            to the data with HTML and CSS.
        """
        from pandas.io.formats.style import Styler

        return Styler(self)

    _shared_docs[
        "items"
    ] = r"""
    Iterate over (column name, Series) pairs.

    Iterates over the DataFrame columns, returning a tuple with
    the column name and the content as a Series.
Yields ------ label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. Examples -------- >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.items(): ... print(f'label: {label}') ... print(f'content: {content}', sep='\n') ... label: species content: panda bear polar bear koala marsupial Name: species, dtype: object label: population content: panda 1864 polar 22000 koala 80000 Name: population, dtype: int64 """ def items(self) -> Iterable[tuple[Hashable, Series]]: if self.columns.is_unique and hasattr(self, "_item_cache"): for k in self.columns: yield k, self._get_item_cache(k) else: for i, k in enumerate(self.columns): yield k, self._ixs(i, axis=1) def iterrows(self) -> Iterable[tuple[Hashable, Series]]: """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : Series The data of the row as a Series. See Also -------- DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- 1. Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. """ columns = self.columns klass = self._constructor_sliced using_cow = using_copy_on_write() for k, v in zip(self.index, self.values): s = klass(v, index=columns, name=k).__finalize__(self) if using_cow and self._mgr.is_single_block: s._mgr.add_references(self._mgr) # type: ignore[arg-type] yield k, s def itertuples( self, index: bool = True, name: str | None = "Pandas" ) -> Iterable[tuple[Any, ...]]: """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "Pandas" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. 
Examples -------- >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... Pandas(Index='dog', num_legs=4, num_wings=0) Pandas(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... Pandas(num_legs=4, num_wings=0) Pandas(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2) """ arrays = [] fields = list(self.columns) if index: arrays.append(self.index) fields.insert(0, "Index") # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) if name is not None: # https://github.com/python/mypy/issues/9046 # error: namedtuple() expects a string literal as the first argument itertuple = collections.namedtuple( # type: ignore[misc] name, fields, rename=True ) return map(itertuple._make, zip(*arrays)) # fallback to regular tuples return zip(*arrays) def __len__(self) -> int: """ Returns length of info axis, but here we use the index. """ return len(self.index) def dot(self, other: Series) -> Series: ... def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame: ... def dot(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of an other Series, DataFrame or a numpy array. It can also be called using ``self @ other`` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the matrix product with. Returns ------- Series or DataFrame If other is a Series, return the matrix product between self and other as a Series. If other is a DataFrame or a numpy.array, return the matrix product of self and other in a DataFrame of a np.array. See Also -------- Series.dot: Similar method for Series. Notes ----- The dimensions of DataFrame and other must be compatible in order to compute the matrix multiplication. In addition, the column names of DataFrame and the index of other must contain the same values, as they will be aligned prior to the multiplication. The dot method for Series computes the inner product, instead of the matrix product here. Examples -------- Here we multiply a DataFrame with a Series. >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) >>> s = pd.Series([1, 1, 2, 1]) >>> df.dot(s) 0 -4 1 5 dtype: int64 Here we multiply a DataFrame with another DataFrame. >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(other) 0 1 0 1 4 1 2 2 Note that the dot method give the same result as @ >>> df @ other 0 1 0 1 4 1 2 2 The dot method works also if other is an np.array. >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(arr) 0 1 0 1 4 1 2 2 Note how shuffling of the objects does not change the result. 
>>> s2 = s.reindex([1, 0, 2, 3]) >>> df.dot(s2) 0 -4 1 5 dtype: int64 """ if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) if len(common) > len(self.columns) or len(common) > len(other.index): raise ValueError("matrices are not aligned") left = self.reindex(columns=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right._values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: raise ValueError( f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" ) if isinstance(other, DataFrame): return self._constructor( np.dot(lvals, rvals), index=left.index, columns=other.columns, copy=False, ) elif isinstance(other, Series): return self._constructor_sliced( np.dot(lvals, rvals), index=left.index, copy=False ) elif isinstance(rvals, (np.ndarray, Index)): result = np.dot(lvals, rvals) if result.ndim == 2: return self._constructor(result, index=left.index, copy=False) else: return self._constructor_sliced(result, index=left.index, copy=False) else: # pragma: no cover raise TypeError(f"unsupported type: {type(other)}") def __matmul__(self, other: Series) -> Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: ... def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ return self.dot(other) def __rmatmul__(self, other) -> DataFrame: """ Matrix multiplication using binary `@` operator in Python>=3.5. """ try: return self.T.dot(np.transpose(other)).T except ValueError as err: if "shape mismatch" not in str(err): raise # GH#21581 give exception message for original shapes msg = f"shapes {np.shape(other)} and {self.shape} not aligned" raise ValueError(msg) from err # ---------------------------------------------------------------------- # IO methods (to / from other formats) def from_dict( cls, data: dict, orient: str = "columns", dtype: Dtype | None = None, columns: Axes | None = None, ) -> DataFrame: """ Construct DataFrame from dict of array-like or dicts. Creates DataFrame object from dictionary by columns or by index allowing dtype specification. Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. orient : {'columns', 'index', 'tight'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. If 'tight', assume a dict with keys ['index', 'columns', 'data', 'index_names', 'column_names']. .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument dtype : dtype, default None Data type to force after DataFrame construction, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'`` or ``orient='tight'``. Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from structured ndarray, sequence of tuples or dicts, or DataFrame. DataFrame : DataFrame object creation using constructor. DataFrame.to_dict : Convert the DataFrame to a dictionary. 
Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data, orient='index') 0 1 2 3 row_1 3 2 1 0 row_2 a b c d When using the 'index' orientation, the column names can be specified manually: >>> pd.DataFrame.from_dict(data, orient='index', ... columns=['A', 'B', 'C', 'D']) A B C D row_1 3 2 1 0 row_2 a b c d Specify ``orient='tight'`` to create the DataFrame using a 'tight' format: >>> data = {'index': [('a', 'b'), ('a', 'c')], ... 'columns': [('x', 1), ('y', 2)], ... 'data': [[1, 3], [2, 4]], ... 'index_names': ['n1', 'n2'], ... 'column_names': ['z1', 'z2']} >>> pd.DataFrame.from_dict(data, orient='tight') z1 x y z2 1 2 n1 n2 a b 1 3 c 2 4 """ index = None orient = orient.lower() if orient == "index": if len(data) > 0: # TODO speed up Series case if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: index = list(data.keys()) # error: Incompatible types in assignment (expression has type # "List[Any]", variable has type "Dict[Any, Any]") data = list(data.values()) # type: ignore[assignment] elif orient in ("columns", "tight"): if columns is not None: raise ValueError(f"cannot use columns parameter with orient='{orient}'") else: # pragma: no cover raise ValueError( f"Expected 'index', 'columns' or 'tight' for orient parameter. " f"Got '{orient}' instead" ) if orient != "tight": return cls(data, index=index, columns=columns, dtype=dtype) else: realdata = data["data"] def create_index(indexlist, namelist): index: Index if len(namelist) > 1: index = MultiIndex.from_tuples(indexlist, names=namelist) else: index = Index(indexlist, name=namelist[0]) return index index = create_index(data["index"], data["index_names"]) columns = create_index(data["columns"], data["column_names"]) return cls(realdata, index=index, columns=columns, dtype=dtype) def to_numpy( self, dtype: npt.DTypeLike | None = None, copy: bool = False, na_value: object = lib.no_default, ) -> np.ndarray: """ Convert the DataFrame to a NumPy array. By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the results dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray`. copy : bool, default False Whether to ensure that the returned value is not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. na_value : Any, optional The value to use for missing values. The default value depends on `dtype` and the dtypes of the DataFrame columns. .. versionadded:: 1.1.0 Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogeneous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. 
, 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) """ if dtype is not None: dtype = np.dtype(dtype) result = self._mgr.as_array(dtype=dtype, copy=copy, na_value=na_value) if result.dtype is not dtype: result = np.array(result, dtype=dtype, copy=False) return result def _create_data_for_split_and_tight_to_dict( self, are_all_object_dtype_cols: bool, object_dtype_indices: list[int] ) -> list: """ Simple helper method to create data for to ``to_dict(orient="split")`` and ``to_dict(orient="tight")`` to create the main output data """ if are_all_object_dtype_cols: data = [ list(map(maybe_box_native, t)) for t in self.itertuples(index=False, name=None) ] else: data = [list(t) for t in self.itertuples(index=False, name=None)] if object_dtype_indices: # If we have object_dtype_cols, apply maybe_box_naive after list # comprehension for perf for row in data: for i in object_dtype_indices: row[i] = maybe_box_native(row[i]) return data def to_dict( self, orient: Literal["dict", "list", "series", "split", "tight", "index"] = ..., into: type[dict] = ..., ) -> dict: ... def to_dict(self, orient: Literal["records"], into: type[dict] = ...) -> list[dict]: ... def to_dict( self, orient: Literal[ "dict", "list", "series", "split", "tight", "records", "index" ] = "dict", into: type[dict] = dict, index: bool = True, ) -> dict | list[dict]: """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'tight' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values], 'index_names' -> [index.names], 'column_names' -> [column.names]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} .. versionadded:: 1.4.0 'tight' as an allowed value for the ``orient`` argument into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. index : bool, default True Whether to include the index item (and index_names item if `orient` is 'tight') in the returned dictionary. Can only be ``False`` when `orient` is 'split' or 'tight'. .. versionadded:: 2.0.0 Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. See Also -------- DataFrame.from_dict: Create a DataFrame from a dictionary. DataFrame.to_json: Convert a DataFrame to JSON format. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df.to_dict() {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}} You can specify the return orientation. 
>>> df.to_dict('series') {'col1': row1 1 row2 2 Name: col1, dtype: int64, 'col2': row1 0.50 row2 0.75 Name: col2, dtype: float64} >>> df.to_dict('split') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]} >>> df.to_dict('records') [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}] >>> df.to_dict('index') {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} >>> df.to_dict('tight') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]], 'index_names': [None], 'column_names': [None]} You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}), defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})] """ from pandas.core.methods.to_dict import to_dict return to_dict(self, orient, into, index) def to_gbq( self, destination_table: str, project_id: str | None = None, chunksize: int | None = None, reauth: bool = False, if_exists: str = "fail", auth_local_webserver: bool = True, table_schema: list[dict[str, str]] | None = None, location: str | None = None, progress_bar: bool = True, credentials=None, ) -> None: """ Write a DataFrame to a Google BigQuery table. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. Parameters ---------- destination_table : str Name of table to be written, in the form ``dataset.tablename``. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. chunksize : int, optional Number of rows to be inserted in each chunk from the dataframe. Set to ``None`` to load the whole dataframe at once. reauth : bool, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. if_exists : str, default 'fail' Behavior when the destination table exists. Value can be one of: ``'fail'`` If table exists raise pandas_gbq.gbq.TableCreationError. ``'replace'`` If table exists, drop it, recreate it, and insert data. ``'append'`` If table exists, insert data. Create if does not exist. auth_local_webserver : bool, default True Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. _console flow: https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. .. versionchanged:: 1.5.0 Default value is changed to ``True``. Google has deprecated the ``auth_local_webserver = False`` `"out of band" (copy-paste) flow <https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html?m=1#disallowed-oob>`_. table_schema : list of dicts, optional List of BigQuery table fields to which according DataFrame columns conform to, e.g. ``[{'name': 'col1', 'type': 'STRING'},...]``. 
If schema is not provided, it will be generated according to dtypes of DataFrame columns. See BigQuery API documentation on available names of a field. *New in version 0.3.1 of pandas-gbq*. location : str, optional Location where the load job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of the target dataset. *New in version 0.5.0 of pandas-gbq*. progress_bar : bool, default True Use the library `tqdm` to show the progress bar for the upload, chunk by chunk. *New in version 0.5.0 of pandas-gbq*. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. *New in version 0.8.0 of pandas-gbq*. See Also -------- pandas_gbq.to_gbq : This function in the pandas-gbq library. read_gbq : Read a DataFrame from Google BigQuery. """ from pandas.io import gbq gbq.to_gbq( self, destination_table, project_id=project_id, chunksize=chunksize, reauth=reauth, if_exists=if_exists, auth_local_webserver=auth_local_webserver, table_schema=table_schema, location=location, progress_bar=progress_bar, credentials=credentials, ) def from_records( cls, data, index=None, exclude=None, columns=None, coerce_float: bool = False, nrows: int | None = None, ) -> DataFrame: """ Convert structured or record ndarray to DataFrame. Creates a DataFrame object from a structured ndarray, sequence of tuples or dicts, or DataFrame. Parameters ---------- data : structured ndarray, sequence of tuples or dicts, or DataFrame Structured input data. index : str, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use. exclude : sequence, default None Columns or fields to exclude. columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns). coerce_float : bool, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets. nrows : int, default None Number of rows to read if data is an iterator. Returns ------- DataFrame See Also -------- DataFrame.from_dict : DataFrame from dict of array-like or dicts. DataFrame : DataFrame object creation using constructor. Examples -------- Data can be provided as a structured ndarray: >>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')], ... dtype=[('col_1', 'i4'), ('col_2', 'U1')]) >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of dicts: >>> data = [{'col_1': 3, 'col_2': 'a'}, ... {'col_1': 2, 'col_2': 'b'}, ... {'col_1': 1, 'col_2': 'c'}, ... 
{'col_1': 0, 'col_2': 'd'}] >>> pd.DataFrame.from_records(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Data can be provided as a list of tuples with corresponding columns: >>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')] >>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2']) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d """ if isinstance(data, DataFrame): if columns is not None: if is_scalar(columns): columns = [columns] data = data[columns] if index is not None: data = data.set_index(index) if exclude is not None: data = data.drop(columns=exclude) return data.copy(deep=False) result_index = None # Make a copy of the input columns so we can modify it if columns is not None: columns = ensure_index(columns) def maybe_reorder( arrays: list[ArrayLike], arr_columns: Index, columns: Index, index ) -> tuple[list[ArrayLike], Index, Index | None]: """ If our desired 'columns' do not match the data's pre-existing 'arr_columns', we re-order our arrays. This is like a pre-emptive (cheap) reindex. """ if len(arrays): length = len(arrays[0]) else: length = 0 result_index = None if len(arrays) == 0 and index is None and length == 0: result_index = default_index(0) arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, length) return arrays, arr_columns, result_index if is_iterator(data): if nrows == 0: return cls() try: first_row = next(data) except StopIteration: return cls(index=index, columns=columns) dtype = None if hasattr(first_row, "dtype") and first_row.dtype.names: dtype = first_row.dtype values = [first_row] if nrows is None: values += data else: values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) else: data = values if isinstance(data, dict): if columns is None: columns = arr_columns = ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] arr_columns_list = [] for k, v in data.items(): if k in columns: arr_columns_list.append(k) arrays.append(v) arr_columns = Index(arr_columns_list) arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = to_arrays(data, columns) arr_columns = columns else: arrays, arr_columns = to_arrays(data, columns) if coerce_float: for i, arr in enumerate(arrays): if arr.dtype == object: # error: Argument 1 to "maybe_convert_objects" has # incompatible type "Union[ExtensionArray, ndarray]"; # expected "ndarray" arrays[i] = lib.maybe_convert_objects( arr, # type: ignore[arg-type] try_float=True, ) arr_columns = ensure_index(arr_columns) if columns is None: columns = arr_columns else: arrays, arr_columns, result_index = maybe_reorder( arrays, arr_columns, columns, index ) if exclude is None: exclude = set() else: exclude = set(exclude) if index is not None: if isinstance(index, str) or not hasattr(index, "__iter__"): i = columns.get_loc(index) exclude.add(index) if len(arrays) > 0: result_index = Index(arrays[i], name=index) else: result_index = Index([], name=index) else: try: index_data = [arrays[arr_columns.get_loc(field)] for field in index] except (KeyError, TypeError): # raised by get_loc, see GH#29258 result_index = index else: result_index = ensure_index_from_sequences(index_data, names=index) exclude.update(index) if any(exclude): arr_exclude = [x for x in exclude if x in arr_columns] to_remove = [arr_columns.get_loc(col) for col in arr_exclude] arrays = [v for i, v in enumerate(arrays) if i not in to_remove] columns = columns.drop(exclude) manager = 
get_option("mode.data_manager") mgr = arrays_to_mgr(arrays, columns, result_index, typ=manager) return cls(mgr) def to_records( self, index: bool = True, column_dtypes=None, index_dtypes=None ) -> np.recarray: """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. column_dtypes : str, type, dict, default None If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) If the DataFrame index has no label then the recarray field name is set to 'index'. If the index has a label then this is used as the field name: >>> df.index = df.index.rename("I") >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')]) As well as for the index: >>> df.to_records(index_dtypes="<S2") rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')]) >>> index_dtypes = f"<S{df.index.str.len().max()}" >>> df.to_records(index_dtypes=index_dtypes) rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) """ if index: ix_vals = [ np.asarray(self.index.get_level_values(i)) for i in range(self.index.nlevels) ] arrays = ix_vals + [ np.asarray(self.iloc[:, i]) for i in range(len(self.columns)) ] index_names = list(self.index.names) if isinstance(self.index, MultiIndex): index_names = com.fill_missing_names(index_names) elif index_names[0] is None: index_names = ["index"] names = [str(name) for name in itertools.chain(index_names, self.columns)] else: arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))] names = [str(c) for c in self.columns] index_names = [] index_len = len(index_names) formats = [] for i, v in enumerate(arrays): index_int = i # When the names and arrays are collected, we # first collect those in the DataFrame's index, # followed by those in its columns. # # Thus, the total length of the array is: # len(index_names) + len(DataFrame.columns). # # This check allows us to see whether we are # handling a name / array in the index or column. 
            if index_int < index_len:
                dtype_mapping = index_dtypes
                name = index_names[index_int]
            else:
                index_int -= index_len
                dtype_mapping = column_dtypes
                name = self.columns[index_int]

            # We have a dictionary, so we get the data type
            # associated with the index or column (which can
            # be denoted by its name in the DataFrame or its
            # position in the DataFrame's array of indices or
            # columns, whichever is applicable).
            if is_dict_like(dtype_mapping):
                if name in dtype_mapping:
                    dtype_mapping = dtype_mapping[name]
                elif index_int in dtype_mapping:
                    dtype_mapping = dtype_mapping[index_int]
                else:
                    dtype_mapping = None

            # If no mapping can be found, use the array's
            # dtype attribute for formatting.
            #
            # A valid dtype must either be a type or
            # a string naming a type.
            if dtype_mapping is None:
                formats.append(v.dtype)
            elif isinstance(dtype_mapping, (type, np.dtype, str)):
                # error: Argument 1 to "append" of "list" has incompatible
                # type "Union[type, dtype[Any], str]"; expected "dtype[Any]"
                formats.append(dtype_mapping)  # type: ignore[arg-type]
            else:
                element = "row" if i < index_len else "column"
                msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}"
                raise ValueError(msg)

        return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats})

    @classmethod
    def _from_arrays(
        cls,
        arrays,
        columns,
        index,
        dtype: Dtype | None = None,
        verify_integrity: bool = True,
    ) -> DataFrame:
        """
        Create DataFrame from a list of arrays corresponding to the columns.

        Parameters
        ----------
        arrays : list-like of arrays
            Each array in the list corresponds to one column, in order.
        columns : list-like, Index
            The column names for the resulting DataFrame.
        index : list-like, Index
            The row labels for the resulting DataFrame.
        dtype : dtype, optional
            Optional dtype to enforce for all arrays.
        verify_integrity : bool, default True
            Validate and homogenize all input. If set to False, it is assumed
            that all elements of `arrays` are actual arrays as they will be
            stored in a block (numpy ndarray or ExtensionArray), have the same
            length as and are aligned with the index, and that `columns` and
            `index` are ensured to be an Index object.

        Returns
        -------
        DataFrame
        """
        if dtype is not None:
            dtype = pandas_dtype(dtype)

        manager = get_option("mode.data_manager")
        columns = ensure_index(columns)
        if len(columns) != len(arrays):
            raise ValueError("len(columns) must match len(arrays)")
        mgr = arrays_to_mgr(
            arrays,
            columns,
            index,
            dtype=dtype,
            verify_integrity=verify_integrity,
            typ=manager,
        )
        return cls(mgr)

    @doc(
        storage_options=_shared_docs["storage_options"],
        compression_options=_shared_docs["compression_options"] % "path",
    )
    def to_stata(
        self,
        path: FilePath | WriteBuffer[bytes],
        *,
        convert_dates: dict[Hashable, str] | None = None,
        write_index: bool = True,
        byteorder: str | None = None,
        time_stamp: datetime.datetime | None = None,
        data_label: str | None = None,
        variable_labels: dict[Hashable, str] | None = None,
        version: int | None = 114,
        convert_strl: Sequence[Hashable] | None = None,
        compression: CompressionOptions = "infer",
        storage_options: StorageOptions = None,
        value_labels: dict[Hashable, dict[float, str]] | None = None,
    ) -> None:
        """
        Export DataFrame object to Stata dta format.

        Writes the DataFrame to a Stata dataset file.
        "dta" files contain a Stata dataset.

        Parameters
        ----------
        path : str, path object, or buffer
            String, path object (implementing ``os.PathLike[str]``), or
            file-like object implementing a binary ``write()`` function.

        convert_dates : dict
            Dictionary mapping columns containing datetime types to stata
            internal format to use when writing the dates.
Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information. write_index : bool Write the index to Stata dataset. byteorder : str Can be ">", "<", "little", or "big". default is `sys.byteorder`. time_stamp : datetime A datetime to use as file creation date. Default is the current time. data_label : str, optional A label for the data set. Must be 80 characters or smaller. variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. version : {{114, 117, 118, 119, None}}, default 114 Version to use in the output dta file. Set to None to let pandas decide between 118 or 119 formats depending on the number of columns in the frame. Version 114 can be read by Stata 10 and later. Version 117 can be read by Stata 13 or later. Version 118 is supported in Stata 14 and later. Version 119 is supported in Stata 15 and later. Version 114 limits string variables to 244 characters or fewer while versions 117 and later allow strings with lengths up to 2,000,000 characters. Versions 118 and 119 support Unicode characters, and version 119 supports more than 32,767 variables. Version 119 should usually only be used when the number of variables exceeds the capacity of dta format 118. Exporting smaller datasets in format 119 may have unintended consequences, and, as of November 2020, Stata SE cannot read version 119 files. convert_strl : list, optional List of column names to convert to string columns to Stata StrL format. Only available if version is 117. Storing strings in the StrL format can produce smaller dta files if strings have more than 8 characters and values are repeated. {compression_options} .. versionadded:: 1.1.0 .. versionchanged:: 1.4.0 Zstandard support. {storage_options} .. versionadded:: 1.2.0 value_labels : dict of dicts Dictionary containing columns as keys and dictionaries of column value to labels as values. Labels for a single variable must be 32,000 characters or smaller. .. versionadded:: 1.4.0 Raises ------ NotImplementedError * If datetimes contain timezone information * Column dtype is not representable in Stata ValueError * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters See Also -------- read_stata : Import Stata data files. io.stata.StataWriter : Low-level writer for Stata data files. io.stata.StataWriter117 : Low-level writer for version 117 files. Examples -------- >>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], ... 
'speed': [350, 18, 361, 15]}}) >>> df.to_stata('animals.dta') # doctest: +SKIP """ if version not in (114, 117, 118, 119, None): raise ValueError("Only formats 114, 117, 118 and 119 are supported.") if version == 114: if convert_strl is not None: raise ValueError("strl is not supported in format 114") from pandas.io.stata import StataWriter as statawriter elif version == 117: # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriter117 as statawriter, ) else: # versions 118 and 119 # Incompatible import of "statawriter" (imported name has type # "Type[StataWriter117]", local name has type "Type[StataWriter]") from pandas.io.stata import ( # type: ignore[assignment] StataWriterUTF8 as statawriter, ) kwargs: dict[str, Any] = {} if version is None or version >= 117: # strl conversion is only supported >= 117 kwargs["convert_strl"] = convert_strl if version is None or version >= 118: # Specifying the version is only supported for UTF8 (118 or 119) kwargs["version"] = version writer = statawriter( path, self, convert_dates=convert_dates, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, write_index=write_index, variable_labels=variable_labels, compression=compression, storage_options=storage_options, value_labels=value_labels, **kwargs, ) writer.write_file() def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None: """ Write a DataFrame to the binary Feather format. Parameters ---------- path : str, path object, file-like object String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a binary ``write()`` function. If a string or a path, it will be used as Root Directory path when writing a partitioned dataset. **kwargs : Additional keywords passed to :func:`pyarrow.feather.write_feather`. Starting with pyarrow 0.17, this includes the `compression`, `compression_level`, `chunksize` and `version` keywords. .. versionadded:: 1.1.0 Notes ----- This function writes the dataframe as a `feather file <https://arrow.apache.org/docs/python/feather.html>`_. Requires a default index. For saving the DataFrame with your custom index use a method that supports custom indices e.g. `to_parquet`. """ from pandas.io.feather_format import to_feather to_feather(self, path, **kwargs) Series.to_markdown, klass=_shared_doc_kwargs["klass"], storage_options=_shared_docs["storage_options"], examples="""Examples -------- >>> df = pd.DataFrame( ... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]} ... ) >>> print(df.to_markdown()) | | animal_1 | animal_2 | |---:|:-----------|:-----------| | 0 | elk | dog | | 1 | pig | quetzal | Output markdown with a tabulate option. 
        >>> print(df.to_markdown(tablefmt="grid"))
        +----+------------+------------+
        |    | animal_1   | animal_2   |
        +====+============+============+
        |  0 | elk        | dog        |
        +----+------------+------------+
        |  1 | pig        | quetzal    |
        +----+------------+------------+""",
    )
    def to_markdown(
        self,
        buf: FilePath | WriteBuffer[str] | None = None,
        mode: str = "wt",
        index: bool = True,
        storage_options: StorageOptions = None,
        **kwargs,
    ) -> str | None:
        if "showindex" in kwargs:
            raise ValueError("Pass 'index' instead of 'showindex'")

        kwargs.setdefault("headers", "keys")
        kwargs.setdefault("tablefmt", "pipe")
        kwargs.setdefault("showindex", index)
        tabulate = import_optional_dependency("tabulate")
        result = tabulate.tabulate(self, **kwargs)
        if buf is None:
            return result

        with get_handle(buf, mode, storage_options=storage_options) as handles:
            handles.handle.write(result)
        return None

    @overload
    def to_parquet(
        self,
        path: None = ...,
        engine: str = ...,
        compression: str | None = ...,
        index: bool | None = ...,
        partition_cols: list[str] | None = ...,
        storage_options: StorageOptions = ...,
        **kwargs,
    ) -> bytes:
        ...

    @overload
    def to_parquet(
        self,
        path: FilePath | WriteBuffer[bytes],
        engine: str = ...,
        compression: str | None = ...,
        index: bool | None = ...,
        partition_cols: list[str] | None = ...,
        storage_options: StorageOptions = ...,
        **kwargs,
    ) -> None:
        ...

    @doc(storage_options=_shared_docs["storage_options"])
    def to_parquet(
        self,
        path: FilePath | WriteBuffer[bytes] | None = None,
        engine: str = "auto",
        compression: str | None = "snappy",
        index: bool | None = None,
        partition_cols: list[str] | None = None,
        storage_options: StorageOptions = None,
        **kwargs,
    ) -> bytes | None:
        """
        Write a DataFrame to the binary parquet format.

        This function writes the dataframe as a `parquet file
        <https://parquet.apache.org/>`_. You can choose different parquet
        backends, and have the option of compression. See
        :ref:`the user guide <io.parquet>` for more details.

        Parameters
        ----------
        path : str, path object, file-like object, or None, default None
            String, path object (implementing ``os.PathLike[str]``), or
            file-like object implementing a binary ``write()`` function. If
            None, the result is returned as bytes. If a string or path, it
            will be used as Root Directory path when writing a partitioned
            dataset.

            .. versionchanged:: 1.2.0

            Previously this was "fname"

        engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
            Parquet library to use. If 'auto', then the option
            ``io.parquet.engine`` is used. The default ``io.parquet.engine``
            behavior is to try 'pyarrow', falling back to 'fastparquet' if
            'pyarrow' is unavailable.
        compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy'
            Name of the compression to use. Use ``None`` for no compression.
        index : bool, default None
            If ``True``, include the dataframe's index(es) in the file output.
            If ``False``, they will not be written to the file.
            If ``None``, similar to ``True`` the dataframe's index(es)
            will be saved. However, instead of being saved as values,
            the RangeIndex will be stored as a range in the metadata so it
            doesn't require much space and is faster. Other indexes will
            be included as columns in the file output.
        partition_cols : list, optional, default None
            Column names by which to partition the dataset.
            Columns are partitioned in the order they are given.
            Must be None if path is not a string.
        {storage_options}

            .. versionadded:: 1.2.0

        **kwargs
            Additional arguments passed to the parquet library. See
            :ref:`pandas io <io.parquet>` for more details.

        Returns
        -------
        bytes if no path argument is provided else None

        See Also
        --------
        read_parquet : Read a parquet file.
DataFrame.to_orc : Write an orc file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- This function requires either the `fastparquet <https://pypi.org/project/fastparquet>`_ or `pyarrow <https://arrow.apache.org/docs/python/>`_ library. Examples -------- >>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}}) >>> df.to_parquet('df.parquet.gzip', ... compression='gzip') # doctest: +SKIP >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP col1 col2 0 1 3 1 2 4 If you want to get a buffer to the parquet content you can use a io.BytesIO object, as long as you don't use partition_cols, which creates multiple files. >>> import io >>> f = io.BytesIO() >>> df.to_parquet(f) >>> f.seek(0) 0 >>> content = f.read() """ from pandas.io.parquet import to_parquet return to_parquet( self, path, engine, compression=compression, index=index, partition_cols=partition_cols, storage_options=storage_options, **kwargs, ) def to_orc( self, path: FilePath | WriteBuffer[bytes] | None = None, *, engine: Literal["pyarrow"] = "pyarrow", index: bool | None = None, engine_kwargs: dict[str, Any] | None = None, ) -> bytes | None: """ Write a DataFrame to the ORC format. .. versionadded:: 1.5.0 Parameters ---------- path : str, file-like object or None, default None If a string, it will be used as Root Directory path when writing a partitioned dataset. By file-like object, we refer to objects with a write() method, such as a file handle (e.g. via builtin open function). If path is None, a bytes object is returned. engine : str, default 'pyarrow' ORC library to use. Pyarrow must be >= 7.0.0. index : bool, optional If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, similar to ``infer`` the dataframe's index(es) will be saved. However, instead of being saved as values, the RangeIndex will be stored as a range in the metadata so it doesn't require much space and is faster. Other indexes will be included as columns in the file output. engine_kwargs : dict[str, Any] or None, default None Additional keyword arguments passed to :func:`pyarrow.orc.write_table`. Returns ------- bytes if no path argument is provided else None Raises ------ NotImplementedError Dtype of one or more columns is category, unsigned integers, interval, period or sparse. ValueError engine is not pyarrow. See Also -------- read_orc : Read a ORC file. DataFrame.to_parquet : Write a parquet file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- * Before using this function you should read the :ref:`user guide about ORC <io.orc>` and :ref:`install optional dependencies <install.warn_orc>`. * This function requires `pyarrow <https://arrow.apache.org/docs/python/>`_ library. * For supported dtypes please refer to `supported ORC features in Arrow <https://arrow.apache.org/docs/cpp/orc.html#data-types>`__. * Currently timezones in datetime columns are not preserved when a dataframe is converted into ORC files. 
Examples -------- >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [4, 3]}) >>> df.to_orc('df.orc') # doctest: +SKIP >>> pd.read_orc('df.orc') # doctest: +SKIP col1 col2 0 1 4 1 2 3 If you want to get a buffer to the orc content you can write it to io.BytesIO >>> import io >>> b = io.BytesIO(df.to_orc()) # doctest: +SKIP >>> b.seek(0) # doctest: +SKIP 0 >>> content = b.read() # doctest: +SKIP """ from pandas.io.orc import to_orc return to_orc( self, path, engine=engine, index=index, engine_kwargs=engine_kwargs ) def to_html( self, buf: FilePath | WriteBuffer[str], columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> None: ... def to_html( self, buf: None = ..., columns: Sequence[Level] | None = ..., col_space: ColspaceArgType | None = ..., header: bool | Sequence[str] = ..., index: bool = ..., na_rep: str = ..., formatters: FormattersType | None = ..., float_format: FloatFormatType | None = ..., sparsify: bool | None = ..., index_names: bool = ..., justify: str | None = ..., max_rows: int | None = ..., max_cols: int | None = ..., show_dimensions: bool | str = ..., decimal: str = ..., bold_rows: bool = ..., classes: str | list | tuple | None = ..., escape: bool = ..., notebook: bool = ..., border: int | bool | None = ..., table_id: str | None = ..., render_links: bool = ..., encoding: str | None = ..., ) -> str: ... header_type="bool", header="Whether to print column labels, default True", col_space_type="str or int, list or dict of int or str", col_space="The minimum width of each column in CSS length " "units. An int is assumed to be px units.", ) def to_html( self, buf: FilePath | WriteBuffer[str] | None = None, columns: Sequence[Level] | None = None, col_space: ColspaceArgType | None = None, header: bool | Sequence[str] = True, index: bool = True, na_rep: str = "NaN", formatters: FormattersType | None = None, float_format: FloatFormatType | None = None, sparsify: bool | None = None, index_names: bool = True, justify: str | None = None, max_rows: int | None = None, max_cols: int | None = None, show_dimensions: bool | str = False, decimal: str = ".", bold_rows: bool = True, classes: str | list | tuple | None = None, escape: bool = True, notebook: bool = False, border: int | bool | None = None, table_id: str | None = None, render_links: bool = False, encoding: str | None = None, ) -> str | None: """ Render a DataFrame as an HTML table. %(shared_params)s bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.display.html.border``. 
table_id : str, optional A css id is included in the opening `<table>` tag if specified. render_links : bool, default False Convert URLs to HTML links. encoding : str, default "utf-8" Set character encoding. .. versionadded:: 1.0 %(returns)s See Also -------- to_string : Convert DataFrame to a string. """ if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS: raise ValueError("Invalid value for justify parameter") formatter = fmt.DataFrameFormatter( self, columns=columns, col_space=col_space, na_rep=na_rep, header=header, index=index, formatters=formatters, float_format=float_format, bold_rows=bold_rows, sparsify=sparsify, justify=justify, index_names=index_names, escape=escape, decimal=decimal, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, ) # TODO: a generic formatter wld b in DataFrameFormatter return fmt.DataFrameRenderer(formatter).to_html( buf=buf, classes=classes, notebook=notebook, border=border, encoding=encoding, table_id=table_id, render_links=render_links, ) storage_options=_shared_docs["storage_options"], compression_options=_shared_docs["compression_options"] % "path_or_buffer", ) def to_xml( self, path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None, index: bool = True, root_name: str | None = "data", row_name: str | None = "row", na_rep: str | None = None, attr_cols: list[str] | None = None, elem_cols: list[str] | None = None, namespaces: dict[str | None, str] | None = None, prefix: str | None = None, encoding: str = "utf-8", xml_declaration: bool | None = True, pretty_print: bool | None = True, parser: str | None = "lxml", stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = None, compression: CompressionOptions = "infer", storage_options: StorageOptions = None, ) -> str | None: """ Render a DataFrame to an XML document. .. versionadded:: 1.3.0 Parameters ---------- path_or_buffer : str, path object, file-like object, or None, default None String, path object (implementing ``os.PathLike[str]``), or file-like object implementing a ``write()`` function. If None, the result is returned as a string. index : bool, default True Whether to include index in XML document. root_name : str, default 'data' The name of root element in XML document. row_name : str, default 'row' The name of row element in XML document. na_rep : str, optional Missing data representation. attr_cols : list-like, optional List of columns to write as attributes in row element. Hierarchical columns will be flattened with underscore delimiting the different levels. elem_cols : list-like, optional List of columns to write as children in row element. By default, all columns output as children of row element. Hierarchical columns will be flattened with underscore delimiting the different levels. namespaces : dict, optional All namespaces to be defined in root element. Keys of dict should be prefix names and values of dict corresponding URIs. Default namespaces should be given empty string key. For example, :: namespaces = {{"": "https://example.com"}} prefix : str, optional Namespace prefix to be used for every element and/or attribute in document. This should be one of the keys in ``namespaces`` dict. encoding : str, default 'utf-8' Encoding of the resulting document. xml_declaration : bool, default True Whether to include the XML declaration at start of document. pretty_print : bool, default True Whether output should be pretty printed with indentation and line breaks. 
parser : {{'lxml','etree'}}, default 'lxml' Parser module to use for building of tree. Only 'lxml' and 'etree' are supported. With 'lxml', the ability to use XSLT stylesheet is supported. stylesheet : str, path object or file-like object, optional A URL, file-like object, or a raw string containing an XSLT script used to transform the raw XML output. Script should use layout of elements and attributes from original output. This argument requires ``lxml`` to be installed. Only XSLT 1.0 scripts and not later versions is currently supported. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. {storage_options} Returns ------- None or str If ``io`` is None, returns the resulting XML format as a string. Otherwise returns None. See Also -------- to_json : Convert the pandas object to a JSON string. to_html : Convert DataFrame to a html. Examples -------- >>> df = pd.DataFrame({{'shape': ['square', 'circle', 'triangle'], ... 'degrees': [360, 360, 180], ... 'sides': [4, np.nan, 3]}}) >>> df.to_xml() # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row> <index>0</index> <shape>square</shape> <degrees>360</degrees> <sides>4.0</sides> </row> <row> <index>1</index> <shape>circle</shape> <degrees>360</degrees> <sides/> </row> <row> <index>2</index> <shape>triangle</shape> <degrees>180</degrees> <sides>3.0</sides> </row> </data> >>> df.to_xml(attr_cols=[ ... 'index', 'shape', 'degrees', 'sides' ... ]) # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <data> <row index="0" shape="square" degrees="360" sides="4.0"/> <row index="1" shape="circle" degrees="360"/> <row index="2" shape="triangle" degrees="180" sides="3.0"/> </data> >>> df.to_xml(namespaces={{"doc": "https://example.com"}}, ... prefix="doc") # doctest: +SKIP <?xml version='1.0' encoding='utf-8'?> <doc:data xmlns:doc="https://example.com"> <doc:row> <doc:index>0</doc:index> <doc:shape>square</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides>4.0</doc:sides> </doc:row> <doc:row> <doc:index>1</doc:index> <doc:shape>circle</doc:shape> <doc:degrees>360</doc:degrees> <doc:sides/> </doc:row> <doc:row> <doc:index>2</doc:index> <doc:shape>triangle</doc:shape> <doc:degrees>180</doc:degrees> <doc:sides>3.0</doc:sides> </doc:row> </doc:data> """ from pandas.io.formats.xml import ( EtreeXMLFormatter, LxmlXMLFormatter, ) lxml = import_optional_dependency("lxml.etree", errors="ignore") TreeBuilder: type[EtreeXMLFormatter] | type[LxmlXMLFormatter] if parser == "lxml": if lxml is not None: TreeBuilder = LxmlXMLFormatter else: raise ImportError( "lxml not found, please install or use the etree parser." 
) elif parser == "etree": TreeBuilder = EtreeXMLFormatter else: raise ValueError("Values for parser can only be lxml or etree.") xml_formatter = TreeBuilder( self, path_or_buffer=path_or_buffer, index=index, root_name=root_name, row_name=row_name, na_rep=na_rep, attr_cols=attr_cols, elem_cols=elem_cols, namespaces=namespaces, prefix=prefix, encoding=encoding, xml_declaration=xml_declaration, pretty_print=pretty_print, stylesheet=stylesheet, compression=compression, storage_options=storage_options, ) return xml_formatter.write_output() # ---------------------------------------------------------------------- def info( self, verbose: bool | None = None, buf: WriteBuffer[str] | None = None, max_cols: int | None = None, memory_usage: bool | str | None = None, show_counts: bool | None = None, ) -> None: info = DataFrameInfo( data=self, memory_usage=memory_usage, ) info.render( buf=buf, max_cols=max_cols, verbose=verbose, show_counts=show_counts, ) def memory_usage(self, index: bool = True, deep: bool = False) -> Series: """ Return the memory usage of each column in bytes. The memory usage can optionally include the contribution of the index and elements of `object` dtype. This value is displayed in `DataFrame.info` by default. This can be suppressed by setting ``pandas.options.display.memory_usage`` to False. Parameters ---------- index : bool, default True Specifies whether to include the memory usage of the DataFrame's index in returned Series. If ``index=True``, the memory usage of the index is the first item in the output. deep : bool, default False If True, introspect the data deeply by interrogating `object` dtypes for system-level memory consumption, and include it in the returned values. Returns ------- Series A Series whose index is the original column names and whose values is the memory usage of each column in bytes. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of an ndarray. Series.memory_usage : Bytes consumed by a Series. Categorical : Memory-efficient array for string values with many repeated values. DataFrame.info : Concise summary of a DataFrame. Notes ----- See the :ref:`Frequently Asked Questions <df-memory-usage>` for more details. Examples -------- >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] >>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t)) ... for t in dtypes]) >>> df = pd.DataFrame(data) >>> df.head() int64 float64 complex128 object bool 0 1 1.0 1.0+0.0j 1 True 1 1 1.0 1.0+0.0j 1 True 2 1 1.0 1.0+0.0j 1 True 3 1 1.0 1.0+0.0j 1 True 4 1 1.0 1.0+0.0j 1 True >>> df.memory_usage() Index 128 int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=False) int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 The memory footprint of `object` dtype columns is ignored by default: >>> df.memory_usage(deep=True) Index 128 int64 40000 float64 40000 complex128 80000 object 180000 bool 5000 dtype: int64 Use a Categorical for efficient storage of an object-dtype column with many repeated values. 
>>> df['object'].astype('category').memory_usage(deep=True) 5244 """ result = self._constructor_sliced( [c.memory_usage(index=False, deep=deep) for col, c in self.items()], index=self.columns, dtype=np.intp, ) if index: index_memory_usage = self._constructor_sliced( self.index.memory_usage(deep=deep), index=["Index"] ) result = index_memory_usage._append(result) return result def transpose(self, *args, copy: bool = False) -> DataFrame: """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. Parameters ---------- *args : tuple, optional Accepted for compatibility with NumPy. copy : bool, default False Whether to copy the data after transposing, even for DataFrames with a single dtype. Note that a copy is always required for mixed dtype DataFrames, or for DataFrames with any extension types. Returns ------- DataFrame The transposed DataFrame. See Also -------- numpy.transpose : Permute the dimensions of a given array. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the `object` dtype. In such a case, a copy of the data is always made. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = pd.DataFrame(data=d1) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T # or df1.transpose() >>> df1_transposed 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'name': ['Alice', 'Bob'], ... 'score': [9.5, 8], ... 'employed': [False, True], ... 'kids': [0, 0]} >>> df2 = pd.DataFrame(data=d2) >>> df2 name score employed kids 0 Alice 9.5 False 0 1 Bob 8.0 True 0 >>> df2_transposed = df2.T # or df2.transpose() >>> df2_transposed 0 1 name Alice Bob score 9.5 8.0 employed False True kids 0 0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the `object` dtype: >>> df2.dtypes name object score float64 employed bool kids int64 dtype: object >>> df2_transposed.dtypes 0 object 1 object dtype: object """ nv.validate_transpose(args, {}) # construct the args dtypes = list(self.dtypes) if self._can_fast_transpose: # Note: tests pass without this, but this improves perf quite a bit. new_vals = self._values.T if copy and not using_copy_on_write(): new_vals = new_vals.copy() result = self._constructor( new_vals, index=self.columns, columns=self.index, copy=False ) if using_copy_on_write() and len(self) > 0: result._mgr.add_references(self._mgr) # type: ignore[arg-type] elif ( self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]) ): # We have EAs with the same dtype. We can preserve that dtype in transpose. dtype = dtypes[0] arr_type = dtype.construct_array_type() values = self.values new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values] result = type(self)._from_arrays( new_values, index=self.columns, columns=self.index ) else: new_arr = self.values.T if copy and not using_copy_on_write(): new_arr = new_arr.copy() result = self._constructor( new_arr, index=self.columns, columns=self.index, # We already made a copy (more than one block) copy=False, ) return result.__finalize__(self, method="transpose") def T(self) -> DataFrame: """ The transpose of the DataFrame. 
Returns ------- DataFrame The transposed DataFrame. See Also -------- DataFrame.transpose : Transpose index and columns. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.T 0 1 col1 1 2 col2 3 4 """ return self.transpose() # ---------------------------------------------------------------------- # Indexing Methods def _ixs(self, i: int, axis: AxisInt = 0) -> Series: """ Parameters ---------- i : int axis : int Returns ------- Series """ # irow if axis == 0: new_mgr = self._mgr.fast_xs(i) # if we are a copy, mark as such copy = isinstance(new_mgr.array, np.ndarray) and new_mgr.array.base is None result = self._constructor_sliced(new_mgr, name=self.index[i]).__finalize__( self ) result._set_is_copy(self, copy=copy) return result # icol else: label = self.columns[i] col_mgr = self._mgr.iget(i) result = self._box_col_values(col_mgr, i) # this is a cached value, mark it so result._set_as_cached(label, self) return result def _get_column_array(self, i: int) -> ArrayLike: """ Get the values of the i'th column (ndarray or ExtensionArray, as stored in the Block) Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ return self._mgr.iget_values(i) def _iter_column_arrays(self) -> Iterator[ArrayLike]: """ Iterate over the arrays of all columns in order. This returns the values as stored in the Block (ndarray or ExtensionArray). Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution (for read-only purposes). """ for i in range(len(self.columns)): yield self._get_column_array(i) def _getitem_nocopy(self, key: list): """ Behaves like __getitem__, but returns a view in cases where __getitem__ would make a copy. """ # TODO(CoW): can be removed if/when we are always Copy-on-Write indexer = self.columns._get_indexer_strict(key, "columns")[1] new_axis = self.columns[indexer] new_mgr = self._mgr.reindex_indexer( new_axis, indexer, axis=0, allow_dups=True, copy=False, only_slice=True, ) return self._constructor(new_mgr) def __getitem__(self, key): check_dict_or_set_indexers(key) key = lib.item_from_zerodim(key) key = com.apply_if_callable(key, self) if is_hashable(key) and not is_iterator(key): # is_iterator to exclude generator e.g. test_getitem_listlike # shortcut if the key is in columns is_mi = isinstance(self.columns, MultiIndex) # GH#45316 Return view if key is not duplicated # Only use drop_duplicates with duplicates for performance if not is_mi and ( self.columns.is_unique and key in self.columns or key in self.columns.drop_duplicates(keep=False) ): return self._get_item_cache(key) elif is_mi and self.columns.is_unique and key in self.columns: return self._getitem_multilevel(key) # Do we have a slicer (on rows)? if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind="getitem") if isinstance(indexer, np.ndarray): # reachable with DatetimeIndex indexer = lib.maybe_indices_to_slice( indexer.astype(np.intp, copy=False), len(self) ) if isinstance(indexer, np.ndarray): # GH#43223 If we can not convert, use take return self.take(indexer, axis=0) return self._slice(indexer, axis=0) # Do we have a (boolean) DataFrame? if isinstance(key, DataFrame): return self.where(key) # Do we have a (boolean) 1d indexer? 
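        # (illustrative: a mask like df[df["A"] > 0] or df[[True, False]]
        # lands here and selects *rows*, unlike label keys, which select
        # columns)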
if com.is_bool_indexer(key): return self._getitem_bool_array(key) # We are left with two options: a single key, and a collection of keys, # We interpret tuples as collections only for non-MultiIndex is_single_key = isinstance(key, tuple) or not is_list_like(key) if is_single_key: if self.columns.nlevels > 1: return self._getitem_multilevel(key) indexer = self.columns.get_loc(key) if is_integer(indexer): indexer = [indexer] else: if is_iterator(key): key = list(key) indexer = self.columns._get_indexer_strict(key, "columns")[1] # take() does not accept boolean indexers if getattr(indexer, "dtype", None) == bool: indexer = np.where(indexer)[0] data = self._take_with_is_copy(indexer, axis=1) if is_single_key: # What does looking for a single key in a non-unique index return? # The behavior is inconsistent. It returns a Series, except when # - the key itself is repeated (test on data.shape, #9519), or # - we have a MultiIndex on columns (test on self.columns, #21309) if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex): # GH#26490 using data[key] can cause RecursionError return data._get_item_cache(key) return data def _getitem_bool_array(self, key): # also raises Exception if object array with NA values # warning here just in case -- previously __setitem__ was # reindexing but __getitem__ was not; it seems more reasonable to # go with the __setitem__ behavior since that is more consistent # with all other indexing behavior if isinstance(key, Series) and not key.index.equals(self.index): warnings.warn( "Boolean Series key will be reindexed to match DataFrame index.", UserWarning, stacklevel=find_stack_level(), ) elif len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}." ) # check_bool_indexer will throw exception if Series key cannot # be reindexed to match DataFrame rows key = check_bool_indexer(self.index, key) if key.all(): return self.copy(deep=None) indexer = key.nonzero()[0] return self._take_with_is_copy(indexer, axis=0) def _getitem_multilevel(self, key): # self.columns is a MultiIndex loc = self.columns.get_loc(key) if isinstance(loc, (slice, np.ndarray)): new_columns = self.columns[loc] result_columns = maybe_droplevels(new_columns, key) if self._is_mixed_type: result = self.reindex(columns=new_columns) result.columns = result_columns else: new_values = self._values[:, loc] result = self._constructor( new_values, index=self.index, columns=result_columns, copy=False ) if using_copy_on_write() and isinstance(loc, slice): result._mgr.add_references(self._mgr) # type: ignore[arg-type] result = result.__finalize__(self) # If there is only one column being returned, and its name is # either an empty string, or a tuple with an empty string as its # first element, then treat the empty string as a placeholder # and return the column as if the user had provided that empty # string in the key. If the result is a Series, exclude the # implied empty string from its name. if len(result.columns) == 1: # e.g. 
test_frame_getitem_multicolumn_empty_level, # test_frame_mixed_depth_get, test_loc_setitem_single_column_slice top = result.columns[0] if isinstance(top, tuple): top = top[0] if top == "": result = result[""] if isinstance(result, Series): result = self._constructor_sliced( result, index=self.index, name=key ) result._set_is_copy(self) return result else: # loc is neither a slice nor ndarray, so must be an int return self._ixs(loc, axis=1) def _get_value(self, index, col, takeable: bool = False) -> Scalar: """ Quickly retrieve single value at passed column and index. Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- scalar Notes ----- Assumes that both `self.index._index_as_unique` and `self.columns._index_as_unique`; Caller is responsible for checking. """ if takeable: series = self._ixs(col, axis=1) return series._values[index] series = self._get_item_cache(col) engine = self.index._engine if not isinstance(self.index, MultiIndex): # CategoricalIndex: Trying to use the engine fastpath may give incorrect # results if our categories are integers that dont match our codes # IntervalIndex: IntervalTree has no get_loc row = self.index.get_loc(index) return series._values[row] # For MultiIndex going through engine effectively restricts us to # same-length tuples; see test_get_set_value_no_partial_indexing loc = engine.get_loc(index) return series._values[loc] def isetitem(self, loc, value) -> None: """ Set the given value in the column with position `loc`. This is a positional analogue to ``__setitem__``. Parameters ---------- loc : int or sequence of ints Index position for the column. value : scalar or arraylike Value(s) for the column. Notes ----- ``frame.isetitem(loc, value)`` is an in-place method as it will modify the DataFrame in place (not returning a new object). In contrast to ``frame.iloc[:, i] = value`` which will try to update the existing values in place, ``frame.isetitem(loc, value)`` will not update the values of the column itself in place, it will instead insert a new array. In cases where ``frame.columns`` is unique, this is equivalent to ``frame[frame.columns[i]] = value``. 
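
        Examples
        --------
        A minimal sketch (the frame and values are purely illustrative):

        >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
        >>> df.isetitem(1, [10, 20])
        >>> df
           a   b
        0  1  10
        1  2  20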
""" if isinstance(value, DataFrame): if is_scalar(loc): loc = [loc] for i, idx in enumerate(loc): arraylike = self._sanitize_column(value.iloc[:, i]) self._iset_item_mgr(idx, arraylike, inplace=False) return arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=False) def __setitem__(self, key, value): if not PYPY and using_copy_on_write(): if sys.getrefcount(self) <= 3: warnings.warn( _chained_assignment_msg, ChainedAssignmentError, stacklevel=2 ) key = com.apply_if_callable(key, self) # see if we can slice the rows if isinstance(key, slice): slc = self.index._convert_slice_indexer(key, kind="getitem") return self._setitem_slice(slc, value) if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2: self._setitem_frame(key, value) elif isinstance(key, (Series, np.ndarray, list, Index)): self._setitem_array(key, value) elif isinstance(value, DataFrame): self._set_item_frame_value(key, value) elif ( is_list_like(value) and not self.columns.is_unique and 1 < len(self.columns.get_indexer_for([key])) == len(value) ): # Column to set is duplicated self._setitem_array([key], value) else: # set column self._set_item(key, value) def _setitem_slice(self, key: slice, value) -> None: # NB: we can't just use self.loc[key] = value because that # operates on labels and we need to operate positional for # backwards-compat, xref GH#31469 self._check_setitem_copy() self.iloc[key] = value def _setitem_array(self, key, value): # also raises Exception if object array with NA values if com.is_bool_indexer(key): # bool indexer is indexing along rows if len(key) != len(self.index): raise ValueError( f"Item wrong length {len(key)} instead of {len(self.index)}!" ) key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] self._check_setitem_copy() if isinstance(value, DataFrame): # GH#39931 reindex since iloc does not align value = value.reindex(self.index.take(indexer)) self.iloc[indexer] = value else: # Note: unlike self.iloc[:, indexer] = value, this will # never try to overwrite values inplace if isinstance(value, DataFrame): check_key_length(self.columns, key, value) for k1, k2 in zip(key, value.columns): self[k1] = value[k2] elif not is_list_like(value): for col in key: self[col] = value elif isinstance(value, np.ndarray) and value.ndim == 2: self._iset_not_inplace(key, value) elif np.ndim(value) > 1: # list of lists value = DataFrame(value).values return self._setitem_array(key, value) else: self._iset_not_inplace(key, value) def _iset_not_inplace(self, key, value): # GH#39510 when setting with df[key] = obj with a list-like key and # list-like value, we iterate over those listlikes and set columns # one at a time. This is different from dispatching to # `self.loc[:, key]= value` because loc.__setitem__ may overwrite # data inplace, whereas this will insert new arrays. def igetitem(obj, i: int): # Note: we catch DataFrame obj before getting here, but # hypothetically would return obj.iloc[:, i] if isinstance(obj, np.ndarray): return obj[..., i] else: return obj[i] if self.columns.is_unique: if np.shape(value)[-1] != len(key): raise ValueError("Columns must be same length as key") for i, col in enumerate(key): self[col] = igetitem(value, i) else: ilocs = self.columns.get_indexer_non_unique(key)[0] if (ilocs < 0).any(): # key entries not in self.columns raise NotImplementedError if np.shape(value)[-1] != len(ilocs): raise ValueError("Columns must be same length as key") assert np.ndim(value) <= 2 orig_columns = self.columns # Using self.iloc[:, i] = ... 
may set values inplace, which # by convention we do not do in __setitem__ try: self.columns = Index(range(len(self.columns))) for i, iloc in enumerate(ilocs): self[iloc] = igetitem(value, i) finally: self.columns = orig_columns def _setitem_frame(self, key, value): # support boolean setting with DataFrame input, e.g. # df[df > df2] = 0 if isinstance(key, np.ndarray): if key.shape != self.shape: raise ValueError("Array conditional must be same shape as self") key = self._constructor(key, **self._construct_axes_dict(), copy=False) if key.size and not all(is_bool_dtype(dtype) for dtype in key.dtypes): raise TypeError( "Must pass DataFrame or 2-d ndarray with boolean values only" ) self._check_inplace_setting(value) self._check_setitem_copy() self._where(-key, value, inplace=True) def _set_item_frame_value(self, key, value: DataFrame) -> None: self._ensure_valid_index(value) # align columns if key in self.columns: loc = self.columns.get_loc(key) cols = self.columns[loc] len_cols = 1 if is_scalar(cols) or isinstance(cols, tuple) else len(cols) if len_cols != len(value.columns): raise ValueError("Columns must be same length as key") # align right-hand-side columns if self.columns # is multi-index and self[key] is a sub-frame if isinstance(self.columns, MultiIndex) and isinstance( loc, (slice, Series, np.ndarray, Index) ): cols_droplevel = maybe_droplevels(cols, key) if len(cols_droplevel) and not cols_droplevel.equals(value.columns): value = value.reindex(cols_droplevel, axis=1) for col, col_droplevel in zip(cols, cols_droplevel): self[col] = value[col_droplevel] return if is_scalar(cols): self[cols] = value[value.columns[0]] return # now align rows arraylike = _reindex_for_setitem(value, self.index) self._set_item_mgr(key, arraylike) return if len(value.columns) != 1: raise ValueError( "Cannot set a DataFrame with multiple columns to the single " f"column {key}" ) self[key] = value[value.columns[0]] def _iset_item_mgr( self, loc: int | slice | np.ndarray, value, inplace: bool = False ) -> None: # when called from _set_item_mgr loc can be anything returned from get_loc self._mgr.iset(loc, value, inplace=inplace) self._clear_item_cache() def _set_item_mgr(self, key, value: ArrayLike) -> None: try: loc = self._info_axis.get_loc(key) except KeyError: # This item wasn't present, just insert at end self._mgr.insert(len(self._info_axis), key, value) else: self._iset_item_mgr(loc, value) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _iset_item(self, loc: int, value) -> None: arraylike = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=True) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy() def _set_item(self, key, value) -> None: """ Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. 
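
        For example (an illustrative sketch), a one-dimensional value
        assigned to a key that matches several duplicated columns is
        broadcast across all of them:

        >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], columns=["a", "a"])
        >>> df["a"] = [10, 20, 30]
        >>> df
            a   a
        0  10  10
        1  20  20
        2  30  30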
""" value = self._sanitize_column(value) if ( key in self.columns and value.ndim == 1 and not is_extension_array_dtype(value) ): # broadcast across multiple columns if necessary if not self.columns.is_unique or isinstance(self.columns, MultiIndex): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)).T self._set_item_mgr(key, value) def _set_value( self, index: IndexLabel, col, value: Scalar, takeable: bool = False ) -> None: """ Put single value at passed column and index. Parameters ---------- index : Label row label col : Label column label value : scalar takeable : bool, default False Sets whether or not index/col interpreted as indexers """ try: if takeable: icol = col iindex = cast(int, index) else: icol = self.columns.get_loc(col) iindex = self.index.get_loc(index) self._mgr.column_setitem(icol, iindex, value, inplace_only=True) self._clear_item_cache() except (KeyError, TypeError, ValueError, LossySetitemError): # get_loc might raise a KeyError for missing labels (falling back # to (i)loc will do expansion of the index) # column_setitem will do validation that may raise TypeError, # ValueError, or LossySetitemError # set using a non-recursive method & reset the cache if takeable: self.iloc[index, col] = value else: self.loc[index, col] = value self._item_cache.pop(col, None) except InvalidIndexError as ii_err: # GH48729: Seems like you are trying to assign a value to a # row when only scalar options are permitted raise InvalidIndexError( f"You can only assign a scalar value not a {type(value)}" ) from ii_err def _ensure_valid_index(self, value) -> None: """ Ensure that if we don't have an index, that we can create one from the passed value. """ # GH5632, make sure that we are a Series convertible if not len(self.index) and is_list_like(value) and len(value): if not isinstance(value, DataFrame): try: value = Series(value) except (ValueError, NotImplementedError, TypeError) as err: raise ValueError( "Cannot set a frame with no defined index " "and a value that cannot be converted to a Series" ) from err # GH31368 preserve name of index index_copy = value.index.copy() if self.index.name is not None: index_copy.name = self.index.name self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan) def _box_col_values(self, values: SingleDataManager, loc: int) -> Series: """ Provide boxed values for a column. """ # Lookup in columns so that if e.g. a str datetime was passed # we attach the Timestamp object as the name. 
name = self.columns[loc] klass = self._constructor_sliced # We get index=self.index bc values is a SingleDataManager return klass(values, name=name, fastpath=True).__finalize__(self) # ---------------------------------------------------------------------- # Lookup Caching def _clear_item_cache(self) -> None: self._item_cache.clear() def _get_item_cache(self, item: Hashable) -> Series: """Return the cached item, item represents a label indexer.""" if using_copy_on_write(): loc = self.columns.get_loc(item) return self._ixs(loc, axis=1) cache = self._item_cache res = cache.get(item) if res is None: # All places that call _get_item_cache have unique columns, # pending resolution of GH#33047 loc = self.columns.get_loc(item) res = self._ixs(loc, axis=1) cache[item] = res # for a chain res._is_copy = self._is_copy return res def _reset_cacher(self) -> None: # no-op for DataFrame pass def _maybe_cache_changed(self, item, value: Series, inplace: bool) -> None: """ The object has called back to us saying maybe it has changed. """ loc = self._info_axis.get_loc(item) arraylike = value._values old = self._ixs(loc, axis=1) if old._values is value._values and inplace: # GH#46149 avoid making unnecessary copies/block-splitting return self._mgr.iset(loc, arraylike, inplace=inplace) # ---------------------------------------------------------------------- # Unsorted def query(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> DataFrame: ... def query(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def query(self, expr: str, *, inplace: bool = ..., **kwargs) -> DataFrame | None: ... def query(self, expr: str, *, inplace: bool = False, **kwargs) -> DataFrame | None: """ Query the columns of a DataFrame with a boolean expression. Parameters ---------- expr : str The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. You can refer to column names that are not valid Python variable names by surrounding them in backticks. Thus, column names containing spaces or punctuations (besides underscores) or starting with digits must be surrounded by backticks. (For example, a column named "Area (cm^2)" would be referenced as ```Area (cm^2)```). Column names which are Python keywords (like "list", "for", "import", etc) cannot be used. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether to modify the DataFrame rather than creating a new one. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. Returns ------- DataFrame or None DataFrame resulting from the provided query expression or None if ``inplace=True``. See Also -------- eval : Evaluate a string describing operations on DataFrame columns. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. 
This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. Please note that Python keywords may not be used as identifiers. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. *Backtick quoted variables* Backtick quoted variables are parsed as literal Python code and are converted internally to a Python valid identifier. This can lead to the following problems. During parsing a number of disallowed characters inside the backtick quoted string are replaced by strings that are allowed as a Python identifier. These characters include all operators in Python, the space character, the question mark, the exclamation mark, the dollar sign, and the euro sign. For other characters that fall outside the ASCII range (U+0001..U+007F) and those that are not further specified in PEP 3131, the query parser will raise an error. This excludes whitespace different than the space character, but also the hashtag (as it is used for comments) and the backtick itself (backtick can also not be escaped). In a special case, quotes that make a pair around a backtick can confuse the parser. For example, ```it's` > `that's``` will raise an error, as it forms a quoted string (``'s > `that'``) with a backtick inside. See also the Python documentation about lexical analysis (https://docs.python.org/3/reference/lexical_analysis.html) in combination with the source code in :mod:`pandas.core.computation.parsing`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ inplace = validate_bool_kwarg(inplace, "inplace") if not isinstance(expr, str): msg = f"expr must be a string to be evaluated, {type(expr)} given" raise ValueError(msg) kwargs["level"] = kwargs.pop("level", 0) + 1 kwargs["target"] = None res = self.eval(expr, **kwargs) try: result = self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query result = self[res] if inplace: self._update_inplace(result) return None else: return result def eval(self, expr: str, *, inplace: Literal[False] = ..., **kwargs) -> Any: ... def eval(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def eval(self, expr: str, *, inplace: bool = False, **kwargs) -> Any | None: """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. 
This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, pandas object, or None The result of the evaluation or None if ``inplace=True``. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Multiple columns can be assigned to using multi-line expressions: >>> df.eval( ... ''' ... C = A + B ... D = A - B ... ''' ... ) A B C D 0 1 10 11 -9 1 2 8 10 -6 2 3 6 9 -3 3 4 4 8 0 4 5 2 7 3 """ from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, "inplace") kwargs["level"] = kwargs.pop("level", 0) + 1 index_resolvers = self._get_index_resolvers() column_resolvers = self._get_cleaned_column_resolvers() resolvers = column_resolvers, index_resolvers if "target" not in kwargs: kwargs["target"] = self kwargs["resolvers"] = tuple(kwargs.get("resolvers", ())) + resolvers return _eval(expr, inplace=inplace, **kwargs) def select_dtypes(self, include=None, exclude=None) -> DataFrame: """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. See Also -------- DataFrame.dtypes: Return Series with the data type of each column. Notes ----- * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <https://numpy.org/doc/stable/reference/arrays.scalars.html>`__ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ``'timedelta64'`` * To select Pandas categorical dtypes, use ``'category'`` * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 
'c': [1.0, 2.0] * 3}) >>> df a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int64']) b c 0 True 1.0 1 False 2.0 2 True 1.0 3 False 2.0 4 True 1.0 5 False 2.0 """ if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () selection = (frozenset(include), frozenset(exclude)) if not any(selection): raise ValueError("at least one of include or exclude must be nonempty") # convert the myriad valid dtypes object to a single representation def check_int_infer_dtype(dtypes): converted_dtypes: list[type] = [] for dtype in dtypes: # Numpy maps int to different types (int32, in64) on Windows and Linux # see https://github.com/numpy/numpy/issues/9464 if (isinstance(dtype, str) and dtype == "int") or (dtype is int): converted_dtypes.append(np.int32) converted_dtypes.append(np.int64) elif dtype == "float" or dtype is float: # GH#42452 : np.dtype("float") coerces to np.float64 from Numpy 1.20 converted_dtypes.extend([np.float64, np.float32]) else: converted_dtypes.append(infer_dtype_from_object(dtype)) return frozenset(converted_dtypes) include = check_int_infer_dtype(include) exclude = check_int_infer_dtype(exclude) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) # can't both include AND exclude! if not include.isdisjoint(exclude): raise ValueError(f"include and exclude overlap on {(include & exclude)}") def dtype_predicate(dtype: DtypeObj, dtypes_set) -> bool: # GH 46870: BooleanDtype._is_numeric == True but should be excluded return issubclass(dtype.type, tuple(dtypes_set)) or ( np.number in dtypes_set and getattr(dtype, "_is_numeric", False) and not is_bool_dtype(dtype) ) def predicate(arr: ArrayLike) -> bool: dtype = arr.dtype if include: if not dtype_predicate(dtype, include): return False if exclude: if dtype_predicate(dtype, exclude): return False return True mgr = self._mgr._get_data_subset(predicate).copy(deep=None) return type(self)(mgr).__finalize__(self) def insert( self, loc: int, column: Hashable, value: Scalar | AnyArrayLike, allow_duplicates: bool | lib.NoDefault = lib.no_default, ) -> None: """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns). column : str, number, or hashable object Label of the inserted column. value : Scalar, Series, or array-like allow_duplicates : bool, optional, default lib.no_default See Also -------- Index.insert : Insert new item by index. 
Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.insert(1, "newcol", [99, 99]) >>> df col1 newcol col2 0 1 99 3 1 2 99 4 >>> df.insert(0, "col1", [100, 100], allow_duplicates=True) >>> df col1 col1 newcol col2 0 100 1 99 3 1 100 2 99 4 Notice that pandas uses index alignment in case of `value` from type `Series`: >>> df.insert(0, "col0", pd.Series([5, 6], index=[1, 2])) >>> df col0 col1 col1 newcol col2 0 NaN 100 1 99 3 1 5.0 100 2 99 4 """ if allow_duplicates is lib.no_default: allow_duplicates = False if allow_duplicates and not self.flags.allows_duplicate_labels: raise ValueError( "Cannot specify 'allow_duplicates=True' when " "'self.flags.allows_duplicate_labels' is False." ) if not allow_duplicates and column in self.columns: # Should this be a different kind of error?? raise ValueError(f"cannot insert {column}, already exists") if not isinstance(loc, int): raise TypeError("loc must be int") value = self._sanitize_column(value) self._mgr.insert(loc, column, value) def assign(self, **kwargs) -> DataFrame: r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. Later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 You can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy(deep=None) for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) return data def _sanitize_column(self, value) -> ArrayLike: """ Ensures new columns (which go into the BlockManager as new blocks) are always copied and converted into an array. 
Parameters ---------- value : scalar, Series, or array-like Returns ------- numpy.ndarray or ExtensionArray """ self._ensure_valid_index(value) # We can get there through isetitem with a DataFrame # or through loc single_block_path if isinstance(value, DataFrame): return _reindex_for_setitem(value, self.index) elif is_dict_like(value): return _reindex_for_setitem(Series(value), self.index) if is_list_like(value): com.require_length_match(value, self.index) return sanitize_array(value, self.index, copy=True, allow_2d=True) def _series(self): return { item: Series( self._mgr.iget(idx), index=self.index, name=item, fastpath=True ) for idx, item in enumerate(self.columns) } # ---------------------------------------------------------------------- # Reindexing and alignment def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy): frame = self columns = axes["columns"] if columns is not None: frame = frame._reindex_columns( columns, method, copy, level, fill_value, limit, tolerance ) index = axes["index"] if index is not None: frame = frame._reindex_index( index, method, copy, level, fill_value, limit, tolerance ) return frame def _reindex_index( self, new_index, method, copy: bool, level: Level, fill_value=np.nan, limit=None, tolerance=None, ): new_index, indexer = self.index.reindex( new_index, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {0: [new_index, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_columns( self, new_columns, method, copy: bool, level: Level, fill_value=None, limit=None, tolerance=None, ): new_columns, indexer = self.columns.reindex( new_columns, method=method, level=level, limit=limit, tolerance=tolerance ) return self._reindex_with_indexers( {1: [new_columns, indexer]}, copy=copy, fill_value=fill_value, allow_dups=False, ) def _reindex_multi( self, axes: dict[str, Index], copy: bool, fill_value ) -> DataFrame: """ We are guaranteed non-Nones in the axes. """ new_index, row_indexer = self.index.reindex(axes["index"]) new_columns, col_indexer = self.columns.reindex(axes["columns"]) if row_indexer is not None and col_indexer is not None: # Fastpath. By doing two 'take's at once we avoid making an # unnecessary copy. # We only get here with `not self._is_mixed_type`, which (almost) # ensures that self.values is cheap. It may be worth making this # condition more specific. indexer = row_indexer, col_indexer new_values = take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor( new_values, index=new_index, columns=new_columns, copy=False ) else: return self._reindex_with_indexers( {0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value, ) def align( self, other: DataFrame, join: AlignJoin = "outer", axis: Axis | None = None, level: Level = None, copy: bool | None = None, fill_value=None, method: FillnaOptions | None = None, limit: int | None = None, fill_axis: Axis = 0, broadcast_axis: Axis | None = None, ) -> DataFrame: return super().align( other, join=join, axis=axis, level=level, copy=copy, fill_value=fill_value, method=method, limit=limit, fill_axis=fill_axis, broadcast_axis=broadcast_axis, ) """ Examples -------- >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) Change the row labels. >>> df.set_axis(['a', 'b', 'c'], axis='index') A B a 1 4 b 2 5 c 3 6 Change the column labels. 
>>> df.set_axis(['I', 'II'], axis='columns')
   I  II
0  1   4
1  2   5
2  3   6
"""

def read_excel(
    io,
    sheet_name: str | int | list[IntStrT] | None = 0,
    *,
    header: int | Sequence[int] | None = 0,
    names: list[str] | None = None,
    index_col: int | Sequence[int] | None = None,
    usecols: int
    | str
    | Sequence[int]
    | Sequence[str]
    | Callable[[str], bool]
    | None = None,
    dtype: DtypeArg | None = None,
    engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb"] | None = None,
    converters: dict[str, Callable] | dict[int, Callable] | None = None,
    true_values: Iterable[Hashable] | None = None,
    false_values: Iterable[Hashable] | None = None,
    skiprows: Sequence[int] | int | Callable[[int], object] | None = None,
    nrows: int | None = None,
    na_values=None,
    keep_default_na: bool = True,
    na_filter: bool = True,
    verbose: bool = False,
    parse_dates: list | dict | bool = False,
    date_parser: Callable | lib.NoDefault = lib.no_default,
    date_format: dict[Hashable, str] | str | None = None,
    thousands: str | None = None,
    decimal: str = ".",
    comment: str | None = None,
    skipfooter: int = 0,
    storage_options: StorageOptions = None,
    dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
) -> DataFrame | dict[IntStrT, DataFrame]:
    check_dtype_backend(dtype_backend)
    should_close = False
    if not isinstance(io, ExcelFile):
        should_close = True
        io = ExcelFile(io, storage_options=storage_options, engine=engine)
    elif engine and engine != io.engine:
        raise ValueError(
            "Engine should not be specified when passing "
            "an ExcelFile - ExcelFile already has the engine set"
        )

    try:
        data = io.parse(
            sheet_name=sheet_name,
            header=header,
            names=names,
            index_col=index_col,
            usecols=usecols,
            dtype=dtype,
            converters=converters,
            true_values=true_values,
            false_values=false_values,
            skiprows=skiprows,
            nrows=nrows,
            na_values=na_values,
            keep_default_na=keep_default_na,
            na_filter=na_filter,
            verbose=verbose,
            parse_dates=parse_dates,
            date_parser=date_parser,
            date_format=date_format,
            thousands=thousands,
            decimal=decimal,
            comment=comment,
            skipfooter=skipfooter,
            dtype_backend=dtype_backend,
        )
    finally:
        # make sure to close opened file handles
        if should_close:
            io.close()
    return data
null
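For reference, a minimal usage sketch of read_excel as defined above; the workbook name and sheet names are hypothetical placeholders for any real file.

# Minimal usage sketch for read_excel; "example.xlsx" and its sheet names
# are hypothetical and stand in for any real workbook.
import pandas as pd

# sheet_name=0 (the default) returns the first worksheet as a DataFrame.
df = pd.read_excel("example.xlsx", sheet_name=0, header=0)

# Passing a list of sheet names instead returns a dict keyed by sheet name,
# matching the DataFrame | dict[IntStrT, DataFrame] return annotation.
frames = pd.read_excel("example.xlsx", sheet_name=["Sheet1", "Sheet2"])
print(type(frames))  # <class 'dict'>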
173,561
from __future__ import annotations import abc import datetime from functools import partial from io import BytesIO import os from textwrap import fill from types import TracebackType from typing import ( IO, Any, Callable, Hashable, Iterable, List, Literal, Mapping, Sequence, Union, cast, overload, ) import zipfile from pandas._config import config from pandas._libs import lib from pandas._libs.parsers import STR_NA_VALUES from pandas._typing import ( DtypeArg, DtypeBackend, FilePath, IntStrT, ReadBuffer, StorageOptions, WriteExcelBuffer, ) from pandas.compat._optional import ( get_version, import_optional_dependency, ) from pandas.errors import EmptyDataError from pandas.util._decorators import ( Appender, doc, ) from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ( is_bool, is_float, is_integer, is_list_like, ) from pandas.core.frame import DataFrame from pandas.core.shared_docs import _shared_docs from pandas.util.version import Version from pandas.io.common import ( IOHandles, get_handle, stringify_path, validate_header_arg, ) from pandas.io.excel._util import ( fill_mi_header, get_default_engine, get_writer, maybe_convert_usecols, pop_header_name, ) from pandas.io.parsers import TextParser from pandas.io.parsers.readers import validate_integer XLS_SIGNATURES = ( b"\x09\x00\x04\x00\x07\x00\x10\x00", # BIFF2 b"\x09\x02\x06\x00\x00\x00\x10\x00", # BIFF3 b"\x09\x04\x06\x00\x00\x00\x10\x00", # BIFF4 b"\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1", # Compound File Binary ) ZIP_SIGNATURE = b"PK\x03\x04" PEEK_SIZE = max(map(len, XLS_SIGNATURES + (ZIP_SIGNATURE,))) class BytesIO(BufferedIOBase, BinaryIO): def __init__(self, initial_bytes: bytes = ...) -> None: ... # BytesIO does not contain a "name" field. This workaround is necessary # to allow BytesIO sub-classes to add this field, as it is defined # as a read-only property on IO[]. name: Any def __enter__(self: _T) -> _T: ... def getvalue(self) -> bytes: ... def getbuffer(self) -> memoryview: ... if sys.version_info >= (3, 7): def read1(self, __size: Optional[int] = ...) -> bytes: ... else: def read1(self, __size: Optional[int]) -> bytes: ... # type: ignore class ReadBuffer(BaseBuffer, Protocol[AnyStr_co]): def read(self, __n: int = ...) -> AnyStr_co: # for BytesIOWrapper, gzip.GzipFile, bz2.BZ2File ... FilePath = Union[str, "PathLike[str]"] StorageOptions = Optional[Dict[str, Any]] ) def get_handle( path_or_buf: FilePath | BaseBuffer, mode: str, *, encoding: str | None = ..., compression: CompressionOptions = ..., memory_map: bool = ..., is_text: Literal[False], errors: str | None = ..., storage_options: StorageOptions = ..., ) -> IOHandles[bytes]: ... def get_handle( path_or_buf: FilePath | BaseBuffer, mode: str, *, encoding: str | None = ..., compression: CompressionOptions = ..., memory_map: bool = ..., is_text: Literal[True] = ..., errors: str | None = ..., storage_options: StorageOptions = ..., ) -> IOHandles[str]: ... def get_handle( path_or_buf: FilePath | BaseBuffer, mode: str, *, encoding: str | None = ..., compression: CompressionOptions = ..., memory_map: bool = ..., is_text: bool = ..., errors: str | None = ..., storage_options: StorageOptions = ..., ) -> IOHandles[str] | IOHandles[bytes]: ... 
def get_handle( path_or_buf: FilePath | BaseBuffer, mode: str, *, encoding: str | None = None, compression: CompressionOptions = None, memory_map: bool = False, is_text: bool = True, errors: str | None = None, storage_options: StorageOptions = None, ) -> IOHandles[str] | IOHandles[bytes]: """ Get file handle for given path/buffer and mode. Parameters ---------- path_or_buf : str or file handle File path or object. mode : str Mode to open path_or_buf with. encoding : str or None Encoding to use. {compression_options} .. versionchanged:: 1.0.0 May now be a dict with key 'method' as compression mode and other keys as compression options if compression mode is 'zip'. .. versionchanged:: 1.1.0 Passing compression options as keys in dict is now supported for compression modes 'gzip', 'bz2', 'zstd' and 'zip'. .. versionchanged:: 1.4.0 Zstandard support. memory_map : bool, default False See parsers._parser_params for more information. Only used by read_csv. is_text : bool, default True Whether the type of the content passed to the file/buffer is string or bytes. This is not the same as `"b" not in mode`. If a string content is passed to a binary file/buffer, a wrapper is inserted. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. storage_options: StorageOptions = None Passed to _get_filepath_or_buffer .. versionchanged:: 1.2.0 Returns the dataclass IOHandles """ # Windows does not default to utf-8. Set to utf-8 for a consistent behavior encoding = encoding or "utf-8" errors = errors or "strict" # read_csv does not know whether the buffer is opened in binary/text mode if _is_binary_mode(path_or_buf, mode) and "b" not in mode: mode += "b" # validate encoding and errors codecs.lookup(encoding) if isinstance(errors, str): codecs.lookup_error(errors) # open URLs ioargs = _get_filepath_or_buffer( path_or_buf, encoding=encoding, compression=compression, mode=mode, storage_options=storage_options, ) handle = ioargs.filepath_or_buffer handles: list[BaseBuffer] # memory mapping needs to be the first step # only used for read_csv handle, memory_map, handles = _maybe_memory_map(handle, memory_map) is_path = isinstance(handle, str) compression_args = dict(ioargs.compression) compression = compression_args.pop("method") # Only for write methods if "r" not in mode and is_path: check_parent_directory(str(handle)) if compression: if compression != "zstd": # compression libraries do not like an explicit text-mode ioargs.mode = ioargs.mode.replace("t", "") elif compression == "zstd" and "b" not in ioargs.mode: # python-zstandard defaults to text mode, but we always expect # compression libraries to use binary mode. 
ioargs.mode += "b" # GZ Compression if compression == "gzip": if isinstance(handle, str): # error: Incompatible types in assignment (expression has type # "GzipFile", variable has type "Union[str, BaseBuffer]") handle = gzip.GzipFile( # type: ignore[assignment] filename=handle, mode=ioargs.mode, **compression_args, ) else: handle = gzip.GzipFile( # No overload variant of "GzipFile" matches argument types # "Union[str, BaseBuffer]", "str", "Dict[str, Any]" fileobj=handle, # type: ignore[call-overload] mode=ioargs.mode, **compression_args, ) # BZ Compression elif compression == "bz2": # Overload of "BZ2File" to handle pickle protocol 5 # "Union[str, BaseBuffer]", "str", "Dict[str, Any]" handle = _BZ2File( # type: ignore[call-overload] handle, mode=ioargs.mode, **compression_args, ) # ZIP Compression elif compression == "zip": # error: Argument 1 to "_BytesZipFile" has incompatible type # "Union[str, BaseBuffer]"; expected "Union[Union[str, PathLike[str]], # ReadBuffer[bytes], WriteBuffer[bytes]]" handle = _BytesZipFile( handle, ioargs.mode, **compression_args # type: ignore[arg-type] ) if handle.buffer.mode == "r": handles.append(handle) zip_names = handle.buffer.namelist() if len(zip_names) == 1: handle = handle.buffer.open(zip_names.pop()) elif not zip_names: raise ValueError(f"Zero files found in ZIP file {path_or_buf}") else: raise ValueError( "Multiple files found in ZIP file. " f"Only one file per ZIP: {zip_names}" ) # TAR Encoding elif compression == "tar": compression_args.setdefault("mode", ioargs.mode) if isinstance(handle, str): handle = _BytesTarFile(name=handle, **compression_args) else: # error: Argument "fileobj" to "_BytesTarFile" has incompatible # type "BaseBuffer"; expected "Union[ReadBuffer[bytes], # WriteBuffer[bytes], None]" handle = _BytesTarFile( fileobj=handle, **compression_args # type: ignore[arg-type] ) assert isinstance(handle, _BytesTarFile) if "r" in handle.buffer.mode: handles.append(handle) files = handle.buffer.getnames() if len(files) == 1: file = handle.buffer.extractfile(files[0]) assert file is not None handle = file elif not files: raise ValueError(f"Zero files found in TAR archive {path_or_buf}") else: raise ValueError( "Multiple files found in TAR archive. " f"Only one file per TAR archive: {files}" ) # XZ Compression elif compression == "xz": # error: Argument 1 to "LZMAFile" has incompatible type "Union[str, # BaseBuffer]"; expected "Optional[Union[Union[str, bytes, PathLike[str], # PathLike[bytes]], IO[bytes]]]" handle = get_lzma_file()(handle, ioargs.mode) # type: ignore[arg-type] # Zstd Compression elif compression == "zstd": zstd = import_optional_dependency("zstandard") if "r" in ioargs.mode: open_args = {"dctx": zstd.ZstdDecompressor(**compression_args)} else: open_args = {"cctx": zstd.ZstdCompressor(**compression_args)} handle = zstd.open( handle, mode=ioargs.mode, **open_args, ) # Unrecognized Compression else: msg = f"Unrecognized compression type: {compression}" raise ValueError(msg) assert not isinstance(handle, str) handles.append(handle) elif isinstance(handle, str): # Check whether the filename is to be opened in binary mode. # Binary mode does not support 'encoding' and 'newline'. 
if ioargs.encoding and "b" not in ioargs.mode: # Encoding handle = open( handle, ioargs.mode, encoding=ioargs.encoding, errors=errors, newline="", ) else: # Binary mode handle = open(handle, ioargs.mode) handles.append(handle) # Convert BytesIO or file objects passed with an encoding is_wrapped = False if not is_text and ioargs.mode == "rb" and isinstance(handle, TextIOBase): # not added to handles as it does not open/buffer resources handle = _BytesIOWrapper( handle, encoding=ioargs.encoding, ) elif is_text and ( compression or memory_map or _is_binary_mode(handle, ioargs.mode) ): if ( not hasattr(handle, "readable") or not hasattr(handle, "writable") or not hasattr(handle, "seekable") ): handle = _IOWrapper(handle) # error: Argument 1 to "TextIOWrapper" has incompatible type # "_IOWrapper"; expected "IO[bytes]" handle = TextIOWrapper( handle, # type: ignore[arg-type] encoding=ioargs.encoding, errors=errors, newline="", ) handles.append(handle) # only marked as wrapped when the caller provided a handle is_wrapped = not ( isinstance(ioargs.filepath_or_buffer, str) or ioargs.should_close ) if "r" in ioargs.mode and not hasattr(handle, "read"): raise TypeError( "Expected file path name or file-like object, " f"got {type(ioargs.filepath_or_buffer)} type" ) handles.reverse() # close the most recently added buffer first if ioargs.should_close: assert not isinstance(ioargs.filepath_or_buffer, str) handles.append(ioargs.filepath_or_buffer) return IOHandles( # error: Argument "handle" to "IOHandles" has incompatible type # "Union[TextIOWrapper, GzipFile, BaseBuffer, typing.IO[bytes], # typing.IO[Any]]"; expected "pandas._typing.IO[Any]" handle=handle, # type: ignore[arg-type] # error: Argument "created_handles" to "IOHandles" has incompatible type # "List[BaseBuffer]"; expected "List[Union[IO[bytes], IO[str]]]" created_handles=handles, # type: ignore[arg-type] is_wrapped=is_wrapped, compression=ioargs.compression, ) The provided code snippet includes necessary dependencies for implementing the `inspect_excel_format` function. Write a Python function `def inspect_excel_format( content_or_path: FilePath | ReadBuffer[bytes], storage_options: StorageOptions = None, ) -> str | None` to solve the following problem: Inspect the path or content of an excel file and get its format. Adopted from xlrd: https://github.com/python-excel/xlrd. Parameters ---------- content_or_path : str or file-like object Path to file or content of file to inspect. May be a URL. {storage_options} Returns ------- str or None Format of file if it can be determined. Raises ------ ValueError If resulting stream is empty. BadZipFile If resulting stream does not have an XLS signature and is not a valid zipfile. Here is the function: def inspect_excel_format( content_or_path: FilePath | ReadBuffer[bytes], storage_options: StorageOptions = None, ) -> str | None: """ Inspect the path or content of an excel file and get its format. Adopted from xlrd: https://github.com/python-excel/xlrd. Parameters ---------- content_or_path : str or file-like object Path to file or content of file to inspect. May be a URL. {storage_options} Returns ------- str or None Format of file if it can be determined. Raises ------ ValueError If resulting stream is empty. BadZipFile If resulting stream does not have an XLS signature and is not a valid zipfile. 
""" if isinstance(content_or_path, bytes): content_or_path = BytesIO(content_or_path) with get_handle( content_or_path, "rb", storage_options=storage_options, is_text=False ) as handle: stream = handle.handle stream.seek(0) buf = stream.read(PEEK_SIZE) if buf is None: raise ValueError("stream is empty") assert isinstance(buf, bytes) peek = buf stream.seek(0) if any(peek.startswith(sig) for sig in XLS_SIGNATURES): return "xls" elif not peek.startswith(ZIP_SIGNATURE): return None with zipfile.ZipFile(stream) as zf: # Workaround for some third party files that use forward slashes and # lower case names. component_names = [ name.replace("\\", "/").lower() for name in zf.namelist() ] if "xl/workbook.xml" in component_names: return "xlsx" if "xl/workbook.bin" in component_names: return "xlsb" if "content.xml" in component_names: return "ods" return "zip"
Inspect the path or content of an excel file and get its format. Adopted from xlrd: https://github.com/python-excel/xlrd. Parameters ---------- content_or_path : str or file-like object Path to file or content of file to inspect. May be a URL. {storage_options} Returns ------- str or None Format of file if it can be determined. Raises ------ ValueError If resulting stream is empty. BadZipFile If resulting stream does not have an XLS signature and is not a valid zipfile.
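A sketch of the signature sniffing above, using synthetic in-memory bytes; it assumes a pandas install supplies the io plumbing behind get_handle (the snippet's copy references helpers such as _get_filepath_or_buffer that live in pandas.io.common).

# Sketch: exercise two code paths of inspect_excel_format with synthetic
# in-memory content (no real files involved).
from io import BytesIO
import zipfile

# Bytes beginning with the Compound File Binary signature classify as "xls".
ole2 = b"\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1" + b"\x00" * 8
print(inspect_excel_format(ole2))  # -> "xls"

# A zip archive whose members include "xl/workbook.xml" classifies as "xlsx".
buf = BytesIO()
with zipfile.ZipFile(buf, "w") as zf:
    zf.writestr("xl/workbook.xml", "<workbook/>")
print(inspect_excel_format(buf.getvalue()))  # -> "xlsx"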
173,562
from __future__ import annotations import sys def removesuffix(string: str, suffix: str) -> str: if string.endswith(suffix): return string[: -len(suffix)] return string
null
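A brief behavior sketch for this str.removesuffix backport; note the empty-suffix edge case, where the slice arithmetic diverges from Python 3.9's built-in.

# Behavior sketch for the removesuffix backport defined above.
print(removesuffix("report.csv", ".csv"))  # -> "report"
print(removesuffix("report", ".csv"))      # suffix absent, unchanged -> "report"

# Edge case: with an empty suffix, string[: -0] evaluates to string[:0],
# so this backport returns "" where str.removesuffix would return "report".
print(removesuffix("report", ""))          # -> ""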
173,563
from __future__ import annotations import sys def removeprefix(string: str, prefix: str) -> str: if string.startswith(prefix): return string[len(prefix) :] return string
null
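The mirror-image sketch for the prefix variant; unlike the suffix backport, an empty prefix round-trips cleanly because string[0:] is the string itself.

# Behavior sketch for the removeprefix backport defined above.
print(removeprefix("pd.DataFrame", "pd."))  # -> "DataFrame"
print(removeprefix("DataFrame", "pd."))     # prefix absent, unchanged
print(removeprefix("abc", ""))              # empty prefix is harmless -> "abc"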
173,564
from __future__ import annotations

import codecs
import json
import locale
import os
import platform
import struct
import sys

from pandas._typing import JSONSerializable
from pandas.compat._optional import (
    VERSIONS,
    get_version,
    import_optional_dependency,
)

def _get_sys_info() -> dict[str, JSONSerializable]:
    """
    Returns system information as a JSON serializable dictionary.
    """
    uname_result = platform.uname()
    language_code, encoding = locale.getlocale()
    return {
        "commit": _get_commit_hash(),
        "python": ".".join([str(i) for i in sys.version_info]),
        "python-bits": struct.calcsize("P") * 8,
        "OS": uname_result.system,
        "OS-release": uname_result.release,
        "Version": uname_result.version,
        "machine": uname_result.machine,
        "processor": uname_result.processor,
        "byteorder": sys.byteorder,
        "LC_ALL": os.environ.get("LC_ALL"),
        "LANG": os.environ.get("LANG"),
        "LOCALE": {"language-code": language_code, "encoding": encoding},
    }

def _get_dependency_info() -> dict[str, JSONSerializable]:
    """
    Returns dependency information as a JSON serializable dictionary.
    """
    deps = [
        "pandas",
        # required
        "numpy",
        "pytz",
        "dateutil",
        # install / build,
        "setuptools",
        "pip",
        "Cython",
        # test
        "pytest",
        "hypothesis",
        # docs
        "sphinx",
        # Other, need a min version
        "blosc",
        "feather",
        "xlsxwriter",
        "lxml.etree",
        "html5lib",
        "pymysql",
        "psycopg2",
        "jinja2",
        # Other, not imported.
        "IPython",
        "pandas_datareader",
    ]
    deps.extend(list(VERSIONS))
    result: dict[str, JSONSerializable] = {}
    for modname in deps:
        mod = import_optional_dependency(modname, errors="ignore")
        result[modname] = get_version(mod) if mod else None
    return result

try:
    from sys import is_finalizing
except ImportError:
    # Emulate it
    def _get_emulated_is_finalizing() -> Callable[[], bool]:
        L = []  # type: List[None]
        atexit.register(lambda: L.append(None))

        def is_finalizing() -> bool:
            # Not referencing any globals here
            return L != []

        return is_finalizing

    is_finalizing = _get_emulated_is_finalizing()
The provided code snippet includes necessary dependencies for implementing the `show_versions` function. Write a Python function `def show_versions(as_json: str | bool = False) -> None` to solve the following problem:
Provide useful information, important for bug reports. It comprises info about the host operating system, the pandas version, and the versions of other installed related packages. Parameters ---------- as_json : str or bool, default False * If False, outputs info in a human readable form to the console. * If str, it will be considered as a path to a file. Info will be written to that file in JSON format. * If True, outputs info in JSON format to the console.
Here is the function:
def show_versions(as_json: str | bool = False) -> None:
    """
    Provide useful information, important for bug reports.

    It comprises info about the host operating system, the pandas version,
    and the versions of other installed related packages.

    Parameters
    ----------
    as_json : str or bool, default False
        * If False, outputs info in a human readable form to the console.
        * If str, it will be considered as a path to a file. Info will be
          written to that file in JSON format.
        * If True, outputs info in JSON format to the console.
""" sys_info = _get_sys_info() deps = _get_dependency_info() if as_json: j = {"system": sys_info, "dependencies": deps} if as_json is True: sys.stdout.writelines(json.dumps(j, indent=2)) else: assert isinstance(as_json, str) # needed for mypy with codecs.open(as_json, "wb", encoding="utf8") as f: json.dump(j, f, indent=2) else: assert isinstance(sys_info["LOCALE"], dict) # needed for mypy language_code = sys_info["LOCALE"]["language-code"] encoding = sys_info["LOCALE"]["encoding"] sys_info["LOCALE"] = f"{language_code}.{encoding}" maxlen = max(len(x) for x in deps) print("\nINSTALLED VERSIONS") print("------------------") for k, v in sys_info.items(): print(f"{k:<{maxlen}}: {v}") print("") for k, v in deps.items(): print(f"{k:<{maxlen}}: {v}")
Provide useful information, important for bug reports. It comprises info about the host operating system, the pandas version, and the versions of other installed related packages. Parameters ---------- as_json : str or bool, default False * If False, outputs info in a human readable form to the console. * If str, it will be considered as a path to a file. Info will be written to that file in JSON format. * If True, outputs info in JSON format to the console.
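The three call styles the function accepts, sketched via the public pandas entry point (which wraps the function above); the JSON output path is hypothetical.

# Usage sketch for show_versions; the JSON file path is hypothetical.
import pandas as pd

pd.show_versions()                            # human-readable table on stdout
pd.show_versions(as_json=True)                # single JSON document on stdout
pd.show_versions(as_json="pd_versions.json")  # JSON written to the given path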
173,565
from __future__ import annotations from functools import wraps import inspect from textwrap import dedent from typing import ( Any, Callable, Mapping, cast, ) import warnings from pandas._libs.properties import cache_readonly from pandas._typing import ( F, T, ) from pandas.util._exceptions import find_stack_level def wraps(wrapped: _AnyCallable, assigned: Sequence[str] = ..., updated: Sequence[str] = ...) -> Callable[[_T], _T]: ... def dedent(text: str) -> str: ... Any = object() class Callable(BaseTypingInstance): def py__call__(self, arguments): """ def x() -> Callable[[Callable[..., _T]], _T]: ... """ # The 0th index are the arguments. try: param_values = self._generics_manager[0] result_values = self._generics_manager[1] except IndexError: debug.warning('Callable[...] defined without two arguments') return NO_VALUES else: from jedi.inference.gradual.annotation import infer_return_for_callable return infer_return_for_callable(arguments, param_values, result_values) def py__get__(self, instance, class_value): return ValueSet([self]) F = TypeVar("F", bound=FuncType) The provided code snippet includes necessary dependencies for implementing the `deprecate` function. Write a Python function `def deprecate( name: str, alternative: Callable[..., Any], version: str, alt_name: str | None = None, klass: type[Warning] | None = None, stacklevel: int = 2, msg: str | None = None, ) -> Callable[[F], F]` to solve the following problem: Return a new function that emits a deprecation warning on use. To use this method for a deprecated function, another function `alternative` with the same signature must exist. The deprecated function will emit a deprecation warning, and in the docstring it will contain the deprecation directive with the provided version so it can be detected for future removal. Parameters ---------- name : str Name of function to deprecate. alternative : func Function to use instead. version : str Version of pandas in which the method has been deprecated. alt_name : str, optional Name to use in preference of alternative.__name__. klass : Warning, default FutureWarning stacklevel : int, default 2 msg : str The message to display in the warning. Default is '{name} is deprecated. Use {alt_name} instead.' Here is the function: def deprecate( name: str, alternative: Callable[..., Any], version: str, alt_name: str | None = None, klass: type[Warning] | None = None, stacklevel: int = 2, msg: str | None = None, ) -> Callable[[F], F]: """ Return a new function that emits a deprecation warning on use. To use this method for a deprecated function, another function `alternative` with the same signature must exist. The deprecated function will emit a deprecation warning, and in the docstring it will contain the deprecation directive with the provided version so it can be detected for future removal. Parameters ---------- name : str Name of function to deprecate. alternative : func Function to use instead. version : str Version of pandas in which the method has been deprecated. alt_name : str, optional Name to use in preference of alternative.__name__. klass : Warning, default FutureWarning stacklevel : int, default 2 msg : str The message to display in the warning. Default is '{name} is deprecated. Use {alt_name} instead.' """ alt_name = alt_name or alternative.__name__ klass = klass or FutureWarning warning_msg = msg or f"{name} is deprecated, use {alt_name} instead." 
@wraps(alternative) def wrapper(*args, **kwargs) -> Callable[..., Any]: warnings.warn(warning_msg, klass, stacklevel=stacklevel) return alternative(*args, **kwargs) # adding deprecated directive to the docstring msg = msg or f"Use `{alt_name}` instead." doc_error_msg = ( "deprecate needs a correctly formatted docstring in " "the target function (should have a one liner short " "summary, and opening quotes should be in their own " f"line). Found:\n{alternative.__doc__}" ) # when python is running in optimized mode (i.e. `-OO`), docstrings are # removed, so we check that a docstring with correct formatting is used # but we allow empty docstrings if alternative.__doc__: if alternative.__doc__.count("\n") < 3: raise AssertionError(doc_error_msg) empty1, summary, empty2, doc_string = alternative.__doc__.split("\n", 3) if empty1 or empty2 and not summary: raise AssertionError(doc_error_msg) wrapper.__doc__ = dedent( f""" {summary.strip()} .. deprecated:: {version} {msg} {dedent(doc_string)}""" ) # error: Incompatible return value type (got "Callable[[VarArg(Any), KwArg(Any)], # Callable[...,Any]]", expected "Callable[[F], F]") return wrapper # type: ignore[return-value]
Return a new function that emits a deprecation warning on use. To use this method for a deprecated function, another function `alternative` with the same signature must exist. The deprecated function will emit a deprecation warning, and in the docstring it will contain the deprecation directive with the provided version so it can be detected for future removal. Parameters ---------- name : str Name of function to deprecate. alternative : func Function to use instead. version : str Version of pandas in which the method has been deprecated. alt_name : str, optional Name to use in preference of alternative.__name__. klass : Warning, default FutureWarning stacklevel : int, default 2 msg : str The message to display in the warning. Default is '{name} is deprecated. Use {alt_name} instead.'
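A self-contained sketch of wiring up a deprecated alias with the function above; new_sum and old_sum are illustrative names, and it assumes the real functools.wraps import at the top of the snippet rather than the stub shim. Note the replacement's docstring needs a one-line summary on its own line, or the formatting assertion fires.

import warnings

def new_sum(a, b):
    """
    Add two numbers.

    Longer description kept on its own line to satisfy the docstring check.
    """
    return a + b

# old_sum forwards to new_sum and emits a FutureWarning on every call.
old_sum = deprecate("old_sum", new_sum, "2.0")

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    assert old_sum(1, 2) == 3
    assert issubclass(caught[0].category, FutureWarning)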
173,566
from __future__ import annotations from functools import wraps import inspect from textwrap import dedent from typing import ( Any, Callable, Mapping, cast, ) import warnings from pandas._libs.properties import cache_readonly from pandas._typing import ( F, T, ) from pandas.util._exceptions import find_stack_level def wraps(wrapped: _AnyCallable, assigned: Sequence[str] = ..., updated: Sequence[str] = ...) -> Callable[[_T], _T]: ... Any = object() class Mapping(_Collection[_KT], Generic[_KT, _VT_co]): # TODO: We wish the key type could also be covariant, but that doesn't work, # see discussion in https: //github.com/python/typing/pull/273. def __getitem__(self, k: _KT) -> _VT_co: ... # Mixin methods def get(self, key: _KT) -> Optional[_VT_co]: ... def get(self, key: _KT, default: Union[_VT_co, _T]) -> Union[_VT_co, _T]: ... def items(self) -> AbstractSet[Tuple[_KT, _VT_co]]: ... def keys(self) -> AbstractSet[_KT]: ... def values(self) -> ValuesView[_VT_co]: ... def __contains__(self, o: object) -> bool: ... def cast(typ: Type[_T], val: Any) -> _T: ... def cast(typ: str, val: Any) -> Any: ... def cast(typ: object, val: Any) -> Any: ... class Callable(BaseTypingInstance): def py__call__(self, arguments): """ def x() -> Callable[[Callable[..., _T]], _T]: ... """ # The 0th index are the arguments. try: param_values = self._generics_manager[0] result_values = self._generics_manager[1] except IndexError: debug.warning('Callable[...] defined without two arguments') return NO_VALUES else: from jedi.inference.gradual.annotation import infer_return_for_callable return infer_return_for_callable(arguments, param_values, result_values) def py__get__(self, instance, class_value): return ValueSet([self]) F = TypeVar("F", bound=FuncType) The provided code snippet includes necessary dependencies for implementing the `deprecate_kwarg` function. Write a Python function `def deprecate_kwarg( old_arg_name: str, new_arg_name: str | None, mapping: Mapping[Any, Any] | Callable[[Any], Any] | None = None, stacklevel: int = 2, ) -> Callable[[F], F]` to solve the following problem: Decorator to deprecate a keyword argument of a function. Parameters ---------- old_arg_name : str Name of argument in function to deprecate new_arg_name : str or None Name of preferred argument in function. Use None to raise warning that ``old_arg_name`` keyword is deprecated. mapping : dict or callable If mapping is present, use it to translate old arguments to new arguments. A callable must do its own value checking; values not found in a dict will be forwarded unchanged. Examples -------- The following deprecates 'cols', using 'columns' instead >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns') ... def f(columns=''): ... print(columns) ... >>> f(columns='should work ok') should work ok >>> f(cols='should raise warning') # doctest: +SKIP FutureWarning: cols is deprecated, use columns instead warnings.warn(msg, FutureWarning) should raise warning >>> f(cols='should error', columns="can\'t pass do both") # doctest: +SKIP TypeError: Can only specify 'cols' or 'columns', not both >>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False}) ... def f(new=False): ... print('yes!' if new else 'no!') ... >>> f(old='yes') # doctest: +SKIP FutureWarning: old='yes' is deprecated, use new=True instead warnings.warn(msg, FutureWarning) yes! To raise a warning that a keyword will be removed entirely in the future >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name=None) ... def f(cols='', another_param=''): ... print(cols) ... 
>>> f(cols='should raise warning') # doctest: +SKIP FutureWarning: the 'cols' keyword is deprecated and will be removed in a future version. Please take steps to stop the use of 'cols' should raise warning >>> f(another_param='should not raise warning') # doctest: +SKIP should not raise warning >>> f(cols='should raise warning', another_param='') # doctest: +SKIP FutureWarning: the 'cols' keyword is deprecated and will be removed in a future version. Please take steps to stop the use of 'cols' should raise warning
Here is the function:
def deprecate_kwarg(
    old_arg_name: str,
    new_arg_name: str | None,
    mapping: Mapping[Any, Any] | Callable[[Any], Any] | None = None,
    stacklevel: int = 2,
) -> Callable[[F], F]:
    """
    Decorator to deprecate a keyword argument of a function.

    Parameters
    ----------
    old_arg_name : str
        Name of argument in function to deprecate
    new_arg_name : str or None
        Name of preferred argument in function. Use None to raise warning that
        ``old_arg_name`` keyword is deprecated.
    mapping : dict or callable
        If mapping is present, use it to translate old arguments to
        new arguments. A callable must do its own value checking;
        values not found in a dict will be forwarded unchanged.

    Examples
    --------
    The following deprecates 'cols', using 'columns' instead

    >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns')
    ... def f(columns=''):
    ...     print(columns)
    ...
    >>> f(columns='should work ok')
    should work ok

    >>> f(cols='should raise warning')  # doctest: +SKIP
    FutureWarning: cols is deprecated, use columns instead
      warnings.warn(msg, FutureWarning)
    should raise warning

    >>> f(cols='should error', columns="can\'t pass do both")  # doctest: +SKIP
    TypeError: Can only specify 'cols' or 'columns', not both

    >>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False})
    ... def f(new=False):
    ...     print('yes!' if new else 'no!')
    ...
    >>> f(old='yes')  # doctest: +SKIP
    FutureWarning: old='yes' is deprecated, use new=True instead
      warnings.warn(msg, FutureWarning)
    yes!

    To raise a warning that a keyword will be removed entirely in the future

    >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name=None)
    ... def f(cols='', another_param=''):
    ...     print(cols)
    ...
    >>> f(cols='should raise warning')  # doctest: +SKIP
    FutureWarning: the 'cols' keyword is deprecated and will be removed in a
    future version. Please take steps to stop the use of 'cols'
    should raise warning
    >>> f(another_param='should not raise warning')  # doctest: +SKIP
    should not raise warning
    >>> f(cols='should raise warning', another_param='')  # doctest: +SKIP
    FutureWarning: the 'cols' keyword is deprecated and will be removed in a
    future version. Please take steps to stop the use of 'cols'
    should raise warning
    """
    if mapping is not None and not hasattr(mapping, "get") and not callable(mapping):
        raise TypeError(
            "mapping from old to new argument values must be dict or callable!"
        )

    def _deprecate_kwarg(func: F) -> F:
        @wraps(func)
        def wrapper(*args, **kwargs) -> Callable[..., Any]:
            old_arg_value = kwargs.pop(old_arg_name, None)

            if old_arg_value is not None:
                if new_arg_name is None:
                    msg = (
                        f"the {repr(old_arg_name)} keyword is deprecated and "
                        "will be removed in a future version. Please take "
                        f"steps to stop the use of {repr(old_arg_name)}"
                    )
                    warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
                    kwargs[old_arg_name] = old_arg_value
                    return func(*args, **kwargs)

                elif mapping is not None:
                    if callable(mapping):
                        new_arg_value = mapping(old_arg_value)
                    else:
                        new_arg_value = mapping.get(old_arg_value, old_arg_value)
                    msg = (
                        f"the {old_arg_name}={repr(old_arg_value)} keyword is "
                        "deprecated, use "
                        f"{new_arg_name}={repr(new_arg_value)} instead."
                    )
                else:
                    new_arg_value = old_arg_value
                    msg = (
                        f"the {repr(old_arg_name)} keyword is deprecated, "
                        f"use {repr(new_arg_name)} instead."
                    )

                warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
                if kwargs.get(new_arg_name) is not None:
                    msg = (
                        f"Can only specify {repr(old_arg_name)} "
                        f"or {repr(new_arg_name)}, not both."
                    )
                    raise TypeError(msg)
                kwargs[new_arg_name] = new_arg_value
            return func(*args, **kwargs)

        return cast(F, wrapper)

    return _deprecate_kwarg
Decorator to deprecate a keyword argument of a function. Parameters ---------- old_arg_name : str Name of argument in function to deprecate new_arg_name : str or None Name of preferred argument in function. Use None to raise warning that ``old_arg_name`` keyword is deprecated. mapping : dict or callable If mapping is present, use it to translate old arguments to new arguments. A callable must do its own value checking; values not found in a dict will be forwarded unchanged. Examples -------- The following deprecates 'cols', using 'columns' instead >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns') ... def f(columns=''): ... print(columns) ... >>> f(columns='should work ok') should work ok >>> f(cols='should raise warning') # doctest: +SKIP FutureWarning: cols is deprecated, use columns instead warnings.warn(msg, FutureWarning) should raise warning >>> f(cols='should error', columns="can\'t pass do both") # doctest: +SKIP TypeError: Can only specify 'cols' or 'columns', not both >>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False}) ... def f(new=False): ... print('yes!' if new else 'no!') ... >>> f(old='yes') # doctest: +SKIP FutureWarning: old='yes' is deprecated, use new=True instead warnings.warn(msg, FutureWarning) yes! To raise a warning that a keyword will be removed entirely in the future >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name=None) ... def f(cols='', another_param=''): ... print(cols) ... >>> f(cols='should raise warning') # doctest: +SKIP FutureWarning: the 'cols' keyword is deprecated and will be removed in a future version. Please take steps to stop the use of 'cols' should raise warning >>> f(another_param='should not raise warning') # doctest: +SKIP should not raise warning >>> f(cols='should raise warning', another_param='') # doctest: +SKIP FutureWarning: the 'cols' keyword is deprecated and will be removed in a future version. Please take steps to stop the use of 'cols' should raise warning
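A runnable sketch of the rename path through the decorator above; select_cols is an illustrative name, and the sketch assumes the real functools.wraps import rather than the stub shim earlier in the snippet.

import warnings

@deprecate_kwarg(old_arg_name="cols", new_arg_name="columns")
def select_cols(columns=""):
    return columns

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # "cols" is remapped to "columns" and a FutureWarning is recorded.
    assert select_cols(cols="a") == "a"
    assert issubclass(caught[0].category, FutureWarning)
    try:
        select_cols(cols="a", columns="b")  # both names -> TypeError
    except TypeError as err:
        print(err)  # Can only specify 'cols' or 'columns', not both.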
173,567
from __future__ import annotations

from functools import wraps
import inspect
from textwrap import dedent
from typing import (
    Any,
    Callable,
    Mapping,
    cast,
)
import warnings

from pandas._libs.properties import cache_readonly
from pandas._typing import (
    F,
    T,
)
from pandas.util._exceptions import find_stack_level

def _format_argument_list(allow_args: list[str]) -> str:
    """
    Convert the allow_args argument (either string or integer) of
    `deprecate_nonkeyword_arguments` function to a string describing
    it to be inserted into warning message.

    Parameters
    ----------
    allowed_args : list, tuple or int
        The `allowed_args` argument for `deprecate_nonkeyword_arguments`,
        but None value is not allowed.

    Returns
    -------
    str
        The substring describing the argument list in best way to be
        inserted to the warning message.

    Examples
    --------
    `format_argument_list([])` -> ''
    `format_argument_list(['a'])` -> " except for the argument 'a'"
    `format_argument_list(['a', 'b'])` -> " except for the arguments 'a' and 'b'"
    `format_argument_list(['a', 'b', 'c'])` ->
        " except for the arguments 'a', 'b' and 'c'"
    """
    if "self" in allow_args:
        allow_args.remove("self")
    if not allow_args:
        return ""
    elif len(allow_args) == 1:
        return f" except for the argument '{allow_args[0]}'"
    else:
        last = allow_args[-1]
        args = ", ".join(["'" + x + "'" for x in allow_args[:-1]])
        return f" except for the arguments {args} and '{last}'"

def future_version_msg(version: str | None) -> str:
    """Specify which version of pandas the deprecation will take place in."""
    if version is None:
        return "In a future version of pandas"
    else:
        return f"Starting with pandas version {version}"

def wraps(
    wrapped: _AnyCallable,
    assigned: Sequence[str] = ...,
    updated: Sequence[str] = ...,
) -> Callable[[_T], _T]: ...

class Callable(BaseTypingInstance):
    def py__call__(self, arguments):
        """
        def x() -> Callable[[Callable[..., _T]], _T]: ...
        """
        # The 0th index are the arguments.
        try:
            param_values = self._generics_manager[0]
            result_values = self._generics_manager[1]
        except IndexError:
            debug.warning('Callable[...] defined without two arguments')
            return NO_VALUES
        else:
            from jedi.inference.gradual.annotation import infer_return_for_callable
            return infer_return_for_callable(arguments, param_values, result_values)

    def py__get__(self, instance, class_value):
        return ValueSet([self])

F = TypeVar("F", bound=FuncType)

def find_stack_level() -> int:
    """
    Find the first place in the stack that is not inside pandas
    (tests notwithstanding).
    """
    import pandas as pd

    pkg_dir = os.path.dirname(pd.__file__)
    test_dir = os.path.join(pkg_dir, "tests")

    # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow
    frame = inspect.currentframe()
    n = 0
    while frame:
        fname = inspect.getfile(frame)
        if fname.startswith(pkg_dir) and not fname.startswith(test_dir):
            frame = frame.f_back
            n += 1
        else:
            break
    return n

The provided code snippet includes necessary dependencies for implementing the `deprecate_nonkeyword_arguments` function.

Write a Python function `def deprecate_nonkeyword_arguments(version: str | None, allowed_args: list[str] | None = None, name: str | None = None) -> Callable[[F], F]` to solve the following problem:

Decorator to deprecate a use of non-keyword arguments of a function.

Parameters
----------
version : str, optional
    The version in which positional arguments will become keyword-only.
    If None, then the warning message won't specify any particular version.
allowed_args : list, optional
    In case of list, it must be the list of names of some first arguments
    of the decorated functions that are OK to be given as positional
    arguments. In case of None value, defaults to list of all arguments
    not having the default value.
name : str, optional
    The specific name of the function to show in the warning message.
    If None, then the qualified name of the function is used.

Here is the function:

def deprecate_nonkeyword_arguments(
    version: str | None,
    allowed_args: list[str] | None = None,
    name: str | None = None,
) -> Callable[[F], F]:
    """
    Decorator to deprecate a use of non-keyword arguments of a function.

    Parameters
    ----------
    version : str, optional
        The version in which positional arguments will become keyword-only.
        If None, then the warning message won't specify any particular version.
    allowed_args : list, optional
        In case of list, it must be the list of names of some first arguments
        of the decorated functions that are OK to be given as positional
        arguments. In case of None value, defaults to list of all arguments
        not having the default value.
    name : str, optional
        The specific name of the function to show in the warning message.
        If None, then the qualified name of the function is used.
    """

    def decorate(func):
        old_sig = inspect.signature(func)

        if allowed_args is not None:
            allow_args = allowed_args
        else:
            allow_args = [
                p.name
                for p in old_sig.parameters.values()
                if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)
                and p.default is p.empty
            ]

        new_params = [
            p.replace(kind=p.KEYWORD_ONLY)
            if (
                p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)
                and p.name not in allow_args
            )
            else p
            for p in old_sig.parameters.values()
        ]
        new_params.sort(key=lambda p: p.kind)
        new_sig = old_sig.replace(parameters=new_params)

        num_allow_args = len(allow_args)
        msg = (
            f"{future_version_msg(version)} all arguments of "
            f"{name or func.__qualname__}{{arguments}} will be keyword-only."
        )

        @wraps(func)
        def wrapper(*args, **kwargs):
            if len(args) > num_allow_args:
                warnings.warn(
                    msg.format(arguments=_format_argument_list(allow_args)),
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )
            return func(*args, **kwargs)

        # error: "Callable[[VarArg(Any), KwArg(Any)], Any]" has no
        # attribute "__signature__"
        wrapper.__signature__ = new_sig  # type: ignore[attr-defined]
        return wrapper

    return decorate
Decorator to deprecate a use of non-keyword arguments of a function.

Parameters
----------
version : str, optional
    The version in which positional arguments will become keyword-only.
    If None, then the warning message won't specify any particular version.
allowed_args : list, optional
    In case of list, it must be the list of names of some first arguments
    of the decorated functions that are OK to be given as positional
    arguments. In case of None value, defaults to list of all arguments
    not having the default value.
name : str, optional
    The specific name of the function to show in the warning message.
    If None, then the qualified name of the function is used.
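A short sketch of this decorator in action, assuming the definition above is in scope; the function `combine` is hypothetical:

@deprecate_nonkeyword_arguments(version="2.0", allowed_args=["left"])
def combine(left, right=None, how="inner"):
    return left, right, how

combine([1])             # ok: only the allowed positional argument is used
combine([1], right=[2])  # ok: the remaining arguments are passed by keyword
combine([1], [2])        # warns: "Starting with pandas version 2.0 all arguments
                         # of combine except for the argument 'left' will be keyword-only."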
173,568
from __future__ import annotations

from functools import wraps
import inspect
from textwrap import dedent
from typing import (
    Any,
    Callable,
    Mapping,
    cast,
)
import warnings

from pandas._libs.properties import cache_readonly
from pandas._typing import (
    F,
    T,
)
from pandas.util._exceptions import find_stack_level

def dedent(text: str) -> str: ...

class Callable(BaseTypingInstance):
    def py__call__(self, arguments):
        """
        def x() -> Callable[[Callable[..., _T]], _T]: ...
        """
        # The 0th index are the arguments.
        try:
            param_values = self._generics_manager[0]
            result_values = self._generics_manager[1]
        except IndexError:
            debug.warning('Callable[...] defined without two arguments')
            return NO_VALUES
        else:
            from jedi.inference.gradual.annotation import infer_return_for_callable
            return infer_return_for_callable(arguments, param_values, result_values)

    def py__get__(self, instance, class_value):
        return ValueSet([self])

F = TypeVar("F", bound=FuncType)

The provided code snippet includes necessary dependencies for implementing the `doc` function.

Write a Python function `def doc(*docstrings: None | str | Callable, **params) -> Callable[[F], F]` to solve the following problem:

A decorator that takes docstring templates, concatenates them, and performs
string substitution on the result. The decorator adds a variable
"_docstring_components" to the wrapped callable to keep track of the
original docstring templates for potential later use. If a component should
be treated as a template, it is saved as a string; otherwise it is saved as
a callable, and its __doc__ is later dedented to obtain the docstring.

Parameters
----------
*docstrings : None, str, or callable
    The string / docstring / docstring template to be appended in order
    after default docstring under callable.
**params
    The string which would be used to format docstring template.

Here is the function:

def doc(*docstrings: None | str | Callable, **params) -> Callable[[F], F]:
    """
    A decorator that takes docstring templates, concatenates them, and
    performs string substitution on the result.

    This decorator adds a variable "_docstring_components" to the wrapped
    callable to keep track of the original docstring templates for potential
    later use. If a component should be treated as a template, it is saved
    as a string; otherwise it is saved as a callable, and its __doc__ is
    later dedented to obtain the docstring.

    Parameters
    ----------
    *docstrings : None, str, or callable
        The string / docstring / docstring template to be appended in order
        after default docstring under callable.
    **params
        The string which would be used to format docstring template.
    """

    def decorator(decorated: F) -> F:
        # collecting docstring and docstring templates
        docstring_components: list[str | Callable] = []
        if decorated.__doc__:
            docstring_components.append(dedent(decorated.__doc__))

        for docstring in docstrings:
            if docstring is None:
                continue
            if hasattr(docstring, "_docstring_components"):
                docstring_components.extend(
                    docstring._docstring_components  # pyright: ignore[reportGeneralTypeIssues] # noqa: E501
                )
            elif isinstance(docstring, str) or docstring.__doc__:
                docstring_components.append(docstring)

        params_applied = [
            component.format(**params)
            if isinstance(component, str) and len(params) > 0
            else component
            for component in docstring_components
        ]

        decorated.__doc__ = "".join(
            [
                component
                if isinstance(component, str)
                else dedent(component.__doc__ or "")
                for component in params_applied
            ]
        )

        # error: "F" has no attribute "_docstring_components"
        decorated._docstring_components = (  # type: ignore[attr-defined]
            docstring_components
        )
        return decorated

    return decorator
A decorator that takes docstring templates, concatenates them, and performs
string substitution on the result.

This decorator adds a variable "_docstring_components" to the wrapped
callable to keep track of the original docstring templates for potential
later use. If a component should be treated as a template, it is saved as a
string; otherwise it is saved as a callable, and its __doc__ is later
dedented to obtain the docstring.

Parameters
----------
*docstrings : None, str, or callable
    The string / docstring / docstring template to be appended in order
    after default docstring under callable.
**params
    The string which would be used to format docstring template.
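A sketch of template substitution and template reuse with `doc`, assuming the definition above is in scope; the functions `head` and `head_df` are hypothetical:

@doc(klass="Series")
def head(n=5):
    """Return the first {klass} rows."""

@doc(head, klass="DataFrame")
def head_df(n=5):
    ...

print(head.__doc__)     # Return the first Series rows.
print(head_df.__doc__)  # Return the first DataFrame rows. (reuses head's raw template)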
173,569
from __future__ import annotations

from functools import wraps
import inspect
from textwrap import dedent
from typing import (
    Any,
    Callable,
    Mapping,
    cast,
)
import warnings

from pandas._libs.properties import cache_readonly
from pandas._typing import (
    F,
    T,
)
from pandas.util._exceptions import find_stack_level

def indent(text: str | None, indents: int = 1) -> str:
    if not text or not isinstance(text, str):
        return ""
    jointext = "".join(["\n"] + [" "] * indents)
    return jointext.join(text.split("\n"))
null
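For reference, a quick sketch of what the `indent` helper above produces; note that the first line is left unindented and that a falsy or non-string input yields an empty string (spacing follows the one-space join string as written above):

indent("first\nsecond")             # 'first\n second'
indent("first\nsecond", indents=4)  # 'first\n    second'
indent(None)                        # ''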
173,570
from __future__ import annotations

from typing import (
    Iterable,
    Sequence,
    TypeVar,
    overload,
)

import numpy as np

from pandas._libs import lib
from pandas.core.dtypes.common import (
    is_bool,
    is_integer,
)

def _check_arg_length(fname, args, max_fname_arg_count, compat_args):
    """
    Checks whether 'args' has length of at most 'compat_args'. Raises
    a TypeError if that is not the case, similar to in Python when a
    function is called with too many arguments.
    """
    if max_fname_arg_count < 0:
        raise ValueError("'max_fname_arg_count' must be non-negative")

    if len(args) > len(compat_args):
        max_arg_count = len(compat_args) + max_fname_arg_count
        actual_arg_count = len(args) + max_fname_arg_count
        argument = "argument" if max_arg_count == 1 else "arguments"

        raise TypeError(
            f"{fname}() takes at most {max_arg_count} {argument} "
            f"({actual_arg_count} given)"
        )

def _check_for_default_values(fname, arg_val_dict, compat_args):
    """
    Check that the keys in `arg_val_dict` are mapped to their
    default values as specified in `compat_args`.

    Note that this function is to be called only when it has been
    checked that arg_val_dict.keys() is a subset of compat_args
    """
    for key in arg_val_dict:
        # try checking equality directly with '=' operator,
        # as comparison may have been overridden for the left
        # hand object
        try:
            v1 = arg_val_dict[key]
            v2 = compat_args[key]

            # check for None-ness otherwise we could end up
            # comparing a numpy array vs None
            if (v1 is not None and v2 is None) or (v1 is None and v2 is not None):
                match = False
            else:
                match = v1 == v2

            if not is_bool(match):
                raise ValueError("'match' is not a boolean")

        # could not compare them directly, so try comparison
        # using the 'is' operator
        except ValueError:
            match = arg_val_dict[key] is compat_args[key]

        if not match:
            raise ValueError(
                f"the '{key}' parameter is not supported in "
                f"the pandas implementation of {fname}()"
            )

The provided code snippet includes necessary dependencies for implementing the `validate_args` function.

Write a Python function `def validate_args(fname, args, max_fname_arg_count, compat_args) -> None` to solve the following problem:

Checks whether the length of the `*args` argument passed into a function
has at most `len(compat_args)` arguments and whether or not all of these
elements in `args` are set to their default values.

Parameters
----------
fname : str
    The name of the function being passed the `*args` parameter
args : tuple
    The `*args` parameter passed into a function
max_fname_arg_count : int
    The maximum number of arguments that the function `fname`
    can accept, excluding those in `args`. Used for displaying
    appropriate error messages. Must be non-negative.
compat_args : dict
    A dictionary of keys and their associated default values.
    In order to accommodate buggy behaviour in some versions of `numpy`,
    where a signature displayed keyword arguments but then passed those
    arguments **positionally** internally when calling downstream
    implementations, a dict ensures that the original
    order of the keyword arguments is enforced.

Raises
------
TypeError
    If `args` contains more values than there are `compat_args`
ValueError
    If `args` contains values that do not correspond to those
    of the default values specified in `compat_args`

Here is the function:

def validate_args(fname, args, max_fname_arg_count, compat_args) -> None:
    """
    Checks whether the length of the `*args` argument passed into a function
    has at most `len(compat_args)` arguments and whether or not all of these
    elements in `args` are set to their default values.

    Parameters
    ----------
    fname : str
        The name of the function being passed the `*args` parameter
    args : tuple
        The `*args` parameter passed into a function
    max_fname_arg_count : int
        The maximum number of arguments that the function `fname`
        can accept, excluding those in `args`. Used for displaying
        appropriate error messages. Must be non-negative.
    compat_args : dict
        A dictionary of keys and their associated default values.
        In order to accommodate buggy behaviour in some versions of `numpy`,
        where a signature displayed keyword arguments but then passed those
        arguments **positionally** internally when calling downstream
        implementations, a dict ensures that the original
        order of the keyword arguments is enforced.

    Raises
    ------
    TypeError
        If `args` contains more values than there are `compat_args`
    ValueError
        If `args` contains values that do not correspond to those
        of the default values specified in `compat_args`
    """
    _check_arg_length(fname, args, max_fname_arg_count, compat_args)

    # We do this so that we can provide a more informative
    # error message about the parameters that we are not
    # supporting in the pandas implementation of 'fname'
    kwargs = dict(zip(compat_args, args))
    _check_for_default_values(fname, kwargs, compat_args)
Checks whether the length of the `*args` argument passed into a function
has at most `len(compat_args)` arguments and whether or not all of these
elements in `args` are set to their default values.

Parameters
----------
fname : str
    The name of the function being passed the `*args` parameter
args : tuple
    The `*args` parameter passed into a function
max_fname_arg_count : int
    The maximum number of arguments that the function `fname`
    can accept, excluding those in `args`. Used for displaying
    appropriate error messages. Must be non-negative.
compat_args : dict
    A dictionary of keys and their associated default values.
    In order to accommodate buggy behaviour in some versions of `numpy`,
    where a signature displayed keyword arguments but then passed those
    arguments **positionally** internally when calling downstream
    implementations, a dict ensures that the original
    order of the keyword arguments is enforced.

Raises
------
TypeError
    If `args` contains more values than there are `compat_args`
ValueError
    If `args` contains values that do not correspond to those
    of the default values specified in `compat_args`
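A sketch of how `validate_args` behaves with a numpy-compat defaults dict, assuming the definitions above are in scope; `compat_args` here is illustrative:

compat_args = {"axis": None, "out": None}

validate_args("sum", (None,), 1, compat_args)  # passes: values match the defaults
validate_args("sum", (0,), 1, compat_args)
# raises ValueError: the 'axis' parameter is not supported in the pandas
# implementation of sum()
validate_args("sum", (None, None, None), 1, compat_args)
# raises TypeError: sum() takes at most 3 arguments (4 given)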
173,571
from __future__ import annotations

from typing import (
    Iterable,
    Sequence,
    TypeVar,
    overload,
)

import numpy as np

from pandas._libs import lib
from pandas.core.dtypes.common import (
    is_bool,
    is_integer,
)

def _check_arg_length(fname, args, max_fname_arg_count, compat_args):
    """
    Checks whether 'args' has length of at most 'compat_args'. Raises
    a TypeError if that is not the case, similar to in Python when a
    function is called with too many arguments.
    """
    if max_fname_arg_count < 0:
        raise ValueError("'max_fname_arg_count' must be non-negative")

    if len(args) > len(compat_args):
        max_arg_count = len(compat_args) + max_fname_arg_count
        actual_arg_count = len(args) + max_fname_arg_count
        argument = "argument" if max_arg_count == 1 else "arguments"

        raise TypeError(
            f"{fname}() takes at most {max_arg_count} {argument} "
            f"({actual_arg_count} given)"
        )

def validate_kwargs(fname, kwargs, compat_args) -> None:
    """
    Checks whether parameters passed to the **kwargs argument in a
    function `fname` are valid parameters as specified in `*compat_args`
    and whether or not they are set to their default values.

    Parameters
    ----------
    fname : str
        The name of the function being passed the `**kwargs` parameter
    kwargs : dict
        The `**kwargs` parameter passed into `fname`
    compat_args: dict
        A dictionary of keys that `kwargs` is allowed to
        have and their associated default values

    Raises
    ------
    TypeError if `kwargs` contains keys not in `compat_args`
    ValueError if `kwargs` contains keys in `compat_args` that do not
    map to the default values specified in `compat_args`
    """
    kwds = kwargs.copy()
    _check_for_invalid_keys(fname, kwargs, compat_args)
    _check_for_default_values(fname, kwds, compat_args)

The provided code snippet includes necessary dependencies for implementing the `validate_args_and_kwargs` function.

Write a Python function `def validate_args_and_kwargs(fname, args, kwargs, max_fname_arg_count, compat_args) -> None` to solve the following problem:

Checks whether parameters passed to the *args and **kwargs argument in a
function `fname` are valid parameters as specified in `*compat_args`
and whether or not they are set to their default values.

Parameters
----------
fname: str
    The name of the function being passed the `**kwargs` parameter
args: tuple
    The `*args` parameter passed into a function
kwargs: dict
    The `**kwargs` parameter passed into `fname`
max_fname_arg_count: int
    The minimum number of arguments that the function `fname`
    requires, excluding those in `args`. Used for displaying
    appropriate error messages. Must be non-negative.
compat_args: dict
    A dictionary of keys that `kwargs` is allowed
    to have and their associated default values.

Raises
------
TypeError
    If `args` contains more values than there are `compat_args` OR `kwargs`
    contains keys not in `compat_args`
ValueError
    If `args` contains values not at the default value (`None`) OR `kwargs`
    contains keys in `compat_args` that do not map to the default value as
    specified in `compat_args`

See Also
--------
validate_args : Purely args validation.
validate_kwargs : Purely kwargs validation.

Here is the function:

def validate_args_and_kwargs(
    fname, args, kwargs, max_fname_arg_count, compat_args
) -> None:
    """
    Checks whether parameters passed to the *args and **kwargs argument in a
    function `fname` are valid parameters as specified in `*compat_args`
    and whether or not they are set to their default values.

    Parameters
    ----------
    fname: str
        The name of the function being passed the `**kwargs` parameter
    args: tuple
        The `*args` parameter passed into a function
    kwargs: dict
        The `**kwargs` parameter passed into `fname`
    max_fname_arg_count: int
        The minimum number of arguments that the function `fname`
        requires, excluding those in `args`. Used for displaying
        appropriate error messages. Must be non-negative.
    compat_args: dict
        A dictionary of keys that `kwargs` is allowed
        to have and their associated default values.

    Raises
    ------
    TypeError
        If `args` contains more values than there are `compat_args` OR
        `kwargs` contains keys not in `compat_args`
    ValueError
        If `args` contains values not at the default value (`None`) OR
        `kwargs` contains keys in `compat_args` that do not map to the
        default value as specified in `compat_args`

    See Also
    --------
    validate_args : Purely args validation.
    validate_kwargs : Purely kwargs validation.
    """
    # Check that the total number of arguments passed in (i.e.
    # args and kwargs) does not exceed the length of compat_args
    _check_arg_length(
        fname, args + tuple(kwargs.values()), max_fname_arg_count, compat_args
    )

    # Check there is no overlap with the positional and keyword
    # arguments, similar to what is done in actual Python functions
    args_dict = dict(zip(compat_args, args))
    for key in args_dict:
        if key in kwargs:
            raise TypeError(
                f"{fname}() got multiple values for keyword argument '{key}'"
            )

    kwargs.update(args_dict)
    validate_kwargs(fname, kwargs, compat_args)
Checks whether parameters passed to the *args and **kwargs argument in a
function `fname` are valid parameters as specified in `*compat_args`
and whether or not they are set to their default values.

Parameters
----------
fname: str
    The name of the function being passed the `**kwargs` parameter
args: tuple
    The `*args` parameter passed into a function
kwargs: dict
    The `**kwargs` parameter passed into `fname`
max_fname_arg_count: int
    The minimum number of arguments that the function `fname`
    requires, excluding those in `args`. Used for displaying
    appropriate error messages. Must be non-negative.
compat_args: dict
    A dictionary of keys that `kwargs` is allowed
    to have and their associated default values.

Raises
------
TypeError
    If `args` contains more values than there are `compat_args` OR `kwargs`
    contains keys not in `compat_args`
ValueError
    If `args` contains values not at the default value (`None`) OR `kwargs`
    contains keys in `compat_args` that do not map to the default value as
    specified in `compat_args`

See Also
--------
validate_args : Purely args validation.
validate_kwargs : Purely kwargs validation.
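A sketch of the combined check, assuming the definitions above are in scope (note that `validate_kwargs` also relies on `_check_for_invalid_keys` and `_check_for_default_values` from the same module, which are not shown in this record); `compat_args` is illustrative:

compat_args = {"axis": None, "out": None}

validate_args_and_kwargs("take", (None,), {"out": None}, 1, compat_args)  # passes
validate_args_and_kwargs("take", (None,), {"axis": 0}, 1, compat_args)
# raises TypeError: take() got multiple values for keyword argument 'axis'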
173,572
from __future__ import annotations

from typing import (
    Iterable,
    Sequence,
    TypeVar,
    overload,
)

import numpy as np

from pandas._libs import lib
from pandas.core.dtypes.common import (
    is_bool,
    is_integer,
)

def clean_fill_method(method: str | None, allow_nearest: bool = False):
    # asfreq is compat for resampling
    if method in [None, "asfreq"]:
        return None

    if isinstance(method, str):
        method = method.lower()
        if method == "ffill":
            method = "pad"
        elif method == "bfill":
            method = "backfill"

    valid_methods = ["pad", "backfill"]
    expecting = "pad (ffill) or backfill (bfill)"
    if allow_nearest:
        valid_methods.append("nearest")
        expecting = "pad (ffill), backfill (bfill) or nearest"
    if method not in valid_methods:
        raise ValueError(f"Invalid fill method. Expecting {expecting}. Got {method}")
    return method

The provided code snippet includes necessary dependencies for implementing the `validate_fillna_kwargs` function.

Write a Python function `def validate_fillna_kwargs(value, method, validate_scalar_dict_value: bool = True)` to solve the following problem:

Validate the keyword arguments to 'fillna'.

This checks that exactly one of 'value' and 'method' is specified.
If 'method' is specified, this validates that it's a valid method.

Parameters
----------
value, method : object
    The 'value' and 'method' keyword arguments for 'fillna'.
validate_scalar_dict_value : bool, default True
    Whether to validate that 'value' is a scalar or dict. Specifically,
    validate that it is not a list or tuple.

Returns
-------
value, method : object

Here is the function:

def validate_fillna_kwargs(value, method, validate_scalar_dict_value: bool = True):
    """
    Validate the keyword arguments to 'fillna'.

    This checks that exactly one of 'value' and 'method' is specified.
    If 'method' is specified, this validates that it's a valid method.

    Parameters
    ----------
    value, method : object
        The 'value' and 'method' keyword arguments for 'fillna'.
    validate_scalar_dict_value : bool, default True
        Whether to validate that 'value' is a scalar or dict. Specifically,
        validate that it is not a list or tuple.

    Returns
    -------
    value, method : object
    """
    from pandas.core.missing import clean_fill_method

    if value is None and method is None:
        raise ValueError("Must specify a fill 'value' or 'method'.")

    if value is None and method is not None:
        method = clean_fill_method(method)

    elif value is not None and method is None:
        if validate_scalar_dict_value and isinstance(value, (list, tuple)):
            raise TypeError(
                '"value" parameter must be a scalar or dict, but '
                f'you passed a "{type(value).__name__}"'
            )

    elif value is not None and method is not None:
        raise ValueError("Cannot specify both 'value' and 'method'.")

    return value, method
Validate the keyword arguments to 'fillna'.

This checks that exactly one of 'value' and 'method' is specified.
If 'method' is specified, this validates that it's a valid method.

Parameters
----------
value, method : object
    The 'value' and 'method' keyword arguments for 'fillna'.
validate_scalar_dict_value : bool, default True
    Whether to validate that 'value' is a scalar or dict. Specifically,
    validate that it is not a list or tuple.

Returns
-------
value, method : object
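A sketch of the accepted and rejected combinations, assuming the definition above is in scope:

validate_fillna_kwargs(0, None)        # returns (0, None)
validate_fillna_kwargs(None, "ffill")  # returns (None, "pad") -- the method name is normalized
validate_fillna_kwargs(None, None)
# raises ValueError: Must specify a fill 'value' or 'method'.
validate_fillna_kwargs(0, "pad")
# raises ValueError: Cannot specify both 'value' and 'method'.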
173,573
from __future__ import annotations

from typing import (
    Iterable,
    Sequence,
    TypeVar,
    overload,
)

import numpy as np

from pandas._libs import lib
from pandas.core.dtypes.common import (
    is_bool,
    is_integer,
)

BoolishT = TypeVar("BoolishT", bool, int)

def validate_ascending(ascending: BoolishT) -> BoolishT: ...
null
173,574
from __future__ import annotations

from typing import (
    Iterable,
    Sequence,
    TypeVar,
    overload,
)

import numpy as np

from pandas._libs import lib
from pandas.core.dtypes.common import (
    is_bool,
    is_integer,
)

BoolishT = TypeVar("BoolishT", bool, int)

class Sequence(_Collection[_T_co], Reversible[_T_co], Generic[_T_co]):
    def __getitem__(self, i: int) -> _T_co: ...
    def __getitem__(self, s: slice) -> Sequence[_T_co]: ...
    def index(self, value: Any, start: int = ..., stop: int = ...) -> int: ...
    def count(self, value: Any) -> int: ...
    def __contains__(self, x: object) -> bool: ...
    def __iter__(self) -> Iterator[_T_co]: ...
    def __reversed__(self) -> Iterator[_T_co]: ...

def validate_ascending(ascending: Sequence[BoolishT]) -> list[BoolishT]: ...
null
173,575
from __future__ import annotations

from typing import (
    Iterable,
    Sequence,
    TypeVar,
    overload,
)

import numpy as np

from pandas._libs import lib
from pandas.core.dtypes.common import (
    is_bool,
    is_integer,
)

BoolishT = TypeVar("BoolishT", bool, int)

def validate_bool_kwarg(
    value: BoolishNoneT, arg_name, none_allowed: bool = True, int_allowed: bool = False
) -> BoolishNoneT:
    """
    Ensure that argument passed in arg_name can be interpreted as boolean.

    Parameters
    ----------
    value : bool
        Value to be validated.
    arg_name : str
        Name of the argument. To be reflected in the error message.
    none_allowed : bool, default True
        Whether to consider None to be a valid boolean.
    int_allowed : bool, default False
        Whether to consider integer value to be a valid boolean.

    Returns
    -------
    value
        The same value as input.

    Raises
    ------
    ValueError
        If the value is not a valid boolean.
    """
    good_value = is_bool(value)
    if none_allowed:
        good_value = good_value or value is None

    if int_allowed:
        good_value = good_value or isinstance(value, int)

    if not good_value:
        raise ValueError(
            f'For argument "{arg_name}" expected type bool, received '
            f"type {type(value).__name__}."
        )
    return value

class Sequence(_Collection[_T_co], Reversible[_T_co], Generic[_T_co]):
    def __getitem__(self, i: int) -> _T_co: ...
    def __getitem__(self, s: slice) -> Sequence[_T_co]: ...
    # Mixin methods
    def index(self, value: Any, start: int = ..., stop: int = ...) -> int: ...
    def count(self, value: Any) -> int: ...
    def __contains__(self, x: object) -> bool: ...
    def __iter__(self) -> Iterator[_T_co]: ...
    def __reversed__(self) -> Iterator[_T_co]: ...

The provided code snippet includes necessary dependencies for implementing the `validate_ascending` function.

Write a Python function `def validate_ascending(ascending: bool | int | Sequence[BoolishT]) -> bool | int | list[BoolishT]` to solve the following problem:

Validate ``ascending`` kwargs for ``sort_index`` method.

Here is the function:

def validate_ascending(
    ascending: bool | int | Sequence[BoolishT],
) -> bool | int | list[BoolishT]:
    """Validate ``ascending`` kwargs for ``sort_index`` method."""
    kwargs = {"none_allowed": False, "int_allowed": True}
    if not isinstance(ascending, Sequence):
        return validate_bool_kwarg(ascending, "ascending", **kwargs)

    return [validate_bool_kwarg(item, "ascending", **kwargs) for item in ascending]
Validate ``ascending`` kwargs for ``sort_index`` method.
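A sketch of the scalar and sequence paths, assuming the definition above is in scope:

validate_ascending(True)           # True
validate_ascending(1)              # 1 -- ints are accepted as booleans here
validate_ascending([True, False])  # [True, False] -- each item validated
validate_ascending(None)
# raises ValueError: For argument "ascending" expected type bool, received type NoneType.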
173,576
from __future__ import annotations

from typing import (
    Iterable,
    Sequence,
    TypeVar,
    overload,
)

import numpy as np

from pandas._libs import lib
from pandas.core.dtypes.common import (
    is_bool,
    is_integer,
)

The provided code snippet includes necessary dependencies for implementing the `validate_endpoints` function.

Write a Python function `def validate_endpoints(closed: str | None) -> tuple[bool, bool]` to solve the following problem:

Check that the `closed` argument is among [None, "left", "right"]

Parameters
----------
closed : {None, "left", "right"}

Returns
-------
left_closed : bool
right_closed : bool

Raises
------
ValueError : if argument is not among valid values

Here is the function:

def validate_endpoints(closed: str | None) -> tuple[bool, bool]:
    """
    Check that the `closed` argument is among [None, "left", "right"]

    Parameters
    ----------
    closed : {None, "left", "right"}

    Returns
    -------
    left_closed : bool
    right_closed : bool

    Raises
    ------
    ValueError : if argument is not among valid values
    """
    left_closed = False
    right_closed = False

    if closed is None:
        left_closed = True
        right_closed = True
    elif closed == "left":
        left_closed = True
    elif closed == "right":
        right_closed = True
    else:
        raise ValueError("Closed has to be either 'left', 'right' or None")

    return left_closed, right_closed
Check that the `closed` argument is among [None, "left", "right"]

Parameters
----------
closed : {None, "left", "right"}

Returns
-------
left_closed : bool
right_closed : bool

Raises
------
ValueError : if argument is not among valid values
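The mapping from `closed` to the returned flags, assuming the definition above is in scope:

validate_endpoints(None)     # (True, True)
validate_endpoints("left")   # (True, False)
validate_endpoints("right")  # (False, True)
validate_endpoints("both")
# raises ValueError: Closed has to be either 'left', 'right' or None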
173,577
from __future__ import annotations

from typing import (
    Iterable,
    Sequence,
    TypeVar,
    overload,
)

import numpy as np

from pandas._libs import lib
from pandas.core.dtypes.common import (
    is_bool,
    is_integer,
)

The provided code snippet includes necessary dependencies for implementing the `validate_inclusive` function.

Write a Python function `def validate_inclusive(inclusive: str | None) -> tuple[bool, bool]` to solve the following problem:

Check that the `inclusive` argument is among {"both", "neither", "left", "right"}.

Parameters
----------
inclusive : {"both", "neither", "left", "right"}

Returns
-------
left_right_inclusive : tuple[bool, bool]

Raises
------
ValueError : if argument is not among valid values

Here is the function:

def validate_inclusive(inclusive: str | None) -> tuple[bool, bool]:
    """
    Check that the `inclusive` argument is among
    {"both", "neither", "left", "right"}.

    Parameters
    ----------
    inclusive : {"both", "neither", "left", "right"}

    Returns
    -------
    left_right_inclusive : tuple[bool, bool]

    Raises
    ------
    ValueError : if argument is not among valid values
    """
    left_right_inclusive: tuple[bool, bool] | None = None

    if isinstance(inclusive, str):
        left_right_inclusive = {
            "both": (True, True),
            "left": (True, False),
            "right": (False, True),
            "neither": (False, False),
        }.get(inclusive)

    if left_right_inclusive is None:
        raise ValueError(
            "Inclusive has to be either 'both', 'neither', 'left' or 'right'"
        )

    return left_right_inclusive
Check that the `inclusive` argument is among {"both", "neither", "left", "right"}.

Parameters
----------
inclusive : {"both", "neither", "left", "right"}

Returns
-------
left_right_inclusive : tuple[bool, bool]

Raises
------
ValueError : if argument is not among valid values
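The mapping from `inclusive` to the returned flags, assuming the definition above is in scope:

validate_inclusive("both")     # (True, True)
validate_inclusive("left")     # (True, False)
validate_inclusive("right")    # (False, True)
validate_inclusive("neither")  # (False, False)
validate_inclusive(None)
# raises ValueError: Inclusive has to be either 'both', 'neither', 'left' or 'right'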
173,578
from __future__ import annotations

from typing import (
    Iterable,
    Sequence,
    TypeVar,
    overload,
)

import numpy as np

from pandas._libs import lib
from pandas.core.dtypes.common import (
    is_bool,
    is_integer,
)

The provided code snippet includes necessary dependencies for implementing the `validate_insert_loc` function.

Write a Python function `def validate_insert_loc(loc: int, length: int) -> int` to solve the following problem:

Check that we have an integer between -length and length, inclusive.

Standardize negative loc to within [0, length].

The exceptions we raise on failure match np.insert.

Here is the function:

def validate_insert_loc(loc: int, length: int) -> int:
    """
    Check that we have an integer between -length and length, inclusive.

    Standardize negative loc to within [0, length].

    The exceptions we raise on failure match np.insert.
    """
    if not is_integer(loc):
        raise TypeError(f"loc must be an integer between -{length} and {length}")

    if loc < 0:
        loc += length
    if not 0 <= loc <= length:
        raise IndexError(f"loc must be an integer between -{length} and {length}")
    return loc
Check that we have an integer between -length and length, inclusive. Standardize negative loc to within [0, length]. The exceptions we raise on failure match np.insert.
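A sketch of the normalization and the np.insert-style errors, assuming the definition above is in scope:

validate_insert_loc(2, 5)    # 2
validate_insert_loc(-1, 5)   # 4 -- negative locations are shifted by length
validate_insert_loc(6, 5)    # raises IndexError: loc must be an integer between -5 and 5
validate_insert_loc(1.5, 5)  # raises TypeError: loc must be an integer between -5 and 5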
173,579
from __future__ import annotations

import contextlib
import inspect
import os
import re
from typing import Generator
import warnings

class Generator(Iterator[_T_co], Generic[_T_co, _T_contra, _V_co]):
    def __next__(self) -> _T_co: ...
    def send(self, __value: _T_contra) -> _T_co: ...
    def throw(
        self,
        __typ: Type[BaseException],
        __val: Union[BaseException, object] = ...,
        __tb: Optional[TracebackType] = ...,
    ) -> _T_co: ...
    def throw(self, __typ: BaseException, __val: None = ..., __tb: Optional[TracebackType] = ...) -> _T_co: ...
    def close(self) -> None: ...
    def __iter__(self) -> Generator[_T_co, _T_contra, _V_co]: ...
    def gi_code(self) -> CodeType: ...
    def gi_frame(self) -> FrameType: ...
    def gi_running(self) -> bool: ...
    def gi_yieldfrom(self) -> Optional[Generator[Any, Any, Any]]: ...

The provided code snippet includes necessary dependencies for implementing the `rewrite_exception` function.

Write a Python function `def rewrite_exception(old_name: str, new_name: str) -> Generator[None, None, None]` to solve the following problem:

Rewrite the message of an exception.

Here is the function:

@contextlib.contextmanager
def rewrite_exception(old_name: str, new_name: str) -> Generator[None, None, None]:
    """
    Rewrite the message of an exception.
    """
    try:
        yield
    except Exception as err:
        if not err.args:
            raise
        msg = str(err.args[0])
        msg = msg.replace(old_name, new_name)
        args: tuple[str, ...] = (msg,)
        if len(err.args) > 1:
            args = args + err.args[1:]
        err.args = args
        raise
Rewrite the message of an exception.
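A quick sketch of the context manager in use, assuming the definition above with the `contextlib.contextmanager` wrapping (as in pandas, where the decorator makes the generator usable in a `with` block):

try:
    with rewrite_exception("Index", "DatetimeIndex"):
        raise TypeError("Index does not support mutable operations")
except TypeError as err:
    print(err)  # DatetimeIndex does not support mutable operations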