content
stringlengths
1
103k
path
stringlengths
8
216
filename
stringlengths
2
179
language
stringclasses
15 values
size_bytes
int64
2
189k
quality_score
float64
0.5
0.95
complexity
float64
0
1
documentation_ratio
float64
0
1
repository
stringclasses
5 values
stars
int64
0
1k
created_date
stringdate
2023-07-10 19:21:08
2025-07-09 19:11:45
license
stringclasses
4 values
is_test
bool
2 classes
file_hash
stringlengths
32
32
"""\n:mod:``pandas.io.xml`` is a module for reading XML.\n"""\n\nfrom __future__ import annotations\n\nimport io\nfrom os import PathLike\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n)\nimport warnings\n\nfrom pandas._libs import lib\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.errors import (\n AbstractMethodError,\n ParserError,\n)\nfrom pandas.util._decorators import doc\nfrom pandas.util._exceptions import find_stack_level\nfrom pandas.util._validators import check_dtype_backend\n\nfrom pandas.core.dtypes.common import is_list_like\n\nfrom pandas.core.shared_docs import _shared_docs\n\nfrom pandas.io.common import (\n file_exists,\n get_handle,\n infer_compression,\n is_file_like,\n is_fsspec_url,\n is_url,\n stringify_path,\n)\nfrom pandas.io.parsers import TextParser\n\nif TYPE_CHECKING:\n from collections.abc import Sequence\n from xml.etree.ElementTree import Element\n\n from lxml import etree\n\n from pandas._typing import (\n CompressionOptions,\n ConvertersArg,\n DtypeArg,\n DtypeBackend,\n FilePath,\n ParseDatesArg,\n ReadBuffer,\n StorageOptions,\n XMLParsers,\n )\n\n from pandas import DataFrame\n\n\n@doc(\n storage_options=_shared_docs["storage_options"],\n decompression_options=_shared_docs["decompression_options"] % "path_or_buffer",\n)\nclass _XMLFrameParser:\n """\n Internal subclass to parse XML into DataFrames.\n\n Parameters\n ----------\n path_or_buffer : a valid JSON ``str``, path object or file-like object\n Any valid string path is acceptable. The string could be a URL. Valid\n URL schemes include http, ftp, s3, and file.\n\n xpath : str or regex\n The ``XPath`` expression to parse required set of nodes for\n migration to :class:`~pandas.DataFrame`. 
``etree`` supports limited ``XPath``.\n\n namespaces : dict\n The namespaces defined in XML document (``xmlns:namespace='URI'``)\n as dicts with key being namespace and value the URI.\n\n elems_only : bool\n Parse only the child elements at the specified ``xpath``.\n\n attrs_only : bool\n Parse only the attributes at the specified ``xpath``.\n\n names : list\n Column names for :class:`~pandas.DataFrame` of parsed XML data.\n\n dtype : dict\n Data type for data or columns. E.g. {{'a': np.float64,\n 'b': np.int32, 'c': 'Int64'}}\n\n .. versionadded:: 1.5.0\n\n converters : dict, optional\n Dict of functions for converting values in certain columns. Keys can\n either be integers or column labels.\n\n .. versionadded:: 1.5.0\n\n parse_dates : bool or list of int or names or list of lists or dict\n Converts either index or select columns to datetimes\n\n .. versionadded:: 1.5.0\n\n encoding : str\n Encoding of xml object or document.\n\n stylesheet : str or file-like\n URL, file, file-like object, or a raw string containing XSLT,\n ``etree`` does not support XSLT but retained for consistency.\n\n iterparse : dict, optional\n Dict with row element as key and list of descendant elements\n and/or attributes as value to be retrieved in iterparsing of\n XML document.\n\n .. versionadded:: 1.5.0\n\n {decompression_options}\n\n .. 
versionchanged:: 1.4.0 Zstandard support.\n\n {storage_options}\n\n See also\n --------\n pandas.io.xml._EtreeFrameParser\n pandas.io.xml._LxmlFrameParser\n\n Notes\n -----\n To subclass this class effectively you must override the following methods:`\n * :func:`parse_data`\n * :func:`_parse_nodes`\n * :func:`_iterparse_nodes`\n * :func:`_parse_doc`\n * :func:`_validate_names`\n * :func:`_validate_path`\n\n\n See each method's respective documentation for details on their\n functionality.\n """\n\n def __init__(\n self,\n path_or_buffer: FilePath | ReadBuffer[bytes] | ReadBuffer[str],\n xpath: str,\n namespaces: dict[str, str] | None,\n elems_only: bool,\n attrs_only: bool,\n names: Sequence[str] | None,\n dtype: DtypeArg | None,\n converters: ConvertersArg | None,\n parse_dates: ParseDatesArg | None,\n encoding: str | None,\n stylesheet: FilePath | ReadBuffer[bytes] | ReadBuffer[str] | None,\n iterparse: dict[str, list[str]] | None,\n compression: CompressionOptions,\n storage_options: StorageOptions,\n ) -> None:\n self.path_or_buffer = path_or_buffer\n self.xpath = xpath\n self.namespaces = namespaces\n self.elems_only = elems_only\n self.attrs_only = attrs_only\n self.names = names\n self.dtype = dtype\n self.converters = converters\n self.parse_dates = parse_dates\n self.encoding = encoding\n self.stylesheet = stylesheet\n self.iterparse = iterparse\n self.is_style = None\n self.compression: CompressionOptions = compression\n self.storage_options = storage_options\n\n def parse_data(self) -> list[dict[str, str | None]]:\n """\n Parse xml data.\n\n This method will call the other internal methods to\n validate ``xpath``, names, parse and return specific nodes.\n """\n\n raise AbstractMethodError(self)\n\n def _parse_nodes(self, elems: list[Any]) -> list[dict[str, str | None]]:\n """\n Parse xml nodes.\n\n This method will parse the children and attributes of elements\n in ``xpath``, conditionally for only elements, only attributes\n or both while optionally 
renaming node names.\n\n Raises\n ------\n ValueError\n * If only elements and only attributes are specified.\n\n Notes\n -----\n Namespace URIs will be removed from return node values. Also,\n elements with missing children or attributes compared to siblings\n will have optional keys filled with None values.\n """\n\n dicts: list[dict[str, str | None]]\n\n if self.elems_only and self.attrs_only:\n raise ValueError("Either element or attributes can be parsed not both.")\n if self.elems_only:\n if self.names:\n dicts = [\n {\n **(\n {el.tag: el.text}\n if el.text and not el.text.isspace()\n else {}\n ),\n **{\n nm: ch.text if ch.text else None\n for nm, ch in zip(self.names, el.findall("*"))\n },\n }\n for el in elems\n ]\n else:\n dicts = [\n {ch.tag: ch.text if ch.text else None for ch in el.findall("*")}\n for el in elems\n ]\n\n elif self.attrs_only:\n dicts = [\n {k: v if v else None for k, v in el.attrib.items()} for el in elems\n ]\n\n elif self.names:\n dicts = [\n {\n **el.attrib,\n **({el.tag: el.text} if el.text and not el.text.isspace() else {}),\n **{\n nm: ch.text if ch.text else None\n for nm, ch in zip(self.names, el.findall("*"))\n },\n }\n for el in elems\n ]\n\n else:\n dicts = [\n {\n **el.attrib,\n **({el.tag: el.text} if el.text and not el.text.isspace() else {}),\n **{ch.tag: ch.text if ch.text else None for ch in el.findall("*")},\n }\n for el in elems\n ]\n\n dicts = [\n {k.split("}")[1] if "}" in k else k: v for k, v in d.items()} for d in dicts\n ]\n\n keys = list(dict.fromkeys([k for d in dicts for k in d.keys()]))\n dicts = [{k: d[k] if k in d.keys() else None for k in keys} for d in dicts]\n\n if self.names:\n dicts = [dict(zip(self.names, d.values())) for d in dicts]\n\n return dicts\n\n def _iterparse_nodes(self, iterparse: Callable) -> list[dict[str, str | None]]:\n """\n Iterparse xml nodes.\n\n This method will read in local disk, decompressed XML files for elements\n and underlying descendants using iterparse, a method to iterate 
through\n an XML tree without holding entire XML tree in memory.\n\n Raises\n ------\n TypeError\n * If ``iterparse`` is not a dict or its dict value is not list-like.\n ParserError\n * If ``path_or_buffer`` is not a physical file on disk or file-like object.\n * If no data is returned from selected items in ``iterparse``.\n\n Notes\n -----\n Namespace URIs will be removed from return node values. Also,\n elements with missing children or attributes in submitted list\n will have optional keys filled with None values.\n """\n\n dicts: list[dict[str, str | None]] = []\n row: dict[str, str | None] | None = None\n\n if not isinstance(self.iterparse, dict):\n raise TypeError(\n f"{type(self.iterparse).__name__} is not a valid type for iterparse"\n )\n\n row_node = next(iter(self.iterparse.keys())) if self.iterparse else ""\n if not is_list_like(self.iterparse[row_node]):\n raise TypeError(\n f"{type(self.iterparse[row_node])} is not a valid type "\n "for value in iterparse"\n )\n\n if (not hasattr(self.path_or_buffer, "read")) and (\n not isinstance(self.path_or_buffer, (str, PathLike))\n or is_url(self.path_or_buffer)\n or is_fsspec_url(self.path_or_buffer)\n or (\n isinstance(self.path_or_buffer, str)\n and self.path_or_buffer.startswith(("<?xml", "<"))\n )\n or infer_compression(self.path_or_buffer, "infer") is not None\n ):\n raise ParserError(\n "iterparse is designed for large XML files that are fully extracted on "\n "local disk and not as compressed files or online sources."\n )\n\n iterparse_repeats = len(self.iterparse[row_node]) != len(\n set(self.iterparse[row_node])\n )\n\n for event, elem in iterparse(self.path_or_buffer, events=("start", "end")):\n curr_elem = elem.tag.split("}")[1] if "}" in elem.tag else elem.tag\n\n if event == "start":\n if curr_elem == row_node:\n row = {}\n\n if row is not None:\n if self.names and iterparse_repeats:\n for col, nm in zip(self.iterparse[row_node], self.names):\n if curr_elem == col:\n elem_val = elem.text if 
elem.text else None\n if elem_val not in row.values() and nm not in row:\n row[nm] = elem_val\n\n if col in elem.attrib:\n if elem.attrib[col] not in row.values() and nm not in row:\n row[nm] = elem.attrib[col]\n else:\n for col in self.iterparse[row_node]:\n if curr_elem == col:\n row[col] = elem.text if elem.text else None\n if col in elem.attrib:\n row[col] = elem.attrib[col]\n\n if event == "end":\n if curr_elem == row_node and row is not None:\n dicts.append(row)\n row = None\n\n elem.clear()\n if hasattr(elem, "getprevious"):\n while (\n elem.getprevious() is not None and elem.getparent() is not None\n ):\n del elem.getparent()[0]\n\n if dicts == []:\n raise ParserError("No result from selected items in iterparse.")\n\n keys = list(dict.fromkeys([k for d in dicts for k in d.keys()]))\n dicts = [{k: d[k] if k in d.keys() else None for k in keys} for d in dicts]\n\n if self.names:\n dicts = [dict(zip(self.names, d.values())) for d in dicts]\n\n return dicts\n\n def _validate_path(self) -> list[Any]:\n """\n Validate ``xpath``.\n\n This method checks for syntax, evaluation, or empty nodes return.\n\n Raises\n ------\n SyntaxError\n * If xpah is not supported or issues with namespaces.\n\n ValueError\n * If xpah does not return any nodes.\n """\n\n raise AbstractMethodError(self)\n\n def _validate_names(self) -> None:\n """\n Validate names.\n\n This method will check if names is a list-like and aligns\n with length of parse nodes.\n\n Raises\n ------\n ValueError\n * If value is not a list and less then length of nodes.\n """\n raise AbstractMethodError(self)\n\n def _parse_doc(\n self, raw_doc: FilePath | ReadBuffer[bytes] | ReadBuffer[str]\n ) -> Element | etree._Element:\n """\n Build tree from path_or_buffer.\n\n This method will parse XML object into tree\n either from string/bytes or file location.\n """\n raise AbstractMethodError(self)\n\n\nclass _EtreeFrameParser(_XMLFrameParser):\n """\n Internal class to parse XML into DataFrames with the Python\n 
standard library XML module: `xml.etree.ElementTree`.\n """\n\n def parse_data(self) -> list[dict[str, str | None]]:\n from xml.etree.ElementTree import iterparse\n\n if self.stylesheet is not None:\n raise ValueError(\n "To use stylesheet, you need lxml installed and selected as parser."\n )\n\n if self.iterparse is None:\n self.xml_doc = self._parse_doc(self.path_or_buffer)\n elems = self._validate_path()\n\n self._validate_names()\n\n xml_dicts: list[dict[str, str | None]] = (\n self._parse_nodes(elems)\n if self.iterparse is None\n else self._iterparse_nodes(iterparse)\n )\n\n return xml_dicts\n\n def _validate_path(self) -> list[Any]:\n """\n Notes\n -----\n ``etree`` supports limited ``XPath``. If user attempts a more complex\n expression syntax error will raise.\n """\n\n msg = (\n "xpath does not return any nodes or attributes. "\n "Be sure to specify in `xpath` the parent nodes of "\n "children and attributes to parse. "\n "If document uses namespaces denoted with "\n "xmlns, be sure to define namespaces and "\n "use them in xpath."\n )\n try:\n elems = self.xml_doc.findall(self.xpath, namespaces=self.namespaces)\n children = [ch for el in elems for ch in el.findall("*")]\n attrs = {k: v for el in elems for k, v in el.attrib.items()}\n\n if elems is None:\n raise ValueError(msg)\n\n if elems is not None:\n if self.elems_only and children == []:\n raise ValueError(msg)\n if self.attrs_only and attrs == {}:\n raise ValueError(msg)\n if children == [] and attrs == {}:\n raise ValueError(msg)\n\n except (KeyError, SyntaxError):\n raise SyntaxError(\n "You have used an incorrect or unsupported XPath "\n "expression for etree library or you used an "\n "undeclared namespace prefix."\n )\n\n return elems\n\n def _validate_names(self) -> None:\n children: list[Any]\n\n if self.names:\n if self.iterparse:\n children = self.iterparse[next(iter(self.iterparse))]\n else:\n parent = self.xml_doc.find(self.xpath, namespaces=self.namespaces)\n children = 
parent.findall("*") if parent is not None else []\n\n if is_list_like(self.names):\n if len(self.names) < len(children):\n raise ValueError(\n "names does not match length of child elements in xpath."\n )\n else:\n raise TypeError(\n f"{type(self.names).__name__} is not a valid type for names"\n )\n\n def _parse_doc(\n self, raw_doc: FilePath | ReadBuffer[bytes] | ReadBuffer[str]\n ) -> Element:\n from xml.etree.ElementTree import (\n XMLParser,\n parse,\n )\n\n handle_data = get_data_from_filepath(\n filepath_or_buffer=raw_doc,\n encoding=self.encoding,\n compression=self.compression,\n storage_options=self.storage_options,\n )\n\n with preprocess_data(handle_data) as xml_data:\n curr_parser = XMLParser(encoding=self.encoding)\n document = parse(xml_data, parser=curr_parser)\n\n return document.getroot()\n\n\nclass _LxmlFrameParser(_XMLFrameParser):\n """\n Internal class to parse XML into :class:`~pandas.DataFrame` with third-party\n full-featured XML library, ``lxml``, that supports\n ``XPath`` 1.0 and XSLT 1.0.\n """\n\n def parse_data(self) -> list[dict[str, str | None]]:\n """\n Parse xml data.\n\n This method will call the other internal methods to\n validate ``xpath``, names, optionally parse and run XSLT,\n and parse original or transformed XML and return specific nodes.\n """\n from lxml.etree import iterparse\n\n if self.iterparse is None:\n self.xml_doc = self._parse_doc(self.path_or_buffer)\n\n if self.stylesheet:\n self.xsl_doc = self._parse_doc(self.stylesheet)\n self.xml_doc = self._transform_doc()\n\n elems = self._validate_path()\n\n self._validate_names()\n\n xml_dicts: list[dict[str, str | None]] = (\n self._parse_nodes(elems)\n if self.iterparse is None\n else self._iterparse_nodes(iterparse)\n )\n\n return xml_dicts\n\n def _validate_path(self) -> list[Any]:\n msg = (\n "xpath does not return any nodes or attributes. "\n "Be sure to specify in `xpath` the parent nodes of "\n "children and attributes to parse. 
"\n "If document uses namespaces denoted with "\n "xmlns, be sure to define namespaces and "\n "use them in xpath."\n )\n\n elems = self.xml_doc.xpath(self.xpath, namespaces=self.namespaces)\n children = [ch for el in elems for ch in el.xpath("*")]\n attrs = {k: v for el in elems for k, v in el.attrib.items()}\n\n if elems == []:\n raise ValueError(msg)\n\n if elems != []:\n if self.elems_only and children == []:\n raise ValueError(msg)\n if self.attrs_only and attrs == {}:\n raise ValueError(msg)\n if children == [] and attrs == {}:\n raise ValueError(msg)\n\n return elems\n\n def _validate_names(self) -> None:\n children: list[Any]\n\n if self.names:\n if self.iterparse:\n children = self.iterparse[next(iter(self.iterparse))]\n else:\n children = self.xml_doc.xpath(\n self.xpath + "[1]/*", namespaces=self.namespaces\n )\n\n if is_list_like(self.names):\n if len(self.names) < len(children):\n raise ValueError(\n "names does not match length of child elements in xpath."\n )\n else:\n raise TypeError(\n f"{type(self.names).__name__} is not a valid type for names"\n )\n\n def _parse_doc(\n self, raw_doc: FilePath | ReadBuffer[bytes] | ReadBuffer[str]\n ) -> etree._Element:\n from lxml.etree import (\n XMLParser,\n fromstring,\n parse,\n )\n\n handle_data = get_data_from_filepath(\n filepath_or_buffer=raw_doc,\n encoding=self.encoding,\n compression=self.compression,\n storage_options=self.storage_options,\n )\n\n with preprocess_data(handle_data) as xml_data:\n curr_parser = XMLParser(encoding=self.encoding)\n\n if isinstance(xml_data, io.StringIO):\n if self.encoding is None:\n raise TypeError(\n "Can not pass encoding None when input is StringIO."\n )\n\n document = fromstring(\n xml_data.getvalue().encode(self.encoding), parser=curr_parser\n )\n else:\n document = parse(xml_data, parser=curr_parser)\n\n return document\n\n def _transform_doc(self) -> etree._XSLTResultTree:\n """\n Transform original tree using stylesheet.\n\n This method will transform original 
xml using XSLT script into\n am ideally flatter xml document for easier parsing and migration\n to Data Frame.\n """\n from lxml.etree import XSLT\n\n transformer = XSLT(self.xsl_doc)\n new_doc = transformer(self.xml_doc)\n\n return new_doc\n\n\ndef get_data_from_filepath(\n filepath_or_buffer: FilePath | bytes | ReadBuffer[bytes] | ReadBuffer[str],\n encoding: str | None,\n compression: CompressionOptions,\n storage_options: StorageOptions,\n) -> str | bytes | ReadBuffer[bytes] | ReadBuffer[str]:\n """\n Extract raw XML data.\n\n The method accepts three input types:\n 1. filepath (string-like)\n 2. file-like object (e.g. open file object, StringIO)\n 3. XML string or bytes\n\n This method turns (1) into (2) to simplify the rest of the processing.\n It returns input types (2) and (3) unchanged.\n """\n if not isinstance(filepath_or_buffer, bytes):\n filepath_or_buffer = stringify_path(filepath_or_buffer)\n\n if (\n isinstance(filepath_or_buffer, str)\n and not filepath_or_buffer.startswith(("<?xml", "<"))\n ) and (\n not isinstance(filepath_or_buffer, str)\n or is_url(filepath_or_buffer)\n or is_fsspec_url(filepath_or_buffer)\n or file_exists(filepath_or_buffer)\n ):\n with get_handle(\n filepath_or_buffer,\n "r",\n encoding=encoding,\n compression=compression,\n storage_options=storage_options,\n ) as handle_obj:\n filepath_or_buffer = (\n handle_obj.handle.read()\n if hasattr(handle_obj.handle, "read")\n else handle_obj.handle\n )\n\n return filepath_or_buffer\n\n\ndef preprocess_data(data) -> io.StringIO | io.BytesIO:\n """\n Convert extracted raw data.\n\n This method will return underlying data of extracted XML content.\n The data either has a `read` attribute (e.g. 
a file object or a\n StringIO/BytesIO) or is a string or bytes that is an XML document.\n """\n\n if isinstance(data, str):\n data = io.StringIO(data)\n\n elif isinstance(data, bytes):\n data = io.BytesIO(data)\n\n return data\n\n\ndef _data_to_frame(data, **kwargs) -> DataFrame:\n """\n Convert parsed data to Data Frame.\n\n This method will bind xml dictionary data of keys and values\n into named columns of Data Frame using the built-in TextParser\n class that build Data Frame and infers specific dtypes.\n """\n\n tags = next(iter(data))\n nodes = [list(d.values()) for d in data]\n\n try:\n with TextParser(nodes, names=tags, **kwargs) as tp:\n return tp.read()\n except ParserError:\n raise ParserError(\n "XML document may be too complex for import. "\n "Try to flatten document and use distinct "\n "element and attribute names."\n )\n\n\ndef _parse(\n path_or_buffer: FilePath | ReadBuffer[bytes] | ReadBuffer[str],\n xpath: str,\n namespaces: dict[str, str] | None,\n elems_only: bool,\n attrs_only: bool,\n names: Sequence[str] | None,\n dtype: DtypeArg | None,\n converters: ConvertersArg | None,\n parse_dates: ParseDatesArg | None,\n encoding: str | None,\n parser: XMLParsers,\n stylesheet: FilePath | ReadBuffer[bytes] | ReadBuffer[str] | None,\n iterparse: dict[str, list[str]] | None,\n compression: CompressionOptions,\n storage_options: StorageOptions,\n dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,\n **kwargs,\n) -> DataFrame:\n """\n Call internal parsers.\n\n This method will conditionally call internal parsers:\n LxmlFrameParser and/or EtreeParser.\n\n Raises\n ------\n ImportError\n * If lxml is not installed if selected as parser.\n\n ValueError\n * If parser is not lxml or etree.\n """\n\n p: _EtreeFrameParser | _LxmlFrameParser\n\n if isinstance(path_or_buffer, str) and not any(\n [\n is_file_like(path_or_buffer),\n file_exists(path_or_buffer),\n is_url(path_or_buffer),\n is_fsspec_url(path_or_buffer),\n ]\n ):\n warnings.warn(\n "Passing 
literal xml to 'read_xml' is deprecated and "\n "will be removed in a future version. To read from a "\n "literal string, wrap it in a 'StringIO' object.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n if parser == "lxml":\n lxml = import_optional_dependency("lxml.etree", errors="ignore")\n\n if lxml is not None:\n p = _LxmlFrameParser(\n path_or_buffer,\n xpath,\n namespaces,\n elems_only,\n attrs_only,\n names,\n dtype,\n converters,\n parse_dates,\n encoding,\n stylesheet,\n iterparse,\n compression,\n storage_options,\n )\n else:\n raise ImportError("lxml not found, please install or use the etree parser.")\n\n elif parser == "etree":\n p = _EtreeFrameParser(\n path_or_buffer,\n xpath,\n namespaces,\n elems_only,\n attrs_only,\n names,\n dtype,\n converters,\n parse_dates,\n encoding,\n stylesheet,\n iterparse,\n compression,\n storage_options,\n )\n else:\n raise ValueError("Values for parser can only be lxml or etree.")\n\n data_dicts = p.parse_data()\n\n return _data_to_frame(\n data=data_dicts,\n dtype=dtype,\n converters=converters,\n parse_dates=parse_dates,\n dtype_backend=dtype_backend,\n **kwargs,\n )\n\n\n@doc(\n storage_options=_shared_docs["storage_options"],\n decompression_options=_shared_docs["decompression_options"] % "path_or_buffer",\n)\ndef read_xml(\n path_or_buffer: FilePath | ReadBuffer[bytes] | ReadBuffer[str],\n *,\n xpath: str = "./*",\n namespaces: dict[str, str] | None = None,\n elems_only: bool = False,\n attrs_only: bool = False,\n names: Sequence[str] | None = None,\n dtype: DtypeArg | None = None,\n converters: ConvertersArg | None = None,\n parse_dates: ParseDatesArg | None = None,\n # encoding can not be None for lxml and StringIO input\n encoding: str | None = "utf-8",\n parser: XMLParsers = "lxml",\n stylesheet: FilePath | ReadBuffer[bytes] | ReadBuffer[str] | None = None,\n iterparse: dict[str, list[str]] | None = None,\n compression: CompressionOptions = "infer",\n storage_options: StorageOptions | None = None,\n 
dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,\n) -> DataFrame:\n r"""\n Read XML document into a :class:`~pandas.DataFrame` object.\n\n .. versionadded:: 1.3.0\n\n Parameters\n ----------\n path_or_buffer : str, path object, or file-like object\n String, path object (implementing ``os.PathLike[str]``), or file-like\n object implementing a ``read()`` function. The string can be any valid XML\n string or a path. The string can further be a URL. Valid URL schemes\n include http, ftp, s3, and file.\n\n .. deprecated:: 2.1.0\n Passing xml literal strings is deprecated.\n Wrap literal xml input in ``io.StringIO`` or ``io.BytesIO`` instead.\n\n xpath : str, optional, default './\*'\n The ``XPath`` to parse required set of nodes for migration to\n :class:`~pandas.DataFrame`.``XPath`` should return a collection of elements\n and not a single element. Note: The ``etree`` parser supports limited ``XPath``\n expressions. For more complex ``XPath``, use ``lxml`` which requires\n installation.\n\n namespaces : dict, optional\n The namespaces defined in XML document as dicts with key being\n namespace prefix and value the URI. There is no need to include all\n namespaces in XML, only the ones used in ``xpath`` expression.\n Note: if XML document uses default namespace denoted as\n `xmlns='<URI>'` without a prefix, you must assign any temporary\n namespace prefix such as 'doc' to the URI in order to parse\n underlying nodes and/or attributes. For example, ::\n\n namespaces = {{"doc": "https://example.com"}}\n\n elems_only : bool, optional, default False\n Parse only the child elements at the specified ``xpath``. By default,\n all child elements and non-empty text nodes are returned.\n\n attrs_only : bool, optional, default False\n Parse only the attributes at the specified ``xpath``.\n By default, all attributes are returned.\n\n names : list-like, optional\n Column names for DataFrame of parsed XML data. 
Use this parameter to\n rename original element names and distinguish same named elements and\n attributes.\n\n dtype : Type name or dict of column -> type, optional\n Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32,\n 'c': 'Int64'}}\n Use `str` or `object` together with suitable `na_values` settings\n to preserve and not interpret dtype.\n If converters are specified, they will be applied INSTEAD\n of dtype conversion.\n\n .. versionadded:: 1.5.0\n\n converters : dict, optional\n Dict of functions for converting values in certain columns. Keys can either\n be integers or column labels.\n\n .. versionadded:: 1.5.0\n\n parse_dates : bool or list of int or names or list of lists or dict, default False\n Identifiers to parse index or columns to datetime. The behavior is as follows:\n\n * boolean. If True -> try parsing the index.\n * list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3\n each as a separate date column.\n * list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as\n a single date column.\n * dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call\n result 'foo'\n\n .. versionadded:: 1.5.0\n\n encoding : str, optional, default 'utf-8'\n Encoding of XML document.\n\n parser : {{'lxml','etree'}}, default 'lxml'\n Parser module to use for retrieval of data. Only 'lxml' and\n 'etree' are supported. With 'lxml' more complex ``XPath`` searches\n and ability to use XSLT stylesheet are supported.\n\n stylesheet : str, path object or file-like object\n A URL, file-like object, or a raw string containing an XSLT script.\n This stylesheet should flatten complex, deeply nested XML documents\n for easier parsing. To use this feature you must have ``lxml`` module\n installed and specify 'lxml' as ``parser``. The ``xpath`` must\n reference nodes of transformed XML document generated after XSLT\n transformation and not the original XML document. 
Only XSLT 1.0\n scripts and not later versions is currently supported.\n\n iterparse : dict, optional\n The nodes or attributes to retrieve in iterparsing of XML document\n as a dict with key being the name of repeating element and value being\n list of elements or attribute names that are descendants of the repeated\n element. Note: If this option is used, it will replace ``xpath`` parsing\n and unlike ``xpath``, descendants do not need to relate to each other but can\n exist any where in document under the repeating element. This memory-\n efficient method should be used for very large XML files (500MB, 1GB, or 5GB+).\n For example, ::\n\n iterparse = {{"row_element": ["child_elem", "attr", "grandchild_elem"]}}\n\n .. versionadded:: 1.5.0\n\n {decompression_options}\n\n .. versionchanged:: 1.4.0 Zstandard support.\n\n {storage_options}\n\n dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'\n Back-end data type applied to the resultant :class:`DataFrame`\n (still experimental). Behaviour is as follows:\n\n * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`\n (default).\n * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`\n DataFrame.\n\n .. versionadded:: 2.0\n\n Returns\n -------\n df\n A DataFrame.\n\n See Also\n --------\n read_json : Convert a JSON string to pandas object.\n read_html : Read HTML tables into a list of DataFrame objects.\n\n Notes\n -----\n This method is best designed to import shallow XML documents in\n following format which is the ideal fit for the two-dimensions of a\n ``DataFrame`` (row by column). ::\n\n <root>\n <row>\n <column1>data</column1>\n <column2>data</column2>\n <column3>data</column3>\n ...\n </row>\n <row>\n ...\n </row>\n ...\n </root>\n\n As a file format, XML documents can be designed any way including\n layout of elements and attributes as long as it conforms to W3C\n specifications. 
Therefore, this method is a convenience handler for\n a specific flatter design and not all possible XML structures.\n\n However, for more complex XML documents, ``stylesheet`` allows you to\n temporarily redesign original document with XSLT (a special purpose\n language) for a flatter version for migration to a DataFrame.\n\n This function will *always* return a single :class:`DataFrame` or raise\n exceptions due to issues with XML document, ``xpath``, or other\n parameters.\n\n See the :ref:`read_xml documentation in the IO section of the docs\n <io.read_xml>` for more information in using this method to parse XML\n files to DataFrames.\n\n Examples\n --------\n >>> from io import StringIO\n >>> xml = '''<?xml version='1.0' encoding='utf-8'?>\n ... <data xmlns="http://example.com">\n ... <row>\n ... <shape>square</shape>\n ... <degrees>360</degrees>\n ... <sides>4.0</sides>\n ... </row>\n ... <row>\n ... <shape>circle</shape>\n ... <degrees>360</degrees>\n ... <sides/>\n ... </row>\n ... <row>\n ... <shape>triangle</shape>\n ... <degrees>180</degrees>\n ... <sides>3.0</sides>\n ... </row>\n ... </data>'''\n\n >>> df = pd.read_xml(StringIO(xml))\n >>> df\n shape degrees sides\n 0 square 360 4.0\n 1 circle 360 NaN\n 2 triangle 180 3.0\n\n >>> xml = '''<?xml version='1.0' encoding='utf-8'?>\n ... <data>\n ... <row shape="square" degrees="360" sides="4.0"/>\n ... <row shape="circle" degrees="360"/>\n ... <row shape="triangle" degrees="180" sides="3.0"/>\n ... </data>'''\n\n >>> df = pd.read_xml(StringIO(xml), xpath=".//row")\n >>> df\n shape degrees sides\n 0 square 360 4.0\n 1 circle 360 NaN\n 2 triangle 180 3.0\n\n >>> xml = '''<?xml version='1.0' encoding='utf-8'?>\n ... <doc:data xmlns:doc="https://example.com">\n ... <doc:row>\n ... <doc:shape>square</doc:shape>\n ... <doc:degrees>360</doc:degrees>\n ... <doc:sides>4.0</doc:sides>\n ... </doc:row>\n ... <doc:row>\n ... <doc:shape>circle</doc:shape>\n ... <doc:degrees>360</doc:degrees>\n ... <doc:sides/>\n ... 
</doc:row>\n ... <doc:row>\n ... <doc:shape>triangle</doc:shape>\n ... <doc:degrees>180</doc:degrees>\n ... <doc:sides>3.0</doc:sides>\n ... </doc:row>\n ... </doc:data>'''\n\n >>> df = pd.read_xml(StringIO(xml),\n ... xpath="//doc:row",\n ... namespaces={{"doc": "https://example.com"}})\n >>> df\n shape degrees sides\n 0 square 360 4.0\n 1 circle 360 NaN\n 2 triangle 180 3.0\n\n >>> xml_data = '''\n ... <data>\n ... <row>\n ... <index>0</index>\n ... <a>1</a>\n ... <b>2.5</b>\n ... <c>True</c>\n ... <d>a</d>\n ... <e>2019-12-31 00:00:00</e>\n ... </row>\n ... <row>\n ... <index>1</index>\n ... <b>4.5</b>\n ... <c>False</c>\n ... <d>b</d>\n ... <e>2019-12-31 00:00:00</e>\n ... </row>\n ... </data>\n ... '''\n\n >>> df = pd.read_xml(StringIO(xml_data),\n ... dtype_backend="numpy_nullable",\n ... parse_dates=["e"])\n >>> df\n index a b c d e\n 0 0 1 2.5 True a 2019-12-31\n 1 1 <NA> 4.5 False b 2019-12-31\n """\n check_dtype_backend(dtype_backend)\n\n return _parse(\n path_or_buffer=path_or_buffer,\n xpath=xpath,\n namespaces=namespaces,\n elems_only=elems_only,\n attrs_only=attrs_only,\n names=names,\n dtype=dtype,\n converters=converters,\n parse_dates=parse_dates,\n encoding=encoding,\n parser=parser,\n stylesheet=stylesheet,\n iterparse=iterparse,\n compression=compression,\n storage_options=storage_options,\n dtype_backend=dtype_backend,\n )\n
.venv\Lib\site-packages\pandas\io\xml.py
xml.py
Python
38,656
0.95
0.158879
0.033986
react-lib
305
2025-01-08T16:11:56.310414
BSD-3-Clause
false
564808e125710852257535496e737f10
from __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n Literal,\n)\n\nimport numpy as np\n\nfrom pandas._config import using_string_dtype\n\nfrom pandas._libs import lib\nfrom pandas.compat import (\n pa_version_under18p0,\n pa_version_under19p0,\n)\nfrom pandas.compat._optional import import_optional_dependency\n\nimport pandas as pd\n\nif TYPE_CHECKING:\n from collections.abc import Callable\n\n import pyarrow\n\n from pandas._typing import DtypeBackend\n\n\ndef _arrow_dtype_mapping() -> dict:\n pa = import_optional_dependency("pyarrow")\n return {\n pa.int8(): pd.Int8Dtype(),\n pa.int16(): pd.Int16Dtype(),\n pa.int32(): pd.Int32Dtype(),\n pa.int64(): pd.Int64Dtype(),\n pa.uint8(): pd.UInt8Dtype(),\n pa.uint16(): pd.UInt16Dtype(),\n pa.uint32(): pd.UInt32Dtype(),\n pa.uint64(): pd.UInt64Dtype(),\n pa.bool_(): pd.BooleanDtype(),\n pa.string(): pd.StringDtype(),\n pa.float32(): pd.Float32Dtype(),\n pa.float64(): pd.Float64Dtype(),\n pa.string(): pd.StringDtype(),\n pa.large_string(): pd.StringDtype(),\n }\n\n\ndef _arrow_string_types_mapper() -> Callable:\n pa = import_optional_dependency("pyarrow")\n\n mapping = {\n pa.string(): pd.StringDtype(na_value=np.nan),\n pa.large_string(): pd.StringDtype(na_value=np.nan),\n }\n if not pa_version_under18p0:\n mapping[pa.string_view()] = pd.StringDtype(na_value=np.nan)\n\n return mapping.get\n\n\ndef arrow_table_to_pandas(\n table: pyarrow.Table,\n dtype_backend: DtypeBackend | Literal["numpy"] | lib.NoDefault = lib.no_default,\n null_to_int64: bool = False,\n to_pandas_kwargs: dict | None = None,\n) -> pd.DataFrame:\n if to_pandas_kwargs is None:\n to_pandas_kwargs = {}\n\n pa = import_optional_dependency("pyarrow")\n\n types_mapper: type[pd.ArrowDtype] | None | Callable\n if dtype_backend == "numpy_nullable":\n mapping = _arrow_dtype_mapping()\n if null_to_int64:\n # Modify the default mapping to also map null to Int64\n # (to match other engines - only for CSV parser)\n mapping[pa.null()] = 
pd.Int64Dtype()\n types_mapper = mapping.get\n elif dtype_backend == "pyarrow":\n types_mapper = pd.ArrowDtype\n elif using_string_dtype():\n if pa_version_under19p0:\n types_mapper = _arrow_string_types_mapper()\n else:\n types_mapper = None\n elif dtype_backend is lib.no_default or dtype_backend == "numpy":\n types_mapper = None\n else:\n raise NotImplementedError\n\n df = table.to_pandas(types_mapper=types_mapper, **to_pandas_kwargs)\n return df\n
.venv\Lib\site-packages\pandas\io\_util.py
_util.py
Python
2,676
0.95
0.106383
0.026667
python-kit
548
2023-08-22T12:32:00.944653
GPL-3.0
false
57bbcefa9990466e055403868050425d
# ruff: noqa: TCH004\nfrom typing import TYPE_CHECKING\n\nif TYPE_CHECKING:\n # import modules that have public classes/functions\n from pandas.io import (\n formats,\n json,\n stata,\n )\n\n # mark only those modules as public\n __all__ = ["formats", "json", "stata"]\n
.venv\Lib\site-packages\pandas\io\__init__.py
__init__.py
Python
293
0.95
0.076923
0.272727
react-lib
653
2024-04-25T13:55:45.868971
MIT
false
48ab8bb14318e86ebd6d9a4c5163d55d
"""\nPyperclip\n\nA cross-platform clipboard module for Python,\nwith copy & paste functions for plain text.\nBy Al Sweigart al@inventwithpython.com\nLicence at LICENSES/PYPERCLIP_LICENSE\n\nUsage:\n import pyperclip\n pyperclip.copy('The text to be copied to the clipboard.')\n spam = pyperclip.paste()\n\n if not pyperclip.is_available():\n print("Copy functionality unavailable!")\n\nOn Windows, no additional modules are needed.\nOn Mac, the pyobjc module is used, falling back to the pbcopy and pbpaste cli\n commands. (These commands should come with OS X.).\nOn Linux, install xclip, xsel, or wl-clipboard (for "wayland" sessions) via\npackage manager.\nFor example, in Debian:\n sudo apt-get install xclip\n sudo apt-get install xsel\n sudo apt-get install wl-clipboard\n\nOtherwise on Linux, you will need the PyQt5 modules installed.\n\nThis module does not work with PyGObject yet.\n\nCygwin is currently not supported.\n\nSecurity Note: This module runs programs with these names:\n - pbcopy\n - pbpaste\n - xclip\n - xsel\n - wl-copy/wl-paste\n - klipper\n - qdbus\nA malicious user could rename or add programs with these names, tricking\nPyperclip into running them with whatever permissions the Python process has.\n\n"""\n\n__version__ = "1.8.2"\n\n\nimport contextlib\nimport ctypes\nfrom ctypes import (\n c_size_t,\n c_wchar,\n c_wchar_p,\n get_errno,\n sizeof,\n)\nimport os\nimport platform\nfrom shutil import which as _executable_exists\nimport subprocess\nimport time\nimport warnings\n\nfrom pandas.errors import (\n PyperclipException,\n PyperclipWindowsException,\n)\nfrom pandas.util._exceptions import find_stack_level\n\n# `import PyQt4` sys.exit()s if DISPLAY is not in the environment.\n# Thus, we need to detect the presence of $DISPLAY manually\n# and not load PyQt4 if it is absent.\nHAS_DISPLAY = os.getenv("DISPLAY")\n\nEXCEPT_MSG = """\n Pyperclip could not find a copy/paste mechanism for your system.\n For more information, please visit\n 
https://pyperclip.readthedocs.io/en/latest/index.html#not-implemented-error\n """\n\nENCODING = "utf-8"\n\n\nclass PyperclipTimeoutException(PyperclipException):\n pass\n\n\ndef _stringifyText(text) -> str:\n acceptedTypes = (str, int, float, bool)\n if not isinstance(text, acceptedTypes):\n raise PyperclipException(\n f"only str, int, float, and bool values "\n f"can be copied to the clipboard, not {type(text).__name__}"\n )\n return str(text)\n\n\ndef init_osx_pbcopy_clipboard():\n def copy_osx_pbcopy(text):\n text = _stringifyText(text) # Converts non-str values to str.\n with subprocess.Popen(\n ["pbcopy", "w"], stdin=subprocess.PIPE, close_fds=True\n ) as p:\n p.communicate(input=text.encode(ENCODING))\n\n def paste_osx_pbcopy():\n with subprocess.Popen(\n ["pbpaste", "r"], stdout=subprocess.PIPE, close_fds=True\n ) as p:\n stdout = p.communicate()[0]\n return stdout.decode(ENCODING)\n\n return copy_osx_pbcopy, paste_osx_pbcopy\n\n\ndef init_osx_pyobjc_clipboard():\n def copy_osx_pyobjc(text):\n """Copy string argument to clipboard"""\n text = _stringifyText(text) # Converts non-str values to str.\n newStr = Foundation.NSString.stringWithString_(text).nsstring()\n newData = newStr.dataUsingEncoding_(Foundation.NSUTF8StringEncoding)\n board = AppKit.NSPasteboard.generalPasteboard()\n board.declareTypes_owner_([AppKit.NSStringPboardType], None)\n board.setData_forType_(newData, AppKit.NSStringPboardType)\n\n def paste_osx_pyobjc():\n """Returns contents of clipboard"""\n board = AppKit.NSPasteboard.generalPasteboard()\n content = board.stringForType_(AppKit.NSStringPboardType)\n return content\n\n return copy_osx_pyobjc, paste_osx_pyobjc\n\n\ndef init_qt_clipboard():\n global QApplication\n # $DISPLAY should exist\n\n # Try to import from qtpy, but if that fails try PyQt5 then PyQt4\n try:\n from qtpy.QtWidgets import QApplication\n except ImportError:\n try:\n from PyQt5.QtWidgets import QApplication\n except ImportError:\n from PyQt4.QtGui import 
QApplication\n\n app = QApplication.instance()\n if app is None:\n app = QApplication([])\n\n def copy_qt(text):\n text = _stringifyText(text) # Converts non-str values to str.\n cb = app.clipboard()\n cb.setText(text)\n\n def paste_qt() -> str:\n cb = app.clipboard()\n return str(cb.text())\n\n return copy_qt, paste_qt\n\n\ndef init_xclip_clipboard():\n DEFAULT_SELECTION = "c"\n PRIMARY_SELECTION = "p"\n\n def copy_xclip(text, primary=False):\n text = _stringifyText(text) # Converts non-str values to str.\n selection = DEFAULT_SELECTION\n if primary:\n selection = PRIMARY_SELECTION\n with subprocess.Popen(\n ["xclip", "-selection", selection], stdin=subprocess.PIPE, close_fds=True\n ) as p:\n p.communicate(input=text.encode(ENCODING))\n\n def paste_xclip(primary=False):\n selection = DEFAULT_SELECTION\n if primary:\n selection = PRIMARY_SELECTION\n with subprocess.Popen(\n ["xclip", "-selection", selection, "-o"],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n close_fds=True,\n ) as p:\n stdout = p.communicate()[0]\n # Intentionally ignore extraneous output on stderr when clipboard is empty\n return stdout.decode(ENCODING)\n\n return copy_xclip, paste_xclip\n\n\ndef init_xsel_clipboard():\n DEFAULT_SELECTION = "-b"\n PRIMARY_SELECTION = "-p"\n\n def copy_xsel(text, primary=False):\n text = _stringifyText(text) # Converts non-str values to str.\n selection_flag = DEFAULT_SELECTION\n if primary:\n selection_flag = PRIMARY_SELECTION\n with subprocess.Popen(\n ["xsel", selection_flag, "-i"], stdin=subprocess.PIPE, close_fds=True\n ) as p:\n p.communicate(input=text.encode(ENCODING))\n\n def paste_xsel(primary=False):\n selection_flag = DEFAULT_SELECTION\n if primary:\n selection_flag = PRIMARY_SELECTION\n with subprocess.Popen(\n ["xsel", selection_flag, "-o"], stdout=subprocess.PIPE, close_fds=True\n ) as p:\n stdout = p.communicate()[0]\n return stdout.decode(ENCODING)\n\n return copy_xsel, paste_xsel\n\n\ndef init_wl_clipboard():\n PRIMARY_SELECTION = 
"-p"\n\n def copy_wl(text, primary=False):\n text = _stringifyText(text) # Converts non-str values to str.\n args = ["wl-copy"]\n if primary:\n args.append(PRIMARY_SELECTION)\n if not text:\n args.append("--clear")\n subprocess.check_call(args, close_fds=True)\n else:\n p = subprocess.Popen(args, stdin=subprocess.PIPE, close_fds=True)\n p.communicate(input=text.encode(ENCODING))\n\n def paste_wl(primary=False):\n args = ["wl-paste", "-n"]\n if primary:\n args.append(PRIMARY_SELECTION)\n p = subprocess.Popen(args, stdout=subprocess.PIPE, close_fds=True)\n stdout, _stderr = p.communicate()\n return stdout.decode(ENCODING)\n\n return copy_wl, paste_wl\n\n\ndef init_klipper_clipboard():\n def copy_klipper(text):\n text = _stringifyText(text) # Converts non-str values to str.\n with subprocess.Popen(\n [\n "qdbus",\n "org.kde.klipper",\n "/klipper",\n "setClipboardContents",\n text.encode(ENCODING),\n ],\n stdin=subprocess.PIPE,\n close_fds=True,\n ) as p:\n p.communicate(input=None)\n\n def paste_klipper():\n with subprocess.Popen(\n ["qdbus", "org.kde.klipper", "/klipper", "getClipboardContents"],\n stdout=subprocess.PIPE,\n close_fds=True,\n ) as p:\n stdout = p.communicate()[0]\n\n # Workaround for https://bugs.kde.org/show_bug.cgi?id=342874\n # TODO: https://github.com/asweigart/pyperclip/issues/43\n clipboardContents = stdout.decode(ENCODING)\n # even if blank, Klipper will append a newline at the end\n assert len(clipboardContents) > 0\n # make sure that newline is there\n assert clipboardContents.endswith("\n")\n if clipboardContents.endswith("\n"):\n clipboardContents = clipboardContents[:-1]\n return clipboardContents\n\n return copy_klipper, paste_klipper\n\n\ndef init_dev_clipboard_clipboard():\n def copy_dev_clipboard(text):\n text = _stringifyText(text) # Converts non-str values to str.\n if text == "":\n warnings.warn(\n "Pyperclip cannot copy a blank string to the clipboard on Cygwin. 
"\n "This is effectively a no-op.",\n stacklevel=find_stack_level(),\n )\n if "\r" in text:\n warnings.warn(\n "Pyperclip cannot handle \\r characters on Cygwin.",\n stacklevel=find_stack_level(),\n )\n\n with open("/dev/clipboard", "w", encoding="utf-8") as fd:\n fd.write(text)\n\n def paste_dev_clipboard() -> str:\n with open("/dev/clipboard", encoding="utf-8") as fd:\n content = fd.read()\n return content\n\n return copy_dev_clipboard, paste_dev_clipboard\n\n\ndef init_no_clipboard():\n class ClipboardUnavailable:\n def __call__(self, *args, **kwargs):\n raise PyperclipException(EXCEPT_MSG)\n\n def __bool__(self) -> bool:\n return False\n\n return ClipboardUnavailable(), ClipboardUnavailable()\n\n\n# Windows-related clipboard functions:\nclass CheckedCall:\n def __init__(self, f) -> None:\n super().__setattr__("f", f)\n\n def __call__(self, *args):\n ret = self.f(*args)\n if not ret and get_errno():\n raise PyperclipWindowsException("Error calling " + self.f.__name__)\n return ret\n\n def __setattr__(self, key, value):\n setattr(self.f, key, value)\n\n\ndef init_windows_clipboard():\n global HGLOBAL, LPVOID, DWORD, LPCSTR, INT\n global HWND, HINSTANCE, HMENU, BOOL, UINT, HANDLE\n from ctypes.wintypes import (\n BOOL,\n DWORD,\n HANDLE,\n HGLOBAL,\n HINSTANCE,\n HMENU,\n HWND,\n INT,\n LPCSTR,\n LPVOID,\n UINT,\n )\n\n windll = ctypes.windll\n msvcrt = ctypes.CDLL("msvcrt")\n\n safeCreateWindowExA = CheckedCall(windll.user32.CreateWindowExA)\n safeCreateWindowExA.argtypes = [\n DWORD,\n LPCSTR,\n LPCSTR,\n DWORD,\n INT,\n INT,\n INT,\n INT,\n HWND,\n HMENU,\n HINSTANCE,\n LPVOID,\n ]\n safeCreateWindowExA.restype = HWND\n\n safeDestroyWindow = CheckedCall(windll.user32.DestroyWindow)\n safeDestroyWindow.argtypes = [HWND]\n safeDestroyWindow.restype = BOOL\n\n OpenClipboard = windll.user32.OpenClipboard\n OpenClipboard.argtypes = [HWND]\n OpenClipboard.restype = BOOL\n\n safeCloseClipboard = CheckedCall(windll.user32.CloseClipboard)\n safeCloseClipboard.argtypes = 
[]\n safeCloseClipboard.restype = BOOL\n\n safeEmptyClipboard = CheckedCall(windll.user32.EmptyClipboard)\n safeEmptyClipboard.argtypes = []\n safeEmptyClipboard.restype = BOOL\n\n safeGetClipboardData = CheckedCall(windll.user32.GetClipboardData)\n safeGetClipboardData.argtypes = [UINT]\n safeGetClipboardData.restype = HANDLE\n\n safeSetClipboardData = CheckedCall(windll.user32.SetClipboardData)\n safeSetClipboardData.argtypes = [UINT, HANDLE]\n safeSetClipboardData.restype = HANDLE\n\n safeGlobalAlloc = CheckedCall(windll.kernel32.GlobalAlloc)\n safeGlobalAlloc.argtypes = [UINT, c_size_t]\n safeGlobalAlloc.restype = HGLOBAL\n\n safeGlobalLock = CheckedCall(windll.kernel32.GlobalLock)\n safeGlobalLock.argtypes = [HGLOBAL]\n safeGlobalLock.restype = LPVOID\n\n safeGlobalUnlock = CheckedCall(windll.kernel32.GlobalUnlock)\n safeGlobalUnlock.argtypes = [HGLOBAL]\n safeGlobalUnlock.restype = BOOL\n\n wcslen = CheckedCall(msvcrt.wcslen)\n wcslen.argtypes = [c_wchar_p]\n wcslen.restype = UINT\n\n GMEM_MOVEABLE = 0x0002\n CF_UNICODETEXT = 13\n\n @contextlib.contextmanager\n def window():\n """\n Context that provides a valid Windows hwnd.\n """\n # we really just need the hwnd, so setting "STATIC"\n # as predefined lpClass is just fine.\n hwnd = safeCreateWindowExA(\n 0, b"STATIC", None, 0, 0, 0, 0, 0, None, None, None, None\n )\n try:\n yield hwnd\n finally:\n safeDestroyWindow(hwnd)\n\n @contextlib.contextmanager\n def clipboard(hwnd):\n """\n Context manager that opens the clipboard and prevents\n other applications from modifying the clipboard content.\n """\n # We may not get the clipboard handle immediately because\n # some other application is accessing it (?)\n # We try for at least 500ms to get the clipboard.\n t = time.time() + 0.5\n success = False\n while time.time() < t:\n success = OpenClipboard(hwnd)\n if success:\n break\n time.sleep(0.01)\n if not success:\n raise PyperclipWindowsException("Error calling OpenClipboard")\n\n try:\n yield\n finally:\n 
safeCloseClipboard()\n\n def copy_windows(text):\n # This function is heavily based on\n # http://msdn.com/ms649016#_win32_Copying_Information_to_the_Clipboard\n\n text = _stringifyText(text) # Converts non-str values to str.\n\n with window() as hwnd:\n # http://msdn.com/ms649048\n # If an application calls OpenClipboard with hwnd set to NULL,\n # EmptyClipboard sets the clipboard owner to NULL;\n # this causes SetClipboardData to fail.\n # => We need a valid hwnd to copy something.\n with clipboard(hwnd):\n safeEmptyClipboard()\n\n if text:\n # http://msdn.com/ms649051\n # If the hMem parameter identifies a memory object,\n # the object must have been allocated using the\n # function with the GMEM_MOVEABLE flag.\n count = wcslen(text) + 1\n handle = safeGlobalAlloc(GMEM_MOVEABLE, count * sizeof(c_wchar))\n locked_handle = safeGlobalLock(handle)\n\n ctypes.memmove(\n c_wchar_p(locked_handle),\n c_wchar_p(text),\n count * sizeof(c_wchar),\n )\n\n safeGlobalUnlock(handle)\n safeSetClipboardData(CF_UNICODETEXT, handle)\n\n def paste_windows():\n with clipboard(None):\n handle = safeGetClipboardData(CF_UNICODETEXT)\n if not handle:\n # GetClipboardData may return NULL with errno == NO_ERROR\n # if the clipboard is empty.\n # (Also, it may return a handle to an empty buffer,\n # but technically that's not empty)\n return ""\n return c_wchar_p(handle).value\n\n return copy_windows, paste_windows\n\n\ndef init_wsl_clipboard():\n def copy_wsl(text):\n text = _stringifyText(text) # Converts non-str values to str.\n with subprocess.Popen(["clip.exe"], stdin=subprocess.PIPE, close_fds=True) as p:\n p.communicate(input=text.encode(ENCODING))\n\n def paste_wsl():\n with subprocess.Popen(\n ["powershell.exe", "-command", "Get-Clipboard"],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n close_fds=True,\n ) as p:\n stdout = p.communicate()[0]\n # WSL appends "\r\n" to the contents.\n return stdout[:-2].decode(ENCODING)\n\n return copy_wsl, paste_wsl\n\n\n# Automatic 
detection of clipboard mechanisms\n# and importing is done in determine_clipboard():\ndef determine_clipboard():\n """\n Determine the OS/platform and set the copy() and paste() functions\n accordingly.\n """\n global Foundation, AppKit, qtpy, PyQt4, PyQt5\n\n # Setup for the CYGWIN platform:\n if (\n "cygwin" in platform.system().lower()\n ): # Cygwin has a variety of values returned by platform.system(),\n # such as 'CYGWIN_NT-6.1'\n # FIXME(pyperclip#55): pyperclip currently does not support Cygwin,\n # see https://github.com/asweigart/pyperclip/issues/55\n if os.path.exists("/dev/clipboard"):\n warnings.warn(\n "Pyperclip's support for Cygwin is not perfect, "\n "see https://github.com/asweigart/pyperclip/issues/55",\n stacklevel=find_stack_level(),\n )\n return init_dev_clipboard_clipboard()\n\n # Setup for the WINDOWS platform:\n elif os.name == "nt" or platform.system() == "Windows":\n return init_windows_clipboard()\n\n if platform.system() == "Linux":\n if _executable_exists("wslconfig.exe"):\n return init_wsl_clipboard()\n\n # Setup for the macOS platform:\n if os.name == "mac" or platform.system() == "Darwin":\n try:\n import AppKit\n import Foundation # check if pyobjc is installed\n except ImportError:\n return init_osx_pbcopy_clipboard()\n else:\n return init_osx_pyobjc_clipboard()\n\n # Setup for the LINUX platform:\n if HAS_DISPLAY:\n if os.environ.get("WAYLAND_DISPLAY") and _executable_exists("wl-copy"):\n return init_wl_clipboard()\n if _executable_exists("xsel"):\n return init_xsel_clipboard()\n if _executable_exists("xclip"):\n return init_xclip_clipboard()\n if _executable_exists("klipper") and _executable_exists("qdbus"):\n return init_klipper_clipboard()\n\n try:\n # qtpy is a small abstraction layer that lets you write applications\n # using a single api call to either PyQt or PySide.\n # https://pypi.python.org/project/QtPy\n import qtpy # check if qtpy is installed\n except ImportError:\n # If qtpy isn't installed, fall back on importing 
PyQt4.\n try:\n import PyQt5 # check if PyQt5 is installed\n except ImportError:\n try:\n import PyQt4 # check if PyQt4 is installed\n except ImportError:\n pass # We want to fail fast for all non-ImportError exceptions.\n else:\n return init_qt_clipboard()\n else:\n return init_qt_clipboard()\n else:\n return init_qt_clipboard()\n\n return init_no_clipboard()\n\n\ndef set_clipboard(clipboard):\n """\n Explicitly sets the clipboard mechanism. The "clipboard mechanism" is how\n the copy() and paste() functions interact with the operating system to\n implement the copy/paste feature. The clipboard parameter must be one of:\n - pbcopy\n - pyobjc (default on macOS)\n - qt\n - xclip\n - xsel\n - klipper\n - windows (default on Windows)\n - no (this is what is set when no clipboard mechanism can be found)\n """\n global copy, paste\n\n clipboard_types = {\n "pbcopy": init_osx_pbcopy_clipboard,\n "pyobjc": init_osx_pyobjc_clipboard,\n "qt": init_qt_clipboard, # TODO - split this into 'qtpy', 'pyqt4', and 'pyqt5'\n "xclip": init_xclip_clipboard,\n "xsel": init_xsel_clipboard,\n "wl-clipboard": init_wl_clipboard,\n "klipper": init_klipper_clipboard,\n "windows": init_windows_clipboard,\n "no": init_no_clipboard,\n }\n\n if clipboard not in clipboard_types:\n allowed_clipboard_types = [repr(_) for _ in clipboard_types]\n raise ValueError(\n f"Argument must be one of {', '.join(allowed_clipboard_types)}"\n )\n\n # Sets pyperclip's copy() and paste() functions:\n copy, paste = clipboard_types[clipboard]()\n\n\ndef lazy_load_stub_copy(text):\n """\n A stub function for copy(), which will load the real copy() function when\n called so that the real copy() function is used for later calls.\n\n This allows users to import pyperclip without having determine_clipboard()\n automatically run, which will automatically select a clipboard mechanism.\n This could be a problem if it selects, say, the memory-heavy PyQt4 module\n but the user was just going to immediately call 
set_clipboard() to use a\n different clipboard mechanism.\n\n The lazy loading this stub function implements gives the user a chance to\n call set_clipboard() to pick another clipboard mechanism. Or, if the user\n simply calls copy() or paste() without calling set_clipboard() first,\n will fall back on whatever clipboard mechanism that determine_clipboard()\n automatically chooses.\n """\n global copy, paste\n copy, paste = determine_clipboard()\n return copy(text)\n\n\ndef lazy_load_stub_paste():\n """\n A stub function for paste(), which will load the real paste() function when\n called so that the real paste() function is used for later calls.\n\n This allows users to import pyperclip without having determine_clipboard()\n automatically run, which will automatically select a clipboard mechanism.\n This could be a problem if it selects, say, the memory-heavy PyQt4 module\n but the user was just going to immediately call set_clipboard() to use a\n different clipboard mechanism.\n\n The lazy loading this stub function implements gives the user a chance to\n call set_clipboard() to pick another clipboard mechanism. Or, if the user\n simply calls copy() or paste() without calling set_clipboard() first,\n will fall back on whatever clipboard mechanism that determine_clipboard()\n automatically chooses.\n """\n global copy, paste\n copy, paste = determine_clipboard()\n return paste()\n\n\ndef is_available() -> bool:\n return copy != lazy_load_stub_copy and paste != lazy_load_stub_paste\n\n\n# Initially, copy() and paste() are set to lazy loading wrappers which will\n# set `copy` and `paste` to real functions the first time they're used, unless\n# set_clipboard() or determine_clipboard() is called first.\ncopy, paste = lazy_load_stub_copy, lazy_load_stub_paste\n\n\ndef waitForPaste(timeout=None):\n """This function call blocks until a non-empty text string exists on the\n clipboard. 
It returns this text.\n\n This function raises PyperclipTimeoutException if timeout was set to\n a number of seconds that has elapsed without non-empty text being put on\n the clipboard."""\n startTime = time.time()\n while True:\n clipboardText = paste()\n if clipboardText != "":\n return clipboardText\n time.sleep(0.01)\n\n if timeout is not None and time.time() > startTime + timeout:\n raise PyperclipTimeoutException(\n "waitForPaste() timed out after " + str(timeout) + " seconds."\n )\n\n\ndef waitForNewPaste(timeout=None):\n """This function call blocks until a new text string exists on the\n clipboard that is different from the text that was there when the function\n was first called. It returns this text.\n\n This function raises PyperclipTimeoutException if timeout was set to\n a number of seconds that has elapsed without non-empty text being put on\n the clipboard."""\n startTime = time.time()\n originalText = paste()\n while True:\n currentText = paste()\n if currentText != originalText:\n return currentText\n time.sleep(0.01)\n\n if timeout is not None and time.time() > startTime + timeout:\n raise PyperclipTimeoutException(\n "waitForNewPaste() timed out after " + str(timeout) + " seconds."\n )\n\n\n__all__ = [\n "copy",\n "paste",\n "waitForPaste",\n "waitForNewPaste",\n "set_clipboard",\n "determine_clipboard",\n]\n\n# pandas aliases\nclipboard_get = paste\nclipboard_set = copy\n
.venv\Lib\site-packages\pandas\io\clipboard\__init__.py
__init__.py
Python
24,235
0.95
0.190094
0.081699
awesome-app
319
2023-10-05T09:55:44.826926
MIT
false
85b1ff12cf7350b01faefcf0dbced676
\n\n
.venv\Lib\site-packages\pandas\io\clipboard\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
29,662
0.95
0.072139
0
awesome-app
505
2024-06-15T06:21:59.310714
GPL-3.0
false
8507ea0f6fc7b2cc9e13d18dd3148700
from __future__ import annotations\n\nfrom collections.abc import (\n Hashable,\n Iterable,\n Mapping,\n Sequence,\n)\nimport datetime\nfrom functools import partial\nfrom io import BytesIO\nimport os\nfrom textwrap import fill\nfrom typing import (\n IO,\n TYPE_CHECKING,\n Any,\n Callable,\n Generic,\n Literal,\n TypeVar,\n Union,\n cast,\n overload,\n)\nimport warnings\nimport zipfile\n\nfrom pandas._config import config\n\nfrom pandas._libs import lib\nfrom pandas._libs.parsers import STR_NA_VALUES\nfrom pandas.compat._optional import (\n get_version,\n import_optional_dependency,\n)\nfrom pandas.errors import EmptyDataError\nfrom pandas.util._decorators import (\n Appender,\n doc,\n)\nfrom pandas.util._exceptions import find_stack_level\nfrom pandas.util._validators import check_dtype_backend\n\nfrom pandas.core.dtypes.common import (\n is_bool,\n is_float,\n is_integer,\n is_list_like,\n)\n\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.shared_docs import _shared_docs\nfrom pandas.util.version import Version\n\nfrom pandas.io.common import (\n IOHandles,\n get_handle,\n stringify_path,\n validate_header_arg,\n)\nfrom pandas.io.excel._util import (\n fill_mi_header,\n get_default_engine,\n get_writer,\n maybe_convert_usecols,\n pop_header_name,\n)\nfrom pandas.io.parsers import TextParser\nfrom pandas.io.parsers.readers import validate_integer\n\nif TYPE_CHECKING:\n from types import TracebackType\n\n from pandas._typing import (\n DtypeArg,\n DtypeBackend,\n ExcelWriterIfSheetExists,\n FilePath,\n IntStrT,\n ReadBuffer,\n Self,\n SequenceNotStr,\n StorageOptions,\n WriteExcelBuffer,\n )\n_read_excel_doc = (\n """\nRead an Excel file into a ``pandas`` ``DataFrame``.\n\nSupports `xls`, `xlsx`, `xlsm`, `xlsb`, `odf`, `ods` and `odt` file extensions\nread from a local filesystem or URL. 
Supports an option to read\na single sheet or a list of sheets.\n\nParameters\n----------\nio : str, bytes, ExcelFile, xlrd.Book, path object, or file-like object\n Any valid string path is acceptable. The string could be a URL. Valid\n URL schemes include http, ftp, s3, and file. For file URLs, a host is\n expected. A local file could be: ``file://localhost/path/to/table.xlsx``.\n\n If you want to pass in a path object, pandas accepts any ``os.PathLike``.\n\n By file-like object, we refer to objects with a ``read()`` method,\n such as a file handle (e.g. via builtin ``open`` function)\n or ``StringIO``.\n\n .. deprecated:: 2.1.0\n Passing byte strings is deprecated. To read from a\n byte string, wrap it in a ``BytesIO`` object.\nsheet_name : str, int, list, or None, default 0\n Strings are used for sheet names. Integers are used in zero-indexed\n sheet positions (chart sheets do not count as a sheet position).\n Lists of strings/integers are used to request multiple sheets.\n Specify ``None`` to get all worksheets.\n\n Available cases:\n\n * Defaults to ``0``: 1st sheet as a `DataFrame`\n * ``1``: 2nd sheet as a `DataFrame`\n * ``"Sheet1"``: Load sheet with name "Sheet1"\n * ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5"\n as a dict of `DataFrame`\n * ``None``: All worksheets.\n\nheader : int, list of int, default 0\n Row (0-indexed) to use for the column labels of the parsed\n DataFrame. If a list of integers is passed those row positions will\n be combined into a ``MultiIndex``. Use None if there is no header.\nnames : array-like, default None\n List of column names to use. If file contains no header row,\n then you should explicitly pass header=None.\nindex_col : int, str, list of int, default None\n Column (0-indexed) to use as the row labels of the DataFrame.\n Pass None if there is no such column. If a list is passed,\n those columns will be combined into a ``MultiIndex``. 
If a\n subset of data is selected with ``usecols``, index_col\n is based on the subset.\n\n Missing values will be forward filled to allow roundtripping with\n ``to_excel`` for ``merged_cells=True``. To avoid forward filling the\n missing values use ``set_index`` after reading the data instead of\n ``index_col``.\nusecols : str, list-like, or callable, default None\n * If None, then parse all columns.\n * If str, then indicates comma separated list of Excel column letters\n and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of\n both sides.\n * If list of int, then indicates list of column numbers to be parsed\n (0-indexed).\n * If list of string, then indicates list of column names to be parsed.\n * If callable, then evaluate each column name against it and parse the\n column if the callable returns ``True``.\n\n Returns a subset of the columns according to behavior above.\ndtype : Type name or dict of column -> type, default None\n Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32}}\n Use ``object`` to preserve data as stored in Excel and not interpret dtype,\n which will necessarily result in ``object`` dtype.\n If converters are specified, they will be applied INSTEAD\n of dtype conversion.\n If you use ``None``, it will infer the dtype of each column based on the data.\nengine : {{'openpyxl', 'calamine', 'odf', 'pyxlsb', 'xlrd'}}, default None\n If io is not a buffer or path, this must be set to identify io.\n Engine compatibility :\n\n - ``openpyxl`` supports newer Excel file formats.\n - ``calamine`` supports Excel (.xls, .xlsx, .xlsm, .xlsb)\n and OpenDocument (.ods) file formats.\n - ``odf`` supports OpenDocument file formats (.odf, .ods, .odt).\n - ``pyxlsb`` supports Binary Excel files.\n - ``xlrd`` supports old-style Excel files (.xls).\n\n When ``engine=None``, the following logic will be used to determine the engine:\n\n - If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt),\n then `odf 
<https://pypi.org/project/odfpy/>`_ will be used.\n - Otherwise if ``path_or_buffer`` is an xls format, ``xlrd`` will be used.\n - Otherwise if ``path_or_buffer`` is in xlsb format, ``pyxlsb`` will be used.\n - Otherwise ``openpyxl`` will be used.\nconverters : dict, default None\n Dict of functions for converting values in certain columns. Keys can\n either be integers or column labels, values are functions that take one\n input argument, the Excel cell content, and return the transformed\n content.\ntrue_values : list, default None\n Values to consider as True.\nfalse_values : list, default None\n Values to consider as False.\nskiprows : list-like, int, or callable, optional\n Line numbers to skip (0-indexed) or number of lines to skip (int) at the\n start of the file. If callable, the callable function will be evaluated\n against the row indices, returning True if the row should be skipped and\n False otherwise. An example of a valid callable argument would be ``lambda\n x: x in [0, 2]``.\nnrows : int, default None\n Number of rows to parse.\nna_values : scalar, str, list-like, or dict, default None\n Additional strings to recognize as NA/NaN. If dict passed, specific\n per-column NA values. 
By default the following values are interpreted\n as NaN: '"""\n + fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")\n + """'.\nkeep_default_na : bool, default True\n Whether or not to include the default NaN values when parsing the data.\n Depending on whether ``na_values`` is passed in, the behavior is as follows:\n\n * If ``keep_default_na`` is True, and ``na_values`` are specified,\n ``na_values`` is appended to the default NaN values used for parsing.\n * If ``keep_default_na`` is True, and ``na_values`` are not specified, only\n the default NaN values are used for parsing.\n * If ``keep_default_na`` is False, and ``na_values`` are specified, only\n the NaN values specified ``na_values`` are used for parsing.\n * If ``keep_default_na`` is False, and ``na_values`` are not specified, no\n strings will be parsed as NaN.\n\n Note that if `na_filter` is passed in as False, the ``keep_default_na`` and\n ``na_values`` parameters will be ignored.\nna_filter : bool, default True\n Detect missing value markers (empty strings and the value of na_values). In\n data without any NAs, passing ``na_filter=False`` can improve the\n performance of reading a large file.\nverbose : bool, default False\n Indicate number of NA values placed in non-numeric columns.\nparse_dates : bool, list-like, or dict, default False\n The behavior is as follows:\n\n * ``bool``. If True -> try parsing the index.\n * ``list`` of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3\n each as a separate date column.\n * ``list`` of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as\n a single date column.\n * ``dict``, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call\n result 'foo'\n\n If a column or index contains an unparsable date, the entire column or\n index will be returned unaltered as an object data type. 
If you don`t want to\n parse some cells as date just change their type in Excel to "Text".\n For non-standard datetime parsing, use ``pd.to_datetime`` after ``pd.read_excel``.\n\n Note: A fast-path exists for iso8601-formatted dates.\ndate_parser : function, optional\n Function to use for converting a sequence of string columns to an array of\n datetime instances. The default uses ``dateutil.parser.parser`` to do the\n conversion. Pandas will try to call `date_parser` in three different ways,\n advancing to the next if an exception occurs: 1) Pass one or more arrays\n (as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the\n string values from the columns defined by `parse_dates` into a single array\n and pass that; and 3) call `date_parser` once for each row using one or\n more strings (corresponding to the columns defined by `parse_dates`) as\n arguments.\n\n .. deprecated:: 2.0.0\n Use ``date_format`` instead, or read in as ``object`` and then apply\n :func:`to_datetime` as-needed.\ndate_format : str or dict of column -> format, default ``None``\n If used in conjunction with ``parse_dates``, will parse dates according to this\n format. For anything more complex,\n please read in as ``object`` and then apply :func:`to_datetime` as-needed.\n\n .. versionadded:: 2.0.0\nthousands : str, default None\n Thousands separator for parsing string columns to numeric. Note that\n this parameter is only necessary for columns stored as TEXT in Excel,\n any numeric columns will automatically be parsed, regardless of display\n format.\ndecimal : str, default '.'\n Character to recognize as decimal point for parsing string columns to numeric.\n Note that this parameter is only necessary for columns stored as TEXT in Excel,\n any numeric columns will automatically be parsed, regardless of display\n format.(e.g. use ',' for European data).\n\n .. versionadded:: 1.4.0\n\ncomment : str, default None\n Comments out remainder of line. 
Pass a character or characters to this\n argument to indicate comments in the input file. Any data between the\n comment string and the end of the current line is ignored.\nskipfooter : int, default 0\n Rows at the end to skip (0-indexed).\n{storage_options}\n\ndtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'\n Back-end data type applied to the resultant :class:`DataFrame`\n (still experimental). Behaviour is as follows:\n\n * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`\n (default).\n * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`\n DataFrame.\n\n .. versionadded:: 2.0\n\nengine_kwargs : dict, optional\n Arbitrary keyword arguments passed to excel engine.\n\nReturns\n-------\nDataFrame or dict of DataFrames\n DataFrame from the passed in Excel file. See notes in sheet_name\n argument for more information on when a dict of DataFrames is returned.\n\nSee Also\n--------\nDataFrame.to_excel : Write DataFrame to an Excel file.\nDataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.\nread_csv : Read a comma-separated values (csv) file into DataFrame.\nread_fwf : Read a table of fixed-width formatted lines into DataFrame.\n\nNotes\n-----\nFor specific information on the methods used for each Excel engine, refer to the pandas\n:ref:`user guide <io.excel_reader>`\n\nExamples\n--------\nThe file can be read using the file name as string or an open file object:\n\n>>> pd.read_excel('tmp.xlsx', index_col=0) # doctest: +SKIP\n Name Value\n0 string1 1\n1 string2 2\n2 #Comment 3\n\n>>> pd.read_excel(open('tmp.xlsx', 'rb'),\n... 
sheet_name='Sheet3') # doctest: +SKIP\n Unnamed: 0 Name Value\n0 0 string1 1\n1 1 string2 2\n2 2 #Comment 3\n\nIndex and header can be specified via the `index_col` and `header` arguments\n\n>>> pd.read_excel('tmp.xlsx', index_col=None, header=None) # doctest: +SKIP\n 0 1 2\n0 NaN Name Value\n1 0.0 string1 1\n2 1.0 string2 2\n3 2.0 #Comment 3\n\nColumn types are inferred but can be explicitly specified\n\n>>> pd.read_excel('tmp.xlsx', index_col=0,\n... dtype={{'Name': str, 'Value': float}}) # doctest: +SKIP\n Name Value\n0 string1 1.0\n1 string2 2.0\n2 #Comment 3.0\n\nTrue, False, and NA values, and thousands separators have defaults,\nbut can be explicitly specified, too. Supply the values you would like\nas strings or lists of strings!\n\n>>> pd.read_excel('tmp.xlsx', index_col=0,\n... na_values=['string1', 'string2']) # doctest: +SKIP\n Name Value\n0 NaN 1\n1 NaN 2\n2 #Comment 3\n\nComment lines in the excel input file can be skipped using the\n``comment`` kwarg.\n\n>>> pd.read_excel('tmp.xlsx', index_col=0, comment='#') # doctest: +SKIP\n Name Value\n0 string1 1.0\n1 string2 2.0\n2 None NaN\n"""\n)\n\n\n@overload\ndef read_excel(\n io,\n # sheet name is str or int -> DataFrame\n sheet_name: str | int = ...,\n *,\n header: int | Sequence[int] | None = ...,\n names: SequenceNotStr[Hashable] | range | None = ...,\n index_col: int | str | Sequence[int] | None = ...,\n usecols: int\n | str\n | Sequence[int]\n | Sequence[str]\n | Callable[[str], bool]\n | None = ...,\n dtype: DtypeArg | None = ...,\n engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb", "calamine"] | None = ...,\n converters: dict[str, Callable] | dict[int, Callable] | None = ...,\n true_values: Iterable[Hashable] | None = ...,\n false_values: Iterable[Hashable] | None = ...,\n skiprows: Sequence[int] | int | Callable[[int], object] | None = ...,\n nrows: int | None = ...,\n na_values=...,\n keep_default_na: bool = ...,\n na_filter: bool = ...,\n verbose: bool = ...,\n parse_dates: list | dict | bool 
= ...,\n date_parser: Callable | lib.NoDefault = ...,\n date_format: dict[Hashable, str] | str | None = ...,\n thousands: str | None = ...,\n decimal: str = ...,\n comment: str | None = ...,\n skipfooter: int = ...,\n storage_options: StorageOptions = ...,\n dtype_backend: DtypeBackend | lib.NoDefault = ...,\n) -> DataFrame:\n ...\n\n\n@overload\ndef read_excel(\n io,\n # sheet name is list or None -> dict[IntStrT, DataFrame]\n sheet_name: list[IntStrT] | None,\n *,\n header: int | Sequence[int] | None = ...,\n names: SequenceNotStr[Hashable] | range | None = ...,\n index_col: int | str | Sequence[int] | None = ...,\n usecols: int\n | str\n | Sequence[int]\n | Sequence[str]\n | Callable[[str], bool]\n | None = ...,\n dtype: DtypeArg | None = ...,\n engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb", "calamine"] | None = ...,\n converters: dict[str, Callable] | dict[int, Callable] | None = ...,\n true_values: Iterable[Hashable] | None = ...,\n false_values: Iterable[Hashable] | None = ...,\n skiprows: Sequence[int] | int | Callable[[int], object] | None = ...,\n nrows: int | None = ...,\n na_values=...,\n keep_default_na: bool = ...,\n na_filter: bool = ...,\n verbose: bool = ...,\n parse_dates: list | dict | bool = ...,\n date_parser: Callable | lib.NoDefault = ...,\n date_format: dict[Hashable, str] | str | None = ...,\n thousands: str | None = ...,\n decimal: str = ...,\n comment: str | None = ...,\n skipfooter: int = ...,\n storage_options: StorageOptions = ...,\n dtype_backend: DtypeBackend | lib.NoDefault = ...,\n) -> dict[IntStrT, DataFrame]:\n ...\n\n\n@doc(storage_options=_shared_docs["storage_options"])\n@Appender(_read_excel_doc)\ndef read_excel(\n io,\n sheet_name: str | int | list[IntStrT] | None = 0,\n *,\n header: int | Sequence[int] | None = 0,\n names: SequenceNotStr[Hashable] | range | None = None,\n index_col: int | str | Sequence[int] | None = None,\n usecols: int\n | str\n | Sequence[int]\n | Sequence[str]\n | Callable[[str], bool]\n | None = 
None,\n dtype: DtypeArg | None = None,\n engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb", "calamine"] | None = None,\n converters: dict[str, Callable] | dict[int, Callable] | None = None,\n true_values: Iterable[Hashable] | None = None,\n false_values: Iterable[Hashable] | None = None,\n skiprows: Sequence[int] | int | Callable[[int], object] | None = None,\n nrows: int | None = None,\n na_values=None,\n keep_default_na: bool = True,\n na_filter: bool = True,\n verbose: bool = False,\n parse_dates: list | dict | bool = False,\n date_parser: Callable | lib.NoDefault = lib.no_default,\n date_format: dict[Hashable, str] | str | None = None,\n thousands: str | None = None,\n decimal: str = ".",\n comment: str | None = None,\n skipfooter: int = 0,\n storage_options: StorageOptions | None = None,\n dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,\n engine_kwargs: dict | None = None,\n) -> DataFrame | dict[IntStrT, DataFrame]:\n check_dtype_backend(dtype_backend)\n should_close = False\n if engine_kwargs is None:\n engine_kwargs = {}\n\n if not isinstance(io, ExcelFile):\n should_close = True\n io = ExcelFile(\n io,\n storage_options=storage_options,\n engine=engine,\n engine_kwargs=engine_kwargs,\n )\n elif engine and engine != io.engine:\n raise ValueError(\n "Engine should not be specified when passing "\n "an ExcelFile - ExcelFile already has the engine set"\n )\n\n try:\n data = io.parse(\n sheet_name=sheet_name,\n header=header,\n names=names,\n index_col=index_col,\n usecols=usecols,\n dtype=dtype,\n converters=converters,\n true_values=true_values,\n false_values=false_values,\n skiprows=skiprows,\n nrows=nrows,\n na_values=na_values,\n keep_default_na=keep_default_na,\n na_filter=na_filter,\n verbose=verbose,\n parse_dates=parse_dates,\n date_parser=date_parser,\n date_format=date_format,\n thousands=thousands,\n decimal=decimal,\n comment=comment,\n skipfooter=skipfooter,\n dtype_backend=dtype_backend,\n )\n finally:\n # make sure to close 
opened file handles\n if should_close:\n io.close()\n return data\n\n\n_WorkbookT = TypeVar("_WorkbookT")\n\n\nclass BaseExcelReader(Generic[_WorkbookT]):\n book: _WorkbookT\n\n def __init__(\n self,\n filepath_or_buffer,\n storage_options: StorageOptions | None = None,\n engine_kwargs: dict | None = None,\n ) -> None:\n if engine_kwargs is None:\n engine_kwargs = {}\n\n # First argument can also be bytes, so create a buffer\n if isinstance(filepath_or_buffer, bytes):\n filepath_or_buffer = BytesIO(filepath_or_buffer)\n\n self.handles = IOHandles(\n handle=filepath_or_buffer, compression={"method": None}\n )\n if not isinstance(filepath_or_buffer, (ExcelFile, self._workbook_class)):\n self.handles = get_handle(\n filepath_or_buffer, "rb", storage_options=storage_options, is_text=False\n )\n\n if isinstance(self.handles.handle, self._workbook_class):\n self.book = self.handles.handle\n elif hasattr(self.handles.handle, "read"):\n # N.B. xlrd.Book has a read attribute too\n self.handles.handle.seek(0)\n try:\n self.book = self.load_workbook(self.handles.handle, engine_kwargs)\n except Exception:\n self.close()\n raise\n else:\n raise ValueError(\n "Must explicitly set engine if not passing in buffer or path for io."\n )\n\n @property\n def _workbook_class(self) -> type[_WorkbookT]:\n raise NotImplementedError\n\n def load_workbook(self, filepath_or_buffer, engine_kwargs) -> _WorkbookT:\n raise NotImplementedError\n\n def close(self) -> None:\n if hasattr(self, "book"):\n if hasattr(self.book, "close"):\n # pyxlsb: opens a TemporaryFile\n # openpyxl: https://stackoverflow.com/questions/31416842/\n # openpyxl-does-not-close-excel-workbook-in-read-only-mode\n self.book.close()\n elif hasattr(self.book, "release_resources"):\n # xlrd\n # https://github.com/python-excel/xlrd/blob/2.0.1/xlrd/book.py#L548\n self.book.release_resources()\n self.handles.close()\n\n @property\n def sheet_names(self) -> list[str]:\n raise NotImplementedError\n\n def get_sheet_by_name(self, 
name: str):\n raise NotImplementedError\n\n def get_sheet_by_index(self, index: int):\n raise NotImplementedError\n\n def get_sheet_data(self, sheet, rows: int | None = None):\n raise NotImplementedError\n\n def raise_if_bad_sheet_by_index(self, index: int) -> None:\n n_sheets = len(self.sheet_names)\n if index >= n_sheets:\n raise ValueError(\n f"Worksheet index {index} is invalid, {n_sheets} worksheets found"\n )\n\n def raise_if_bad_sheet_by_name(self, name: str) -> None:\n if name not in self.sheet_names:\n raise ValueError(f"Worksheet named '{name}' not found")\n\n def _check_skiprows_func(\n self,\n skiprows: Callable,\n rows_to_use: int,\n ) -> int:\n """\n Determine how many file rows are required to obtain `nrows` data\n rows when `skiprows` is a function.\n\n Parameters\n ----------\n skiprows : function\n The function passed to read_excel by the user.\n rows_to_use : int\n The number of rows that will be needed for the header and\n the data.\n\n Returns\n -------\n int\n """\n i = 0\n rows_used_so_far = 0\n while rows_used_so_far < rows_to_use:\n if not skiprows(i):\n rows_used_so_far += 1\n i += 1\n return i\n\n def _calc_rows(\n self,\n header: int | Sequence[int] | None,\n index_col: int | Sequence[int] | None,\n skiprows: Sequence[int] | int | Callable[[int], object] | None,\n nrows: int | None,\n ) -> int | None:\n """\n If nrows specified, find the number of rows needed from the\n file, otherwise return None.\n\n\n Parameters\n ----------\n header : int, list of int, or None\n See read_excel docstring.\n index_col : int, str, list of int, or None\n See read_excel docstring.\n skiprows : list-like, int, callable, or None\n See read_excel docstring.\n nrows : int or None\n See read_excel docstring.\n\n Returns\n -------\n int or None\n """\n if nrows is None:\n return None\n if header is None:\n header_rows = 1\n elif is_integer(header):\n header = cast(int, header)\n header_rows = 1 + header\n else:\n header = cast(Sequence, header)\n header_rows = 
1 + header[-1]\n # If there is a MultiIndex header and an index then there is also\n # a row containing just the index name(s)\n if is_list_like(header) and index_col is not None:\n header = cast(Sequence, header)\n if len(header) > 1:\n header_rows += 1\n if skiprows is None:\n return header_rows + nrows\n if is_integer(skiprows):\n skiprows = cast(int, skiprows)\n return header_rows + nrows + skiprows\n if is_list_like(skiprows):\n\n def f(skiprows: Sequence, x: int) -> bool:\n return x in skiprows\n\n skiprows = cast(Sequence, skiprows)\n return self._check_skiprows_func(partial(f, skiprows), header_rows + nrows)\n if callable(skiprows):\n return self._check_skiprows_func(\n skiprows,\n header_rows + nrows,\n )\n # else unexpected skiprows type: read_excel will not optimize\n # the number of rows read from file\n return None\n\n def parse(\n self,\n sheet_name: str | int | list[int] | list[str] | None = 0,\n header: int | Sequence[int] | None = 0,\n names: SequenceNotStr[Hashable] | range | None = None,\n index_col: int | Sequence[int] | None = None,\n usecols=None,\n dtype: DtypeArg | None = None,\n true_values: Iterable[Hashable] | None = None,\n false_values: Iterable[Hashable] | None = None,\n skiprows: Sequence[int] | int | Callable[[int], object] | None = None,\n nrows: int | None = None,\n na_values=None,\n verbose: bool = False,\n parse_dates: list | dict | bool = False,\n date_parser: Callable | lib.NoDefault = lib.no_default,\n date_format: dict[Hashable, str] | str | None = None,\n thousands: str | None = None,\n decimal: str = ".",\n comment: str | None = None,\n skipfooter: int = 0,\n dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,\n **kwds,\n ):\n validate_header_arg(header)\n validate_integer("nrows", nrows)\n\n ret_dict = False\n\n # Keep sheetname to maintain backwards compatibility.\n sheets: list[int] | list[str]\n if isinstance(sheet_name, list):\n sheets = sheet_name\n ret_dict = True\n elif sheet_name is None:\n sheets = 
self.sheet_names\n ret_dict = True\n elif isinstance(sheet_name, str):\n sheets = [sheet_name]\n else:\n sheets = [sheet_name]\n\n # handle same-type duplicates.\n sheets = cast(Union[list[int], list[str]], list(dict.fromkeys(sheets).keys()))\n\n output = {}\n\n last_sheetname = None\n for asheetname in sheets:\n last_sheetname = asheetname\n if verbose:\n print(f"Reading sheet {asheetname}")\n\n if isinstance(asheetname, str):\n sheet = self.get_sheet_by_name(asheetname)\n else: # assume an integer if not a string\n sheet = self.get_sheet_by_index(asheetname)\n\n file_rows_needed = self._calc_rows(header, index_col, skiprows, nrows)\n data = self.get_sheet_data(sheet, file_rows_needed)\n if hasattr(sheet, "close"):\n # pyxlsb opens two TemporaryFiles\n sheet.close()\n usecols = maybe_convert_usecols(usecols)\n\n if not data:\n output[asheetname] = DataFrame()\n continue\n\n is_list_header = False\n is_len_one_list_header = False\n if is_list_like(header):\n assert isinstance(header, Sequence)\n is_list_header = True\n if len(header) == 1:\n is_len_one_list_header = True\n\n if is_len_one_list_header:\n header = cast(Sequence[int], header)[0]\n\n # forward fill and pull out names for MultiIndex column\n header_names = None\n if header is not None and is_list_like(header):\n assert isinstance(header, Sequence)\n\n header_names = []\n control_row = [True] * len(data[0])\n\n for row in header:\n if is_integer(skiprows):\n assert isinstance(skiprows, int)\n row += skiprows\n\n if row > len(data) - 1:\n raise ValueError(\n f"header index {row} exceeds maximum index "\n f"{len(data) - 1} of data.",\n )\n\n data[row], control_row = fill_mi_header(data[row], control_row)\n\n if index_col is not None:\n header_name, _ = pop_header_name(data[row], index_col)\n header_names.append(header_name)\n\n # If there is a MultiIndex header and an index then there is also\n # a row containing just the index name(s)\n has_index_names = False\n if is_list_header and not 
is_len_one_list_header and index_col is not None:\n index_col_list: Sequence[int]\n if isinstance(index_col, int):\n index_col_list = [index_col]\n else:\n assert isinstance(index_col, Sequence)\n index_col_list = index_col\n\n # We have to handle mi without names. If any of the entries in the data\n # columns are not empty, this is a regular row\n assert isinstance(header, Sequence)\n if len(header) < len(data):\n potential_index_names = data[len(header)]\n potential_data = [\n x\n for i, x in enumerate(potential_index_names)\n if not control_row[i] and i not in index_col_list\n ]\n has_index_names = all(x == "" or x is None for x in potential_data)\n\n if is_list_like(index_col):\n # Forward fill values for MultiIndex index.\n if header is None:\n offset = 0\n elif isinstance(header, int):\n offset = 1 + header\n else:\n offset = 1 + max(header)\n\n # GH34673: if MultiIndex names present and not defined in the header,\n # offset needs to be incremented so that forward filling starts\n # from the first MI value instead of the name\n if has_index_names:\n offset += 1\n\n # Check if we have an empty dataset\n # before trying to collect data.\n if offset < len(data):\n assert isinstance(index_col, Sequence)\n\n for col in index_col:\n last = data[offset][col]\n\n for row in range(offset + 1, len(data)):\n if data[row][col] == "" or data[row][col] is None:\n data[row][col] = last\n else:\n last = data[row][col]\n\n # GH 12292 : error when read one empty column from excel file\n try:\n parser = TextParser(\n data,\n names=names,\n header=header,\n index_col=index_col,\n has_index_names=has_index_names,\n dtype=dtype,\n true_values=true_values,\n false_values=false_values,\n skiprows=skiprows,\n nrows=nrows,\n na_values=na_values,\n skip_blank_lines=False, # GH 39808\n parse_dates=parse_dates,\n date_parser=date_parser,\n date_format=date_format,\n thousands=thousands,\n decimal=decimal,\n comment=comment,\n skipfooter=skipfooter,\n usecols=usecols,\n 
dtype_backend=dtype_backend,\n **kwds,\n )\n\n output[asheetname] = parser.read(nrows=nrows)\n\n if header_names:\n output[asheetname].columns = output[asheetname].columns.set_names(\n header_names\n )\n\n except EmptyDataError:\n # No Data, return an empty DataFrame\n output[asheetname] = DataFrame()\n\n except Exception as err:\n err.args = (f"{err.args[0]} (sheet: {asheetname})", *err.args[1:])\n raise err\n\n if last_sheetname is None:\n raise ValueError("Sheet name is an empty list")\n\n if ret_dict:\n return output\n else:\n return output[last_sheetname]\n\n\n@doc(storage_options=_shared_docs["storage_options"])\nclass ExcelWriter(Generic[_WorkbookT]):\n """\n Class for writing DataFrame objects into excel sheets.\n\n Default is to use:\n\n * `xlsxwriter <https://pypi.org/project/XlsxWriter/>`__ for xlsx files if xlsxwriter\n is installed otherwise `openpyxl <https://pypi.org/project/openpyxl/>`__\n * `odswriter <https://pypi.org/project/odswriter/>`__ for ods files\n\n See ``DataFrame.to_excel`` for typical usage.\n\n The writer should be used as a context manager. Otherwise, call `close()` to save\n and close any opened file handles.\n\n Parameters\n ----------\n path : str or typing.BinaryIO\n Path to xls or xlsx or ods file.\n engine : str (optional)\n Engine to use for writing. If None, defaults to\n ``io.excel.<extension>.writer``. NOTE: can only be passed as a keyword\n argument.\n date_format : str, default None\n Format string for dates written into Excel files (e.g. 'YYYY-MM-DD').\n datetime_format : str, default None\n Format string for datetime objects written into Excel files.\n (e.g. 'YYYY-MM-DD HH:MM:SS').\n mode : {{'w', 'a'}}, default 'w'\n File mode to use (write or append). 
Append does not work with fsspec URLs.\n {storage_options}\n\n if_sheet_exists : {{'error', 'new', 'replace', 'overlay'}}, default 'error'\n How to behave when trying to write to a sheet that already\n exists (append mode only).\n\n * error: raise a ValueError.\n * new: Create a new sheet, with a name determined by the engine.\n * replace: Delete the contents of the sheet before writing to it.\n * overlay: Write contents to the existing sheet without first removing,\n but possibly over top of, the existing contents.\n\n .. versionadded:: 1.3.0\n\n .. versionchanged:: 1.4.0\n\n Added ``overlay`` option\n\n engine_kwargs : dict, optional\n Keyword arguments to be passed into the engine. These will be passed to\n the following functions of the respective engines:\n\n * xlsxwriter: ``xlsxwriter.Workbook(file, **engine_kwargs)``\n * openpyxl (write mode): ``openpyxl.Workbook(**engine_kwargs)``\n * openpyxl (append mode): ``openpyxl.load_workbook(file, **engine_kwargs)``\n * odswriter: ``odf.opendocument.OpenDocumentSpreadsheet(**engine_kwargs)``\n\n .. versionadded:: 1.3.0\n\n Notes\n -----\n For compatibility with CSV writers, ExcelWriter serializes lists\n and dicts to strings before writing.\n\n Examples\n --------\n Default usage:\n\n >>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP\n >>> with pd.ExcelWriter("path_to_file.xlsx") as writer:\n ... df.to_excel(writer) # doctest: +SKIP\n\n To write to separate sheets in a single file:\n\n >>> df1 = pd.DataFrame([["AAA", "BBB"]], columns=["Spam", "Egg"]) # doctest: +SKIP\n >>> df2 = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP\n >>> with pd.ExcelWriter("path_to_file.xlsx") as writer:\n ... df1.to_excel(writer, sheet_name="Sheet1") # doctest: +SKIP\n ... df2.to_excel(writer, sheet_name="Sheet2") # doctest: +SKIP\n\n You can set the date format or datetime format:\n\n >>> from datetime import date, datetime # doctest: +SKIP\n >>> df = pd.DataFrame(\n ... [\n ... 
[date(2014, 1, 31), date(1999, 9, 24)],\n ... [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)],\n ... ],\n ... index=["Date", "Datetime"],\n ... columns=["X", "Y"],\n ... ) # doctest: +SKIP\n >>> with pd.ExcelWriter(\n ... "path_to_file.xlsx",\n ... date_format="YYYY-MM-DD",\n ... datetime_format="YYYY-MM-DD HH:MM:SS"\n ... ) as writer:\n ... df.to_excel(writer) # doctest: +SKIP\n\n You can also append to an existing Excel file:\n\n >>> with pd.ExcelWriter("path_to_file.xlsx", mode="a", engine="openpyxl") as writer:\n ... df.to_excel(writer, sheet_name="Sheet3") # doctest: +SKIP\n\n Here, the `if_sheet_exists` parameter can be set to replace a sheet if it\n already exists:\n\n >>> with ExcelWriter(\n ... "path_to_file.xlsx",\n ... mode="a",\n ... engine="openpyxl",\n ... if_sheet_exists="replace",\n ... ) as writer:\n ... df.to_excel(writer, sheet_name="Sheet1") # doctest: +SKIP\n\n You can also write multiple DataFrames to a single sheet. Note that the\n ``if_sheet_exists`` parameter needs to be set to ``overlay``:\n\n >>> with ExcelWriter("path_to_file.xlsx",\n ... mode="a",\n ... engine="openpyxl",\n ... if_sheet_exists="overlay",\n ... ) as writer:\n ... df1.to_excel(writer, sheet_name="Sheet1")\n ... df2.to_excel(writer, sheet_name="Sheet1", startcol=3) # doctest: +SKIP\n\n You can store Excel file in RAM:\n\n >>> import io\n >>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"])\n >>> buffer = io.BytesIO()\n >>> with pd.ExcelWriter(buffer) as writer:\n ... df.to_excel(writer)\n\n You can pack Excel file into zip archive:\n\n >>> import zipfile # doctest: +SKIP\n >>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP\n >>> with zipfile.ZipFile("path_to_file.zip", "w") as zf:\n ... with zf.open("filename.xlsx", "w") as buffer:\n ... with pd.ExcelWriter(buffer) as writer:\n ... 
df.to_excel(writer) # doctest: +SKIP\n\n You can specify additional arguments to the underlying engine:\n\n >>> with pd.ExcelWriter(\n ... "path_to_file.xlsx",\n ... engine="xlsxwriter",\n ... engine_kwargs={{"options": {{"nan_inf_to_errors": True}}}}\n ... ) as writer:\n ... df.to_excel(writer) # doctest: +SKIP\n\n In append mode, ``engine_kwargs`` are passed through to\n openpyxl's ``load_workbook``:\n\n >>> with pd.ExcelWriter(\n ... "path_to_file.xlsx",\n ... engine="openpyxl",\n ... mode="a",\n ... engine_kwargs={{"keep_vba": True}}\n ... ) as writer:\n ... df.to_excel(writer, sheet_name="Sheet2") # doctest: +SKIP\n """\n\n # Defining an ExcelWriter implementation (see abstract methods for more...)\n\n # - Mandatory\n # - ``write_cells(self, cells, sheet_name=None, startrow=0, startcol=0)``\n # --> called to write additional DataFrames to disk\n # - ``_supported_extensions`` (tuple of supported extensions), used to\n # check that engine supports the given extension.\n # - ``_engine`` - string that gives the engine name. Necessary to\n # instantiate class directly and bypass ``ExcelWriterMeta`` engine\n # lookup.\n # - ``save(self)`` --> called to save file to disk\n # - Mostly mandatory (i.e. 
should at least exist)\n # - book, cur_sheet, path\n\n # - Optional:\n # - ``__init__(self, path, engine=None, **kwargs)`` --> always called\n # with path as first argument.\n\n # You also need to register the class with ``register_writer()``.\n # Technically, ExcelWriter implementations don't need to subclass\n # ExcelWriter.\n\n _engine: str\n _supported_extensions: tuple[str, ...]\n\n def __new__(\n cls,\n path: FilePath | WriteExcelBuffer | ExcelWriter,\n engine: str | None = None,\n date_format: str | None = None,\n datetime_format: str | None = None,\n mode: str = "w",\n storage_options: StorageOptions | None = None,\n if_sheet_exists: ExcelWriterIfSheetExists | None = None,\n engine_kwargs: dict | None = None,\n ) -> Self:\n # only switch class if generic(ExcelWriter)\n if cls is ExcelWriter:\n if engine is None or (isinstance(engine, str) and engine == "auto"):\n if isinstance(path, str):\n ext = os.path.splitext(path)[-1][1:]\n else:\n ext = "xlsx"\n\n try:\n engine = config.get_option(f"io.excel.{ext}.writer", silent=True)\n if engine == "auto":\n engine = get_default_engine(ext, mode="writer")\n except KeyError as err:\n raise ValueError(f"No engine for filetype: '{ext}'") from err\n\n # for mypy\n assert engine is not None\n # error: Incompatible types in assignment (expression has type\n # "type[ExcelWriter[Any]]", variable has type "type[Self]")\n cls = get_writer(engine) # type: ignore[assignment]\n\n return object.__new__(cls)\n\n # declare external properties you can count on\n _path = None\n\n @property\n def supported_extensions(self) -> tuple[str, ...]:\n """Extensions that writer engine supports."""\n return self._supported_extensions\n\n @property\n def engine(self) -> str:\n """Name of engine."""\n return self._engine\n\n @property\n def sheets(self) -> dict[str, Any]:\n """Mapping of sheet names to sheet objects."""\n raise NotImplementedError\n\n @property\n def book(self) -> _WorkbookT:\n """\n Book instance. 
Class type will depend on the engine used.\n\n This attribute can be used to access engine-specific features.\n """\n raise NotImplementedError\n\n def _write_cells(\n self,\n cells,\n sheet_name: str | None = None,\n startrow: int = 0,\n startcol: int = 0,\n freeze_panes: tuple[int, int] | None = None,\n ) -> None:\n """\n Write given formatted cells into Excel an excel sheet\n\n Parameters\n ----------\n cells : generator\n cell of formatted data to save to Excel sheet\n sheet_name : str, default None\n Name of Excel sheet, if None, then use self.cur_sheet\n startrow : upper left cell row to dump data frame\n startcol : upper left cell column to dump data frame\n freeze_panes: int tuple of length 2\n contains the bottom-most row and right-most column to freeze\n """\n raise NotImplementedError\n\n def _save(self) -> None:\n """\n Save workbook to disk.\n """\n raise NotImplementedError\n\n def __init__(\n self,\n path: FilePath | WriteExcelBuffer | ExcelWriter,\n engine: str | None = None,\n date_format: str | None = None,\n datetime_format: str | None = None,\n mode: str = "w",\n storage_options: StorageOptions | None = None,\n if_sheet_exists: ExcelWriterIfSheetExists | None = None,\n engine_kwargs: dict[str, Any] | None = None,\n ) -> None:\n # validate that this engine can handle the extension\n if isinstance(path, str):\n ext = os.path.splitext(path)[-1]\n self.check_extension(ext)\n\n # use mode to open the file\n if "b" not in mode:\n mode += "b"\n # use "a" for the user to append data to excel but internally use "r+" to let\n # the excel backend first read the existing file and then write any data to it\n mode = mode.replace("a", "r+")\n\n if if_sheet_exists not in (None, "error", "new", "replace", "overlay"):\n raise ValueError(\n f"'{if_sheet_exists}' is not valid for if_sheet_exists. 
"\n "Valid options are 'error', 'new', 'replace' and 'overlay'."\n )\n if if_sheet_exists and "r+" not in mode:\n raise ValueError("if_sheet_exists is only valid in append mode (mode='a')")\n if if_sheet_exists is None:\n if_sheet_exists = "error"\n self._if_sheet_exists = if_sheet_exists\n\n # cast ExcelWriter to avoid adding 'if self._handles is not None'\n self._handles = IOHandles(\n cast(IO[bytes], path), compression={"compression": None}\n )\n if not isinstance(path, ExcelWriter):\n self._handles = get_handle(\n path, mode, storage_options=storage_options, is_text=False\n )\n self._cur_sheet = None\n\n if date_format is None:\n self._date_format = "YYYY-MM-DD"\n else:\n self._date_format = date_format\n if datetime_format is None:\n self._datetime_format = "YYYY-MM-DD HH:MM:SS"\n else:\n self._datetime_format = datetime_format\n\n self._mode = mode\n\n @property\n def date_format(self) -> str:\n """\n Format string for dates written into Excel files (e.g. 'YYYY-MM-DD').\n """\n return self._date_format\n\n @property\n def datetime_format(self) -> str:\n """\n Format string for dates written into Excel files (e.g. 
'YYYY-MM-DD').\n """\n return self._datetime_format\n\n @property\n def if_sheet_exists(self) -> str:\n """\n How to behave when writing to a sheet that already exists in append mode.\n """\n return self._if_sheet_exists\n\n def __fspath__(self) -> str:\n return getattr(self._handles.handle, "name", "")\n\n def _get_sheet_name(self, sheet_name: str | None) -> str:\n if sheet_name is None:\n sheet_name = self._cur_sheet\n if sheet_name is None: # pragma: no cover\n raise ValueError("Must pass explicit sheet_name or set _cur_sheet property")\n return sheet_name\n\n def _value_with_fmt(\n self, val\n ) -> tuple[\n int | float | bool | str | datetime.datetime | datetime.date, str | None\n ]:\n """\n Convert numpy types to Python types for the Excel writers.\n\n Parameters\n ----------\n val : object\n Value to be written into cells\n\n Returns\n -------\n Tuple with the first element being the converted value and the second\n being an optional format\n """\n fmt = None\n\n if is_integer(val):\n val = int(val)\n elif is_float(val):\n val = float(val)\n elif is_bool(val):\n val = bool(val)\n elif isinstance(val, datetime.datetime):\n fmt = self._datetime_format\n elif isinstance(val, datetime.date):\n fmt = self._date_format\n elif isinstance(val, datetime.timedelta):\n val = val.total_seconds() / 86400\n fmt = "0"\n else:\n val = str(val)\n\n return val, fmt\n\n @classmethod\n def check_extension(cls, ext: str) -> Literal[True]:\n """\n checks that path's extension against the Writer's supported\n extensions. 
If it isn't supported, raises UnsupportedFiletypeError.\n """\n if ext.startswith("."):\n ext = ext[1:]\n if not any(ext in extension for extension in cls._supported_extensions):\n raise ValueError(f"Invalid extension for engine '{cls.engine}': '{ext}'")\n return True\n\n # Allow use as a contextmanager\n def __enter__(self) -> Self:\n return self\n\n def __exit__(\n self,\n exc_type: type[BaseException] | None,\n exc_value: BaseException | None,\n traceback: TracebackType | None,\n ) -> None:\n self.close()\n\n def close(self) -> None:\n """synonym for save, to make it more file-like"""\n self._save()\n self._handles.close()\n\n\nXLS_SIGNATURES = (\n b"\x09\x00\x04\x00\x07\x00\x10\x00", # BIFF2\n b"\x09\x02\x06\x00\x00\x00\x10\x00", # BIFF3\n b"\x09\x04\x06\x00\x00\x00\x10\x00", # BIFF4\n b"\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1", # Compound File Binary\n)\nZIP_SIGNATURE = b"PK\x03\x04"\nPEEK_SIZE = max(map(len, XLS_SIGNATURES + (ZIP_SIGNATURE,)))\n\n\n@doc(storage_options=_shared_docs["storage_options"])\ndef inspect_excel_format(\n content_or_path: FilePath | ReadBuffer[bytes],\n storage_options: StorageOptions | None = None,\n) -> str | None:\n """\n Inspect the path or content of an excel file and get its format.\n\n Adopted from xlrd: https://github.com/python-excel/xlrd.\n\n Parameters\n ----------\n content_or_path : str or file-like object\n Path to file or content of file to inspect. 
May be a URL.\n {storage_options}\n\n Returns\n -------\n str or None\n Format of file if it can be determined.\n\n Raises\n ------\n ValueError\n If resulting stream is empty.\n BadZipFile\n If resulting stream does not have an XLS signature and is not a valid zipfile.\n """\n if isinstance(content_or_path, bytes):\n content_or_path = BytesIO(content_or_path)\n\n with get_handle(\n content_or_path, "rb", storage_options=storage_options, is_text=False\n ) as handle:\n stream = handle.handle\n stream.seek(0)\n buf = stream.read(PEEK_SIZE)\n if buf is None:\n raise ValueError("stream is empty")\n assert isinstance(buf, bytes)\n peek = buf\n stream.seek(0)\n\n if any(peek.startswith(sig) for sig in XLS_SIGNATURES):\n return "xls"\n elif not peek.startswith(ZIP_SIGNATURE):\n return None\n\n with zipfile.ZipFile(stream) as zf:\n # Workaround for some third party files that use forward slashes and\n # lower case names.\n component_names = [\n name.replace("\\", "/").lower() for name in zf.namelist()\n ]\n\n if "xl/workbook.xml" in component_names:\n return "xlsx"\n if "xl/workbook.bin" in component_names:\n return "xlsb"\n if "content.xml" in component_names:\n return "ods"\n return "zip"\n\n\nclass ExcelFile:\n """\n Class for parsing tabular Excel sheets into DataFrame objects.\n\n See read_excel for more documentation.\n\n Parameters\n ----------\n path_or_buffer : str, bytes, path object (pathlib.Path or py._path.local.LocalPath),\n A file-like object, xlrd workbook or openpyxl workbook.\n If a string or path object, expected to be a path to a\n .xls, .xlsx, .xlsb, .xlsm, .odf, .ods, or .odt file.\n engine : str, default None\n If io is not a buffer or path, this must be set to identify io.\n Supported engines: ``xlrd``, ``openpyxl``, ``odf``, ``pyxlsb``, ``calamine``\n Engine compatibility :\n\n - ``xlrd`` supports old-style Excel files (.xls).\n - ``openpyxl`` supports newer Excel file formats.\n - ``odf`` supports OpenDocument file formats (.odf, .ods, .odt).\n - 
``pyxlsb`` supports Binary Excel files.\n - ``calamine`` supports Excel (.xls, .xlsx, .xlsm, .xlsb)\n and OpenDocument (.ods) file formats.\n\n .. versionchanged:: 1.2.0\n\n The engine `xlrd <https://xlrd.readthedocs.io/en/latest/>`_\n now only supports old-style ``.xls`` files.\n When ``engine=None``, the following logic will be\n used to determine the engine:\n\n - If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt),\n then `odf <https://pypi.org/project/odfpy/>`_ will be used.\n - Otherwise if ``path_or_buffer`` is an xls format,\n ``xlrd`` will be used.\n - Otherwise if ``path_or_buffer`` is in xlsb format,\n `pyxlsb <https://pypi.org/project/pyxlsb/>`_ will be used.\n\n .. versionadded:: 1.3.0\n\n - Otherwise if `openpyxl <https://pypi.org/project/openpyxl/>`_ is installed,\n then ``openpyxl`` will be used.\n - Otherwise if ``xlrd >= 2.0`` is installed, a ``ValueError`` will be raised.\n\n .. warning::\n\n Please do not report issues when using ``xlrd`` to read ``.xlsx`` files.\n This is not supported, switch to using ``openpyxl`` instead.\n engine_kwargs : dict, optional\n Arbitrary keyword arguments passed to excel engine.\n\n Examples\n --------\n >>> file = pd.ExcelFile('myfile.xlsx') # doctest: +SKIP\n >>> with pd.ExcelFile("myfile.xls") as xls: # doctest: +SKIP\n ... 
df1 = pd.read_excel(xls, "Sheet1") # doctest: +SKIP\n """\n\n from pandas.io.excel._calamine import CalamineReader\n from pandas.io.excel._odfreader import ODFReader\n from pandas.io.excel._openpyxl import OpenpyxlReader\n from pandas.io.excel._pyxlsb import PyxlsbReader\n from pandas.io.excel._xlrd import XlrdReader\n\n _engines: Mapping[str, Any] = {\n "xlrd": XlrdReader,\n "openpyxl": OpenpyxlReader,\n "odf": ODFReader,\n "pyxlsb": PyxlsbReader,\n "calamine": CalamineReader,\n }\n\n def __init__(\n self,\n path_or_buffer,\n engine: str | None = None,\n storage_options: StorageOptions | None = None,\n engine_kwargs: dict | None = None,\n ) -> None:\n if engine_kwargs is None:\n engine_kwargs = {}\n\n if engine is not None and engine not in self._engines:\n raise ValueError(f"Unknown engine: {engine}")\n\n # First argument can also be bytes, so create a buffer\n if isinstance(path_or_buffer, bytes):\n path_or_buffer = BytesIO(path_or_buffer)\n warnings.warn(\n "Passing bytes to 'read_excel' is deprecated and "\n "will be removed in a future version. 
To read from a "\n "byte string, wrap it in a `BytesIO` object.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n # Could be a str, ExcelFile, Book, etc.\n self.io = path_or_buffer\n # Always a string\n self._io = stringify_path(path_or_buffer)\n\n # Determine xlrd version if installed\n if import_optional_dependency("xlrd", errors="ignore") is None:\n xlrd_version = None\n else:\n import xlrd\n\n xlrd_version = Version(get_version(xlrd))\n\n if engine is None:\n # Only determine ext if it is needed\n ext: str | None\n if xlrd_version is not None and isinstance(path_or_buffer, xlrd.Book):\n ext = "xls"\n else:\n ext = inspect_excel_format(\n content_or_path=path_or_buffer, storage_options=storage_options\n )\n if ext is None:\n raise ValueError(\n "Excel file format cannot be determined, you must specify "\n "an engine manually."\n )\n\n engine = config.get_option(f"io.excel.{ext}.reader", silent=True)\n if engine == "auto":\n engine = get_default_engine(ext, mode="reader")\n\n assert engine is not None\n self.engine = engine\n self.storage_options = storage_options\n\n self._reader = self._engines[engine](\n self._io,\n storage_options=storage_options,\n engine_kwargs=engine_kwargs,\n )\n\n def __fspath__(self):\n return self._io\n\n def parse(\n self,\n sheet_name: str | int | list[int] | list[str] | None = 0,\n header: int | Sequence[int] | None = 0,\n names: SequenceNotStr[Hashable] | range | None = None,\n index_col: int | Sequence[int] | None = None,\n usecols=None,\n converters=None,\n true_values: Iterable[Hashable] | None = None,\n false_values: Iterable[Hashable] | None = None,\n skiprows: Sequence[int] | int | Callable[[int], object] | None = None,\n nrows: int | None = None,\n na_values=None,\n parse_dates: list | dict | bool = False,\n date_parser: Callable | lib.NoDefault = lib.no_default,\n date_format: str | dict[Hashable, str] | None = None,\n thousands: str | None = None,\n comment: str | None = None,\n skipfooter: int = 0,\n 
dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,\n **kwds,\n ) -> DataFrame | dict[str, DataFrame] | dict[int, DataFrame]:\n """\n Parse specified sheet(s) into a DataFrame.\n\n Equivalent to read_excel(ExcelFile, ...) See the read_excel\n docstring for more info on accepted parameters.\n\n Returns\n -------\n DataFrame or dict of DataFrames\n DataFrame from the passed in Excel file.\n\n Examples\n --------\n >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])\n >>> df.to_excel('myfile.xlsx') # doctest: +SKIP\n >>> file = pd.ExcelFile('myfile.xlsx') # doctest: +SKIP\n >>> file.parse() # doctest: +SKIP\n """\n return self._reader.parse(\n sheet_name=sheet_name,\n header=header,\n names=names,\n index_col=index_col,\n usecols=usecols,\n converters=converters,\n true_values=true_values,\n false_values=false_values,\n skiprows=skiprows,\n nrows=nrows,\n na_values=na_values,\n parse_dates=parse_dates,\n date_parser=date_parser,\n date_format=date_format,\n thousands=thousands,\n comment=comment,\n skipfooter=skipfooter,\n dtype_backend=dtype_backend,\n **kwds,\n )\n\n @property\n def book(self):\n return self._reader.book\n\n @property\n def sheet_names(self):\n return self._reader.sheet_names\n\n def close(self) -> None:\n """close io if necessary"""\n self._reader.close()\n\n def __enter__(self) -> Self:\n return self\n\n def __exit__(\n self,\n exc_type: type[BaseException] | None,\n exc_value: BaseException | None,\n traceback: TracebackType | None,\n ) -> None:\n self.close()\n
.venv\Lib\site-packages\pandas\io\excel\_base.py
_base.py
Python
59,073
0.75
0.133816
0.072281
node-utils
456
2024-10-27T11:34:05.298227
Apache-2.0
false
559b9caf57c3a67976ac0fd268818910
from __future__ import annotations\n\nfrom datetime import (\n date,\n datetime,\n time,\n timedelta,\n)\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Union,\n)\n\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.util._decorators import doc\n\nimport pandas as pd\nfrom pandas.core.shared_docs import _shared_docs\n\nfrom pandas.io.excel._base import BaseExcelReader\n\nif TYPE_CHECKING:\n from python_calamine import (\n CalamineSheet,\n CalamineWorkbook,\n )\n\n from pandas._typing import (\n FilePath,\n NaTType,\n ReadBuffer,\n Scalar,\n StorageOptions,\n )\n\n_CellValue = Union[int, float, str, bool, time, date, datetime, timedelta]\n\n\nclass CalamineReader(BaseExcelReader["CalamineWorkbook"]):\n @doc(storage_options=_shared_docs["storage_options"])\n def __init__(\n self,\n filepath_or_buffer: FilePath | ReadBuffer[bytes],\n storage_options: StorageOptions | None = None,\n engine_kwargs: dict | None = None,\n ) -> None:\n """\n Reader using calamine engine (xlsx/xls/xlsb/ods).\n\n Parameters\n ----------\n filepath_or_buffer : str, path to be parsed or\n an open readable stream.\n {storage_options}\n engine_kwargs : dict, optional\n Arbitrary keyword arguments passed to excel engine.\n """\n import_optional_dependency("python_calamine")\n super().__init__(\n filepath_or_buffer,\n storage_options=storage_options,\n engine_kwargs=engine_kwargs,\n )\n\n @property\n def _workbook_class(self) -> type[CalamineWorkbook]:\n from python_calamine import CalamineWorkbook\n\n return CalamineWorkbook\n\n def load_workbook(\n self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs: Any\n ) -> CalamineWorkbook:\n from python_calamine import load_workbook\n\n return load_workbook(filepath_or_buffer, **engine_kwargs)\n\n @property\n def sheet_names(self) -> list[str]:\n from python_calamine import SheetTypeEnum\n\n return [\n sheet.name\n for sheet in self.book.sheets_metadata\n if sheet.typ == SheetTypeEnum.WorkSheet\n ]\n\n def 
get_sheet_by_name(self, name: str) -> CalamineSheet:\n self.raise_if_bad_sheet_by_name(name)\n return self.book.get_sheet_by_name(name)\n\n def get_sheet_by_index(self, index: int) -> CalamineSheet:\n self.raise_if_bad_sheet_by_index(index)\n return self.book.get_sheet_by_index(index)\n\n def get_sheet_data(\n self, sheet: CalamineSheet, file_rows_needed: int | None = None\n ) -> list[list[Scalar | NaTType | time]]:\n def _convert_cell(value: _CellValue) -> Scalar | NaTType | time:\n if isinstance(value, float):\n val = int(value)\n if val == value:\n return val\n else:\n return value\n elif isinstance(value, date):\n return pd.Timestamp(value)\n elif isinstance(value, timedelta):\n return pd.Timedelta(value)\n elif isinstance(value, time):\n return value\n\n return value\n\n rows: list[list[_CellValue]] = sheet.to_python(\n skip_empty_area=False, nrows=file_rows_needed\n )\n data = [[_convert_cell(cell) for cell in row] for row in rows]\n\n return data\n
.venv\Lib\site-packages\pandas\io\excel\_calamine.py
_calamine.py
Python
3,474
0.85
0.132231
0
awesome-app
181
2024-09-01T00:54:50.520639
MIT
false
c94a91a9529827268ffd42c1c5b656d4
from __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n cast,\n)\n\nimport numpy as np\n\nfrom pandas._typing import (\n FilePath,\n ReadBuffer,\n Scalar,\n StorageOptions,\n)\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.util._decorators import doc\n\nimport pandas as pd\nfrom pandas.core.shared_docs import _shared_docs\n\nfrom pandas.io.excel._base import BaseExcelReader\n\nif TYPE_CHECKING:\n from odf.opendocument import OpenDocument\n\n from pandas._libs.tslibs.nattype import NaTType\n\n\n@doc(storage_options=_shared_docs["storage_options"])\nclass ODFReader(BaseExcelReader["OpenDocument"]):\n def __init__(\n self,\n filepath_or_buffer: FilePath | ReadBuffer[bytes],\n storage_options: StorageOptions | None = None,\n engine_kwargs: dict | None = None,\n ) -> None:\n """\n Read tables out of OpenDocument formatted files.\n\n Parameters\n ----------\n filepath_or_buffer : str, path to be parsed or\n an open readable stream.\n {storage_options}\n engine_kwargs : dict, optional\n Arbitrary keyword arguments passed to excel engine.\n """\n import_optional_dependency("odf")\n super().__init__(\n filepath_or_buffer,\n storage_options=storage_options,\n engine_kwargs=engine_kwargs,\n )\n\n @property\n def _workbook_class(self) -> type[OpenDocument]:\n from odf.opendocument import OpenDocument\n\n return OpenDocument\n\n def load_workbook(\n self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs\n ) -> OpenDocument:\n from odf.opendocument import load\n\n return load(filepath_or_buffer, **engine_kwargs)\n\n @property\n def empty_value(self) -> str:\n """Property for compat with other readers."""\n return ""\n\n @property\n def sheet_names(self) -> list[str]:\n """Return a list of sheet names present in the document"""\n from odf.table import Table\n\n tables = self.book.getElementsByType(Table)\n return [t.getAttribute("name") for t in tables]\n\n def get_sheet_by_index(self, index: int):\n from 
odf.table import Table\n\n self.raise_if_bad_sheet_by_index(index)\n tables = self.book.getElementsByType(Table)\n return tables[index]\n\n def get_sheet_by_name(self, name: str):\n from odf.table import Table\n\n self.raise_if_bad_sheet_by_name(name)\n tables = self.book.getElementsByType(Table)\n\n for table in tables:\n if table.getAttribute("name") == name:\n return table\n\n self.close()\n raise ValueError(f"sheet {name} not found")\n\n def get_sheet_data(\n self, sheet, file_rows_needed: int | None = None\n ) -> list[list[Scalar | NaTType]]:\n """\n Parse an ODF Table into a list of lists\n """\n from odf.table import (\n CoveredTableCell,\n TableCell,\n TableRow,\n )\n\n covered_cell_name = CoveredTableCell().qname\n table_cell_name = TableCell().qname\n cell_names = {covered_cell_name, table_cell_name}\n\n sheet_rows = sheet.getElementsByType(TableRow)\n empty_rows = 0\n max_row_len = 0\n\n table: list[list[Scalar | NaTType]] = []\n\n for sheet_row in sheet_rows:\n sheet_cells = [\n x\n for x in sheet_row.childNodes\n if hasattr(x, "qname") and x.qname in cell_names\n ]\n empty_cells = 0\n table_row: list[Scalar | NaTType] = []\n\n for sheet_cell in sheet_cells:\n if sheet_cell.qname == table_cell_name:\n value = self._get_cell_value(sheet_cell)\n else:\n value = self.empty_value\n\n column_repeat = self._get_column_repeat(sheet_cell)\n\n # Queue up empty values, writing only if content succeeds them\n if value == self.empty_value:\n empty_cells += column_repeat\n else:\n table_row.extend([self.empty_value] * empty_cells)\n empty_cells = 0\n table_row.extend([value] * column_repeat)\n\n if max_row_len < len(table_row):\n max_row_len = len(table_row)\n\n row_repeat = self._get_row_repeat(sheet_row)\n if len(table_row) == 0:\n empty_rows += row_repeat\n else:\n # add blank rows to our table\n table.extend([[self.empty_value]] * empty_rows)\n empty_rows = 0\n table.extend(table_row for _ in range(row_repeat))\n if file_rows_needed is not None and len(table) >= 
file_rows_needed:\n break\n\n # Make our table square\n for row in table:\n if len(row) < max_row_len:\n row.extend([self.empty_value] * (max_row_len - len(row)))\n\n return table\n\n def _get_row_repeat(self, row) -> int:\n """\n Return number of times this row was repeated\n Repeating an empty row appeared to be a common way\n of representing sparse rows in the table.\n """\n from odf.namespaces import TABLENS\n\n return int(row.attributes.get((TABLENS, "number-rows-repeated"), 1))\n\n def _get_column_repeat(self, cell) -> int:\n from odf.namespaces import TABLENS\n\n return int(cell.attributes.get((TABLENS, "number-columns-repeated"), 1))\n\n def _get_cell_value(self, cell) -> Scalar | NaTType:\n from odf.namespaces import OFFICENS\n\n if str(cell) == "#N/A":\n return np.nan\n\n cell_type = cell.attributes.get((OFFICENS, "value-type"))\n if cell_type == "boolean":\n if str(cell) == "TRUE":\n return True\n return False\n if cell_type is None:\n return self.empty_value\n elif cell_type == "float":\n # GH5394\n cell_value = float(cell.attributes.get((OFFICENS, "value")))\n val = int(cell_value)\n if val == cell_value:\n return val\n return cell_value\n elif cell_type == "percentage":\n cell_value = cell.attributes.get((OFFICENS, "value"))\n return float(cell_value)\n elif cell_type == "string":\n return self._get_cell_string_value(cell)\n elif cell_type == "currency":\n cell_value = cell.attributes.get((OFFICENS, "value"))\n return float(cell_value)\n elif cell_type == "date":\n cell_value = cell.attributes.get((OFFICENS, "date-value"))\n return pd.Timestamp(cell_value)\n elif cell_type == "time":\n stamp = pd.Timestamp(str(cell))\n # cast needed here because Scalar doesn't include datetime.time\n return cast(Scalar, stamp.time())\n else:\n self.close()\n raise ValueError(f"Unrecognized type {cell_type}")\n\n def _get_cell_string_value(self, cell) -> str:\n """\n Find and decode OpenDocument text:s tags that represent\n a run length encoded sequence of space 
characters.\n """\n from odf.element import Element\n from odf.namespaces import TEXTNS\n from odf.office import Annotation\n from odf.text import S\n\n office_annotation = Annotation().qname\n text_s = S().qname\n\n value = []\n\n for fragment in cell.childNodes:\n if isinstance(fragment, Element):\n if fragment.qname == text_s:\n spaces = int(fragment.attributes.get((TEXTNS, "c"), 1))\n value.append(" " * spaces)\n elif fragment.qname == office_annotation:\n continue\n else:\n # recursive impl needed in case of nested fragments\n # with multiple spaces\n # https://github.com/pandas-dev/pandas/pull/36175#discussion_r484639704\n value.append(self._get_cell_string_value(fragment))\n else:\n value.append(str(fragment).strip("\n"))\n return "".join(value)\n
.venv\Lib\site-packages\pandas\io\excel\_odfreader.py
_odfreader.py
Python
8,262
0.95
0.15415
0.038647
awesome-app
312
2025-06-19T03:39:21.132348
Apache-2.0
false
05bf55ab6aa3f705fe3a9dfb8c9e53f6
from __future__ import annotations\n\nfrom collections import defaultdict\nimport datetime\nimport json\nfrom typing import (\n TYPE_CHECKING,\n Any,\n DefaultDict,\n cast,\n overload,\n)\n\nfrom pandas.io.excel._base import ExcelWriter\nfrom pandas.io.excel._util import (\n combine_kwargs,\n validate_freeze_panes,\n)\n\nif TYPE_CHECKING:\n from pandas._typing import (\n ExcelWriterIfSheetExists,\n FilePath,\n StorageOptions,\n WriteExcelBuffer,\n )\n\n from pandas.io.formats.excel import ExcelCell\n\n\nclass ODSWriter(ExcelWriter):\n _engine = "odf"\n _supported_extensions = (".ods",)\n\n def __init__(\n self,\n path: FilePath | WriteExcelBuffer | ExcelWriter,\n engine: str | None = None,\n date_format: str | None = None,\n datetime_format=None,\n mode: str = "w",\n storage_options: StorageOptions | None = None,\n if_sheet_exists: ExcelWriterIfSheetExists | None = None,\n engine_kwargs: dict[str, Any] | None = None,\n **kwargs,\n ) -> None:\n from odf.opendocument import OpenDocumentSpreadsheet\n\n if mode == "a":\n raise ValueError("Append mode is not supported with odf!")\n\n engine_kwargs = combine_kwargs(engine_kwargs, kwargs)\n self._book = OpenDocumentSpreadsheet(**engine_kwargs)\n\n super().__init__(\n path,\n mode=mode,\n storage_options=storage_options,\n if_sheet_exists=if_sheet_exists,\n engine_kwargs=engine_kwargs,\n )\n\n self._style_dict: dict[str, str] = {}\n\n @property\n def book(self):\n """\n Book instance of class odf.opendocument.OpenDocumentSpreadsheet.\n\n This attribute can be used to access engine-specific features.\n """\n return self._book\n\n @property\n def sheets(self) -> dict[str, Any]:\n """Mapping of sheet names to sheet objects."""\n from odf.table import Table\n\n result = {\n sheet.getAttribute("name"): sheet\n for sheet in self.book.getElementsByType(Table)\n }\n return result\n\n def _save(self) -> None:\n """\n Save workbook to disk.\n """\n for sheet in self.sheets.values():\n self.book.spreadsheet.addElement(sheet)\n 
self.book.save(self._handles.handle)\n\n def _write_cells(\n self,\n cells: list[ExcelCell],\n sheet_name: str | None = None,\n startrow: int = 0,\n startcol: int = 0,\n freeze_panes: tuple[int, int] | None = None,\n ) -> None:\n """\n Write the frame cells using odf\n """\n from odf.table import (\n Table,\n TableCell,\n TableRow,\n )\n from odf.text import P\n\n sheet_name = self._get_sheet_name(sheet_name)\n assert sheet_name is not None\n\n if sheet_name in self.sheets:\n wks = self.sheets[sheet_name]\n else:\n wks = Table(name=sheet_name)\n self.book.spreadsheet.addElement(wks)\n\n if validate_freeze_panes(freeze_panes):\n freeze_panes = cast(tuple[int, int], freeze_panes)\n self._create_freeze_panes(sheet_name, freeze_panes)\n\n for _ in range(startrow):\n wks.addElement(TableRow())\n\n rows: DefaultDict = defaultdict(TableRow)\n col_count: DefaultDict = defaultdict(int)\n\n for cell in sorted(cells, key=lambda cell: (cell.row, cell.col)):\n # only add empty cells if the row is still empty\n if not col_count[cell.row]:\n for _ in range(startcol):\n rows[cell.row].addElement(TableCell())\n\n # fill with empty cells if needed\n for _ in range(cell.col - col_count[cell.row]):\n rows[cell.row].addElement(TableCell())\n col_count[cell.row] += 1\n\n pvalue, tc = self._make_table_cell(cell)\n rows[cell.row].addElement(tc)\n col_count[cell.row] += 1\n p = P(text=pvalue)\n tc.addElement(p)\n\n # add all rows to the sheet\n if len(rows) > 0:\n for row_nr in range(max(rows.keys()) + 1):\n wks.addElement(rows[row_nr])\n\n def _make_table_cell_attributes(self, cell) -> dict[str, int | str]:\n """Convert cell attributes to OpenDocument attributes\n\n Parameters\n ----------\n cell : ExcelCell\n Spreadsheet cell data\n\n Returns\n -------\n attributes : Dict[str, Union[int, str]]\n Dictionary with attributes and attribute values\n """\n attributes: dict[str, int | str] = {}\n style_name = self._process_style(cell.style)\n if style_name is not None:\n attributes["stylename"] 
= style_name\n if cell.mergestart is not None and cell.mergeend is not None:\n attributes["numberrowsspanned"] = max(1, cell.mergestart)\n attributes["numbercolumnsspanned"] = cell.mergeend\n return attributes\n\n def _make_table_cell(self, cell) -> tuple[object, Any]:\n """Convert cell data to an OpenDocument spreadsheet cell\n\n Parameters\n ----------\n cell : ExcelCell\n Spreadsheet cell data\n\n Returns\n -------\n pvalue, cell : Tuple[str, TableCell]\n Display value, Cell value\n """\n from odf.table import TableCell\n\n attributes = self._make_table_cell_attributes(cell)\n val, fmt = self._value_with_fmt(cell.val)\n pvalue = value = val\n if isinstance(val, bool):\n value = str(val).lower()\n pvalue = str(val).upper()\n return (\n pvalue,\n TableCell(\n valuetype="boolean",\n booleanvalue=value,\n attributes=attributes,\n ),\n )\n elif isinstance(val, datetime.datetime):\n # Fast formatting\n value = val.isoformat()\n # Slow but locale-dependent\n pvalue = val.strftime("%c")\n return (\n pvalue,\n TableCell(valuetype="date", datevalue=value, attributes=attributes),\n )\n elif isinstance(val, datetime.date):\n # Fast formatting\n value = f"{val.year}-{val.month:02d}-{val.day:02d}"\n # Slow but locale-dependent\n pvalue = val.strftime("%x")\n return (\n pvalue,\n TableCell(valuetype="date", datevalue=value, attributes=attributes),\n )\n elif isinstance(val, str):\n return (\n pvalue,\n TableCell(\n valuetype="string",\n stringvalue=value,\n attributes=attributes,\n ),\n )\n else:\n return (\n pvalue,\n TableCell(\n valuetype="float",\n value=value,\n attributes=attributes,\n ),\n )\n\n @overload\n def _process_style(self, style: dict[str, Any]) -> str:\n ...\n\n @overload\n def _process_style(self, style: None) -> None:\n ...\n\n def _process_style(self, style: dict[str, Any] | None) -> str | None:\n """Convert a style dictionary to a OpenDocument style sheet\n\n Parameters\n ----------\n style : Dict\n Style dictionary\n\n Returns\n -------\n style_key : 
str\n Unique style key for later reference in sheet\n """\n from odf.style import (\n ParagraphProperties,\n Style,\n TableCellProperties,\n TextProperties,\n )\n\n if style is None:\n return None\n style_key = json.dumps(style)\n if style_key in self._style_dict:\n return self._style_dict[style_key]\n name = f"pd{len(self._style_dict)+1}"\n self._style_dict[style_key] = name\n odf_style = Style(name=name, family="table-cell")\n if "font" in style:\n font = style["font"]\n if font.get("bold", False):\n odf_style.addElement(TextProperties(fontweight="bold"))\n if "borders" in style:\n borders = style["borders"]\n for side, thickness in borders.items():\n thickness_translation = {"thin": "0.75pt solid #000000"}\n odf_style.addElement(\n TableCellProperties(\n attributes={f"border{side}": thickness_translation[thickness]}\n )\n )\n if "alignment" in style:\n alignment = style["alignment"]\n horizontal = alignment.get("horizontal")\n if horizontal:\n odf_style.addElement(ParagraphProperties(textalign=horizontal))\n vertical = alignment.get("vertical")\n if vertical:\n odf_style.addElement(TableCellProperties(verticalalign=vertical))\n self.book.styles.addElement(odf_style)\n return name\n\n def _create_freeze_panes(\n self, sheet_name: str, freeze_panes: tuple[int, int]\n ) -> None:\n """\n Create freeze panes in the sheet.\n\n Parameters\n ----------\n sheet_name : str\n Name of the spreadsheet\n freeze_panes : tuple of (int, int)\n Freeze pane location x and y\n """\n from odf.config import (\n ConfigItem,\n ConfigItemMapEntry,\n ConfigItemMapIndexed,\n ConfigItemMapNamed,\n ConfigItemSet,\n )\n\n config_item_set = ConfigItemSet(name="ooo:view-settings")\n self.book.settings.addElement(config_item_set)\n\n config_item_map_indexed = ConfigItemMapIndexed(name="Views")\n config_item_set.addElement(config_item_map_indexed)\n\n config_item_map_entry = ConfigItemMapEntry()\n config_item_map_indexed.addElement(config_item_map_entry)\n\n config_item_map_named = 
ConfigItemMapNamed(name="Tables")\n config_item_map_entry.addElement(config_item_map_named)\n\n config_item_map_entry = ConfigItemMapEntry(name=sheet_name)\n config_item_map_named.addElement(config_item_map_entry)\n\n config_item_map_entry.addElement(\n ConfigItem(name="HorizontalSplitMode", type="short", text="2")\n )\n config_item_map_entry.addElement(\n ConfigItem(name="VerticalSplitMode", type="short", text="2")\n )\n config_item_map_entry.addElement(\n ConfigItem(\n name="HorizontalSplitPosition", type="int", text=str(freeze_panes[0])\n )\n )\n config_item_map_entry.addElement(\n ConfigItem(\n name="VerticalSplitPosition", type="int", text=str(freeze_panes[1])\n )\n )\n config_item_map_entry.addElement(\n ConfigItem(name="PositionRight", type="int", text=str(freeze_panes[0]))\n )\n config_item_map_entry.addElement(\n ConfigItem(name="PositionBottom", type="int", text=str(freeze_panes[1]))\n )\n
.venv\Lib\site-packages\pandas\io\excel\_odswriter.py
_odswriter.py
Python
11,210
0.95
0.114846
0.025806
awesome-app
156
2023-09-29T14:24:20.172309
BSD-3-Clause
false
a92ffb50ee1e42a94eaf555777a27ec4
from __future__ import annotations\n\nimport mmap\nfrom typing import (\n TYPE_CHECKING,\n Any,\n cast,\n)\n\nimport numpy as np\n\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.util._decorators import doc\n\nfrom pandas.core.shared_docs import _shared_docs\n\nfrom pandas.io.excel._base import (\n BaseExcelReader,\n ExcelWriter,\n)\nfrom pandas.io.excel._util import (\n combine_kwargs,\n validate_freeze_panes,\n)\n\nif TYPE_CHECKING:\n from openpyxl import Workbook\n from openpyxl.descriptors.serialisable import Serialisable\n\n from pandas._typing import (\n ExcelWriterIfSheetExists,\n FilePath,\n ReadBuffer,\n Scalar,\n StorageOptions,\n WriteExcelBuffer,\n )\n\n\nclass OpenpyxlWriter(ExcelWriter):\n _engine = "openpyxl"\n _supported_extensions = (".xlsx", ".xlsm")\n\n def __init__(\n self,\n path: FilePath | WriteExcelBuffer | ExcelWriter,\n engine: str | None = None,\n date_format: str | None = None,\n datetime_format: str | None = None,\n mode: str = "w",\n storage_options: StorageOptions | None = None,\n if_sheet_exists: ExcelWriterIfSheetExists | None = None,\n engine_kwargs: dict[str, Any] | None = None,\n **kwargs,\n ) -> None:\n # Use the openpyxl module as the Excel writer.\n from openpyxl.workbook import Workbook\n\n engine_kwargs = combine_kwargs(engine_kwargs, kwargs)\n\n super().__init__(\n path,\n mode=mode,\n storage_options=storage_options,\n if_sheet_exists=if_sheet_exists,\n engine_kwargs=engine_kwargs,\n )\n\n # ExcelWriter replaced "a" by "r+" to allow us to first read the excel file from\n # the file and later write to it\n if "r+" in self._mode: # Load from existing workbook\n from openpyxl import load_workbook\n\n try:\n self._book = load_workbook(self._handles.handle, **engine_kwargs)\n except TypeError:\n self._handles.handle.close()\n raise\n self._handles.handle.seek(0)\n else:\n # Create workbook object with default optimized_write=True.\n try:\n self._book = Workbook(**engine_kwargs)\n except TypeError:\n 
self._handles.handle.close()\n raise\n\n if self.book.worksheets:\n self.book.remove(self.book.worksheets[0])\n\n @property\n def book(self) -> Workbook:\n """\n Book instance of class openpyxl.workbook.Workbook.\n\n This attribute can be used to access engine-specific features.\n """\n return self._book\n\n @property\n def sheets(self) -> dict[str, Any]:\n """Mapping of sheet names to sheet objects."""\n result = {name: self.book[name] for name in self.book.sheetnames}\n return result\n\n def _save(self) -> None:\n """\n Save workbook to disk.\n """\n self.book.save(self._handles.handle)\n if "r+" in self._mode and not isinstance(self._handles.handle, mmap.mmap):\n # truncate file to the written content\n self._handles.handle.truncate()\n\n @classmethod\n def _convert_to_style_kwargs(cls, style_dict: dict) -> dict[str, Serialisable]:\n """\n Convert a style_dict to a set of kwargs suitable for initializing\n or updating-on-copy an openpyxl v2 style object.\n\n Parameters\n ----------\n style_dict : dict\n A dict with zero or more of the following keys (or their synonyms).\n 'font'\n 'fill'\n 'border' ('borders')\n 'alignment'\n 'number_format'\n 'protection'\n\n Returns\n -------\n style_kwargs : dict\n A dict with the same, normalized keys as ``style_dict`` but each\n value has been replaced with a native openpyxl style object of the\n appropriate class.\n """\n _style_key_map = {"borders": "border"}\n\n style_kwargs: dict[str, Serialisable] = {}\n for k, v in style_dict.items():\n k = _style_key_map.get(k, k)\n _conv_to_x = getattr(cls, f"_convert_to_{k}", lambda x: None)\n new_v = _conv_to_x(v)\n if new_v:\n style_kwargs[k] = new_v\n\n return style_kwargs\n\n @classmethod\n def _convert_to_color(cls, color_spec):\n """\n Convert ``color_spec`` to an openpyxl v2 Color object.\n\n Parameters\n ----------\n color_spec : str, dict\n A 32-bit ARGB hex string, or a dict with zero or more of the\n following keys.\n 'rgb'\n 'indexed'\n 'auto'\n 'theme'\n 'tint'\n 
'index'\n 'type'\n\n Returns\n -------\n color : openpyxl.styles.Color\n """\n from openpyxl.styles import Color\n\n if isinstance(color_spec, str):\n return Color(color_spec)\n else:\n return Color(**color_spec)\n\n @classmethod\n def _convert_to_font(cls, font_dict):\n """\n Convert ``font_dict`` to an openpyxl v2 Font object.\n\n Parameters\n ----------\n font_dict : dict\n A dict with zero or more of the following keys (or their synonyms).\n 'name'\n 'size' ('sz')\n 'bold' ('b')\n 'italic' ('i')\n 'underline' ('u')\n 'strikethrough' ('strike')\n 'color'\n 'vertAlign' ('vertalign')\n 'charset'\n 'scheme'\n 'family'\n 'outline'\n 'shadow'\n 'condense'\n\n Returns\n -------\n font : openpyxl.styles.Font\n """\n from openpyxl.styles import Font\n\n _font_key_map = {\n "sz": "size",\n "b": "bold",\n "i": "italic",\n "u": "underline",\n "strike": "strikethrough",\n "vertalign": "vertAlign",\n }\n\n font_kwargs = {}\n for k, v in font_dict.items():\n k = _font_key_map.get(k, k)\n if k == "color":\n v = cls._convert_to_color(v)\n font_kwargs[k] = v\n\n return Font(**font_kwargs)\n\n @classmethod\n def _convert_to_stop(cls, stop_seq):\n """\n Convert ``stop_seq`` to a list of openpyxl v2 Color objects,\n suitable for initializing the ``GradientFill`` ``stop`` parameter.\n\n Parameters\n ----------\n stop_seq : iterable\n An iterable that yields objects suitable for consumption by\n ``_convert_to_color``.\n\n Returns\n -------\n stop : list of openpyxl.styles.Color\n """\n return map(cls._convert_to_color, stop_seq)\n\n @classmethod\n def _convert_to_fill(cls, fill_dict: dict[str, Any]):\n """\n Convert ``fill_dict`` to an openpyxl v2 Fill object.\n\n Parameters\n ----------\n fill_dict : dict\n A dict with one or more of the following keys (or their synonyms),\n 'fill_type' ('patternType', 'patterntype')\n 'start_color' ('fgColor', 'fgcolor')\n 'end_color' ('bgColor', 'bgcolor')\n or one or more of the following keys (or their synonyms).\n 'type' ('fill_type')\n 
'degree'\n 'left'\n 'right'\n 'top'\n 'bottom'\n 'stop'\n\n Returns\n -------\n fill : openpyxl.styles.Fill\n """\n from openpyxl.styles import (\n GradientFill,\n PatternFill,\n )\n\n _pattern_fill_key_map = {\n "patternType": "fill_type",\n "patterntype": "fill_type",\n "fgColor": "start_color",\n "fgcolor": "start_color",\n "bgColor": "end_color",\n "bgcolor": "end_color",\n }\n\n _gradient_fill_key_map = {"fill_type": "type"}\n\n pfill_kwargs = {}\n gfill_kwargs = {}\n for k, v in fill_dict.items():\n pk = _pattern_fill_key_map.get(k)\n gk = _gradient_fill_key_map.get(k)\n if pk in ["start_color", "end_color"]:\n v = cls._convert_to_color(v)\n if gk == "stop":\n v = cls._convert_to_stop(v)\n if pk:\n pfill_kwargs[pk] = v\n elif gk:\n gfill_kwargs[gk] = v\n else:\n pfill_kwargs[k] = v\n gfill_kwargs[k] = v\n\n try:\n return PatternFill(**pfill_kwargs)\n except TypeError:\n return GradientFill(**gfill_kwargs)\n\n @classmethod\n def _convert_to_side(cls, side_spec):\n """\n Convert ``side_spec`` to an openpyxl v2 Side object.\n\n Parameters\n ----------\n side_spec : str, dict\n A string specifying the border style, or a dict with zero or more\n of the following keys (or their synonyms).\n 'style' ('border_style')\n 'color'\n\n Returns\n -------\n side : openpyxl.styles.Side\n """\n from openpyxl.styles import Side\n\n _side_key_map = {"border_style": "style"}\n\n if isinstance(side_spec, str):\n return Side(style=side_spec)\n\n side_kwargs = {}\n for k, v in side_spec.items():\n k = _side_key_map.get(k, k)\n if k == "color":\n v = cls._convert_to_color(v)\n side_kwargs[k] = v\n\n return Side(**side_kwargs)\n\n @classmethod\n def _convert_to_border(cls, border_dict):\n """\n Convert ``border_dict`` to an openpyxl v2 Border object.\n\n Parameters\n ----------\n border_dict : dict\n A dict with zero or more of the following keys (or their synonyms).\n 'left'\n 'right'\n 'top'\n 'bottom'\n 'diagonal'\n 'diagonal_direction'\n 'vertical'\n 'horizontal'\n 'diagonalUp' 
('diagonalup')\n 'diagonalDown' ('diagonaldown')\n 'outline'\n\n Returns\n -------\n border : openpyxl.styles.Border\n """\n from openpyxl.styles import Border\n\n _border_key_map = {"diagonalup": "diagonalUp", "diagonaldown": "diagonalDown"}\n\n border_kwargs = {}\n for k, v in border_dict.items():\n k = _border_key_map.get(k, k)\n if k == "color":\n v = cls._convert_to_color(v)\n if k in ["left", "right", "top", "bottom", "diagonal"]:\n v = cls._convert_to_side(v)\n border_kwargs[k] = v\n\n return Border(**border_kwargs)\n\n @classmethod\n def _convert_to_alignment(cls, alignment_dict):\n """\n Convert ``alignment_dict`` to an openpyxl v2 Alignment object.\n\n Parameters\n ----------\n alignment_dict : dict\n A dict with zero or more of the following keys (or their synonyms).\n 'horizontal'\n 'vertical'\n 'text_rotation'\n 'wrap_text'\n 'shrink_to_fit'\n 'indent'\n Returns\n -------\n alignment : openpyxl.styles.Alignment\n """\n from openpyxl.styles import Alignment\n\n return Alignment(**alignment_dict)\n\n @classmethod\n def _convert_to_number_format(cls, number_format_dict):\n """\n Convert ``number_format_dict`` to an openpyxl v2.1.0 number format\n initializer.\n\n Parameters\n ----------\n number_format_dict : dict\n A dict with zero or more of the following keys.\n 'format_code' : str\n\n Returns\n -------\n number_format : str\n """\n return number_format_dict["format_code"]\n\n @classmethod\n def _convert_to_protection(cls, protection_dict):\n """\n Convert ``protection_dict`` to an openpyxl v2 Protection object.\n\n Parameters\n ----------\n protection_dict : dict\n A dict with zero or more of the following keys.\n 'locked'\n 'hidden'\n\n Returns\n -------\n """\n from openpyxl.styles import Protection\n\n return Protection(**protection_dict)\n\n def _write_cells(\n self,\n cells,\n sheet_name: str | None = None,\n startrow: int = 0,\n startcol: int = 0,\n freeze_panes: tuple[int, int] | None = None,\n ) -> None:\n # Write the frame cells using 
openpyxl.\n sheet_name = self._get_sheet_name(sheet_name)\n\n _style_cache: dict[str, dict[str, Serialisable]] = {}\n\n if sheet_name in self.sheets and self._if_sheet_exists != "new":\n if "r+" in self._mode:\n if self._if_sheet_exists == "replace":\n old_wks = self.sheets[sheet_name]\n target_index = self.book.index(old_wks)\n del self.book[sheet_name]\n wks = self.book.create_sheet(sheet_name, target_index)\n elif self._if_sheet_exists == "error":\n raise ValueError(\n f"Sheet '{sheet_name}' already exists and "\n f"if_sheet_exists is set to 'error'."\n )\n elif self._if_sheet_exists == "overlay":\n wks = self.sheets[sheet_name]\n else:\n raise ValueError(\n f"'{self._if_sheet_exists}' is not valid for if_sheet_exists. "\n "Valid options are 'error', 'new', 'replace' and 'overlay'."\n )\n else:\n wks = self.sheets[sheet_name]\n else:\n wks = self.book.create_sheet()\n wks.title = sheet_name\n\n if validate_freeze_panes(freeze_panes):\n freeze_panes = cast(tuple[int, int], freeze_panes)\n wks.freeze_panes = wks.cell(\n row=freeze_panes[0] + 1, column=freeze_panes[1] + 1\n )\n\n for cell in cells:\n xcell = wks.cell(\n row=startrow + cell.row + 1, column=startcol + cell.col + 1\n )\n xcell.value, fmt = self._value_with_fmt(cell.val)\n if fmt:\n xcell.number_format = fmt\n\n style_kwargs: dict[str, Serialisable] | None = {}\n if cell.style:\n key = str(cell.style)\n style_kwargs = _style_cache.get(key)\n if style_kwargs is None:\n style_kwargs = self._convert_to_style_kwargs(cell.style)\n _style_cache[key] = style_kwargs\n\n if style_kwargs:\n for k, v in style_kwargs.items():\n setattr(xcell, k, v)\n\n if cell.mergestart is not None and cell.mergeend is not None:\n wks.merge_cells(\n start_row=startrow + cell.row + 1,\n start_column=startcol + cell.col + 1,\n end_column=startcol + cell.mergeend + 1,\n end_row=startrow + cell.mergestart + 1,\n )\n\n # When cells are merged only the top-left cell is preserved\n # The behaviour of the other cells in a merged range 
is\n # undefined\n if style_kwargs:\n first_row = startrow + cell.row + 1\n last_row = startrow + cell.mergestart + 1\n first_col = startcol + cell.col + 1\n last_col = startcol + cell.mergeend + 1\n\n for row in range(first_row, last_row + 1):\n for col in range(first_col, last_col + 1):\n if row == first_row and col == first_col:\n # Ignore first cell. It is already handled.\n continue\n xcell = wks.cell(column=col, row=row)\n for k, v in style_kwargs.items():\n setattr(xcell, k, v)\n\n\nclass OpenpyxlReader(BaseExcelReader["Workbook"]):\n @doc(storage_options=_shared_docs["storage_options"])\n def __init__(\n self,\n filepath_or_buffer: FilePath | ReadBuffer[bytes],\n storage_options: StorageOptions | None = None,\n engine_kwargs: dict | None = None,\n ) -> None:\n """\n Reader using openpyxl engine.\n\n Parameters\n ----------\n filepath_or_buffer : str, path object or Workbook\n Object to be parsed.\n {storage_options}\n engine_kwargs : dict, optional\n Arbitrary keyword arguments passed to excel engine.\n """\n import_optional_dependency("openpyxl")\n super().__init__(\n filepath_or_buffer,\n storage_options=storage_options,\n engine_kwargs=engine_kwargs,\n )\n\n @property\n def _workbook_class(self) -> type[Workbook]:\n from openpyxl import Workbook\n\n return Workbook\n\n def load_workbook(\n self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs\n ) -> Workbook:\n from openpyxl import load_workbook\n\n default_kwargs = {"read_only": True, "data_only": True, "keep_links": False}\n\n return load_workbook(\n filepath_or_buffer,\n **(default_kwargs | engine_kwargs),\n )\n\n @property\n def sheet_names(self) -> list[str]:\n return [sheet.title for sheet in self.book.worksheets]\n\n def get_sheet_by_name(self, name: str):\n self.raise_if_bad_sheet_by_name(name)\n return self.book[name]\n\n def get_sheet_by_index(self, index: int):\n self.raise_if_bad_sheet_by_index(index)\n return self.book.worksheets[index]\n\n def _convert_cell(self, cell) -> 
Scalar:\n from openpyxl.cell.cell import (\n TYPE_ERROR,\n TYPE_NUMERIC,\n )\n\n if cell.value is None:\n return "" # compat with xlrd\n elif cell.data_type == TYPE_ERROR:\n return np.nan\n elif cell.data_type == TYPE_NUMERIC:\n val = int(cell.value)\n if val == cell.value:\n return val\n return float(cell.value)\n\n return cell.value\n\n def get_sheet_data(\n self, sheet, file_rows_needed: int | None = None\n ) -> list[list[Scalar]]:\n if self.book.read_only:\n sheet.reset_dimensions()\n\n data: list[list[Scalar]] = []\n last_row_with_data = -1\n for row_number, row in enumerate(sheet.rows):\n converted_row = [self._convert_cell(cell) for cell in row]\n while converted_row and converted_row[-1] == "":\n # trim trailing empty elements\n converted_row.pop()\n if converted_row:\n last_row_with_data = row_number\n data.append(converted_row)\n if file_rows_needed is not None and len(data) >= file_rows_needed:\n break\n\n # Trim trailing empty rows\n data = data[: last_row_with_data + 1]\n\n if len(data) > 0:\n # extend rows to max width\n max_width = max(len(data_row) for data_row in data)\n if min(len(data_row) for data_row in data) < max_width:\n empty_cell: list[Scalar] = [""]\n data = [\n data_row + (max_width - len(data_row)) * empty_cell\n for data_row in data\n ]\n\n return data\n
.venv\Lib\site-packages\pandas\io\excel\_openpyxl.py
_openpyxl.py
Python
19,861
0.95
0.131455
0.027624
node-utils
865
2025-06-19T16:47:06.430277
GPL-3.0
false
5569abc511d199f21d8936a92f0487b7
# pyright: reportMissingImports=false\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.util._decorators import doc\n\nfrom pandas.core.shared_docs import _shared_docs\n\nfrom pandas.io.excel._base import BaseExcelReader\n\nif TYPE_CHECKING:\n from pyxlsb import Workbook\n\n from pandas._typing import (\n FilePath,\n ReadBuffer,\n Scalar,\n StorageOptions,\n )\n\n\nclass PyxlsbReader(BaseExcelReader["Workbook"]):\n @doc(storage_options=_shared_docs["storage_options"])\n def __init__(\n self,\n filepath_or_buffer: FilePath | ReadBuffer[bytes],\n storage_options: StorageOptions | None = None,\n engine_kwargs: dict | None = None,\n ) -> None:\n """\n Reader using pyxlsb engine.\n\n Parameters\n ----------\n filepath_or_buffer : str, path object, or Workbook\n Object to be parsed.\n {storage_options}\n engine_kwargs : dict, optional\n Arbitrary keyword arguments passed to excel engine.\n """\n import_optional_dependency("pyxlsb")\n # This will call load_workbook on the filepath or buffer\n # And set the result to the book-attribute\n super().__init__(\n filepath_or_buffer,\n storage_options=storage_options,\n engine_kwargs=engine_kwargs,\n )\n\n @property\n def _workbook_class(self) -> type[Workbook]:\n from pyxlsb import Workbook\n\n return Workbook\n\n def load_workbook(\n self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs\n ) -> Workbook:\n from pyxlsb import open_workbook\n\n # TODO: hack in buffer capability\n # This might need some modifications to the Pyxlsb library\n # Actual work for opening it is in xlsbpackage.py, line 20-ish\n\n return open_workbook(filepath_or_buffer, **engine_kwargs)\n\n @property\n def sheet_names(self) -> list[str]:\n return self.book.sheets\n\n def get_sheet_by_name(self, name: str):\n self.raise_if_bad_sheet_by_name(name)\n return self.book.get_sheet(name)\n\n def get_sheet_by_index(self, index: int):\n 
self.raise_if_bad_sheet_by_index(index)\n # pyxlsb sheets are indexed from 1 onwards\n # There's a fix for this in the source, but the pypi package doesn't have it\n return self.book.get_sheet(index + 1)\n\n def _convert_cell(self, cell) -> Scalar:\n # TODO: there is no way to distinguish between floats and datetimes in pyxlsb\n # This means that there is no way to read datetime types from an xlsb file yet\n if cell.v is None:\n return "" # Prevents non-named columns from not showing up as Unnamed: i\n if isinstance(cell.v, float):\n val = int(cell.v)\n if val == cell.v:\n return val\n else:\n return float(cell.v)\n\n return cell.v\n\n def get_sheet_data(\n self,\n sheet,\n file_rows_needed: int | None = None,\n ) -> list[list[Scalar]]:\n data: list[list[Scalar]] = []\n previous_row_number = -1\n # When sparse=True the rows can have different lengths and empty rows are\n # not returned. The cells are namedtuples of row, col, value (r, c, v).\n for row in sheet.rows(sparse=True):\n row_number = row[0].r\n converted_row = [self._convert_cell(cell) for cell in row]\n while converted_row and converted_row[-1] == "":\n # trim trailing empty elements\n converted_row.pop()\n if converted_row:\n data.extend([[]] * (row_number - previous_row_number - 1))\n data.append(converted_row)\n previous_row_number = row_number\n if file_rows_needed is not None and len(data) >= file_rows_needed:\n break\n if data:\n # extend rows to max_width\n max_width = max(len(data_row) for data_row in data)\n if min(len(data_row) for data_row in data) < max_width:\n empty_cell: list[Scalar] = [""]\n data = [\n data_row + (max_width - len(data_row)) * empty_cell\n for data_row in data\n ]\n return data\n
.venv\Lib\site-packages\pandas\io\excel\_pyxlsb.py
_pyxlsb.py
Python
4,358
0.95
0.19685
0.130841
react-lib
360
2023-11-06T18:32:31.640750
MIT
false
754791c57bf785e3948be7bd0efc8a2a
from __future__ import annotations\n\nfrom collections.abc import (\n Hashable,\n Iterable,\n MutableMapping,\n Sequence,\n)\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Literal,\n TypeVar,\n overload,\n)\n\nfrom pandas.compat._optional import import_optional_dependency\n\nfrom pandas.core.dtypes.common import (\n is_integer,\n is_list_like,\n)\n\nif TYPE_CHECKING:\n from pandas.io.excel._base import ExcelWriter\n\n ExcelWriter_t = type[ExcelWriter]\n usecols_func = TypeVar("usecols_func", bound=Callable[[Hashable], object])\n\n_writers: MutableMapping[str, ExcelWriter_t] = {}\n\n\ndef register_writer(klass: ExcelWriter_t) -> None:\n """\n Add engine to the excel writer registry.io.excel.\n\n You must use this method to integrate with ``to_excel``.\n\n Parameters\n ----------\n klass : ExcelWriter\n """\n if not callable(klass):\n raise ValueError("Can only register callables as engines")\n engine_name = klass._engine\n _writers[engine_name] = klass\n\n\ndef get_default_engine(ext: str, mode: Literal["reader", "writer"] = "reader") -> str:\n """\n Return the default reader/writer for the given extension.\n\n Parameters\n ----------\n ext : str\n The excel file extension for which to get the default engine.\n mode : str {'reader', 'writer'}\n Whether to get the default engine for reading or writing.\n Either 'reader' or 'writer'\n\n Returns\n -------\n str\n The default engine for the extension.\n """\n _default_readers = {\n "xlsx": "openpyxl",\n "xlsm": "openpyxl",\n "xlsb": "pyxlsb",\n "xls": "xlrd",\n "ods": "odf",\n }\n _default_writers = {\n "xlsx": "openpyxl",\n "xlsm": "openpyxl",\n "xlsb": "pyxlsb",\n "ods": "odf",\n }\n assert mode in ["reader", "writer"]\n if mode == "writer":\n # Prefer xlsxwriter over openpyxl if installed\n xlsxwriter = import_optional_dependency("xlsxwriter", errors="warn")\n if xlsxwriter:\n _default_writers["xlsx"] = "xlsxwriter"\n return _default_writers[ext]\n else:\n return _default_readers[ext]\n\n\ndef 
get_writer(engine_name: str) -> ExcelWriter_t:\n try:\n return _writers[engine_name]\n except KeyError as err:\n raise ValueError(f"No Excel writer '{engine_name}'") from err\n\n\ndef _excel2num(x: str) -> int:\n """\n Convert Excel column name like 'AB' to 0-based column index.\n\n Parameters\n ----------\n x : str\n The Excel column name to convert to a 0-based column index.\n\n Returns\n -------\n num : int\n The column index corresponding to the name.\n\n Raises\n ------\n ValueError\n Part of the Excel column name was invalid.\n """\n index = 0\n\n for c in x.upper().strip():\n cp = ord(c)\n\n if cp < ord("A") or cp > ord("Z"):\n raise ValueError(f"Invalid column name: {x}")\n\n index = index * 26 + cp - ord("A") + 1\n\n return index - 1\n\n\ndef _range2cols(areas: str) -> list[int]:\n """\n Convert comma separated list of column names and ranges to indices.\n\n Parameters\n ----------\n areas : str\n A string containing a sequence of column ranges (or areas).\n\n Returns\n -------\n cols : list\n A list of 0-based column indices.\n\n Examples\n --------\n >>> _range2cols('A:E')\n [0, 1, 2, 3, 4]\n >>> _range2cols('A,C,Z:AB')\n [0, 2, 25, 26, 27]\n """\n cols: list[int] = []\n\n for rng in areas.split(","):\n if ":" in rng:\n rngs = rng.split(":")\n cols.extend(range(_excel2num(rngs[0]), _excel2num(rngs[1]) + 1))\n else:\n cols.append(_excel2num(rng))\n\n return cols\n\n\n@overload\ndef maybe_convert_usecols(usecols: str | list[int]) -> list[int]:\n ...\n\n\n@overload\ndef maybe_convert_usecols(usecols: list[str]) -> list[str]:\n ...\n\n\n@overload\ndef maybe_convert_usecols(usecols: usecols_func) -> usecols_func:\n ...\n\n\n@overload\ndef maybe_convert_usecols(usecols: None) -> None:\n ...\n\n\ndef maybe_convert_usecols(\n usecols: str | list[int] | list[str] | usecols_func | None,\n) -> None | list[int] | list[str] | usecols_func:\n """\n Convert `usecols` into a compatible format for parsing in `parsers.py`.\n\n Parameters\n ----------\n usecols : object\n 
The use-columns object to potentially convert.\n\n Returns\n -------\n converted : object\n The compatible format of `usecols`.\n """\n if usecols is None:\n return usecols\n\n if is_integer(usecols):\n raise ValueError(\n "Passing an integer for `usecols` is no longer supported. "\n "Please pass in a list of int from 0 to `usecols` inclusive instead."\n )\n\n if isinstance(usecols, str):\n return _range2cols(usecols)\n\n return usecols\n\n\n@overload\ndef validate_freeze_panes(freeze_panes: tuple[int, int]) -> Literal[True]:\n ...\n\n\n@overload\ndef validate_freeze_panes(freeze_panes: None) -> Literal[False]:\n ...\n\n\ndef validate_freeze_panes(freeze_panes: tuple[int, int] | None) -> bool:\n if freeze_panes is not None:\n if len(freeze_panes) == 2 and all(\n isinstance(item, int) for item in freeze_panes\n ):\n return True\n\n raise ValueError(\n "freeze_panes must be of form (row, column) "\n "where row and column are integers"\n )\n\n # freeze_panes wasn't specified, return False so it won't be applied\n # to output sheet\n return False\n\n\ndef fill_mi_header(\n row: list[Hashable], control_row: list[bool]\n) -> tuple[list[Hashable], list[bool]]:\n """\n Forward fill blank entries in row but only inside the same parent index.\n\n Used for creating headers in Multiindex.\n\n Parameters\n ----------\n row : list\n List of items in a single row.\n control_row : list of bool\n Helps to determine if particular column is in same parent index as the\n previous value. 
Used to stop propagation of empty cells between\n different indexes.\n\n Returns\n -------\n Returns changed row and control_row\n """\n last = row[0]\n for i in range(1, len(row)):\n if not control_row[i]:\n last = row[i]\n\n if row[i] == "" or row[i] is None:\n row[i] = last\n else:\n control_row[i] = False\n last = row[i]\n\n return row, control_row\n\n\ndef pop_header_name(\n row: list[Hashable], index_col: int | Sequence[int]\n) -> tuple[Hashable | None, list[Hashable]]:\n """\n Pop the header name for MultiIndex parsing.\n\n Parameters\n ----------\n row : list\n The data row to parse for the header name.\n index_col : int, list\n The index columns for our data. Assumed to be non-null.\n\n Returns\n -------\n header_name : str\n The extracted header name.\n trimmed_row : list\n The original data row with the header name removed.\n """\n # Pop out header name and fill w/blank.\n if is_list_like(index_col):\n assert isinstance(index_col, Iterable)\n i = max(index_col)\n else:\n assert not isinstance(index_col, Iterable)\n i = index_col\n\n header_name = row[i]\n header_name = None if header_name == "" else header_name\n\n return header_name, row[:i] + [""] + row[i + 1 :]\n\n\ndef combine_kwargs(engine_kwargs: dict[str, Any] | None, kwargs: dict) -> dict:\n """\n Used to combine two sources of kwargs for the backend engine.\n\n Use of kwargs is deprecated, this function is solely for use in 1.3 and should\n be removed in 1.4/2.0. Also _base.ExcelWriter.__new__ ensures either engine_kwargs\n or kwargs must be None or empty respectively.\n\n Parameters\n ----------\n engine_kwargs: dict\n kwargs to be passed through to the engine.\n kwargs: dict\n kwargs to be psased through to the engine (deprecated)\n\n Returns\n -------\n engine_kwargs combined with kwargs\n """\n if engine_kwargs is None:\n result = {}\n else:\n result = engine_kwargs.copy()\n result.update(kwargs)\n return result\n
.venv\Lib\site-packages\pandas\io\excel\_util.py
_util.py
Python
8,105
0.95
0.155689
0.015326
python-kit
664
2024-01-22T04:52:03.850826
GPL-3.0
false
a9f7ba1e0d3da21fc611263e1fb6901a
from __future__ import annotations

from datetime import time
import math
from typing import TYPE_CHECKING

import numpy as np

from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import doc

from pandas.core.shared_docs import _shared_docs

from pandas.io.excel._base import BaseExcelReader

if TYPE_CHECKING:
    from xlrd import Book

    from pandas._typing import (
        Scalar,
        StorageOptions,
    )


class XlrdReader(BaseExcelReader["Book"]):
    # Engine wrapper adapting the xlrd library (legacy .xls support) to the
    # BaseExcelReader interface.

    @doc(storage_options=_shared_docs["storage_options"])
    def __init__(
        self,
        filepath_or_buffer,
        storage_options: StorageOptions | None = None,
        engine_kwargs: dict | None = None,
    ) -> None:
        """
        Reader using xlrd engine.

        Parameters
        ----------
        filepath_or_buffer : str, path object or Workbook
            Object to be parsed.
        {storage_options}
        engine_kwargs : dict, optional
            Arbitrary keyword arguments passed to excel engine.
        """
        err_msg = "Install xlrd >= 2.0.1 for xls Excel support"
        import_optional_dependency("xlrd", extra=err_msg)
        # Base-class __init__ opens the handle and loads the workbook
        # (via load_workbook below), storing it on ``self.book``.
        super().__init__(
            filepath_or_buffer,
            storage_options=storage_options,
            engine_kwargs=engine_kwargs,
        )

    @property
    def _workbook_class(self) -> type[Book]:
        # Imported lazily so xlrd is only required when this engine is used.
        from xlrd import Book

        return Book

    def load_workbook(self, filepath_or_buffer, engine_kwargs) -> Book:
        """Open *filepath_or_buffer* with xlrd and return the Book object."""
        from xlrd import open_workbook

        if hasattr(filepath_or_buffer, "read"):
            # File-like object: xlrd expects raw bytes via ``file_contents``.
            data = filepath_or_buffer.read()
            return open_workbook(file_contents=data, **engine_kwargs)
        else:
            return open_workbook(filepath_or_buffer, **engine_kwargs)

    @property
    def sheet_names(self):
        # Names of all sheets, in workbook order.
        return self.book.sheet_names()

    def get_sheet_by_name(self, name):
        self.raise_if_bad_sheet_by_name(name)
        return self.book.sheet_by_name(name)

    def get_sheet_by_index(self, index):
        self.raise_if_bad_sheet_by_index(index)
        return self.book.sheet_by_index(index)

    def get_sheet_data(
        self, sheet, file_rows_needed: int | None = None
    ) -> list[list[Scalar]]:
        """
        Convert an xlrd sheet into a list of rows of Python scalars.

        Parameters
        ----------
        sheet : xlrd sheet object
            Sheet whose cells are converted.
        file_rows_needed : int, optional
            If given, read at most this many rows.
        """
        from xlrd import (
            XL_CELL_BOOLEAN,
            XL_CELL_DATE,
            XL_CELL_ERROR,
            XL_CELL_NUMBER,
            xldate,
        )

        # xlrd's datemode flag selects between Excel's 1900 and 1904 date epochs.
        epoch1904 = self.book.datemode

        def _parse_cell(cell_contents, cell_typ):
            """
            converts the contents of the cell into a pandas appropriate object
            """
            if cell_typ == XL_CELL_DATE:
                # Use the newer xlrd datetime handling.
                try:
                    cell_contents = xldate.xldate_as_datetime(cell_contents, epoch1904)
                except OverflowError:
                    # Serial value out of datetime range: keep the raw number.
                    return cell_contents

                # Excel doesn't distinguish between dates and time,
                # so we treat dates on the epoch as times only.
                # Also, Excel supports 1900 and 1904 epochs.
                year = (cell_contents.timetuple())[0:3]
                if (not epoch1904 and year == (1899, 12, 31)) or (
                    epoch1904 and year == (1904, 1, 1)
                ):
                    cell_contents = time(
                        cell_contents.hour,
                        cell_contents.minute,
                        cell_contents.second,
                        cell_contents.microsecond,
                    )

            elif cell_typ == XL_CELL_ERROR:
                # Error cells (e.g. #DIV/0!) become missing values.
                cell_contents = np.nan
            elif cell_typ == XL_CELL_BOOLEAN:
                cell_contents = bool(cell_contents)
            elif cell_typ == XL_CELL_NUMBER:
                # GH5394 - Excel 'numbers' are always floats
                # it's a minimal perf hit and less surprising
                if math.isfinite(cell_contents):
                    # GH54564 - don't attempt to convert NaN/Inf
                    val = int(cell_contents)
                    if val == cell_contents:
                        cell_contents = val
            return cell_contents

        data = []

        nrows = sheet.nrows
        if file_rows_needed is not None:
            nrows = min(nrows, file_rows_needed)
        for i in range(nrows):
            row = [
                _parse_cell(value, typ)
                for value, typ in zip(sheet.row_values(i), sheet.row_types(i))
            ]
            data.append(row)

        return data
.venv\Lib\site-packages\pandas\io\excel\_xlrd.py
_xlrd.py
Python
4,556
0.95
0.13986
0.059322
node-utils
134
2023-11-17T06:58:09.335645
BSD-3-Clause
false
4b3e719d6d15879665a5e3a22632b7f9
from __future__ import annotations

import json
from typing import (
    TYPE_CHECKING,
    Any,
)

from pandas.io.excel._base import ExcelWriter
from pandas.io.excel._util import (
    combine_kwargs,
    validate_freeze_panes,
)

if TYPE_CHECKING:
    from pandas._typing import (
        ExcelWriterIfSheetExists,
        FilePath,
        StorageOptions,
        WriteExcelBuffer,
    )


class _XlsxStyler:
    """
    Translate openpyxl-style nested style dicts into the flat keyword dicts
    accepted by xlsxwriter's ``Workbook.add_format``.
    """

    # Map from openpyxl-oriented styles to flatter xlsxwriter representation
    # Ordering necessary for both determinism and because some are keyed by
    # prefixes of others.
    STYLE_MAPPING: dict[str, list[tuple[tuple[str, ...], str]]] = {
        "font": [
            (("name",), "font_name"),
            (("sz",), "font_size"),
            (("size",), "font_size"),
            (("color", "rgb"), "font_color"),
            (("color",), "font_color"),
            (("b",), "bold"),
            (("bold",), "bold"),
            (("i",), "italic"),
            (("italic",), "italic"),
            (("u",), "underline"),
            (("underline",), "underline"),
            (("strike",), "font_strikeout"),
            (("vertAlign",), "font_script"),
            (("vertalign",), "font_script"),
        ],
        "number_format": [(("format_code",), "num_format"), ((), "num_format")],
        "protection": [(("locked",), "locked"), (("hidden",), "hidden")],
        "alignment": [
            (("horizontal",), "align"),
            (("vertical",), "valign"),
            (("text_rotation",), "rotation"),
            (("wrap_text",), "text_wrap"),
            (("indent",), "indent"),
            (("shrink_to_fit",), "shrink"),
        ],
        "fill": [
            (("patternType",), "pattern"),
            (("patterntype",), "pattern"),
            (("fill_type",), "pattern"),
            (("start_color", "rgb"), "fg_color"),
            (("fgColor", "rgb"), "fg_color"),
            (("fgcolor", "rgb"), "fg_color"),
            (("start_color",), "fg_color"),
            (("fgColor",), "fg_color"),
            (("fgcolor",), "fg_color"),
            (("end_color", "rgb"), "bg_color"),
            (("bgColor", "rgb"), "bg_color"),
            (("bgcolor", "rgb"), "bg_color"),
            (("end_color",), "bg_color"),
            (("bgColor",), "bg_color"),
            (("bgcolor",), "bg_color"),
        ],
        "border": [
            (("color", "rgb"), "border_color"),
            (("color",), "border_color"),
            (("style",), "border"),
            (("top", "color", "rgb"), "top_color"),
            (("top", "color"), "top_color"),
            (("top", "style"), "top"),
            (("top",), "top"),
            (("right", "color", "rgb"), "right_color"),
            (("right", "color"), "right_color"),
            (("right", "style"), "right"),
            (("right",), "right"),
            (("bottom", "color", "rgb"), "bottom_color"),
            (("bottom", "color"), "bottom_color"),
            (("bottom", "style"), "bottom"),
            (("bottom",), "bottom"),
            (("left", "color", "rgb"), "left_color"),
            (("left", "color"), "left_color"),
            (("left", "style"), "left"),
            (("left",), "left"),
        ],
    }

    @classmethod
    def convert(cls, style_dict, num_format_str=None):
        """
        converts a style_dict to an xlsxwriter format dict

        Parameters
        ----------
        style_dict : style dictionary to convert
        num_format_str : optional number format string
        """
        # Create a XlsxWriter format object.
        props = {}

        if num_format_str is not None:
            props["num_format"] = num_format_str

        if style_dict is None:
            return props

        # "borders" is accepted as an alias for "border"; copy before renaming
        # so the caller's dict is not mutated.
        if "borders" in style_dict:
            style_dict = style_dict.copy()
            style_dict["border"] = style_dict.pop("borders")

        for style_group_key, style_group in style_dict.items():
            for src, dst in cls.STYLE_MAPPING.get(style_group_key, []):
                # src is a sequence of keys into a nested dict
                # dst is a flat key
                if dst in props:
                    # First (most specific) mapping wins; see ordering note above.
                    continue
                v = style_group
                for k in src:
                    try:
                        v = v[k]
                    except (KeyError, TypeError):
                        break
                else:
                    props[dst] = v

        if isinstance(props.get("pattern"), str):
            # TODO: support other fill patterns
            props["pattern"] = 0 if props["pattern"] == "none" else 1

        for k in ["border", "top", "right", "bottom", "left"]:
            if isinstance(props.get(k), str):
                try:
                    # Border style names map to their position in this list.
                    props[k] = [
                        "none",
                        "thin",
                        "medium",
                        "dashed",
                        "dotted",
                        "thick",
                        "double",
                        "hair",
                        "mediumDashed",
                        "dashDot",
                        "mediumDashDot",
                        "dashDotDot",
                        "mediumDashDotDot",
                        "slantDashDot",
                    ].index(props[k])
                except ValueError:
                    # Unknown style name: fall back to 2 ("medium").
                    props[k] = 2

        if isinstance(props.get("font_script"), str):
            props["font_script"] = ["baseline", "superscript", "subscript"].index(
                props["font_script"]
            )

        if isinstance(props.get("underline"), str):
            props["underline"] = {
                "none": 0,
                "single": 1,
                "double": 2,
                "singleAccounting": 33,
                "doubleAccounting": 34,
            }[props["underline"]]

        # GH 30107 - xlsxwriter uses different name
        if props.get("valign") == "center":
            props["valign"] = "vcenter"

        return props


class XlsxWriter(ExcelWriter):
    """ExcelWriter implementation backed by the xlsxwriter library (.xlsx only)."""

    _engine = "xlsxwriter"
    _supported_extensions = (".xlsx",)

    def __init__(
        self,
        path: FilePath | WriteExcelBuffer | ExcelWriter,
        engine: str | None = None,
        date_format: str | None = None,
        datetime_format: str | None = None,
        mode: str = "w",
        storage_options: StorageOptions | None = None,
        if_sheet_exists: ExcelWriterIfSheetExists | None = None,
        engine_kwargs: dict[str, Any] | None = None,
        **kwargs,
    ) -> None:
        # Use the xlsxwriter module as the Excel writer.
        from xlsxwriter import Workbook

        engine_kwargs = combine_kwargs(engine_kwargs, kwargs)

        # xlsxwriter can only create new files, never modify existing ones.
        if mode == "a":
            raise ValueError("Append mode is not supported with xlsxwriter!")

        super().__init__(
            path,
            engine=engine,
            date_format=date_format,
            datetime_format=datetime_format,
            mode=mode,
            storage_options=storage_options,
            if_sheet_exists=if_sheet_exists,
            engine_kwargs=engine_kwargs,
        )

        try:
            self._book = Workbook(self._handles.handle, **engine_kwargs)
        except TypeError:
            # Workbook construction failed: release the file handle before
            # propagating so the caller is not left with a leaked handle.
            self._handles.handle.close()
            raise

    @property
    def book(self):
        """
        Book instance of class xlsxwriter.Workbook.

        This attribute can be used to access engine-specific features.
        """
        return self._book

    @property
    def sheets(self) -> dict[str, Any]:
        # Mapping of sheet name -> worksheet object, as kept by xlsxwriter.
        result = self.book.sheetnames
        return result

    def _save(self) -> None:
        """
        Save workbook to disk.
        """
        self.book.close()

    def _write_cells(
        self,
        cells,
        sheet_name: str | None = None,
        startrow: int = 0,
        startcol: int = 0,
        freeze_panes: tuple[int, int] | None = None,
    ) -> None:
        # Write the frame cells using xlsxwriter.
        sheet_name = self._get_sheet_name(sheet_name)

        wks = self.book.get_worksheet_by_name(sheet_name)
        if wks is None:
            wks = self.book.add_worksheet(sheet_name)

        # Cache of converted formats keyed by the JSON of the style dict;
        # json.dumps(None) == "null", so a missing style maps to no format.
        style_dict = {"null": None}

        if validate_freeze_panes(freeze_panes):
            wks.freeze_panes(*(freeze_panes))

        for cell in cells:
            val, fmt = self._value_with_fmt(cell.val)

            stylekey = json.dumps(cell.style)
            if fmt:
                stylekey += fmt

            if stylekey in style_dict:
                style = style_dict[stylekey]
            else:
                style = self.book.add_format(_XlsxStyler.convert(cell.style, fmt))
                style_dict[stylekey] = style

            if cell.mergestart is not None and cell.mergeend is not None:
                wks.merge_range(
                    startrow + cell.row,
                    startcol + cell.col,
                    startrow + cell.mergestart,
                    startcol + cell.mergeend,
                    val,
                    style,
                )
            else:
                wks.write(startrow + cell.row, startcol + cell.col, val, style)
.venv\Lib\site-packages\pandas\io\excel\_xlsxwriter.py
_xlsxwriter.py
Python
9,191
0.95
0.123239
0.044355
vue-tools
185
2024-10-16T16:25:09.407013
Apache-2.0
false
da44dae9771c9d1ca7ec9373d5c91aa3
from pandas.io.excel._base import (
    ExcelFile,
    ExcelWriter,
    read_excel,
)
from pandas.io.excel._odswriter import ODSWriter as _ODSWriter
from pandas.io.excel._openpyxl import OpenpyxlWriter as _OpenpyxlWriter
from pandas.io.excel._util import register_writer
from pandas.io.excel._xlsxwriter import XlsxWriter as _XlsxWriter

__all__ = ["read_excel", "ExcelWriter", "ExcelFile"]


# Register the built-in writer engines so ``to_excel`` can find them by name.
for _writer in (_OpenpyxlWriter, _XlsxWriter, _ODSWriter):
    register_writer(_writer)
.venv\Lib\site-packages\pandas\io\excel\__init__.py
__init__.py
Python
486
0.85
0
0
awesome-app
192
2024-04-18T17:52:15.021282
GPL-3.0
false
81afed8d218e5032bcdbffe7771e1b4e
\n\n
.venv\Lib\site-packages\pandas\io\excel\__pycache__\_base.cpython-313.pyc
_base.cpython-313.pyc
Other
58,532
0.75
0.071125
0.039855
awesome-app
756
2024-10-27T10:23:34.106462
GPL-3.0
false
00eb73c28061c01dff84bee3923a8541
\n\n
.venv\Lib\site-packages\pandas\io\excel\__pycache__\_calamine.cpython-313.pyc
_calamine.cpython-313.pyc
Other
5,147
0.95
0
0
vue-tools
867
2023-11-29T03:29:40.267235
Apache-2.0
false
8ae04cb683fdb74a822d019066eba499
\n\n
.venv\Lib\site-packages\pandas\io\excel\__pycache__\_odfreader.cpython-313.pyc
_odfreader.cpython-313.pyc
Other
10,498
0.95
0.007246
0
node-utils
351
2024-12-10T05:24:25.945143
BSD-3-Clause
false
b800123373e6a6a0c2e9cab73b037283
\n\n
.venv\Lib\site-packages\pandas\io\excel\__pycache__\_odswriter.cpython-313.pyc
_odswriter.cpython-313.pyc
Other
12,965
0.95
0.010582
0
node-utils
483
2023-07-23T15:40:03.837380
GPL-3.0
false
d2838ca117012a15850f5b01d9478210
\n\n
.venv\Lib\site-packages\pandas\io\excel\__pycache__\_openpyxl.cpython-313.pyc
_openpyxl.cpython-313.pyc
Other
22,520
0.95
0.016713
0
node-utils
320
2025-04-14T08:03:14.660803
GPL-3.0
false
2117b4de0bbcb4fa37a2afb0662b7033
\n\n
.venv\Lib\site-packages\pandas\io\excel\__pycache__\_pyxlsb.cpython-313.pyc
_pyxlsb.cpython-313.pyc
Other
5,432
0.95
0
0
awesome-app
897
2024-11-20T22:29:13.823169
BSD-3-Clause
false
cf3711e591a98de27a70461ac8bc8d15
\n\n
.venv\Lib\site-packages\pandas\io\excel\__pycache__\_util.cpython-313.pyc
_util.cpython-313.pyc
Other
9,770
0.95
0.071429
0.005747
vue-tools
987
2024-08-04T10:27:49.315939
GPL-3.0
false
3c3f5d4d7995fa07f7cc5b5b07650693
\n\n
.venv\Lib\site-packages\pandas\io\excel\__pycache__\_xlrd.cpython-313.pyc
_xlrd.cpython-313.pyc
Other
5,614
0.95
0.013514
0
awesome-app
785
2024-01-05T15:41:08.555485
GPL-3.0
false
0659687aa8d1a8c4db205b6495498628
\n\n
.venv\Lib\site-packages\pandas\io\excel\__pycache__\_xlsxwriter.cpython-313.pyc
_xlsxwriter.cpython-313.pyc
Other
8,655
0.95
0.010101
0.010417
vue-tools
293
2025-05-04T12:37:37.430535
GPL-3.0
false
ee799d1d6e011948622708cf25be2f60
\n\n
.venv\Lib\site-packages\pandas\io\excel\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
708
0.7
0
0
node-utils
128
2024-02-15T07:01:21.685042
Apache-2.0
false
218772ca92c095bb0b4a2227711125a7
"""\nInternal module for console introspection\n"""\nfrom __future__ import annotations\n\nfrom shutil import get_terminal_size\n\n\ndef get_console_size() -> tuple[int | None, int | None]:\n """\n Return console size as tuple = (width, height).\n\n Returns (None,None) in non-interactive session.\n """\n from pandas import get_option\n\n display_width = get_option("display.width")\n display_height = get_option("display.max_rows")\n\n # Consider\n # interactive shell terminal, can detect term size\n # interactive non-shell terminal (ipnb/ipqtconsole), cannot detect term\n # size non-interactive script, should disregard term size\n\n # in addition\n # width,height have default values, but setting to 'None' signals\n # should use Auto-Detection, But only in interactive shell-terminal.\n # Simple. yeah.\n\n if in_interactive_session():\n if in_ipython_frontend():\n # sane defaults for interactive non-shell terminal\n # match default for width,height in config_init\n from pandas._config.config import get_default_val\n\n terminal_width = get_default_val("display.width")\n terminal_height = get_default_val("display.max_rows")\n else:\n # pure terminal\n terminal_width, terminal_height = get_terminal_size()\n else:\n terminal_width, terminal_height = None, None\n\n # Note if the User sets width/Height to None (auto-detection)\n # and we're in a script (non-inter), this will return (None,None)\n # caller needs to deal.\n return display_width or terminal_width, display_height or terminal_height\n\n\n# ----------------------------------------------------------------------\n# Detect our environment\n\n\ndef in_interactive_session() -> bool:\n """\n Check if we're running in an interactive shell.\n\n Returns\n -------\n bool\n True if running under python/ipython interactive shell.\n """\n from pandas import get_option\n\n def check_main():\n try:\n import __main__ as main\n except ModuleNotFoundError:\n return get_option("mode.sim_interactive")\n return not hasattr(main, 
"__file__") or get_option("mode.sim_interactive")\n\n try:\n # error: Name '__IPYTHON__' is not defined\n return __IPYTHON__ or check_main() # type: ignore[name-defined]\n except NameError:\n return check_main()\n\n\ndef in_ipython_frontend() -> bool:\n """\n Check if we're inside an IPython zmq frontend.\n\n Returns\n -------\n bool\n """\n try:\n # error: Name 'get_ipython' is not defined\n ip = get_ipython() # type: ignore[name-defined]\n return "zmq" in str(type(ip)).lower()\n except NameError:\n pass\n\n return False\n
.venv\Lib\site-packages\pandas\io\formats\console.py
console.py
Python
2,748
0.95
0.170213
0.246575
awesome-app
879
2024-09-30T18:28:13.651148
MIT
false
d8afd34ff661a65782197b165e9f04ea
"""\nUtilities for interpreting CSS from Stylers for formatting non-HTML outputs.\n"""\nfrom __future__ import annotations\n\nimport re\nfrom typing import (\n TYPE_CHECKING,\n Callable,\n)\nimport warnings\n\nfrom pandas.errors import CSSWarning\nfrom pandas.util._exceptions import find_stack_level\n\nif TYPE_CHECKING:\n from collections.abc import (\n Generator,\n Iterable,\n Iterator,\n )\n\n\ndef _side_expander(prop_fmt: str) -> Callable:\n """\n Wrapper to expand shorthand property into top, right, bottom, left properties\n\n Parameters\n ----------\n side : str\n The border side to expand into properties\n\n Returns\n -------\n function: Return to call when a 'border(-{side}): {value}' string is encountered\n """\n\n def expand(self, prop, value: str) -> Generator[tuple[str, str], None, None]:\n """\n Expand shorthand property into side-specific property (top, right, bottom, left)\n\n Parameters\n ----------\n prop (str): CSS property name\n value (str): String token for property\n\n Yields\n ------\n Tuple (str, str): Expanded property, value\n """\n tokens = value.split()\n try:\n mapping = self.SIDE_SHORTHANDS[len(tokens)]\n except KeyError:\n warnings.warn(\n f'Could not expand "{prop}: {value}"',\n CSSWarning,\n stacklevel=find_stack_level(),\n )\n return\n for key, idx in zip(self.SIDES, mapping):\n yield prop_fmt.format(key), tokens[idx]\n\n return expand\n\n\ndef _border_expander(side: str = "") -> Callable:\n """\n Wrapper to expand 'border' property into border color, style, and width properties\n\n Parameters\n ----------\n side : str\n The border side to expand into properties\n\n Returns\n -------\n function: Return to call when a 'border(-{side}): {value}' string is encountered\n """\n if side != "":\n side = f"-{side}"\n\n def expand(self, prop, value: str) -> Generator[tuple[str, str], None, None]:\n """\n Expand border into color, style, and width tuples\n\n Parameters\n ----------\n prop : str\n CSS property name passed to styler\n value : 
str\n Value passed to styler for property\n\n Yields\n ------\n Tuple (str, str): Expanded property, value\n """\n tokens = value.split()\n if len(tokens) == 0 or len(tokens) > 3:\n warnings.warn(\n f'Too many tokens provided to "{prop}" (expected 1-3)',\n CSSWarning,\n stacklevel=find_stack_level(),\n )\n\n # TODO: Can we use current color as initial value to comply with CSS standards?\n border_declarations = {\n f"border{side}-color": "black",\n f"border{side}-style": "none",\n f"border{side}-width": "medium",\n }\n for token in tokens:\n if token.lower() in self.BORDER_STYLES:\n border_declarations[f"border{side}-style"] = token\n elif any(ratio in token.lower() for ratio in self.BORDER_WIDTH_RATIOS):\n border_declarations[f"border{side}-width"] = token\n else:\n border_declarations[f"border{side}-color"] = token\n # TODO: Warn user if item entered more than once (e.g. "border: red green")\n\n # Per CSS, "border" will reset previous "border-*" definitions\n yield from self.atomize(border_declarations.items())\n\n return expand\n\n\nclass CSSResolver:\n """\n A callable for parsing and resolving CSS to atomic properties.\n """\n\n UNIT_RATIOS = {\n "pt": ("pt", 1),\n "em": ("em", 1),\n "rem": ("pt", 12),\n "ex": ("em", 0.5),\n # 'ch':\n "px": ("pt", 0.75),\n "pc": ("pt", 12),\n "in": ("pt", 72),\n "cm": ("in", 1 / 2.54),\n "mm": ("in", 1 / 25.4),\n "q": ("mm", 0.25),\n "!!default": ("em", 0),\n }\n\n FONT_SIZE_RATIOS = UNIT_RATIOS.copy()\n FONT_SIZE_RATIOS.update(\n {\n "%": ("em", 0.01),\n "xx-small": ("rem", 0.5),\n "x-small": ("rem", 0.625),\n "small": ("rem", 0.8),\n "medium": ("rem", 1),\n "large": ("rem", 1.125),\n "x-large": ("rem", 1.5),\n "xx-large": ("rem", 2),\n "smaller": ("em", 1 / 1.2),\n "larger": ("em", 1.2),\n "!!default": ("em", 1),\n }\n )\n\n MARGIN_RATIOS = UNIT_RATIOS.copy()\n MARGIN_RATIOS.update({"none": ("pt", 0)})\n\n BORDER_WIDTH_RATIOS = UNIT_RATIOS.copy()\n BORDER_WIDTH_RATIOS.update(\n {\n "none": ("pt", 0),\n "thick": ("px", 4),\n 
"medium": ("px", 2),\n "thin": ("px", 1),\n # Default: medium only if solid\n }\n )\n\n BORDER_STYLES = [\n "none",\n "hidden",\n "dotted",\n "dashed",\n "solid",\n "double",\n "groove",\n "ridge",\n "inset",\n "outset",\n "mediumdashdot",\n "dashdotdot",\n "hair",\n "mediumdashdotdot",\n "dashdot",\n "slantdashdot",\n "mediumdashed",\n ]\n\n SIDE_SHORTHANDS = {\n 1: [0, 0, 0, 0],\n 2: [0, 1, 0, 1],\n 3: [0, 1, 2, 1],\n 4: [0, 1, 2, 3],\n }\n\n SIDES = ("top", "right", "bottom", "left")\n\n CSS_EXPANSIONS = {\n **{\n (f"border-{prop}" if prop else "border"): _border_expander(prop)\n for prop in ["", "top", "right", "bottom", "left"]\n },\n **{\n f"border-{prop}": _side_expander(f"border-{{:s}}-{prop}")\n for prop in ["color", "style", "width"]\n },\n "margin": _side_expander("margin-{:s}"),\n "padding": _side_expander("padding-{:s}"),\n }\n\n def __call__(\n self,\n declarations: str | Iterable[tuple[str, str]],\n inherited: dict[str, str] | None = None,\n ) -> dict[str, str]:\n """\n The given declarations to atomic properties.\n\n Parameters\n ----------\n declarations_str : str | Iterable[tuple[str, str]]\n A CSS string or set of CSS declaration tuples\n e.g. "font-weight: bold; background: blue" or\n {("font-weight", "bold"), ("background", "blue")}\n inherited : dict, optional\n Atomic properties indicating the inherited style context in which\n declarations_str is to be resolved. ``inherited`` should already\n be resolved, i.e. valid output of this method.\n\n Returns\n -------\n dict\n Atomic CSS 2.2 properties.\n\n Examples\n --------\n >>> resolve = CSSResolver()\n >>> inherited = {'font-family': 'serif', 'font-weight': 'bold'}\n >>> out = resolve('''\n ... border-color: BLUE RED;\n ... font-size: 1em;\n ... font-size: 2em;\n ... font-weight: normal;\n ... font-weight: inherit;\n ... 
''', inherited)\n >>> sorted(out.items()) # doctest: +NORMALIZE_WHITESPACE\n [('border-bottom-color', 'blue'),\n ('border-left-color', 'red'),\n ('border-right-color', 'red'),\n ('border-top-color', 'blue'),\n ('font-family', 'serif'),\n ('font-size', '24pt'),\n ('font-weight', 'bold')]\n """\n if isinstance(declarations, str):\n declarations = self.parse(declarations)\n props = dict(self.atomize(declarations))\n if inherited is None:\n inherited = {}\n\n props = self._update_initial(props, inherited)\n props = self._update_font_size(props, inherited)\n return self._update_other_units(props)\n\n def _update_initial(\n self,\n props: dict[str, str],\n inherited: dict[str, str],\n ) -> dict[str, str]:\n # 1. resolve inherited, initial\n for prop, val in inherited.items():\n if prop not in props:\n props[prop] = val\n\n new_props = props.copy()\n for prop, val in props.items():\n if val == "inherit":\n val = inherited.get(prop, "initial")\n\n if val in ("initial", None):\n # we do not define a complete initial stylesheet\n del new_props[prop]\n else:\n new_props[prop] = val\n return new_props\n\n def _update_font_size(\n self,\n props: dict[str, str],\n inherited: dict[str, str],\n ) -> dict[str, str]:\n # 2. resolve relative font size\n if props.get("font-size"):\n props["font-size"] = self.size_to_pt(\n props["font-size"],\n self._get_font_size(inherited),\n conversions=self.FONT_SIZE_RATIOS,\n )\n return props\n\n def _get_font_size(self, props: dict[str, str]) -> float | None:\n if props.get("font-size"):\n font_size_string = props["font-size"]\n return self._get_float_font_size_from_pt(font_size_string)\n return None\n\n def _get_float_font_size_from_pt(self, font_size_string: str) -> float:\n assert font_size_string.endswith("pt")\n return float(font_size_string.rstrip("pt"))\n\n def _update_other_units(self, props: dict[str, str]) -> dict[str, str]:\n font_size = self._get_font_size(props)\n # 3. 
TODO: resolve other font-relative units\n for side in self.SIDES:\n prop = f"border-{side}-width"\n if prop in props:\n props[prop] = self.size_to_pt(\n props[prop],\n em_pt=font_size,\n conversions=self.BORDER_WIDTH_RATIOS,\n )\n\n for prop in [f"margin-{side}", f"padding-{side}"]:\n if prop in props:\n # TODO: support %\n props[prop] = self.size_to_pt(\n props[prop],\n em_pt=font_size,\n conversions=self.MARGIN_RATIOS,\n )\n return props\n\n def size_to_pt(self, in_val, em_pt=None, conversions=UNIT_RATIOS) -> str:\n def _error():\n warnings.warn(\n f"Unhandled size: {repr(in_val)}",\n CSSWarning,\n stacklevel=find_stack_level(),\n )\n return self.size_to_pt("1!!default", conversions=conversions)\n\n match = re.match(r"^(\S*?)([a-zA-Z%!].*)", in_val)\n if match is None:\n return _error()\n\n val, unit = match.groups()\n if val == "":\n # hack for 'large' etc.\n val = 1\n else:\n try:\n val = float(val)\n except ValueError:\n return _error()\n\n while unit != "pt":\n if unit == "em":\n if em_pt is None:\n unit = "rem"\n else:\n val *= em_pt\n unit = "pt"\n continue\n\n try:\n unit, mul = conversions[unit]\n except KeyError:\n return _error()\n val *= mul\n\n val = round(val, 5)\n if int(val) == val:\n size_fmt = f"{int(val):d}pt"\n else:\n size_fmt = f"{val:f}pt"\n return size_fmt\n\n def atomize(self, declarations: Iterable) -> Generator[tuple[str, str], None, None]:\n for prop, value in declarations:\n prop = prop.lower()\n value = value.lower()\n if prop in self.CSS_EXPANSIONS:\n expand = self.CSS_EXPANSIONS[prop]\n yield from expand(self, prop, value)\n else:\n yield prop, value\n\n def parse(self, declarations_str: str) -> Iterator[tuple[str, str]]:\n """\n Generates (prop, value) pairs from declarations.\n\n In a future version may generate parsed tokens from tinycss/tinycss2\n\n Parameters\n ----------\n declarations_str : str\n """\n for decl in declarations_str.split(";"):\n if not decl.strip():\n continue\n prop, sep, val = decl.partition(":")\n prop = 
prop.strip().lower()\n # TODO: don't lowercase case sensitive parts of values (strings)\n val = val.strip().lower()\n if sep:\n yield prop, val\n else:\n warnings.warn(\n f"Ill-formatted attribute: expected a colon in {repr(decl)}",\n CSSWarning,\n stacklevel=find_stack_level(),\n )\n
.venv\Lib\site-packages\pandas\io\formats\css.py
css.py
Python
12,793
0.95
0.147268
0.038147
react-lib
908
2025-06-12T01:00:51.963199
BSD-3-Clause
false
4e9722ebf7ae7badf8dfacd571162e48
"""\nModule for formatting output data into CSV files.\n"""\n\nfrom __future__ import annotations\n\nfrom collections.abc import (\n Hashable,\n Iterable,\n Iterator,\n Sequence,\n)\nimport csv as csvlib\nimport os\nfrom typing import (\n TYPE_CHECKING,\n Any,\n cast,\n)\n\nimport numpy as np\n\nfrom pandas._libs import writers as libwriters\nfrom pandas._typing import SequenceNotStr\nfrom pandas.util._decorators import cache_readonly\n\nfrom pandas.core.dtypes.generic import (\n ABCDatetimeIndex,\n ABCIndex,\n ABCMultiIndex,\n ABCPeriodIndex,\n)\nfrom pandas.core.dtypes.missing import notna\n\nfrom pandas.core.indexes.api import Index\n\nfrom pandas.io.common import get_handle\n\nif TYPE_CHECKING:\n from pandas._typing import (\n CompressionOptions,\n FilePath,\n FloatFormatType,\n IndexLabel,\n StorageOptions,\n WriteBuffer,\n npt,\n )\n\n from pandas.io.formats.format import DataFrameFormatter\n\n\n_DEFAULT_CHUNKSIZE_CELLS = 100_000\n\n\nclass CSVFormatter:\n cols: npt.NDArray[np.object_]\n\n def __init__(\n self,\n formatter: DataFrameFormatter,\n path_or_buf: FilePath | WriteBuffer[str] | WriteBuffer[bytes] = "",\n sep: str = ",",\n cols: Sequence[Hashable] | None = None,\n index_label: IndexLabel | None = None,\n mode: str = "w",\n encoding: str | None = None,\n errors: str = "strict",\n compression: CompressionOptions = "infer",\n quoting: int | None = None,\n lineterminator: str | None = "\n",\n chunksize: int | None = None,\n quotechar: str | None = '"',\n date_format: str | None = None,\n doublequote: bool = True,\n escapechar: str | None = None,\n storage_options: StorageOptions | None = None,\n ) -> None:\n self.fmt = formatter\n\n self.obj = self.fmt.frame\n\n self.filepath_or_buffer = path_or_buf\n self.encoding = encoding\n self.compression: CompressionOptions = compression\n self.mode = mode\n self.storage_options = storage_options\n\n self.sep = sep\n self.index_label = self._initialize_index_label(index_label)\n self.errors = errors\n self.quoting 
= quoting or csvlib.QUOTE_MINIMAL\n self.quotechar = self._initialize_quotechar(quotechar)\n self.doublequote = doublequote\n self.escapechar = escapechar\n self.lineterminator = lineterminator or os.linesep\n self.date_format = date_format\n self.cols = self._initialize_columns(cols)\n self.chunksize = self._initialize_chunksize(chunksize)\n\n @property\n def na_rep(self) -> str:\n return self.fmt.na_rep\n\n @property\n def float_format(self) -> FloatFormatType | None:\n return self.fmt.float_format\n\n @property\n def decimal(self) -> str:\n return self.fmt.decimal\n\n @property\n def header(self) -> bool | SequenceNotStr[str]:\n return self.fmt.header\n\n @property\n def index(self) -> bool:\n return self.fmt.index\n\n def _initialize_index_label(self, index_label: IndexLabel | None) -> IndexLabel:\n if index_label is not False:\n if index_label is None:\n return self._get_index_label_from_obj()\n elif not isinstance(index_label, (list, tuple, np.ndarray, ABCIndex)):\n # given a string for a DF with Index\n return [index_label]\n return index_label\n\n def _get_index_label_from_obj(self) -> Sequence[Hashable]:\n if isinstance(self.obj.index, ABCMultiIndex):\n return self._get_index_label_multiindex()\n else:\n return self._get_index_label_flat()\n\n def _get_index_label_multiindex(self) -> Sequence[Hashable]:\n return [name or "" for name in self.obj.index.names]\n\n def _get_index_label_flat(self) -> Sequence[Hashable]:\n index_label = self.obj.index.name\n return [""] if index_label is None else [index_label]\n\n def _initialize_quotechar(self, quotechar: str | None) -> str | None:\n if self.quoting != csvlib.QUOTE_NONE:\n # prevents crash in _csv\n return quotechar\n return None\n\n @property\n def has_mi_columns(self) -> bool:\n return bool(isinstance(self.obj.columns, ABCMultiIndex))\n\n def _initialize_columns(\n self, cols: Iterable[Hashable] | None\n ) -> npt.NDArray[np.object_]:\n # validate mi options\n if self.has_mi_columns:\n if cols is not None:\n 
msg = "cannot specify cols with a MultiIndex on the columns"\n raise TypeError(msg)\n\n if cols is not None:\n if isinstance(cols, ABCIndex):\n cols = cols._get_values_for_csv(**self._number_format)\n else:\n cols = list(cols)\n self.obj = self.obj.loc[:, cols]\n\n # update columns to include possible multiplicity of dupes\n # and make sure cols is just a list of labels\n new_cols = self.obj.columns\n return new_cols._get_values_for_csv(**self._number_format)\n\n def _initialize_chunksize(self, chunksize: int | None) -> int:\n if chunksize is None:\n return (_DEFAULT_CHUNKSIZE_CELLS // (len(self.cols) or 1)) or 1\n return int(chunksize)\n\n @property\n def _number_format(self) -> dict[str, Any]:\n """Dictionary used for storing number formatting settings."""\n return {\n "na_rep": self.na_rep,\n "float_format": self.float_format,\n "date_format": self.date_format,\n "quoting": self.quoting,\n "decimal": self.decimal,\n }\n\n @cache_readonly\n def data_index(self) -> Index:\n data_index = self.obj.index\n if (\n isinstance(data_index, (ABCDatetimeIndex, ABCPeriodIndex))\n and self.date_format is not None\n ):\n data_index = Index(\n [x.strftime(self.date_format) if notna(x) else "" for x in data_index]\n )\n elif isinstance(data_index, ABCMultiIndex):\n data_index = data_index.remove_unused_levels()\n return data_index\n\n @property\n def nlevels(self) -> int:\n if self.index:\n return getattr(self.data_index, "nlevels", 1)\n else:\n return 0\n\n @property\n def _has_aliases(self) -> bool:\n return isinstance(self.header, (tuple, list, np.ndarray, ABCIndex))\n\n @property\n def _need_to_save_header(self) -> bool:\n return bool(self._has_aliases or self.header)\n\n @property\n def write_cols(self) -> SequenceNotStr[Hashable]:\n if self._has_aliases:\n assert not isinstance(self.header, bool)\n if len(self.header) != len(self.cols):\n raise ValueError(\n f"Writing {len(self.cols)} cols but got {len(self.header)} aliases"\n )\n return self.header\n else:\n # self.cols 
is an ndarray derived from Index._get_values_for_csv,\n # so its entries are strings, i.e. hashable\n return cast(SequenceNotStr[Hashable], self.cols)\n\n @property\n def encoded_labels(self) -> list[Hashable]:\n encoded_labels: list[Hashable] = []\n\n if self.index and self.index_label:\n assert isinstance(self.index_label, Sequence)\n encoded_labels = list(self.index_label)\n\n if not self.has_mi_columns or self._has_aliases:\n encoded_labels += list(self.write_cols)\n\n return encoded_labels\n\n def save(self) -> None:\n """\n Create the writer & save.\n """\n # apply compression and byte/text conversion\n with get_handle(\n self.filepath_or_buffer,\n self.mode,\n encoding=self.encoding,\n errors=self.errors,\n compression=self.compression,\n storage_options=self.storage_options,\n ) as handles:\n # Note: self.encoding is irrelevant here\n self.writer = csvlib.writer(\n handles.handle,\n lineterminator=self.lineterminator,\n delimiter=self.sep,\n quoting=self.quoting,\n doublequote=self.doublequote,\n escapechar=self.escapechar,\n quotechar=self.quotechar,\n )\n\n self._save()\n\n def _save(self) -> None:\n if self._need_to_save_header:\n self._save_header()\n self._save_body()\n\n def _save_header(self) -> None:\n if not self.has_mi_columns or self._has_aliases:\n self.writer.writerow(self.encoded_labels)\n else:\n for row in self._generate_multiindex_header_rows():\n self.writer.writerow(row)\n\n def _generate_multiindex_header_rows(self) -> Iterator[list[Hashable]]:\n columns = self.obj.columns\n for i in range(columns.nlevels):\n # we need at least 1 index column to write our col names\n col_line = []\n if self.index:\n # name is the first column\n col_line.append(columns.names[i])\n\n if isinstance(self.index_label, list) and len(self.index_label) > 1:\n col_line.extend([""] * (len(self.index_label) - 1))\n\n col_line.extend(columns._get_level_values(i))\n yield col_line\n\n # Write out the index line if it's not empty.\n # Otherwise, we will print out an 
extraneous\n # blank line between the mi and the data rows.\n if self.encoded_labels and set(self.encoded_labels) != {""}:\n yield self.encoded_labels + [""] * len(columns)\n\n def _save_body(self) -> None:\n nrows = len(self.data_index)\n chunks = (nrows // self.chunksize) + 1\n for i in range(chunks):\n start_i = i * self.chunksize\n end_i = min(start_i + self.chunksize, nrows)\n if start_i >= end_i:\n break\n self._save_chunk(start_i, end_i)\n\n def _save_chunk(self, start_i: int, end_i: int) -> None:\n # create the data for a chunk\n slicer = slice(start_i, end_i)\n df = self.obj.iloc[slicer]\n\n res = df._get_values_for_csv(**self._number_format)\n data = list(res._iter_column_arrays())\n\n ix = self.data_index[slicer]._get_values_for_csv(**self._number_format)\n libwriters.write_csv_rows(\n data,\n ix,\n self.nlevels,\n self.cols,\n self.writer,\n )\n
.venv\Lib\site-packages\pandas\io\formats\csvs.py
csvs.py
Python
10,526
0.95
0.187879
0.054348
node-utils
3
2024-11-29T09:24:32.741902
GPL-3.0
false
7c68af32632f0124d05ac9643309855f
"""\nUtilities for conversion to writer-agnostic Excel representation.\n"""\nfrom __future__ import annotations\n\nfrom collections.abc import (\n Hashable,\n Iterable,\n Mapping,\n Sequence,\n)\nimport functools\nimport itertools\nimport re\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n cast,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs.lib import is_list_like\nfrom pandas.util._decorators import doc\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes import missing\nfrom pandas.core.dtypes.common import (\n is_float,\n is_scalar,\n)\n\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n PeriodIndex,\n)\nimport pandas.core.common as com\nfrom pandas.core.shared_docs import _shared_docs\n\nfrom pandas.io.formats._color_data import CSS4_COLORS\nfrom pandas.io.formats.css import (\n CSSResolver,\n CSSWarning,\n)\nfrom pandas.io.formats.format import get_level_lengths\nfrom pandas.io.formats.printing import pprint_thing\n\nif TYPE_CHECKING:\n from pandas._typing import (\n FilePath,\n IndexLabel,\n StorageOptions,\n WriteExcelBuffer,\n )\n\n from pandas import ExcelWriter\n\n\nclass ExcelCell:\n __fields__ = ("row", "col", "val", "style", "mergestart", "mergeend")\n __slots__ = __fields__\n\n def __init__(\n self,\n row: int,\n col: int,\n val,\n style=None,\n mergestart: int | None = None,\n mergeend: int | None = None,\n ) -> None:\n self.row = row\n self.col = col\n self.val = val\n self.style = style\n self.mergestart = mergestart\n self.mergeend = mergeend\n\n\nclass CssExcelCell(ExcelCell):\n def __init__(\n self,\n row: int,\n col: int,\n val,\n style: dict | None,\n css_styles: dict[tuple[int, int], list[tuple[str, Any]]] | None,\n css_row: int,\n css_col: int,\n css_converter: Callable | None,\n **kwargs,\n ) -> None:\n if css_styles and css_converter:\n # Use dict to get only one (case-insensitive) declaration per property\n declaration_dict = {\n prop.lower(): val for prop, val in 
css_styles[css_row, css_col]\n }\n # Convert to frozenset for order-invariant caching\n unique_declarations = frozenset(declaration_dict.items())\n style = css_converter(unique_declarations)\n\n super().__init__(row=row, col=col, val=val, style=style, **kwargs)\n\n\nclass CSSToExcelConverter:\n """\n A callable for converting CSS declarations to ExcelWriter styles\n\n Supports parts of CSS 2.2, with minimal CSS 3.0 support (e.g. text-shadow),\n focusing on font styling, backgrounds, borders and alignment.\n\n Operates by first computing CSS styles in a fairly generic\n way (see :meth:`compute_css`) then determining Excel style\n properties from CSS properties (see :meth:`build_xlstyle`).\n\n Parameters\n ----------\n inherited : str, optional\n CSS declarations understood to be the containing scope for the\n CSS processed by :meth:`__call__`.\n """\n\n NAMED_COLORS = CSS4_COLORS\n\n VERTICAL_MAP = {\n "top": "top",\n "text-top": "top",\n "middle": "center",\n "baseline": "bottom",\n "bottom": "bottom",\n "text-bottom": "bottom",\n # OpenXML also has 'justify', 'distributed'\n }\n\n BOLD_MAP = {\n "bold": True,\n "bolder": True,\n "600": True,\n "700": True,\n "800": True,\n "900": True,\n "normal": False,\n "lighter": False,\n "100": False,\n "200": False,\n "300": False,\n "400": False,\n "500": False,\n }\n\n ITALIC_MAP = {\n "normal": False,\n "italic": True,\n "oblique": True,\n }\n\n FAMILY_MAP = {\n "serif": 1, # roman\n "sans-serif": 2, # swiss\n "cursive": 4, # script\n "fantasy": 5, # decorative\n }\n\n BORDER_STYLE_MAP = {\n style.lower(): style\n for style in [\n "dashed",\n "mediumDashDot",\n "dashDotDot",\n "hair",\n "dotted",\n "mediumDashDotDot",\n "double",\n "dashDot",\n "slantDashDot",\n "mediumDashed",\n ]\n }\n\n # NB: Most of the methods here could be classmethods, as only __init__\n # and __call__ make use of instance attributes. 
We leave them as\n # instancemethods so that users can easily experiment with extensions\n # without monkey-patching.\n inherited: dict[str, str] | None\n\n def __init__(self, inherited: str | None = None) -> None:\n if inherited is not None:\n self.inherited = self.compute_css(inherited)\n else:\n self.inherited = None\n # We should avoid cache on the __call__ method.\n # Otherwise once the method __call__ has been called\n # garbage collection no longer deletes the instance.\n self._call_cached = functools.cache(self._call_uncached)\n\n compute_css = CSSResolver()\n\n def __call__(\n self, declarations: str | frozenset[tuple[str, str]]\n ) -> dict[str, dict[str, str]]:\n """\n Convert CSS declarations to ExcelWriter style.\n\n Parameters\n ----------\n declarations : str | frozenset[tuple[str, str]]\n CSS string or set of CSS declaration tuples.\n e.g. "font-weight: bold; background: blue" or\n {("font-weight", "bold"), ("background", "blue")}\n\n Returns\n -------\n xlstyle : dict\n A style as interpreted by ExcelWriter when found in\n ExcelCell.style.\n """\n return self._call_cached(declarations)\n\n def _call_uncached(\n self, declarations: str | frozenset[tuple[str, str]]\n ) -> dict[str, dict[str, str]]:\n properties = self.compute_css(declarations, self.inherited)\n return self.build_xlstyle(properties)\n\n def build_xlstyle(self, props: Mapping[str, str]) -> dict[str, dict[str, str]]:\n out = {\n "alignment": self.build_alignment(props),\n "border": self.build_border(props),\n "fill": self.build_fill(props),\n "font": self.build_font(props),\n "number_format": self.build_number_format(props),\n }\n\n # TODO: handle cell width and height: needs support in pandas.io.excel\n\n def remove_none(d: dict[str, str | None]) -> None:\n """Remove key where value is None, through nested dicts"""\n for k, v in list(d.items()):\n if v is None:\n del d[k]\n elif isinstance(v, dict):\n remove_none(v)\n if not v:\n del d[k]\n\n remove_none(out)\n return out\n\n def 
build_alignment(self, props: Mapping[str, str]) -> dict[str, bool | str | None]:\n # TODO: text-indent, padding-left -> alignment.indent\n return {\n "horizontal": props.get("text-align"),\n "vertical": self._get_vertical_alignment(props),\n "wrap_text": self._get_is_wrap_text(props),\n }\n\n def _get_vertical_alignment(self, props: Mapping[str, str]) -> str | None:\n vertical_align = props.get("vertical-align")\n if vertical_align:\n return self.VERTICAL_MAP.get(vertical_align)\n return None\n\n def _get_is_wrap_text(self, props: Mapping[str, str]) -> bool | None:\n if props.get("white-space") is None:\n return None\n return bool(props["white-space"] not in ("nowrap", "pre", "pre-line"))\n\n def build_border(\n self, props: Mapping[str, str]\n ) -> dict[str, dict[str, str | None]]:\n return {\n side: {\n "style": self._border_style(\n props.get(f"border-{side}-style"),\n props.get(f"border-{side}-width"),\n self.color_to_excel(props.get(f"border-{side}-color")),\n ),\n "color": self.color_to_excel(props.get(f"border-{side}-color")),\n }\n for side in ["top", "right", "bottom", "left"]\n }\n\n def _border_style(self, style: str | None, width: str | None, color: str | None):\n # convert styles and widths to openxml, one of:\n # 'dashDot'\n # 'dashDotDot'\n # 'dashed'\n # 'dotted'\n # 'double'\n # 'hair'\n # 'medium'\n # 'mediumDashDot'\n # 'mediumDashDotDot'\n # 'mediumDashed'\n # 'slantDashDot'\n # 'thick'\n # 'thin'\n if width is None and style is None and color is None:\n # Return None will remove "border" from style dictionary\n return None\n\n if width is None and style is None:\n # Return "none" will keep "border" in style dictionary\n return "none"\n\n if style in ("none", "hidden"):\n return "none"\n\n width_name = self._get_width_name(width)\n if width_name is None:\n return "none"\n\n if style in (None, "groove", "ridge", "inset", "outset", "solid"):\n # not handled\n return width_name\n\n if style == "double":\n return "double"\n if style == "dotted":\n 
if width_name in ("hair", "thin"):\n return "dotted"\n return "mediumDashDotDot"\n if style == "dashed":\n if width_name in ("hair", "thin"):\n return "dashed"\n return "mediumDashed"\n elif style in self.BORDER_STYLE_MAP:\n # Excel-specific styles\n return self.BORDER_STYLE_MAP[style]\n else:\n warnings.warn(\n f"Unhandled border style format: {repr(style)}",\n CSSWarning,\n stacklevel=find_stack_level(),\n )\n return "none"\n\n def _get_width_name(self, width_input: str | None) -> str | None:\n width = self._width_to_float(width_input)\n if width < 1e-5:\n return None\n elif width < 1.3:\n return "thin"\n elif width < 2.8:\n return "medium"\n return "thick"\n\n def _width_to_float(self, width: str | None) -> float:\n if width is None:\n width = "2pt"\n return self._pt_to_float(width)\n\n def _pt_to_float(self, pt_string: str) -> float:\n assert pt_string.endswith("pt")\n return float(pt_string.rstrip("pt"))\n\n def build_fill(self, props: Mapping[str, str]):\n # TODO: perhaps allow for special properties\n # -excel-pattern-bgcolor and -excel-pattern-type\n fill_color = props.get("background-color")\n if fill_color not in (None, "transparent", "none"):\n return {"fgColor": self.color_to_excel(fill_color), "patternType": "solid"}\n\n def build_number_format(self, props: Mapping[str, str]) -> dict[str, str | None]:\n fc = props.get("number-format")\n fc = fc.replace("§", ";") if isinstance(fc, str) else fc\n return {"format_code": fc}\n\n def build_font(\n self, props: Mapping[str, str]\n ) -> dict[str, bool | float | str | None]:\n font_names = self._get_font_names(props)\n decoration = self._get_decoration(props)\n return {\n "name": font_names[0] if font_names else None,\n "family": self._select_font_family(font_names),\n "size": self._get_font_size(props),\n "bold": self._get_is_bold(props),\n "italic": self._get_is_italic(props),\n "underline": ("single" if "underline" in decoration else None),\n "strike": ("line-through" in decoration) or None,\n "color": 
self.color_to_excel(props.get("color")),\n # shadow if nonzero digit before shadow color\n "shadow": self._get_shadow(props),\n }\n\n def _get_is_bold(self, props: Mapping[str, str]) -> bool | None:\n weight = props.get("font-weight")\n if weight:\n return self.BOLD_MAP.get(weight)\n return None\n\n def _get_is_italic(self, props: Mapping[str, str]) -> bool | None:\n font_style = props.get("font-style")\n if font_style:\n return self.ITALIC_MAP.get(font_style)\n return None\n\n def _get_decoration(self, props: Mapping[str, str]) -> Sequence[str]:\n decoration = props.get("text-decoration")\n if decoration is not None:\n return decoration.split()\n else:\n return ()\n\n def _get_underline(self, decoration: Sequence[str]) -> str | None:\n if "underline" in decoration:\n return "single"\n return None\n\n def _get_shadow(self, props: Mapping[str, str]) -> bool | None:\n if "text-shadow" in props:\n return bool(re.search("^[^#(]*[1-9]", props["text-shadow"]))\n return None\n\n def _get_font_names(self, props: Mapping[str, str]) -> Sequence[str]:\n font_names_tmp = re.findall(\n r"""(?x)\n (\n "(?:[^"]|\\")+"\n |\n '(?:[^']|\\')+'\n |\n [^'",]+\n )(?=,|\s*$)\n """,\n props.get("font-family", ""),\n )\n\n font_names = []\n for name in font_names_tmp:\n if name[:1] == '"':\n name = name[1:-1].replace('\\"', '"')\n elif name[:1] == "'":\n name = name[1:-1].replace("\\'", "'")\n else:\n name = name.strip()\n if name:\n font_names.append(name)\n return font_names\n\n def _get_font_size(self, props: Mapping[str, str]) -> float | None:\n size = props.get("font-size")\n if size is None:\n return size\n return self._pt_to_float(size)\n\n def _select_font_family(self, font_names: Sequence[str]) -> int | None:\n family = None\n for name in font_names:\n family = self.FAMILY_MAP.get(name)\n if family:\n break\n\n return family\n\n def color_to_excel(self, val: str | None) -> str | None:\n if val is None:\n return None\n\n if self._is_hex_color(val):\n return 
self._convert_hex_to_excel(val)\n\n try:\n return self.NAMED_COLORS[val]\n except KeyError:\n warnings.warn(\n f"Unhandled color format: {repr(val)}",\n CSSWarning,\n stacklevel=find_stack_level(),\n )\n return None\n\n def _is_hex_color(self, color_string: str) -> bool:\n return bool(color_string.startswith("#"))\n\n def _convert_hex_to_excel(self, color_string: str) -> str:\n code = color_string.lstrip("#")\n if self._is_shorthand_color(color_string):\n return (code[0] * 2 + code[1] * 2 + code[2] * 2).upper()\n else:\n return code.upper()\n\n def _is_shorthand_color(self, color_string: str) -> bool:\n """Check if color code is shorthand.\n\n #FFF is a shorthand as opposed to full #FFFFFF.\n """\n code = color_string.lstrip("#")\n if len(code) == 3:\n return True\n elif len(code) == 6:\n return False\n else:\n raise ValueError(f"Unexpected color {color_string}")\n\n\nclass ExcelFormatter:\n """\n Class for formatting a DataFrame to a list of ExcelCells,\n\n Parameters\n ----------\n df : DataFrame or Styler\n na_rep: na representation\n float_format : str, default None\n Format string for floating point numbers\n cols : sequence, optional\n Columns to write\n header : bool or sequence of str, default True\n Write out column names. If a list of string is given it is\n assumed to be aliases for the column names\n index : bool, default True\n output row names (index)\n index_label : str or sequence, default None\n Column label for index column(s) if desired. If None is given, and\n `header` and `index` are True, then the index names are used. 
A\n sequence should be given if the DataFrame uses MultiIndex.\n merge_cells : bool, default False\n Format MultiIndex and Hierarchical Rows as merged cells.\n inf_rep : str, default `'inf'`\n representation for np.inf values (which aren't representable in Excel)\n A `'-'` sign will be added in front of -inf.\n style_converter : callable, optional\n This translates Styler styles (CSS) into ExcelWriter styles.\n Defaults to ``CSSToExcelConverter()``.\n It should have signature css_declarations string -> excel style.\n This is only called for body cells.\n """\n\n max_rows = 2**20\n max_cols = 2**14\n\n def __init__(\n self,\n df,\n na_rep: str = "",\n float_format: str | None = None,\n cols: Sequence[Hashable] | None = None,\n header: Sequence[Hashable] | bool = True,\n index: bool = True,\n index_label: IndexLabel | None = None,\n merge_cells: bool = False,\n inf_rep: str = "inf",\n style_converter: Callable | None = None,\n ) -> None:\n self.rowcounter = 0\n self.na_rep = na_rep\n if not isinstance(df, DataFrame):\n self.styler = df\n self.styler._compute() # calculate applied styles\n df = df.data\n if style_converter is None:\n style_converter = CSSToExcelConverter()\n self.style_converter: Callable | None = style_converter\n else:\n self.styler = None\n self.style_converter = None\n self.df = df\n if cols is not None:\n # all missing, raise\n if not len(Index(cols).intersection(df.columns)):\n raise KeyError("passes columns are not ALL present dataframe")\n\n if len(Index(cols).intersection(df.columns)) != len(set(cols)):\n # Deprecated in GH#17295, enforced in 1.0.0\n raise KeyError("Not all names specified in 'columns' are found")\n\n self.df = df.reindex(columns=cols)\n\n self.columns = self.df.columns\n self.float_format = float_format\n self.index = index\n self.index_label = index_label\n self.header = header\n self.merge_cells = merge_cells\n self.inf_rep = inf_rep\n\n @property\n def header_style(self) -> dict[str, dict[str, str | bool]]:\n return {\n 
"font": {"bold": True},\n "borders": {\n "top": "thin",\n "right": "thin",\n "bottom": "thin",\n "left": "thin",\n },\n "alignment": {"horizontal": "center", "vertical": "top"},\n }\n\n def _format_value(self, val):\n if is_scalar(val) and missing.isna(val):\n val = self.na_rep\n elif is_float(val):\n if missing.isposinf_scalar(val):\n val = self.inf_rep\n elif missing.isneginf_scalar(val):\n val = f"-{self.inf_rep}"\n elif self.float_format is not None:\n val = float(self.float_format % val)\n if getattr(val, "tzinfo", None) is not None:\n raise ValueError(\n "Excel does not support datetimes with "\n "timezones. Please ensure that datetimes "\n "are timezone unaware before writing to Excel."\n )\n return val\n\n def _format_header_mi(self) -> Iterable[ExcelCell]:\n if self.columns.nlevels > 1:\n if not self.index:\n raise NotImplementedError(\n "Writing to Excel with MultiIndex columns and no "\n "index ('index'=False) is not yet implemented."\n )\n\n if not (self._has_aliases or self.header):\n return\n\n columns = self.columns\n level_strs = columns._format_multi(\n sparsify=self.merge_cells, include_names=False\n )\n level_lengths = get_level_lengths(level_strs)\n coloffset = 0\n lnum = 0\n\n if self.index and isinstance(self.df.index, MultiIndex):\n coloffset = len(self.df.index[0]) - 1\n\n if self.merge_cells:\n # Format multi-index as a merged cells.\n for lnum, name in enumerate(columns.names):\n yield ExcelCell(\n row=lnum,\n col=coloffset,\n val=name,\n style=self.header_style,\n )\n\n for lnum, (spans, levels, level_codes) in enumerate(\n zip(level_lengths, columns.levels, columns.codes)\n ):\n values = levels.take(level_codes)\n for i, span_val in spans.items():\n mergestart, mergeend = None, None\n if span_val > 1:\n mergestart, mergeend = lnum, coloffset + i + span_val\n yield CssExcelCell(\n row=lnum,\n col=coloffset + i + 1,\n val=values[i],\n style=self.header_style,\n css_styles=getattr(self.styler, "ctx_columns", None),\n css_row=lnum,\n 
css_col=i,\n css_converter=self.style_converter,\n mergestart=mergestart,\n mergeend=mergeend,\n )\n else:\n # Format in legacy format with dots to indicate levels.\n for i, values in enumerate(zip(*level_strs)):\n v = ".".join(map(pprint_thing, values))\n yield CssExcelCell(\n row=lnum,\n col=coloffset + i + 1,\n val=v,\n style=self.header_style,\n css_styles=getattr(self.styler, "ctx_columns", None),\n css_row=lnum,\n css_col=i,\n css_converter=self.style_converter,\n )\n\n self.rowcounter = lnum\n\n def _format_header_regular(self) -> Iterable[ExcelCell]:\n if self._has_aliases or self.header:\n coloffset = 0\n\n if self.index:\n coloffset = 1\n if isinstance(self.df.index, MultiIndex):\n coloffset = len(self.df.index.names)\n\n colnames = self.columns\n if self._has_aliases:\n self.header = cast(Sequence, self.header)\n if len(self.header) != len(self.columns):\n raise ValueError(\n f"Writing {len(self.columns)} cols "\n f"but got {len(self.header)} aliases"\n )\n colnames = self.header\n\n for colindex, colname in enumerate(colnames):\n yield CssExcelCell(\n row=self.rowcounter,\n col=colindex + coloffset,\n val=colname,\n style=self.header_style,\n css_styles=getattr(self.styler, "ctx_columns", None),\n css_row=0,\n css_col=colindex,\n css_converter=self.style_converter,\n )\n\n def _format_header(self) -> Iterable[ExcelCell]:\n gen: Iterable[ExcelCell]\n\n if isinstance(self.columns, MultiIndex):\n gen = self._format_header_mi()\n else:\n gen = self._format_header_regular()\n\n gen2: Iterable[ExcelCell] = ()\n\n if self.df.index.names:\n row = [x if x is not None else "" for x in self.df.index.names] + [\n ""\n ] * len(self.columns)\n if functools.reduce(lambda x, y: x and y, (x != "" for x in row)):\n gen2 = (\n ExcelCell(self.rowcounter, colindex, val, self.header_style)\n for colindex, val in enumerate(row)\n )\n self.rowcounter += 1\n return itertools.chain(gen, gen2)\n\n def _format_body(self) -> Iterable[ExcelCell]:\n if isinstance(self.df.index, 
MultiIndex):\n return self._format_hierarchical_rows()\n else:\n return self._format_regular_rows()\n\n def _format_regular_rows(self) -> Iterable[ExcelCell]:\n if self._has_aliases or self.header:\n self.rowcounter += 1\n\n # output index and index_label?\n if self.index:\n # check aliases\n # if list only take first as this is not a MultiIndex\n if self.index_label and isinstance(\n self.index_label, (list, tuple, np.ndarray, Index)\n ):\n index_label = self.index_label[0]\n # if string good to go\n elif self.index_label and isinstance(self.index_label, str):\n index_label = self.index_label\n else:\n index_label = self.df.index.names[0]\n\n if isinstance(self.columns, MultiIndex):\n self.rowcounter += 1\n\n if index_label and self.header is not False:\n yield ExcelCell(self.rowcounter - 1, 0, index_label, self.header_style)\n\n # write index_values\n index_values = self.df.index\n if isinstance(self.df.index, PeriodIndex):\n index_values = self.df.index.to_timestamp()\n\n for idx, idxval in enumerate(index_values):\n yield CssExcelCell(\n row=self.rowcounter + idx,\n col=0,\n val=idxval,\n style=self.header_style,\n css_styles=getattr(self.styler, "ctx_index", None),\n css_row=idx,\n css_col=0,\n css_converter=self.style_converter,\n )\n coloffset = 1\n else:\n coloffset = 0\n\n yield from self._generate_body(coloffset)\n\n def _format_hierarchical_rows(self) -> Iterable[ExcelCell]:\n if self._has_aliases or self.header:\n self.rowcounter += 1\n\n gcolidx = 0\n\n if self.index:\n index_labels = self.df.index.names\n # check for aliases\n if self.index_label and isinstance(\n self.index_label, (list, tuple, np.ndarray, Index)\n ):\n index_labels = self.index_label\n\n # MultiIndex columns require an extra row\n # with index names (blank if None) for\n # unambiguous round-trip, unless not merging,\n # in which case the names all go on one row Issue #11328\n if isinstance(self.columns, MultiIndex) and self.merge_cells:\n self.rowcounter += 1\n\n # if index labels 
are not empty go ahead and dump\n if com.any_not_none(*index_labels) and self.header is not False:\n for cidx, name in enumerate(index_labels):\n yield ExcelCell(self.rowcounter - 1, cidx, name, self.header_style)\n\n if self.merge_cells:\n # Format hierarchical rows as merged cells.\n level_strs = self.df.index._format_multi(\n sparsify=True, include_names=False\n )\n level_lengths = get_level_lengths(level_strs)\n\n for spans, levels, level_codes in zip(\n level_lengths, self.df.index.levels, self.df.index.codes\n ):\n values = levels.take(\n level_codes,\n allow_fill=levels._can_hold_na,\n fill_value=levels._na_value,\n )\n\n for i, span_val in spans.items():\n mergestart, mergeend = None, None\n if span_val > 1:\n mergestart = self.rowcounter + i + span_val - 1\n mergeend = gcolidx\n yield CssExcelCell(\n row=self.rowcounter + i,\n col=gcolidx,\n val=values[i],\n style=self.header_style,\n css_styles=getattr(self.styler, "ctx_index", None),\n css_row=i,\n css_col=gcolidx,\n css_converter=self.style_converter,\n mergestart=mergestart,\n mergeend=mergeend,\n )\n gcolidx += 1\n\n else:\n # Format hierarchical rows with non-merged values.\n for indexcolvals in zip(*self.df.index):\n for idx, indexcolval in enumerate(indexcolvals):\n yield CssExcelCell(\n row=self.rowcounter + idx,\n col=gcolidx,\n val=indexcolval,\n style=self.header_style,\n css_styles=getattr(self.styler, "ctx_index", None),\n css_row=idx,\n css_col=gcolidx,\n css_converter=self.style_converter,\n )\n gcolidx += 1\n\n yield from self._generate_body(gcolidx)\n\n @property\n def _has_aliases(self) -> bool:\n """Whether the aliases for column names are present."""\n return is_list_like(self.header)\n\n def _generate_body(self, coloffset: int) -> Iterable[ExcelCell]:\n # Write the body of the frame data series by series.\n for colidx in range(len(self.columns)):\n series = self.df.iloc[:, colidx]\n for i, val in enumerate(series):\n yield CssExcelCell(\n row=self.rowcounter + i,\n col=colidx + 
coloffset,\n val=val,\n style=None,\n css_styles=getattr(self.styler, "ctx", None),\n css_row=i,\n css_col=colidx,\n css_converter=self.style_converter,\n )\n\n def get_formatted_cells(self) -> Iterable[ExcelCell]:\n for cell in itertools.chain(self._format_header(), self._format_body()):\n cell.val = self._format_value(cell.val)\n yield cell\n\n @doc(storage_options=_shared_docs["storage_options"])\n def write(\n self,\n writer: FilePath | WriteExcelBuffer | ExcelWriter,\n sheet_name: str = "Sheet1",\n startrow: int = 0,\n startcol: int = 0,\n freeze_panes: tuple[int, int] | None = None,\n engine: str | None = None,\n storage_options: StorageOptions | None = None,\n engine_kwargs: dict | None = None,\n ) -> None:\n """\n writer : path-like, file-like, or ExcelWriter object\n File path or existing ExcelWriter\n sheet_name : str, default 'Sheet1'\n Name of sheet which will contain DataFrame\n startrow :\n upper left cell row to dump data frame\n startcol :\n upper left cell column to dump data frame\n freeze_panes : tuple of integer (length 2), default None\n Specifies the one-based bottommost row and rightmost column that\n is to be frozen\n engine : string, default None\n write engine to use if writer is a path - you can also set this\n via the options ``io.excel.xlsx.writer``,\n or ``io.excel.xlsm.writer``.\n\n {storage_options}\n\n engine_kwargs: dict, optional\n Arbitrary keyword arguments passed to excel engine.\n """\n from pandas.io.excel import ExcelWriter\n\n num_rows, num_cols = self.df.shape\n if num_rows > self.max_rows or num_cols > self.max_cols:\n raise ValueError(\n f"This sheet is too large! 
Your sheet size is: {num_rows}, {num_cols} "\n f"Max sheet size is: {self.max_rows}, {self.max_cols}"\n )\n\n if engine_kwargs is None:\n engine_kwargs = {}\n\n formatted_cells = self.get_formatted_cells()\n if isinstance(writer, ExcelWriter):\n need_save = False\n else:\n writer = ExcelWriter(\n writer,\n engine=engine,\n storage_options=storage_options,\n engine_kwargs=engine_kwargs,\n )\n need_save = True\n\n try:\n writer._write_cells(\n formatted_cells,\n sheet_name,\n startrow=startrow,\n startcol=startcol,\n freeze_panes=freeze_panes,\n )\n finally:\n # make sure to close opened file handles\n if need_save:\n writer.close()\n
.venv\Lib\site-packages\pandas\io\formats\excel.py
excel.py
Python
32,994
0.95
0.178794
0.064286
vue-tools
90
2024-11-27T14:40:39.771484
BSD-3-Clause
false
3a86d279342321994706d63a90cd9576
"""\nInternal module for formatting output data in csv, html, xml,\nand latex files. This module also applies to display formatting.\n"""\nfrom __future__ import annotations\n\nfrom collections.abc import (\n Generator,\n Hashable,\n Mapping,\n Sequence,\n)\nfrom contextlib import contextmanager\nfrom csv import QUOTE_NONE\nfrom decimal import Decimal\nfrom functools import partial\nfrom io import StringIO\nimport math\nimport re\nfrom shutil import get_terminal_size\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Final,\n cast,\n)\n\nimport numpy as np\n\nfrom pandas._config.config import (\n get_option,\n set_option,\n)\n\nfrom pandas._libs import lib\nfrom pandas._libs.missing import NA\nfrom pandas._libs.tslibs import (\n NaT,\n Timedelta,\n Timestamp,\n)\nfrom pandas._libs.tslibs.nattype import NaTType\n\nfrom pandas.core.dtypes.common import (\n is_complex_dtype,\n is_float,\n is_integer,\n is_list_like,\n is_numeric_dtype,\n is_scalar,\n)\nfrom pandas.core.dtypes.dtypes import (\n CategoricalDtype,\n DatetimeTZDtype,\n ExtensionDtype,\n)\nfrom pandas.core.dtypes.missing import (\n isna,\n notna,\n)\n\nfrom pandas.core.arrays import (\n Categorical,\n DatetimeArray,\n ExtensionArray,\n TimedeltaArray,\n)\nfrom pandas.core.arrays.string_ import StringDtype\nfrom pandas.core.base import PandasObject\nimport pandas.core.common as com\nfrom pandas.core.indexes.api import (\n Index,\n MultiIndex,\n PeriodIndex,\n ensure_index,\n)\nfrom pandas.core.indexes.datetimes import DatetimeIndex\nfrom pandas.core.indexes.timedeltas import TimedeltaIndex\nfrom pandas.core.reshape.concat import concat\n\nfrom pandas.io.common import (\n check_parent_directory,\n stringify_path,\n)\nfrom pandas.io.formats import printing\n\nif TYPE_CHECKING:\n from pandas._typing import (\n ArrayLike,\n Axes,\n ColspaceArgType,\n ColspaceType,\n CompressionOptions,\n FilePath,\n FloatFormatType,\n FormattersType,\n IndexLabel,\n SequenceNotStr,\n StorageOptions,\n WriteBuffer,\n 
)\n\n from pandas import (\n DataFrame,\n Series,\n )\n\n\ncommon_docstring: Final = """\n Parameters\n ----------\n buf : str, Path or StringIO-like, optional, default None\n Buffer to write to. If None, the output is returned as a string.\n columns : array-like, optional, default None\n The subset of columns to write. Writes all columns by default.\n col_space : %(col_space_type)s, optional\n %(col_space)s.\n header : %(header_type)s, optional\n %(header)s.\n index : bool, optional, default True\n Whether to print index (row) labels.\n na_rep : str, optional, default 'NaN'\n String representation of ``NaN`` to use.\n formatters : list, tuple or dict of one-param. functions, optional\n Formatter functions to apply to columns' elements by position or\n name.\n The result of each function must be a unicode string.\n List/tuple must be of length equal to the number of columns.\n float_format : one-parameter function, optional, default None\n Formatter function to apply to columns' elements if they are\n floats. This function must return a unicode string and will be\n applied only to the non-``NaN`` elements, with ``NaN`` being\n handled by ``na_rep``.\n sparsify : bool, optional, default True\n Set to False for a DataFrame with a hierarchical index to print\n every multiindex key at each row.\n index_names : bool, optional, default True\n Prints the names of the indexes.\n justify : str, default None\n How to justify the column labels. If None uses the option from\n the print configuration (controlled by set_option), 'right' out\n of the box. 
Valid values are\n\n * left\n * right\n * center\n * justify\n * justify-all\n * start\n * end\n * inherit\n * match-parent\n * initial\n * unset.\n max_rows : int, optional\n Maximum number of rows to display in the console.\n max_cols : int, optional\n Maximum number of columns to display in the console.\n show_dimensions : bool, default False\n Display DataFrame dimensions (number of rows by number of columns).\n decimal : str, default '.'\n Character recognized as decimal separator, e.g. ',' in Europe.\n """\n\nVALID_JUSTIFY_PARAMETERS = (\n "left",\n "right",\n "center",\n "justify",\n "justify-all",\n "start",\n "end",\n "inherit",\n "match-parent",\n "initial",\n "unset",\n)\n\nreturn_docstring: Final = """\n Returns\n -------\n str or None\n If buf is None, returns the result as a string. Otherwise returns\n None.\n """\n\n\nclass SeriesFormatter:\n """\n Implement the main logic of Series.to_string, which underlies\n Series.__repr__.\n """\n\n def __init__(\n self,\n series: Series,\n *,\n length: bool | str = True,\n header: bool = True,\n index: bool = True,\n na_rep: str = "NaN",\n name: bool = False,\n float_format: str | None = None,\n dtype: bool = True,\n max_rows: int | None = None,\n min_rows: int | None = None,\n ) -> None:\n self.series = series\n self.buf = StringIO()\n self.name = name\n self.na_rep = na_rep\n self.header = header\n self.length = length\n self.index = index\n self.max_rows = max_rows\n self.min_rows = min_rows\n\n if float_format is None:\n float_format = get_option("display.float_format")\n self.float_format = float_format\n self.dtype = dtype\n self.adj = printing.get_adjustment()\n\n self._chk_truncate()\n\n def _chk_truncate(self) -> None:\n self.tr_row_num: int | None\n\n min_rows = self.min_rows\n max_rows = self.max_rows\n # truncation determined by max_rows, actual truncated number of rows\n # used below by min_rows\n is_truncated_vertically = max_rows and (len(self.series) > max_rows)\n series = self.series\n if 
is_truncated_vertically:\n max_rows = cast(int, max_rows)\n if min_rows:\n # if min_rows is set (not None or 0), set max_rows to minimum\n # of both\n max_rows = min(min_rows, max_rows)\n if max_rows == 1:\n row_num = max_rows\n series = series.iloc[:max_rows]\n else:\n row_num = max_rows // 2\n series = concat((series.iloc[:row_num], series.iloc[-row_num:]))\n self.tr_row_num = row_num\n else:\n self.tr_row_num = None\n self.tr_series = series\n self.is_truncated_vertically = is_truncated_vertically\n\n def _get_footer(self) -> str:\n name = self.series.name\n footer = ""\n\n index = self.series.index\n if (\n isinstance(index, (DatetimeIndex, PeriodIndex, TimedeltaIndex))\n and index.freq is not None\n ):\n footer += f"Freq: {index.freqstr}"\n\n if self.name is not False and name is not None:\n if footer:\n footer += ", "\n\n series_name = printing.pprint_thing(name, escape_chars=("\t", "\r", "\n"))\n footer += f"Name: {series_name}"\n\n if self.length is True or (\n self.length == "truncate" and self.is_truncated_vertically\n ):\n if footer:\n footer += ", "\n footer += f"Length: {len(self.series)}"\n\n if self.dtype is not False and self.dtype is not None:\n dtype_name = getattr(self.tr_series.dtype, "name", None)\n if dtype_name:\n if footer:\n footer += ", "\n footer += f"dtype: {printing.pprint_thing(dtype_name)}"\n\n # level infos are added to the end and in a new line, like it is done\n # for Categoricals\n if isinstance(self.tr_series.dtype, CategoricalDtype):\n level_info = self.tr_series._values._get_repr_footer()\n if footer:\n footer += "\n"\n footer += level_info\n\n return str(footer)\n\n def _get_formatted_values(self) -> list[str]:\n return format_array(\n self.tr_series._values,\n None,\n float_format=self.float_format,\n na_rep=self.na_rep,\n leading_space=self.index,\n )\n\n def to_string(self) -> str:\n series = self.tr_series\n footer = self._get_footer()\n\n if len(series) == 0:\n return f"{type(self.series).__name__}([], {footer})"\n\n 
index = series.index\n have_header = _has_names(index)\n if isinstance(index, MultiIndex):\n fmt_index = index._format_multi(include_names=True, sparsify=None)\n adj = printing.get_adjustment()\n fmt_index = adj.adjoin(2, *fmt_index).split("\n")\n else:\n fmt_index = index._format_flat(include_name=True)\n fmt_values = self._get_formatted_values()\n\n if self.is_truncated_vertically:\n n_header_rows = 0\n row_num = self.tr_row_num\n row_num = cast(int, row_num)\n width = self.adj.len(fmt_values[row_num - 1])\n if width > 3:\n dot_str = "..."\n else:\n dot_str = ".."\n # Series uses mode=center because it has single value columns\n # DataFrame uses mode=left\n dot_str = self.adj.justify([dot_str], width, mode="center")[0]\n fmt_values.insert(row_num + n_header_rows, dot_str)\n fmt_index.insert(row_num + 1, "")\n\n if self.index:\n result = self.adj.adjoin(3, *[fmt_index[1:], fmt_values])\n else:\n result = self.adj.adjoin(3, fmt_values)\n\n if self.header and have_header:\n result = fmt_index[0] + "\n" + result\n\n if footer:\n result += "\n" + footer\n\n return str("".join(result))\n\n\ndef get_dataframe_repr_params() -> dict[str, Any]:\n """Get the parameters used to repr(dataFrame) calls using DataFrame.to_string.\n\n Supplying these parameters to DataFrame.to_string is equivalent to calling\n ``repr(DataFrame)``. This is useful if you want to adjust the repr output.\n\n .. 
versionadded:: 1.4.0\n\n Example\n -------\n >>> import pandas as pd\n >>>\n >>> df = pd.DataFrame([[1, 2], [3, 4]])\n >>> repr_params = pd.io.formats.format.get_dataframe_repr_params()\n >>> repr(df) == df.to_string(**repr_params)\n True\n """\n from pandas.io.formats import console\n\n if get_option("display.expand_frame_repr"):\n line_width, _ = console.get_console_size()\n else:\n line_width = None\n return {\n "max_rows": get_option("display.max_rows"),\n "min_rows": get_option("display.min_rows"),\n "max_cols": get_option("display.max_columns"),\n "max_colwidth": get_option("display.max_colwidth"),\n "show_dimensions": get_option("display.show_dimensions"),\n "line_width": line_width,\n }\n\n\ndef get_series_repr_params() -> dict[str, Any]:\n """Get the parameters used to repr(Series) calls using Series.to_string.\n\n Supplying these parameters to Series.to_string is equivalent to calling\n ``repr(series)``. This is useful if you want to adjust the series repr output.\n\n .. versionadded:: 1.4.0\n\n Example\n -------\n >>> import pandas as pd\n >>>\n >>> ser = pd.Series([1, 2, 3, 4])\n >>> repr_params = pd.io.formats.format.get_series_repr_params()\n >>> repr(ser) == ser.to_string(**repr_params)\n True\n """\n width, height = get_terminal_size()\n max_rows_opt = get_option("display.max_rows")\n max_rows = height if max_rows_opt == 0 else max_rows_opt\n min_rows = height if max_rows_opt == 0 else get_option("display.min_rows")\n\n return {\n "name": True,\n "dtype": True,\n "min_rows": min_rows,\n "max_rows": max_rows,\n "length": get_option("display.show_dimensions"),\n }\n\n\nclass DataFrameFormatter:\n """\n Class for processing dataframe formatting options and data.\n\n Used by DataFrame.to_string, which backs DataFrame.__repr__.\n """\n\n __doc__ = __doc__ if __doc__ else ""\n __doc__ += common_docstring + return_docstring\n\n def __init__(\n self,\n frame: DataFrame,\n columns: Axes | None = None,\n col_space: ColspaceArgType | None = None,\n header: 
bool | SequenceNotStr[str] = True,\n index: bool = True,\n na_rep: str = "NaN",\n formatters: FormattersType | None = None,\n justify: str | None = None,\n float_format: FloatFormatType | None = None,\n sparsify: bool | None = None,\n index_names: bool = True,\n max_rows: int | None = None,\n min_rows: int | None = None,\n max_cols: int | None = None,\n show_dimensions: bool | str = False,\n decimal: str = ".",\n bold_rows: bool = False,\n escape: bool = True,\n ) -> None:\n self.frame = frame\n self.columns = self._initialize_columns(columns)\n self.col_space = self._initialize_colspace(col_space)\n self.header = header\n self.index = index\n self.na_rep = na_rep\n self.formatters = self._initialize_formatters(formatters)\n self.justify = self._initialize_justify(justify)\n self.float_format = float_format\n self.sparsify = self._initialize_sparsify(sparsify)\n self.show_index_names = index_names\n self.decimal = decimal\n self.bold_rows = bold_rows\n self.escape = escape\n self.max_rows = max_rows\n self.min_rows = min_rows\n self.max_cols = max_cols\n self.show_dimensions = show_dimensions\n\n self.max_cols_fitted = self._calc_max_cols_fitted()\n self.max_rows_fitted = self._calc_max_rows_fitted()\n\n self.tr_frame = self.frame\n self.truncate()\n self.adj = printing.get_adjustment()\n\n def get_strcols(self) -> list[list[str]]:\n """\n Render a DataFrame to a list of columns (as lists of strings).\n """\n strcols = self._get_strcols_without_index()\n\n if self.index:\n str_index = self._get_formatted_index(self.tr_frame)\n strcols.insert(0, str_index)\n\n return strcols\n\n @property\n def should_show_dimensions(self) -> bool:\n return self.show_dimensions is True or (\n self.show_dimensions == "truncate" and self.is_truncated\n )\n\n @property\n def is_truncated(self) -> bool:\n return bool(self.is_truncated_horizontally or self.is_truncated_vertically)\n\n @property\n def is_truncated_horizontally(self) -> bool:\n return bool(self.max_cols_fitted and 
(len(self.columns) > self.max_cols_fitted))\n\n @property\n def is_truncated_vertically(self) -> bool:\n return bool(self.max_rows_fitted and (len(self.frame) > self.max_rows_fitted))\n\n @property\n def dimensions_info(self) -> str:\n return f"\n\n[{len(self.frame)} rows x {len(self.frame.columns)} columns]"\n\n @property\n def has_index_names(self) -> bool:\n return _has_names(self.frame.index)\n\n @property\n def has_column_names(self) -> bool:\n return _has_names(self.frame.columns)\n\n @property\n def show_row_idx_names(self) -> bool:\n return all((self.has_index_names, self.index, self.show_index_names))\n\n @property\n def show_col_idx_names(self) -> bool:\n return all((self.has_column_names, self.show_index_names, self.header))\n\n @property\n def max_rows_displayed(self) -> int:\n return min(self.max_rows or len(self.frame), len(self.frame))\n\n def _initialize_sparsify(self, sparsify: bool | None) -> bool:\n if sparsify is None:\n return get_option("display.multi_sparse")\n return sparsify\n\n def _initialize_formatters(\n self, formatters: FormattersType | None\n ) -> FormattersType:\n if formatters is None:\n return {}\n elif len(self.frame.columns) == len(formatters) or isinstance(formatters, dict):\n return formatters\n else:\n raise ValueError(\n f"Formatters length({len(formatters)}) should match "\n f"DataFrame number of columns({len(self.frame.columns)})"\n )\n\n def _initialize_justify(self, justify: str | None) -> str:\n if justify is None:\n return get_option("display.colheader_justify")\n else:\n return justify\n\n def _initialize_columns(self, columns: Axes | None) -> Index:\n if columns is not None:\n cols = ensure_index(columns)\n self.frame = self.frame[cols]\n return cols\n else:\n return self.frame.columns\n\n def _initialize_colspace(self, col_space: ColspaceArgType | None) -> ColspaceType:\n result: ColspaceType\n\n if col_space is None:\n result = {}\n elif isinstance(col_space, (int, str)):\n result = {"": col_space}\n 
result.update({column: col_space for column in self.frame.columns})\n elif isinstance(col_space, Mapping):\n for column in col_space.keys():\n if column not in self.frame.columns and column != "":\n raise ValueError(\n f"Col_space is defined for an unknown column: {column}"\n )\n result = col_space\n else:\n if len(self.frame.columns) != len(col_space):\n raise ValueError(\n f"Col_space length({len(col_space)}) should match "\n f"DataFrame number of columns({len(self.frame.columns)})"\n )\n result = dict(zip(self.frame.columns, col_space))\n return result\n\n def _calc_max_cols_fitted(self) -> int | None:\n """Number of columns fitting the screen."""\n if not self._is_in_terminal():\n return self.max_cols\n\n width, _ = get_terminal_size()\n if self._is_screen_narrow(width):\n return width\n else:\n return self.max_cols\n\n def _calc_max_rows_fitted(self) -> int | None:\n """Number of rows with data fitting the screen."""\n max_rows: int | None\n\n if self._is_in_terminal():\n _, height = get_terminal_size()\n if self.max_rows == 0:\n # rows available to fill with actual data\n return height - self._get_number_of_auxiliary_rows()\n\n if self._is_screen_short(height):\n max_rows = height\n else:\n max_rows = self.max_rows\n else:\n max_rows = self.max_rows\n\n return self._adjust_max_rows(max_rows)\n\n def _adjust_max_rows(self, max_rows: int | None) -> int | None:\n """Adjust max_rows using display logic.\n\n See description here:\n https://pandas.pydata.org/docs/dev/user_guide/options.html#frequently-used-options\n\n GH #37359\n """\n if max_rows:\n if (len(self.frame) > max_rows) and self.min_rows:\n # if truncated, set max_rows showed to min_rows\n max_rows = min(self.min_rows, max_rows)\n return max_rows\n\n def _is_in_terminal(self) -> bool:\n """Check if the output is to be shown in terminal."""\n return bool(self.max_cols == 0 or self.max_rows == 0)\n\n def _is_screen_narrow(self, max_width) -> bool:\n return bool(self.max_cols == 0 and 
len(self.frame.columns) > max_width)\n\n def _is_screen_short(self, max_height) -> bool:\n return bool(self.max_rows == 0 and len(self.frame) > max_height)\n\n def _get_number_of_auxiliary_rows(self) -> int:\n """Get number of rows occupied by prompt, dots and dimension info."""\n dot_row = 1\n prompt_row = 1\n num_rows = dot_row + prompt_row\n\n if self.show_dimensions:\n num_rows += len(self.dimensions_info.splitlines())\n\n if self.header:\n num_rows += 1\n\n return num_rows\n\n def truncate(self) -> None:\n """\n Check whether the frame should be truncated. If so, slice the frame up.\n """\n if self.is_truncated_horizontally:\n self._truncate_horizontally()\n\n if self.is_truncated_vertically:\n self._truncate_vertically()\n\n def _truncate_horizontally(self) -> None:\n """Remove columns, which are not to be displayed and adjust formatters.\n\n Attributes affected:\n - tr_frame\n - formatters\n - tr_col_num\n """\n assert self.max_cols_fitted is not None\n col_num = self.max_cols_fitted // 2\n if col_num >= 1:\n left = self.tr_frame.iloc[:, :col_num]\n right = self.tr_frame.iloc[:, -col_num:]\n self.tr_frame = concat((left, right), axis=1)\n\n # truncate formatter\n if isinstance(self.formatters, (list, tuple)):\n self.formatters = [\n *self.formatters[:col_num],\n *self.formatters[-col_num:],\n ]\n else:\n col_num = cast(int, self.max_cols)\n self.tr_frame = self.tr_frame.iloc[:, :col_num]\n self.tr_col_num = col_num\n\n def _truncate_vertically(self) -> None:\n """Remove rows, which are not to be displayed.\n\n Attributes affected:\n - tr_frame\n - tr_row_num\n """\n assert self.max_rows_fitted is not None\n row_num = self.max_rows_fitted // 2\n if row_num >= 1:\n _len = len(self.tr_frame)\n _slice = np.hstack([np.arange(row_num), np.arange(_len - row_num, _len)])\n self.tr_frame = self.tr_frame.iloc[_slice]\n else:\n row_num = cast(int, self.max_rows)\n self.tr_frame = self.tr_frame.iloc[:row_num, :]\n self.tr_row_num = row_num\n\n def 
_get_strcols_without_index(self) -> list[list[str]]:\n strcols: list[list[str]] = []\n\n if not is_list_like(self.header) and not self.header:\n for i, c in enumerate(self.tr_frame):\n fmt_values = self.format_col(i)\n fmt_values = _make_fixed_width(\n strings=fmt_values,\n justify=self.justify,\n minimum=int(self.col_space.get(c, 0)),\n adj=self.adj,\n )\n strcols.append(fmt_values)\n return strcols\n\n if is_list_like(self.header):\n # cast here since can't be bool if is_list_like\n self.header = cast(list[str], self.header)\n if len(self.header) != len(self.columns):\n raise ValueError(\n f"Writing {len(self.columns)} cols "\n f"but got {len(self.header)} aliases"\n )\n str_columns = [[label] for label in self.header]\n else:\n str_columns = self._get_formatted_column_labels(self.tr_frame)\n\n if self.show_row_idx_names:\n for x in str_columns:\n x.append("")\n\n for i, c in enumerate(self.tr_frame):\n cheader = str_columns[i]\n header_colwidth = max(\n int(self.col_space.get(c, 0)), *(self.adj.len(x) for x in cheader)\n )\n fmt_values = self.format_col(i)\n fmt_values = _make_fixed_width(\n fmt_values, self.justify, minimum=header_colwidth, adj=self.adj\n )\n\n max_len = max(*(self.adj.len(x) for x in fmt_values), header_colwidth)\n cheader = self.adj.justify(cheader, max_len, mode=self.justify)\n strcols.append(cheader + fmt_values)\n\n return strcols\n\n def format_col(self, i: int) -> list[str]:\n frame = self.tr_frame\n formatter = self._get_formatter(i)\n return format_array(\n frame.iloc[:, i]._values,\n formatter,\n float_format=self.float_format,\n na_rep=self.na_rep,\n space=self.col_space.get(frame.columns[i]),\n decimal=self.decimal,\n leading_space=self.index,\n )\n\n def _get_formatter(self, i: str | int) -> Callable | None:\n if isinstance(self.formatters, (list, tuple)):\n if is_integer(i):\n i = cast(int, i)\n return self.formatters[i]\n else:\n return None\n else:\n if is_integer(i) and i not in self.columns:\n i = self.columns[i]\n return 
self.formatters.get(i, None)\n\n def _get_formatted_column_labels(self, frame: DataFrame) -> list[list[str]]:\n from pandas.core.indexes.multi import sparsify_labels\n\n columns = frame.columns\n\n if isinstance(columns, MultiIndex):\n fmt_columns = columns._format_multi(sparsify=False, include_names=False)\n fmt_columns = list(zip(*fmt_columns))\n dtypes = self.frame.dtypes._values\n\n # if we have a Float level, they don't use leading space at all\n restrict_formatting = any(level.is_floating for level in columns.levels)\n need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))\n\n def space_format(x, y):\n if (\n y not in self.formatters\n and need_leadsp[x]\n and not restrict_formatting\n ):\n return " " + y\n return y\n\n str_columns_tuple = list(\n zip(*([space_format(x, y) for y in x] for x in fmt_columns))\n )\n if self.sparsify and len(str_columns_tuple):\n str_columns_tuple = sparsify_labels(str_columns_tuple)\n\n str_columns = [list(x) for x in zip(*str_columns_tuple)]\n else:\n fmt_columns = columns._format_flat(include_name=False)\n dtypes = self.frame.dtypes\n need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))\n str_columns = [\n [" " + x if not self._get_formatter(i) and need_leadsp[x] else x]\n for i, x in enumerate(fmt_columns)\n ]\n # self.str_columns = str_columns\n return str_columns\n\n def _get_formatted_index(self, frame: DataFrame) -> list[str]:\n # Note: this is only used by to_string() and to_latex(), not by\n # to_html(). 
so safe to cast col_space here.\n col_space = {k: cast(int, v) for k, v in self.col_space.items()}\n index = frame.index\n columns = frame.columns\n fmt = self._get_formatter("__index__")\n\n if isinstance(index, MultiIndex):\n fmt_index = index._format_multi(\n sparsify=self.sparsify,\n include_names=self.show_row_idx_names,\n formatter=fmt,\n )\n else:\n fmt_index = [\n index._format_flat(include_name=self.show_row_idx_names, formatter=fmt)\n ]\n\n fmt_index = [\n tuple(\n _make_fixed_width(\n list(x), justify="left", minimum=col_space.get("", 0), adj=self.adj\n )\n )\n for x in fmt_index\n ]\n\n adjoined = self.adj.adjoin(1, *fmt_index).split("\n")\n\n # empty space for columns\n if self.show_col_idx_names:\n col_header = [str(x) for x in self._get_column_name_list()]\n else:\n col_header = [""] * columns.nlevels\n\n if self.header:\n return col_header + adjoined\n else:\n return adjoined\n\n def _get_column_name_list(self) -> list[Hashable]:\n names: list[Hashable] = []\n columns = self.frame.columns\n if isinstance(columns, MultiIndex):\n names.extend("" if name is None else name for name in columns.names)\n else:\n names.append("" if columns.name is None else columns.name)\n return names\n\n\nclass DataFrameRenderer:\n """Class for creating dataframe output in multiple formats.\n\n Called in pandas.core.generic.NDFrame:\n - to_csv\n - to_latex\n\n Called in pandas.core.frame.DataFrame:\n - to_html\n - to_string\n\n Parameters\n ----------\n fmt : DataFrameFormatter\n Formatter with the formatting options.\n """\n\n def __init__(self, fmt: DataFrameFormatter) -> None:\n self.fmt = fmt\n\n def to_html(\n self,\n buf: FilePath | WriteBuffer[str] | None = None,\n encoding: str | None = None,\n classes: str | list | tuple | None = None,\n notebook: bool = False,\n border: int | bool | None = None,\n table_id: str | None = None,\n render_links: bool = False,\n ) -> str | None:\n """\n Render a DataFrame to a html table.\n\n Parameters\n ----------\n buf : str, path 
object, file-like object, or None, default None\n String, path object (implementing ``os.PathLike[str]``), or file-like\n object implementing a string ``write()`` function. If None, the result is\n returned as a string.\n encoding : str, default “utf-8”\n Set character encoding.\n classes : str or list-like\n classes to include in the `class` attribute of the opening\n ``<table>`` tag, in addition to the default "dataframe".\n notebook : {True, False}, optional, default False\n Whether the generated HTML is for IPython Notebook.\n border : int\n A ``border=border`` attribute is included in the opening\n ``<table>`` tag. Default ``pd.options.display.html.border``.\n table_id : str, optional\n A css id is included in the opening `<table>` tag if specified.\n render_links : bool, default False\n Convert URLs to HTML links.\n """\n from pandas.io.formats.html import (\n HTMLFormatter,\n NotebookFormatter,\n )\n\n Klass = NotebookFormatter if notebook else HTMLFormatter\n\n html_formatter = Klass(\n self.fmt,\n classes=classes,\n border=border,\n table_id=table_id,\n render_links=render_links,\n )\n string = html_formatter.to_string()\n return save_to_buffer(string, buf=buf, encoding=encoding)\n\n def to_string(\n self,\n buf: FilePath | WriteBuffer[str] | None = None,\n encoding: str | None = None,\n line_width: int | None = None,\n ) -> str | None:\n """\n Render a DataFrame to a console-friendly tabular output.\n\n Parameters\n ----------\n buf : str, path object, file-like object, or None, default None\n String, path object (implementing ``os.PathLike[str]``), or file-like\n object implementing a string ``write()`` function. 
If None, the result is\n returned as a string.\n encoding: str, default “utf-8”\n Set character encoding.\n line_width : int, optional\n Width to wrap a line in characters.\n """\n from pandas.io.formats.string import StringFormatter\n\n string_formatter = StringFormatter(self.fmt, line_width=line_width)\n string = string_formatter.to_string()\n return save_to_buffer(string, buf=buf, encoding=encoding)\n\n def to_csv(\n self,\n path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,\n encoding: str | None = None,\n sep: str = ",",\n columns: Sequence[Hashable] | None = None,\n index_label: IndexLabel | None = None,\n mode: str = "w",\n compression: CompressionOptions = "infer",\n quoting: int | None = None,\n quotechar: str = '"',\n lineterminator: str | None = None,\n chunksize: int | None = None,\n date_format: str | None = None,\n doublequote: bool = True,\n escapechar: str | None = None,\n errors: str = "strict",\n storage_options: StorageOptions | None = None,\n ) -> str | None:\n """\n Render dataframe as comma-separated file.\n """\n from pandas.io.formats.csvs import CSVFormatter\n\n if path_or_buf is None:\n created_buffer = True\n path_or_buf = StringIO()\n else:\n created_buffer = False\n\n csv_formatter = CSVFormatter(\n path_or_buf=path_or_buf,\n lineterminator=lineterminator,\n sep=sep,\n encoding=encoding,\n errors=errors,\n compression=compression,\n quoting=quoting,\n cols=columns,\n index_label=index_label,\n mode=mode,\n chunksize=chunksize,\n quotechar=quotechar,\n date_format=date_format,\n doublequote=doublequote,\n escapechar=escapechar,\n storage_options=storage_options,\n formatter=self.fmt,\n )\n csv_formatter.save()\n\n if created_buffer:\n assert isinstance(path_or_buf, StringIO)\n content = path_or_buf.getvalue()\n path_or_buf.close()\n return content\n\n return None\n\n\ndef save_to_buffer(\n string: str,\n buf: FilePath | WriteBuffer[str] | None = None,\n encoding: str | None = None,\n) -> str | None:\n """\n 
Perform serialization. Write to buf or return as string if buf is None.
    """
    with _get_buffer(buf, encoding=encoding) as fd:
        fd.write(string)
        if buf is None:
            # error: "WriteBuffer[str]" has no attribute "getvalue"
            return fd.getvalue()  # type: ignore[attr-defined]
        return None


@contextmanager
def _get_buffer(
    buf: FilePath | WriteBuffer[str] | None, encoding: str | None = None
) -> Generator[WriteBuffer[str], None, None] | Generator[StringIO, None, None]:
    """
    Context manager to open, yield and close buffer for filenames or Path-like
    objects, otherwise yield buf unchanged.
    """
    if buf is not None:
        buf = stringify_path(buf)
    else:
        # No target given: accumulate output in memory so the caller
        # (save_to_buffer) can read it back via getvalue().
        buf = StringIO()

    if encoding is None:
        encoding = "utf-8"
    elif not isinstance(buf, str):
        # An explicit encoding only makes sense when we open the file
        # ourselves; reject it for pre-opened write-capable objects.
        raise ValueError("buf is not a file name and encoding is specified.")

    if hasattr(buf, "write"):
        # Incompatible types in "yield" (actual type "Union[str, WriteBuffer[str],
        # StringIO]", expected type "Union[WriteBuffer[str], StringIO]")
        yield buf  # type: ignore[misc]
    elif isinstance(buf, str):
        check_parent_directory(str(buf))
        with open(buf, "w", encoding=encoding, newline="") as f:
            # GH#30034 open instead of codecs.open prevents a file leak
            # if we have an invalid encoding argument.
            # newline="" is needed to roundtrip correctly on
            # windows test_to_latex_filename
            yield f
    else:
        raise TypeError("buf is not a file name and it has no write method")


# ----------------------------------------------------------------------
# Array formatters


def format_array(
    values: ArrayLike,
    formatter: Callable | None,
    float_format: FloatFormatType | None = None,
    na_rep: str = "NaN",
    digits: int | None = None,
    space: str | int | None = None,
    justify: str = "right",
    decimal: str = ".",
    leading_space: bool | None = True,
    quoting: int | None = None,
    fallback_formatter: Callable | None = None,
) -> list[str]:
    """
    Format an array for printing.

    Parameters
    
----------\n values : np.ndarray or ExtensionArray\n formatter\n float_format\n na_rep\n digits\n space\n justify\n decimal\n leading_space : bool, optional, default True\n Whether the array should be formatted with a leading space.\n When an array as a column of a Series or DataFrame, we do want\n the leading space to pad between columns.\n\n When formatting an Index subclass\n (e.g. IntervalIndex._get_values_for_csv), we don't want the\n leading space since it should be left-aligned.\n fallback_formatter\n\n Returns\n -------\n List[str]\n """\n fmt_klass: type[_GenericArrayFormatter]\n if lib.is_np_dtype(values.dtype, "M"):\n fmt_klass = _Datetime64Formatter\n values = cast(DatetimeArray, values)\n elif isinstance(values.dtype, DatetimeTZDtype):\n fmt_klass = _Datetime64TZFormatter\n values = cast(DatetimeArray, values)\n elif lib.is_np_dtype(values.dtype, "m"):\n fmt_klass = _Timedelta64Formatter\n values = cast(TimedeltaArray, values)\n elif isinstance(values.dtype, ExtensionDtype):\n fmt_klass = _ExtensionArrayFormatter\n elif lib.is_np_dtype(values.dtype, "fc"):\n fmt_klass = FloatArrayFormatter\n elif lib.is_np_dtype(values.dtype, "iu"):\n fmt_klass = _IntArrayFormatter\n else:\n fmt_klass = _GenericArrayFormatter\n\n if space is None:\n space = 12\n\n if float_format is None:\n float_format = get_option("display.float_format")\n\n if digits is None:\n digits = get_option("display.precision")\n\n fmt_obj = fmt_klass(\n values,\n digits=digits,\n na_rep=na_rep,\n float_format=float_format,\n formatter=formatter,\n space=space,\n justify=justify,\n decimal=decimal,\n leading_space=leading_space,\n quoting=quoting,\n fallback_formatter=fallback_formatter,\n )\n\n return fmt_obj.get_result()\n\n\nclass _GenericArrayFormatter:\n def __init__(\n self,\n values: ArrayLike,\n digits: int = 7,\n formatter: Callable | None = None,\n na_rep: str = "NaN",\n space: str | int = 12,\n float_format: FloatFormatType | None = None,\n justify: str = "right",\n decimal: str = 
".",\n quoting: int | None = None,\n fixed_width: bool = True,\n leading_space: bool | None = True,\n fallback_formatter: Callable | None = None,\n ) -> None:\n self.values = values\n self.digits = digits\n self.na_rep = na_rep\n self.space = space\n self.formatter = formatter\n self.float_format = float_format\n self.justify = justify\n self.decimal = decimal\n self.quoting = quoting\n self.fixed_width = fixed_width\n self.leading_space = leading_space\n self.fallback_formatter = fallback_formatter\n\n def get_result(self) -> list[str]:\n fmt_values = self._format_strings()\n return _make_fixed_width(fmt_values, self.justify)\n\n def _format_strings(self) -> list[str]:\n if self.float_format is None:\n float_format = get_option("display.float_format")\n if float_format is None:\n precision = get_option("display.precision")\n float_format = lambda x: _trim_zeros_single_float(\n f"{x: .{precision:d}f}"\n )\n else:\n float_format = self.float_format\n\n if self.formatter is not None:\n formatter = self.formatter\n elif self.fallback_formatter is not None:\n formatter = self.fallback_formatter\n else:\n quote_strings = self.quoting is not None and self.quoting != QUOTE_NONE\n formatter = partial(\n printing.pprint_thing,\n escape_chars=("\t", "\r", "\n"),\n quote_strings=quote_strings,\n )\n\n def _format(x):\n if self.na_rep is not None and is_scalar(x) and isna(x):\n if x is None:\n return "None"\n elif x is NA:\n return str(NA)\n elif lib.is_float(x) and np.isinf(x):\n # TODO(3.0): this will be unreachable when use_inf_as_na\n # deprecation is enforced\n return str(x)\n elif x is NaT or isinstance(x, (np.datetime64, np.timedelta64)):\n return "NaT"\n return self.na_rep\n elif isinstance(x, PandasObject):\n return str(x)\n elif isinstance(x, StringDtype):\n return repr(x)\n else:\n # object dtype\n return str(formatter(x))\n\n vals = self.values\n if not isinstance(vals, np.ndarray):\n raise TypeError(\n "ExtensionArray formatting should use 
_ExtensionArrayFormatter"\n )\n inferred = lib.map_infer(vals, is_float)\n is_float_type = (\n inferred\n # vals may have 2 or more dimensions\n & np.all(notna(vals), axis=tuple(range(1, len(vals.shape))))\n )\n leading_space = self.leading_space\n if leading_space is None:\n leading_space = is_float_type.any()\n\n fmt_values = []\n for i, v in enumerate(vals):\n if (not is_float_type[i] or self.formatter is not None) and leading_space:\n fmt_values.append(f" {_format(v)}")\n elif is_float_type[i]:\n fmt_values.append(float_format(v))\n else:\n if leading_space is False:\n # False specifically, so that the default is\n # to include a space if we get here.\n tpl = "{v}"\n else:\n tpl = " {v}"\n fmt_values.append(tpl.format(v=_format(v)))\n\n return fmt_values\n\n\nclass FloatArrayFormatter(_GenericArrayFormatter):\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n\n # float_format is expected to be a string\n # formatter should be used to pass a function\n if self.float_format is not None and self.formatter is None:\n # GH21625, GH22270\n self.fixed_width = False\n if callable(self.float_format):\n self.formatter = self.float_format\n self.float_format = None\n\n def _value_formatter(\n self,\n float_format: FloatFormatType | None = None,\n threshold: float | None = None,\n ) -> Callable:\n """Returns a function to be applied on each value to format it"""\n # the float_format parameter supersedes self.float_format\n if float_format is None:\n float_format = self.float_format\n\n # we are going to compose different functions, to first convert to\n # a string, then replace the decimal symbol, and finally chop according\n # to the threshold\n\n # when there is no float_format, we use str instead of '%g'\n # because str(0.0) = '0.0' while '%g' % 0.0 = '0'\n if float_format:\n\n def base_formatter(v):\n assert float_format is not None # for mypy\n # error: "str" not callable\n # error: Unexpected keyword argument "value" for "__call__" 
of\n # "EngFormatter"\n return (\n float_format(value=v) # type: ignore[operator,call-arg]\n if notna(v)\n else self.na_rep\n )\n\n else:\n\n def base_formatter(v):\n return str(v) if notna(v) else self.na_rep\n\n if self.decimal != ".":\n\n def decimal_formatter(v):\n return base_formatter(v).replace(".", self.decimal, 1)\n\n else:\n decimal_formatter = base_formatter\n\n if threshold is None:\n return decimal_formatter\n\n def formatter(value):\n if notna(value):\n if abs(value) > threshold:\n return decimal_formatter(value)\n else:\n return decimal_formatter(0.0)\n else:\n return self.na_rep\n\n return formatter\n\n def get_result_as_array(self) -> np.ndarray:\n """\n Returns the float values converted into strings using\n the parameters given at initialisation, as a numpy array\n """\n\n def format_with_na_rep(values: ArrayLike, formatter: Callable, na_rep: str):\n mask = isna(values)\n formatted = np.array(\n [\n formatter(val) if not m else na_rep\n for val, m in zip(values.ravel(), mask.ravel())\n ]\n ).reshape(values.shape)\n return formatted\n\n def format_complex_with_na_rep(\n values: ArrayLike, formatter: Callable, na_rep: str\n ):\n real_values = np.real(values).ravel() # type: ignore[arg-type]\n imag_values = np.imag(values).ravel() # type: ignore[arg-type]\n real_mask, imag_mask = isna(real_values), isna(imag_values)\n formatted_lst = []\n for val, real_val, imag_val, re_isna, im_isna in zip(\n values.ravel(),\n real_values,\n imag_values,\n real_mask,\n imag_mask,\n ):\n if not re_isna and not im_isna:\n formatted_lst.append(formatter(val))\n elif not re_isna: # xxx+nanj\n formatted_lst.append(f"{formatter(real_val)}+{na_rep}j")\n elif not im_isna: # nan[+/-]xxxj\n # The imaginary part may either start with a "-" or a space\n imag_formatted = formatter(imag_val).strip()\n if imag_formatted.startswith("-"):\n formatted_lst.append(f"{na_rep}{imag_formatted}j")\n else:\n formatted_lst.append(f"{na_rep}+{imag_formatted}j")\n else: # nan+nanj\n 
formatted_lst.append(f"{na_rep}+{na_rep}j")\n return np.array(formatted_lst).reshape(values.shape)\n\n if self.formatter is not None:\n return format_with_na_rep(self.values, self.formatter, self.na_rep)\n\n if self.fixed_width:\n threshold = get_option("display.chop_threshold")\n else:\n threshold = None\n\n # if we have a fixed_width, we'll need to try different float_format\n def format_values_with(float_format):\n formatter = self._value_formatter(float_format, threshold)\n\n # default formatter leaves a space to the left when formatting\n # floats, must be consistent for left-justifying NaNs (GH #25061)\n na_rep = " " + self.na_rep if self.justify == "left" else self.na_rep\n\n # different formatting strategies for complex and non-complex data\n # need to distinguish complex and float NaNs (GH #53762)\n values = self.values\n is_complex = is_complex_dtype(values)\n\n # separate the wheat from the chaff\n if is_complex:\n values = format_complex_with_na_rep(values, formatter, na_rep)\n else:\n values = format_with_na_rep(values, formatter, na_rep)\n\n if self.fixed_width:\n if is_complex:\n result = _trim_zeros_complex(values, self.decimal)\n else:\n result = _trim_zeros_float(values, self.decimal)\n return np.asarray(result, dtype="object")\n\n return values\n\n # There is a special default string when we are fixed-width\n # The default is otherwise to use str instead of a formatting string\n float_format: FloatFormatType | None\n if self.float_format is None:\n if self.fixed_width:\n if self.leading_space is True:\n fmt_str = "{value: .{digits:d}f}"\n else:\n fmt_str = "{value:.{digits:d}f}"\n float_format = partial(fmt_str.format, digits=self.digits)\n else:\n float_format = self.float_format\n else:\n float_format = lambda value: self.float_format % value\n\n formatted_values = format_values_with(float_format)\n\n if not self.fixed_width:\n return formatted_values\n\n # we need do convert to engineering format if some values are too small\n # and would 
appear as 0, or if some values are too big and take too\n # much space\n\n if len(formatted_values) > 0:\n maxlen = max(len(x) for x in formatted_values)\n too_long = maxlen > self.digits + 6\n else:\n too_long = False\n\n abs_vals = np.abs(self.values)\n # this is pretty arbitrary for now\n # large values: more that 8 characters including decimal symbol\n # and first digit, hence > 1e6\n has_large_values = (abs_vals > 1e6).any()\n has_small_values = ((abs_vals < 10 ** (-self.digits)) & (abs_vals > 0)).any()\n\n if has_small_values or (too_long and has_large_values):\n if self.leading_space is True:\n fmt_str = "{value: .{digits:d}e}"\n else:\n fmt_str = "{value:.{digits:d}e}"\n float_format = partial(fmt_str.format, digits=self.digits)\n formatted_values = format_values_with(float_format)\n\n return formatted_values\n\n def _format_strings(self) -> list[str]:\n return list(self.get_result_as_array())\n\n\nclass _IntArrayFormatter(_GenericArrayFormatter):\n def _format_strings(self) -> list[str]:\n if self.leading_space is False:\n formatter_str = lambda x: f"{x:d}".format(x=x)\n else:\n formatter_str = lambda x: f"{x: d}".format(x=x)\n formatter = self.formatter or formatter_str\n fmt_values = [formatter(x) for x in self.values]\n return fmt_values\n\n\nclass _Datetime64Formatter(_GenericArrayFormatter):\n values: DatetimeArray\n\n def __init__(\n self,\n values: DatetimeArray,\n nat_rep: str = "NaT",\n date_format: None = None,\n **kwargs,\n ) -> None:\n super().__init__(values, **kwargs)\n self.nat_rep = nat_rep\n self.date_format = date_format\n\n def _format_strings(self) -> list[str]:\n """we by definition have DO NOT have a TZ"""\n values = self.values\n\n if self.formatter is not None:\n return [self.formatter(x) for x in values]\n\n fmt_values = values._format_native_types(\n na_rep=self.nat_rep, date_format=self.date_format\n )\n return fmt_values.tolist()\n\n\nclass _ExtensionArrayFormatter(_GenericArrayFormatter):\n values: ExtensionArray\n\n def 
_format_strings(self) -> list[str]:
        values = self.values

        formatter = self.formatter
        fallback_formatter = None
        if formatter is None:
            # No explicit formatter: let the ExtensionArray supply its own
            # element formatter (boxed=True).
            fallback_formatter = values._formatter(boxed=True)

        if isinstance(values, Categorical):
            # Categorical is special for now, so that we can preserve tzinfo
            array = values._internal_get_values()
        else:
            array = np.asarray(values, dtype=object)

        # Delegate to the generic dispatcher with the materialized array.
        fmt_values = format_array(
            array,
            formatter,
            float_format=self.float_format,
            na_rep=self.na_rep,
            digits=self.digits,
            space=self.space,
            justify=self.justify,
            decimal=self.decimal,
            leading_space=self.leading_space,
            quoting=self.quoting,
            fallback_formatter=fallback_formatter,
        )
        return fmt_values


def format_percentiles(
    percentiles: (np.ndarray | Sequence[float]),
) -> list[str]:
    """
    Outputs rounded and formatted percentiles.

    Parameters
    ----------
    percentiles : list-like, containing floats from interval [0,1]

    Returns
    -------
    formatted : list of strings

    Raises
    ------
    ValueError
        If any entry is non-numeric (including NaN) or outside [0, 1].

    Notes
    -----
    Rounding precision is chosen so that: (1) if any two elements of
    ``percentiles`` differ, they remain different after rounding
    (2) no entry is *rounded* to 0% or 100%.
    Any non-integer is always rounded to at least 1 decimal place.

    Examples
    --------
    Keeps all entries different after rounding:

    >>> format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999])
    ['1.999%', '2.001%', '50%', '66.667%', '99.99%']

    No element is rounded to 0% or 100% (unless already equal to it).
    Duplicates are allowed:

    >>> format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999])
    ['0%', '50%', '2.0%', '50%', '66.67%', '99.99%']
    """
    percentiles = np.asarray(percentiles)

    # It checks for np.nan as well
    if (
        not is_numeric_dtype(percentiles)
        or not np.all(percentiles >= 0)
        or not np.all(percentiles <= 1)
    ):
        raise ValueError("percentiles should all be in the interval [0,1]")

    # Work in percent space [0, 100] from here on.
    percentiles = 100 * percentiles
    prec = 
get_precision(percentiles)\n percentiles_round_type = percentiles.round(prec).astype(int)\n\n int_idx = np.isclose(percentiles_round_type, percentiles)\n\n if np.all(int_idx):\n out = percentiles_round_type.astype(str)\n return [i + "%" for i in out]\n\n unique_pcts = np.unique(percentiles)\n prec = get_precision(unique_pcts)\n out = np.empty_like(percentiles, dtype=object)\n out[int_idx] = percentiles[int_idx].round().astype(int).astype(str)\n\n out[~int_idx] = percentiles[~int_idx].round(prec).astype(str)\n return [i + "%" for i in out]\n\n\ndef get_precision(array: np.ndarray | Sequence[float]) -> int:\n to_begin = array[0] if array[0] > 0 else None\n to_end = 100 - array[-1] if array[-1] < 100 else None\n diff = np.ediff1d(array, to_begin=to_begin, to_end=to_end)\n diff = abs(diff)\n prec = -np.floor(np.log10(np.min(diff))).astype(int)\n prec = max(1, prec)\n return prec\n\n\ndef _format_datetime64(x: NaTType | Timestamp, nat_rep: str = "NaT") -> str:\n if x is NaT:\n return nat_rep\n\n # Timestamp.__str__ falls back to datetime.datetime.__str__ = isoformat(sep=' ')\n # so it already uses string formatting rather than strftime (faster).\n return str(x)\n\n\ndef _format_datetime64_dateonly(\n x: NaTType | Timestamp,\n nat_rep: str = "NaT",\n date_format: str | None = None,\n) -> str:\n if isinstance(x, NaTType):\n return nat_rep\n\n if date_format:\n return x.strftime(date_format)\n else:\n # Timestamp._date_repr relies on string formatting (faster than strftime)\n return x._date_repr\n\n\ndef get_format_datetime64(\n is_dates_only: bool, nat_rep: str = "NaT", date_format: str | None = None\n) -> Callable:\n """Return a formatter callable taking a datetime64 as input and providing\n a string as output"""\n\n if is_dates_only:\n return lambda x: _format_datetime64_dateonly(\n x, nat_rep=nat_rep, date_format=date_format\n )\n else:\n return lambda x: _format_datetime64(x, nat_rep=nat_rep)\n\n\nclass _Datetime64TZFormatter(_Datetime64Formatter):\n values: 
DatetimeArray\n\n def _format_strings(self) -> list[str]:\n """we by definition have a TZ"""\n ido = self.values._is_dates_only\n values = self.values.astype(object)\n formatter = self.formatter or get_format_datetime64(\n ido, date_format=self.date_format\n )\n fmt_values = [formatter(x) for x in values]\n\n return fmt_values\n\n\nclass _Timedelta64Formatter(_GenericArrayFormatter):\n values: TimedeltaArray\n\n def __init__(\n self,\n values: TimedeltaArray,\n nat_rep: str = "NaT",\n **kwargs,\n ) -> None:\n # TODO: nat_rep is never passed, na_rep is.\n super().__init__(values, **kwargs)\n self.nat_rep = nat_rep\n\n def _format_strings(self) -> list[str]:\n formatter = self.formatter or get_format_timedelta64(\n self.values, nat_rep=self.nat_rep, box=False\n )\n return [formatter(x) for x in self.values]\n\n\ndef get_format_timedelta64(\n values: TimedeltaArray,\n nat_rep: str | float = "NaT",\n box: bool = False,\n) -> Callable:\n """\n Return a formatter function for a range of timedeltas.\n These will all have the same format argument\n\n If box, then show the return in quotes\n """\n even_days = values._is_dates_only\n\n if even_days:\n format = None\n else:\n format = "long"\n\n def _formatter(x):\n if x is None or (is_scalar(x) and isna(x)):\n return nat_rep\n\n if not isinstance(x, Timedelta):\n x = Timedelta(x)\n\n # Timedelta._repr_base uses string formatting (faster than strftime)\n result = x._repr_base(format=format)\n if box:\n result = f"'{result}'"\n return result\n\n return _formatter\n\n\ndef _make_fixed_width(\n strings: list[str],\n justify: str = "right",\n minimum: int | None = None,\n adj: printing._TextAdjustment | None = None,\n) -> list[str]:\n if len(strings) == 0 or justify == "all":\n return strings\n\n if adj is None:\n adjustment = printing.get_adjustment()\n else:\n adjustment = adj\n\n max_len = max(adjustment.len(x) for x in strings)\n\n if minimum is not None:\n max_len = max(minimum, max_len)\n\n conf_max = 
get_option("display.max_colwidth")\n if conf_max is not None and max_len > conf_max:\n max_len = conf_max\n\n def just(x: str) -> str:\n if conf_max is not None:\n if (conf_max > 3) & (adjustment.len(x) > max_len):\n x = x[: max_len - 3] + "..."\n return x\n\n strings = [just(x) for x in strings]\n result = adjustment.justify(strings, max_len, mode=justify)\n return result\n\n\ndef _trim_zeros_complex(str_complexes: ArrayLike, decimal: str = ".") -> list[str]:\n """\n Separates the real and imaginary parts from the complex number, and\n executes the _trim_zeros_float method on each of those.\n """\n real_part, imag_part = [], []\n for x in str_complexes:\n # Complex numbers are represented as "(-)xxx(+/-)xxxj"\n # The split will give [{"", "-"}, "xxx", "+/-", "xxx", "j", ""]\n # Therefore, the imaginary part is the 4th and 3rd last elements,\n # and the real part is everything before the imaginary part\n trimmed = re.split(r"([j+-])", x)\n real_part.append("".join(trimmed[:-4]))\n imag_part.append("".join(trimmed[-4:-2]))\n\n # We want to align the lengths of the real and imaginary parts of each complex\n # number, as well as the lengths the real (resp. 
complex) parts of all numbers\n # in the array\n n = len(str_complexes)\n padded_parts = _trim_zeros_float(real_part + imag_part, decimal)\n if len(padded_parts) == 0:\n return []\n padded_length = max(len(part) for part in padded_parts) - 1\n padded = [\n real_pt # real part, possibly NaN\n + imag_pt[0] # +/-\n + f"{imag_pt[1:]:>{padded_length}}" # complex part (no sign), possibly nan\n + "j"\n for real_pt, imag_pt in zip(padded_parts[:n], padded_parts[n:])\n ]\n return padded\n\n\ndef _trim_zeros_single_float(str_float: str) -> str:\n """\n Trims trailing zeros after a decimal point,\n leaving just one if necessary.\n """\n str_float = str_float.rstrip("0")\n if str_float.endswith("."):\n str_float += "0"\n\n return str_float\n\n\ndef _trim_zeros_float(\n str_floats: ArrayLike | list[str], decimal: str = "."\n) -> list[str]:\n """\n Trims the maximum number of trailing zeros equally from\n all numbers containing decimals, leaving just one if\n necessary.\n """\n trimmed = str_floats\n number_regex = re.compile(rf"^\s*[\+-]?[0-9]+\{decimal}[0-9]*$")\n\n def is_number_with_decimal(x) -> bool:\n return re.match(number_regex, x) is not None\n\n def should_trim(values: ArrayLike | list[str]) -> bool:\n """\n Determine if an array of strings should be trimmed.\n\n Returns True if all numbers containing decimals (defined by the\n above regular expression) within the array end in a zero, otherwise\n returns False.\n """\n numbers = [x for x in values if is_number_with_decimal(x)]\n return len(numbers) > 0 and all(x.endswith("0") for x in numbers)\n\n while should_trim(trimmed):\n trimmed = [x[:-1] if is_number_with_decimal(x) else x for x in trimmed]\n\n # leave one 0 after the decimal points if need be.\n result = [\n x + "0" if is_number_with_decimal(x) and x.endswith(decimal) else x\n for x in trimmed\n ]\n return result\n\n\ndef _has_names(index: Index) -> bool:\n if isinstance(index, MultiIndex):\n return com.any_not_none(*index.names)\n else:\n return index.name is 
not None\n\n\nclass EngFormatter:\n """\n Formats float values according to engineering format.\n\n Based on matplotlib.ticker.EngFormatter\n """\n\n # The SI engineering prefixes\n ENG_PREFIXES = {\n -24: "y",\n -21: "z",\n -18: "a",\n -15: "f",\n -12: "p",\n -9: "n",\n -6: "u",\n -3: "m",\n 0: "",\n 3: "k",\n 6: "M",\n 9: "G",\n 12: "T",\n 15: "P",\n 18: "E",\n 21: "Z",\n 24: "Y",\n }\n\n def __init__(\n self, accuracy: int | None = None, use_eng_prefix: bool = False\n ) -> None:\n self.accuracy = accuracy\n self.use_eng_prefix = use_eng_prefix\n\n def __call__(self, num: float) -> str:\n """\n Formats a number in engineering notation, appending a letter\n representing the power of 1000 of the original number. Some examples:\n >>> format_eng = EngFormatter(accuracy=0, use_eng_prefix=True)\n >>> format_eng(0)\n ' 0'\n >>> format_eng = EngFormatter(accuracy=1, use_eng_prefix=True)\n >>> format_eng(1_000_000)\n ' 1.0M'\n >>> format_eng = EngFormatter(accuracy=2, use_eng_prefix=False)\n >>> format_eng("-1e-6")\n '-1.00E-06'\n\n @param num: the value to represent\n @type num: either a numeric value or a string that can be converted to\n a numeric value (as per decimal.Decimal constructor)\n\n @return: engineering formatted string\n """\n dnum = Decimal(str(num))\n\n if Decimal.is_nan(dnum):\n return "NaN"\n\n if Decimal.is_infinite(dnum):\n return "inf"\n\n sign = 1\n\n if dnum < 0: # pragma: no cover\n sign = -1\n dnum = -dnum\n\n if dnum != 0:\n pow10 = Decimal(int(math.floor(dnum.log10() / 3) * 3))\n else:\n pow10 = Decimal(0)\n\n pow10 = pow10.min(max(self.ENG_PREFIXES.keys()))\n pow10 = pow10.max(min(self.ENG_PREFIXES.keys()))\n int_pow10 = int(pow10)\n\n if self.use_eng_prefix:\n prefix = self.ENG_PREFIXES[int_pow10]\n elif int_pow10 < 0:\n prefix = f"E-{-int_pow10:02d}"\n else:\n prefix = f"E+{int_pow10:02d}"\n\n mant = sign * dnum / (10**pow10)\n\n if self.accuracy is None: # pragma: no cover\n format_str = "{mant: g}{prefix}"\n else:\n format_str = f"{{mant: 
.{self.accuracy:d}f}}{{prefix}}"\n\n formatted = format_str.format(mant=mant, prefix=prefix)\n\n return formatted\n\n\ndef set_eng_float_format(accuracy: int = 3, use_eng_prefix: bool = False) -> None:\n """\n Format float representation in DataFrame with SI notation.\n\n Parameters\n ----------\n accuracy : int, default 3\n Number of decimal digits after the floating point.\n use_eng_prefix : bool, default False\n Whether to represent a value with SI prefixes.\n\n Returns\n -------\n None\n\n Examples\n --------\n >>> df = pd.DataFrame([1e-9, 1e-3, 1, 1e3, 1e6])\n >>> df\n 0\n 0 1.000000e-09\n 1 1.000000e-03\n 2 1.000000e+00\n 3 1.000000e+03\n 4 1.000000e+06\n\n >>> pd.set_eng_float_format(accuracy=1)\n >>> df\n 0\n 0 1.0E-09\n 1 1.0E-03\n 2 1.0E+00\n 3 1.0E+03\n 4 1.0E+06\n\n >>> pd.set_eng_float_format(use_eng_prefix=True)\n >>> df\n 0\n 0 1.000n\n 1 1.000m\n 2 1.000\n 3 1.000k\n 4 1.000M\n\n >>> pd.set_eng_float_format(accuracy=1, use_eng_prefix=True)\n >>> df\n 0\n 0 1.0n\n 1 1.0m\n 2 1.0\n 3 1.0k\n 4 1.0M\n\n >>> pd.set_option("display.float_format", None) # unset option\n """\n set_option("display.float_format", EngFormatter(accuracy, use_eng_prefix))\n\n\ndef get_level_lengths(\n levels: Any, sentinel: bool | object | str = ""\n) -> list[dict[int, int]]:\n """\n For each index in each level the function returns lengths of indexes.\n\n Parameters\n ----------\n levels : list of lists\n List of values on for level.\n sentinel : string, optional\n Value which states that no new index starts on there.\n\n Returns\n -------\n Returns list of maps. 
For each level returns map of indexes (key is index\n in row and value is length of index).\n """\n if len(levels) == 0:\n return []\n\n control = [True] * len(levels[0])\n\n result = []\n for level in levels:\n last_index = 0\n\n lengths = {}\n for i, key in enumerate(level):\n if control[i] and key == sentinel:\n pass\n else:\n control[i] = False\n lengths[last_index] = i - last_index\n last_index = i\n\n lengths[last_index] = len(level) - last_index\n\n result.append(lengths)\n\n return result\n\n\ndef buffer_put_lines(buf: WriteBuffer[str], lines: list[str]) -> None:\n """\n Appends lines to a buffer.\n\n Parameters\n ----------\n buf\n The buffer to write to\n lines\n The lines to append.\n """\n if any(isinstance(x, str) for x in lines):\n lines = [str(x) for x in lines]\n buf.write("\n".join(lines))\n
.venv\Lib\site-packages\pandas\io\formats\format.py
format.py
Python
66,127
0.75
0.166667
0.052419
python-kit
206
2023-09-28T03:42:47.592565
MIT
false
746d229a42eb1bbe7069272f5a82f289
"""\nModule for formatting output data in HTML.\n"""\nfrom __future__ import annotations\n\nfrom textwrap import dedent\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Final,\n cast,\n)\n\nfrom pandas._config import get_option\n\nfrom pandas._libs import lib\n\nfrom pandas import (\n MultiIndex,\n option_context,\n)\n\nfrom pandas.io.common import is_url\nfrom pandas.io.formats.format import (\n DataFrameFormatter,\n get_level_lengths,\n)\nfrom pandas.io.formats.printing import pprint_thing\n\nif TYPE_CHECKING:\n from collections.abc import (\n Hashable,\n Iterable,\n Mapping,\n )\n\n\nclass HTMLFormatter:\n """\n Internal class for formatting output data in html.\n This class is intended for shared functionality between\n DataFrame.to_html() and DataFrame._repr_html_().\n Any logic in common with other output formatting methods\n should ideally be inherited from classes in format.py\n and this class responsible for only producing html markup.\n """\n\n indent_delta: Final = 2\n\n def __init__(\n self,\n formatter: DataFrameFormatter,\n classes: str | list[str] | tuple[str, ...] 
| None = None,\n border: int | bool | None = None,\n table_id: str | None = None,\n render_links: bool = False,\n ) -> None:\n self.fmt = formatter\n self.classes = classes\n\n self.frame = self.fmt.frame\n self.columns = self.fmt.tr_frame.columns\n self.elements: list[str] = []\n self.bold_rows = self.fmt.bold_rows\n self.escape = self.fmt.escape\n self.show_dimensions = self.fmt.show_dimensions\n if border is None or border is True:\n border = cast(int, get_option("display.html.border"))\n elif not border:\n border = None\n\n self.border = border\n self.table_id = table_id\n self.render_links = render_links\n\n self.col_space = {}\n is_multi_index = isinstance(self.columns, MultiIndex)\n for column, value in self.fmt.col_space.items():\n col_space_value = f"{value}px" if isinstance(value, int) else value\n self.col_space[column] = col_space_value\n # GH 53885: Handling case where column is index\n # Flatten the data in the multi index and add in the map\n if is_multi_index and isinstance(column, tuple):\n for column_index in column:\n self.col_space[str(column_index)] = col_space_value\n\n def to_string(self) -> str:\n lines = self.render()\n if any(isinstance(x, str) for x in lines):\n lines = [str(x) for x in lines]\n return "\n".join(lines)\n\n def render(self) -> list[str]:\n self._write_table()\n\n if self.should_show_dimensions:\n by = chr(215) # × # noqa: RUF003\n self.write(\n f"<p>{len(self.frame)} rows {by} {len(self.frame.columns)} columns</p>"\n )\n\n return self.elements\n\n @property\n def should_show_dimensions(self) -> bool:\n return self.fmt.should_show_dimensions\n\n @property\n def show_row_idx_names(self) -> bool:\n return self.fmt.show_row_idx_names\n\n @property\n def show_col_idx_names(self) -> bool:\n return self.fmt.show_col_idx_names\n\n @property\n def row_levels(self) -> int:\n if self.fmt.index:\n # showing (row) index\n return self.frame.index.nlevels\n elif self.show_col_idx_names:\n # see gh-22579\n # Column misalignment also 
occurs for\n # a standard index when the columns index is named.\n # If the row index is not displayed a column of\n # blank cells need to be included before the DataFrame values.\n return 1\n # not showing (row) index\n return 0\n\n def _get_columns_formatted_values(self) -> Iterable:\n return self.columns\n\n @property\n def is_truncated(self) -> bool:\n return self.fmt.is_truncated\n\n @property\n def ncols(self) -> int:\n return len(self.fmt.tr_frame.columns)\n\n def write(self, s: Any, indent: int = 0) -> None:\n rs = pprint_thing(s)\n self.elements.append(" " * indent + rs)\n\n def write_th(\n self, s: Any, header: bool = False, indent: int = 0, tags: str | None = None\n ) -> None:\n """\n Method for writing a formatted <th> cell.\n\n If col_space is set on the formatter then that is used for\n the value of min-width.\n\n Parameters\n ----------\n s : object\n The data to be written inside the cell.\n header : bool, default False\n Set to True if the <th> is for use inside <thead>. 
This will\n cause min-width to be set if there is one.\n indent : int, default 0\n The indentation level of the cell.\n tags : str, default None\n Tags to include in the cell.\n\n Returns\n -------\n A written <th> cell.\n """\n col_space = self.col_space.get(s, None)\n\n if header and col_space is not None:\n tags = tags or ""\n tags += f'style="min-width: {col_space};"'\n\n self._write_cell(s, kind="th", indent=indent, tags=tags)\n\n def write_td(self, s: Any, indent: int = 0, tags: str | None = None) -> None:\n self._write_cell(s, kind="td", indent=indent, tags=tags)\n\n def _write_cell(\n self, s: Any, kind: str = "td", indent: int = 0, tags: str | None = None\n ) -> None:\n if tags is not None:\n start_tag = f"<{kind} {tags}>"\n else:\n start_tag = f"<{kind}>"\n\n if self.escape:\n # escape & first to prevent double escaping of &\n esc = {"&": r"&amp;", "<": r"&lt;", ">": r"&gt;"}\n else:\n esc = {}\n\n rs = pprint_thing(s, escape_chars=esc).strip()\n\n if self.render_links and is_url(rs):\n rs_unescaped = pprint_thing(s, escape_chars={}).strip()\n start_tag += f'<a href="{rs_unescaped}" target="_blank">'\n end_a = "</a>"\n else:\n end_a = ""\n\n self.write(f"{start_tag}{rs}{end_a}</{kind}>", indent)\n\n def write_tr(\n self,\n line: Iterable,\n indent: int = 0,\n indent_delta: int = 0,\n header: bool = False,\n align: str | None = None,\n tags: dict[int, str] | None = None,\n nindex_levels: int = 0,\n ) -> None:\n if tags is None:\n tags = {}\n\n if align is None:\n self.write("<tr>", indent)\n else:\n self.write(f'<tr style="text-align: {align};">', indent)\n indent += indent_delta\n\n for i, s in enumerate(line):\n val_tag = tags.get(i, None)\n if header or (self.bold_rows and i < nindex_levels):\n self.write_th(s, indent=indent, header=header, tags=val_tag)\n else:\n self.write_td(s, indent, tags=val_tag)\n\n indent -= indent_delta\n self.write("</tr>", indent)\n\n def _write_table(self, indent: int = 0) -> None:\n _classes = ["dataframe"] # Default 
class.\n use_mathjax = get_option("display.html.use_mathjax")\n if not use_mathjax:\n _classes.append("tex2jax_ignore")\n if self.classes is not None:\n if isinstance(self.classes, str):\n self.classes = self.classes.split()\n if not isinstance(self.classes, (list, tuple)):\n raise TypeError(\n "classes must be a string, list, "\n f"or tuple, not {type(self.classes)}"\n )\n _classes.extend(self.classes)\n\n if self.table_id is None:\n id_section = ""\n else:\n id_section = f' id="{self.table_id}"'\n\n if self.border is None:\n border_attr = ""\n else:\n border_attr = f' border="{self.border}"'\n\n self.write(\n f'<table{border_attr} class="{" ".join(_classes)}"{id_section}>',\n indent,\n )\n\n if self.fmt.header or self.show_row_idx_names:\n self._write_header(indent + self.indent_delta)\n\n self._write_body(indent + self.indent_delta)\n\n self.write("</table>", indent)\n\n def _write_col_header(self, indent: int) -> None:\n row: list[Hashable]\n is_truncated_horizontally = self.fmt.is_truncated_horizontally\n if isinstance(self.columns, MultiIndex):\n template = 'colspan="{span:d}" halign="left"'\n\n sentinel: lib.NoDefault | bool\n if self.fmt.sparsify:\n # GH3547\n sentinel = lib.no_default\n else:\n sentinel = False\n levels = self.columns._format_multi(sparsify=sentinel, include_names=False)\n level_lengths = get_level_lengths(levels, sentinel)\n inner_lvl = len(level_lengths) - 1\n for lnum, (records, values) in enumerate(zip(level_lengths, levels)):\n if is_truncated_horizontally:\n # modify the header lines\n ins_col = self.fmt.tr_col_num\n if self.fmt.sparsify:\n recs_new = {}\n # Increment tags after ... 
col.\n for tag, span in list(records.items()):\n if tag >= ins_col:\n recs_new[tag + 1] = span\n elif tag + span > ins_col:\n recs_new[tag] = span + 1\n if lnum == inner_lvl:\n values = (\n values[:ins_col] + ("...",) + values[ins_col:]\n )\n else:\n # sparse col headers do not receive a ...\n values = (\n values[:ins_col]\n + (values[ins_col - 1],)\n + values[ins_col:]\n )\n else:\n recs_new[tag] = span\n # if ins_col lies between tags, all col headers\n # get ...\n if tag + span == ins_col:\n recs_new[ins_col] = 1\n values = values[:ins_col] + ("...",) + values[ins_col:]\n records = recs_new\n inner_lvl = len(level_lengths) - 1\n if lnum == inner_lvl:\n records[ins_col] = 1\n else:\n recs_new = {}\n for tag, span in list(records.items()):\n if tag >= ins_col:\n recs_new[tag + 1] = span\n else:\n recs_new[tag] = span\n recs_new[ins_col] = 1\n records = recs_new\n values = values[:ins_col] + ["..."] + values[ins_col:]\n\n # see gh-22579\n # Column Offset Bug with to_html(index=False) with\n # MultiIndex Columns and Index.\n # Initially fill row with blank cells before column names.\n # TODO: Refactor to remove code duplication with code\n # block below for standard columns index.\n row = [""] * (self.row_levels - 1)\n if self.fmt.index or self.show_col_idx_names:\n # see gh-22747\n # If to_html(index_names=False) do not show columns\n # index names.\n # TODO: Refactor to use _get_column_name_list from\n # DataFrameFormatter class and create a\n # _get_formatted_column_labels function for code\n # parity with DataFrameFormatter class.\n if self.fmt.show_index_names:\n name = self.columns.names[lnum]\n row.append(pprint_thing(name or ""))\n else:\n row.append("")\n\n tags = {}\n j = len(row)\n for i, v in enumerate(values):\n if i in records:\n if records[i] > 1:\n tags[j] = template.format(span=records[i])\n else:\n continue\n j += 1\n row.append(v)\n self.write_tr(row, indent, self.indent_delta, tags=tags, header=True)\n else:\n # see gh-22579\n # Column 
misalignment also occurs for\n # a standard index when the columns index is named.\n # Initially fill row with blank cells before column names.\n # TODO: Refactor to remove code duplication with code block\n # above for columns MultiIndex.\n row = [""] * (self.row_levels - 1)\n if self.fmt.index or self.show_col_idx_names:\n # see gh-22747\n # If to_html(index_names=False) do not show columns\n # index names.\n # TODO: Refactor to use _get_column_name_list from\n # DataFrameFormatter class.\n if self.fmt.show_index_names:\n row.append(self.columns.name or "")\n else:\n row.append("")\n row.extend(self._get_columns_formatted_values())\n align = self.fmt.justify\n\n if is_truncated_horizontally:\n ins_col = self.row_levels + self.fmt.tr_col_num\n row.insert(ins_col, "...")\n\n self.write_tr(row, indent, self.indent_delta, header=True, align=align)\n\n def _write_row_header(self, indent: int) -> None:\n is_truncated_horizontally = self.fmt.is_truncated_horizontally\n row = [x if x is not None else "" for x in self.frame.index.names] + [""] * (\n self.ncols + (1 if is_truncated_horizontally else 0)\n )\n self.write_tr(row, indent, self.indent_delta, header=True)\n\n def _write_header(self, indent: int) -> None:\n self.write("<thead>", indent)\n\n if self.fmt.header:\n self._write_col_header(indent + self.indent_delta)\n\n if self.show_row_idx_names:\n self._write_row_header(indent + self.indent_delta)\n\n self.write("</thead>", indent)\n\n def _get_formatted_values(self) -> dict[int, list[str]]:\n with option_context("display.max_colwidth", None):\n fmt_values = {i: self.fmt.format_col(i) for i in range(self.ncols)}\n return fmt_values\n\n def _write_body(self, indent: int) -> None:\n self.write("<tbody>", indent)\n fmt_values = self._get_formatted_values()\n\n # write values\n if self.fmt.index and isinstance(self.frame.index, MultiIndex):\n self._write_hierarchical_rows(fmt_values, indent + self.indent_delta)\n else:\n self._write_regular_rows(fmt_values, indent + 
self.indent_delta)\n\n self.write("</tbody>", indent)\n\n def _write_regular_rows(\n self, fmt_values: Mapping[int, list[str]], indent: int\n ) -> None:\n is_truncated_horizontally = self.fmt.is_truncated_horizontally\n is_truncated_vertically = self.fmt.is_truncated_vertically\n\n nrows = len(self.fmt.tr_frame)\n\n if self.fmt.index:\n fmt = self.fmt._get_formatter("__index__")\n if fmt is not None:\n index_values = self.fmt.tr_frame.index.map(fmt)\n else:\n # only reached with non-Multi index\n index_values = self.fmt.tr_frame.index._format_flat(include_name=False)\n\n row: list[str] = []\n for i in range(nrows):\n if is_truncated_vertically and i == (self.fmt.tr_row_num):\n str_sep_row = ["..."] * len(row)\n self.write_tr(\n str_sep_row,\n indent,\n self.indent_delta,\n tags=None,\n nindex_levels=self.row_levels,\n )\n\n row = []\n if self.fmt.index:\n row.append(index_values[i])\n # see gh-22579\n # Column misalignment also occurs for\n # a standard index when the columns index is named.\n # Add blank cell before data cells.\n elif self.show_col_idx_names:\n row.append("")\n row.extend(fmt_values[j][i] for j in range(self.ncols))\n\n if is_truncated_horizontally:\n dot_col_ix = self.fmt.tr_col_num + self.row_levels\n row.insert(dot_col_ix, "...")\n self.write_tr(\n row, indent, self.indent_delta, tags=None, nindex_levels=self.row_levels\n )\n\n def _write_hierarchical_rows(\n self, fmt_values: Mapping[int, list[str]], indent: int\n ) -> None:\n template = 'rowspan="{span}" valign="top"'\n\n is_truncated_horizontally = self.fmt.is_truncated_horizontally\n is_truncated_vertically = self.fmt.is_truncated_vertically\n frame = self.fmt.tr_frame\n nrows = len(frame)\n\n assert isinstance(frame.index, MultiIndex)\n idx_values = frame.index._format_multi(sparsify=False, include_names=False)\n idx_values = list(zip(*idx_values))\n\n if self.fmt.sparsify:\n # GH3547\n sentinel = lib.no_default\n levels = frame.index._format_multi(sparsify=sentinel, 
include_names=False)\n\n level_lengths = get_level_lengths(levels, sentinel)\n inner_lvl = len(level_lengths) - 1\n if is_truncated_vertically:\n # Insert ... row and adjust idx_values and\n # level_lengths to take this into account.\n ins_row = self.fmt.tr_row_num\n inserted = False\n for lnum, records in enumerate(level_lengths):\n rec_new = {}\n for tag, span in list(records.items()):\n if tag >= ins_row:\n rec_new[tag + 1] = span\n elif tag + span > ins_row:\n rec_new[tag] = span + 1\n\n # GH 14882 - Make sure insertion done once\n if not inserted:\n dot_row = list(idx_values[ins_row - 1])\n dot_row[-1] = "..."\n idx_values.insert(ins_row, tuple(dot_row))\n inserted = True\n else:\n dot_row = list(idx_values[ins_row])\n dot_row[inner_lvl - lnum] = "..."\n idx_values[ins_row] = tuple(dot_row)\n else:\n rec_new[tag] = span\n # If ins_row lies between tags, all cols idx cols\n # receive ...\n if tag + span == ins_row:\n rec_new[ins_row] = 1\n if lnum == 0:\n idx_values.insert(\n ins_row, tuple(["..."] * len(level_lengths))\n )\n\n # GH 14882 - Place ... 
in correct level\n elif inserted:\n dot_row = list(idx_values[ins_row])\n dot_row[inner_lvl - lnum] = "..."\n idx_values[ins_row] = tuple(dot_row)\n level_lengths[lnum] = rec_new\n\n level_lengths[inner_lvl][ins_row] = 1\n for ix_col in fmt_values:\n fmt_values[ix_col].insert(ins_row, "...")\n nrows += 1\n\n for i in range(nrows):\n row = []\n tags = {}\n\n sparse_offset = 0\n j = 0\n for records, v in zip(level_lengths, idx_values[i]):\n if i in records:\n if records[i] > 1:\n tags[j] = template.format(span=records[i])\n else:\n sparse_offset += 1\n continue\n\n j += 1\n row.append(v)\n\n row.extend(fmt_values[j][i] for j in range(self.ncols))\n if is_truncated_horizontally:\n row.insert(\n self.row_levels - sparse_offset + self.fmt.tr_col_num, "..."\n )\n self.write_tr(\n row,\n indent,\n self.indent_delta,\n tags=tags,\n nindex_levels=len(levels) - sparse_offset,\n )\n else:\n row = []\n for i in range(len(frame)):\n if is_truncated_vertically and i == (self.fmt.tr_row_num):\n str_sep_row = ["..."] * len(row)\n self.write_tr(\n str_sep_row,\n indent,\n self.indent_delta,\n tags=None,\n nindex_levels=self.row_levels,\n )\n\n idx_values = list(\n zip(*frame.index._format_multi(sparsify=False, include_names=False))\n )\n row = []\n row.extend(idx_values[i])\n row.extend(fmt_values[j][i] for j in range(self.ncols))\n if is_truncated_horizontally:\n row.insert(self.row_levels + self.fmt.tr_col_num, "...")\n self.write_tr(\n row,\n indent,\n self.indent_delta,\n tags=None,\n nindex_levels=frame.index.nlevels,\n )\n\n\nclass NotebookFormatter(HTMLFormatter):\n """\n Internal class for formatting output data in html for display in Jupyter\n Notebooks. 
This class is intended for functionality specific to\n DataFrame._repr_html_() and DataFrame.to_html(notebook=True)\n """\n\n def _get_formatted_values(self) -> dict[int, list[str]]:\n return {i: self.fmt.format_col(i) for i in range(self.ncols)}\n\n def _get_columns_formatted_values(self) -> list[str]:\n # only reached with non-Multi Index\n return self.columns._format_flat(include_name=False)\n\n def write_style(self) -> None:\n # We use the "scoped" attribute here so that the desired\n # style properties for the data frame are not then applied\n # throughout the entire notebook.\n template_first = """\\n <style scoped>"""\n template_last = """\\n </style>"""\n template_select = """\\n .dataframe %s {\n %s: %s;\n }"""\n element_props = [\n ("tbody tr th:only-of-type", "vertical-align", "middle"),\n ("tbody tr th", "vertical-align", "top"),\n ]\n if isinstance(self.columns, MultiIndex):\n element_props.append(("thead tr th", "text-align", "left"))\n if self.show_row_idx_names:\n element_props.append(\n ("thead tr:last-of-type th", "text-align", "right")\n )\n else:\n element_props.append(("thead th", "text-align", "right"))\n template_mid = "\n\n".join(template_select % t for t in element_props)\n template = dedent(f"{template_first}\n{template_mid}\n{template_last}")\n self.write(template)\n\n def render(self) -> list[str]:\n self.write("<div>")\n self.write_style()\n super().render()\n self.write("</div>")\n return self.elements\n
.venv\Lib\site-packages\pandas\io\formats\html.py
html.py
Python
24,165
0.95
0.221362
0.102151
python-kit
482
2023-10-26T15:55:28.397295
MIT
false
10980c5ac81289717d908979f0dea491
from __future__ import annotations\n\nfrom abc import (\n ABC,\n abstractmethod,\n)\nimport sys\nfrom textwrap import dedent\nfrom typing import TYPE_CHECKING\n\nfrom pandas._config import get_option\n\nfrom pandas.io.formats import format as fmt\nfrom pandas.io.formats.printing import pprint_thing\n\nif TYPE_CHECKING:\n from collections.abc import (\n Iterable,\n Iterator,\n Mapping,\n Sequence,\n )\n\n from pandas._typing import (\n Dtype,\n WriteBuffer,\n )\n\n from pandas import (\n DataFrame,\n Index,\n Series,\n )\n\n\nframe_max_cols_sub = dedent(\n """\\n max_cols : int, optional\n When to switch from the verbose to the truncated output. If the\n DataFrame has more than `max_cols` columns, the truncated output\n is used. By default, the setting in\n ``pandas.options.display.max_info_columns`` is used."""\n)\n\n\nshow_counts_sub = dedent(\n """\\n show_counts : bool, optional\n Whether to show the non-null counts. By default, this is shown\n only if the DataFrame is smaller than\n ``pandas.options.display.max_info_rows`` and\n ``pandas.options.display.max_info_columns``. A value of True always\n shows the counts, and False never shows the counts."""\n)\n\n\nframe_examples_sub = dedent(\n """\\n >>> int_values = [1, 2, 3, 4, 5]\n >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']\n >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]\n >>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,\n ... 
"float_col": float_values})\n >>> df\n int_col text_col float_col\n 0 1 alpha 0.00\n 1 2 beta 0.25\n 2 3 gamma 0.50\n 3 4 delta 0.75\n 4 5 epsilon 1.00\n\n Prints information of all columns:\n\n >>> df.info(verbose=True)\n <class 'pandas.core.frame.DataFrame'>\n RangeIndex: 5 entries, 0 to 4\n Data columns (total 3 columns):\n # Column Non-Null Count Dtype\n --- ------ -------------- -----\n 0 int_col 5 non-null int64\n 1 text_col 5 non-null object\n 2 float_col 5 non-null float64\n dtypes: float64(1), int64(1), object(1)\n memory usage: 248.0+ bytes\n\n Prints a summary of columns count and its dtypes but not per column\n information:\n\n >>> df.info(verbose=False)\n <class 'pandas.core.frame.DataFrame'>\n RangeIndex: 5 entries, 0 to 4\n Columns: 3 entries, int_col to float_col\n dtypes: float64(1), int64(1), object(1)\n memory usage: 248.0+ bytes\n\n Pipe output of DataFrame.info to buffer instead of sys.stdout, get\n buffer content and writes to a text file:\n\n >>> import io\n >>> buffer = io.StringIO()\n >>> df.info(buf=buffer)\n >>> s = buffer.getvalue()\n >>> with open("df_info.txt", "w",\n ... encoding="utf-8") as f: # doctest: +SKIP\n ... f.write(s)\n 260\n\n The `memory_usage` parameter allows deep introspection mode, specially\n useful for big DataFrames and fine-tune memory optimization:\n\n >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)\n >>> df = pd.DataFrame({\n ... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),\n ... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),\n ... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)\n ... 
})\n >>> df.info()\n <class 'pandas.core.frame.DataFrame'>\n RangeIndex: 1000000 entries, 0 to 999999\n Data columns (total 3 columns):\n # Column Non-Null Count Dtype\n --- ------ -------------- -----\n 0 column_1 1000000 non-null object\n 1 column_2 1000000 non-null object\n 2 column_3 1000000 non-null object\n dtypes: object(3)\n memory usage: 22.9+ MB\n\n >>> df.info(memory_usage='deep')\n <class 'pandas.core.frame.DataFrame'>\n RangeIndex: 1000000 entries, 0 to 999999\n Data columns (total 3 columns):\n # Column Non-Null Count Dtype\n --- ------ -------------- -----\n 0 column_1 1000000 non-null object\n 1 column_2 1000000 non-null object\n 2 column_3 1000000 non-null object\n dtypes: object(3)\n memory usage: 165.9 MB"""\n)\n\n\nframe_see_also_sub = dedent(\n """\\n DataFrame.describe: Generate descriptive statistics of DataFrame\n columns.\n DataFrame.memory_usage: Memory usage of DataFrame columns."""\n)\n\n\nframe_sub_kwargs = {\n "klass": "DataFrame",\n "type_sub": " and columns",\n "max_cols_sub": frame_max_cols_sub,\n "show_counts_sub": show_counts_sub,\n "examples_sub": frame_examples_sub,\n "see_also_sub": frame_see_also_sub,\n "version_added_sub": "",\n}\n\n\nseries_examples_sub = dedent(\n """\\n >>> int_values = [1, 2, 3, 4, 5]\n >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']\n >>> s = pd.Series(text_values, index=int_values)\n >>> s.info()\n <class 'pandas.core.series.Series'>\n Index: 5 entries, 1 to 5\n Series name: None\n Non-Null Count Dtype\n -------------- -----\n 5 non-null object\n dtypes: object(1)\n memory usage: 80.0+ bytes\n\n Prints a summary excluding information about its values:\n\n >>> s.info(verbose=False)\n <class 'pandas.core.series.Series'>\n Index: 5 entries, 1 to 5\n dtypes: object(1)\n memory usage: 80.0+ bytes\n\n Pipe output of Series.info to buffer instead of sys.stdout, get\n buffer content and writes to a text file:\n\n >>> import io\n >>> buffer = io.StringIO()\n >>> s.info(buf=buffer)\n >>> s = 
buffer.getvalue()\n >>> with open("df_info.txt", "w",\n ... encoding="utf-8") as f: # doctest: +SKIP\n ... f.write(s)\n 260\n\n The `memory_usage` parameter allows deep introspection mode, specially\n useful for big Series and fine-tune memory optimization:\n\n >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)\n >>> s = pd.Series(np.random.choice(['a', 'b', 'c'], 10 ** 6))\n >>> s.info()\n <class 'pandas.core.series.Series'>\n RangeIndex: 1000000 entries, 0 to 999999\n Series name: None\n Non-Null Count Dtype\n -------------- -----\n 1000000 non-null object\n dtypes: object(1)\n memory usage: 7.6+ MB\n\n >>> s.info(memory_usage='deep')\n <class 'pandas.core.series.Series'>\n RangeIndex: 1000000 entries, 0 to 999999\n Series name: None\n Non-Null Count Dtype\n -------------- -----\n 1000000 non-null object\n dtypes: object(1)\n memory usage: 55.3 MB"""\n)\n\n\nseries_see_also_sub = dedent(\n """\\n Series.describe: Generate descriptive statistics of Series.\n Series.memory_usage: Memory usage of Series."""\n)\n\n\nseries_sub_kwargs = {\n "klass": "Series",\n "type_sub": "",\n "max_cols_sub": "",\n "show_counts_sub": show_counts_sub,\n "examples_sub": series_examples_sub,\n "see_also_sub": series_see_also_sub,\n "version_added_sub": "\n.. versionadded:: 1.4.0\n",\n}\n\n\nINFO_DOCSTRING = dedent(\n """\n Print a concise summary of a {klass}.\n\n This method prints information about a {klass} including\n the index dtype{type_sub}, non-null values and memory usage.\n {version_added_sub}\\n\n Parameters\n ----------\n verbose : bool, optional\n Whether to print the full summary. By default, the setting in\n ``pandas.options.display.max_info_columns`` is followed.\n buf : writable buffer, defaults to sys.stdout\n Where to send the output. By default, the output is printed to\n sys.stdout. 
Pass a writable buffer if you need to further process\n the output.\n {max_cols_sub}\n memory_usage : bool, str, optional\n Specifies whether total memory usage of the {klass}\n elements (including the index) should be displayed. By default,\n this follows the ``pandas.options.display.memory_usage`` setting.\n\n True always show memory usage. False never shows memory usage.\n A value of 'deep' is equivalent to "True with deep introspection".\n Memory usage is shown in human-readable units (base-2\n representation). Without deep introspection a memory estimation is\n made based in column dtype and number of rows assuming values\n consume the same memory amount for corresponding dtypes. With deep\n memory introspection, a real memory usage calculation is performed\n at the cost of computational resources. See the\n :ref:`Frequently Asked Questions <df-memory-usage>` for more\n details.\n {show_counts_sub}\n\n Returns\n -------\n None\n This method prints a summary of a {klass} and returns None.\n\n See Also\n --------\n {see_also_sub}\n\n Examples\n --------\n {examples_sub}\n """\n)\n\n\ndef _put_str(s: str | Dtype, space: int) -> str:\n """\n Make string of specified length, padding to the right if necessary.\n\n Parameters\n ----------\n s : Union[str, Dtype]\n String to be formatted.\n space : int\n Length to force string to be of.\n\n Returns\n -------\n str\n String coerced to given length.\n\n Examples\n --------\n >>> pd.io.formats.info._put_str("panda", 6)\n 'panda '\n >>> pd.io.formats.info._put_str("panda", 4)\n 'pand'\n """\n return str(s)[:space].ljust(space)\n\n\ndef _sizeof_fmt(num: float, size_qualifier: str) -> str:\n """\n Return size in human readable format.\n\n Parameters\n ----------\n num : int\n Size in bytes.\n size_qualifier : str\n Either empty, or '+' (if lower bound).\n\n Returns\n -------\n str\n Size in human readable format.\n\n Examples\n --------\n >>> _sizeof_fmt(23028, '')\n '22.5 KB'\n\n >>> _sizeof_fmt(23028, '+')\n '22.5+ KB'\n 
"""\n for x in ["bytes", "KB", "MB", "GB", "TB"]:\n if num < 1024.0:\n return f"{num:3.1f}{size_qualifier} {x}"\n num /= 1024.0\n return f"{num:3.1f}{size_qualifier} PB"\n\n\ndef _initialize_memory_usage(\n memory_usage: bool | str | None = None,\n) -> bool | str:\n """Get memory usage based on inputs and display options."""\n if memory_usage is None:\n memory_usage = get_option("display.memory_usage")\n return memory_usage\n\n\nclass _BaseInfo(ABC):\n """\n Base class for DataFrameInfo and SeriesInfo.\n\n Parameters\n ----------\n data : DataFrame or Series\n Either dataframe or series.\n memory_usage : bool or str, optional\n If "deep", introspect the data deeply by interrogating object dtypes\n for system-level memory consumption, and include it in the returned\n values.\n """\n\n data: DataFrame | Series\n memory_usage: bool | str\n\n @property\n @abstractmethod\n def dtypes(self) -> Iterable[Dtype]:\n """\n Dtypes.\n\n Returns\n -------\n dtypes : sequence\n Dtype of each of the DataFrame's columns (or one series column).\n """\n\n @property\n @abstractmethod\n def dtype_counts(self) -> Mapping[str, int]:\n """Mapping dtype - number of counts."""\n\n @property\n @abstractmethod\n def non_null_counts(self) -> Sequence[int]:\n """Sequence of non-null counts for all columns or column (if series)."""\n\n @property\n @abstractmethod\n def memory_usage_bytes(self) -> int:\n """\n Memory usage in bytes.\n\n Returns\n -------\n memory_usage_bytes : int\n Object's total memory usage in bytes.\n """\n\n @property\n def memory_usage_string(self) -> str:\n """Memory usage in a form of human readable string."""\n return f"{_sizeof_fmt(self.memory_usage_bytes, self.size_qualifier)}\n"\n\n @property\n def size_qualifier(self) -> str:\n size_qualifier = ""\n if self.memory_usage:\n if self.memory_usage != "deep":\n # size_qualifier is just a best effort; not guaranteed to catch\n # all cases (e.g., it misses categorical data even with object\n # categories)\n if (\n "object" 
in self.dtype_counts\n or self.data.index._is_memory_usage_qualified()\n ):\n size_qualifier = "+"\n return size_qualifier\n\n @abstractmethod\n def render(\n self,\n *,\n buf: WriteBuffer[str] | None,\n max_cols: int | None,\n verbose: bool | None,\n show_counts: bool | None,\n ) -> None:\n pass\n\n\nclass DataFrameInfo(_BaseInfo):\n """\n Class storing dataframe-specific info.\n """\n\n def __init__(\n self,\n data: DataFrame,\n memory_usage: bool | str | None = None,\n ) -> None:\n self.data: DataFrame = data\n self.memory_usage = _initialize_memory_usage(memory_usage)\n\n @property\n def dtype_counts(self) -> Mapping[str, int]:\n return _get_dataframe_dtype_counts(self.data)\n\n @property\n def dtypes(self) -> Iterable[Dtype]:\n """\n Dtypes.\n\n Returns\n -------\n dtypes\n Dtype of each of the DataFrame's columns.\n """\n return self.data.dtypes\n\n @property\n def ids(self) -> Index:\n """\n Column names.\n\n Returns\n -------\n ids : Index\n DataFrame's column names.\n """\n return self.data.columns\n\n @property\n def col_count(self) -> int:\n """Number of columns to be summarized."""\n return len(self.ids)\n\n @property\n def non_null_counts(self) -> Sequence[int]:\n """Sequence of non-null counts for all columns or column (if series)."""\n return self.data.count()\n\n @property\n def memory_usage_bytes(self) -> int:\n deep = self.memory_usage == "deep"\n return self.data.memory_usage(index=True, deep=deep).sum()\n\n def render(\n self,\n *,\n buf: WriteBuffer[str] | None,\n max_cols: int | None,\n verbose: bool | None,\n show_counts: bool | None,\n ) -> None:\n printer = _DataFrameInfoPrinter(\n info=self,\n max_cols=max_cols,\n verbose=verbose,\n show_counts=show_counts,\n )\n printer.to_buffer(buf)\n\n\nclass SeriesInfo(_BaseInfo):\n """\n Class storing series-specific info.\n """\n\n def __init__(\n self,\n data: Series,\n memory_usage: bool | str | None = None,\n ) -> None:\n self.data: Series = data\n self.memory_usage = 
_initialize_memory_usage(memory_usage)\n\n def render(\n self,\n *,\n buf: WriteBuffer[str] | None = None,\n max_cols: int | None = None,\n verbose: bool | None = None,\n show_counts: bool | None = None,\n ) -> None:\n if max_cols is not None:\n raise ValueError(\n "Argument `max_cols` can only be passed "\n "in DataFrame.info, not Series.info"\n )\n printer = _SeriesInfoPrinter(\n info=self,\n verbose=verbose,\n show_counts=show_counts,\n )\n printer.to_buffer(buf)\n\n @property\n def non_null_counts(self) -> Sequence[int]:\n return [self.data.count()]\n\n @property\n def dtypes(self) -> Iterable[Dtype]:\n return [self.data.dtypes]\n\n @property\n def dtype_counts(self) -> Mapping[str, int]:\n from pandas.core.frame import DataFrame\n\n return _get_dataframe_dtype_counts(DataFrame(self.data))\n\n @property\n def memory_usage_bytes(self) -> int:\n """Memory usage in bytes.\n\n Returns\n -------\n memory_usage_bytes : int\n Object's total memory usage in bytes.\n """\n deep = self.memory_usage == "deep"\n return self.data.memory_usage(index=True, deep=deep)\n\n\nclass _InfoPrinterAbstract:\n """\n Class for printing dataframe or series info.\n """\n\n def to_buffer(self, buf: WriteBuffer[str] | None = None) -> None:\n """Save dataframe info into buffer."""\n table_builder = self._create_table_builder()\n lines = table_builder.get_lines()\n if buf is None: # pragma: no cover\n buf = sys.stdout\n fmt.buffer_put_lines(buf, lines)\n\n @abstractmethod\n def _create_table_builder(self) -> _TableBuilderAbstract:\n """Create instance of table builder."""\n\n\nclass _DataFrameInfoPrinter(_InfoPrinterAbstract):\n """\n Class for printing dataframe info.\n\n Parameters\n ----------\n info : DataFrameInfo\n Instance of DataFrameInfo.\n max_cols : int, optional\n When to switch from the verbose to the truncated output.\n verbose : bool, optional\n Whether to print the full summary.\n show_counts : bool, optional\n Whether to show the non-null counts.\n """\n\n def __init__(\n 
self,\n info: DataFrameInfo,\n max_cols: int | None = None,\n verbose: bool | None = None,\n show_counts: bool | None = None,\n ) -> None:\n self.info = info\n self.data = info.data\n self.verbose = verbose\n self.max_cols = self._initialize_max_cols(max_cols)\n self.show_counts = self._initialize_show_counts(show_counts)\n\n @property\n def max_rows(self) -> int:\n """Maximum info rows to be displayed."""\n return get_option("display.max_info_rows", len(self.data) + 1)\n\n @property\n def exceeds_info_cols(self) -> bool:\n """Check if number of columns to be summarized does not exceed maximum."""\n return bool(self.col_count > self.max_cols)\n\n @property\n def exceeds_info_rows(self) -> bool:\n """Check if number of rows to be summarized does not exceed maximum."""\n return bool(len(self.data) > self.max_rows)\n\n @property\n def col_count(self) -> int:\n """Number of columns to be summarized."""\n return self.info.col_count\n\n def _initialize_max_cols(self, max_cols: int | None) -> int:\n if max_cols is None:\n return get_option("display.max_info_columns", self.col_count + 1)\n return max_cols\n\n def _initialize_show_counts(self, show_counts: bool | None) -> bool:\n if show_counts is None:\n return bool(not self.exceeds_info_cols and not self.exceeds_info_rows)\n else:\n return show_counts\n\n def _create_table_builder(self) -> _DataFrameTableBuilder:\n """\n Create instance of table builder based on verbosity and display settings.\n """\n if self.verbose:\n return _DataFrameTableBuilderVerbose(\n info=self.info,\n with_counts=self.show_counts,\n )\n elif self.verbose is False: # specifically set to False, not necessarily None\n return _DataFrameTableBuilderNonVerbose(info=self.info)\n elif self.exceeds_info_cols:\n return _DataFrameTableBuilderNonVerbose(info=self.info)\n else:\n return _DataFrameTableBuilderVerbose(\n info=self.info,\n with_counts=self.show_counts,\n )\n\n\nclass _SeriesInfoPrinter(_InfoPrinterAbstract):\n """Class for printing series 
info.\n\n Parameters\n ----------\n info : SeriesInfo\n Instance of SeriesInfo.\n verbose : bool, optional\n Whether to print the full summary.\n show_counts : bool, optional\n Whether to show the non-null counts.\n """\n\n def __init__(\n self,\n info: SeriesInfo,\n verbose: bool | None = None,\n show_counts: bool | None = None,\n ) -> None:\n self.info = info\n self.data = info.data\n self.verbose = verbose\n self.show_counts = self._initialize_show_counts(show_counts)\n\n def _create_table_builder(self) -> _SeriesTableBuilder:\n """\n Create instance of table builder based on verbosity.\n """\n if self.verbose or self.verbose is None:\n return _SeriesTableBuilderVerbose(\n info=self.info,\n with_counts=self.show_counts,\n )\n else:\n return _SeriesTableBuilderNonVerbose(info=self.info)\n\n def _initialize_show_counts(self, show_counts: bool | None) -> bool:\n if show_counts is None:\n return True\n else:\n return show_counts\n\n\nclass _TableBuilderAbstract(ABC):\n """\n Abstract builder for info table.\n """\n\n _lines: list[str]\n info: _BaseInfo\n\n @abstractmethod\n def get_lines(self) -> list[str]:\n """Product in a form of list of lines (strings)."""\n\n @property\n def data(self) -> DataFrame | Series:\n return self.info.data\n\n @property\n def dtypes(self) -> Iterable[Dtype]:\n """Dtypes of each of the DataFrame's columns."""\n return self.info.dtypes\n\n @property\n def dtype_counts(self) -> Mapping[str, int]:\n """Mapping dtype - number of counts."""\n return self.info.dtype_counts\n\n @property\n def display_memory_usage(self) -> bool:\n """Whether to display memory usage."""\n return bool(self.info.memory_usage)\n\n @property\n def memory_usage_string(self) -> str:\n """Memory usage string with proper size qualifier."""\n return self.info.memory_usage_string\n\n @property\n def non_null_counts(self) -> Sequence[int]:\n return self.info.non_null_counts\n\n def add_object_type_line(self) -> None:\n """Add line with string representation of dataframe 
to the table."""\n self._lines.append(str(type(self.data)))\n\n def add_index_range_line(self) -> None:\n """Add line with range of indices to the table."""\n self._lines.append(self.data.index._summary())\n\n def add_dtypes_line(self) -> None:\n """Add summary line with dtypes present in dataframe."""\n collected_dtypes = [\n f"{key}({val:d})" for key, val in sorted(self.dtype_counts.items())\n ]\n self._lines.append(f"dtypes: {', '.join(collected_dtypes)}")\n\n\nclass _DataFrameTableBuilder(_TableBuilderAbstract):\n """\n Abstract builder for dataframe info table.\n\n Parameters\n ----------\n info : DataFrameInfo.\n Instance of DataFrameInfo.\n """\n\n def __init__(self, *, info: DataFrameInfo) -> None:\n self.info: DataFrameInfo = info\n\n def get_lines(self) -> list[str]:\n self._lines = []\n if self.col_count == 0:\n self._fill_empty_info()\n else:\n self._fill_non_empty_info()\n return self._lines\n\n def _fill_empty_info(self) -> None:\n """Add lines to the info table, pertaining to empty dataframe."""\n self.add_object_type_line()\n self.add_index_range_line()\n self._lines.append(f"Empty {type(self.data).__name__}\n")\n\n @abstractmethod\n def _fill_non_empty_info(self) -> None:\n """Add lines to the info table, pertaining to non-empty dataframe."""\n\n @property\n def data(self) -> DataFrame:\n """DataFrame."""\n return self.info.data\n\n @property\n def ids(self) -> Index:\n """Dataframe columns."""\n return self.info.ids\n\n @property\n def col_count(self) -> int:\n """Number of dataframe columns to be summarized."""\n return self.info.col_count\n\n def add_memory_usage_line(self) -> None:\n """Add line containing memory usage."""\n self._lines.append(f"memory usage: {self.memory_usage_string}")\n\n\nclass _DataFrameTableBuilderNonVerbose(_DataFrameTableBuilder):\n """\n Dataframe info table builder for non-verbose output.\n """\n\n def _fill_non_empty_info(self) -> None:\n """Add lines to the info table, pertaining to non-empty dataframe."""\n 
self.add_object_type_line()\n self.add_index_range_line()\n self.add_columns_summary_line()\n self.add_dtypes_line()\n if self.display_memory_usage:\n self.add_memory_usage_line()\n\n def add_columns_summary_line(self) -> None:\n self._lines.append(self.ids._summary(name="Columns"))\n\n\nclass _TableBuilderVerboseMixin(_TableBuilderAbstract):\n """\n Mixin for verbose info output.\n """\n\n SPACING: str = " " * 2\n strrows: Sequence[Sequence[str]]\n gross_column_widths: Sequence[int]\n with_counts: bool\n\n @property\n @abstractmethod\n def headers(self) -> Sequence[str]:\n """Headers names of the columns in verbose table."""\n\n @property\n def header_column_widths(self) -> Sequence[int]:\n """Widths of header columns (only titles)."""\n return [len(col) for col in self.headers]\n\n def _get_gross_column_widths(self) -> Sequence[int]:\n """Get widths of columns containing both headers and actual content."""\n body_column_widths = self._get_body_column_widths()\n return [\n max(*widths)\n for widths in zip(self.header_column_widths, body_column_widths)\n ]\n\n def _get_body_column_widths(self) -> Sequence[int]:\n """Get widths of table content columns."""\n strcols: Sequence[Sequence[str]] = list(zip(*self.strrows))\n return [max(len(x) for x in col) for col in strcols]\n\n def _gen_rows(self) -> Iterator[Sequence[str]]:\n """\n Generator function yielding rows content.\n\n Each element represents a row comprising a sequence of strings.\n """\n if self.with_counts:\n return self._gen_rows_with_counts()\n else:\n return self._gen_rows_without_counts()\n\n @abstractmethod\n def _gen_rows_with_counts(self) -> Iterator[Sequence[str]]:\n """Iterator with string representation of body data with counts."""\n\n @abstractmethod\n def _gen_rows_without_counts(self) -> Iterator[Sequence[str]]:\n """Iterator with string representation of body data without counts."""\n\n def add_header_line(self) -> None:\n header_line = self.SPACING.join(\n [\n _put_str(header, col_width)\n 
for header, col_width in zip(self.headers, self.gross_column_widths)\n ]\n )\n self._lines.append(header_line)\n\n def add_separator_line(self) -> None:\n separator_line = self.SPACING.join(\n [\n _put_str("-" * header_colwidth, gross_colwidth)\n for header_colwidth, gross_colwidth in zip(\n self.header_column_widths, self.gross_column_widths\n )\n ]\n )\n self._lines.append(separator_line)\n\n def add_body_lines(self) -> None:\n for row in self.strrows:\n body_line = self.SPACING.join(\n [\n _put_str(col, gross_colwidth)\n for col, gross_colwidth in zip(row, self.gross_column_widths)\n ]\n )\n self._lines.append(body_line)\n\n def _gen_non_null_counts(self) -> Iterator[str]:\n """Iterator with string representation of non-null counts."""\n for count in self.non_null_counts:\n yield f"{count} non-null"\n\n def _gen_dtypes(self) -> Iterator[str]:\n """Iterator with string representation of column dtypes."""\n for dtype in self.dtypes:\n yield pprint_thing(dtype)\n\n\nclass _DataFrameTableBuilderVerbose(_DataFrameTableBuilder, _TableBuilderVerboseMixin):\n """\n Dataframe info table builder for verbose output.\n """\n\n def __init__(\n self,\n *,\n info: DataFrameInfo,\n with_counts: bool,\n ) -> None:\n self.info = info\n self.with_counts = with_counts\n self.strrows: Sequence[Sequence[str]] = list(self._gen_rows())\n self.gross_column_widths: Sequence[int] = self._get_gross_column_widths()\n\n def _fill_non_empty_info(self) -> None:\n """Add lines to the info table, pertaining to non-empty dataframe."""\n self.add_object_type_line()\n self.add_index_range_line()\n self.add_columns_summary_line()\n self.add_header_line()\n self.add_separator_line()\n self.add_body_lines()\n self.add_dtypes_line()\n if self.display_memory_usage:\n self.add_memory_usage_line()\n\n @property\n def headers(self) -> Sequence[str]:\n """Headers names of the columns in verbose table."""\n if self.with_counts:\n return [" # ", "Column", "Non-Null Count", "Dtype"]\n return [" # ", "Column", 
"Dtype"]\n\n def add_columns_summary_line(self) -> None:\n self._lines.append(f"Data columns (total {self.col_count} columns):")\n\n def _gen_rows_without_counts(self) -> Iterator[Sequence[str]]:\n """Iterator with string representation of body data without counts."""\n yield from zip(\n self._gen_line_numbers(),\n self._gen_columns(),\n self._gen_dtypes(),\n )\n\n def _gen_rows_with_counts(self) -> Iterator[Sequence[str]]:\n """Iterator with string representation of body data with counts."""\n yield from zip(\n self._gen_line_numbers(),\n self._gen_columns(),\n self._gen_non_null_counts(),\n self._gen_dtypes(),\n )\n\n def _gen_line_numbers(self) -> Iterator[str]:\n """Iterator with string representation of column numbers."""\n for i, _ in enumerate(self.ids):\n yield f" {i}"\n\n def _gen_columns(self) -> Iterator[str]:\n """Iterator with string representation of column names."""\n for col in self.ids:\n yield pprint_thing(col)\n\n\nclass _SeriesTableBuilder(_TableBuilderAbstract):\n """\n Abstract builder for series info table.\n\n Parameters\n ----------\n info : SeriesInfo.\n Instance of SeriesInfo.\n """\n\n def __init__(self, *, info: SeriesInfo) -> None:\n self.info: SeriesInfo = info\n\n def get_lines(self) -> list[str]:\n self._lines = []\n self._fill_non_empty_info()\n return self._lines\n\n @property\n def data(self) -> Series:\n """Series."""\n return self.info.data\n\n def add_memory_usage_line(self) -> None:\n """Add line containing memory usage."""\n self._lines.append(f"memory usage: {self.memory_usage_string}")\n\n @abstractmethod\n def _fill_non_empty_info(self) -> None:\n """Add lines to the info table, pertaining to non-empty series."""\n\n\nclass _SeriesTableBuilderNonVerbose(_SeriesTableBuilder):\n """\n Series info table builder for non-verbose output.\n """\n\n def _fill_non_empty_info(self) -> None:\n """Add lines to the info table, pertaining to non-empty series."""\n self.add_object_type_line()\n self.add_index_range_line()\n 
self.add_dtypes_line()\n if self.display_memory_usage:\n self.add_memory_usage_line()\n\n\nclass _SeriesTableBuilderVerbose(_SeriesTableBuilder, _TableBuilderVerboseMixin):\n """\n Series info table builder for verbose output.\n """\n\n def __init__(\n self,\n *,\n info: SeriesInfo,\n with_counts: bool,\n ) -> None:\n self.info = info\n self.with_counts = with_counts\n self.strrows: Sequence[Sequence[str]] = list(self._gen_rows())\n self.gross_column_widths: Sequence[int] = self._get_gross_column_widths()\n\n def _fill_non_empty_info(self) -> None:\n """Add lines to the info table, pertaining to non-empty series."""\n self.add_object_type_line()\n self.add_index_range_line()\n self.add_series_name_line()\n self.add_header_line()\n self.add_separator_line()\n self.add_body_lines()\n self.add_dtypes_line()\n if self.display_memory_usage:\n self.add_memory_usage_line()\n\n def add_series_name_line(self) -> None:\n self._lines.append(f"Series name: {self.data.name}")\n\n @property\n def headers(self) -> Sequence[str]:\n """Headers names of the columns in verbose table."""\n if self.with_counts:\n return ["Non-Null Count", "Dtype"]\n return ["Dtype"]\n\n def _gen_rows_without_counts(self) -> Iterator[Sequence[str]]:\n """Iterator with string representation of body data without counts."""\n yield from self._gen_dtypes()\n\n def _gen_rows_with_counts(self) -> Iterator[Sequence[str]]:\n """Iterator with string representation of body data with counts."""\n yield from zip(\n self._gen_non_null_counts(),\n self._gen_dtypes(),\n )\n\n\ndef _get_dataframe_dtype_counts(df: DataFrame) -> Mapping[str, int]:\n """\n Create mapping between datatypes and their number of occurrences.\n """\n # groupby dtype.name to collect e.g. Categorical columns\n return df.dtypes.value_counts().groupby(lambda x: x.name).sum()\n
.venv\Lib\site-packages\pandas\io\formats\info.py
info.py
Python
32,621
0.95
0.162579
0.013172
awesome-app
717
2024-12-21T16:18:28.766278
BSD-3-Clause
false
feea8b4a45ee5862100c81427e4fe797
"""\nPrinting tools.\n"""\nfrom __future__ import annotations\n\nfrom collections.abc import (\n Iterable,\n Mapping,\n Sequence,\n)\nimport sys\nfrom typing import (\n Any,\n Callable,\n TypeVar,\n Union,\n)\nfrom unicodedata import east_asian_width\n\nfrom pandas._config import get_option\n\nfrom pandas.core.dtypes.inference import is_sequence\n\nfrom pandas.io.formats.console import get_console_size\n\nEscapeChars = Union[Mapping[str, str], Iterable[str]]\n_KT = TypeVar("_KT")\n_VT = TypeVar("_VT")\n\n\ndef adjoin(space: int, *lists: list[str], **kwargs) -> str:\n """\n Glues together two sets of strings using the amount of space requested.\n The idea is to prettify.\n\n ----------\n space : int\n number of spaces for padding\n lists : str\n list of str which being joined\n strlen : callable\n function used to calculate the length of each str. Needed for unicode\n handling.\n justfunc : callable\n function used to justify str. Needed for unicode handling.\n """\n strlen = kwargs.pop("strlen", len)\n justfunc = kwargs.pop("justfunc", _adj_justify)\n\n newLists = []\n lengths = [max(map(strlen, x)) + space for x in lists[:-1]]\n # not the last one\n lengths.append(max(map(len, lists[-1])))\n maxLen = max(map(len, lists))\n for i, lst in enumerate(lists):\n nl = justfunc(lst, lengths[i], mode="left")\n nl = ([" " * lengths[i]] * (maxLen - len(lst))) + nl\n newLists.append(nl)\n toJoin = zip(*newLists)\n return "\n".join("".join(lines) for lines in toJoin)\n\n\ndef _adj_justify(texts: Iterable[str], max_len: int, mode: str = "right") -> list[str]:\n """\n Perform ljust, center, rjust against string or list-like\n """\n if mode == "left":\n return [x.ljust(max_len) for x in texts]\n elif mode == "center":\n return [x.center(max_len) for x in texts]\n else:\n return [x.rjust(max_len) for x in texts]\n\n\n# Unicode consolidation\n# ---------------------\n#\n# pprinting utility functions for generating Unicode text or\n# bytes(3.x)/str(2.x) representations of 
objects.\n# Try to use these as much as possible rather than rolling your own.\n#\n# When to use\n# -----------\n#\n# 1) If you're writing code internal to pandas (no I/O directly involved),\n# use pprint_thing().\n#\n# It will always return unicode text which can handled by other\n# parts of the package without breakage.\n#\n# 2) if you need to write something out to file, use\n# pprint_thing_encoded(encoding).\n#\n# If no encoding is specified, it defaults to utf-8. Since encoding pure\n# ascii with utf-8 is a no-op you can safely use the default utf-8 if you're\n# working with straight ascii.\n\n\ndef _pprint_seq(\n seq: Sequence, _nest_lvl: int = 0, max_seq_items: int | None = None, **kwds\n) -> str:\n """\n internal. pprinter for iterables. you should probably use pprint_thing()\n rather than calling this directly.\n\n bounds length of printed sequence, depending on options\n """\n if isinstance(seq, set):\n fmt = "{{{body}}}"\n else:\n fmt = "[{body}]" if hasattr(seq, "__setitem__") else "({body})"\n\n if max_seq_items is False:\n nitems = len(seq)\n else:\n nitems = max_seq_items or get_option("max_seq_items") or len(seq)\n\n s = iter(seq)\n # handle sets, no slicing\n r = [\n pprint_thing(next(s), _nest_lvl + 1, max_seq_items=max_seq_items, **kwds)\n for i in range(min(nitems, len(seq)))\n ]\n body = ", ".join(r)\n\n if nitems < len(seq):\n body += ", ..."\n elif isinstance(seq, tuple) and len(seq) == 1:\n body += ","\n\n return fmt.format(body=body)\n\n\ndef _pprint_dict(\n seq: Mapping, _nest_lvl: int = 0, max_seq_items: int | None = None, **kwds\n) -> str:\n """\n internal. pprinter for iterables. 
you should probably use pprint_thing()\n rather than calling this directly.\n """\n fmt = "{{{things}}}"\n pairs = []\n\n pfmt = "{key}: {val}"\n\n if max_seq_items is False:\n nitems = len(seq)\n else:\n nitems = max_seq_items or get_option("max_seq_items") or len(seq)\n\n for k, v in list(seq.items())[:nitems]:\n pairs.append(\n pfmt.format(\n key=pprint_thing(k, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds),\n val=pprint_thing(v, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds),\n )\n )\n\n if nitems < len(seq):\n return fmt.format(things=", ".join(pairs) + ", ...")\n else:\n return fmt.format(things=", ".join(pairs))\n\n\ndef pprint_thing(\n thing: Any,\n _nest_lvl: int = 0,\n escape_chars: EscapeChars | None = None,\n default_escapes: bool = False,\n quote_strings: bool = False,\n max_seq_items: int | None = None,\n) -> str:\n """\n This function is the sanctioned way of converting objects\n to a string representation and properly handles nested sequences.\n\n Parameters\n ----------\n thing : anything to be formatted\n _nest_lvl : internal use only. pprint_thing() is mutually-recursive\n with pprint_sequence, this argument is used to keep track of the\n current nesting level, and limit it.\n escape_chars : list or dict, optional\n Characters to escape. 
If a dict is passed the values are the\n replacements\n default_escapes : bool, default False\n Whether the input escape characters replaces or adds to the defaults\n max_seq_items : int or None, default None\n Pass through to other pretty printers to limit sequence printing\n\n Returns\n -------\n str\n """\n\n def as_escaped_string(\n thing: Any, escape_chars: EscapeChars | None = escape_chars\n ) -> str:\n translate = {"\t": r"\t", "\n": r"\n", "\r": r"\r"}\n if isinstance(escape_chars, dict):\n if default_escapes:\n translate.update(escape_chars)\n else:\n translate = escape_chars\n escape_chars = list(escape_chars.keys())\n else:\n escape_chars = escape_chars or ()\n\n result = str(thing)\n for c in escape_chars:\n result = result.replace(c, translate[c])\n return result\n\n if hasattr(thing, "__next__"):\n return str(thing)\n elif isinstance(thing, dict) and _nest_lvl < get_option(\n "display.pprint_nest_depth"\n ):\n result = _pprint_dict(\n thing, _nest_lvl, quote_strings=True, max_seq_items=max_seq_items\n )\n elif is_sequence(thing) and _nest_lvl < get_option("display.pprint_nest_depth"):\n result = _pprint_seq(\n thing,\n _nest_lvl,\n escape_chars=escape_chars,\n quote_strings=quote_strings,\n max_seq_items=max_seq_items,\n )\n elif isinstance(thing, str) and quote_strings:\n result = f"'{as_escaped_string(thing)}'"\n else:\n result = as_escaped_string(thing)\n\n return result\n\n\ndef pprint_thing_encoded(\n object, encoding: str = "utf-8", errors: str = "replace"\n) -> bytes:\n value = pprint_thing(object) # get unicode representation of object\n return value.encode(encoding, errors)\n\n\ndef enable_data_resource_formatter(enable: bool) -> None:\n if "IPython" not in sys.modules:\n # definitely not in IPython\n return\n from IPython import get_ipython\n\n ip = get_ipython()\n if ip is None:\n # still not in IPython\n return\n\n formatters = ip.display_formatter.formatters\n mimetype = "application/vnd.dataresource+json"\n\n if enable:\n if mimetype not 
in formatters:\n # define tableschema formatter\n from IPython.core.formatters import BaseFormatter\n from traitlets import ObjectName\n\n class TableSchemaFormatter(BaseFormatter):\n print_method = ObjectName("_repr_data_resource_")\n _return_type = (dict,)\n\n # register it:\n formatters[mimetype] = TableSchemaFormatter()\n # enable it if it's been disabled:\n formatters[mimetype].enabled = True\n # unregister tableschema mime-type\n elif mimetype in formatters:\n formatters[mimetype].enabled = False\n\n\ndef default_pprint(thing: Any, max_seq_items: int | None = None) -> str:\n return pprint_thing(\n thing,\n escape_chars=("\t", "\r", "\n"),\n quote_strings=True,\n max_seq_items=max_seq_items,\n )\n\n\ndef format_object_summary(\n obj,\n formatter: Callable,\n is_justify: bool = True,\n name: str | None = None,\n indent_for_name: bool = True,\n line_break_each_value: bool = False,\n) -> str:\n """\n Return the formatted obj as a unicode string\n\n Parameters\n ----------\n obj : object\n must be iterable and support __getitem__\n formatter : callable\n string formatter for an element\n is_justify : bool\n should justify the display\n name : name, optional\n defaults to the class name of the obj\n indent_for_name : bool, default True\n Whether subsequent lines should be indented to\n align with the name.\n line_break_each_value : bool, default False\n If True, inserts a line break for each value of ``obj``.\n If False, only break lines when the a line of values gets wider\n than the display width.\n\n Returns\n -------\n summary string\n """\n display_width, _ = get_console_size()\n if display_width is None:\n display_width = get_option("display.width") or 80\n if name is None:\n name = type(obj).__name__\n\n if indent_for_name:\n name_len = len(name)\n space1 = f'\n{(" " * (name_len + 1))}'\n space2 = f'\n{(" " * (name_len + 2))}'\n else:\n space1 = "\n"\n space2 = "\n " # space for the opening '['\n\n n = len(obj)\n if line_break_each_value:\n # If we want to 
vertically align on each value of obj, we need to\n # separate values by a line break and indent the values\n sep = ",\n " + " " * len(name)\n else:\n sep = ","\n max_seq_items = get_option("display.max_seq_items") or n\n\n # are we a truncated display\n is_truncated = n > max_seq_items\n\n # adj can optionally handle unicode eastern asian width\n adj = get_adjustment()\n\n def _extend_line(\n s: str, line: str, value: str, display_width: int, next_line_prefix: str\n ) -> tuple[str, str]:\n if adj.len(line.rstrip()) + adj.len(value.rstrip()) >= display_width:\n s += line.rstrip()\n line = next_line_prefix\n line += value\n return s, line\n\n def best_len(values: list[str]) -> int:\n if values:\n return max(adj.len(x) for x in values)\n else:\n return 0\n\n close = ", "\n\n if n == 0:\n summary = f"[]{close}"\n elif n == 1 and not line_break_each_value:\n first = formatter(obj[0])\n summary = f"[{first}]{close}"\n elif n == 2 and not line_break_each_value:\n first = formatter(obj[0])\n last = formatter(obj[-1])\n summary = f"[{first}, {last}]{close}"\n else:\n if max_seq_items == 1:\n # If max_seq_items=1 show only last element\n head = []\n tail = [formatter(x) for x in obj[-1:]]\n elif n > max_seq_items:\n n = min(max_seq_items // 2, 10)\n head = [formatter(x) for x in obj[:n]]\n tail = [formatter(x) for x in obj[-n:]]\n else:\n head = []\n tail = [formatter(x) for x in obj]\n\n # adjust all values to max length if needed\n if is_justify:\n if line_break_each_value:\n # Justify each string in the values of head and tail, so the\n # strings will right align when head and tail are stacked\n # vertically.\n head, tail = _justify(head, tail)\n elif is_truncated or not (\n len(", ".join(head)) < display_width\n and len(", ".join(tail)) < display_width\n ):\n # Each string in head and tail should align with each other\n max_length = max(best_len(head), best_len(tail))\n head = [x.rjust(max_length) for x in head]\n tail = [x.rjust(max_length) for x in tail]\n # If we are 
not truncated and we are only a single\n # line, then don't justify\n\n if line_break_each_value:\n # Now head and tail are of type List[Tuple[str]]. Below we\n # convert them into List[str], so there will be one string per\n # value. Also truncate items horizontally if wider than\n # max_space\n max_space = display_width - len(space2)\n value = tail[0]\n max_items = 1\n for num_items in reversed(range(1, len(value) + 1)):\n pprinted_seq = _pprint_seq(value, max_seq_items=num_items)\n if len(pprinted_seq) < max_space:\n max_items = num_items\n break\n head = [_pprint_seq(x, max_seq_items=max_items) for x in head]\n tail = [_pprint_seq(x, max_seq_items=max_items) for x in tail]\n\n summary = ""\n line = space2\n\n for head_value in head:\n word = head_value + sep + " "\n summary, line = _extend_line(summary, line, word, display_width, space2)\n\n if is_truncated:\n # remove trailing space of last line\n summary += line.rstrip() + space2 + "..."\n line = space2\n\n for tail_item in tail[:-1]:\n word = tail_item + sep + " "\n summary, line = _extend_line(summary, line, word, display_width, space2)\n\n # last value: no sep added + 1 space of width used for trailing ','\n summary, line = _extend_line(summary, line, tail[-1], display_width - 2, space2)\n summary += line\n\n # right now close is either '' or ', '\n # Now we want to include the ']', but not the maybe space.\n close = "]" + close.rstrip(" ")\n summary += close\n\n if len(summary) > (display_width) or line_break_each_value:\n summary += space1\n else: # one row\n summary += " "\n\n # remove initial space\n summary = "[" + summary[len(space2) :]\n\n return summary\n\n\ndef _justify(\n head: list[Sequence[str]], tail: list[Sequence[str]]\n) -> tuple[list[tuple[str, ...]], list[tuple[str, ...]]]:\n """\n Justify items in head and tail, so they are right-aligned when stacked.\n\n Parameters\n ----------\n head : list-like of list-likes of strings\n tail : list-like of list-likes of strings\n\n Returns\n 
-------\n tuple of list of tuples of strings\n Same as head and tail, but items are right aligned when stacked\n vertically.\n\n Examples\n --------\n >>> _justify([['a', 'b']], [['abc', 'abcd']])\n ([(' a', ' b')], [('abc', 'abcd')])\n """\n combined = head + tail\n\n # For each position for the sequences in ``combined``,\n # find the length of the largest string.\n max_length = [0] * len(combined[0])\n for inner_seq in combined:\n length = [len(item) for item in inner_seq]\n max_length = [max(x, y) for x, y in zip(max_length, length)]\n\n # justify each item in each list-like in head and tail using max_length\n head_tuples = [\n tuple(x.rjust(max_len) for x, max_len in zip(seq, max_length)) for seq in head\n ]\n tail_tuples = [\n tuple(x.rjust(max_len) for x, max_len in zip(seq, max_length)) for seq in tail\n ]\n return head_tuples, tail_tuples\n\n\nclass PrettyDict(dict[_KT, _VT]):\n """Dict extension to support abbreviated __repr__"""\n\n def __repr__(self) -> str:\n return pprint_thing(self)\n\n\nclass _TextAdjustment:\n def __init__(self) -> None:\n self.encoding = get_option("display.encoding")\n\n def len(self, text: str) -> int:\n return len(text)\n\n def justify(self, texts: Any, max_len: int, mode: str = "right") -> list[str]:\n """\n Perform ljust, center, rjust against string or list-like\n """\n if mode == "left":\n return [x.ljust(max_len) for x in texts]\n elif mode == "center":\n return [x.center(max_len) for x in texts]\n else:\n return [x.rjust(max_len) for x in texts]\n\n def adjoin(self, space: int, *lists, **kwargs) -> str:\n return adjoin(space, *lists, strlen=self.len, justfunc=self.justify, **kwargs)\n\n\nclass _EastAsianTextAdjustment(_TextAdjustment):\n def __init__(self) -> None:\n super().__init__()\n if get_option("display.unicode.ambiguous_as_wide"):\n self.ambiguous_width = 2\n else:\n self.ambiguous_width = 1\n\n # Definition of East Asian Width\n # https://unicode.org/reports/tr11/\n # Ambiguous width can be changed by option\n 
self._EAW_MAP = {"Na": 1, "N": 1, "W": 2, "F": 2, "H": 1}\n\n def len(self, text: str) -> int:\n """\n Calculate display width considering unicode East Asian Width\n """\n if not isinstance(text, str):\n return len(text)\n\n return sum(\n self._EAW_MAP.get(east_asian_width(c), self.ambiguous_width) for c in text\n )\n\n def justify(\n self, texts: Iterable[str], max_len: int, mode: str = "right"\n ) -> list[str]:\n # re-calculate padding space per str considering East Asian Width\n def _get_pad(t):\n return max_len - self.len(t) + len(t)\n\n if mode == "left":\n return [x.ljust(_get_pad(x)) for x in texts]\n elif mode == "center":\n return [x.center(_get_pad(x)) for x in texts]\n else:\n return [x.rjust(_get_pad(x)) for x in texts]\n\n\ndef get_adjustment() -> _TextAdjustment:\n use_east_asian_width = get_option("display.unicode.east_asian_width")\n if use_east_asian_width:\n return _EastAsianTextAdjustment()\n else:\n return _TextAdjustment()\n
.venv\Lib\site-packages\pandas\io\formats\printing.py
printing.py
Python
17,950
0.95
0.201049
0.120833
node-utils
197
2023-12-06T19:42:46.452902
BSD-3-Clause
false
b67743f9e6a722e371368f76c02c8dec
"""\nModule for formatting output data in console (to string).\n"""\nfrom __future__ import annotations\n\nfrom shutil import get_terminal_size\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\n\nfrom pandas.io.formats.printing import pprint_thing\n\nif TYPE_CHECKING:\n from collections.abc import Iterable\n\n from pandas.io.formats.format import DataFrameFormatter\n\n\nclass StringFormatter:\n """Formatter for string representation of a dataframe."""\n\n def __init__(self, fmt: DataFrameFormatter, line_width: int | None = None) -> None:\n self.fmt = fmt\n self.adj = fmt.adj\n self.frame = fmt.frame\n self.line_width = line_width\n\n def to_string(self) -> str:\n text = self._get_string_representation()\n if self.fmt.should_show_dimensions:\n text = f"{text}{self.fmt.dimensions_info}"\n return text\n\n def _get_strcols(self) -> list[list[str]]:\n strcols = self.fmt.get_strcols()\n if self.fmt.is_truncated:\n strcols = self._insert_dot_separators(strcols)\n return strcols\n\n def _get_string_representation(self) -> str:\n if self.fmt.frame.empty:\n return self._empty_info_line\n\n strcols = self._get_strcols()\n\n if self.line_width is None:\n # no need to wrap around just print the whole frame\n return self.adj.adjoin(1, *strcols)\n\n if self._need_to_wrap_around:\n return self._join_multiline(strcols)\n\n return self._fit_strcols_to_terminal_width(strcols)\n\n @property\n def _empty_info_line(self) -> str:\n return (\n f"Empty {type(self.frame).__name__}\n"\n f"Columns: {pprint_thing(self.frame.columns)}\n"\n f"Index: {pprint_thing(self.frame.index)}"\n )\n\n @property\n def _need_to_wrap_around(self) -> bool:\n return bool(self.fmt.max_cols is None or self.fmt.max_cols > 0)\n\n def _insert_dot_separators(self, strcols: list[list[str]]) -> list[list[str]]:\n str_index = self.fmt._get_formatted_index(self.fmt.tr_frame)\n index_length = len(str_index)\n\n if self.fmt.is_truncated_horizontally:\n strcols = self._insert_dot_separator_horizontal(strcols, 
index_length)\n\n if self.fmt.is_truncated_vertically:\n strcols = self._insert_dot_separator_vertical(strcols, index_length)\n\n return strcols\n\n @property\n def _adjusted_tr_col_num(self) -> int:\n return self.fmt.tr_col_num + 1 if self.fmt.index else self.fmt.tr_col_num\n\n def _insert_dot_separator_horizontal(\n self, strcols: list[list[str]], index_length: int\n ) -> list[list[str]]:\n strcols.insert(self._adjusted_tr_col_num, [" ..."] * index_length)\n return strcols\n\n def _insert_dot_separator_vertical(\n self, strcols: list[list[str]], index_length: int\n ) -> list[list[str]]:\n n_header_rows = index_length - len(self.fmt.tr_frame)\n row_num = self.fmt.tr_row_num\n for ix, col in enumerate(strcols):\n cwidth = self.adj.len(col[row_num])\n\n if self.fmt.is_truncated_horizontally:\n is_dot_col = ix == self._adjusted_tr_col_num\n else:\n is_dot_col = False\n\n if cwidth > 3 or is_dot_col:\n dots = "..."\n else:\n dots = ".."\n\n if ix == 0 and self.fmt.index:\n dot_mode = "left"\n elif is_dot_col:\n cwidth = 4\n dot_mode = "right"\n else:\n dot_mode = "right"\n\n dot_str = self.adj.justify([dots], cwidth, mode=dot_mode)[0]\n col.insert(row_num + n_header_rows, dot_str)\n return strcols\n\n def _join_multiline(self, strcols_input: Iterable[list[str]]) -> str:\n lwidth = self.line_width\n adjoin_width = 1\n strcols = list(strcols_input)\n\n if self.fmt.index:\n idx = strcols.pop(0)\n lwidth -= np.array([self.adj.len(x) for x in idx]).max() + adjoin_width\n\n col_widths = [\n np.array([self.adj.len(x) for x in col]).max() if len(col) > 0 else 0\n for col in strcols\n ]\n\n assert lwidth is not None\n col_bins = _binify(col_widths, lwidth)\n nbins = len(col_bins)\n\n str_lst = []\n start = 0\n for i, end in enumerate(col_bins):\n row = strcols[start:end]\n if self.fmt.index:\n row.insert(0, idx)\n if nbins > 1:\n nrows = len(row[-1])\n if end <= len(strcols) and i < nbins - 1:\n row.append([" \\"] + [" "] * (nrows - 1))\n else:\n row.append([" "] * nrows)\n 
str_lst.append(self.adj.adjoin(adjoin_width, *row))\n start = end\n return "\n\n".join(str_lst)\n\n def _fit_strcols_to_terminal_width(self, strcols: list[list[str]]) -> str:\n from pandas import Series\n\n lines = self.adj.adjoin(1, *strcols).split("\n")\n max_len = Series(lines).str.len().max()\n # plus truncate dot col\n width, _ = get_terminal_size()\n dif = max_len - width\n # '+ 1' to avoid too wide repr (GH PR #17023)\n adj_dif = dif + 1\n col_lens = Series([Series(ele).str.len().max() for ele in strcols])\n n_cols = len(col_lens)\n counter = 0\n while adj_dif > 0 and n_cols > 1:\n counter += 1\n mid = round(n_cols / 2)\n mid_ix = col_lens.index[mid]\n col_len = col_lens[mid_ix]\n # adjoin adds one\n adj_dif -= col_len + 1\n col_lens = col_lens.drop(mid_ix)\n n_cols = len(col_lens)\n\n # subtract index column\n max_cols_fitted = n_cols - self.fmt.index\n # GH-21180. Ensure that we print at least two.\n max_cols_fitted = max(max_cols_fitted, 2)\n self.fmt.max_cols_fitted = max_cols_fitted\n\n # Call again _truncate to cut frame appropriately\n # and then generate string representation\n self.fmt.truncate()\n strcols = self._get_strcols()\n return self.adj.adjoin(1, *strcols)\n\n\ndef _binify(cols: list[int], line_width: int) -> list[int]:\n adjoin_width = 1\n bins = []\n curr_width = 0\n i_last_column = len(cols) - 1\n for i, w in enumerate(cols):\n w_adjoined = w + adjoin_width\n curr_width += w_adjoined\n if i_last_column == i:\n wrap = curr_width + 1 > line_width and i > 0\n else:\n wrap = curr_width + 2 > line_width and i > 0\n if wrap:\n bins.append(i)\n curr_width = w_adjoined\n\n bins.append(len(cols))\n return bins\n
.venv\Lib\site-packages\pandas\io\formats\string.py
string.py
Python
6,707
0.95
0.208738
0.048193
awesome-app
901
2024-06-13T01:55:00.185547
BSD-3-Clause
false
a3678b6e6e960988aba150fd809d319f
from __future__ import annotations\n\nfrom collections import defaultdict\nfrom collections.abc import Sequence\nfrom functools import partial\nimport re\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n DefaultDict,\n Optional,\n TypedDict,\n Union,\n)\nfrom uuid import uuid4\n\nimport numpy as np\n\nfrom pandas._config import get_option\n\nfrom pandas._libs import lib\nfrom pandas.compat._optional import import_optional_dependency\n\nfrom pandas.core.dtypes.common import (\n is_complex,\n is_float,\n is_integer,\n)\nfrom pandas.core.dtypes.generic import ABCSeries\n\nfrom pandas import (\n DataFrame,\n Index,\n IndexSlice,\n MultiIndex,\n Series,\n isna,\n)\nfrom pandas.api.types import is_list_like\nimport pandas.core.common as com\n\nif TYPE_CHECKING:\n from pandas._typing import (\n Axis,\n Level,\n )\njinja2 = import_optional_dependency("jinja2", extra="DataFrame.style requires jinja2.")\nfrom markupsafe import escape as escape_html # markupsafe is jinja2 dependency\n\nBaseFormatter = Union[str, Callable]\nExtFormatter = Union[BaseFormatter, dict[Any, Optional[BaseFormatter]]]\nCSSPair = tuple[str, Union[str, float]]\nCSSList = list[CSSPair]\nCSSProperties = Union[str, CSSList]\n\n\nclass CSSDict(TypedDict):\n selector: str\n props: CSSProperties\n\n\nCSSStyles = list[CSSDict]\nSubset = Union[slice, Sequence, Index]\n\n\nclass StylerRenderer:\n """\n Base class to process rendering a Styler with a specified jinja2 template.\n """\n\n loader = jinja2.PackageLoader("pandas", "io/formats/templates")\n env = jinja2.Environment(loader=loader, trim_blocks=True)\n template_html = env.get_template("html.tpl")\n template_html_table = env.get_template("html_table.tpl")\n template_html_style = env.get_template("html_style.tpl")\n template_latex = env.get_template("latex.tpl")\n template_string = env.get_template("string.tpl")\n\n def __init__(\n self,\n data: DataFrame | Series,\n uuid: str | None = None,\n uuid_len: int = 5,\n table_styles: CSSStyles | None = 
None,\n table_attributes: str | None = None,\n caption: str | tuple | list | None = None,\n cell_ids: bool = True,\n precision: int | None = None,\n ) -> None:\n # validate ordered args\n if isinstance(data, Series):\n data = data.to_frame()\n if not isinstance(data, DataFrame):\n raise TypeError("``data`` must be a Series or DataFrame")\n self.data: DataFrame = data\n self.index: Index = data.index\n self.columns: Index = data.columns\n if not isinstance(uuid_len, int) or uuid_len < 0:\n raise TypeError("``uuid_len`` must be an integer in range [0, 32].")\n self.uuid = uuid or uuid4().hex[: min(32, uuid_len)]\n self.uuid_len = len(self.uuid)\n self.table_styles = table_styles\n self.table_attributes = table_attributes\n self.caption = caption\n self.cell_ids = cell_ids\n self.css = {\n "row_heading": "row_heading",\n "col_heading": "col_heading",\n "index_name": "index_name",\n "col": "col",\n "row": "row",\n "col_trim": "col_trim",\n "row_trim": "row_trim",\n "level": "level",\n "data": "data",\n "blank": "blank",\n "foot": "foot",\n }\n self.concatenated: list[StylerRenderer] = []\n # add rendering variables\n self.hide_index_names: bool = False\n self.hide_column_names: bool = False\n self.hide_index_: list = [False] * self.index.nlevels\n self.hide_columns_: list = [False] * self.columns.nlevels\n self.hidden_rows: Sequence[int] = [] # sequence for specific hidden rows/cols\n self.hidden_columns: Sequence[int] = []\n self.ctx: DefaultDict[tuple[int, int], CSSList] = defaultdict(list)\n self.ctx_index: DefaultDict[tuple[int, int], CSSList] = defaultdict(list)\n self.ctx_columns: DefaultDict[tuple[int, int], CSSList] = defaultdict(list)\n self.cell_context: DefaultDict[tuple[int, int], str] = defaultdict(str)\n self._todo: list[tuple[Callable, tuple, dict]] = []\n self.tooltips: Tooltips | None = None\n precision = (\n get_option("styler.format.precision") if precision is None else precision\n )\n self._display_funcs: DefaultDict[ # maps (row, col) -> format 
func\n tuple[int, int], Callable[[Any], str]\n ] = defaultdict(lambda: partial(_default_formatter, precision=precision))\n self._display_funcs_index: DefaultDict[ # maps (row, level) -> format func\n tuple[int, int], Callable[[Any], str]\n ] = defaultdict(lambda: partial(_default_formatter, precision=precision))\n self._display_funcs_columns: DefaultDict[ # maps (level, col) -> format func\n tuple[int, int], Callable[[Any], str]\n ] = defaultdict(lambda: partial(_default_formatter, precision=precision))\n\n def _render(\n self,\n sparse_index: bool,\n sparse_columns: bool,\n max_rows: int | None = None,\n max_cols: int | None = None,\n blank: str = "",\n ):\n """\n Computes and applies styles and then generates the general render dicts.\n\n Also extends the `ctx` and `ctx_index` attributes with those of concatenated\n stylers for use within `_translate_latex`\n """\n self._compute()\n dxs = []\n ctx_len = len(self.index)\n for i, concatenated in enumerate(self.concatenated):\n concatenated.hide_index_ = self.hide_index_\n concatenated.hidden_columns = self.hidden_columns\n foot = f"{self.css['foot']}{i}"\n concatenated.css = {\n **self.css,\n "data": f"{foot}_data",\n "row_heading": f"{foot}_row_heading",\n "row": f"{foot}_row",\n "foot": f"{foot}_foot",\n }\n dx = concatenated._render(\n sparse_index, sparse_columns, max_rows, max_cols, blank\n )\n dxs.append(dx)\n\n for (r, c), v in concatenated.ctx.items():\n self.ctx[(r + ctx_len, c)] = v\n for (r, c), v in concatenated.ctx_index.items():\n self.ctx_index[(r + ctx_len, c)] = v\n\n ctx_len += len(concatenated.index)\n\n d = self._translate(\n sparse_index, sparse_columns, max_rows, max_cols, blank, dxs\n )\n return d\n\n def _render_html(\n self,\n sparse_index: bool,\n sparse_columns: bool,\n max_rows: int | None = None,\n max_cols: int | None = None,\n **kwargs,\n ) -> str:\n """\n Renders the ``Styler`` including all applied styles to HTML.\n Generates a dict with necessary kwargs passed to jinja2 template.\n 
"""\n d = self._render(sparse_index, sparse_columns, max_rows, max_cols, "&nbsp;")\n d.update(kwargs)\n return self.template_html.render(\n **d,\n html_table_tpl=self.template_html_table,\n html_style_tpl=self.template_html_style,\n )\n\n def _render_latex(\n self, sparse_index: bool, sparse_columns: bool, clines: str | None, **kwargs\n ) -> str:\n """\n Render a Styler in latex format\n """\n d = self._render(sparse_index, sparse_columns, None, None)\n self._translate_latex(d, clines=clines)\n self.template_latex.globals["parse_wrap"] = _parse_latex_table_wrapping\n self.template_latex.globals["parse_table"] = _parse_latex_table_styles\n self.template_latex.globals["parse_cell"] = _parse_latex_cell_styles\n self.template_latex.globals["parse_header"] = _parse_latex_header_span\n d.update(kwargs)\n return self.template_latex.render(**d)\n\n def _render_string(\n self,\n sparse_index: bool,\n sparse_columns: bool,\n max_rows: int | None = None,\n max_cols: int | None = None,\n **kwargs,\n ) -> str:\n """\n Render a Styler in string format\n """\n d = self._render(sparse_index, sparse_columns, max_rows, max_cols)\n d.update(kwargs)\n return self.template_string.render(**d)\n\n def _compute(self):\n """\n Execute the style functions built up in `self._todo`.\n\n Relies on the conventions that all style functions go through\n .apply or .map. 
The append styles to apply as tuples of\n\n (application method, *args, **kwargs)\n """\n self.ctx.clear()\n self.ctx_index.clear()\n self.ctx_columns.clear()\n r = self\n for func, args, kwargs in self._todo:\n r = func(self)(*args, **kwargs)\n return r\n\n def _translate(\n self,\n sparse_index: bool,\n sparse_cols: bool,\n max_rows: int | None = None,\n max_cols: int | None = None,\n blank: str = "&nbsp;",\n dxs: list[dict] | None = None,\n ):\n """\n Process Styler data and settings into a dict for template rendering.\n\n Convert data and settings from ``Styler`` attributes such as ``self.data``,\n ``self.tooltips`` including applying any methods in ``self._todo``.\n\n Parameters\n ----------\n sparse_index : bool\n Whether to sparsify the index or print all hierarchical index elements.\n Upstream defaults are typically to `pandas.options.styler.sparse.index`.\n sparse_cols : bool\n Whether to sparsify the columns or print all hierarchical column elements.\n Upstream defaults are typically to `pandas.options.styler.sparse.columns`.\n max_rows, max_cols : int, optional\n Specific max rows and cols. 
max_elements always take precedence in render.\n blank : str\n Entry to top-left blank cells.\n dxs : list[dict]\n The render dicts of the concatenated Stylers.\n\n Returns\n -------\n d : dict\n The following structure: {uuid, table_styles, caption, head, body,\n cellstyle, table_attributes}\n """\n if dxs is None:\n dxs = []\n self.css["blank_value"] = blank\n\n # construct render dict\n d = {\n "uuid": self.uuid,\n "table_styles": format_table_styles(self.table_styles or []),\n "caption": self.caption,\n }\n\n max_elements = get_option("styler.render.max_elements")\n max_rows = max_rows if max_rows else get_option("styler.render.max_rows")\n max_cols = max_cols if max_cols else get_option("styler.render.max_columns")\n max_rows, max_cols = _get_trimming_maximums(\n len(self.data.index),\n len(self.data.columns),\n max_elements,\n max_rows,\n max_cols,\n )\n\n self.cellstyle_map_columns: DefaultDict[\n tuple[CSSPair, ...], list[str]\n ] = defaultdict(list)\n head = self._translate_header(sparse_cols, max_cols)\n d.update({"head": head})\n\n # for sparsifying a MultiIndex and for use with latex clines\n idx_lengths = _get_level_lengths(\n self.index, sparse_index, max_rows, self.hidden_rows\n )\n d.update({"index_lengths": idx_lengths})\n\n self.cellstyle_map: DefaultDict[tuple[CSSPair, ...], list[str]] = defaultdict(\n list\n )\n self.cellstyle_map_index: DefaultDict[\n tuple[CSSPair, ...], list[str]\n ] = defaultdict(list)\n body: list = self._translate_body(idx_lengths, max_rows, max_cols)\n d.update({"body": body})\n\n ctx_maps = {\n "cellstyle": "cellstyle_map",\n "cellstyle_index": "cellstyle_map_index",\n "cellstyle_columns": "cellstyle_map_columns",\n } # add the cell_ids styles map to the render dictionary in right format\n for k, attr in ctx_maps.items():\n map = [\n {"props": list(props), "selectors": selectors}\n for props, selectors in getattr(self, attr).items()\n ]\n d.update({k: map})\n\n for dx in dxs: # self.concatenated is not empty\n 
d["body"].extend(dx["body"]) # type: ignore[union-attr]\n d["cellstyle"].extend(dx["cellstyle"]) # type: ignore[union-attr]\n d["cellstyle_index"].extend( # type: ignore[union-attr]\n dx["cellstyle_index"]\n )\n\n table_attr = self.table_attributes\n if not get_option("styler.html.mathjax"):\n table_attr = table_attr or ""\n if 'class="' in table_attr:\n table_attr = table_attr.replace('class="', 'class="tex2jax_ignore ')\n else:\n table_attr += ' class="tex2jax_ignore"'\n d.update({"table_attributes": table_attr})\n\n if self.tooltips:\n d = self.tooltips._translate(self, d)\n\n return d\n\n def _translate_header(self, sparsify_cols: bool, max_cols: int):\n """\n Build each <tr> within table <head> as a list\n\n Using the structure:\n +----------------------------+---------------+---------------------------+\n | index_blanks ... | column_name_0 | column_headers (level_0) |\n 1) | .. | .. | .. |\n | index_blanks ... | column_name_n | column_headers (level_n) |\n +----------------------------+---------------+---------------------------+\n 2) | index_names (level_0 to level_n) ... | column_blanks ... |\n +----------------------------+---------------+---------------------------+\n\n Parameters\n ----------\n sparsify_cols : bool\n Whether column_headers section will add colspan attributes (>1) to elements.\n max_cols : int\n Maximum number of columns to render. 
If exceeded will contain `...` filler.\n\n Returns\n -------\n head : list\n The associated HTML elements needed for template rendering.\n """\n # for sparsifying a MultiIndex\n col_lengths = _get_level_lengths(\n self.columns, sparsify_cols, max_cols, self.hidden_columns\n )\n\n clabels = self.data.columns.tolist()\n if self.data.columns.nlevels == 1:\n clabels = [[x] for x in clabels]\n clabels = list(zip(*clabels))\n\n head = []\n # 1) column headers\n for r, hide in enumerate(self.hide_columns_):\n if hide or not clabels:\n continue\n\n header_row = self._generate_col_header_row(\n (r, clabels), max_cols, col_lengths\n )\n head.append(header_row)\n\n # 2) index names\n if (\n self.data.index.names\n and com.any_not_none(*self.data.index.names)\n and not all(self.hide_index_)\n and not self.hide_index_names\n ):\n index_names_row = self._generate_index_names_row(\n clabels, max_cols, col_lengths\n )\n head.append(index_names_row)\n\n return head\n\n def _generate_col_header_row(\n self, iter: Sequence, max_cols: int, col_lengths: dict\n ):\n """\n Generate the row containing column headers:\n\n +----------------------------+---------------+---------------------------+\n | index_blanks ... 
| column_name_i | column_headers (level_i) |\n +----------------------------+---------------+---------------------------+\n\n Parameters\n ----------\n iter : tuple\n Looping variables from outer scope\n max_cols : int\n Permissible number of columns\n col_lengths :\n c\n\n Returns\n -------\n list of elements\n """\n\n r, clabels = iter\n\n # number of index blanks is governed by number of hidden index levels\n index_blanks = [\n _element("th", self.css["blank"], self.css["blank_value"], True)\n ] * (self.index.nlevels - sum(self.hide_index_) - 1)\n\n name = self.data.columns.names[r]\n column_name = [\n _element(\n "th",\n (\n f"{self.css['blank']} {self.css['level']}{r}"\n if name is None\n else f"{self.css['index_name']} {self.css['level']}{r}"\n ),\n name\n if (name is not None and not self.hide_column_names)\n else self.css["blank_value"],\n not all(self.hide_index_),\n )\n ]\n\n column_headers: list = []\n visible_col_count: int = 0\n for c, value in enumerate(clabels[r]):\n header_element_visible = _is_visible(c, r, col_lengths)\n if header_element_visible:\n visible_col_count += col_lengths.get((r, c), 0)\n if self._check_trim(\n visible_col_count,\n max_cols,\n column_headers,\n "th",\n f"{self.css['col_heading']} {self.css['level']}{r} "\n f"{self.css['col_trim']}",\n ):\n break\n\n header_element = _element(\n "th",\n (\n f"{self.css['col_heading']} {self.css['level']}{r} "\n f"{self.css['col']}{c}"\n ),\n value,\n header_element_visible,\n display_value=self._display_funcs_columns[(r, c)](value),\n attributes=(\n f'colspan="{col_lengths.get((r, c), 0)}"'\n if col_lengths.get((r, c), 0) > 1\n else ""\n ),\n )\n\n if self.cell_ids:\n header_element["id"] = f"{self.css['level']}{r}_{self.css['col']}{c}"\n if (\n header_element_visible\n and (r, c) in self.ctx_columns\n and self.ctx_columns[r, c]\n ):\n header_element["id"] = f"{self.css['level']}{r}_{self.css['col']}{c}"\n self.cellstyle_map_columns[tuple(self.ctx_columns[r, c])].append(\n 
f"{self.css['level']}{r}_{self.css['col']}{c}"\n )\n\n column_headers.append(header_element)\n\n return index_blanks + column_name + column_headers\n\n def _generate_index_names_row(\n self, iter: Sequence, max_cols: int, col_lengths: dict\n ):\n """\n Generate the row containing index names\n\n +----------------------------+---------------+---------------------------+\n | index_names (level_0 to level_n) ... | column_blanks ... |\n +----------------------------+---------------+---------------------------+\n\n Parameters\n ----------\n iter : tuple\n Looping variables from outer scope\n max_cols : int\n Permissible number of columns\n\n Returns\n -------\n list of elements\n """\n\n clabels = iter\n\n index_names = [\n _element(\n "th",\n f"{self.css['index_name']} {self.css['level']}{c}",\n self.css["blank_value"] if name is None else name,\n not self.hide_index_[c],\n )\n for c, name in enumerate(self.data.index.names)\n ]\n\n column_blanks: list = []\n visible_col_count: int = 0\n if clabels:\n last_level = self.columns.nlevels - 1 # use last level since never sparsed\n for c, value in enumerate(clabels[last_level]):\n header_element_visible = _is_visible(c, last_level, col_lengths)\n if header_element_visible:\n visible_col_count += 1\n if self._check_trim(\n visible_col_count,\n max_cols,\n column_blanks,\n "th",\n f"{self.css['blank']} {self.css['col']}{c} {self.css['col_trim']}",\n self.css["blank_value"],\n ):\n break\n\n column_blanks.append(\n _element(\n "th",\n f"{self.css['blank']} {self.css['col']}{c}",\n self.css["blank_value"],\n c not in self.hidden_columns,\n )\n )\n\n return index_names + column_blanks\n\n def _translate_body(self, idx_lengths: dict, max_rows: int, max_cols: int):\n """\n Build each <tr> within table <body> as a list\n\n Use the following structure:\n +--------------------------------------------+---------------------------+\n | index_header_0 ... index_header_n | data_by_column ... 
|\n +--------------------------------------------+---------------------------+\n\n Also add elements to the cellstyle_map for more efficient grouped elements in\n <style></style> block\n\n Parameters\n ----------\n sparsify_index : bool\n Whether index_headers section will add rowspan attributes (>1) to elements.\n\n Returns\n -------\n body : list\n The associated HTML elements needed for template rendering.\n """\n rlabels = self.data.index.tolist()\n if not isinstance(self.data.index, MultiIndex):\n rlabels = [[x] for x in rlabels]\n\n body: list = []\n visible_row_count: int = 0\n for r, row_tup in [\n z for z in enumerate(self.data.itertuples()) if z[0] not in self.hidden_rows\n ]:\n visible_row_count += 1\n if self._check_trim(\n visible_row_count,\n max_rows,\n body,\n "row",\n ):\n break\n\n body_row = self._generate_body_row(\n (r, row_tup, rlabels), max_cols, idx_lengths\n )\n body.append(body_row)\n return body\n\n def _check_trim(\n self,\n count: int,\n max: int,\n obj: list,\n element: str,\n css: str | None = None,\n value: str = "...",\n ) -> bool:\n """\n Indicates whether to break render loops and append a trimming indicator\n\n Parameters\n ----------\n count : int\n The loop count of previous visible items.\n max : int\n The allowable rendered items in the loop.\n obj : list\n The current render collection of the rendered items.\n element : str\n The type of element to append in the case a trimming indicator is needed.\n css : str, optional\n The css to add to the trimming indicator element.\n value : str, optional\n The value of the elements display if necessary.\n\n Returns\n -------\n result : bool\n Whether a trimming element was required and appended.\n """\n if count > max:\n if element == "row":\n obj.append(self._generate_trimmed_row(max))\n else:\n obj.append(_element(element, css, value, True, attributes=""))\n return True\n return False\n\n def _generate_trimmed_row(self, max_cols: int) -> list:\n """\n When a render has too many rows 
we generate a trimming row containing "..."\n\n Parameters\n ----------\n max_cols : int\n Number of permissible columns\n\n Returns\n -------\n list of elements\n """\n index_headers = [\n _element(\n "th",\n (\n f"{self.css['row_heading']} {self.css['level']}{c} "\n f"{self.css['row_trim']}"\n ),\n "...",\n not self.hide_index_[c],\n attributes="",\n )\n for c in range(self.data.index.nlevels)\n ]\n\n data: list = []\n visible_col_count: int = 0\n for c, _ in enumerate(self.columns):\n data_element_visible = c not in self.hidden_columns\n if data_element_visible:\n visible_col_count += 1\n if self._check_trim(\n visible_col_count,\n max_cols,\n data,\n "td",\n f"{self.css['data']} {self.css['row_trim']} {self.css['col_trim']}",\n ):\n break\n\n data.append(\n _element(\n "td",\n f"{self.css['data']} {self.css['col']}{c} {self.css['row_trim']}",\n "...",\n data_element_visible,\n attributes="",\n )\n )\n\n return index_headers + data\n\n def _generate_body_row(\n self,\n iter: tuple,\n max_cols: int,\n idx_lengths: dict,\n ):\n """\n Generate a regular row for the body section of appropriate format.\n\n +--------------------------------------------+---------------------------+\n | index_header_0 ... index_header_n | data_by_column ... 
|\n +--------------------------------------------+---------------------------+\n\n Parameters\n ----------\n iter : tuple\n Iterable from outer scope: row number, row data tuple, row index labels.\n max_cols : int\n Number of permissible columns.\n idx_lengths : dict\n A map of the sparsification structure of the index\n\n Returns\n -------\n list of elements\n """\n r, row_tup, rlabels = iter\n\n index_headers = []\n for c, value in enumerate(rlabels[r]):\n header_element_visible = (\n _is_visible(r, c, idx_lengths) and not self.hide_index_[c]\n )\n header_element = _element(\n "th",\n (\n f"{self.css['row_heading']} {self.css['level']}{c} "\n f"{self.css['row']}{r}"\n ),\n value,\n header_element_visible,\n display_value=self._display_funcs_index[(r, c)](value),\n attributes=(\n f'rowspan="{idx_lengths.get((c, r), 0)}"'\n if idx_lengths.get((c, r), 0) > 1\n else ""\n ),\n )\n\n if self.cell_ids:\n header_element[\n "id"\n ] = f"{self.css['level']}{c}_{self.css['row']}{r}" # id is given\n if (\n header_element_visible\n and (r, c) in self.ctx_index\n and self.ctx_index[r, c]\n ):\n # always add id if a style is specified\n header_element["id"] = f"{self.css['level']}{c}_{self.css['row']}{r}"\n self.cellstyle_map_index[tuple(self.ctx_index[r, c])].append(\n f"{self.css['level']}{c}_{self.css['row']}{r}"\n )\n\n index_headers.append(header_element)\n\n data: list = []\n visible_col_count: int = 0\n for c, value in enumerate(row_tup[1:]):\n data_element_visible = (\n c not in self.hidden_columns and r not in self.hidden_rows\n )\n if data_element_visible:\n visible_col_count += 1\n if self._check_trim(\n visible_col_count,\n max_cols,\n data,\n "td",\n f"{self.css['data']} {self.css['row']}{r} {self.css['col_trim']}",\n ):\n break\n\n # add custom classes from cell context\n cls = ""\n if (r, c) in self.cell_context:\n cls = " " + self.cell_context[r, c]\n\n data_element = _element(\n "td",\n (\n f"{self.css['data']} {self.css['row']}{r} "\n 
f"{self.css['col']}{c}{cls}"\n ),\n value,\n data_element_visible,\n attributes="",\n display_value=self._display_funcs[(r, c)](value),\n )\n\n if self.cell_ids:\n data_element["id"] = f"{self.css['row']}{r}_{self.css['col']}{c}"\n if data_element_visible and (r, c) in self.ctx and self.ctx[r, c]:\n # always add id if needed due to specified style\n data_element["id"] = f"{self.css['row']}{r}_{self.css['col']}{c}"\n self.cellstyle_map[tuple(self.ctx[r, c])].append(\n f"{self.css['row']}{r}_{self.css['col']}{c}"\n )\n\n data.append(data_element)\n\n return index_headers + data\n\n def _translate_latex(self, d: dict, clines: str | None) -> None:\n r"""\n Post-process the default render dict for the LaTeX template format.\n\n Processing items included are:\n - Remove hidden columns from the non-headers part of the body.\n - Place cellstyles directly in td cells rather than use cellstyle_map.\n - Remove hidden indexes or reinsert missing th elements if part of multiindex\n or multirow sparsification (so that \multirow and \multicol work correctly).\n """\n index_levels = self.index.nlevels\n visible_index_level_n = index_levels - sum(self.hide_index_)\n d["head"] = [\n [\n {**col, "cellstyle": self.ctx_columns[r, c - visible_index_level_n]}\n for c, col in enumerate(row)\n if col["is_visible"]\n ]\n for r, row in enumerate(d["head"])\n ]\n\n def _concatenated_visible_rows(obj, n, row_indices):\n """\n Extract all visible row indices recursively from concatenated stylers.\n """\n row_indices.extend(\n [r + n for r in range(len(obj.index)) if r not in obj.hidden_rows]\n )\n n += len(obj.index)\n for concatenated in obj.concatenated:\n n = _concatenated_visible_rows(concatenated, n, row_indices)\n return n\n\n def concatenated_visible_rows(obj):\n row_indices: list[int] = []\n _concatenated_visible_rows(obj, 0, row_indices)\n # TODO try to consolidate the concat visible rows\n # methods to a single function / recursion for simplicity\n return row_indices\n\n body = []\n 
for r, row in zip(concatenated_visible_rows(self), d["body"]):\n # note: cannot enumerate d["body"] because rows were dropped if hidden\n # during _translate_body so must zip to acquire the true r-index associated\n # with the ctx obj which contains the cell styles.\n if all(self.hide_index_):\n row_body_headers = []\n else:\n row_body_headers = [\n {\n **col,\n "display_value": col["display_value"]\n if col["is_visible"]\n else "",\n "cellstyle": self.ctx_index[r, c],\n }\n for c, col in enumerate(row[:index_levels])\n if (col["type"] == "th" and not self.hide_index_[c])\n ]\n\n row_body_cells = [\n {**col, "cellstyle": self.ctx[r, c]}\n for c, col in enumerate(row[index_levels:])\n if (col["is_visible"] and col["type"] == "td")\n ]\n\n body.append(row_body_headers + row_body_cells)\n d["body"] = body\n\n # clines are determined from info on index_lengths and hidden_rows and input\n # to a dict defining which row clines should be added in the template.\n if clines not in [\n None,\n "all;data",\n "all;index",\n "skip-last;data",\n "skip-last;index",\n ]:\n raise ValueError(\n f"`clines` value of {clines} is invalid. Should either be None or one "\n f"of 'all;data', 'all;index', 'skip-last;data', 'skip-last;index'."\n )\n if clines is not None:\n data_len = len(row_body_cells) if "data" in clines and d["body"] else 0\n\n d["clines"] = defaultdict(list)\n visible_row_indexes: list[int] = [\n r for r in range(len(self.data.index)) if r not in self.hidden_rows\n ]\n visible_index_levels: list[int] = [\n i for i in range(index_levels) if not self.hide_index_[i]\n ]\n for rn, r in enumerate(visible_row_indexes):\n for lvln, lvl in enumerate(visible_index_levels):\n if lvl == index_levels - 1 and "skip-last" in clines:\n continue\n idx_len = d["index_lengths"].get((lvl, r), None)\n if idx_len is not None: # i.e. 
not a sparsified entry\n d["clines"][rn + idx_len].append(\n f"\\cline{{{lvln+1}-{len(visible_index_levels)+data_len}}}"\n )\n\n def format(\n self,\n formatter: ExtFormatter | None = None,\n subset: Subset | None = None,\n na_rep: str | None = None,\n precision: int | None = None,\n decimal: str = ".",\n thousands: str | None = None,\n escape: str | None = None,\n hyperlinks: str | None = None,\n ) -> StylerRenderer:\n r"""\n Format the text display value of cells.\n\n Parameters\n ----------\n formatter : str, callable, dict or None\n Object to define how values are displayed. See notes.\n subset : label, array-like, IndexSlice, optional\n A valid 2d input to `DataFrame.loc[<subset>]`, or, in the case of a 1d input\n or single key, to `DataFrame.loc[:, <subset>]` where the columns are\n prioritised, to limit ``data`` to *before* applying the function.\n na_rep : str, optional\n Representation for missing values.\n If ``na_rep`` is None, no special formatting is applied.\n precision : int, optional\n Floating point precision to use for display purposes, if not determined by\n the specified ``formatter``.\n\n .. versionadded:: 1.3.0\n\n decimal : str, default "."\n Character used as decimal separator for floats, complex and integers.\n\n .. versionadded:: 1.3.0\n\n thousands : str, optional, default None\n Character used as thousands separator for floats, complex and integers.\n\n .. versionadded:: 1.3.0\n\n escape : str, optional\n Use 'html' to replace the characters ``&``, ``<``, ``>``, ``'``, and ``"``\n in cell display string with HTML-safe sequences.\n Use 'latex' to replace the characters ``&``, ``%``, ``$``, ``#``, ``_``,\n ``{``, ``}``, ``~``, ``^``, and ``\`` in the cell display string with\n LaTeX-safe sequences.\n Use 'latex-math' to replace the characters the same way as in 'latex' mode,\n except for math substrings, which either are surrounded\n by two characters ``$`` or start with the character ``\(`` and\n end with ``\)``. 
Escaping is done before ``formatter``.\n\n .. versionadded:: 1.3.0\n\n hyperlinks : {"html", "latex"}, optional\n Convert string patterns containing https://, http://, ftp:// or www. to\n HTML <a> tags as clickable URL hyperlinks if "html", or LaTeX \href\n commands if "latex".\n\n .. versionadded:: 1.4.0\n\n Returns\n -------\n Styler\n\n See Also\n --------\n Styler.format_index: Format the text display value of index labels.\n\n Notes\n -----\n This method assigns a formatting function, ``formatter``, to each cell in the\n DataFrame. If ``formatter`` is ``None``, then the default formatter is used.\n If a callable then that function should take a data value as input and return\n a displayable representation, such as a string. If ``formatter`` is\n given as a string this is assumed to be a valid Python format specification\n and is wrapped to a callable as ``string.format(x)``. If a ``dict`` is given,\n keys should correspond to column names, and values should be string or\n callable, as above.\n\n The default formatter currently expresses floats and complex numbers with the\n pandas display precision unless using the ``precision`` argument here. The\n default formatter does not adjust the representation of missing values unless\n the ``na_rep`` argument is used.\n\n The ``subset`` argument defines which region to apply the formatting function\n to. If the ``formatter`` argument is given in dict form but does not include\n all columns within the subset then these columns will have the default formatter\n applied. 
Any columns in the formatter dict excluded from the subset will\n be ignored.\n\n When using a ``formatter`` string the dtypes must be compatible, otherwise a\n `ValueError` will be raised.\n\n When instantiating a Styler, default formatting can be applied be setting the\n ``pandas.options``:\n\n - ``styler.format.formatter``: default None.\n - ``styler.format.na_rep``: default None.\n - ``styler.format.precision``: default 6.\n - ``styler.format.decimal``: default ".".\n - ``styler.format.thousands``: default None.\n - ``styler.format.escape``: default None.\n\n .. warning::\n `Styler.format` is ignored when using the output format `Styler.to_excel`,\n since Excel and Python have inherrently different formatting structures.\n However, it is possible to use the `number-format` pseudo CSS attribute\n to force Excel permissible formatting. See examples.\n\n Examples\n --------\n Using ``na_rep`` and ``precision`` with the default ``formatter``\n\n >>> df = pd.DataFrame([[np.nan, 1.0, 'A'], [2.0, np.nan, 3.0]])\n >>> df.style.format(na_rep='MISS', precision=3) # doctest: +SKIP\n 0 1 2\n 0 MISS 1.000 A\n 1 2.000 MISS 3.000\n\n Using a ``formatter`` specification on consistent column dtypes\n\n >>> df.style.format('{:.2f}', na_rep='MISS', subset=[0,1]) # doctest: +SKIP\n 0 1 2\n 0 MISS 1.00 A\n 1 2.00 MISS 3.000000\n\n Using the default ``formatter`` for unspecified columns\n\n >>> df.style.format({0: '{:.2f}', 1: '£ {:.1f}'}, na_rep='MISS', precision=1)\n ... # doctest: +SKIP\n 0 1 2\n 0 MISS £ 1.0 A\n 1 2.00 MISS 3.0\n\n Multiple ``na_rep`` or ``precision`` specifications under the default\n ``formatter``.\n\n >>> (df.style.format(na_rep='MISS', precision=1, subset=[0])\n ... 
.format(na_rep='PASS', precision=2, subset=[1, 2])) # doctest: +SKIP\n 0 1 2\n 0 MISS 1.00 A\n 1 2.0 PASS 3.00\n\n Using a callable ``formatter`` function.\n\n >>> func = lambda s: 'STRING' if isinstance(s, str) else 'FLOAT'\n >>> df.style.format({0: '{:.1f}', 2: func}, precision=4, na_rep='MISS')\n ... # doctest: +SKIP\n 0 1 2\n 0 MISS 1.0000 STRING\n 1 2.0 MISS FLOAT\n\n Using a ``formatter`` with HTML ``escape`` and ``na_rep``.\n\n >>> df = pd.DataFrame([['<div></div>', '"A&B"', None]])\n >>> s = df.style.format(\n ... '<a href="a.com/{0}">{0}</a>', escape="html", na_rep="NA"\n ... )\n >>> s.to_html() # doctest: +SKIP\n ...\n <td .. ><a href="a.com/&lt;div&gt;&lt;/div&gt;">&lt;div&gt;&lt;/div&gt;</a></td>\n <td .. ><a href="a.com/&#34;A&amp;B&#34;">&#34;A&amp;B&#34;</a></td>\n <td .. >NA</td>\n ...\n\n Using a ``formatter`` with ``escape`` in 'latex' mode.\n\n >>> df = pd.DataFrame([["123"], ["~ ^"], ["$%#"]])\n >>> df.style.format("\\textbf{{{}}}", escape="latex").to_latex()\n ... # doctest: +SKIP\n \begin{tabular}{ll}\n & 0 \\\n 0 & \textbf{123} \\\n 1 & \textbf{\textasciitilde \space \textasciicircum } \\\n 2 & \textbf{\$\%\#} \\\n \end{tabular}\n\n Applying ``escape`` in 'latex-math' mode. In the example below\n we enter math mode using the character ``$``.\n\n >>> df = pd.DataFrame([[r"$\sum_{i=1}^{10} a_i$ a~b $\alpha \\n ... = \frac{\beta}{\zeta^2}$"], ["%#^ $ \$x^2 $"]])\n >>> df.style.format(escape="latex-math").to_latex()\n ... # doctest: +SKIP\n \begin{tabular}{ll}\n & 0 \\\n 0 & $\sum_{i=1}^{10} a_i$ a\textasciitilde b $\alpha = \frac{\beta}{\zeta^2}$ \\\n 1 & \%\#\textasciicircum \space $ \$x^2 $ \\\n \end{tabular}\n\n We can use the character ``\(`` to enter math mode and the character ``\)``\n to close math mode.\n\n >>> df = pd.DataFrame([[r"\(\sum_{i=1}^{10} a_i\) a~b \(\alpha \\n ... = \frac{\beta}{\zeta^2}\)"], ["%#^ \( \$x^2 \)"]])\n >>> df.style.format(escape="latex-math").to_latex()\n ... 
# doctest: +SKIP\n \begin{tabular}{ll}\n & 0 \\\n 0 & \(\sum_{i=1}^{10} a_i\) a\textasciitilde b \(\alpha\n = \frac{\beta}{\zeta^2}\) \\\n 1 & \%\#\textasciicircum \space \( \$x^2 \) \\\n \end{tabular}\n\n If we have in one DataFrame cell a combination of both shorthands\n for math formulas, the shorthand with the sign ``$`` will be applied.\n\n >>> df = pd.DataFrame([[r"\( x^2 \) $x^2$"], \\n ... [r"$\frac{\beta}{\zeta}$ \(\frac{\beta}{\zeta}\)"]])\n >>> df.style.format(escape="latex-math").to_latex()\n ... # doctest: +SKIP\n \begin{tabular}{ll}\n & 0 \\\n 0 & \textbackslash ( x\textasciicircum 2 \textbackslash ) $x^2$ \\\n 1 & $\frac{\beta}{\zeta}$ \textbackslash (\textbackslash\n frac\{\textbackslash beta\}\{\textbackslash zeta\}\textbackslash ) \\\n \end{tabular}\n\n Pandas defines a `number-format` pseudo CSS attribute instead of the `.format`\n method to create `to_excel` permissible formatting. Note that semi-colons are\n CSS protected characters but used as separators in Excel's format string.\n Replace semi-colons with the section separator character (ASCII-245) when\n defining the formatting here.\n\n >>> df = pd.DataFrame({"A": [1, 0, -1]})\n >>> pseudo_css = "number-format: 0§[Red](0)§-§@;"\n >>> filename = "formatted_file.xlsx"\n >>> df.style.map(lambda v: pseudo_css).to_excel(filename) # doctest: +SKIP\n\n .. 
figure:: ../../_static/style/format_excel_css.png
        """
        # Short-circuit: with every argument left at its default there is nothing to
        # apply, so simply reset any previously assigned cell formatters and return.
        if all(
            (
                formatter is None,
                subset is None,
                precision is None,
                decimal == ".",
                thousands is None,
                na_rep is None,
                escape is None,
                hyperlinks is None,
            )
        ):
            self._display_funcs.clear()
            return self  # clear the formatter / revert to default and avoid looping

        subset = slice(None) if subset is None else subset
        subset = non_reducing_slice(subset)
        data = self.data.loc[subset]

        # Normalise to a {column label: formatter} mapping so a single formatter
        # spec is applied uniformly across every column of the subset.
        if not isinstance(formatter, dict):
            formatter = {col: formatter for col in data.columns}

        # Map the (possibly label-based) subset back to positional (row, col) keys,
        # which is how ``_display_funcs`` is indexed at render time.
        cis = self.columns.get_indexer_for(data.columns)
        ris = self.index.get_indexer_for(data.index)
        for ci in cis:
            format_func = _maybe_wrap_formatter(
                formatter.get(self.columns[ci]),
                na_rep=na_rep,
                precision=precision,
                decimal=decimal,
                thousands=thousands,
                escape=escape,
                hyperlinks=hyperlinks,
            )
            for ri in ris:
                self._display_funcs[(ri, ci)] = format_func

        return self

    def format_index(
        self,
        formatter: ExtFormatter | None = None,
        axis: Axis = 0,
        level: Level | list[Level] | None = None,
        na_rep: str | None = None,
        precision: int | None = None,
        decimal: str = ".",
        thousands: str | None = None,
        escape: str | None = None,
        hyperlinks: str | None = None,
    ) -> StylerRenderer:
        r"""
        Format the text display value of index labels or column headers.

        .. versionadded:: 1.4.0

        Parameters
        ----------
        formatter : str, callable, dict or None
            Object to define how values are displayed. See notes.
        axis : {0, "index", 1, "columns"}
            Whether to apply the formatter to the index or column headers.
        level : int, str, list
            The level(s) over which to apply the generic formatter.
        na_rep : str, optional
            Representation for missing values.
            If ``na_rep`` is None, no special formatting is applied.
        precision : int, optional
            Floating point precision to use for display purposes, if not determined by
            the specified ``formatter``.
        decimal : str, default "."
            Character used as decimal separator for floats, complex and integers.
        thousands : str, optional, default None
            Character used as thousands separator for floats, complex and integers.
        escape : str, optional
            Use 'html' to replace the characters ``&``, ``<``, ``>``, ``'``, and ``"``
            in cell display string with HTML-safe sequences.
            Use 'latex' to replace the characters ``&``, ``%``, ``$``, ``#``, ``_``,
            ``{``, ``}``, ``~``, ``^``, and ``\`` in the cell display string with
            LaTeX-safe sequences.
            Escaping is done before ``formatter``.
        hyperlinks : {"html", "latex"}, optional
            Convert string patterns containing https://, http://, ftp:// or www. to
            HTML <a> tags as clickable URL hyperlinks if "html", or LaTeX \href
            commands if "latex".

        Returns
        -------
        Styler

        See Also
        --------
        Styler.format: Format the text display value of data cells.

        Notes
        -----
        This method assigns a formatting function, ``formatter``, to each level label
        in the DataFrame's index or column headers. If ``formatter`` is ``None``,
        then the default formatter is used.
        If a callable then that function should take a label value as input and return
        a displayable representation, such as a string. If ``formatter`` is
        given as a string this is assumed to be a valid Python format specification
        and is wrapped to a callable as ``string.format(x)``. If a ``dict`` is given,
        keys should correspond to MultiIndex level numbers or names, and values should
        be string or callable, as above.

        The default formatter currently expresses floats and complex numbers with the
        pandas display precision unless using the ``precision`` argument here. The
        default formatter does not adjust the representation of missing values unless
        the ``na_rep`` argument is used.

        The ``level`` argument defines which levels of a MultiIndex to apply the
        method to. If the ``formatter`` argument is given in dict form but does
        not include all levels within the level argument then these unspecified levels
        will have the default formatter applied. Any levels in the formatter dict
        specifically excluded from the level argument will be ignored.

        When using a ``formatter`` string the dtypes must be compatible, otherwise a
        `ValueError` will be raised.

        .. warning::
           `Styler.format_index` is ignored when using the output format
           `Styler.to_excel`, since Excel and Python have inherently different
           formatting structures.
           However, it is possible to use the `number-format` pseudo CSS attribute
           to force Excel permissible formatting. See documentation for `Styler.format`.

        Examples
        --------
        Using ``na_rep`` and ``precision`` with the default ``formatter``

        >>> df = pd.DataFrame([[1, 2, 3]], columns=[2.0, np.nan, 4.0])
        >>> df.style.format_index(axis=1, na_rep='MISS', precision=3)  # doctest: +SKIP
            2.000    MISS    4.000
        0       1       2        3

        Using a ``formatter`` specification on consistent dtypes in a level

        >>> df.style.format_index('{:.2f}', axis=1, na_rep='MISS')  # doctest: +SKIP
             2.00    MISS    4.00
        0       1       2       3

        Using the default ``formatter`` for unspecified levels

        >>> df = pd.DataFrame([[1, 2, 3]],
        ...     columns=pd.MultiIndex.from_arrays([["a", "a", "b"], [2, np.nan, 4]]))
        >>> df.style.format_index({0: lambda v: v.upper()}, axis=1, precision=1)
        ...  # doctest: +SKIP
                       A       B
              2.0    nan     4.0
        0       1      2       3

        Using a callable ``formatter`` function.

        >>> func = lambda s: 'STRING' if isinstance(s, str) else 'FLOAT'
        >>> df.style.format_index(func, axis=1, na_rep='MISS')
        ...  # doctest: +SKIP
                  STRING            STRING
            FLOAT   MISS   FLOAT
        0       1      2       3

        Using a ``formatter`` with HTML ``escape`` and ``na_rep``.

        >>> df = pd.DataFrame([[1, 2, 3]], columns=['"A"', 'A&B', None])
        >>> s = df.style.format_index('$ {0}', axis=1, escape="html", na_rep="NA")
        ...  # doctest: +SKIP
        <th .. >$ &#34;A&#34;</th>
        <th .. >$ A&amp;B</th>
        <th .. >NA</td>
        ...

        Using a ``formatter`` with LaTeX ``escape``.

        >>> df = pd.DataFrame([[1, 2, 3]], columns=["123", "~", "$%#"])
        >>> df.style.format_index("\\textbf{{{}}}", escape="latex", axis=1).to_latex()
        ...  # doctest: +SKIP
        \begin{tabular}{lrrr}
        {} & {\textbf{123}} & {\textbf{\textasciitilde }} & {\textbf{\$\%\#}} \\
        0 & 1 & 2 & 3 \\
        \end{tabular}
        """
        axis = self.data._get_axis_number(axis)
        # Select the per-axis display-function store and the matching Index object.
        if axis == 0:
            display_funcs_, obj = self._display_funcs_index, self.index
        else:
            display_funcs_, obj = self._display_funcs_columns, self.columns
        levels_ = refactor_levels(level, obj)

        # Short-circuit: all defaults means "reset", mirroring ``Styler.format``.
        if all(
            (
                formatter is None,
                level is None,
                precision is None,
                decimal == ".",
                thousands is None,
                na_rep is None,
                escape is None,
                hyperlinks is None,
            )
        ):
            display_funcs_.clear()
            return self  # clear the formatter / revert to default and avoid looping

        # Normalise to a {level number: formatter} mapping (dict keys may be names).
        if not isinstance(formatter, dict):
            formatter = {level: formatter for level in levels_}
        else:
            formatter = {
                obj._get_level_number(level): formatter_
                for level, formatter_ in formatter.items()
            }

        for lvl in levels_:
            format_func = _maybe_wrap_formatter(
                formatter.get(lvl),
                na_rep=na_rep,
                precision=precision,
                decimal=decimal,
                thousands=thousands,
                escape=escape,
                hyperlinks=hyperlinks,
            )

            # Keys are (position, level) for the index, (level, position) for columns.
            for idx in [(i, lvl) if axis == 0 else (lvl, i) for i in range(len(obj))]:
                display_funcs_[idx] = 
format_func

        return self

    def relabel_index(
        self,
        labels: Sequence | Index,
        axis: Axis = 0,
        level: Level | list[Level] | None = None,
    ) -> StylerRenderer:
        r"""
        Relabel the index, or column header, keys to display a set of specified values.

        .. versionadded:: 1.5.0

        Parameters
        ----------
        labels : list-like or Index
            New labels to display. Must have same length as the underlying values not
            hidden.
        axis : {"index", 0, "columns", 1}
            Apply to the index or columns.
        level : int, str, list, optional
            The level(s) over which to apply the new labels. If `None` will apply
            to all levels of an Index or MultiIndex which are not hidden.

        Returns
        -------
        Styler

        See Also
        --------
        Styler.format_index: Format the text display value of index or column headers.
        Styler.hide: Hide the index, column headers, or specified data from display.

        Notes
        -----
        As part of Styler, this method allows the display of an index to be
        completely user-specified without affecting the underlying DataFrame data,
        index, or column headers. This means that the flexibility of indexing is
        maintained whilst the final display is customisable.

        Since Styler is designed to be progressively constructed with method chaining,
        this method is adapted to react to the **currently specified hidden elements**.
        This is useful because it means one does not have to specify all the new
        labels if the majority of an index, or column headers, have already been hidden.
        The following produce equivalent display (note the length of ``labels`` in
        each case).

        .. code-block:: python

            # relabel first, then hide
            df = pd.DataFrame({"col": ["a", "b", "c"]})
            df.style.relabel_index(["A", "B", "C"]).hide([0, 1])
            # hide first, then relabel
            df = pd.DataFrame({"col": ["a", "b", "c"]})
            df.style.hide([0, 1]).relabel_index(["C"])

        This method should be used, rather than :meth:`Styler.format_index`, in one of
        the following cases (see examples):

        - A specified set of labels are required which are not a function of the
          underlying index keys.
        - The function of the underlying index keys requires a counter variable,
          such as those available upon enumeration.

        Examples
        --------
        Basic use

        >>> df = pd.DataFrame({"col": ["a", "b", "c"]})
        >>> df.style.relabel_index(["A", "B", "C"])  # doctest: +SKIP
             col
        A      a
        B      b
        C      c

        Chaining with pre-hidden elements

        >>> df.style.hide([0, 1]).relabel_index(["C"])  # doctest: +SKIP
             col
        C      c

        Using a MultiIndex

        >>> midx = pd.MultiIndex.from_product([[0, 1], [0, 1], [0, 1]])
        >>> df = pd.DataFrame({"col": list(range(8))}, index=midx)
        >>> styler = df.style  # doctest: +SKIP
                  col
        0  0  0     0
              1     1
           1  0     2
              1     3
        1  0  0     4
              1     5
           1  0     6
              1     7
        >>> styler.hide((midx.get_level_values(0) == 0) | (midx.get_level_values(1) == 0))
        ...  # doctest: +SKIP
        >>> styler.hide(level=[0, 1])  # doctest: +SKIP
        >>> styler.relabel_index(["binary6", "binary7"])  # doctest: +SKIP
                  col
        binary6     6
        binary7     7

        We can also achieve the above by indexing first and then re-labeling

        >>> styler = df.loc[[(1, 1, 0), (1, 1, 1)]].style
        >>> styler.hide(level=[0, 1]).relabel_index(["binary6", "binary7"])
        ...  # doctest: +SKIP
                  col
        binary6     6
        binary7     7

        Defining a formatting function which uses an enumeration counter. Also note
        that the value of the index key is passed in the case of string labels so it
        can also be inserted into the label, using curly brackets (or double curly
        brackets if the string if pre-formatted),

        >>> df = pd.DataFrame({"samples": np.random.rand(10)})
        >>> styler = df.loc[np.random.randint(0, 10, 3)].style
        >>> styler.relabel_index([f"sample{i+1} ({{}})" for i in range(3)])
        ...  # doctest: +SKIP
                         samples
        sample1 (5)     0.315811
        sample2 (0)     0.495941
        sample3 (2)     0.067946
        """
        axis = self.data._get_axis_number(axis)
        if axis == 0:
            display_funcs_, obj = self._display_funcs_index, self.index
            hidden_labels, hidden_lvls = self.hidden_rows, self.hide_index_
        else:
            display_funcs_, obj = self._display_funcs_columns, self.columns
            hidden_labels, hidden_lvls = self.hidden_columns, self.hide_columns_
        # New labels must map 1:1 onto the labels that remain visible after hiding.
        visible_len = len(obj) - len(set(hidden_labels))
        if len(labels) != visible_len:
            raise ValueError(
                "``labels`` must be of length equal to the number of "
                f"visible labels along ``axis`` ({visible_len})."
            )

        if level is None:
            level = [i for i in range(obj.nlevels) if not hidden_lvls[i]]
        levels_ = refactor_levels(level, obj)

        def alias_(x, value):
            # A string alias acts as a format spec with the original label available
            # as ``{}``; any non-string alias replaces the label verbatim.
            if isinstance(value, str):
                return value.format(x)
            return value

        # Walk only the visible positions; ``ai`` enumerates into ``labels``.
        for ai, i in enumerate([i for i in range(len(obj)) if i not in hidden_labels]):
            if len(levels_) == 1:
                idx = (i, levels_[0]) if axis == 0 else (levels_[0], i)
                display_funcs_[idx] = partial(alias_, value=labels[ai])
            else:
                # Multiple levels: each label entry is itself a per-level sequence.
                for aj, lvl in enumerate(levels_):
                    idx = (i, lvl) if axis == 0 else (lvl, i)
                    display_funcs_[idx] = partial(alias_, value=labels[ai][aj])

        return self


def _element(
    html_element: str,
    html_class: str | None,
    value: Any,
    is_visible: bool,
    **kwargs,
) -> dict:
    """
    Template to return container with information for a <td></td> or <th></th> element.
    """
    # ``display_value`` defaults to the raw value unless a caller overrides it.
    if "display_value" not in kwargs:
        kwargs["display_value"] = value
    return {
        "type": html_element,
        "value": 
value,
        "class": html_class,
        "is_visible": is_visible,
        **kwargs,
    }


def _get_trimming_maximums(
    rn,
    cn,
    max_elements,
    max_rows=None,
    max_cols=None,
    scaling_factor: float = 0.8,
) -> tuple[int, int]:
    """
    Recursively reduce the number of rows and columns to satisfy max elements.

    Parameters
    ----------
    rn, cn : int
        The number of input rows / columns
    max_elements : int
        The number of allowable elements
    max_rows, max_cols : int, optional
        Directly specify an initial maximum rows or columns before compression.
    scaling_factor : float
        Factor at which to reduce the number of rows / columns to fit.

    Returns
    -------
    rn, cn : tuple
        New rn and cn values that satisfy the max_elements constraint
    """

    def scale_down(rn, cn):
        # Shrink whichever dimension is currently larger, keeping the trim balanced.
        if cn >= rn:
            return rn, int(cn * scaling_factor)
        else:
            return int(rn * scaling_factor), cn

    # Apply any hard caps before the proportional scaling loop.
    if max_rows:
        rn = max_rows if rn > max_rows else rn
    if max_cols:
        cn = max_cols if cn > max_cols else cn

    while rn * cn > max_elements:
        rn, cn = scale_down(rn, cn)

    return rn, cn


def _get_level_lengths(
    index: Index,
    sparsify: bool,
    max_index: int,
    hidden_elements: Sequence[int] | None = None,
):
    """
    Given an index, find the level length for each element.

    Parameters
    ----------
    index : Index
        Index or columns to determine lengths of each element
    sparsify : bool
        Whether to hide or show each distinct element in a MultiIndex
    max_index : int
        The maximum number of elements to analyse along the index due to trimming
    hidden_elements : sequence of int
        Index positions of elements hidden from display in the index affecting
        length

    Returns
    -------
    Dict :
        Result is a dictionary of (level, initial_position): span
    """
    if isinstance(index, MultiIndex):
        levels = index._format_multi(sparsify=lib.no_default, include_names=False)
    else:
        levels = index._format_flat(include_name=False)

    if hidden_elements is None:
        hidden_elements = []

    lengths = {}
    # Flat Index: every visible element spans exactly one row/column.
    if not isinstance(index, MultiIndex):
        for i, value in enumerate(levels):
            if i not in hidden_elements:
                lengths[(0, i)] = 1
        return lengths

    for i, lvl in enumerate(levels):
        visible_row_count = 0  # used to break loop due to display trimming
        for j, row in enumerate(lvl):
            if visible_row_count > max_index:
                break
            if not sparsify:
                # then lengths will always equal 1 since no aggregation.
                if j not in hidden_elements:
                    lengths[(i, j)] = 1
                    visible_row_count += 1
            elif (row is not lib.no_default) and (j not in hidden_elements):
                # this element has not been sparsified so must be the start of section
                last_label = j
                lengths[(i, last_label)] = 1
                visible_row_count += 1
            elif row is not lib.no_default:
                # even if the above is hidden, keep track of it in case length > 1 and
                # later elements are visible
                last_label = j
                lengths[(i, last_label)] = 0
            elif j not in hidden_elements:
                # then element must be part of sparsified section and is visible
                visible_row_count += 1
                if visible_row_count > max_index:
                    break  # do not add a length since the render trim limit reached
                if lengths[(i, last_label)] == 0:
                    # if previous iteration was first-of-section but hidden then offset
                    last_label = j
                    lengths[(i, last_label)] = 1
                else:
                    # else add to previous iteration
                    lengths[(i, last_label)] += 1

    # Drop fully hidden sections (those whose recorded span stayed at 0).
    non_zero_lengths = {
        element: length for element, length in lengths.items() if length >= 1
    }

    return non_zero_lengths


def _is_visible(idx_row, idx_col, lengths) -> bool:
    """
    Return whether the element is visible, i.e. whether its *transposed* key
    ``(idx_col, idx_row)`` survived sparsification in the ``lengths`` mapping
    produced by ``_get_level_lengths``.
    """
    return (idx_col, idx_row) in lengths


def format_table_styles(styles: CSSStyles) -> CSSStyles:
    """
    looks for multiple CSS selectors and separates them:
    [{'selector': 'td, th', 'props': 'a:v;'}]
        ---> [{'selector': 'td', 'props': 'a:v;'},
              {'selector': 'th', 'props': 'a:v;'}]
    """
    return [
        {"selector": selector, "props": css_dict["props"]}
        for css_dict in styles
        for selector in css_dict["selector"].split(",")
    ]


def _default_formatter(x: Any, precision: int, thousands: bool = False) -> Any:
    """
    Format the display of a value

    Parameters
    ----------
    x : Any
        Input variable to be formatted
    precision : int
        Floating point precision used if ``x`` is float or complex.
    thousands : bool, default False
        Whether to group digits with thousands separated with ",".

    Returns
    -------
    value : Any
        Matches input type, or string if input is float or complex or int with sep.
    """
    if is_float(x) or is_complex(x):
        return f"{x:,.{precision}f}" if thousands else f"{x:.{precision}f}"
    elif is_integer(x):
        return f"{x:,}" if thousands else str(x)
    # Non-numeric values pass through unchanged.
    return x


def _wrap_decimal_thousands(
    formatter: Callable, decimal: str, thousands: str | None
) -> Callable:
    """
    Takes a string formatting function and wraps logic to deal with thousands and
    decimal parameters, in the case that they are non-standard and that the input
    is a (float, complex, int).
    """

    def wrapper(x):
        if is_float(x) or is_integer(x) or is_complex(x):
            if decimal != "." and thousands is not None and thousands != ",":
                # Both separators are custom: swap via an unlikely placeholder so
                # "," and "." replacements do not collide with each other.
                return (
                    formatter(x)
                    .replace(",", "§_§-")  # rare string to avoid "," <-> "." clash.
                    .replace(".", decimal)
                    .replace("§_§-", thousands)
                )
            elif decimal != "." and (thousands is None or thousands == ","):
                return formatter(x).replace(".", decimal)
            elif decimal == "." 
and thousands is not None and thousands != ",":
                return formatter(x).replace(",", thousands)
        return formatter(x)

    return wrapper


def _str_escape(x, escape):
    """if escaping: only use on str, else return input"""
    if isinstance(x, str):
        if escape == "html":
            return escape_html(x)
        elif escape == "latex":
            return _escape_latex(x)
        elif escape == "latex-math":
            return _escape_latex_math(x)
        else:
            raise ValueError(
                f"`escape` only permitted in {{'html', 'latex', 'latex-math'}}, \
got {escape}"
            )
    return x


def _render_href(x, format):
    """uses regex to detect a common URL pattern and converts to href tag in format."""
    if isinstance(x, str):
        if format == "html":
            href = '<a href="{0}" target="_blank">{0}</a>'
        elif format == "latex":
            href = r"\href{{{0}}}{{{0}}}"
        else:
            raise ValueError("``hyperlinks`` format can only be 'html' or 'latex'")
        pat = r"((http|ftp)s?:\/\/|www.)[\w/\-?=%.:@]+\.[\w/\-&?=%.,':;~!@#$*()\[\]]+"
        return re.sub(pat, lambda m: href.format(m.group(0)), x)
    return x


def _maybe_wrap_formatter(
    formatter: BaseFormatter | None = None,
    na_rep: str | None = None,
    precision: int | None = None,
    decimal: str = ".",
    thousands: str | None = None,
    escape: str | None = None,
    hyperlinks: str | None = None,
) -> Callable:
    """
    Allows formatters to be expressed as str, callable or None, where None returns
    a default formatting function. Wraps with na_rep, and precision where they are
    available.
    """
    # Get initial func from input string, input callable, or from default factory
    if isinstance(formatter, str):
        func_0 = lambda x: formatter.format(x)
    elif callable(formatter):
        func_0 = formatter
    elif formatter is None:
        precision = (
            get_option("styler.format.precision") if precision is None else precision
        )
        func_0 = partial(
            _default_formatter, precision=precision, thousands=(thousands is not None)
        )
    else:
        raise TypeError(f"'formatter' expected str or callable, got {type(formatter)}")

    # Replace chars if escaping (escaping is applied to the raw value, i.e. before
    # the user formatter runs).
    if escape is not None:
        func_1 = lambda x: func_0(_str_escape(x, escape=escape))
    else:
        func_1 = func_0

    # Replace decimals and thousands if non-standard inputs detected
    if decimal != "." or (thousands is not None and thousands != ","):
        func_2 = _wrap_decimal_thousands(func_1, decimal=decimal, thousands=thousands)
    else:
        func_2 = func_1

    # Render links
    if hyperlinks is not None:
        func_3 = lambda x: func_2(_render_href(x, format=hyperlinks))
    else:
        func_3 = func_2

    # Replace missing values if na_rep; NA short-circuits the whole chain.
    if na_rep is None:
        return func_3
    else:
        return lambda x: na_rep if (isna(x) is True) else func_3(x)


def non_reducing_slice(slice_: Subset):
    """
    Ensure that a slice doesn't reduce to a Series or Scalar.

    Any user-passed `subset` should have this called on it
    to make sure we're always working with DataFrames.
    """
    # default to column slice, like DataFrame
    # ['A', 'B'] -> IndexSlices[:, ['A', 'B']]
    kinds = (ABCSeries, np.ndarray, Index, list, str)
    if isinstance(slice_, kinds):
        slice_ = IndexSlice[:, slice_]

    def pred(part) -> bool:
        """
        Returns
        -------
        bool
            True if slice does *not* reduce,
            False if `part` is a tuple.
        """
        # true when slice does *not* reduce, False when part is a tuple,
        # i.e. MultiIndex slice
        if isinstance(part, tuple):
            # GH#39421 check for sub-slice:
            return any((isinstance(s, slice) or is_list_like(s)) for s in part)
        else:
            return isinstance(part, slice) or is_list_like(part)

    if not is_list_like(slice_):
        if not isinstance(slice_, slice):
            # a 1-d slice, like df.loc[1]
            slice_ = [[slice_]]
        else:
            # slice(a, b, c)
            slice_ = [slice_]  # to tuplize later
    else:
        # error: Item "slice" of "Union[slice, Sequence[Any]]" has no attribute
        # "__iter__" (not iterable) -> is specifically list_like in conditional
        slice_ = [p if pred(p) else [p] for p in slice_]  # type: ignore[union-attr]
    return tuple(slice_)


def maybe_convert_css_to_tuples(style: CSSProperties) -> CSSList:
    """
    Convert css-string to sequence of tuples format if needed.
    'color:red; border:1px solid black;' -> [('color', 'red'),
                                             ('border','1px solid red')]
    """
    if isinstance(style, str):
        s = style.split(";")
        try:
            # Each "attr: val" declaration becomes an (attr, val) tuple; a missing
            # ":" raises IndexError and is re-reported as a ValueError below.
            return [
                (x.split(":")[0].strip(), x.split(":")[1].strip())
                for x in s
                if x.strip() != ""
            ]
        except IndexError:
            raise ValueError(
                "Styles supplied as string must follow CSS rule formats, "
                f"for example 'attr: val;'. '{style}' was given."
            )
    return style


def refactor_levels(
    level: Level | list[Level] | None,
    obj: Index,
) -> list[int]:
    """
    Returns a consistent levels arg for use in ``hide_index`` or ``hide_columns``.

    Parameters
    ----------
    level : int, str, list
        Original ``level`` arg supplied to above methods.
    obj:
        Either ``self.index`` or ``self.columns``

    Returns
    -------
    list : refactored arg with a list of levels to hide
    """
    if level is None:
        levels_: list[int] = list(range(obj.nlevels))
    elif isinstance(level, int):
        levels_ = [level]
    elif isinstance(level, str):
        levels_ = [obj._get_level_number(level)]
    elif isinstance(level, list):
        # Mixed int/name lists are permitted; names are resolved to numbers.
        levels_ = [
            obj._get_level_number(lev) if not isinstance(lev, int) else lev
            for lev in level
        ]
    else:
        raise ValueError("`level` must be of type `int`, `str` or list of such")
    return levels_


class Tooltips:
    """
    An extension to ``Styler`` that allows for and manipulates tooltips on hover
    of ``<td>`` cells in the HTML result.

    Parameters
    ----------
    css_name: str, default "pd-t"
        Name of the CSS class that controls visualisation of tooltips.
    css_props: list-like, default; see Notes
        List of (attr, value) tuples defining properties of the CSS class.
    tooltips: DataFrame, default empty
        DataFrame of strings aligned with underlying Styler data for tooltip
        display.

    Notes
    -----
    The default properties for the tooltip CSS class are:

    - visibility: hidden
    - position: absolute
    - z-index: 1
    - background-color: black
    - color: white
    - transform: translate(-20px, -20px)

    Hidden visibility is a key prerequisite to the hover functionality, and should
    always be included in any manual properties specification.
    """

    def __init__(
        self,
        # NOTE(review): mutable (list) default is shared across instances; it
        # appears to be treated as read-only here — do not mutate it in place.
        css_props: CSSProperties = [
            ("visibility", "hidden"),
            ("position", "absolute"),
            ("z-index", 1),
            ("background-color", "black"),
            ("color", "white"),
            ("transform", "translate(-20px, -20px)"),
        ],
css_name: str = "pd-t",
        tooltips: DataFrame = DataFrame(),
    ) -> None:
        self.class_name = css_name  # CSS class name applied to the tooltip <span>
        self.class_properties = css_props  # (attr, value) pairs for that class
        self.tt_data = tooltips  # tooltip strings aligned with the Styler's data
        self.table_styles: CSSStyles = []  # generated per-cell pseudo CSS rules

    @property
    def _class_styles(self):
        """
        Combine the ``_Tooltips`` CSS class name and CSS properties to the format
        required to extend the underlying ``Styler`` `table_styles` to allow
        tooltips to render in HTML.

        Returns
        -------
        styles : List
        """
        return [
            {
                "selector": f".{self.class_name}",
                "props": maybe_convert_css_to_tuples(self.class_properties),
            }
        ]

    def _pseudo_css(self, uuid: str, name: str, row: int, col: int, text: str):
        """
        For every table data-cell that has a valid tooltip (not None, NaN or
        empty string) must create two pseudo CSS entries for the specific
        <td> element id which are added to overall table styles:
        an on hover visibility change and a content change
        dependent upon the user's chosen display string.

        For example:
            [{"selector": "T__row1_col1:hover .pd-t",
              "props": [("visibility", "visible")]},
             {"selector": "T__row1_col1 .pd-t::after",
              "props": [("content", "Some Valid Text String")]}]

        Parameters
        ----------
        uuid: str
            The uuid of the Styler instance
        name: str
            The css-name of the class used for styling tooltips
        row : int
            The row index of the specified tooltip string data
        col : int
            The col index of the specified tooltip string data
        text : str
            The textual content of the tooltip to be displayed in HTML.

        Returns
        -------
        pseudo_css : List
        """
        selector_id = "#T_" + uuid + "_row" + str(row) + "_col" + str(col)
        return [
            {
                "selector": selector_id + f":hover .{name}",
                "props": [("visibility", "visible")],
            },
            {
                "selector": selector_id + f" .{name}::after",
                "props": [("content", f'"{text}"')],
            },
        ]

    def _translate(self, styler: StylerRenderer, d: dict):
        """
        Mutate the render dictionary to allow for tooltips:

        - Add ``<span>`` HTML element to each data cells ``display_value``. Ignores
          headers.
        - Add table level CSS styles to control pseudo classes.

        Parameters
        ----------
        styler_data : DataFrame
            Underlying ``Styler`` DataFrame used for reindexing.
        uuid : str
            The underlying ``Styler`` uuid for CSS id.
        d : dict
            The dictionary prior to final render

        Returns
        -------
        render_dict : Dict
        """
        # Align tooltip strings with the Styler's data; nothing to do if empty.
        self.tt_data = self.tt_data.reindex_like(styler.data)
        if self.tt_data.empty:
            return d

        name = self.class_name
        mask = (self.tt_data.isna()) | (self.tt_data.eq(""))  # empty string = no ttip
        # Flatten the per-cell pairs of pseudo CSS rules, skipping masked/hidden cells.
        self.table_styles = [
            style
            for sublist in [
                self._pseudo_css(styler.uuid, name, i, j, str(self.tt_data.iloc[i, j]))
                for i in range(len(self.tt_data.index))
                for j in range(len(self.tt_data.columns))
                if not (
                    mask.iloc[i, j]
                    or i in styler.hidden_rows
                    or j in styler.hidden_columns
                )
            ]
            for style in sublist
        ]

        if self.table_styles:
            # add span class to every cell only if at least 1 non-empty tooltip
            for row in d["body"]:
                for item in row:
                    if item["type"] == "td":
                        item["display_value"] = (
                            str(item["display_value"])
                            + f'<span class="{self.class_name}"></span>'
                        )
            d["table_styles"].extend(self._class_styles)
            d["table_styles"].extend(self.table_styles)

        return d


def _parse_latex_table_wrapping(table_styles: CSSStyles, caption: str | None) -> bool:
    """
    Indicate whether LaTeX {tabular} should be wrapped with a {table} environment.

    Parses the `table_styles` and detects any selectors which must be included outside
    of {tabular}, i.e. indicating that wrapping must occur, and therefore return True,
    or if a caption exists and requires similar.
    """
    IGNORED_WRAPPERS = ["toprule", "midrule", "bottomrule", "column_format"]
    # ignored selectors are included with {tabular} so do not need wrapping
    return (
        table_styles is not None
        and any(d["selector"] not in IGNORED_WRAPPERS for d in table_styles)
    ) or caption is not None


def _parse_latex_table_styles(table_styles: CSSStyles, selector: str) -> str | None:
    """
    Return the first 'props' 'value' from ``tables_styles`` identified by ``selector``.

    Examples
    --------
    >>> table_styles = [{'selector': 'foo', 'props': [('attr','value')]},
    ...     {'selector': 'bar', 'props': [('attr', 'overwritten')]},
    ...     {'selector': 'bar', 'props': [('a1', 'baz'), ('a2', 'ignore')]}]
    >>> _parse_latex_table_styles(table_styles, selector='bar')
    'baz'

    Notes
    -----
    The replacement of "§" with ":" is to avoid the CSS problem where ":" has structural
    significance and cannot be used in LaTeX labels, but is often required by them.
    """
    for style in table_styles[::-1]:  # in reverse for most recently applied style
        if style["selector"] == selector:
            return str(style["props"][0][1]).replace("§", ":")
    return None


def _parse_latex_cell_styles(
    latex_styles: CSSList, display_value: str, convert_css: bool = False
) -> str:
    r"""
    Mutate the ``display_value`` string including LaTeX commands from ``latex_styles``.

    This method builds a recursive latex chain of commands based on the
    CSSList input, nested around ``display_value``.

    If a CSS style is given as ('<command>', '<options>') this is translated to
    '\<command><options>{display_value}', and this value is treated as the
    display value for the next iteration.

    The most recent style forms the inner component, for example for styles:
    `[('c1', 'o1'), ('c2', 'o2')]` this returns: `\c1o1{\c2o2{display_value}}`

    Sometimes latex commands have to be wrapped with curly braces in different ways:
    We create some parsing flags to identify the different behaviours:

    - `--rwrap` : `\<command><options>{<display_value>}`
    - `--wrap` : `{\<command><options> <display_value>}`
    - `--nowrap` : `\<command><options> <display_value>`
    - `--lwrap` : `{\<command><options>} <display_value>`
    - `--dwrap` : `{\<command><options>}{<display_value>}`

    For example for styles:
    `[('c1', 'o1--wrap'), ('c2', 'o2')]` this returns: `{\c1o1 \c2o2{display_value}}`
    """
    if convert_css:
        latex_styles = _parse_latex_css_conversion(latex_styles)
    for command, options in latex_styles[::-1]:  # in reverse for most recent style
        formatter = {
            "--wrap": f"{{\\{command}--to_parse {display_value}}}",
            "--nowrap": f"\\{command}--to_parse {display_value}",
            "--lwrap": f"{{\\{command}--to_parse}} {display_value}",
            "--rwrap": f"\\{command}--to_parse{{{display_value}}}",
            "--dwrap": f"{{\\{command}--to_parse}}{{{display_value}}}",
        }
        # Default (no wrapping flag) form; overwritten below if a flag is present.
        display_value = f"\\{command}{options} {display_value}"
        for arg in ["--nowrap", "--wrap", "--lwrap", "--rwrap", "--dwrap"]:
            if arg in str(options):
                display_value = formatter[arg].replace(
                    "--to_parse", _parse_latex_options_strip(value=options, arg=arg)
                )
                break  # only ever one purposeful entry
    return display_value


def _parse_latex_header_span(
    cell: dict[str, Any],
    multirow_align: str,
    multicol_align: str,
    wrap: bool = False,
    convert_css: bool = False,
) -> str:
    r"""
    Refactor the cell `display_value` if a 'colspan' or 'rowspan' attribute is present.

    'rowspan' and 'colspan' do not occur simultaneously. If they are detected then
    the `display_value` is altered to a LaTeX `multirow` or `multicol` command
    respectively, with the appropriate cell-span.

    ``wrap`` is used to enclose the `display_value` in braces which is needed for
    column headers using an siunitx package.

    Requires the package {multirow}, whereas multicol support is usually built in
    to the {tabular} environment.

    Examples
    --------
    >>> cell = {'cellstyle': '', 'display_value':'text', 'attributes': 'colspan="3"'}
    >>> _parse_latex_header_span(cell, 't', 'c')
    '\\multicolumn{3}{c}{text}'
    """
    display_val = _parse_latex_cell_styles(
        cell["cellstyle"], cell["display_value"], convert_css
    )
    if "attributes" in cell:
        attrs = cell["attributes"]
        if 'colspan="' in attrs:
            colspan = attrs[attrs.find('colspan="') + 9 :]  # len('colspan="') = 9
            colspan = int(colspan[: colspan.find('"')])
            if "naive-l" == multicol_align:
                # left-aligned: value first, then blank filler cells
                out = f"{{{display_val}}}" if wrap else f"{display_val}"
                blanks = " & {}" if wrap else " &"
                return out + blanks * (colspan - 1)
            elif "naive-r" == multicol_align:
                # right-aligned: blank filler cells first, then the value
                out = f"{{{display_val}}}" if wrap else f"{display_val}"
                blanks = "{} & " if wrap else "& "
                return blanks * (colspan - 1) + out
            return f"\\multicolumn{{{colspan}}}{{{multicol_align}}}{{{display_val}}}"
        elif 'rowspan="' in attrs:
            if multirow_align == "naive":
                return display_val
            rowspan = attrs[attrs.find('rowspan="') + 9 :]
            rowspan = int(rowspan[: rowspan.find('"')])
            return f"\\multirow[{multirow_align}]{{{rowspan}}}{{*}}{{{display_val}}}"
    if wrap:
        return f"{{{display_val}}}"
    else:
        return display_val


def _parse_latex_options_strip(value: str | float, arg: str) -> str:
    """
    Strip a css_value which may have latex wrapping arguments, css comment identifiers,
    and whitespaces, to a valid string for latex options parsing.

    For example: 'red /* --wrap */ ' --> 'red'
    """
    return str(value).replace(arg, "").replace("/*", "").replace("*/", "").strip()


def 
_parse_latex_css_conversion(styles: CSSList) -> CSSList:\n """\n Convert CSS (attribute,value) pairs to equivalent LaTeX (command,options) pairs.\n\n Ignore conversion if tagged with `--latex` option, skipped if no conversion found.\n """\n\n def font_weight(value, arg):\n if value in ("bold", "bolder"):\n return "bfseries", f"{arg}"\n return None\n\n def font_style(value, arg):\n if value == "italic":\n return "itshape", f"{arg}"\n if value == "oblique":\n return "slshape", f"{arg}"\n return None\n\n def color(value, user_arg, command, comm_arg):\n """\n CSS colors have 5 formats to process:\n\n - 6 digit hex code: "#ff23ee" --> [HTML]{FF23EE}\n - 3 digit hex code: "#f0e" --> [HTML]{FF00EE}\n - rgba: rgba(128, 255, 0, 0.5) --> [rgb]{0.502, 1.000, 0.000}\n - rgb: rgb(128, 255, 0,) --> [rbg]{0.502, 1.000, 0.000}\n - string: red --> {red}\n\n Additionally rgb or rgba can be expressed in % which is also parsed.\n """\n arg = user_arg if user_arg != "" else comm_arg\n\n if value[0] == "#" and len(value) == 7: # color is hex code\n return command, f"[HTML]{{{value[1:].upper()}}}{arg}"\n if value[0] == "#" and len(value) == 4: # color is short hex code\n val = f"{value[1].upper()*2}{value[2].upper()*2}{value[3].upper()*2}"\n return command, f"[HTML]{{{val}}}{arg}"\n elif value[:3] == "rgb": # color is rgb or rgba\n r = re.findall("(?<=\\()[0-9\\s%]+(?=,)", value)[0].strip()\n r = float(r[:-1]) / 100 if "%" in r else int(r) / 255\n g = re.findall("(?<=,)[0-9\\s%]+(?=,)", value)[0].strip()\n g = float(g[:-1]) / 100 if "%" in g else int(g) / 255\n if value[3] == "a": # color is rgba\n b = re.findall("(?<=,)[0-9\\s%]+(?=,)", value)[1].strip()\n else: # color is rgb\n b = re.findall("(?<=,)[0-9\\s%]+(?=\\))", value)[0].strip()\n b = float(b[:-1]) / 100 if "%" in b else int(b) / 255\n return command, f"[rgb]{{{r:.3f}, {g:.3f}, {b:.3f}}}{arg}"\n else:\n return command, f"{{{value}}}{arg}" # color is likely string-named\n\n CONVERTED_ATTRIBUTES: dict[str, Callable] = {\n 
"font-weight": font_weight,\n "background-color": partial(color, command="cellcolor", comm_arg="--lwrap"),\n "color": partial(color, command="color", comm_arg=""),\n "font-style": font_style,\n }\n\n latex_styles: CSSList = []\n for attribute, value in styles:\n if isinstance(value, str) and "--latex" in value:\n # return the style without conversion but drop '--latex'\n latex_styles.append((attribute, value.replace("--latex", "")))\n if attribute in CONVERTED_ATTRIBUTES:\n arg = ""\n for x in ["--wrap", "--nowrap", "--lwrap", "--dwrap", "--rwrap"]:\n if x in str(value):\n arg, value = x, _parse_latex_options_strip(value, x)\n break\n latex_style = CONVERTED_ATTRIBUTES[attribute](value, arg)\n if latex_style is not None:\n latex_styles.extend([latex_style])\n return latex_styles\n\n\ndef _escape_latex(s: str) -> str:\n r"""\n Replace the characters ``&``, ``%``, ``$``, ``#``, ``_``, ``{``, ``}``,\n ``~``, ``^``, and ``\`` in the string with LaTeX-safe sequences.\n\n Use this if you need to display text that might contain such characters in LaTeX.\n\n Parameters\n ----------\n s : str\n Input to be escaped\n\n Return\n ------\n str :\n Escaped string\n """\n return (\n s.replace("\\", "ab2§=§8yz") # rare string for final conversion: avoid \\ clash\n .replace("ab2§=§8yz ", "ab2§=§8yz\\space ") # since \backslash gobbles spaces\n .replace("&", "\\&")\n .replace("%", "\\%")\n .replace("$", "\\$")\n .replace("#", "\\#")\n .replace("_", "\\_")\n .replace("{", "\\{")\n .replace("}", "\\}")\n .replace("~ ", "~\\space ") # since \textasciitilde gobbles spaces\n .replace("~", "\\textasciitilde ")\n .replace("^ ", "^\\space ") # since \textasciicircum gobbles spaces\n .replace("^", "\\textasciicircum ")\n .replace("ab2§=§8yz", "\\textbackslash ")\n )\n\n\ndef _math_mode_with_dollar(s: str) -> str:\n r"""\n All characters in LaTeX math mode are preserved.\n\n The substrings in LaTeX math mode, which start with\n the character ``$`` and end with ``$``, are preserved\n without 
escaping. Otherwise regular LaTeX escaping applies.\n\n Parameters\n ----------\n s : str\n Input to be escaped\n\n Return\n ------\n str :\n Escaped string\n """\n s = s.replace(r"\$", r"rt8§=§7wz")\n pattern = re.compile(r"\$.*?\$")\n pos = 0\n ps = pattern.search(s, pos)\n res = []\n while ps:\n res.append(_escape_latex(s[pos : ps.span()[0]]))\n res.append(ps.group())\n pos = ps.span()[1]\n ps = pattern.search(s, pos)\n\n res.append(_escape_latex(s[pos : len(s)]))\n return "".join(res).replace(r"rt8§=§7wz", r"\$")\n\n\ndef _math_mode_with_parentheses(s: str) -> str:\n r"""\n All characters in LaTeX math mode are preserved.\n\n The substrings in LaTeX math mode, which start with\n the character ``\(`` and end with ``\)``, are preserved\n without escaping. Otherwise regular LaTeX escaping applies.\n\n Parameters\n ----------\n s : str\n Input to be escaped\n\n Return\n ------\n str :\n Escaped string\n """\n s = s.replace(r"\(", r"LEFT§=§6yzLEFT").replace(r"\)", r"RIGHTab5§=§RIGHT")\n res = []\n for item in re.split(r"LEFT§=§6yz|ab5§=§RIGHT", s):\n if item.startswith("LEFT") and item.endswith("RIGHT"):\n res.append(item.replace("LEFT", r"\(").replace("RIGHT", r"\)"))\n elif "LEFT" in item and "RIGHT" in item:\n res.append(\n _escape_latex(item).replace("LEFT", r"\(").replace("RIGHT", r"\)")\n )\n else:\n res.append(\n _escape_latex(item)\n .replace("LEFT", r"\textbackslash (")\n .replace("RIGHT", r"\textbackslash )")\n )\n return "".join(res)\n\n\ndef _escape_latex_math(s: str) -> str:\n r"""\n All characters in LaTeX math mode are preserved.\n\n The substrings in LaTeX math mode, which either are surrounded\n by two characters ``$`` or start with the character ``\(`` and end with ``\)``,\n are preserved without escaping. 
Otherwise regular LaTeX escaping applies.\n\n Parameters\n ----------\n s : str\n Input to be escaped\n\n Return\n ------\n str :\n Escaped string\n """\n s = s.replace(r"\$", r"rt8§=§7wz")\n ps_d = re.compile(r"\$.*?\$").search(s, 0)\n ps_p = re.compile(r"\(.*?\)").search(s, 0)\n mode = []\n if ps_d:\n mode.append(ps_d.span()[0])\n if ps_p:\n mode.append(ps_p.span()[0])\n if len(mode) == 0:\n return _escape_latex(s.replace(r"rt8§=§7wz", r"\$"))\n if s[mode[0]] == r"$":\n return _math_mode_with_dollar(s.replace(r"rt8§=§7wz", r"\$"))\n if s[mode[0] - 1 : mode[0] + 1] == r"\(":\n return _math_mode_with_parentheses(s.replace(r"rt8§=§7wz", r"\$"))\n else:\n return _escape_latex(s.replace(r"rt8§=§7wz", r"\$"))\n
.venv\Lib\site-packages\pandas\io\formats\style_render.py
style_render.py
Python
90,899
0.75
0.152583
0.0236
awesome-app
281
2024-03-21T14:04:10.714264
Apache-2.0
false
3be6a0a25ec4f74ecd6bc55e440eefd4
"""\n:mod:`pandas.io.formats.xml` is a module for formatting data in XML.\n"""\nfrom __future__ import annotations\n\nimport codecs\nimport io\nfrom typing import (\n TYPE_CHECKING,\n Any,\n final,\n)\nimport warnings\n\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._decorators import (\n cache_readonly,\n doc,\n)\n\nfrom pandas.core.dtypes.common import is_list_like\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas.core.shared_docs import _shared_docs\n\nfrom pandas.io.common import get_handle\nfrom pandas.io.xml import (\n get_data_from_filepath,\n preprocess_data,\n)\n\nif TYPE_CHECKING:\n from pandas._typing import (\n CompressionOptions,\n FilePath,\n ReadBuffer,\n StorageOptions,\n WriteBuffer,\n )\n\n from pandas import DataFrame\n\n\n@doc(\n storage_options=_shared_docs["storage_options"],\n compression_options=_shared_docs["compression_options"] % "path_or_buffer",\n)\nclass _BaseXMLFormatter:\n """\n Subclass for formatting data in XML.\n\n Parameters\n ----------\n path_or_buffer : str or file-like\n This can be either a string of raw XML, a valid URL,\n file or file-like object.\n\n index : bool\n Whether to include index in xml document.\n\n row_name : str\n Name for root of xml document. Default is 'data'.\n\n root_name : str\n Name for row elements of xml document. 
Default is 'row'.\n\n na_rep : str\n Missing data representation.\n\n attrs_cols : list\n List of columns to write as attributes in row element.\n\n elem_cols : list\n List of columns to write as children in row element.\n\n namespaces : dict\n The namespaces to define in XML document as dicts with key\n being namespace and value the URI.\n\n prefix : str\n The prefix for each element in XML document including root.\n\n encoding : str\n Encoding of xml object or document.\n\n xml_declaration : bool\n Whether to include xml declaration at top line item in xml.\n\n pretty_print : bool\n Whether to write xml document with line breaks and indentation.\n\n stylesheet : str or file-like\n A URL, file, file-like object, or a raw string containing XSLT.\n\n {compression_options}\n\n .. versionchanged:: 1.4.0 Zstandard support.\n\n {storage_options}\n\n See also\n --------\n pandas.io.formats.xml.EtreeXMLFormatter\n pandas.io.formats.xml.LxmlXMLFormatter\n\n """\n\n def __init__(\n self,\n frame: DataFrame,\n path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,\n index: bool = True,\n root_name: str | None = "data",\n row_name: str | None = "row",\n na_rep: str | None = None,\n attr_cols: list[str] | None = None,\n elem_cols: list[str] | None = None,\n namespaces: dict[str | None, str] | None = None,\n prefix: str | None = None,\n encoding: str = "utf-8",\n xml_declaration: bool | None = True,\n pretty_print: bool | None = True,\n stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = None,\n compression: CompressionOptions = "infer",\n storage_options: StorageOptions | None = None,\n ) -> None:\n self.frame = frame\n self.path_or_buffer = path_or_buffer\n self.index = index\n self.root_name = root_name\n self.row_name = row_name\n self.na_rep = na_rep\n self.attr_cols = attr_cols\n self.elem_cols = elem_cols\n self.namespaces = namespaces\n self.prefix = prefix\n self.encoding = encoding\n self.xml_declaration = xml_declaration\n 
self.pretty_print = pretty_print\n self.stylesheet = stylesheet\n self.compression: CompressionOptions = compression\n self.storage_options = storage_options\n\n self.orig_cols = self.frame.columns.tolist()\n self.frame_dicts = self._process_dataframe()\n\n self._validate_columns()\n self._validate_encoding()\n self.prefix_uri = self._get_prefix_uri()\n self._handle_indexes()\n\n def _build_tree(self) -> bytes:\n """\n Build tree from data.\n\n This method initializes the root and builds attributes and elements\n with optional namespaces.\n """\n raise AbstractMethodError(self)\n\n @final\n def _validate_columns(self) -> None:\n """\n Validate elems_cols and attrs_cols.\n\n This method will check if columns is list-like.\n\n Raises\n ------\n ValueError\n * If value is not a list and less then length of nodes.\n """\n if self.attr_cols and not is_list_like(self.attr_cols):\n raise TypeError(\n f"{type(self.attr_cols).__name__} is not a valid type for attr_cols"\n )\n\n if self.elem_cols and not is_list_like(self.elem_cols):\n raise TypeError(\n f"{type(self.elem_cols).__name__} is not a valid type for elem_cols"\n )\n\n @final\n def _validate_encoding(self) -> None:\n """\n Validate encoding.\n\n This method will check if encoding is among listed under codecs.\n\n Raises\n ------\n LookupError\n * If encoding is not available in codecs.\n """\n\n codecs.lookup(self.encoding)\n\n @final\n def _process_dataframe(self) -> dict[int | str, dict[str, Any]]:\n """\n Adjust Data Frame to fit xml output.\n\n This method will adjust underlying data frame for xml output,\n including optionally replacing missing values and including indexes.\n """\n\n df = self.frame\n\n if self.index:\n df = df.reset_index()\n\n if self.na_rep is not None:\n with warnings.catch_warnings():\n warnings.filterwarnings(\n "ignore",\n "Downcasting object dtype arrays",\n category=FutureWarning,\n )\n df = df.fillna(self.na_rep)\n\n return df.to_dict(orient="index")\n\n @final\n def 
_handle_indexes(self) -> None:\n """\n Handle indexes.\n\n This method will add indexes into attr_cols or elem_cols.\n """\n\n if not self.index:\n return\n\n first_key = next(iter(self.frame_dicts))\n indexes: list[str] = [\n x for x in self.frame_dicts[first_key].keys() if x not in self.orig_cols\n ]\n\n if self.attr_cols:\n self.attr_cols = indexes + self.attr_cols\n\n if self.elem_cols:\n self.elem_cols = indexes + self.elem_cols\n\n def _get_prefix_uri(self) -> str:\n """\n Get uri of namespace prefix.\n\n This method retrieves corresponding URI to prefix in namespaces.\n\n Raises\n ------\n KeyError\n *If prefix is not included in namespace dict.\n """\n\n raise AbstractMethodError(self)\n\n @final\n def _other_namespaces(self) -> dict:\n """\n Define other namespaces.\n\n This method will build dictionary of namespaces attributes\n for root element, conditionally with optional namespaces and\n prefix.\n """\n\n nmsp_dict: dict[str, str] = {}\n if self.namespaces:\n nmsp_dict = {\n f"xmlns{p if p=='' else f':{p}'}": n\n for p, n in self.namespaces.items()\n if n != self.prefix_uri[1:-1]\n }\n\n return nmsp_dict\n\n @final\n def _build_attribs(self, d: dict[str, Any], elem_row: Any) -> Any:\n """\n Create attributes of row.\n\n This method adds attributes using attr_cols to row element and\n works with tuples for multindex or hierarchical columns.\n """\n\n if not self.attr_cols:\n return elem_row\n\n for col in self.attr_cols:\n attr_name = self._get_flat_col_name(col)\n try:\n if not isna(d[col]):\n elem_row.attrib[attr_name] = str(d[col])\n except KeyError:\n raise KeyError(f"no valid column, {col}")\n return elem_row\n\n @final\n def _get_flat_col_name(self, col: str | tuple) -> str:\n flat_col = col\n if isinstance(col, tuple):\n flat_col = (\n "".join([str(c) for c in col]).strip()\n if "" in col\n else "_".join([str(c) for c in col]).strip()\n )\n return f"{self.prefix_uri}{flat_col}"\n\n @cache_readonly\n def _sub_element_cls(self):\n raise 
AbstractMethodError(self)\n\n @final\n def _build_elems(self, d: dict[str, Any], elem_row: Any) -> None:\n """\n Create child elements of row.\n\n This method adds child elements using elem_cols to row element and\n works with tuples for multindex or hierarchical columns.\n """\n sub_element_cls = self._sub_element_cls\n\n if not self.elem_cols:\n return\n\n for col in self.elem_cols:\n elem_name = self._get_flat_col_name(col)\n try:\n val = None if isna(d[col]) or d[col] == "" else str(d[col])\n sub_element_cls(elem_row, elem_name).text = val\n except KeyError:\n raise KeyError(f"no valid column, {col}")\n\n @final\n def write_output(self) -> str | None:\n xml_doc = self._build_tree()\n\n if self.path_or_buffer is not None:\n with get_handle(\n self.path_or_buffer,\n "wb",\n compression=self.compression,\n storage_options=self.storage_options,\n is_text=False,\n ) as handles:\n handles.handle.write(xml_doc)\n return None\n\n else:\n return xml_doc.decode(self.encoding).rstrip()\n\n\nclass EtreeXMLFormatter(_BaseXMLFormatter):\n """\n Class for formatting data in xml using Python standard library\n modules: `xml.etree.ElementTree` and `xml.dom.minidom`.\n """\n\n def _build_tree(self) -> bytes:\n from xml.etree.ElementTree import (\n Element,\n SubElement,\n tostring,\n )\n\n self.root = Element(\n f"{self.prefix_uri}{self.root_name}", attrib=self._other_namespaces()\n )\n\n for d in self.frame_dicts.values():\n elem_row = SubElement(self.root, f"{self.prefix_uri}{self.row_name}")\n\n if not self.attr_cols and not self.elem_cols:\n self.elem_cols = list(d.keys())\n self._build_elems(d, elem_row)\n\n else:\n elem_row = self._build_attribs(d, elem_row)\n self._build_elems(d, elem_row)\n\n self.out_xml = tostring(\n self.root,\n method="xml",\n encoding=self.encoding,\n xml_declaration=self.xml_declaration,\n )\n\n if self.pretty_print:\n self.out_xml = self._prettify_tree()\n\n if self.stylesheet is not None:\n raise ValueError(\n "To use stylesheet, you need lxml 
installed and selected as parser."\n )\n\n return self.out_xml\n\n def _get_prefix_uri(self) -> str:\n from xml.etree.ElementTree import register_namespace\n\n uri = ""\n if self.namespaces:\n for p, n in self.namespaces.items():\n if isinstance(p, str) and isinstance(n, str):\n register_namespace(p, n)\n if self.prefix:\n try:\n uri = f"{{{self.namespaces[self.prefix]}}}"\n except KeyError:\n raise KeyError(f"{self.prefix} is not included in namespaces")\n elif "" in self.namespaces:\n uri = f'{{{self.namespaces[""]}}}'\n else:\n uri = ""\n\n return uri\n\n @cache_readonly\n def _sub_element_cls(self):\n from xml.etree.ElementTree import SubElement\n\n return SubElement\n\n def _prettify_tree(self) -> bytes:\n """\n Output tree for pretty print format.\n\n This method will pretty print xml with line breaks and indentation.\n """\n\n from xml.dom.minidom import parseString\n\n dom = parseString(self.out_xml)\n\n return dom.toprettyxml(indent=" ", encoding=self.encoding)\n\n\nclass LxmlXMLFormatter(_BaseXMLFormatter):\n """\n Class for formatting data in xml using Python standard library\n modules: `xml.etree.ElementTree` and `xml.dom.minidom`.\n """\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n\n self._convert_empty_str_key()\n\n def _build_tree(self) -> bytes:\n """\n Build tree from data.\n\n This method initializes the root and builds attributes and elements\n with optional namespaces.\n """\n from lxml.etree import (\n Element,\n SubElement,\n tostring,\n )\n\n self.root = Element(f"{self.prefix_uri}{self.root_name}", nsmap=self.namespaces)\n\n for d in self.frame_dicts.values():\n elem_row = SubElement(self.root, f"{self.prefix_uri}{self.row_name}")\n\n if not self.attr_cols and not self.elem_cols:\n self.elem_cols = list(d.keys())\n self._build_elems(d, elem_row)\n\n else:\n elem_row = self._build_attribs(d, elem_row)\n self._build_elems(d, elem_row)\n\n self.out_xml = tostring(\n self.root,\n 
pretty_print=self.pretty_print,\n method="xml",\n encoding=self.encoding,\n xml_declaration=self.xml_declaration,\n )\n\n if self.stylesheet is not None:\n self.out_xml = self._transform_doc()\n\n return self.out_xml\n\n def _convert_empty_str_key(self) -> None:\n """\n Replace zero-length string in `namespaces`.\n\n This method will replace '' with None to align to `lxml`\n requirement that empty string prefixes are not allowed.\n """\n\n if self.namespaces and "" in self.namespaces.keys():\n self.namespaces[None] = self.namespaces.pop("", "default")\n\n def _get_prefix_uri(self) -> str:\n uri = ""\n if self.namespaces:\n if self.prefix:\n try:\n uri = f"{{{self.namespaces[self.prefix]}}}"\n except KeyError:\n raise KeyError(f"{self.prefix} is not included in namespaces")\n elif "" in self.namespaces:\n uri = f'{{{self.namespaces[""]}}}'\n else:\n uri = ""\n\n return uri\n\n @cache_readonly\n def _sub_element_cls(self):\n from lxml.etree import SubElement\n\n return SubElement\n\n def _transform_doc(self) -> bytes:\n """\n Parse stylesheet from file or buffer and run it.\n\n This method will parse stylesheet object into tree for parsing\n conditionally by its specific object type, then transforms\n original tree with XSLT script.\n """\n from lxml.etree import (\n XSLT,\n XMLParser,\n fromstring,\n parse,\n )\n\n style_doc = self.stylesheet\n assert style_doc is not None # is ensured by caller\n\n handle_data = get_data_from_filepath(\n filepath_or_buffer=style_doc,\n encoding=self.encoding,\n compression=self.compression,\n storage_options=self.storage_options,\n )\n\n with preprocess_data(handle_data) as xml_data:\n curr_parser = XMLParser(encoding=self.encoding)\n\n if isinstance(xml_data, io.StringIO):\n xsl_doc = fromstring(\n xml_data.getvalue().encode(self.encoding), parser=curr_parser\n )\n else:\n xsl_doc = parse(xml_data, parser=curr_parser)\n\n transformer = XSLT(xsl_doc)\n new_doc = transformer(self.root)\n\n return bytes(new_doc)\n
.venv\Lib\site-packages\pandas\io\formats\xml.py
xml.py
Python
16,029
0.95
0.155357
0.006849
vue-tools
228
2025-03-11T18:50:36.188322
BSD-3-Clause
false
dc40dffec95b0b5735421c1d94325c87
# GH37967: Enable the use of CSS named colors, as defined in\n# matplotlib.colors.CSS4_COLORS, when exporting to Excel.\n# This data has been copied here, instead of being imported from matplotlib,\n# not to have ``to_excel`` methods require matplotlib.\n# source: matplotlib._color_data (3.3.3)\nfrom __future__ import annotations\n\nCSS4_COLORS = {\n "aliceblue": "F0F8FF",\n "antiquewhite": "FAEBD7",\n "aqua": "00FFFF",\n "aquamarine": "7FFFD4",\n "azure": "F0FFFF",\n "beige": "F5F5DC",\n "bisque": "FFE4C4",\n "black": "000000",\n "blanchedalmond": "FFEBCD",\n "blue": "0000FF",\n "blueviolet": "8A2BE2",\n "brown": "A52A2A",\n "burlywood": "DEB887",\n "cadetblue": "5F9EA0",\n "chartreuse": "7FFF00",\n "chocolate": "D2691E",\n "coral": "FF7F50",\n "cornflowerblue": "6495ED",\n "cornsilk": "FFF8DC",\n "crimson": "DC143C",\n "cyan": "00FFFF",\n "darkblue": "00008B",\n "darkcyan": "008B8B",\n "darkgoldenrod": "B8860B",\n "darkgray": "A9A9A9",\n "darkgreen": "006400",\n "darkgrey": "A9A9A9",\n "darkkhaki": "BDB76B",\n "darkmagenta": "8B008B",\n "darkolivegreen": "556B2F",\n "darkorange": "FF8C00",\n "darkorchid": "9932CC",\n "darkred": "8B0000",\n "darksalmon": "E9967A",\n "darkseagreen": "8FBC8F",\n "darkslateblue": "483D8B",\n "darkslategray": "2F4F4F",\n "darkslategrey": "2F4F4F",\n "darkturquoise": "00CED1",\n "darkviolet": "9400D3",\n "deeppink": "FF1493",\n "deepskyblue": "00BFFF",\n "dimgray": "696969",\n "dimgrey": "696969",\n "dodgerblue": "1E90FF",\n "firebrick": "B22222",\n "floralwhite": "FFFAF0",\n "forestgreen": "228B22",\n "fuchsia": "FF00FF",\n "gainsboro": "DCDCDC",\n "ghostwhite": "F8F8FF",\n "gold": "FFD700",\n "goldenrod": "DAA520",\n "gray": "808080",\n "green": "008000",\n "greenyellow": "ADFF2F",\n "grey": "808080",\n "honeydew": "F0FFF0",\n "hotpink": "FF69B4",\n "indianred": "CD5C5C",\n "indigo": "4B0082",\n "ivory": "FFFFF0",\n "khaki": "F0E68C",\n "lavender": "E6E6FA",\n "lavenderblush": "FFF0F5",\n "lawngreen": "7CFC00",\n "lemonchiffon": 
"FFFACD",\n "lightblue": "ADD8E6",\n "lightcoral": "F08080",\n "lightcyan": "E0FFFF",\n "lightgoldenrodyellow": "FAFAD2",\n "lightgray": "D3D3D3",\n "lightgreen": "90EE90",\n "lightgrey": "D3D3D3",\n "lightpink": "FFB6C1",\n "lightsalmon": "FFA07A",\n "lightseagreen": "20B2AA",\n "lightskyblue": "87CEFA",\n "lightslategray": "778899",\n "lightslategrey": "778899",\n "lightsteelblue": "B0C4DE",\n "lightyellow": "FFFFE0",\n "lime": "00FF00",\n "limegreen": "32CD32",\n "linen": "FAF0E6",\n "magenta": "FF00FF",\n "maroon": "800000",\n "mediumaquamarine": "66CDAA",\n "mediumblue": "0000CD",\n "mediumorchid": "BA55D3",\n "mediumpurple": "9370DB",\n "mediumseagreen": "3CB371",\n "mediumslateblue": "7B68EE",\n "mediumspringgreen": "00FA9A",\n "mediumturquoise": "48D1CC",\n "mediumvioletred": "C71585",\n "midnightblue": "191970",\n "mintcream": "F5FFFA",\n "mistyrose": "FFE4E1",\n "moccasin": "FFE4B5",\n "navajowhite": "FFDEAD",\n "navy": "000080",\n "oldlace": "FDF5E6",\n "olive": "808000",\n "olivedrab": "6B8E23",\n "orange": "FFA500",\n "orangered": "FF4500",\n "orchid": "DA70D6",\n "palegoldenrod": "EEE8AA",\n "palegreen": "98FB98",\n "paleturquoise": "AFEEEE",\n "palevioletred": "DB7093",\n "papayawhip": "FFEFD5",\n "peachpuff": "FFDAB9",\n "peru": "CD853F",\n "pink": "FFC0CB",\n "plum": "DDA0DD",\n "powderblue": "B0E0E6",\n "purple": "800080",\n "rebeccapurple": "663399",\n "red": "FF0000",\n "rosybrown": "BC8F8F",\n "royalblue": "4169E1",\n "saddlebrown": "8B4513",\n "salmon": "FA8072",\n "sandybrown": "F4A460",\n "seagreen": "2E8B57",\n "seashell": "FFF5EE",\n "sienna": "A0522D",\n "silver": "C0C0C0",\n "skyblue": "87CEEB",\n "slateblue": "6A5ACD",\n "slategray": "708090",\n "slategrey": "708090",\n "snow": "FFFAFA",\n "springgreen": "00FF7F",\n "steelblue": "4682B4",\n "tan": "D2B48C",\n "teal": "008080",\n "thistle": "D8BFD8",\n "tomato": "FF6347",\n "turquoise": "40E0D0",\n "violet": "EE82EE",\n "wheat": "F5DEB3",\n "white": "FFFFFF",\n "whitesmoke": "F5F5F5",\n 
"yellow": "FFFF00",\n "yellowgreen": "9ACD32",\n}\n
.venv\Lib\site-packages\pandas\io\formats\_color_data.py
_color_data.py
Python
4,332
0.95
0
0.032051
awesome-app
664
2024-10-08T14:27:43.418313
BSD-3-Clause
false
a1601190849f9e6470789dc374596b4d
# ruff: noqa: TCH004\nfrom typing import TYPE_CHECKING\n\nif TYPE_CHECKING:\n # import modules that have public classes/functions\n from pandas.io.formats import style\n\n # and mark only those modules as public\n __all__ = ["style"]\n
.venv\Lib\site-packages\pandas\io\formats\__init__.py
__init__.py
Python
238
0.95
0.111111
0.428571
react-lib
423
2024-08-14T11:33:09.879071
BSD-3-Clause
false
367ac44ebdad6b99acee490c9d7ec92d
{# Update the html_style/table_structure.html documentation too #}\n{% if doctype_html %}\n<!DOCTYPE html>\n<html>\n<head>\n<meta charset="{{encoding}}">\n{% if not exclude_styles %}{% include html_style_tpl %}{% endif %}\n</head>\n<body>\n{% include html_table_tpl %}\n</body>\n</html>\n{% elif not doctype_html %}\n{% if not exclude_styles %}{% include html_style_tpl %}{% endif %}\n{% include html_table_tpl %}\n{% endif %}\n
.venv\Lib\site-packages\pandas\io\formats\templates\html.tpl
html.tpl
Other
412
0.8
0.1875
0
python-kit
489
2024-05-19T04:47:00.414125
BSD-3-Clause
false
3f05e66967c6b0302441acfa32188906
{%- block before_style -%}{%- endblock before_style -%}\n{% block style %}\n<style type="text/css">\n{% block table_styles %}\n{% for s in table_styles %}\n#T_{{uuid}} {{s.selector}} {\n{% for p,val in s.props %}\n {{p}}: {{val}};\n{% endfor %}\n}\n{% endfor %}\n{% endblock table_styles %}\n{% block before_cellstyle %}{% endblock before_cellstyle %}\n{% block cellstyle %}\n{% for cs in [cellstyle, cellstyle_index, cellstyle_columns] %}\n{% for s in cs %}\n{% for selector in s.selectors %}{% if not loop.first %}, {% endif %}#T_{{uuid}}_{{selector}}{% endfor %} {\n{% for p,val in s.props %}\n {{p}}: {{val}};\n{% endfor %}\n}\n{% endfor %}\n{% endfor %}\n{% endblock cellstyle %}\n</style>\n{% endblock style %}\n
.venv\Lib\site-packages\pandas\io\formats\templates\html_style.tpl
html_style.tpl
Other
694
0.8
0.269231
0.038462
awesome-app
16
2023-08-19T00:10:33.938730
GPL-3.0
false
43ba850965e17e925bd6c5c4949a3f45
{% block before_table %}{% endblock before_table %}\n{% block table %}\n{% if exclude_styles %}\n<table>\n{% else %}\n<table id="T_{{uuid}}"{% if table_attributes %} {{table_attributes}}{% endif %}>\n{% endif %}\n{% block caption %}\n{% if caption and caption is string %}\n <caption>{{caption}}</caption>\n{% elif caption and caption is sequence %}\n <caption>{{caption[0]}}</caption>\n{% endif %}\n{% endblock caption %}\n{% block thead %}\n <thead>\n{% block before_head_rows %}{% endblock %}\n{% for r in head %}\n{% block head_tr scoped %}\n <tr>\n{% if exclude_styles %}\n{% for c in r %}\n{% if c.is_visible != False %}\n <{{c.type}} {{c.attributes}}>{{c.display_value}}</{{c.type}}>\n{% endif %}\n{% endfor %}\n{% else %}\n{% for c in r %}\n{% if c.is_visible != False %}\n <{{c.type}} {%- if c.id is defined %} id="T_{{uuid}}_{{c.id}}" {%- endif %} class="{{c.class}}" {{c.attributes}}>{{c.display_value}}</{{c.type}}>\n{% endif %}\n{% endfor %}\n{% endif %}\n </tr>\n{% endblock head_tr %}\n{% endfor %}\n{% block after_head_rows %}{% endblock %}\n </thead>\n{% endblock thead %}\n{% block tbody %}\n <tbody>\n{% block before_rows %}{% endblock before_rows %}\n{% for r in body %}\n{% block tr scoped %}\n <tr>\n{% if exclude_styles %}\n{% for c in r %}{% if c.is_visible != False %}\n <{{c.type}} {{c.attributes}}>{{c.display_value}}</{{c.type}}>\n{% endif %}{% endfor %}\n{% else %}\n{% for c in r %}{% if c.is_visible != False %}\n <{{c.type}} {%- if c.id is defined %} id="T_{{uuid}}_{{c.id}}" {%- endif %} class="{{c.class}}" {{c.attributes}}>{{c.display_value}}</{{c.type}}>\n{% endif %}{% endfor %}\n{% endif %}\n </tr>\n{% endblock tr %}\n{% endfor %}\n{% block after_rows %}{% endblock after_rows %}\n </tbody>\n{% endblock tbody %}\n</table>\n{% endblock table %}\n{% block after_table %}{% endblock after_table %}\n
.venv\Lib\site-packages\pandas\io\formats\templates\html_table.tpl
html_table.tpl
Other
1,811
0.7
0.333333
0
awesome-app
715
2024-09-21T18:53:31.405890
BSD-3-Clause
false
23c0d7924b208dace6821573d9d9cadf
{% if environment == "longtable" %}\n{% include "latex_longtable.tpl" %}\n{% else %}\n{% include "latex_table.tpl" %}\n{% endif %}\n
.venv\Lib\site-packages\pandas\io\formats\templates\latex.tpl
latex.tpl
Other
127
0.7
0.2
0
node-utils
986
2024-04-25T09:12:11.637478
MIT
false
51c8fe7f3635bb9f1be8fcf3b9acc415
\begin{longtable}\n{%- set position = parse_table(table_styles, 'position') %}\n{%- if position is not none %}\n[{{position}}]\n{%- endif %}\n{%- set column_format = parse_table(table_styles, 'column_format') %}\n{% raw %}{{% endraw %}{{column_format}}{% raw %}}{% endraw %}\n\n{% for style in table_styles %}\n{% if style['selector'] not in ['position', 'position_float', 'caption', 'toprule', 'midrule', 'bottomrule', 'column_format', 'label'] %}\n\{{style['selector']}}{{parse_table(table_styles, style['selector'])}}\n{% endif %}\n{% endfor %}\n{% if caption and caption is string %}\n\caption{% raw %}{{% endraw %}{{caption}}{% raw %}}{% endraw %}\n{%- set label = parse_table(table_styles, 'label') %}\n{%- if label is not none %}\n \label{{label}}\n{%- endif %} \\\n{% elif caption and caption is sequence %}\n\caption[{{caption[1]}}]{% raw %}{{% endraw %}{{caption[0]}}{% raw %}}{% endraw %}\n{%- set label = parse_table(table_styles, 'label') %}\n{%- if label is not none %}\n \label{{label}}\n{%- endif %} \\\n{% else %}\n{%- set label = parse_table(table_styles, 'label') %}\n{%- if label is not none %}\n\label{{label}} \\\n{% endif %}\n{% endif %}\n{% set toprule = parse_table(table_styles, 'toprule') %}\n{% if toprule is not none %}\n\{{toprule}}\n{% endif %}\n{% for row in head %}\n{% for c in row %}{%- if not loop.first %} & {% endif %}{{parse_header(c, multirow_align, multicol_align, siunitx)}}{% endfor %} \\\n{% endfor %}\n{% set midrule = parse_table(table_styles, 'midrule') %}\n{% if midrule is not none %}\n\{{midrule}}\n{% endif %}\n\endfirsthead\n{% if caption and caption is string %}\n\caption[]{% raw %}{{% endraw %}{{caption}}{% raw %}}{% endraw %} \\\n{% elif caption and caption is sequence %}\n\caption[]{% raw %}{{% endraw %}{{caption[0]}}{% raw %}}{% endraw %} \\\n{% endif %}\n{% if toprule is not none %}\n\{{toprule}}\n{% endif %}\n{% for row in head %}\n{% for c in row %}{%- if not loop.first %} & {% endif %}{{parse_header(c, multirow_align, 
multicol_align, siunitx)}}{% endfor %} \\\n{% endfor %}\n{% if midrule is not none %}\n\{{midrule}}\n{% endif %}\n\endhead\n{% if midrule is not none %}\n\{{midrule}}\n{% endif %}\n\multicolumn{% raw %}{{% endraw %}{{body[0]|length}}{% raw %}}{% endraw %}{r}{Continued on next page} \\\n{% if midrule is not none %}\n\{{midrule}}\n{% endif %}\n\endfoot\n{% set bottomrule = parse_table(table_styles, 'bottomrule') %}\n{% if bottomrule is not none %}\n\{{bottomrule}}\n{% endif %}\n\endlastfoot\n{% for row in body %}\n{% for c in row %}{% if not loop.first %} & {% endif %}\n {%- if c.type == 'th' %}{{parse_header(c, multirow_align, multicol_align)}}{% else %}{{parse_cell(c.cellstyle, c.display_value, convert_css)}}{% endif %}\n{%- endfor %} \\\n{% if clines and clines[loop.index] | length > 0 %}\n {%- for cline in clines[loop.index] %}{% if not loop.first %} {% endif %}{{ cline }}{% endfor %}\n\n{% endif %}\n{% endfor %}\n\end{longtable}\n{% raw %}{% endraw %}\n
.venv\Lib\site-packages\pandas\io\formats\templates\latex_longtable.tpl
latex_longtable.tpl
Other
2,877
0.7
0.341463
0
react-lib
143
2024-05-01T04:25:33.855067
Apache-2.0
false
a598d029ea7c36d86acd1a6e03d3a041
{% if environment or parse_wrap(table_styles, caption) %}\n\begin{% raw %}{{% endraw %}{{environment if environment else "table"}}{% raw %}}{% endraw %}\n{%- set position = parse_table(table_styles, 'position') %}\n{%- if position is not none %}\n[{{position}}]\n{%- endif %}\n\n{% set position_float = parse_table(table_styles, 'position_float') %}\n{% if position_float is not none%}\n\{{position_float}}\n{% endif %}\n{% if caption and caption is string %}\n\caption{% raw %}{{% endraw %}{{caption}}{% raw %}}{% endraw %}\n\n{% elif caption and caption is sequence %}\n\caption[{{caption[1]}}]{% raw %}{{% endraw %}{{caption[0]}}{% raw %}}{% endraw %}\n\n{% endif %}\n{% for style in table_styles %}\n{% if style['selector'] not in ['position', 'position_float', 'caption', 'toprule', 'midrule', 'bottomrule', 'column_format'] %}\n\{{style['selector']}}{{parse_table(table_styles, style['selector'])}}\n{% endif %}\n{% endfor %}\n{% endif %}\n\begin{tabular}\n{%- set column_format = parse_table(table_styles, 'column_format') %}\n{% raw %}{{% endraw %}{{column_format}}{% raw %}}{% endraw %}\n\n{% set toprule = parse_table(table_styles, 'toprule') %}\n{% if toprule is not none %}\n\{{toprule}}\n{% endif %}\n{% for row in head %}\n{% for c in row %}{%- if not loop.first %} & {% endif %}{{parse_header(c, multirow_align, multicol_align, siunitx, convert_css)}}{% endfor %} \\\n{% endfor %}\n{% set midrule = parse_table(table_styles, 'midrule') %}\n{% if midrule is not none %}\n\{{midrule}}\n{% endif %}\n{% for row in body %}\n{% for c in row %}{% if not loop.first %} & {% endif %}\n {%- if c.type == 'th' %}{{parse_header(c, multirow_align, multicol_align, False, convert_css)}}{% else %}{{parse_cell(c.cellstyle, c.display_value, convert_css)}}{% endif %}\n{%- endfor %} \\\n{% if clines and clines[loop.index] | length > 0 %}\n {%- for cline in clines[loop.index] %}{% if not loop.first %} {% endif %}{{ cline }}{% endfor %}\n\n{% endif %}\n{% endfor %}\n{% set bottomrule = 
parse_table(table_styles, 'bottomrule') %}\n{% if bottomrule is not none %}\n\{{bottomrule}}\n{% endif %}\n\end{tabular}\n{% if environment or parse_wrap(table_styles, caption) %}\n\end{% raw %}{{% endraw %}{{environment if environment else "table"}}{% raw %}}{% endraw %}\n\n{% endif %}\n
.venv\Lib\site-packages\pandas\io\formats\templates\latex_table.tpl
latex_table.tpl
Other
2,221
0.7
0.385965
0
react-lib
194
2025-03-18T03:58:20.373763
Apache-2.0
false
ca96849d67480728bac107c2b81f0545
{% for r in head %}\n{% for c in r %}{% if c["is_visible"] %}\n{{ c["display_value"] }}{% if not loop.last %}{{ delimiter }}{% endif %}\n{% endif %}{% endfor %}\n\n{% endfor %}\n{% for r in body %}\n{% for c in r %}{% if c["is_visible"] %}\n{{ c["display_value"] }}{% if not loop.last %}{{ delimiter }}{% endif %}\n{% endif %}{% endfor %}\n\n{% endfor %}\n
.venv\Lib\site-packages\pandas\io\formats\templates\string.tpl
string.tpl
Other
344
0.7
0.666667
0
react-lib
383
2025-04-11T15:34:33.792977
BSD-3-Clause
false
736a2e491b9b6f83de0bb15b8879390b
\n\n
.venv\Lib\site-packages\pandas\io\formats\__pycache__\console.cpython-313.pyc
console.cpython-313.pyc
Other
2,484
0.8
0.095238
0
node-utils
386
2023-09-20T07:13:01.024037
MIT
false
c374c0711c33e1cccb19c6d2d739cc7d
\n\n
.venv\Lib\site-packages\pandas\io\formats\__pycache__\css.cpython-313.pyc
css.cpython-313.pyc
Other
14,023
0.95
0.037234
0
awesome-app
659
2024-08-28T10:38:40.745664
Apache-2.0
false
ecac49dd158ef0f42bf17c896b2a3b44
\n\n
.venv\Lib\site-packages\pandas\io\formats\__pycache__\csvs.cpython-313.pyc
csvs.cpython-313.pyc
Other
15,871
0.8
0.017857
0
python-kit
683
2025-01-08T09:48:09.860715
Apache-2.0
false
bf95cd7a87fcfecf27c23cc66c5e8b38
\n\n
.venv\Lib\site-packages\pandas\io\formats\__pycache__\excel.cpython-313.pyc
excel.cpython-313.pyc
Other
37,665
0.8
0.038674
0.002907
vue-tools
479
2024-04-06T22:51:34.584794
BSD-3-Clause
false
e5821ca20accc95e8cbf143186bb4b19
\n\n
.venv\Lib\site-packages\pandas\io\formats\__pycache__\format.cpython-313.pyc
format.cpython-313.pyc
Other
83,496
0.75
0.034292
0.014388
python-kit
734
2023-08-05T08:30:41.133904
Apache-2.0
false
de4eede4fc4113de3c7d0e69b28bc9e3
\n\n
.venv\Lib\site-packages\pandas\io\formats\__pycache__\html.cpython-313.pyc
html.cpython-313.pyc
Other
27,807
0.95
0.057508
0.01087
awesome-app
197
2025-02-26T08:01:27.167204
MIT
false
04369215eb17907fcc700ed6c98dda4b
\n\n
.venv\Lib\site-packages\pandas\io\formats\__pycache__\info.cpython-313.pyc
info.cpython-313.pyc
Other
45,744
0.95
0.068901
0.01006
awesome-app
888
2024-02-23T14:33:29.559544
BSD-3-Clause
false
f3c7dfb308e9bd101833a70fe0f5beaa
\n\n
.venv\Lib\site-packages\pandas\io\formats\__pycache__\printing.cpython-313.pyc
printing.cpython-313.pyc
Other
22,247
0.95
0.04059
0
react-lib
907
2025-02-02T09:38:39.916876
GPL-3.0
false
4fad33e9e27a2ca9c49c08175a34a1c9
\n\n
.venv\Lib\site-packages\pandas\io\formats\__pycache__\string.cpython-313.pyc
string.cpython-313.pyc
Other
10,693
0.8
0.019802
0
react-lib
461
2024-08-26T12:56:04.177438
Apache-2.0
false
2fab67fb2f603617e5a27334335fc385
\n\n
.venv\Lib\site-packages\pandas\io\formats\__pycache__\style_render.cpython-313.pyc
style_render.cpython-313.pyc
Other
95,380
0.75
0.053538
0.006241
react-lib
783
2024-12-21T20:20:27.208667
GPL-3.0
false
09714342779e46c4653a8e16fa7e3439
\n\n
.venv\Lib\site-packages\pandas\io\formats\__pycache__\xml.cpython-313.pyc
xml.cpython-313.pyc
Other
20,999
0.95
0.060837
0.02193
awesome-app
410
2024-07-25T06:25:31.676776
Apache-2.0
false
a86222ac098a215264a31fad209f457c
\n\n
.venv\Lib\site-packages\pandas\io\formats\__pycache__\_color_data.cpython-313.pyc
_color_data.cpython-313.pyc
Other
5,948
0.8
0
0
awesome-app
611
2023-08-02T06:02:50.388678
MIT
false
776cc05cda1bc6662c6e346a9095fd4a
\n\n
.venv\Lib\site-packages\pandas\io\formats\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
340
0.7
0
0
node-utils
742
2023-08-06T03:54:34.236895
MIT
false
da649c6a0a08de63abddc0a472c6e5d6
from __future__ import annotations\n\nfrom abc import (\n ABC,\n abstractmethod,\n)\nfrom collections import abc\nfrom io import StringIO\nfrom itertools import islice\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Generic,\n Literal,\n TypeVar,\n final,\n overload,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import lib\nfrom pandas._libs.json import (\n ujson_dumps,\n ujson_loads,\n)\nfrom pandas._libs.tslibs import iNaT\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.errors import AbstractMethodError\nfrom pandas.util._decorators import doc\nfrom pandas.util._exceptions import find_stack_level\nfrom pandas.util._validators import check_dtype_backend\n\nfrom pandas.core.dtypes.common import (\n ensure_str,\n is_string_dtype,\n)\nfrom pandas.core.dtypes.dtypes import PeriodDtype\n\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n Series,\n isna,\n notna,\n to_datetime,\n)\nfrom pandas.core.reshape.concat import concat\nfrom pandas.core.shared_docs import _shared_docs\n\nfrom pandas.io._util import arrow_table_to_pandas\nfrom pandas.io.common import (\n IOHandles,\n dedup_names,\n extension_to_compression,\n file_exists,\n get_handle,\n is_fsspec_url,\n is_potential_multi_index,\n is_url,\n stringify_path,\n)\nfrom pandas.io.json._normalize import convert_to_line_delimits\nfrom pandas.io.json._table_schema import (\n build_table_schema,\n parse_table_schema,\n)\nfrom pandas.io.parsers.readers import validate_integer\n\nif TYPE_CHECKING:\n from collections.abc import (\n Hashable,\n Mapping,\n )\n from types import TracebackType\n\n from pandas._typing import (\n CompressionOptions,\n DtypeArg,\n DtypeBackend,\n FilePath,\n IndexLabel,\n JSONEngine,\n JSONSerializable,\n ReadBuffer,\n Self,\n StorageOptions,\n WriteBuffer,\n )\n\n from pandas.core.generic import NDFrame\n\nFrameSeriesStrT = TypeVar("FrameSeriesStrT", bound=Literal["frame", "series"])\n\n\n# interface to/from\n@overload\ndef 
to_json(\n path_or_buf: FilePath | WriteBuffer[str] | WriteBuffer[bytes],\n obj: NDFrame,\n orient: str | None = ...,\n date_format: str = ...,\n double_precision: int = ...,\n force_ascii: bool = ...,\n date_unit: str = ...,\n default_handler: Callable[[Any], JSONSerializable] | None = ...,\n lines: bool = ...,\n compression: CompressionOptions = ...,\n index: bool | None = ...,\n indent: int = ...,\n storage_options: StorageOptions = ...,\n mode: Literal["a", "w"] = ...,\n) -> None:\n ...\n\n\n@overload\ndef to_json(\n path_or_buf: None,\n obj: NDFrame,\n orient: str | None = ...,\n date_format: str = ...,\n double_precision: int = ...,\n force_ascii: bool = ...,\n date_unit: str = ...,\n default_handler: Callable[[Any], JSONSerializable] | None = ...,\n lines: bool = ...,\n compression: CompressionOptions = ...,\n index: bool | None = ...,\n indent: int = ...,\n storage_options: StorageOptions = ...,\n mode: Literal["a", "w"] = ...,\n) -> str:\n ...\n\n\ndef to_json(\n path_or_buf: FilePath | WriteBuffer[str] | WriteBuffer[bytes] | None,\n obj: NDFrame,\n orient: str | None = None,\n date_format: str = "epoch",\n double_precision: int = 10,\n force_ascii: bool = True,\n date_unit: str = "ms",\n default_handler: Callable[[Any], JSONSerializable] | None = None,\n lines: bool = False,\n compression: CompressionOptions = "infer",\n index: bool | None = None,\n indent: int = 0,\n storage_options: StorageOptions | None = None,\n mode: Literal["a", "w"] = "w",\n) -> str | None:\n if orient in ["records", "values"] and index is True:\n raise ValueError(\n "'index=True' is only valid when 'orient' is 'split', 'table', "\n "'index', or 'columns'."\n )\n elif orient in ["index", "columns"] and index is False:\n raise ValueError(\n "'index=False' is only valid when 'orient' is 'split', 'table', "\n "'records', or 'values'."\n )\n elif index is None:\n # will be ignored for orient='records' and 'values'\n index = True\n\n if lines and orient != "records":\n raise 
ValueError("'lines' keyword only valid when 'orient' is records")\n\n if mode not in ["a", "w"]:\n msg = (\n f"mode={mode} is not a valid option."\n "Only 'w' and 'a' are currently supported."\n )\n raise ValueError(msg)\n\n if mode == "a" and (not lines or orient != "records"):\n msg = (\n "mode='a' (append) is only supported when "\n "lines is True and orient is 'records'"\n )\n raise ValueError(msg)\n\n if orient == "table" and isinstance(obj, Series):\n obj = obj.to_frame(name=obj.name or "values")\n\n writer: type[Writer]\n if orient == "table" and isinstance(obj, DataFrame):\n writer = JSONTableWriter\n elif isinstance(obj, Series):\n writer = SeriesWriter\n elif isinstance(obj, DataFrame):\n writer = FrameWriter\n else:\n raise NotImplementedError("'obj' should be a Series or a DataFrame")\n\n s = writer(\n obj,\n orient=orient,\n date_format=date_format,\n double_precision=double_precision,\n ensure_ascii=force_ascii,\n date_unit=date_unit,\n default_handler=default_handler,\n index=index,\n indent=indent,\n ).write()\n\n if lines:\n s = convert_to_line_delimits(s)\n\n if path_or_buf is not None:\n # apply compression and byte/text conversion\n with get_handle(\n path_or_buf, mode, compression=compression, storage_options=storage_options\n ) as handles:\n handles.handle.write(s)\n else:\n return s\n return None\n\n\nclass Writer(ABC):\n _default_orient: str\n\n def __init__(\n self,\n obj: NDFrame,\n orient: str | None,\n date_format: str,\n double_precision: int,\n ensure_ascii: bool,\n date_unit: str,\n index: bool,\n default_handler: Callable[[Any], JSONSerializable] | None = None,\n indent: int = 0,\n ) -> None:\n self.obj = obj\n\n if orient is None:\n orient = self._default_orient\n\n self.orient = orient\n self.date_format = date_format\n self.double_precision = double_precision\n self.ensure_ascii = ensure_ascii\n self.date_unit = date_unit\n self.default_handler = default_handler\n self.index = index\n self.indent = indent\n\n self.is_copy = None\n 
self._format_axes()\n\n def _format_axes(self) -> None:\n raise AbstractMethodError(self)\n\n def write(self) -> str:\n iso_dates = self.date_format == "iso"\n return ujson_dumps(\n self.obj_to_write,\n orient=self.orient,\n double_precision=self.double_precision,\n ensure_ascii=self.ensure_ascii,\n date_unit=self.date_unit,\n iso_dates=iso_dates,\n default_handler=self.default_handler,\n indent=self.indent,\n )\n\n @property\n @abstractmethod\n def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:\n """Object to write in JSON format."""\n\n\nclass SeriesWriter(Writer):\n _default_orient = "index"\n\n @property\n def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:\n if not self.index and self.orient == "split":\n return {"name": self.obj.name, "data": self.obj.values}\n else:\n return self.obj\n\n def _format_axes(self) -> None:\n if not self.obj.index.is_unique and self.orient == "index":\n raise ValueError(f"Series index must be unique for orient='{self.orient}'")\n\n\nclass FrameWriter(Writer):\n _default_orient = "columns"\n\n @property\n def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:\n if not self.index and self.orient == "split":\n obj_to_write = self.obj.to_dict(orient="split")\n del obj_to_write["index"]\n else:\n obj_to_write = self.obj\n return obj_to_write\n\n def _format_axes(self) -> None:\n """\n Try to format axes if they are datelike.\n """\n if not self.obj.index.is_unique and self.orient in ("index", "columns"):\n raise ValueError(\n f"DataFrame index must be unique for orient='{self.orient}'."\n )\n if not self.obj.columns.is_unique and self.orient in (\n "index",\n "columns",\n "records",\n ):\n raise ValueError(\n f"DataFrame columns must be unique for orient='{self.orient}'."\n )\n\n\nclass JSONTableWriter(FrameWriter):\n _default_orient = "records"\n\n def __init__(\n self,\n obj,\n orient: str | None,\n date_format: str,\n double_precision: int,\n ensure_ascii: bool,\n date_unit: str,\n index: bool,\n 
default_handler: Callable[[Any], JSONSerializable] | None = None,\n indent: int = 0,\n ) -> None:\n """\n Adds a `schema` attribute with the Table Schema, resets\n the index (can't do in caller, because the schema inference needs\n to know what the index is, forces orient to records, and forces\n date_format to 'iso'.\n """\n super().__init__(\n obj,\n orient,\n date_format,\n double_precision,\n ensure_ascii,\n date_unit,\n index,\n default_handler=default_handler,\n indent=indent,\n )\n\n if date_format != "iso":\n msg = (\n "Trying to write with `orient='table'` and "\n f"`date_format='{date_format}'`. Table Schema requires dates "\n "to be formatted with `date_format='iso'`"\n )\n raise ValueError(msg)\n\n self.schema = build_table_schema(obj, index=self.index)\n\n # NotImplemented on a column MultiIndex\n if obj.ndim == 2 and isinstance(obj.columns, MultiIndex):\n raise NotImplementedError(\n "orient='table' is not supported for MultiIndex columns"\n )\n\n # TODO: Do this timedelta properly in objToJSON.c See GH #15137\n if (\n (obj.ndim == 1)\n and (obj.name in set(obj.index.names))\n or len(obj.columns.intersection(obj.index.names))\n ):\n msg = "Overlapping names between the index and columns"\n raise ValueError(msg)\n\n obj = obj.copy()\n timedeltas = obj.select_dtypes(include=["timedelta"]).columns\n if len(timedeltas):\n obj[timedeltas] = obj[timedeltas].map(lambda x: x.isoformat())\n # Convert PeriodIndex to datetimes before serializing\n if isinstance(obj.index.dtype, PeriodDtype):\n obj.index = obj.index.to_timestamp()\n\n # exclude index from obj if index=False\n if not self.index:\n self.obj = obj.reset_index(drop=True)\n else:\n self.obj = obj.reset_index(drop=False)\n self.date_format = "iso"\n self.orient = "records"\n self.index = index\n\n @property\n def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:\n return {"schema": self.schema, "data": self.obj}\n\n\n@overload\ndef read_json(\n path_or_buf: FilePath | ReadBuffer[str] | 
ReadBuffer[bytes],\n *,\n orient: str | None = ...,\n typ: Literal["frame"] = ...,\n dtype: DtypeArg | None = ...,\n convert_axes: bool | None = ...,\n convert_dates: bool | list[str] = ...,\n keep_default_dates: bool = ...,\n precise_float: bool = ...,\n date_unit: str | None = ...,\n encoding: str | None = ...,\n encoding_errors: str | None = ...,\n lines: bool = ...,\n chunksize: int,\n compression: CompressionOptions = ...,\n nrows: int | None = ...,\n storage_options: StorageOptions = ...,\n dtype_backend: DtypeBackend | lib.NoDefault = ...,\n engine: JSONEngine = ...,\n) -> JsonReader[Literal["frame"]]:\n ...\n\n\n@overload\ndef read_json(\n path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes],\n *,\n orient: str | None = ...,\n typ: Literal["series"],\n dtype: DtypeArg | None = ...,\n convert_axes: bool | None = ...,\n convert_dates: bool | list[str] = ...,\n keep_default_dates: bool = ...,\n precise_float: bool = ...,\n date_unit: str | None = ...,\n encoding: str | None = ...,\n encoding_errors: str | None = ...,\n lines: bool = ...,\n chunksize: int,\n compression: CompressionOptions = ...,\n nrows: int | None = ...,\n storage_options: StorageOptions = ...,\n dtype_backend: DtypeBackend | lib.NoDefault = ...,\n engine: JSONEngine = ...,\n) -> JsonReader[Literal["series"]]:\n ...\n\n\n@overload\ndef read_json(\n path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes],\n *,\n orient: str | None = ...,\n typ: Literal["series"],\n dtype: DtypeArg | None = ...,\n convert_axes: bool | None = ...,\n convert_dates: bool | list[str] = ...,\n keep_default_dates: bool = ...,\n precise_float: bool = ...,\n date_unit: str | None = ...,\n encoding: str | None = ...,\n encoding_errors: str | None = ...,\n lines: bool = ...,\n chunksize: None = ...,\n compression: CompressionOptions = ...,\n nrows: int | None = ...,\n storage_options: StorageOptions = ...,\n dtype_backend: DtypeBackend | lib.NoDefault = ...,\n engine: JSONEngine = ...,\n) -> Series:\n 
...\n\n\n@overload\ndef read_json(\n path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes],\n *,\n orient: str | None = ...,\n typ: Literal["frame"] = ...,\n dtype: DtypeArg | None = ...,\n convert_axes: bool | None = ...,\n convert_dates: bool | list[str] = ...,\n keep_default_dates: bool = ...,\n precise_float: bool = ...,\n date_unit: str | None = ...,\n encoding: str | None = ...,\n encoding_errors: str | None = ...,\n lines: bool = ...,\n chunksize: None = ...,\n compression: CompressionOptions = ...,\n nrows: int | None = ...,\n storage_options: StorageOptions = ...,\n dtype_backend: DtypeBackend | lib.NoDefault = ...,\n engine: JSONEngine = ...,\n) -> DataFrame:\n ...\n\n\n@doc(\n storage_options=_shared_docs["storage_options"],\n decompression_options=_shared_docs["decompression_options"] % "path_or_buf",\n)\ndef read_json(\n path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes],\n *,\n orient: str | None = None,\n typ: Literal["frame", "series"] = "frame",\n dtype: DtypeArg | None = None,\n convert_axes: bool | None = None,\n convert_dates: bool | list[str] = True,\n keep_default_dates: bool = True,\n precise_float: bool = False,\n date_unit: str | None = None,\n encoding: str | None = None,\n encoding_errors: str | None = "strict",\n lines: bool = False,\n chunksize: int | None = None,\n compression: CompressionOptions = "infer",\n nrows: int | None = None,\n storage_options: StorageOptions | None = None,\n dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,\n engine: JSONEngine = "ujson",\n) -> DataFrame | Series | JsonReader:\n """\n Convert a JSON string to pandas object.\n\n Parameters\n ----------\n path_or_buf : a valid JSON str, path object or file-like object\n Any valid string path is acceptable. The string could be a URL. Valid\n URL schemes include http, ftp, s3, and file. For file URLs, a host is\n expected. 
A local file could be:\n ``file://localhost/path/to/table.json``.\n\n If you want to pass in a path object, pandas accepts any\n ``os.PathLike``.\n\n By file-like object, we refer to objects with a ``read()`` method,\n such as a file handle (e.g. via builtin ``open`` function)\n or ``StringIO``.\n\n .. deprecated:: 2.1.0\n Passing json literal strings is deprecated.\n\n orient : str, optional\n Indication of expected JSON string format.\n Compatible JSON strings can be produced by ``to_json()`` with a\n corresponding orient value.\n The set of possible orients is:\n\n - ``'split'`` : dict like\n ``{{index -> [index], columns -> [columns], data -> [values]}}``\n - ``'records'`` : list like\n ``[{{column -> value}}, ... , {{column -> value}}]``\n - ``'index'`` : dict like ``{{index -> {{column -> value}}}}``\n - ``'columns'`` : dict like ``{{column -> {{index -> value}}}}``\n - ``'values'`` : just the values array\n - ``'table'`` : dict like ``{{'schema': {{schema}}, 'data': {{data}}}}``\n\n The allowed and default values depend on the value\n of the `typ` parameter.\n\n * when ``typ == 'series'``,\n\n - allowed orients are ``{{'split','records','index'}}``\n - default is ``'index'``\n - The Series index must be unique for orient ``'index'``.\n\n * when ``typ == 'frame'``,\n\n - allowed orients are ``{{'split','records','index',\n 'columns','values', 'table'}}``\n - default is ``'columns'``\n - The DataFrame index must be unique for orients ``'index'`` and\n ``'columns'``.\n - The DataFrame columns must be unique for orients ``'index'``,\n ``'columns'``, and ``'records'``.\n\n typ : {{'frame', 'series'}}, default 'frame'\n The type of object to recover.\n\n dtype : bool or dict, default None\n If True, infer dtypes; if a dict of column to dtype, then use those;\n if False, then don't infer dtypes at all, applies only to the data.\n\n For all ``orient`` values except ``'table'``, default is True.\n\n convert_axes : bool, default None\n Try to convert the axes to the 
proper dtypes.\n\n For all ``orient`` values except ``'table'``, default is True.\n\n convert_dates : bool or list of str, default True\n If True then default datelike columns may be converted (depending on\n keep_default_dates).\n If False, no dates will be converted.\n If a list of column names, then those columns will be converted and\n default datelike columns may also be converted (depending on\n keep_default_dates).\n\n keep_default_dates : bool, default True\n If parsing dates (convert_dates is not False), then try to parse the\n default datelike columns.\n A column label is datelike if\n\n * it ends with ``'_at'``,\n\n * it ends with ``'_time'``,\n\n * it begins with ``'timestamp'``,\n\n * it is ``'modified'``, or\n\n * it is ``'date'``.\n\n precise_float : bool, default False\n Set to enable usage of higher precision (strtod) function when\n decoding string to double values. Default (False) is to use fast but\n less precise builtin functionality.\n\n date_unit : str, default None\n The timestamp unit to detect if converting dates. The default behaviour\n is to try and detect the correct precision, but if this is not desired\n then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds,\n milliseconds, microseconds or nanoseconds respectively.\n\n encoding : str, default is 'utf-8'\n The encoding to use to decode py3 bytes.\n\n encoding_errors : str, optional, default "strict"\n How encoding errors are treated. `List of possible values\n <https://docs.python.org/3/library/codecs.html#error-handlers>`_ .\n\n .. 
versionadded:: 1.3.0\n\n lines : bool, default False\n Read the file as a json object per line.\n\n chunksize : int, optional\n Return JsonReader object for iteration.\n See the `line-delimited json docs\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#line-delimited-json>`_\n for more information on ``chunksize``.\n This can only be passed if `lines=True`.\n If this is None, the file will be read into memory all at once.\n {decompression_options}\n\n .. versionchanged:: 1.4.0 Zstandard support.\n\n nrows : int, optional\n The number of lines from the line-delimited jsonfile that has to be read.\n This can only be passed if `lines=True`.\n If this is None, all the rows will be returned.\n\n {storage_options}\n\n dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'\n Back-end data type applied to the resultant :class:`DataFrame`\n (still experimental). Behaviour is as follows:\n\n * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`\n (default).\n * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`\n DataFrame.\n\n .. versionadded:: 2.0\n\n engine : {{"ujson", "pyarrow"}}, default "ujson"\n Parser engine to use. The ``"pyarrow"`` engine is only available when\n ``lines=True``.\n\n .. versionadded:: 2.0\n\n Returns\n -------\n Series, DataFrame, or pandas.api.typing.JsonReader\n A JsonReader is returned when ``chunksize`` is not ``0`` or ``None``.\n Otherwise, the type returned depends on the value of ``typ``.\n\n See Also\n --------\n DataFrame.to_json : Convert a DataFrame to a JSON string.\n Series.to_json : Convert a Series to a JSON string.\n json_normalize : Normalize semi-structured JSON data into a flat table.\n\n Notes\n -----\n Specific to ``orient='table'``, if a :class:`DataFrame` with a literal\n :class:`Index` name of `index` gets written with :func:`to_json`, the\n subsequent read operation will incorrectly set the :class:`Index` name to\n ``None``. 
This is because `index` is also used by :func:`DataFrame.to_json`\n to denote a missing :class:`Index` name, and the subsequent\n :func:`read_json` operation cannot distinguish between the two. The same\n limitation is encountered with a :class:`MultiIndex` and any names\n beginning with ``'level_'``.\n\n Examples\n --------\n >>> from io import StringIO\n >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],\n ... index=['row 1', 'row 2'],\n ... columns=['col 1', 'col 2'])\n\n Encoding/decoding a Dataframe using ``'split'`` formatted JSON:\n\n >>> df.to_json(orient='split')\n '\\n{{\\n"columns":["col 1","col 2"],\\n"index":["row 1","row 2"],\\n"data":[["a","b"],["c","d"]]\\n}}\\n'\n >>> pd.read_json(StringIO(_), orient='split')\n col 1 col 2\n row 1 a b\n row 2 c d\n\n Encoding/decoding a Dataframe using ``'index'`` formatted JSON:\n\n >>> df.to_json(orient='index')\n '{{"row 1":{{"col 1":"a","col 2":"b"}},"row 2":{{"col 1":"c","col 2":"d"}}}}'\n\n >>> pd.read_json(StringIO(_), orient='index')\n col 1 col 2\n row 1 a b\n row 2 c d\n\n Encoding/decoding a Dataframe using ``'records'`` formatted JSON.\n Note that index labels are not preserved with this encoding.\n\n >>> df.to_json(orient='records')\n '[{{"col 1":"a","col 2":"b"}},{{"col 1":"c","col 2":"d"}}]'\n >>> pd.read_json(StringIO(_), orient='records')\n col 1 col 2\n 0 a b\n 1 c d\n\n Encoding with Table Schema\n\n >>> df.to_json(orient='table')\n '\\n{{"schema":{{"fields":[\\n{{"name":"index","type":"string"}},\\n{{"name":"col 1","type":"string"}},\\n{{"name":"col 2","type":"string"}}],\\n"primaryKey":["index"],\\n"pandas_version":"1.4.0"}},\\n"data":[\\n{{"index":"row 1","col 1":"a","col 2":"b"}},\\n{{"index":"row 2","col 1":"c","col 2":"d"}}]\\n}}\\n'\n\n The following example uses ``dtype_backend="numpy_nullable"``\n\n >>> data = '''{{"index": {{"0": 0, "1": 1}},\n ... "a": {{"0": 1, "1": null}},\n ... "b": {{"0": 2.5, "1": 4.5}},\n ... "c": {{"0": true, "1": false}},\n ... "d": {{"0": "a", "1": "b"}},\n ... 
"e": {{"0": 1577.2, "1": 1577.1}}}}'''\n >>> pd.read_json(StringIO(data), dtype_backend="numpy_nullable")\n index a b c d e\n 0 0 1 2.5 True a 1577.2\n 1 1 <NA> 4.5 False b 1577.1\n """\n if orient == "table" and dtype:\n raise ValueError("cannot pass both dtype and orient='table'")\n if orient == "table" and convert_axes:\n raise ValueError("cannot pass both convert_axes and orient='table'")\n\n check_dtype_backend(dtype_backend)\n\n if dtype is None and orient != "table":\n # error: Incompatible types in assignment (expression has type "bool", variable\n # has type "Union[ExtensionDtype, str, dtype[Any], Type[str], Type[float],\n # Type[int], Type[complex], Type[bool], Type[object], Dict[Hashable,\n # Union[ExtensionDtype, Union[str, dtype[Any]], Type[str], Type[float],\n # Type[int], Type[complex], Type[bool], Type[object]]], None]")\n dtype = True # type: ignore[assignment]\n if convert_axes is None and orient != "table":\n convert_axes = True\n\n json_reader = JsonReader(\n path_or_buf,\n orient=orient,\n typ=typ,\n dtype=dtype,\n convert_axes=convert_axes,\n convert_dates=convert_dates,\n keep_default_dates=keep_default_dates,\n precise_float=precise_float,\n date_unit=date_unit,\n encoding=encoding,\n lines=lines,\n chunksize=chunksize,\n compression=compression,\n nrows=nrows,\n storage_options=storage_options,\n encoding_errors=encoding_errors,\n dtype_backend=dtype_backend,\n engine=engine,\n )\n\n if chunksize:\n return json_reader\n else:\n return json_reader.read()\n\n\nclass JsonReader(abc.Iterator, Generic[FrameSeriesStrT]):\n """\n JsonReader provides an interface for reading in a JSON file.\n\n If initialized with ``lines=True`` and ``chunksize``, can be iterated over\n ``chunksize`` lines at a time. 
Otherwise, calling ``read`` reads in the\n whole document.\n """\n\n def __init__(\n self,\n filepath_or_buffer,\n orient,\n typ: FrameSeriesStrT,\n dtype,\n convert_axes: bool | None,\n convert_dates,\n keep_default_dates: bool,\n precise_float: bool,\n date_unit,\n encoding,\n lines: bool,\n chunksize: int | None,\n compression: CompressionOptions,\n nrows: int | None,\n storage_options: StorageOptions | None = None,\n encoding_errors: str | None = "strict",\n dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,\n engine: JSONEngine = "ujson",\n ) -> None:\n self.orient = orient\n self.typ = typ\n self.dtype = dtype\n self.convert_axes = convert_axes\n self.convert_dates = convert_dates\n self.keep_default_dates = keep_default_dates\n self.precise_float = precise_float\n self.date_unit = date_unit\n self.encoding = encoding\n self.engine = engine\n self.compression = compression\n self.storage_options = storage_options\n self.lines = lines\n self.chunksize = chunksize\n self.nrows_seen = 0\n self.nrows = nrows\n self.encoding_errors = encoding_errors\n self.handles: IOHandles[str] | None = None\n self.dtype_backend = dtype_backend\n\n if self.engine not in {"pyarrow", "ujson"}:\n raise ValueError(\n f"The engine type {self.engine} is currently not supported."\n )\n if self.chunksize is not None:\n self.chunksize = validate_integer("chunksize", self.chunksize, 1)\n if not self.lines:\n raise ValueError("chunksize can only be passed if lines=True")\n if self.engine == "pyarrow":\n raise ValueError(\n "currently pyarrow engine doesn't support chunksize parameter"\n )\n if self.nrows is not None:\n self.nrows = validate_integer("nrows", self.nrows, 0)\n if not self.lines:\n raise ValueError("nrows can only be passed if lines=True")\n if (\n isinstance(filepath_or_buffer, str)\n and not self.lines\n and "\n" in filepath_or_buffer\n ):\n warnings.warn(\n "Passing literal json to 'read_json' is deprecated and "\n "will be removed in a future version. 
To read from a "\n "literal string, wrap it in a 'StringIO' object.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n if self.engine == "pyarrow":\n if not self.lines:\n raise ValueError(\n "currently pyarrow engine only supports "\n "the line-delimited JSON format"\n )\n self.data = filepath_or_buffer\n elif self.engine == "ujson":\n data = self._get_data_from_filepath(filepath_or_buffer)\n self.data = self._preprocess_data(data)\n\n def _preprocess_data(self, data):\n """\n At this point, the data either has a `read` attribute (e.g. a file\n object or a StringIO) or is a string that is a JSON document.\n\n If self.chunksize, we prepare the data for the `__next__` method.\n Otherwise, we read it into memory for the `read` method.\n """\n if hasattr(data, "read") and not (self.chunksize or self.nrows):\n with self:\n data = data.read()\n if not hasattr(data, "read") and (self.chunksize or self.nrows):\n data = StringIO(data)\n\n return data\n\n def _get_data_from_filepath(self, filepath_or_buffer):\n """\n The function read_json accepts three input types:\n 1. filepath (string-like)\n 2. file-like object (e.g. open file object, StringIO)\n 3. JSON string\n\n This method turns (1) into (2) to simplify the rest of the processing.\n It returns input types (2) and (3) unchanged.\n\n It raises FileNotFoundError if the input is a string ending in\n one of .json, .json.gz, .json.bz2, etc. 
but no such file exists.\n """\n # if it is a string but the file does not exist, it might be a JSON string\n filepath_or_buffer = stringify_path(filepath_or_buffer)\n if (\n not isinstance(filepath_or_buffer, str)\n or is_url(filepath_or_buffer)\n or is_fsspec_url(filepath_or_buffer)\n or file_exists(filepath_or_buffer)\n ):\n self.handles = get_handle(\n filepath_or_buffer,\n "r",\n encoding=self.encoding,\n compression=self.compression,\n storage_options=self.storage_options,\n errors=self.encoding_errors,\n )\n filepath_or_buffer = self.handles.handle\n elif (\n isinstance(filepath_or_buffer, str)\n and filepath_or_buffer.lower().endswith(\n (".json",) + tuple(f".json{c}" for c in extension_to_compression)\n )\n and not file_exists(filepath_or_buffer)\n ):\n raise FileNotFoundError(f"File {filepath_or_buffer} does not exist")\n else:\n warnings.warn(\n "Passing literal json to 'read_json' is deprecated and "\n "will be removed in a future version. To read from a "\n "literal string, wrap it in a 'StringIO' object.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n return filepath_or_buffer\n\n def _combine_lines(self, lines) -> str:\n """\n Combines a list of JSON objects into one JSON object.\n """\n return (\n f'[{",".join([line for line in (line.strip() for line in lines) if line])}]'\n )\n\n @overload\n def read(self: JsonReader[Literal["frame"]]) -> DataFrame:\n ...\n\n @overload\n def read(self: JsonReader[Literal["series"]]) -> Series:\n ...\n\n @overload\n def read(self: JsonReader[Literal["frame", "series"]]) -> DataFrame | Series:\n ...\n\n def read(self) -> DataFrame | Series:\n """\n Read the whole JSON input into a pandas object.\n """\n obj: DataFrame | Series\n with self:\n if self.engine == "pyarrow":\n pyarrow_json = import_optional_dependency("pyarrow.json")\n pa_table = pyarrow_json.read_json(self.data)\n return arrow_table_to_pandas(pa_table, dtype_backend=self.dtype_backend)\n elif self.engine == "ujson":\n if self.lines:\n if 
self.chunksize:\n obj = concat(self)\n elif self.nrows:\n lines = list(islice(self.data, self.nrows))\n lines_json = self._combine_lines(lines)\n obj = self._get_object_parser(lines_json)\n else:\n data = ensure_str(self.data)\n data_lines = data.split("\n")\n obj = self._get_object_parser(self._combine_lines(data_lines))\n else:\n obj = self._get_object_parser(self.data)\n if self.dtype_backend is not lib.no_default:\n return obj.convert_dtypes(\n infer_objects=False, dtype_backend=self.dtype_backend\n )\n else:\n return obj\n\n def _get_object_parser(self, json) -> DataFrame | Series:\n """\n Parses a json document into a pandas object.\n """\n typ = self.typ\n dtype = self.dtype\n kwargs = {\n "orient": self.orient,\n "dtype": self.dtype,\n "convert_axes": self.convert_axes,\n "convert_dates": self.convert_dates,\n "keep_default_dates": self.keep_default_dates,\n "precise_float": self.precise_float,\n "date_unit": self.date_unit,\n "dtype_backend": self.dtype_backend,\n }\n obj = None\n if typ == "frame":\n obj = FrameParser(json, **kwargs).parse()\n\n if typ == "series" or obj is None:\n if not isinstance(dtype, bool):\n kwargs["dtype"] = dtype\n obj = SeriesParser(json, **kwargs).parse()\n\n return obj\n\n def close(self) -> None:\n """\n If we opened a stream earlier, in _get_data_from_filepath, we should\n close it.\n\n If an open stream or file was passed, we leave it open.\n """\n if self.handles is not None:\n self.handles.close()\n\n def __iter__(self) -> Self:\n return self\n\n @overload\n def __next__(self: JsonReader[Literal["frame"]]) -> DataFrame:\n ...\n\n @overload\n def __next__(self: JsonReader[Literal["series"]]) -> Series:\n ...\n\n @overload\n def __next__(self: JsonReader[Literal["frame", "series"]]) -> DataFrame | Series:\n ...\n\n def __next__(self) -> DataFrame | Series:\n if self.nrows and self.nrows_seen >= self.nrows:\n self.close()\n raise StopIteration\n\n lines = list(islice(self.data, self.chunksize))\n if not lines:\n 
self.close()\n raise StopIteration\n\n try:\n lines_json = self._combine_lines(lines)\n obj = self._get_object_parser(lines_json)\n\n # Make sure that the returned objects have the right index.\n obj.index = range(self.nrows_seen, self.nrows_seen + len(obj))\n self.nrows_seen += len(obj)\n except Exception as ex:\n self.close()\n raise ex\n\n if self.dtype_backend is not lib.no_default:\n return obj.convert_dtypes(\n infer_objects=False, dtype_backend=self.dtype_backend\n )\n else:\n return obj\n\n def __enter__(self) -> Self:\n return self\n\n def __exit__(\n self,\n exc_type: type[BaseException] | None,\n exc_value: BaseException | None,\n traceback: TracebackType | None,\n ) -> None:\n self.close()\n\n\nclass Parser:\n _split_keys: tuple[str, ...]\n _default_orient: str\n\n _STAMP_UNITS = ("s", "ms", "us", "ns")\n _MIN_STAMPS = {\n "s": 31536000,\n "ms": 31536000000,\n "us": 31536000000000,\n "ns": 31536000000000000,\n }\n json: str\n\n def __init__(\n self,\n json: str,\n orient,\n dtype: DtypeArg | None = None,\n convert_axes: bool = True,\n convert_dates: bool | list[str] = True,\n keep_default_dates: bool = False,\n precise_float: bool = False,\n date_unit=None,\n dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,\n ) -> None:\n self.json = json\n\n if orient is None:\n orient = self._default_orient\n\n self.orient = orient\n\n self.dtype = dtype\n\n if date_unit is not None:\n date_unit = date_unit.lower()\n if date_unit not in self._STAMP_UNITS:\n raise ValueError(f"date_unit must be one of {self._STAMP_UNITS}")\n self.min_stamp = self._MIN_STAMPS[date_unit]\n else:\n self.min_stamp = self._MIN_STAMPS["s"]\n\n self.precise_float = precise_float\n self.convert_axes = convert_axes\n self.convert_dates = convert_dates\n self.date_unit = date_unit\n self.keep_default_dates = keep_default_dates\n self.obj: DataFrame | Series | None = None\n self.dtype_backend = dtype_backend\n\n @final\n def check_keys_split(self, decoded: dict) -> None:\n """\n 
Checks that dict has only the appropriate keys for orient='split'.\n """\n bad_keys = set(decoded.keys()).difference(set(self._split_keys))\n if bad_keys:\n bad_keys_joined = ", ".join(bad_keys)\n raise ValueError(f"JSON data had unexpected key(s): {bad_keys_joined}")\n\n @final\n def parse(self):\n self._parse()\n\n if self.obj is None:\n return None\n if self.convert_axes:\n self._convert_axes()\n self._try_convert_types()\n return self.obj\n\n def _parse(self) -> None:\n raise AbstractMethodError(self)\n\n @final\n def _convert_axes(self) -> None:\n """\n Try to convert axes.\n """\n obj = self.obj\n assert obj is not None # for mypy\n for axis_name in obj._AXIS_ORDERS:\n ax = obj._get_axis(axis_name)\n ser = Series(ax, dtype=ax.dtype, copy=False)\n new_ser, result = self._try_convert_data(\n name=axis_name,\n data=ser,\n use_dtypes=False,\n convert_dates=True,\n is_axis=True,\n )\n if result:\n new_axis = Index(new_ser, dtype=new_ser.dtype, copy=False)\n setattr(self.obj, axis_name, new_axis)\n\n def _try_convert_types(self) -> None:\n raise AbstractMethodError(self)\n\n @final\n def _try_convert_data(\n self,\n name: Hashable,\n data: Series,\n use_dtypes: bool = True,\n convert_dates: bool | list[str] = True,\n is_axis: bool = False,\n ) -> tuple[Series, bool]:\n """\n Try to parse a Series into a column by inferring dtype.\n """\n # don't try to coerce, unless a force conversion\n if use_dtypes:\n if not self.dtype:\n if all(notna(data)):\n return data, False\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\n "ignore",\n "Downcasting object dtype arrays",\n category=FutureWarning,\n )\n filled = data.fillna(np.nan)\n\n return filled, True\n\n elif self.dtype is True:\n pass\n else:\n # dtype to force\n dtype = (\n self.dtype.get(name) if isinstance(self.dtype, dict) else self.dtype\n )\n if dtype is not None:\n try:\n return data.astype(dtype), True\n except (TypeError, ValueError):\n return data, False\n\n if convert_dates:\n new_data, result 
= self._try_convert_to_date(data)\n if result:\n return new_data, True\n\n converted = False\n if self.dtype_backend is not lib.no_default and not is_axis:\n # Fall through for conversion later on\n return data, True\n elif is_string_dtype(data.dtype):\n # try float\n try:\n data = data.astype("float64")\n converted = True\n except (TypeError, ValueError):\n pass\n\n if data.dtype.kind == "f" and data.dtype != "float64":\n # coerce floats to 64\n try:\n data = data.astype("float64")\n converted = True\n except (TypeError, ValueError):\n pass\n\n # don't coerce 0-len data\n if len(data) and data.dtype in ("float", "object"):\n # coerce ints if we can\n try:\n new_data = data.astype("int64")\n if (new_data == data).all():\n data = new_data\n converted = True\n except (TypeError, ValueError, OverflowError):\n pass\n\n if data.dtype == "int" and data.dtype != "int64":\n # coerce ints to 64\n try:\n data = data.astype("int64")\n converted = True\n except (TypeError, ValueError):\n pass\n\n # if we have an index, we want to preserve dtypes\n if name == "index" and len(data):\n if self.orient == "split":\n return data, False\n\n return data, converted\n\n @final\n def _try_convert_to_date(self, data: Series) -> tuple[Series, bool]:\n """\n Try to parse a ndarray like into a date column.\n\n Try to coerce object in epoch/iso formats and integer/float in epoch\n formats. 
Return a boolean if parsing was successful.\n """\n # no conversion on empty\n if not len(data):\n return data, False\n\n new_data = data\n\n if new_data.dtype == "string":\n new_data = new_data.astype(object)\n\n if new_data.dtype == "object":\n try:\n new_data = data.astype("int64")\n except OverflowError:\n return data, False\n except (TypeError, ValueError):\n pass\n\n # ignore numbers that are out of range\n if issubclass(new_data.dtype.type, np.number):\n in_range = (\n isna(new_data._values)\n | (new_data > self.min_stamp)\n | (new_data._values == iNaT)\n )\n if not in_range.all():\n return data, False\n\n date_units = (self.date_unit,) if self.date_unit else self._STAMP_UNITS\n for date_unit in date_units:\n try:\n with warnings.catch_warnings():\n warnings.filterwarnings(\n "ignore",\n ".*parsing datetimes with mixed time "\n "zones will raise an error",\n category=FutureWarning,\n )\n new_data = to_datetime(new_data, errors="raise", unit=date_unit)\n except (ValueError, OverflowError, TypeError):\n continue\n return new_data, True\n return data, False\n\n\nclass SeriesParser(Parser):\n _default_orient = "index"\n _split_keys = ("name", "index", "data")\n obj: Series | None\n\n def _parse(self) -> None:\n data = ujson_loads(self.json, precise_float=self.precise_float)\n\n if self.orient == "split":\n decoded = {str(k): v for k, v in data.items()}\n self.check_keys_split(decoded)\n self.obj = Series(**decoded)\n else:\n self.obj = Series(data)\n\n def _try_convert_types(self) -> None:\n if self.obj is None:\n return\n obj, result = self._try_convert_data(\n "data", self.obj, convert_dates=self.convert_dates\n )\n if result:\n self.obj = obj\n\n\nclass FrameParser(Parser):\n _default_orient = "columns"\n _split_keys = ("columns", "index", "data")\n obj: DataFrame | None\n\n def _parse(self) -> None:\n json = self.json\n orient = self.orient\n\n if orient == "columns":\n self.obj = DataFrame(\n ujson_loads(json, precise_float=self.precise_float), dtype=None\n 
)\n elif orient == "split":\n decoded = {\n str(k): v\n for k, v in ujson_loads(json, precise_float=self.precise_float).items()\n }\n self.check_keys_split(decoded)\n orig_names = [\n (tuple(col) if isinstance(col, list) else col)\n for col in decoded["columns"]\n ]\n decoded["columns"] = dedup_names(\n orig_names,\n is_potential_multi_index(orig_names, None),\n )\n self.obj = DataFrame(dtype=None, **decoded)\n elif orient == "index":\n self.obj = DataFrame.from_dict(\n ujson_loads(json, precise_float=self.precise_float),\n dtype=None,\n orient="index",\n )\n elif orient == "table":\n self.obj = parse_table_schema(json, precise_float=self.precise_float)\n else:\n self.obj = DataFrame(\n ujson_loads(json, precise_float=self.precise_float), dtype=None\n )\n\n def _process_converter(\n self,\n f: Callable[[Hashable, Series], tuple[Series, bool]],\n filt: Callable[[Hashable], bool] | None = None,\n ) -> None:\n """\n Take a conversion function and possibly recreate the frame.\n """\n if filt is None:\n filt = lambda col: True\n\n obj = self.obj\n assert obj is not None # for mypy\n\n needs_new_obj = False\n new_obj = {}\n for i, (col, c) in enumerate(obj.items()):\n if filt(col):\n new_data, result = f(col, c)\n if result:\n c = new_data\n needs_new_obj = True\n new_obj[i] = c\n\n if needs_new_obj:\n # possibly handle dup columns\n new_frame = DataFrame(new_obj, index=obj.index)\n new_frame.columns = obj.columns\n self.obj = new_frame\n\n def _try_convert_types(self) -> None:\n if self.obj is None:\n return\n if self.convert_dates:\n self._try_convert_dates()\n\n self._process_converter(\n lambda col, c: self._try_convert_data(col, c, convert_dates=False)\n )\n\n def _try_convert_dates(self) -> None:\n if self.obj is None:\n return\n\n # our columns to parse\n convert_dates_list_bool = self.convert_dates\n if isinstance(convert_dates_list_bool, bool):\n convert_dates_list_bool = []\n convert_dates = set(convert_dates_list_bool)\n\n def is_ok(col) -> bool:\n """\n 
Return if this col is ok to try for a date parse.\n """\n if col in convert_dates:\n return True\n if not self.keep_default_dates:\n return False\n if not isinstance(col, str):\n return False\n\n col_lower = col.lower()\n if (\n col_lower.endswith(("_at", "_time"))\n or col_lower == "modified"\n or col_lower == "date"\n or col_lower == "datetime"\n or col_lower.startswith("timestamp")\n ):\n return True\n return False\n\n self._process_converter(lambda col, c: self._try_convert_to_date(c), filt=is_ok)\n
.venv\Lib\site-packages\pandas\io\json\_json.py
_json.py
Python
48,231
0.95
0.148594
0.031758
vue-tools
71
2025-01-06T01:47:17.734899
BSD-3-Clause
false
a53ba0d71eb0ba59a4414c59af27669d
# ---------------------------------------------------------------------\n# JSON normalization routines\nfrom __future__ import annotations\n\nfrom collections import (\n abc,\n defaultdict,\n)\nimport copy\nfrom typing import (\n TYPE_CHECKING,\n Any,\n DefaultDict,\n)\n\nimport numpy as np\n\nfrom pandas._libs.writers import convert_json_to_lines\n\nimport pandas as pd\nfrom pandas import DataFrame\n\nif TYPE_CHECKING:\n from collections.abc import Iterable\n\n from pandas._typing import (\n IgnoreRaise,\n Scalar,\n )\n\n\ndef convert_to_line_delimits(s: str) -> str:\n """\n Helper function that converts JSON lists to line delimited JSON.\n """\n # Determine we have a JSON list to turn to lines otherwise just return the\n # json object, only lists can\n if not s[0] == "[" and s[-1] == "]":\n return s\n s = s[1:-1]\n\n return convert_json_to_lines(s)\n\n\ndef nested_to_record(\n ds,\n prefix: str = "",\n sep: str = ".",\n level: int = 0,\n max_level: int | None = None,\n):\n """\n A simplified json_normalize\n\n Converts a nested dict into a flat dict ("record"), unlike json_normalize,\n it does not attempt to extract a subset of the data.\n\n Parameters\n ----------\n ds : dict or list of dicts\n prefix: the prefix, optional, default: ""\n sep : str, default '.'\n Nested records will generate names separated by sep,\n e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar\n level: int, optional, default: 0\n The number of levels in the json string.\n\n max_level: int, optional, default: None\n The max depth to normalize.\n\n Returns\n -------\n d - dict or list of dicts, matching `ds`\n\n Examples\n --------\n >>> nested_to_record(\n ... dict(flat1=1, dict1=dict(c=1, d=2), nested=dict(e=dict(c=1, d=2), d=2))\n ... 
)\n {\\n'flat1': 1, \\n'dict1.c': 1, \\n'dict1.d': 2, \\n'nested.e.c': 1, \\n'nested.e.d': 2, \\n'nested.d': 2\\n}\n """\n singleton = False\n if isinstance(ds, dict):\n ds = [ds]\n singleton = True\n new_ds = []\n for d in ds:\n new_d = copy.deepcopy(d)\n for k, v in d.items():\n # each key gets renamed with prefix\n if not isinstance(k, str):\n k = str(k)\n if level == 0:\n newkey = k\n else:\n newkey = prefix + sep + k\n\n # flatten if type is dict and\n # current dict level < maximum level provided and\n # only dicts gets recurse-flattened\n # only at level>1 do we rename the rest of the keys\n if not isinstance(v, dict) or (\n max_level is not None and level >= max_level\n ):\n if level != 0: # so we skip copying for top level, common case\n v = new_d.pop(k)\n new_d[newkey] = v\n continue\n\n v = new_d.pop(k)\n new_d.update(nested_to_record(v, newkey, sep, level + 1, max_level))\n new_ds.append(new_d)\n\n if singleton:\n return new_ds[0]\n return new_ds\n\n\ndef _normalise_json(\n data: Any,\n key_string: str,\n normalized_dict: dict[str, Any],\n separator: str,\n) -> dict[str, Any]:\n """\n Main recursive function\n Designed for the most basic use case of pd.json_normalize(data)\n intended as a performance improvement, see #15621\n\n Parameters\n ----------\n data : Any\n Type dependent on types contained within nested Json\n key_string : str\n New key (with separator(s) in) for data\n normalized_dict : dict\n The new normalized/flattened Json dict\n separator : str, default '.'\n Nested records will generate names separated by sep,\n e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar\n """\n if isinstance(data, dict):\n for key, value in data.items():\n new_key = f"{key_string}{separator}{key}"\n\n if not key_string:\n new_key = new_key.removeprefix(separator)\n\n _normalise_json(\n data=value,\n key_string=new_key,\n normalized_dict=normalized_dict,\n separator=separator,\n )\n else:\n normalized_dict[key_string] = data\n return 
normalized_dict\n\n\ndef _normalise_json_ordered(data: dict[str, Any], separator: str) -> dict[str, Any]:\n """\n Order the top level keys and then recursively go to depth\n\n Parameters\n ----------\n data : dict or list of dicts\n separator : str, default '.'\n Nested records will generate names separated by sep,\n e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar\n\n Returns\n -------\n dict or list of dicts, matching `normalised_json_object`\n """\n top_dict_ = {k: v for k, v in data.items() if not isinstance(v, dict)}\n nested_dict_ = _normalise_json(\n data={k: v for k, v in data.items() if isinstance(v, dict)},\n key_string="",\n normalized_dict={},\n separator=separator,\n )\n return {**top_dict_, **nested_dict_}\n\n\ndef _simple_json_normalize(\n ds: dict | list[dict],\n sep: str = ".",\n) -> dict | list[dict] | Any:\n """\n A optimized basic json_normalize\n\n Converts a nested dict into a flat dict ("record"), unlike\n json_normalize and nested_to_record it doesn't do anything clever.\n But for the most basic use cases it enhances performance.\n E.g. pd.json_normalize(data)\n\n Parameters\n ----------\n ds : dict or list of dicts\n sep : str, default '.'\n Nested records will generate names separated by sep,\n e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar\n\n Returns\n -------\n frame : DataFrame\n d - dict or list of dicts, matching `normalised_json_object`\n\n Examples\n --------\n >>> _simple_json_normalize(\n ... {\n ... "flat1": 1,\n ... "dict1": {"c": 1, "d": 2},\n ... "nested": {"e": {"c": 1, "d": 2}, "d": 2},\n ... }\n ... )\n {\\n'flat1': 1, \\n'dict1.c': 1, \\n'dict1.d': 2, \\n'nested.e.c': 1, \\n'nested.e.d': 2, \\n'nested.d': 2\\n}\n\n """\n normalised_json_object = {}\n # expect a dictionary, as most jsons are. 
However, lists are perfectly valid\n if isinstance(ds, dict):\n normalised_json_object = _normalise_json_ordered(data=ds, separator=sep)\n elif isinstance(ds, list):\n normalised_json_list = [_simple_json_normalize(row, sep=sep) for row in ds]\n return normalised_json_list\n return normalised_json_object\n\n\ndef json_normalize(\n data: dict | list[dict],\n record_path: str | list | None = None,\n meta: str | list[str | list[str]] | None = None,\n meta_prefix: str | None = None,\n record_prefix: str | None = None,\n errors: IgnoreRaise = "raise",\n sep: str = ".",\n max_level: int | None = None,\n) -> DataFrame:\n """\n Normalize semi-structured JSON data into a flat table.\n\n Parameters\n ----------\n data : dict or list of dicts\n Unserialized JSON objects.\n record_path : str or list of str, default None\n Path in each object to list of records. If not passed, data will be\n assumed to be an array of records.\n meta : list of paths (str or list of str), default None\n Fields to use as metadata for each record in resulting table.\n meta_prefix : str, default None\n If True, prefix records with dotted (?) path, e.g. foo.bar.field if\n meta is ['foo', 'bar'].\n record_prefix : str, default None\n If True, prefix records with dotted (?) path, e.g. foo.bar.field if\n path to records is ['foo', 'bar'].\n errors : {'raise', 'ignore'}, default 'raise'\n Configures error handling.\n\n * 'ignore' : will ignore KeyError if keys listed in meta are not\n always present.\n * 'raise' : will raise KeyError if keys listed in meta are not\n always present.\n sep : str, default '.'\n Nested records will generate names separated by sep.\n e.g., for sep='.', {'foo': {'bar': 0}} -> foo.bar.\n max_level : int, default None\n Max number of levels(depth of dict) to normalize.\n if None, normalizes all levels.\n\n Returns\n -------\n frame : DataFrame\n Normalize semi-structured JSON data into a flat table.\n\n Examples\n --------\n >>> data = [\n ... 
{"id": 1, "name": {"first": "Coleen", "last": "Volk"}},\n ... {"name": {"given": "Mark", "family": "Regner"}},\n ... {"id": 2, "name": "Faye Raker"},\n ... ]\n >>> pd.json_normalize(data)\n id name.first name.last name.given name.family name\n 0 1.0 Coleen Volk NaN NaN NaN\n 1 NaN NaN NaN Mark Regner NaN\n 2 2.0 NaN NaN NaN NaN Faye Raker\n\n >>> data = [\n ... {\n ... "id": 1,\n ... "name": "Cole Volk",\n ... "fitness": {"height": 130, "weight": 60},\n ... },\n ... {"name": "Mark Reg", "fitness": {"height": 130, "weight": 60}},\n ... {\n ... "id": 2,\n ... "name": "Faye Raker",\n ... "fitness": {"height": 130, "weight": 60},\n ... },\n ... ]\n >>> pd.json_normalize(data, max_level=0)\n id name fitness\n 0 1.0 Cole Volk {'height': 130, 'weight': 60}\n 1 NaN Mark Reg {'height': 130, 'weight': 60}\n 2 2.0 Faye Raker {'height': 130, 'weight': 60}\n\n Normalizes nested data up to level 1.\n\n >>> data = [\n ... {\n ... "id": 1,\n ... "name": "Cole Volk",\n ... "fitness": {"height": 130, "weight": 60},\n ... },\n ... {"name": "Mark Reg", "fitness": {"height": 130, "weight": 60}},\n ... {\n ... "id": 2,\n ... "name": "Faye Raker",\n ... "fitness": {"height": 130, "weight": 60},\n ... },\n ... ]\n >>> pd.json_normalize(data, max_level=1)\n id name fitness.height fitness.weight\n 0 1.0 Cole Volk 130 60\n 1 NaN Mark Reg 130 60\n 2 2.0 Faye Raker 130 60\n\n >>> data = [\n ... {\n ... "state": "Florida",\n ... "shortname": "FL",\n ... "info": {"governor": "Rick Scott"},\n ... "counties": [\n ... {"name": "Dade", "population": 12345},\n ... {"name": "Broward", "population": 40000},\n ... {"name": "Palm Beach", "population": 60000},\n ... ],\n ... },\n ... {\n ... "state": "Ohio",\n ... "shortname": "OH",\n ... "info": {"governor": "John Kasich"},\n ... "counties": [\n ... {"name": "Summit", "population": 1234},\n ... {"name": "Cuyahoga", "population": 1337},\n ... ],\n ... },\n ... ]\n >>> result = pd.json_normalize(\n ... 
data, "counties", ["state", "shortname", ["info", "governor"]]\n ... )\n >>> result\n name population state shortname info.governor\n 0 Dade 12345 Florida FL Rick Scott\n 1 Broward 40000 Florida FL Rick Scott\n 2 Palm Beach 60000 Florida FL Rick Scott\n 3 Summit 1234 Ohio OH John Kasich\n 4 Cuyahoga 1337 Ohio OH John Kasich\n\n >>> data = {"A": [1, 2]}\n >>> pd.json_normalize(data, "A", record_prefix="Prefix.")\n Prefix.0\n 0 1\n 1 2\n\n Returns normalized data with columns prefixed with the given string.\n """\n\n def _pull_field(\n js: dict[str, Any], spec: list | str, extract_record: bool = False\n ) -> Scalar | Iterable:\n """Internal function to pull field"""\n result = js\n try:\n if isinstance(spec, list):\n for field in spec:\n if result is None:\n raise KeyError(field)\n result = result[field]\n else:\n result = result[spec]\n except KeyError as e:\n if extract_record:\n raise KeyError(\n f"Key {e} not found. If specifying a record_path, all elements of "\n f"data should have the path."\n ) from e\n if errors == "ignore":\n return np.nan\n else:\n raise KeyError(\n f"Key {e} not found. To replace missing values of {e} with "\n f"np.nan, pass in errors='ignore'"\n ) from e\n\n return result\n\n def _pull_records(js: dict[str, Any], spec: list | str) -> list:\n """\n Internal function to pull field for records, and similar to\n _pull_field, but require to return list. And will raise error\n if has non iterable value.\n """\n result = _pull_field(js, spec, extract_record=True)\n\n # GH 31507 GH 30145, GH 26284 if result is not list, raise TypeError if not\n # null, otherwise return an empty list\n if not isinstance(result, list):\n if pd.isnull(result):\n result = []\n else:\n raise TypeError(\n f"{js} has non list value {result} for path {spec}. 
"\n "Must be list or null."\n )\n return result\n\n if isinstance(data, list) and not data:\n return DataFrame()\n elif isinstance(data, dict):\n # A bit of a hackjob\n data = [data]\n elif isinstance(data, abc.Iterable) and not isinstance(data, str):\n # GH35923 Fix pd.json_normalize to not skip the first element of a\n # generator input\n data = list(data)\n else:\n raise NotImplementedError\n\n # check to see if a simple recursive function is possible to\n # improve performance (see #15621) but only for cases such\n # as pd.Dataframe(data) or pd.Dataframe(data, sep)\n if (\n record_path is None\n and meta is None\n and meta_prefix is None\n and record_prefix is None\n and max_level is None\n ):\n return DataFrame(_simple_json_normalize(data, sep=sep))\n\n if record_path is None:\n if any([isinstance(x, dict) for x in y.values()] for y in data):\n # naive normalization, this is idempotent for flat records\n # and potentially will inflate the data considerably for\n # deeply nested structures:\n # {VeryLong: { b: 1,c:2}} -> {VeryLong.b:1 ,VeryLong.c:@}\n #\n # TODO: handle record value which are lists, at least error\n # reasonably\n data = nested_to_record(data, sep=sep, max_level=max_level)\n return DataFrame(data)\n elif not isinstance(record_path, list):\n record_path = [record_path]\n\n if meta is None:\n meta = []\n elif not isinstance(meta, list):\n meta = [meta]\n\n _meta = [m if isinstance(m, list) else [m] for m in meta]\n\n # Disastrously inefficient for now\n records: list = []\n lengths = []\n\n meta_vals: DefaultDict = defaultdict(list)\n meta_keys = [sep.join(val) for val in _meta]\n\n def _recursive_extract(data, path, seen_meta, level: int = 0) -> None:\n if isinstance(data, dict):\n data = [data]\n if len(path) > 1:\n for obj in data:\n for val, key in zip(_meta, meta_keys):\n if level + 1 == len(val):\n seen_meta[key] = _pull_field(obj, val[-1])\n\n _recursive_extract(obj[path[0]], path[1:], seen_meta, level=level + 1)\n else:\n for obj in 
data:\n recs = _pull_records(obj, path[0])\n recs = [\n nested_to_record(r, sep=sep, max_level=max_level)\n if isinstance(r, dict)\n else r\n for r in recs\n ]\n\n # For repeating the metadata later\n lengths.append(len(recs))\n for val, key in zip(_meta, meta_keys):\n if level + 1 > len(val):\n meta_val = seen_meta[key]\n else:\n meta_val = _pull_field(obj, val[level:])\n meta_vals[key].append(meta_val)\n records.extend(recs)\n\n _recursive_extract(data, record_path, {}, level=0)\n\n result = DataFrame(records)\n\n if record_prefix is not None:\n result = result.rename(columns=lambda x: f"{record_prefix}{x}")\n\n # Data types, a problem\n for k, v in meta_vals.items():\n if meta_prefix is not None:\n k = meta_prefix + k\n\n if k in result:\n raise ValueError(\n f"Conflicting metadata name {k}, need distinguishing prefix "\n )\n # GH 37782\n\n values = np.array(v, dtype=object)\n\n if values.ndim > 1:\n # GH 37782\n values = np.empty((len(v),), dtype=object)\n for i, v in enumerate(v):\n values[i] = v\n\n result[k] = values.repeat(lengths)\n return result\n
.venv\Lib\site-packages\pandas\io\json\_normalize.py
_normalize.py
Python
17,212
0.95
0.170956
0.067368
node-utils
960
2024-04-03T21:19:12.933332
MIT
false
3f5867702b14c3bf178b77b06bbf3ce9
"""\nTable Schema builders\n\nhttps://specs.frictionlessdata.io/table-schema/\n"""\nfrom __future__ import annotations\n\nfrom typing import (\n TYPE_CHECKING,\n Any,\n cast,\n)\nimport warnings\n\nfrom pandas._libs import lib\nfrom pandas._libs.json import ujson_loads\nfrom pandas._libs.tslibs import timezones\nfrom pandas._libs.tslibs.dtypes import freq_to_period_freqstr\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.base import _registry as registry\nfrom pandas.core.dtypes.common import (\n is_bool_dtype,\n is_integer_dtype,\n is_numeric_dtype,\n is_string_dtype,\n)\nfrom pandas.core.dtypes.dtypes import (\n CategoricalDtype,\n DatetimeTZDtype,\n ExtensionDtype,\n PeriodDtype,\n)\n\nfrom pandas import DataFrame\nimport pandas.core.common as com\n\nfrom pandas.tseries.frequencies import to_offset\n\nif TYPE_CHECKING:\n from pandas._typing import (\n DtypeObj,\n JSONSerializable,\n )\n\n from pandas import Series\n from pandas.core.indexes.multi import MultiIndex\n\n\nTABLE_SCHEMA_VERSION = "1.4.0"\n\n\ndef as_json_table_type(x: DtypeObj) -> str:\n """\n Convert a NumPy / pandas type to its corresponding json_table.\n\n Parameters\n ----------\n x : np.dtype or ExtensionDtype\n\n Returns\n -------\n str\n the Table Schema data types\n\n Notes\n -----\n This table shows the relationship between NumPy / pandas dtypes,\n and Table Schema dtypes.\n\n ============== =================\n Pandas type Table Schema type\n ============== =================\n int64 integer\n float64 number\n bool boolean\n datetime64[ns] datetime\n timedelta64[ns] duration\n object str\n categorical any\n =============== =================\n """\n if is_integer_dtype(x):\n return "integer"\n elif is_bool_dtype(x):\n return "boolean"\n elif is_numeric_dtype(x):\n return "number"\n elif lib.is_np_dtype(x, "M") or isinstance(x, (DatetimeTZDtype, PeriodDtype)):\n return "datetime"\n elif lib.is_np_dtype(x, "m"):\n return "duration"\n elif isinstance(x, 
ExtensionDtype):\n return "any"\n elif is_string_dtype(x):\n return "string"\n else:\n return "any"\n\n\ndef set_default_names(data):\n """Sets index names to 'index' for regular, or 'level_x' for Multi"""\n if com.all_not_none(*data.index.names):\n nms = data.index.names\n if len(nms) == 1 and data.index.name == "index":\n warnings.warn(\n "Index name of 'index' is not round-trippable.",\n stacklevel=find_stack_level(),\n )\n elif len(nms) > 1 and any(x.startswith("level_") for x in nms):\n warnings.warn(\n "Index names beginning with 'level_' are not round-trippable.",\n stacklevel=find_stack_level(),\n )\n return data\n\n data = data.copy()\n if data.index.nlevels > 1:\n data.index.names = com.fill_missing_names(data.index.names)\n else:\n data.index.name = data.index.name or "index"\n return data\n\n\ndef convert_pandas_type_to_json_field(arr) -> dict[str, JSONSerializable]:\n dtype = arr.dtype\n name: JSONSerializable\n if arr.name is None:\n name = "values"\n else:\n name = arr.name\n field: dict[str, JSONSerializable] = {\n "name": name,\n "type": as_json_table_type(dtype),\n }\n\n if isinstance(dtype, CategoricalDtype):\n cats = dtype.categories\n ordered = dtype.ordered\n\n field["constraints"] = {"enum": list(cats)}\n field["ordered"] = ordered\n elif isinstance(dtype, PeriodDtype):\n field["freq"] = dtype.freq.freqstr\n elif isinstance(dtype, DatetimeTZDtype):\n if timezones.is_utc(dtype.tz):\n # timezone.utc has no "zone" attr\n field["tz"] = "UTC"\n else:\n # error: "tzinfo" has no attribute "zone"\n field["tz"] = dtype.tz.zone # type: ignore[attr-defined]\n elif isinstance(dtype, ExtensionDtype):\n field["extDtype"] = dtype.name\n return field\n\n\ndef convert_json_field_to_pandas_type(field) -> str | CategoricalDtype:\n """\n Converts a JSON field descriptor into its corresponding NumPy / pandas type\n\n Parameters\n ----------\n field\n A JSON field descriptor\n\n Returns\n -------\n dtype\n\n Raises\n ------\n ValueError\n If the type of the 
provided field is unknown or currently unsupported\n\n Examples\n --------\n >>> convert_json_field_to_pandas_type({"name": "an_int", "type": "integer"})\n 'int64'\n\n >>> convert_json_field_to_pandas_type(\n ... {\n ... "name": "a_categorical",\n ... "type": "any",\n ... "constraints": {"enum": ["a", "b", "c"]},\n ... "ordered": True,\n ... }\n ... )\n CategoricalDtype(categories=['a', 'b', 'c'], ordered=True, categories_dtype=object)\n\n >>> convert_json_field_to_pandas_type({"name": "a_datetime", "type": "datetime"})\n 'datetime64[ns]'\n\n >>> convert_json_field_to_pandas_type(\n ... {"name": "a_datetime_with_tz", "type": "datetime", "tz": "US/Central"}\n ... )\n 'datetime64[ns, US/Central]'\n """\n typ = field["type"]\n if typ == "string":\n return "object"\n elif typ == "integer":\n return field.get("extDtype", "int64")\n elif typ == "number":\n return field.get("extDtype", "float64")\n elif typ == "boolean":\n return field.get("extDtype", "bool")\n elif typ == "duration":\n return "timedelta64"\n elif typ == "datetime":\n if field.get("tz"):\n return f"datetime64[ns, {field['tz']}]"\n elif field.get("freq"):\n # GH#9586 rename frequency M to ME for offsets\n offset = to_offset(field["freq"])\n freq_n, freq_name = offset.n, offset.name\n freq = freq_to_period_freqstr(freq_n, freq_name)\n # GH#47747 using datetime over period to minimize the change surface\n return f"period[{freq}]"\n else:\n return "datetime64[ns]"\n elif typ == "any":\n if "constraints" in field and "ordered" in field:\n return CategoricalDtype(\n categories=field["constraints"]["enum"], ordered=field["ordered"]\n )\n elif "extDtype" in field:\n return registry.find(field["extDtype"])\n else:\n return "object"\n\n raise ValueError(f"Unsupported or invalid field type: {typ}")\n\n\ndef build_table_schema(\n data: DataFrame | Series,\n index: bool = True,\n primary_key: bool | None = None,\n version: bool = True,\n) -> dict[str, JSONSerializable]:\n """\n Create a Table schema from ``data``.\n\n 
Parameters\n ----------\n data : Series, DataFrame\n index : bool, default True\n Whether to include ``data.index`` in the schema.\n primary_key : bool or None, default True\n Column names to designate as the primary key.\n The default `None` will set `'primaryKey'` to the index\n level or levels if the index is unique.\n version : bool, default True\n Whether to include a field `pandas_version` with the version\n of pandas that last revised the table schema. This version\n can be different from the installed pandas version.\n\n Returns\n -------\n dict\n\n Notes\n -----\n See `Table Schema\n <https://pandas.pydata.org/docs/user_guide/io.html#table-schema>`__ for\n conversion types.\n Timedeltas as converted to ISO8601 duration format with\n 9 decimal places after the seconds field for nanosecond precision.\n\n Categoricals are converted to the `any` dtype, and use the `enum` field\n constraint to list the allowed values. The `ordered` attribute is included\n in an `ordered` field.\n\n Examples\n --------\n >>> from pandas.io.json._table_schema import build_table_schema\n >>> df = pd.DataFrame(\n ... {'A': [1, 2, 3],\n ... 'B': ['a', 'b', 'c'],\n ... 'C': pd.date_range('2016-01-01', freq='d', periods=3),\n ... 
}, index=pd.Index(range(3), name='idx'))\n >>> build_table_schema(df)\n {'fields': \\n[{'name': 'idx', 'type': 'integer'}, \\n{'name': 'A', 'type': 'integer'}, \\n{'name': 'B', 'type': 'string'}, \\n{'name': 'C', 'type': 'datetime'}], \\n'primaryKey': ['idx'], \\n'pandas_version': '1.4.0'}\n """\n if index is True:\n data = set_default_names(data)\n\n schema: dict[str, Any] = {}\n fields = []\n\n if index:\n if data.index.nlevels > 1:\n data.index = cast("MultiIndex", data.index)\n for level, name in zip(data.index.levels, data.index.names):\n new_field = convert_pandas_type_to_json_field(level)\n new_field["name"] = name\n fields.append(new_field)\n else:\n fields.append(convert_pandas_type_to_json_field(data.index))\n\n if data.ndim > 1:\n for column, s in data.items():\n fields.append(convert_pandas_type_to_json_field(s))\n else:\n fields.append(convert_pandas_type_to_json_field(data))\n\n schema["fields"] = fields\n if index and data.index.is_unique and primary_key is None:\n if data.index.nlevels == 1:\n schema["primaryKey"] = [data.index.name]\n else:\n schema["primaryKey"] = data.index.names\n elif primary_key is not None:\n schema["primaryKey"] = primary_key\n\n if version:\n schema["pandas_version"] = TABLE_SCHEMA_VERSION\n return schema\n\n\ndef parse_table_schema(json, precise_float: bool) -> DataFrame:\n """\n Builds a DataFrame from a given schema\n\n Parameters\n ----------\n json :\n A JSON table schema\n precise_float : bool\n Flag controlling precision when decoding string to double values, as\n dictated by ``read_json``\n\n Returns\n -------\n df : DataFrame\n\n Raises\n ------\n NotImplementedError\n If the JSON table schema contains either timezone or timedelta data\n\n Notes\n -----\n Because :func:`DataFrame.to_json` uses the string 'index' to denote a\n name-less :class:`Index`, this function sets the name of the returned\n :class:`DataFrame` to ``None`` when said string is encountered with a\n normal :class:`Index`. 
For a :class:`MultiIndex`, the same limitation\n applies to any strings beginning with 'level_'. Therefore, an\n :class:`Index` name of 'index' and :class:`MultiIndex` names starting\n with 'level_' are not supported.\n\n See Also\n --------\n build_table_schema : Inverse function.\n pandas.read_json\n """\n table = ujson_loads(json, precise_float=precise_float)\n col_order = [field["name"] for field in table["schema"]["fields"]]\n df = DataFrame(table["data"], columns=col_order)[col_order]\n\n dtypes = {\n field["name"]: convert_json_field_to_pandas_type(field)\n for field in table["schema"]["fields"]\n }\n\n # No ISO constructor for Timedelta as of yet, so need to raise\n if "timedelta64" in dtypes.values():\n raise NotImplementedError(\n 'table="orient" can not yet read ISO-formatted Timedelta data'\n )\n\n df = df.astype(dtypes)\n\n if "primaryKey" in table["schema"]:\n df = df.set_index(table["schema"]["primaryKey"])\n if len(df.index.names) == 1:\n if df.index.name == "index":\n df.index.name = None\n else:\n df.index.names = [\n None if x.startswith("level_") else x for x in df.index.names\n ]\n\n return df\n
.venv\Lib\site-packages\pandas\io\json\_table_schema.py
_table_schema.py
Python
11,594
0.95
0.128535
0.01506
awesome-app
602
2023-12-09T13:38:11.814955
Apache-2.0
false
4d1948f85cd2f553ff315d6bddaf30ff
from pandas.io.json._json import (\n read_json,\n to_json,\n ujson_dumps,\n ujson_loads,\n)\nfrom pandas.io.json._table_schema import build_table_schema\n\n__all__ = [\n "ujson_dumps",\n "ujson_loads",\n "read_json",\n "to_json",\n "build_table_schema",\n]\n
.venv\Lib\site-packages\pandas\io\json\__init__.py
__init__.py
Python
270
0.85
0
0
vue-tools
292
2024-05-20T00:54:00.786078
BSD-3-Clause
false
918b93524104a1f520cc23a4a79af2ec
\n\n
.venv\Lib\site-packages\pandas\io\json\__pycache__\_json.cpython-313.pyc
_json.cpython-313.pyc
Other
54,857
0.75
0.066154
0.020583
vue-tools
404
2023-12-07T13:36:22.918078
BSD-3-Clause
false
91b917b3bb96304ebecc6635160ba22d
\n\n
.venv\Lib\site-packages\pandas\io\json\__pycache__\_normalize.cpython-313.pyc
_normalize.cpython-313.pyc
Other
17,367
0.95
0.064024
0.020134
react-lib
521
2024-09-12T11:01:50.854882
MIT
false
9ee6c455a5bc39f6f072a8eb84c1e662
\n\n
.venv\Lib\site-packages\pandas\io\json\__pycache__\_table_schema.cpython-313.pyc
_table_schema.cpython-313.pyc
Other
13,599
0.95
0.055085
0
awesome-app
298
2023-10-14T12:05:40.182765
BSD-3-Clause
false
4246ce3c44ab10d6e164dc5ff2443e2c
\n\n
.venv\Lib\site-packages\pandas\io\json\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
440
0.7
0
0
react-lib
417
2024-03-23T01:16:07.007174
MIT
false
8b55d7950532766603dfd62e919422c7
from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\nimport warnings\n\nfrom pandas._libs import lib\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.errors import (\n ParserError,\n ParserWarning,\n)\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.common import pandas_dtype\nfrom pandas.core.dtypes.inference import is_integer\n\nfrom pandas.io._util import arrow_table_to_pandas\nfrom pandas.io.parsers.base_parser import ParserBase\n\nif TYPE_CHECKING:\n from pandas._typing import ReadBuffer\n\n from pandas import DataFrame\n\n\nclass ArrowParserWrapper(ParserBase):\n """\n Wrapper for the pyarrow engine for read_csv()\n """\n\n def __init__(self, src: ReadBuffer[bytes], **kwds) -> None:\n super().__init__(kwds)\n self.kwds = kwds\n self.src = src\n\n self._parse_kwds()\n\n def _parse_kwds(self) -> None:\n """\n Validates keywords before passing to pyarrow.\n """\n encoding: str | None = self.kwds.get("encoding")\n self.encoding = "utf-8" if encoding is None else encoding\n\n na_values = self.kwds["na_values"]\n if isinstance(na_values, dict):\n raise ValueError(\n "The pyarrow engine doesn't support passing a dict for na_values"\n )\n self.na_values = list(self.kwds["na_values"])\n\n def _get_pyarrow_options(self) -> None:\n """\n Rename some arguments to pass to pyarrow\n """\n mapping = {\n "usecols": "include_columns",\n "na_values": "null_values",\n "escapechar": "escape_char",\n "skip_blank_lines": "ignore_empty_lines",\n "decimal": "decimal_point",\n "quotechar": "quote_char",\n }\n for pandas_name, pyarrow_name in mapping.items():\n if pandas_name in self.kwds and self.kwds.get(pandas_name) is not None:\n self.kwds[pyarrow_name] = self.kwds.pop(pandas_name)\n\n # Date format handling\n # If we get a string, we need to convert it into a list for pyarrow\n # If we get a dict, we want to parse those separately\n date_format = self.date_format\n if isinstance(date_format, str):\n 
date_format = [date_format]\n else:\n # In case of dict, we don't want to propagate through, so\n # just set to pyarrow default of None\n\n # Ideally, in future we disable pyarrow dtype inference (read in as string)\n # to prevent misreads.\n date_format = None\n self.kwds["timestamp_parsers"] = date_format\n\n self.parse_options = {\n option_name: option_value\n for option_name, option_value in self.kwds.items()\n if option_value is not None\n and option_name\n in ("delimiter", "quote_char", "escape_char", "ignore_empty_lines")\n }\n\n on_bad_lines = self.kwds.get("on_bad_lines")\n if on_bad_lines is not None:\n if callable(on_bad_lines):\n self.parse_options["invalid_row_handler"] = on_bad_lines\n elif on_bad_lines == ParserBase.BadLineHandleMethod.ERROR:\n self.parse_options[\n "invalid_row_handler"\n ] = None # PyArrow raises an exception by default\n elif on_bad_lines == ParserBase.BadLineHandleMethod.WARN:\n\n def handle_warning(invalid_row) -> str:\n warnings.warn(\n f"Expected {invalid_row.expected_columns} columns, but found "\n f"{invalid_row.actual_columns}: {invalid_row.text}",\n ParserWarning,\n stacklevel=find_stack_level(),\n )\n return "skip"\n\n self.parse_options["invalid_row_handler"] = handle_warning\n elif on_bad_lines == ParserBase.BadLineHandleMethod.SKIP:\n self.parse_options["invalid_row_handler"] = lambda _: "skip"\n\n self.convert_options = {\n option_name: option_value\n for option_name, option_value in self.kwds.items()\n if option_value is not None\n and option_name\n in (\n "include_columns",\n "null_values",\n "true_values",\n "false_values",\n "decimal_point",\n "timestamp_parsers",\n )\n }\n self.convert_options["strings_can_be_null"] = "" in self.kwds["null_values"]\n # autogenerated column names are prefixed with 'f' in pyarrow.csv\n if self.header is None and "include_columns" in self.convert_options:\n self.convert_options["include_columns"] = [\n f"f{n}" for n in self.convert_options["include_columns"]\n ]\n\n 
self.read_options = {\n "autogenerate_column_names": self.header is None,\n "skip_rows": self.header\n if self.header is not None\n else self.kwds["skiprows"],\n "encoding": self.encoding,\n }\n\n def _finalize_pandas_output(self, frame: DataFrame) -> DataFrame:\n """\n Processes data read in based on kwargs.\n\n Parameters\n ----------\n frame: DataFrame\n The DataFrame to process.\n\n Returns\n -------\n DataFrame\n The processed DataFrame.\n """\n num_cols = len(frame.columns)\n multi_index_named = True\n if self.header is None:\n if self.names is None:\n if self.header is None:\n self.names = range(num_cols)\n if len(self.names) != num_cols:\n # usecols is passed through to pyarrow, we only handle index col here\n # The only way self.names is not the same length as number of cols is\n # if we have int index_col. We should just pad the names(they will get\n # removed anyways) to expected length then.\n columns_prefix = [str(x) for x in range(num_cols - len(self.names))]\n self.names = columns_prefix + self.names\n multi_index_named = False\n frame.columns = self.names\n # we only need the frame not the names\n _, frame = self._do_date_conversions(frame.columns, frame)\n if self.index_col is not None:\n index_to_set = self.index_col.copy()\n for i, item in enumerate(self.index_col):\n if is_integer(item):\n index_to_set[i] = frame.columns[item]\n # String case\n elif item not in frame.columns:\n raise ValueError(f"Index {item} invalid")\n\n # Process dtype for index_col and drop from dtypes\n if self.dtype is not None:\n key, new_dtype = (\n (item, self.dtype.get(item))\n if self.dtype.get(item) is not None\n else (frame.columns[item], self.dtype.get(frame.columns[item]))\n )\n if new_dtype is not None:\n frame[key] = frame[key].astype(new_dtype)\n del self.dtype[key]\n\n frame.set_index(index_to_set, drop=True, inplace=True)\n # Clear names if headerless and no name given\n if self.header is None and not multi_index_named:\n frame.index.names = [None] * 
len(frame.index.names)\n\n if self.dtype is not None:\n # Ignore non-existent columns from dtype mapping\n # like other parsers do\n if isinstance(self.dtype, dict):\n self.dtype = {\n k: pandas_dtype(v)\n for k, v in self.dtype.items()\n if k in frame.columns\n }\n else:\n self.dtype = pandas_dtype(self.dtype)\n try:\n frame = frame.astype(self.dtype)\n except TypeError as e:\n # GH#44901 reraise to keep api consistent\n raise ValueError(e)\n return frame\n\n def _validate_usecols(self, usecols) -> None:\n if lib.is_list_like(usecols) and not all(isinstance(x, str) for x in usecols):\n raise ValueError(\n "The pyarrow engine does not allow 'usecols' to be integer "\n "column positions. Pass a list of string column names instead."\n )\n elif callable(usecols):\n raise ValueError(\n "The pyarrow engine does not allow 'usecols' to be a callable."\n )\n\n def read(self) -> DataFrame:\n """\n Reads the contents of a CSV file into a DataFrame and\n processes it according to the kwargs passed in the\n constructor.\n\n Returns\n -------\n DataFrame\n The DataFrame created from the CSV file.\n """\n pa = import_optional_dependency("pyarrow")\n pyarrow_csv = import_optional_dependency("pyarrow.csv")\n self._get_pyarrow_options()\n\n try:\n convert_options = pyarrow_csv.ConvertOptions(**self.convert_options)\n except TypeError:\n include = self.convert_options.get("include_columns", None)\n if include is not None:\n self._validate_usecols(include)\n\n nulls = self.convert_options.get("null_values", set())\n if not lib.is_list_like(nulls) or not all(\n isinstance(x, str) for x in nulls\n ):\n raise TypeError(\n "The 'pyarrow' engine requires all na_values to be strings"\n )\n\n raise\n\n try:\n table = pyarrow_csv.read_csv(\n self.src,\n read_options=pyarrow_csv.ReadOptions(**self.read_options),\n parse_options=pyarrow_csv.ParseOptions(**self.parse_options),\n convert_options=convert_options,\n )\n except pa.ArrowInvalid as e:\n raise ParserError(e) from e\n\n dtype_backend = 
self.kwds["dtype_backend"]\n\n # Convert all pa.null() cols -> float64 (non nullable)\n # else Int64 (nullable case, see below)\n if dtype_backend is lib.no_default:\n new_schema = table.schema\n new_type = pa.float64()\n for i, arrow_type in enumerate(table.schema.types):\n if pa.types.is_null(arrow_type):\n new_schema = new_schema.set(\n i, new_schema.field(i).with_type(new_type)\n )\n\n table = table.cast(new_schema)\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\n "ignore",\n "make_block is deprecated",\n DeprecationWarning,\n )\n frame = arrow_table_to_pandas(\n table, dtype_backend=dtype_backend, null_to_int64=True\n )\n\n return self._finalize_pandas_output(frame)\n
.venv\Lib\site-packages\pandas\io\parsers\arrow_parser_wrapper.py
arrow_parser_wrapper.py
Python
11,080
0.95
0.19322
0.082031
python-kit
276
2024-02-18T22:13:06.291505
GPL-3.0
false
2d178bc01403f6605a7708efb1b49654
from __future__ import annotations\n\nfrom collections import defaultdict\nfrom copy import copy\nimport csv\nimport datetime\nfrom enum import Enum\nimport itertools\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n cast,\n final,\n overload,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import (\n lib,\n parsers,\n)\nimport pandas._libs.ops as libops\nfrom pandas._libs.parsers import STR_NA_VALUES\nfrom pandas._libs.tslibs import parsing\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.errors import (\n ParserError,\n ParserWarning,\n)\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.astype import astype_array\nfrom pandas.core.dtypes.common import (\n ensure_object,\n is_bool_dtype,\n is_dict_like,\n is_extension_array_dtype,\n is_float_dtype,\n is_integer,\n is_integer_dtype,\n is_list_like,\n is_object_dtype,\n is_scalar,\n is_string_dtype,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.dtypes import (\n CategoricalDtype,\n ExtensionDtype,\n)\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas import (\n ArrowDtype,\n DataFrame,\n DatetimeIndex,\n StringDtype,\n concat,\n)\nfrom pandas.core import algorithms\nfrom pandas.core.arrays import (\n ArrowExtensionArray,\n BaseMaskedArray,\n BooleanArray,\n Categorical,\n ExtensionArray,\n FloatingArray,\n IntegerArray,\n)\nfrom pandas.core.arrays.boolean import BooleanDtype\nfrom pandas.core.indexes.api import (\n Index,\n MultiIndex,\n default_index,\n ensure_index_from_sequences,\n)\nfrom pandas.core.series import Series\nfrom pandas.core.tools import datetimes as tools\n\nfrom pandas.io.common import is_potential_multi_index\n\nif TYPE_CHECKING:\n from collections.abc import (\n Hashable,\n Iterable,\n Mapping,\n Sequence,\n )\n\n from pandas._typing import (\n ArrayLike,\n DtypeArg,\n DtypeObj,\n Scalar,\n )\n\n\nclass ParserBase:\n class BadLineHandleMethod(Enum):\n ERROR = 0\n WARN = 1\n SKIP = 2\n\n _implicit_index: 
bool\n _first_chunk: bool\n keep_default_na: bool\n dayfirst: bool\n cache_dates: bool\n keep_date_col: bool\n usecols_dtype: str | None\n\n def __init__(self, kwds) -> None:\n self._implicit_index = False\n\n self.names = kwds.get("names")\n self.orig_names: Sequence[Hashable] | None = None\n\n self.index_col = kwds.get("index_col", None)\n self.unnamed_cols: set = set()\n self.index_names: Sequence[Hashable] | None = None\n self.col_names: Sequence[Hashable] | None = None\n\n self.parse_dates = _validate_parse_dates_arg(kwds.pop("parse_dates", False))\n self._parse_date_cols: Iterable = []\n self.date_parser = kwds.pop("date_parser", lib.no_default)\n self.date_format = kwds.pop("date_format", None)\n self.dayfirst = kwds.pop("dayfirst", False)\n self.keep_date_col = kwds.pop("keep_date_col", False)\n\n self.na_values = kwds.get("na_values")\n self.na_fvalues = kwds.get("na_fvalues")\n self.na_filter = kwds.get("na_filter", False)\n self.keep_default_na = kwds.get("keep_default_na", True)\n\n self.dtype = copy(kwds.get("dtype", None))\n self.converters = kwds.get("converters")\n self.dtype_backend = kwds.get("dtype_backend")\n\n self.true_values = kwds.get("true_values")\n self.false_values = kwds.get("false_values")\n self.cache_dates = kwds.pop("cache_dates", True)\n\n self._date_conv = _make_date_converter(\n date_parser=self.date_parser,\n date_format=self.date_format,\n dayfirst=self.dayfirst,\n cache_dates=self.cache_dates,\n )\n\n # validate header options for mi\n self.header = kwds.get("header")\n if is_list_like(self.header, allow_sets=False):\n if kwds.get("usecols"):\n raise ValueError(\n "cannot specify usecols when specifying a multi-index header"\n )\n if kwds.get("names"):\n raise ValueError(\n "cannot specify names when specifying a multi-index header"\n )\n\n # validate index_col that only contains integers\n if self.index_col is not None:\n # In this case we can pin down index_col as list[int]\n if is_integer(self.index_col):\n self.index_col = 
[self.index_col]\n elif not (\n is_list_like(self.index_col, allow_sets=False)\n and all(map(is_integer, self.index_col))\n ):\n raise ValueError(\n "index_col must only contain row numbers "\n "when specifying a multi-index header"\n )\n else:\n self.index_col = list(self.index_col)\n\n self._name_processed = False\n\n self._first_chunk = True\n\n self.usecols, self.usecols_dtype = self._validate_usecols_arg(kwds["usecols"])\n\n # Fallback to error to pass a sketchy test(test_override_set_noconvert_columns)\n # Normally, this arg would get pre-processed earlier on\n self.on_bad_lines = kwds.get("on_bad_lines", self.BadLineHandleMethod.ERROR)\n\n def _validate_parse_dates_presence(self, columns: Sequence[Hashable]) -> Iterable:\n """\n Check if parse_dates are in columns.\n\n If user has provided names for parse_dates, check if those columns\n are available.\n\n Parameters\n ----------\n columns : list\n List of names of the dataframe.\n\n Returns\n -------\n The names of the columns which will get parsed later if a dict or list\n is given as specification.\n\n Raises\n ------\n ValueError\n If column to parse_date is not in dataframe.\n\n """\n cols_needed: Iterable\n if is_dict_like(self.parse_dates):\n cols_needed = itertools.chain(*self.parse_dates.values())\n elif is_list_like(self.parse_dates):\n # a column in parse_dates could be represented\n # ColReference = Union[int, str]\n # DateGroups = List[ColReference]\n # ParseDates = Union[DateGroups, List[DateGroups],\n # Dict[ColReference, DateGroups]]\n cols_needed = itertools.chain.from_iterable(\n col if is_list_like(col) and not isinstance(col, tuple) else [col]\n for col in self.parse_dates\n )\n else:\n cols_needed = []\n\n cols_needed = list(cols_needed)\n\n # get only columns that are references using names (str), not by index\n missing_cols = ", ".join(\n sorted(\n {\n col\n for col in cols_needed\n if isinstance(col, str) and col not in columns\n }\n )\n )\n if missing_cols:\n raise ValueError(\n 
f"Missing column provided to 'parse_dates': '{missing_cols}'"\n )\n # Convert positions to actual column names\n return [\n col if (isinstance(col, str) or col in columns) else columns[col]\n for col in cols_needed\n ]\n\n def close(self) -> None:\n pass\n\n @final\n @property\n def _has_complex_date_col(self) -> bool:\n return isinstance(self.parse_dates, dict) or (\n isinstance(self.parse_dates, list)\n and len(self.parse_dates) > 0\n and isinstance(self.parse_dates[0], list)\n )\n\n @final\n def _should_parse_dates(self, i: int) -> bool:\n if lib.is_bool(self.parse_dates):\n return bool(self.parse_dates)\n else:\n if self.index_names is not None:\n name = self.index_names[i]\n else:\n name = None\n j = i if self.index_col is None else self.index_col[i]\n\n return (j in self.parse_dates) or (\n name is not None and name in self.parse_dates\n )\n\n @final\n def _extract_multi_indexer_columns(\n self,\n header,\n index_names: Sequence[Hashable] | None,\n passed_names: bool = False,\n ) -> tuple[\n Sequence[Hashable], Sequence[Hashable] | None, Sequence[Hashable] | None, bool\n ]:\n """\n Extract and return the names, index_names, col_names if the column\n names are a MultiIndex.\n\n Parameters\n ----------\n header: list of lists\n The header rows\n index_names: list, optional\n The names of the future index\n passed_names: bool, default False\n A flag specifying if names where passed\n\n """\n if len(header) < 2:\n return header[0], index_names, None, passed_names\n\n # the names are the tuples of the header that are not the index cols\n # 0 is the name of the index, assuming index_col is a list of column\n # numbers\n ic = self.index_col\n if ic is None:\n ic = []\n\n if not isinstance(ic, (list, tuple, np.ndarray)):\n ic = [ic]\n sic = set(ic)\n\n # clean the index_names\n index_names = header.pop(-1)\n index_names, _, _ = self._clean_index_names(index_names, self.index_col)\n\n # extract the columns\n field_count = len(header[0])\n\n # check if header lengths 
are equal\n if not all(len(header_iter) == field_count for header_iter in header[1:]):\n raise ParserError("Header rows must have an equal number of columns.")\n\n def extract(r):\n return tuple(r[i] for i in range(field_count) if i not in sic)\n\n columns = list(zip(*(extract(r) for r in header)))\n names = columns.copy()\n for single_ic in sorted(ic):\n names.insert(single_ic, single_ic)\n\n # Clean the column names (if we have an index_col).\n if len(ic):\n col_names = [\n r[ic[0]]\n if ((r[ic[0]] is not None) and r[ic[0]] not in self.unnamed_cols)\n else None\n for r in header\n ]\n else:\n col_names = [None] * len(header)\n\n passed_names = True\n\n return names, index_names, col_names, passed_names\n\n @final\n def _maybe_make_multi_index_columns(\n self,\n columns: Sequence[Hashable],\n col_names: Sequence[Hashable] | None = None,\n ) -> Sequence[Hashable] | MultiIndex:\n # possibly create a column mi here\n if is_potential_multi_index(columns):\n list_columns = cast(list[tuple], columns)\n return MultiIndex.from_tuples(list_columns, names=col_names)\n return columns\n\n @final\n def _make_index(\n self, data, alldata, columns, indexnamerow: list[Scalar] | None = None\n ) -> tuple[Index | None, Sequence[Hashable] | MultiIndex]:\n index: Index | None\n if not is_index_col(self.index_col) or not self.index_col:\n index = None\n\n elif not self._has_complex_date_col:\n simple_index = self._get_simple_index(alldata, columns)\n index = self._agg_index(simple_index)\n elif self._has_complex_date_col:\n if not self._name_processed:\n (self.index_names, _, self.index_col) = self._clean_index_names(\n list(columns), self.index_col\n )\n self._name_processed = True\n date_index = self._get_complex_date_index(data, columns)\n index = self._agg_index(date_index, try_parse_dates=False)\n\n # add names for the index\n if indexnamerow:\n coffset = len(indexnamerow) - len(columns)\n assert index is not None\n index = index.set_names(indexnamerow[:coffset])\n\n # maybe 
create a mi on the columns\n columns = self._maybe_make_multi_index_columns(columns, self.col_names)\n\n return index, columns\n\n @final\n def _get_simple_index(self, data, columns):\n def ix(col):\n if not isinstance(col, str):\n return col\n raise ValueError(f"Index {col} invalid")\n\n to_remove = []\n index = []\n for idx in self.index_col:\n i = ix(idx)\n to_remove.append(i)\n index.append(data[i])\n\n # remove index items from content and columns, don't pop in\n # loop\n for i in sorted(to_remove, reverse=True):\n data.pop(i)\n if not self._implicit_index:\n columns.pop(i)\n\n return index\n\n @final\n def _get_complex_date_index(self, data, col_names):\n def _get_name(icol):\n if isinstance(icol, str):\n return icol\n\n if col_names is None:\n raise ValueError(f"Must supply column order to use {icol!s} as index")\n\n for i, c in enumerate(col_names):\n if i == icol:\n return c\n\n to_remove = []\n index = []\n for idx in self.index_col:\n name = _get_name(idx)\n to_remove.append(name)\n index.append(data[name])\n\n # remove index items from content and columns, don't pop in\n # loop\n for c in sorted(to_remove, reverse=True):\n data.pop(c)\n col_names.remove(c)\n\n return index\n\n @final\n def _clean_mapping(self, mapping):\n """converts col numbers to names"""\n if not isinstance(mapping, dict):\n return mapping\n clean = {}\n # for mypy\n assert self.orig_names is not None\n\n for col, v in mapping.items():\n if isinstance(col, int) and col not in self.orig_names:\n col = self.orig_names[col]\n clean[col] = v\n if isinstance(mapping, defaultdict):\n remaining_cols = set(self.orig_names) - set(clean.keys())\n clean.update({col: mapping[col] for col in remaining_cols})\n return clean\n\n @final\n def _agg_index(self, index, try_parse_dates: bool = True) -> Index:\n arrays = []\n converters = self._clean_mapping(self.converters)\n\n if self.index_names is not None:\n names: Iterable = self.index_names\n else:\n names = itertools.cycle([None])\n for i, (arr, 
name) in enumerate(zip(index, names)):\n if try_parse_dates and self._should_parse_dates(i):\n arr = self._date_conv(\n arr,\n col=self.index_names[i] if self.index_names is not None else None,\n )\n\n if self.na_filter:\n col_na_values = self.na_values\n col_na_fvalues = self.na_fvalues\n else:\n col_na_values = set()\n col_na_fvalues = set()\n\n if isinstance(self.na_values, dict):\n assert self.index_names is not None\n col_name = self.index_names[i]\n if col_name is not None:\n col_na_values, col_na_fvalues = _get_na_values(\n col_name, self.na_values, self.na_fvalues, self.keep_default_na\n )\n\n clean_dtypes = self._clean_mapping(self.dtype)\n\n cast_type = None\n index_converter = False\n if self.index_names is not None:\n if isinstance(clean_dtypes, dict):\n cast_type = clean_dtypes.get(self.index_names[i], None)\n\n if isinstance(converters, dict):\n index_converter = converters.get(self.index_names[i]) is not None\n\n try_num_bool = not (\n cast_type and is_string_dtype(cast_type) or index_converter\n )\n\n arr, _ = self._infer_types(\n arr, col_na_values | col_na_fvalues, cast_type is None, try_num_bool\n )\n if cast_type is not None:\n # Don't perform RangeIndex inference\n idx = Index(arr, name=name, dtype=cast_type)\n else:\n idx = ensure_index_from_sequences([arr], [name])\n arrays.append(idx)\n\n if len(arrays) == 1:\n return arrays[0]\n else:\n return MultiIndex.from_arrays(arrays)\n\n @final\n def _convert_to_ndarrays(\n self,\n dct: Mapping,\n na_values,\n na_fvalues,\n verbose: bool = False,\n converters=None,\n dtypes=None,\n ):\n result = {}\n for c, values in dct.items():\n conv_f = None if converters is None else converters.get(c, None)\n if isinstance(dtypes, dict):\n cast_type = dtypes.get(c, None)\n else:\n # single dtype or None\n cast_type = dtypes\n\n if self.na_filter:\n col_na_values, col_na_fvalues = _get_na_values(\n c, na_values, na_fvalues, self.keep_default_na\n )\n else:\n col_na_values, col_na_fvalues = set(), set()\n\n if c 
in self._parse_date_cols:\n # GH#26203 Do not convert columns which get converted to dates\n # but replace nans to ensure to_datetime works\n mask = algorithms.isin(values, set(col_na_values) | col_na_fvalues)\n np.putmask(values, mask, np.nan)\n result[c] = values\n continue\n\n if conv_f is not None:\n # conv_f applied to data before inference\n if cast_type is not None:\n warnings.warn(\n (\n "Both a converter and dtype were specified "\n f"for column {c} - only the converter will be used."\n ),\n ParserWarning,\n stacklevel=find_stack_level(),\n )\n\n try:\n values = lib.map_infer(values, conv_f)\n except ValueError:\n mask = algorithms.isin(values, list(na_values)).view(np.uint8)\n values = lib.map_infer_mask(values, conv_f, mask)\n\n cvals, na_count = self._infer_types(\n values,\n set(col_na_values) | col_na_fvalues,\n cast_type is None,\n try_num_bool=False,\n )\n else:\n is_ea = is_extension_array_dtype(cast_type)\n is_str_or_ea_dtype = is_ea or is_string_dtype(cast_type)\n # skip inference if specified dtype is object\n # or casting to an EA\n try_num_bool = not (cast_type and is_str_or_ea_dtype)\n\n # general type inference and conversion\n cvals, na_count = self._infer_types(\n values,\n set(col_na_values) | col_na_fvalues,\n cast_type is None,\n try_num_bool,\n )\n\n # type specified in dtype param or cast_type is an EA\n if cast_type is not None:\n cast_type = pandas_dtype(cast_type)\n if cast_type and (cvals.dtype != cast_type or is_ea):\n if not is_ea and na_count > 0:\n if is_bool_dtype(cast_type):\n raise ValueError(f"Bool column has NA values in column {c}")\n cvals = self._cast_types(cvals, cast_type, c)\n\n result[c] = cvals\n if verbose and na_count:\n print(f"Filled {na_count} NA values in column {c!s}")\n return result\n\n @final\n def _set_noconvert_dtype_columns(\n self, col_indices: list[int], names: Sequence[Hashable]\n ) -> set[int]:\n """\n Set the columns that should not undergo dtype conversions.\n\n Currently, any column that is 
involved with date parsing will not\n undergo such conversions. If usecols is specified, the positions of the columns\n not to cast is relative to the usecols not to all columns.\n\n Parameters\n ----------\n col_indices: The indices specifying order and positions of the columns\n names: The column names which order is corresponding with the order\n of col_indices\n\n Returns\n -------\n A set of integers containing the positions of the columns not to convert.\n """\n usecols: list[int] | list[str] | None\n noconvert_columns = set()\n if self.usecols_dtype == "integer":\n # A set of integers will be converted to a list in\n # the correct order every single time.\n usecols = sorted(self.usecols)\n elif callable(self.usecols) or self.usecols_dtype not in ("empty", None):\n # The names attribute should have the correct columns\n # in the proper order for indexing with parse_dates.\n usecols = col_indices\n else:\n # Usecols is empty.\n usecols = None\n\n def _set(x) -> int:\n if usecols is not None and is_integer(x):\n x = usecols[x]\n\n if not is_integer(x):\n x = col_indices[names.index(x)]\n\n return x\n\n if isinstance(self.parse_dates, list):\n for val in self.parse_dates:\n if isinstance(val, list):\n for k in val:\n noconvert_columns.add(_set(k))\n else:\n noconvert_columns.add(_set(val))\n\n elif isinstance(self.parse_dates, dict):\n for val in self.parse_dates.values():\n if isinstance(val, list):\n for k in val:\n noconvert_columns.add(_set(k))\n else:\n noconvert_columns.add(_set(val))\n\n elif self.parse_dates:\n if isinstance(self.index_col, list):\n for k in self.index_col:\n noconvert_columns.add(_set(k))\n elif self.index_col is not None:\n noconvert_columns.add(_set(self.index_col))\n\n return noconvert_columns\n\n @final\n def _infer_types(\n self, values, na_values, no_dtype_specified, try_num_bool: bool = True\n ) -> tuple[ArrayLike, int]:\n """\n Infer types of values, possibly casting\n\n Parameters\n ----------\n values : ndarray\n na_values : 
set\n no_dtype_specified: Specifies if we want to cast explicitly\n try_num_bool : bool, default try\n try to cast values to numeric (first preference) or boolean\n\n Returns\n -------\n converted : ndarray or ExtensionArray\n na_count : int\n """\n na_count = 0\n if issubclass(values.dtype.type, (np.number, np.bool_)):\n # If our array has numeric dtype, we don't have to check for strings in isin\n na_values = np.array([val for val in na_values if not isinstance(val, str)])\n mask = algorithms.isin(values, na_values)\n na_count = mask.astype("uint8", copy=False).sum()\n if na_count > 0:\n if is_integer_dtype(values):\n values = values.astype(np.float64)\n np.putmask(values, mask, np.nan)\n return values, na_count\n\n dtype_backend = self.dtype_backend\n non_default_dtype_backend = (\n no_dtype_specified and dtype_backend is not lib.no_default\n )\n result: ArrayLike\n\n if try_num_bool and is_object_dtype(values.dtype):\n # exclude e.g DatetimeIndex here\n try:\n result, result_mask = lib.maybe_convert_numeric(\n values,\n na_values,\n False,\n convert_to_masked_nullable=non_default_dtype_backend, # type: ignore[arg-type]\n )\n except (ValueError, TypeError):\n # e.g. 
encountering datetime string gets ValueError\n # TypeError can be raised in floatify\n na_count = parsers.sanitize_objects(values, na_values)\n result = values\n else:\n if non_default_dtype_backend:\n if result_mask is None:\n result_mask = np.zeros(result.shape, dtype=np.bool_)\n\n if result_mask.all():\n result = IntegerArray(\n np.ones(result_mask.shape, dtype=np.int64), result_mask\n )\n elif is_integer_dtype(result):\n result = IntegerArray(result, result_mask)\n elif is_bool_dtype(result):\n result = BooleanArray(result, result_mask)\n elif is_float_dtype(result):\n result = FloatingArray(result, result_mask)\n\n na_count = result_mask.sum()\n else:\n na_count = isna(result).sum()\n else:\n result = values\n if values.dtype == np.object_:\n na_count = parsers.sanitize_objects(values, na_values)\n\n if result.dtype == np.object_ and try_num_bool:\n result, bool_mask = libops.maybe_convert_bool(\n np.asarray(values),\n true_values=self.true_values,\n false_values=self.false_values,\n convert_to_masked_nullable=non_default_dtype_backend, # type: ignore[arg-type]\n )\n if result.dtype == np.bool_ and non_default_dtype_backend:\n if bool_mask is None:\n bool_mask = np.zeros(result.shape, dtype=np.bool_)\n result = BooleanArray(result, bool_mask)\n elif result.dtype == np.object_ and non_default_dtype_backend:\n # read_excel sends array of datetime objects\n if not lib.is_datetime_array(result, skipna=True):\n dtype = StringDtype()\n cls = dtype.construct_array_type()\n result = cls._from_sequence(values, dtype=dtype)\n\n if dtype_backend == "pyarrow":\n pa = import_optional_dependency("pyarrow")\n if isinstance(result, np.ndarray):\n result = ArrowExtensionArray(pa.array(result, from_pandas=True))\n elif isinstance(result, BaseMaskedArray):\n if result._mask.all():\n # We want an arrow null array here\n result = ArrowExtensionArray(pa.array([None] * len(result)))\n else:\n result = ArrowExtensionArray(\n pa.array(result._data, mask=result._mask)\n )\n else:\n 
result = ArrowExtensionArray(\n pa.array(result.to_numpy(), from_pandas=True)\n )\n\n return result, na_count\n\n @final\n def _cast_types(self, values: ArrayLike, cast_type: DtypeObj, column) -> ArrayLike:\n """\n Cast values to specified type\n\n Parameters\n ----------\n values : ndarray or ExtensionArray\n cast_type : np.dtype or ExtensionDtype\n dtype to cast values to\n column : string\n column name - used only for error reporting\n\n Returns\n -------\n converted : ndarray or ExtensionArray\n """\n if isinstance(cast_type, CategoricalDtype):\n known_cats = cast_type.categories is not None\n\n if not is_object_dtype(values.dtype) and not known_cats:\n # TODO: this is for consistency with\n # c-parser which parses all categories\n # as strings\n values = lib.ensure_string_array(\n values, skipna=False, convert_na_value=False\n )\n\n cats = Index(values).unique().dropna()\n values = Categorical._from_inferred_categories(\n cats, cats.get_indexer(values), cast_type, true_values=self.true_values\n )\n\n # use the EA's implementation of casting\n elif isinstance(cast_type, ExtensionDtype):\n array_type = cast_type.construct_array_type()\n try:\n if isinstance(cast_type, BooleanDtype):\n # error: Unexpected keyword argument "true_values" for\n # "_from_sequence_of_strings" of "ExtensionArray"\n return array_type._from_sequence_of_strings( # type: ignore[call-arg]\n values,\n dtype=cast_type,\n true_values=self.true_values,\n false_values=self.false_values,\n )\n else:\n return array_type._from_sequence_of_strings(values, dtype=cast_type)\n except NotImplementedError as err:\n raise NotImplementedError(\n f"Extension Array: {array_type} must implement "\n "_from_sequence_of_strings in order to be used in parser methods"\n ) from err\n\n elif isinstance(values, ExtensionArray):\n values = values.astype(cast_type, copy=False)\n elif issubclass(cast_type.type, str):\n # TODO: why skipna=True here and False above? 
some tests depend\n # on it here, but nothing fails if we change it above\n # (as no tests get there as of 2022-12-06)\n values = lib.ensure_string_array(\n values, skipna=True, convert_na_value=False\n )\n else:\n try:\n values = astype_array(values, cast_type, copy=True)\n except ValueError as err:\n raise ValueError(\n f"Unable to convert column {column} to type {cast_type}"\n ) from err\n return values\n\n @overload\n def _do_date_conversions(\n self,\n names: Index,\n data: DataFrame,\n ) -> tuple[Sequence[Hashable] | Index, DataFrame]:\n ...\n\n @overload\n def _do_date_conversions(\n self,\n names: Sequence[Hashable],\n data: Mapping[Hashable, ArrayLike],\n ) -> tuple[Sequence[Hashable], Mapping[Hashable, ArrayLike]]:\n ...\n\n @final\n def _do_date_conversions(\n self,\n names: Sequence[Hashable] | Index,\n data: Mapping[Hashable, ArrayLike] | DataFrame,\n ) -> tuple[Sequence[Hashable] | Index, Mapping[Hashable, ArrayLike] | DataFrame]:\n # returns data, columns\n\n if self.parse_dates is not None:\n data, names = _process_date_conversion(\n data,\n self._date_conv,\n self.parse_dates,\n self.index_col,\n self.index_names,\n names,\n keep_date_col=self.keep_date_col,\n dtype_backend=self.dtype_backend,\n )\n\n return names, data\n\n @final\n def _check_data_length(\n self,\n columns: Sequence[Hashable],\n data: Sequence[ArrayLike],\n ) -> None:\n """Checks if length of data is equal to length of column names.\n\n One set of trailing commas is allowed. 
self.index_col not False\n results in a ParserError previously when lengths do not match.\n\n Parameters\n ----------\n columns: list of column names\n data: list of array-likes containing the data column-wise.\n """\n if not self.index_col and len(columns) != len(data) and columns:\n empty_str = is_object_dtype(data[-1]) and data[-1] == ""\n # error: No overload variant of "__ror__" of "ndarray" matches\n # argument type "ExtensionArray"\n empty_str_or_na = empty_str | isna(data[-1]) # type: ignore[operator]\n if len(columns) == len(data) - 1 and np.all(empty_str_or_na):\n return\n warnings.warn(\n "Length of header or names does not match length of data. This leads "\n "to a loss of data with index_col=False.",\n ParserWarning,\n stacklevel=find_stack_level(),\n )\n\n @overload\n def _evaluate_usecols(\n self,\n usecols: set[int] | Callable[[Hashable], object],\n names: Sequence[Hashable],\n ) -> set[int]:\n ...\n\n @overload\n def _evaluate_usecols(\n self, usecols: set[str], names: Sequence[Hashable]\n ) -> set[str]:\n ...\n\n @final\n def _evaluate_usecols(\n self,\n usecols: Callable[[Hashable], object] | set[str] | set[int],\n names: Sequence[Hashable],\n ) -> set[str] | set[int]:\n """\n Check whether or not the 'usecols' parameter\n is a callable. If so, enumerates the 'names'\n parameter and returns a set of indices for\n each entry in 'names' that evaluates to True.\n If not a callable, returns 'usecols'.\n """\n if callable(usecols):\n return {i for i, name in enumerate(names) if usecols(name)}\n return usecols\n\n @final\n def _validate_usecols_names(self, usecols, names: Sequence):\n """\n Validates that all usecols are present in a given\n list of names. 
If not, raise a ValueError that\n shows what usecols are missing.\n\n Parameters\n ----------\n usecols : iterable of usecols\n The columns to validate are present in names.\n names : iterable of names\n The column names to check against.\n\n Returns\n -------\n usecols : iterable of usecols\n The `usecols` parameter if the validation succeeds.\n\n Raises\n ------\n ValueError : Columns were missing. Error message will list them.\n """\n missing = [c for c in usecols if c not in names]\n if len(missing) > 0:\n raise ValueError(\n f"Usecols do not match columns, columns expected but not found: "\n f"{missing}"\n )\n\n return usecols\n\n @final\n def _validate_usecols_arg(self, usecols):\n """\n Validate the 'usecols' parameter.\n\n Checks whether or not the 'usecols' parameter contains all integers\n (column selection by index), strings (column by name) or is a callable.\n Raises a ValueError if that is not the case.\n\n Parameters\n ----------\n usecols : list-like, callable, or None\n List of columns to use when parsing or a callable that can be used\n to filter a list of table columns.\n\n Returns\n -------\n usecols_tuple : tuple\n A tuple of (verified_usecols, usecols_dtype).\n\n 'verified_usecols' is either a set if an array-like is passed in or\n 'usecols' if a callable or None is passed in.\n\n 'usecols_dtype` is the inferred dtype of 'usecols' if an array-like\n is passed in or None if a callable or None is passed in.\n """\n msg = (\n "'usecols' must either be list-like of all strings, all unicode, "\n "all integers or a callable."\n )\n if usecols is not None:\n if callable(usecols):\n return usecols, None\n\n if not is_list_like(usecols):\n # see gh-20529\n #\n # Ensure it is iterable container but not string.\n raise ValueError(msg)\n\n usecols_dtype = lib.infer_dtype(usecols, skipna=False)\n\n if usecols_dtype not in ("empty", "integer", "string"):\n raise ValueError(msg)\n\n usecols = set(usecols)\n\n return usecols, usecols_dtype\n return usecols, 
None\n\n @final\n def _clean_index_names(self, columns, index_col) -> tuple[list | None, list, list]:\n if not is_index_col(index_col):\n return None, columns, index_col\n\n columns = list(columns)\n\n # In case of no rows and multiindex columns we have to set index_names to\n # list of Nones GH#38292\n if not columns:\n return [None] * len(index_col), columns, index_col\n\n cp_cols = list(columns)\n index_names: list[str | int | None] = []\n\n # don't mutate\n index_col = list(index_col)\n\n for i, c in enumerate(index_col):\n if isinstance(c, str):\n index_names.append(c)\n for j, name in enumerate(cp_cols):\n if name == c:\n index_col[i] = j\n columns.remove(name)\n break\n else:\n name = cp_cols[c]\n columns.remove(name)\n index_names.append(name)\n\n # Only clean index names that were placeholders.\n for i, name in enumerate(index_names):\n if isinstance(name, str) and name in self.unnamed_cols:\n index_names[i] = None\n\n return index_names, columns, index_col\n\n @final\n def _get_empty_meta(self, columns, dtype: DtypeArg | None = None):\n columns = list(columns)\n\n index_col = self.index_col\n index_names = self.index_names\n\n # Convert `dtype` to a defaultdict of some kind.\n # This will enable us to write `dtype[col_name]`\n # without worrying about KeyError issues later on.\n dtype_dict: defaultdict[Hashable, Any]\n if not is_dict_like(dtype):\n # if dtype == None, default will be object.\n dtype_dict = defaultdict(lambda: dtype)\n else:\n dtype = cast(dict, dtype)\n dtype_dict = defaultdict(\n lambda: None,\n {columns[k] if is_integer(k) else k: v for k, v in dtype.items()},\n )\n\n # Even though we have no data, the "index" of the empty DataFrame\n # could for example still be an empty MultiIndex. Thus, we need to\n # check whether we have any index columns specified, via either:\n #\n # 1) index_col (column indices)\n # 2) index_names (column names)\n #\n # Both must be non-null to ensure a successful construction. 
Otherwise,\n # we have to create a generic empty Index.\n index: Index\n if (index_col is None or index_col is False) or index_names is None:\n index = default_index(0)\n else:\n # TODO: We could return default_index(0) if dtype_dict[name] is None\n data = [\n Index([], name=name, dtype=dtype_dict[name]) for name in index_names\n ]\n if len(data) == 1:\n index = data[0]\n else:\n index = MultiIndex.from_arrays(data)\n index_col.sort()\n\n for i, n in enumerate(index_col):\n columns.pop(n - i)\n\n col_dict = {\n col_name: Series([], dtype=dtype_dict[col_name]) for col_name in columns\n }\n\n return index, columns, col_dict\n\n\ndef _make_date_converter(\n date_parser=lib.no_default,\n dayfirst: bool = False,\n cache_dates: bool = True,\n date_format: dict[Hashable, str] | str | None = None,\n):\n if date_parser is not lib.no_default:\n warnings.warn(\n "The argument 'date_parser' is deprecated and will "\n "be removed in a future version. "\n "Please use 'date_format' instead, or read your data in as 'object' dtype "\n "and then call 'to_datetime'.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n if date_parser is not lib.no_default and date_format is not None:\n raise TypeError("Cannot use both 'date_parser' and 'date_format'")\n\n def unpack_if_single_element(arg):\n # NumPy 1.25 deprecation: https://github.com/numpy/numpy/pull/10615\n if isinstance(arg, np.ndarray) and arg.ndim == 1 and len(arg) == 1:\n return arg[0]\n return arg\n\n def converter(*date_cols, col: Hashable):\n if len(date_cols) == 1 and date_cols[0].dtype.kind in "Mm":\n return date_cols[0]\n\n if date_parser is lib.no_default:\n strs = parsing.concat_date_cols(date_cols)\n date_fmt = (\n date_format.get(col) if isinstance(date_format, dict) else date_format\n )\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\n "ignore",\n ".*parsing datetimes with mixed time zones will raise an error",\n category=FutureWarning,\n )\n str_objs = ensure_object(strs)\n try:\n result = 
tools.to_datetime(\n str_objs,\n format=date_fmt,\n utc=False,\n dayfirst=dayfirst,\n cache=cache_dates,\n )\n except (ValueError, TypeError):\n # test_usecols_with_parse_dates4\n return str_objs\n\n if isinstance(result, DatetimeIndex):\n arr = result.to_numpy()\n arr.flags.writeable = True\n return arr\n return result._values\n else:\n try:\n with warnings.catch_warnings():\n warnings.filterwarnings(\n "ignore",\n ".*parsing datetimes with mixed time zones "\n "will raise an error",\n category=FutureWarning,\n )\n pre_parsed = date_parser(\n *(unpack_if_single_element(arg) for arg in date_cols)\n )\n try:\n result = tools.to_datetime(\n pre_parsed,\n cache=cache_dates,\n )\n except (ValueError, TypeError):\n # test_read_csv_with_custom_date_parser\n result = pre_parsed\n if isinstance(result, datetime.datetime):\n raise Exception("scalar parser")\n return result\n except Exception:\n # e.g. test_datetime_fractional_seconds\n with warnings.catch_warnings():\n warnings.filterwarnings(\n "ignore",\n ".*parsing datetimes with mixed time zones "\n "will raise an error",\n category=FutureWarning,\n )\n pre_parsed = parsing.try_parse_dates(\n parsing.concat_date_cols(date_cols),\n parser=date_parser,\n )\n try:\n return tools.to_datetime(pre_parsed)\n except (ValueError, TypeError):\n # TODO: not reached in tests 2023-10-27; needed?\n return pre_parsed\n\n return converter\n\n\nparser_defaults = {\n "delimiter": None,\n "escapechar": None,\n "quotechar": '"',\n "quoting": csv.QUOTE_MINIMAL,\n "doublequote": True,\n "skipinitialspace": False,\n "lineterminator": None,\n "header": "infer",\n "index_col": None,\n "names": None,\n "skiprows": None,\n "skipfooter": 0,\n "nrows": None,\n "na_values": None,\n "keep_default_na": True,\n "true_values": None,\n "false_values": None,\n "converters": None,\n "dtype": None,\n "cache_dates": True,\n "thousands": None,\n "comment": None,\n "decimal": ".",\n # 'engine': 'c',\n "parse_dates": False,\n "keep_date_col": False,\n 
"dayfirst": False,\n "date_parser": lib.no_default,\n "date_format": None,\n "usecols": None,\n # 'iterator': False,\n "chunksize": None,\n "verbose": False,\n "encoding": None,\n "compression": None,\n "skip_blank_lines": True,\n "encoding_errors": "strict",\n "on_bad_lines": ParserBase.BadLineHandleMethod.ERROR,\n "dtype_backend": lib.no_default,\n}\n\n\ndef _process_date_conversion(\n data_dict,\n converter: Callable,\n parse_spec,\n index_col,\n index_names,\n columns,\n keep_date_col: bool = False,\n dtype_backend=lib.no_default,\n):\n def _isindex(colspec):\n return (isinstance(index_col, list) and colspec in index_col) or (\n isinstance(index_names, list) and colspec in index_names\n )\n\n new_cols = []\n new_data = {}\n\n orig_names = columns\n columns = list(columns)\n\n date_cols = set()\n\n if parse_spec is None or isinstance(parse_spec, bool):\n return data_dict, columns\n\n if isinstance(parse_spec, list):\n # list of column lists\n for colspec in parse_spec:\n if is_scalar(colspec) or isinstance(colspec, tuple):\n if isinstance(colspec, int) and colspec not in data_dict:\n colspec = orig_names[colspec]\n if _isindex(colspec):\n continue\n elif dtype_backend == "pyarrow":\n import pyarrow as pa\n\n dtype = data_dict[colspec].dtype\n if isinstance(dtype, ArrowDtype) and (\n pa.types.is_timestamp(dtype.pyarrow_dtype)\n or pa.types.is_date(dtype.pyarrow_dtype)\n ):\n continue\n\n # Pyarrow engine returns Series which we need to convert to\n # numpy array before converter, its a no-op for other parsers\n data_dict[colspec] = converter(\n np.asarray(data_dict[colspec]), col=colspec\n )\n else:\n new_name, col, old_names = _try_convert_dates(\n converter, colspec, data_dict, orig_names\n )\n if new_name in data_dict:\n raise ValueError(f"New date column already in dict {new_name}")\n new_data[new_name] = col\n new_cols.append(new_name)\n date_cols.update(old_names)\n\n elif isinstance(parse_spec, dict):\n # dict of new name to column list\n for new_name, 
colspec in parse_spec.items():\n if new_name in data_dict:\n raise ValueError(f"Date column {new_name} already in dict")\n\n _, col, old_names = _try_convert_dates(\n converter,\n colspec,\n data_dict,\n orig_names,\n target_name=new_name,\n )\n\n new_data[new_name] = col\n\n # If original column can be converted to date we keep the converted values\n # This can only happen if values are from single column\n if len(colspec) == 1:\n new_data[colspec[0]] = col\n\n new_cols.append(new_name)\n date_cols.update(old_names)\n\n if isinstance(data_dict, DataFrame):\n data_dict = concat([DataFrame(new_data), data_dict], axis=1, copy=False)\n else:\n data_dict.update(new_data)\n new_cols.extend(columns)\n\n if not keep_date_col:\n for c in list(date_cols):\n data_dict.pop(c)\n new_cols.remove(c)\n\n return data_dict, new_cols\n\n\ndef _try_convert_dates(\n parser: Callable, colspec, data_dict, columns, target_name: str | None = None\n):\n colset = set(columns)\n colnames = []\n\n for c in colspec:\n if c in colset:\n colnames.append(c)\n elif isinstance(c, int) and c not in columns:\n colnames.append(columns[c])\n else:\n colnames.append(c)\n\n new_name: tuple | str\n if all(isinstance(x, tuple) for x in colnames):\n new_name = tuple(map("_".join, zip(*colnames)))\n else:\n new_name = "_".join([str(x) for x in colnames])\n to_parse = [np.asarray(data_dict[c]) for c in colnames if c in data_dict]\n\n new_col = parser(*to_parse, col=new_name if target_name is None else target_name)\n return new_name, new_col, colnames\n\n\ndef _get_na_values(col, na_values, na_fvalues, keep_default_na: bool):\n """\n Get the NaN values for a given column.\n\n Parameters\n ----------\n col : str\n The name of the column.\n na_values : array-like, dict\n The object listing the NaN values as strings.\n na_fvalues : array-like, dict\n The object listing the NaN values as floats.\n keep_default_na : bool\n If `na_values` is a dict, and the column is not mapped in the\n dictionary, whether to return 
the default NaN values or the empty set.\n\n Returns\n -------\n nan_tuple : A length-two tuple composed of\n\n 1) na_values : the string NaN values for that column.\n 2) na_fvalues : the float NaN values for that column.\n """\n if isinstance(na_values, dict):\n if col in na_values:\n return na_values[col], na_fvalues[col]\n else:\n if keep_default_na:\n return STR_NA_VALUES, set()\n\n return set(), set()\n else:\n return na_values, na_fvalues\n\n\ndef _validate_parse_dates_arg(parse_dates):\n """\n Check whether or not the 'parse_dates' parameter\n is a non-boolean scalar. Raises a ValueError if\n that is the case.\n """\n msg = (\n "Only booleans, lists, and dictionaries are accepted "\n "for the 'parse_dates' parameter"\n )\n\n if not (\n parse_dates is None\n or lib.is_bool(parse_dates)\n or isinstance(parse_dates, (list, dict))\n ):\n raise TypeError(msg)\n\n return parse_dates\n\n\ndef is_index_col(col) -> bool:\n return col is not None and col is not False\n
.venv\Lib\site-packages\pandas\io\parsers\base_parser.py
base_parser.py
Python
49,980
0.95
0.176471
0.074544
python-kit
569
2023-11-03T23:34:29.872885
Apache-2.0
false
f7d27f4379e70c1885e2040a6ec9ce0a
from __future__ import annotations\n\nfrom collections import defaultdict\nfrom typing import TYPE_CHECKING\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import (\n lib,\n parsers,\n)\nfrom pandas.compat._optional import import_optional_dependency\nfrom pandas.errors import DtypeWarning\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.common import pandas_dtype\nfrom pandas.core.dtypes.concat import (\n concat_compat,\n union_categoricals,\n)\nfrom pandas.core.dtypes.dtypes import CategoricalDtype\n\nfrom pandas.core.indexes.api import ensure_index_from_sequences\n\nfrom pandas.io.common import (\n dedup_names,\n is_potential_multi_index,\n)\nfrom pandas.io.parsers.base_parser import (\n ParserBase,\n ParserError,\n is_index_col,\n)\n\nif TYPE_CHECKING:\n from collections.abc import (\n Hashable,\n Mapping,\n Sequence,\n )\n\n from pandas._typing import (\n ArrayLike,\n DtypeArg,\n DtypeObj,\n ReadCsvBuffer,\n )\n\n from pandas import (\n Index,\n MultiIndex,\n )\n\n\nclass CParserWrapper(ParserBase):\n low_memory: bool\n _reader: parsers.TextReader\n\n def __init__(self, src: ReadCsvBuffer[str], **kwds) -> None:\n super().__init__(kwds)\n self.kwds = kwds\n kwds = kwds.copy()\n\n self.low_memory = kwds.pop("low_memory", False)\n\n # #2442\n # error: Cannot determine type of 'index_col'\n kwds["allow_leading_cols"] = (\n self.index_col is not False # type: ignore[has-type]\n )\n\n # GH20529, validate usecol arg before TextReader\n kwds["usecols"] = self.usecols\n\n # Have to pass int, would break tests using TextReader directly otherwise :(\n kwds["on_bad_lines"] = self.on_bad_lines.value\n\n for key in (\n "storage_options",\n "encoding",\n "memory_map",\n "compression",\n ):\n kwds.pop(key, None)\n\n kwds["dtype"] = ensure_dtype_objs(kwds.get("dtype", None))\n if "dtype_backend" not in kwds or kwds["dtype_backend"] is lib.no_default:\n kwds["dtype_backend"] = "numpy"\n if kwds["dtype_backend"] == "pyarrow":\n # Fail 
here loudly instead of in cython after reading\n import_optional_dependency("pyarrow")\n self._reader = parsers.TextReader(src, **kwds)\n\n self.unnamed_cols = self._reader.unnamed_cols\n\n # error: Cannot determine type of 'names'\n passed_names = self.names is None # type: ignore[has-type]\n\n if self._reader.header is None:\n self.names = None\n else:\n # error: Cannot determine type of 'names'\n # error: Cannot determine type of 'index_names'\n (\n self.names, # type: ignore[has-type]\n self.index_names,\n self.col_names,\n passed_names,\n ) = self._extract_multi_indexer_columns(\n self._reader.header,\n self.index_names, # type: ignore[has-type]\n passed_names,\n )\n\n # error: Cannot determine type of 'names'\n if self.names is None: # type: ignore[has-type]\n self.names = list(range(self._reader.table_width))\n\n # gh-9755\n #\n # need to set orig_names here first\n # so that proper indexing can be done\n # with _set_noconvert_columns\n #\n # once names has been filtered, we will\n # then set orig_names again to names\n # error: Cannot determine type of 'names'\n self.orig_names = self.names[:] # type: ignore[has-type]\n\n if self.usecols:\n usecols = self._evaluate_usecols(self.usecols, self.orig_names)\n\n # GH 14671\n # assert for mypy, orig_names is List or None, None would error in issubset\n assert self.orig_names is not None\n if self.usecols_dtype == "string" and not set(usecols).issubset(\n self.orig_names\n ):\n self._validate_usecols_names(usecols, self.orig_names)\n\n # error: Cannot determine type of 'names'\n if len(self.names) > len(usecols): # type: ignore[has-type]\n # error: Cannot determine type of 'names'\n self.names = [ # type: ignore[has-type]\n n\n # error: Cannot determine type of 'names'\n for i, n in enumerate(self.names) # type: ignore[has-type]\n if (i in usecols or n in usecols)\n ]\n\n # error: Cannot determine type of 'names'\n if len(self.names) < len(usecols): # type: ignore[has-type]\n # error: Cannot determine type of 
'names'\n self._validate_usecols_names(\n usecols,\n self.names, # type: ignore[has-type]\n )\n\n # error: Cannot determine type of 'names'\n self._validate_parse_dates_presence(self.names) # type: ignore[has-type]\n self._set_noconvert_columns()\n\n # error: Cannot determine type of 'names'\n self.orig_names = self.names # type: ignore[has-type]\n\n if not self._has_complex_date_col:\n # error: Cannot determine type of 'index_col'\n if self._reader.leading_cols == 0 and is_index_col(\n self.index_col # type: ignore[has-type]\n ):\n self._name_processed = True\n (\n index_names,\n # error: Cannot determine type of 'names'\n self.names, # type: ignore[has-type]\n self.index_col,\n ) = self._clean_index_names(\n # error: Cannot determine type of 'names'\n self.names, # type: ignore[has-type]\n # error: Cannot determine type of 'index_col'\n self.index_col, # type: ignore[has-type]\n )\n\n if self.index_names is None:\n self.index_names = index_names\n\n if self._reader.header is None and not passed_names:\n assert self.index_names is not None\n self.index_names = [None] * len(self.index_names)\n\n self._implicit_index = self._reader.leading_cols > 0\n\n def close(self) -> None:\n # close handles opened by C parser\n try:\n self._reader.close()\n except ValueError:\n pass\n\n def _set_noconvert_columns(self) -> None:\n """\n Set the columns that should not undergo dtype conversions.\n\n Currently, any column that is involved with date parsing will not\n undergo such conversions.\n """\n assert self.orig_names is not None\n # error: Cannot determine type of 'names'\n\n # much faster than using orig_names.index(x) xref GH#44106\n names_dict = {x: i for i, x in enumerate(self.orig_names)}\n col_indices = [names_dict[x] for x in self.names] # type: ignore[has-type]\n # error: Cannot determine type of 'names'\n noconvert_columns = self._set_noconvert_dtype_columns(\n col_indices,\n self.names, # type: ignore[has-type]\n )\n for col in noconvert_columns:\n 
self._reader.set_noconvert(col)\n\n def read(\n self,\n nrows: int | None = None,\n ) -> tuple[\n Index | MultiIndex | None,\n Sequence[Hashable] | MultiIndex,\n Mapping[Hashable, ArrayLike],\n ]:\n index: Index | MultiIndex | None\n column_names: Sequence[Hashable] | MultiIndex\n try:\n if self.low_memory:\n chunks = self._reader.read_low_memory(nrows)\n # destructive to chunks\n data = _concatenate_chunks(chunks)\n\n else:\n data = self._reader.read(nrows)\n except StopIteration:\n if self._first_chunk:\n self._first_chunk = False\n names = dedup_names(\n self.orig_names,\n is_potential_multi_index(self.orig_names, self.index_col),\n )\n index, columns, col_dict = self._get_empty_meta(\n names,\n dtype=self.dtype,\n )\n columns = self._maybe_make_multi_index_columns(columns, self.col_names)\n\n if self.usecols is not None:\n columns = self._filter_usecols(columns)\n\n col_dict = {k: v for k, v in col_dict.items() if k in columns}\n\n return index, columns, col_dict\n\n else:\n self.close()\n raise\n\n # Done with first read, next time raise StopIteration\n self._first_chunk = False\n\n # error: Cannot determine type of 'names'\n names = self.names # type: ignore[has-type]\n\n if self._reader.leading_cols:\n if self._has_complex_date_col:\n raise NotImplementedError("file structure not yet supported")\n\n # implicit index, no index names\n arrays = []\n\n if self.index_col and self._reader.leading_cols != len(self.index_col):\n raise ParserError(\n "Could not construct index. 
Requested to use "\n f"{len(self.index_col)} number of columns, but "\n f"{self._reader.leading_cols} left to parse."\n )\n\n for i in range(self._reader.leading_cols):\n if self.index_col is None:\n values = data.pop(i)\n else:\n values = data.pop(self.index_col[i])\n\n values = self._maybe_parse_dates(values, i, try_parse_dates=True)\n arrays.append(values)\n\n index = ensure_index_from_sequences(arrays)\n\n if self.usecols is not None:\n names = self._filter_usecols(names)\n\n names = dedup_names(names, is_potential_multi_index(names, self.index_col))\n\n # rename dict keys\n data_tups = sorted(data.items())\n data = {k: v for k, (i, v) in zip(names, data_tups)}\n\n column_names, date_data = self._do_date_conversions(names, data)\n\n # maybe create a mi on the columns\n column_names = self._maybe_make_multi_index_columns(\n column_names, self.col_names\n )\n\n else:\n # rename dict keys\n data_tups = sorted(data.items())\n\n # ugh, mutation\n\n # assert for mypy, orig_names is List or None, None would error in list(...)\n assert self.orig_names is not None\n names = list(self.orig_names)\n names = dedup_names(names, is_potential_multi_index(names, self.index_col))\n\n if self.usecols is not None:\n names = self._filter_usecols(names)\n\n # columns as list\n alldata = [x[1] for x in data_tups]\n if self.usecols is None:\n self._check_data_length(names, alldata)\n\n data = {k: v for k, (i, v) in zip(names, data_tups)}\n\n names, date_data = self._do_date_conversions(names, data)\n index, column_names = self._make_index(date_data, alldata, names)\n\n return index, column_names, date_data\n\n def _filter_usecols(self, names: Sequence[Hashable]) -> Sequence[Hashable]:\n # hackish\n usecols = self._evaluate_usecols(self.usecols, names)\n if usecols is not None and len(names) != len(usecols):\n names = [\n name for i, name in enumerate(names) if i in usecols or name in usecols\n ]\n return names\n\n def _maybe_parse_dates(self, values, index: int, try_parse_dates: bool 
= True):\n if try_parse_dates and self._should_parse_dates(index):\n values = self._date_conv(\n values,\n col=self.index_names[index] if self.index_names is not None else None,\n )\n return values\n\n\ndef _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict:\n """\n Concatenate chunks of data read with low_memory=True.\n\n The tricky part is handling Categoricals, where different chunks\n may have different inferred categories.\n """\n names = list(chunks[0].keys())\n warning_columns = []\n\n result: dict = {}\n for name in names:\n arrs = [chunk.pop(name) for chunk in chunks]\n # Check each arr for consistent types.\n dtypes = {a.dtype for a in arrs}\n non_cat_dtypes = {x for x in dtypes if not isinstance(x, CategoricalDtype)}\n\n dtype = dtypes.pop()\n if isinstance(dtype, CategoricalDtype):\n result[name] = union_categoricals(arrs, sort_categories=False)\n else:\n result[name] = concat_compat(arrs)\n if len(non_cat_dtypes) > 1 and result[name].dtype == np.dtype(object):\n warning_columns.append(str(name))\n\n if warning_columns:\n warning_names = ",".join(warning_columns)\n warning_message = " ".join(\n [\n f"Columns ({warning_names}) have mixed types. 
"\n f"Specify dtype option on import or set low_memory=False."\n ]\n )\n warnings.warn(warning_message, DtypeWarning, stacklevel=find_stack_level())\n return result\n\n\ndef ensure_dtype_objs(\n dtype: DtypeArg | dict[Hashable, DtypeArg] | None\n) -> DtypeObj | dict[Hashable, DtypeObj] | None:\n """\n Ensure we have either None, a dtype object, or a dictionary mapping to\n dtype objects.\n """\n if isinstance(dtype, defaultdict):\n # "None" not callable [misc]\n default_dtype = pandas_dtype(dtype.default_factory()) # type: ignore[misc]\n dtype_converted: defaultdict = defaultdict(lambda: default_dtype)\n for key in dtype.keys():\n dtype_converted[key] = pandas_dtype(dtype[key])\n return dtype_converted\n elif isinstance(dtype, dict):\n return {k: pandas_dtype(dtype[k]) for k in dtype}\n elif dtype is not None:\n return pandas_dtype(dtype)\n return dtype\n
.venv\Lib\site-packages\pandas\io\parsers\c_parser_wrapper.py
c_parser_wrapper.py
Python
14,199
0.95
0.158537
0.142857
vue-tools
216
2024-11-09T05:45:53.561348
MIT
false
279a641715c8b63ff2a9163fd20d560c
from __future__ import annotations\n\nfrom collections import (\n abc,\n defaultdict,\n)\nfrom collections.abc import (\n Hashable,\n Iterator,\n Mapping,\n Sequence,\n)\nimport csv\nfrom io import StringIO\nimport re\nfrom typing import (\n IO,\n TYPE_CHECKING,\n DefaultDict,\n Literal,\n cast,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._libs import lib\nfrom pandas.errors import (\n EmptyDataError,\n ParserError,\n ParserWarning,\n)\nfrom pandas.util._decorators import cache_readonly\nfrom pandas.util._exceptions import find_stack_level\n\nfrom pandas.core.dtypes.common import (\n is_bool_dtype,\n is_integer,\n is_numeric_dtype,\n)\nfrom pandas.core.dtypes.inference import is_dict_like\n\nfrom pandas.io.common import (\n dedup_names,\n is_potential_multi_index,\n)\nfrom pandas.io.parsers.base_parser import (\n ParserBase,\n parser_defaults,\n)\n\nif TYPE_CHECKING:\n from pandas._typing import (\n ArrayLike,\n ReadCsvBuffer,\n Scalar,\n )\n\n from pandas import (\n Index,\n MultiIndex,\n )\n\n# BOM character (byte order mark)\n# This exists at the beginning of a file to indicate endianness\n# of a file (stream). 
Unfortunately, this marker screws up parsing,\n# so we need to remove it if we see it.\n_BOM = "\ufeff"\n\n\nclass PythonParser(ParserBase):\n _no_thousands_columns: set[int]\n\n def __init__(self, f: ReadCsvBuffer[str] | list, **kwds) -> None:\n """\n Workhorse function for processing nested list into DataFrame\n """\n super().__init__(kwds)\n\n self.data: Iterator[str] | None = None\n self.buf: list = []\n self.pos = 0\n self.line_pos = 0\n\n self.skiprows = kwds["skiprows"]\n\n if callable(self.skiprows):\n self.skipfunc = self.skiprows\n else:\n self.skipfunc = lambda x: x in self.skiprows\n\n self.skipfooter = _validate_skipfooter_arg(kwds["skipfooter"])\n self.delimiter = kwds["delimiter"]\n\n self.quotechar = kwds["quotechar"]\n if isinstance(self.quotechar, str):\n self.quotechar = str(self.quotechar)\n\n self.escapechar = kwds["escapechar"]\n self.doublequote = kwds["doublequote"]\n self.skipinitialspace = kwds["skipinitialspace"]\n self.lineterminator = kwds["lineterminator"]\n self.quoting = kwds["quoting"]\n self.skip_blank_lines = kwds["skip_blank_lines"]\n\n self.has_index_names = False\n if "has_index_names" in kwds:\n self.has_index_names = kwds["has_index_names"]\n\n self.verbose = kwds["verbose"]\n\n self.thousands = kwds["thousands"]\n self.decimal = kwds["decimal"]\n\n self.comment = kwds["comment"]\n\n # Set self.data to something that can read lines.\n if isinstance(f, list):\n # read_excel: f is a list\n self.data = cast(Iterator[str], f)\n else:\n assert hasattr(f, "readline")\n self.data = self._make_reader(f)\n\n # Get columns in two steps: infer from data, then\n # infer column indices from self.usecols if it is specified.\n self._col_indices: list[int] | None = None\n columns: list[list[Scalar | None]]\n (\n columns,\n self.num_original_columns,\n self.unnamed_cols,\n ) = self._infer_columns()\n\n # Now self.columns has the set of columns that we will process.\n # The original set is stored in self.original_columns.\n # error: Cannot 
determine type of 'index_names'\n (\n self.columns,\n self.index_names,\n self.col_names,\n _,\n ) = self._extract_multi_indexer_columns(\n columns,\n self.index_names, # type: ignore[has-type]\n )\n\n # get popped off for index\n self.orig_names: list[Hashable] = list(self.columns)\n\n # needs to be cleaned/refactored\n # multiple date column thing turning into a real spaghetti factory\n\n if not self._has_complex_date_col:\n (index_names, self.orig_names, self.columns) = self._get_index_name()\n self._name_processed = True\n if self.index_names is None:\n self.index_names = index_names\n\n if self._col_indices is None:\n self._col_indices = list(range(len(self.columns)))\n\n self._parse_date_cols = self._validate_parse_dates_presence(self.columns)\n self._no_thousands_columns = self._set_no_thousand_columns()\n\n if len(self.decimal) != 1:\n raise ValueError("Only length-1 decimal markers supported")\n\n @cache_readonly\n def num(self) -> re.Pattern:\n decimal = re.escape(self.decimal)\n if self.thousands is None:\n regex = rf"^[\-\+]?[0-9]*({decimal}[0-9]*)?([0-9]?(E|e)\-?[0-9]+)?$"\n else:\n thousands = re.escape(self.thousands)\n regex = (\n rf"^[\-\+]?([0-9]+{thousands}|[0-9])*({decimal}[0-9]*)?"\n rf"([0-9]?(E|e)\-?[0-9]+)?$"\n )\n return re.compile(regex)\n\n def _make_reader(self, f: IO[str] | ReadCsvBuffer[str]):\n sep = self.delimiter\n\n if sep is None or len(sep) == 1:\n if self.lineterminator:\n raise ValueError(\n "Custom line terminators not supported in python parser (yet)"\n )\n\n class MyDialect(csv.Dialect):\n delimiter = self.delimiter\n quotechar = self.quotechar\n escapechar = self.escapechar\n doublequote = self.doublequote\n skipinitialspace = self.skipinitialspace\n quoting = self.quoting\n lineterminator = "\n"\n\n dia = MyDialect\n\n if sep is not None:\n dia.delimiter = sep\n else:\n # attempt to sniff the delimiter from the first valid line,\n # i.e. 
no comment line and not in skiprows\n line = f.readline()\n lines = self._check_comments([[line]])[0]\n while self.skipfunc(self.pos) or not lines:\n self.pos += 1\n line = f.readline()\n lines = self._check_comments([[line]])[0]\n lines_str = cast(list[str], lines)\n\n # since `line` was a string, lines will be a list containing\n # only a single string\n line = lines_str[0]\n\n self.pos += 1\n self.line_pos += 1\n sniffed = csv.Sniffer().sniff(line)\n dia.delimiter = sniffed.delimiter\n\n # Note: encoding is irrelevant here\n line_rdr = csv.reader(StringIO(line), dialect=dia)\n self.buf.extend(list(line_rdr))\n\n # Note: encoding is irrelevant here\n reader = csv.reader(f, dialect=dia, strict=True)\n\n else:\n\n def _read():\n line = f.readline()\n pat = re.compile(sep)\n\n yield pat.split(line.strip())\n\n for line in f:\n yield pat.split(line.strip())\n\n reader = _read()\n\n return reader\n\n def read(\n self, rows: int | None = None\n ) -> tuple[\n Index | None, Sequence[Hashable] | MultiIndex, Mapping[Hashable, ArrayLike]\n ]:\n try:\n content = self._get_lines(rows)\n except StopIteration:\n if self._first_chunk:\n content = []\n else:\n self.close()\n raise\n\n # done with first read, next time raise StopIteration\n self._first_chunk = False\n\n columns: Sequence[Hashable] = list(self.orig_names)\n if not len(content): # pragma: no cover\n # DataFrame with the right metadata, even though it's length 0\n # error: Cannot determine type of 'index_col'\n names = dedup_names(\n self.orig_names,\n is_potential_multi_index(\n self.orig_names,\n self.index_col, # type: ignore[has-type]\n ),\n )\n index, columns, col_dict = self._get_empty_meta(\n names,\n self.dtype,\n )\n conv_columns = self._maybe_make_multi_index_columns(columns, self.col_names)\n return index, conv_columns, col_dict\n\n # handle new style for names in index\n count_empty_content_vals = count_empty_vals(content[0])\n indexnamerow = None\n if self.has_index_names and count_empty_content_vals == 
len(columns):\n indexnamerow = content[0]\n content = content[1:]\n\n alldata = self._rows_to_cols(content)\n data, columns = self._exclude_implicit_index(alldata)\n\n conv_data = self._convert_data(data)\n columns, conv_data = self._do_date_conversions(columns, conv_data)\n\n index, result_columns = self._make_index(\n conv_data, alldata, columns, indexnamerow\n )\n\n return index, result_columns, conv_data\n\n def _exclude_implicit_index(\n self,\n alldata: list[np.ndarray],\n ) -> tuple[Mapping[Hashable, np.ndarray], Sequence[Hashable]]:\n # error: Cannot determine type of 'index_col'\n names = dedup_names(\n self.orig_names,\n is_potential_multi_index(\n self.orig_names,\n self.index_col, # type: ignore[has-type]\n ),\n )\n\n offset = 0\n if self._implicit_index:\n # error: Cannot determine type of 'index_col'\n offset = len(self.index_col) # type: ignore[has-type]\n\n len_alldata = len(alldata)\n self._check_data_length(names, alldata)\n\n return {\n name: alldata[i + offset] for i, name in enumerate(names) if i < len_alldata\n }, names\n\n # legacy\n def get_chunk(\n self, size: int | None = None\n ) -> tuple[\n Index | None, Sequence[Hashable] | MultiIndex, Mapping[Hashable, ArrayLike]\n ]:\n if size is None:\n # error: "PythonParser" has no attribute "chunksize"\n size = self.chunksize # type: ignore[attr-defined]\n return self.read(rows=size)\n\n def _convert_data(\n self,\n data: Mapping[Hashable, np.ndarray],\n ) -> Mapping[Hashable, ArrayLike]:\n # apply converters\n clean_conv = self._clean_mapping(self.converters)\n clean_dtypes = self._clean_mapping(self.dtype)\n\n # Apply NA values.\n clean_na_values = {}\n clean_na_fvalues = {}\n\n if isinstance(self.na_values, dict):\n for col in self.na_values:\n na_value = self.na_values[col]\n na_fvalue = self.na_fvalues[col]\n\n if isinstance(col, int) and col not in self.orig_names:\n col = self.orig_names[col]\n\n clean_na_values[col] = na_value\n clean_na_fvalues[col] = na_fvalue\n else:\n clean_na_values = 
self.na_values\n clean_na_fvalues = self.na_fvalues\n\n return self._convert_to_ndarrays(\n data,\n clean_na_values,\n clean_na_fvalues,\n self.verbose,\n clean_conv,\n clean_dtypes,\n )\n\n @cache_readonly\n def _have_mi_columns(self) -> bool:\n if self.header is None:\n return False\n\n header = self.header\n if isinstance(header, (list, tuple, np.ndarray)):\n return len(header) > 1\n else:\n return False\n\n def _infer_columns(\n self,\n ) -> tuple[list[list[Scalar | None]], int, set[Scalar | None]]:\n names = self.names\n num_original_columns = 0\n clear_buffer = True\n unnamed_cols: set[Scalar | None] = set()\n\n if self.header is not None:\n header = self.header\n have_mi_columns = self._have_mi_columns\n\n if isinstance(header, (list, tuple, np.ndarray)):\n # we have a mi columns, so read an extra line\n if have_mi_columns:\n header = list(header) + [header[-1] + 1]\n else:\n header = [header]\n\n columns: list[list[Scalar | None]] = []\n for level, hr in enumerate(header):\n try:\n line = self._buffered_line()\n\n while self.line_pos <= hr:\n line = self._next_line()\n\n except StopIteration as err:\n if 0 < self.line_pos <= hr and (\n not have_mi_columns or hr != header[-1]\n ):\n # If no rows we want to raise a different message and if\n # we have mi columns, the last line is not part of the header\n joi = list(map(str, header[:-1] if have_mi_columns else header))\n msg = f"[{','.join(joi)}], len of {len(joi)}, "\n raise ValueError(\n f"Passed header={msg}"\n f"but only {self.line_pos} lines in file"\n ) from err\n\n # We have an empty file, so check\n # if columns are provided. 
That will\n # serve as the 'line' for parsing\n if have_mi_columns and hr > 0:\n if clear_buffer:\n self._clear_buffer()\n columns.append([None] * len(columns[-1]))\n return columns, num_original_columns, unnamed_cols\n\n if not self.names:\n raise EmptyDataError("No columns to parse from file") from err\n\n line = self.names[:]\n\n this_columns: list[Scalar | None] = []\n this_unnamed_cols = []\n\n for i, c in enumerate(line):\n if c == "":\n if have_mi_columns:\n col_name = f"Unnamed: {i}_level_{level}"\n else:\n col_name = f"Unnamed: {i}"\n\n this_unnamed_cols.append(i)\n this_columns.append(col_name)\n else:\n this_columns.append(c)\n\n if not have_mi_columns:\n counts: DefaultDict = defaultdict(int)\n # Ensure that regular columns are used before unnamed ones\n # to keep given names and mangle unnamed columns\n col_loop_order = [\n i\n for i in range(len(this_columns))\n if i not in this_unnamed_cols\n ] + this_unnamed_cols\n\n # TODO: Use pandas.io.common.dedup_names instead (see #50371)\n for i in col_loop_order:\n col = this_columns[i]\n old_col = col\n cur_count = counts[col]\n\n if cur_count > 0:\n while cur_count > 0:\n counts[old_col] = cur_count + 1\n col = f"{old_col}.{cur_count}"\n if col in this_columns:\n cur_count += 1\n else:\n cur_count = counts[col]\n\n if (\n self.dtype is not None\n and is_dict_like(self.dtype)\n and self.dtype.get(old_col) is not None\n and self.dtype.get(col) is None\n ):\n self.dtype.update({col: self.dtype.get(old_col)})\n this_columns[i] = col\n counts[col] = cur_count + 1\n elif have_mi_columns:\n # if we have grabbed an extra line, but its not in our\n # format so save in the buffer, and create an blank extra\n # line for the rest of the parsing code\n if hr == header[-1]:\n lc = len(this_columns)\n # error: Cannot determine type of 'index_col'\n sic = self.index_col # type: ignore[has-type]\n ic = len(sic) if sic is not None else 0\n unnamed_count = len(this_unnamed_cols)\n\n # if wrong number of blanks or no index, 
not our format\n if (lc != unnamed_count and lc - ic > unnamed_count) or ic == 0:\n clear_buffer = False\n this_columns = [None] * lc\n self.buf = [self.buf[-1]]\n\n columns.append(this_columns)\n unnamed_cols.update({this_columns[i] for i in this_unnamed_cols})\n\n if len(columns) == 1:\n num_original_columns = len(this_columns)\n\n if clear_buffer:\n self._clear_buffer()\n\n first_line: list[Scalar] | None\n if names is not None:\n # Read first row after header to check if data are longer\n try:\n first_line = self._next_line()\n except StopIteration:\n first_line = None\n\n len_first_data_row = 0 if first_line is None else len(first_line)\n\n if len(names) > len(columns[0]) and len(names) > len_first_data_row:\n raise ValueError(\n "Number of passed names did not match "\n "number of header fields in the file"\n )\n if len(columns) > 1:\n raise TypeError("Cannot pass names with multi-index columns")\n\n if self.usecols is not None:\n # Set _use_cols. We don't store columns because they are\n # overwritten.\n self._handle_usecols(columns, names, num_original_columns)\n else:\n num_original_columns = len(names)\n if self._col_indices is not None and len(names) != len(\n self._col_indices\n ):\n columns = [[names[i] for i in sorted(self._col_indices)]]\n else:\n columns = [names]\n else:\n columns = self._handle_usecols(\n columns, columns[0], num_original_columns\n )\n else:\n ncols = len(self._header_line)\n num_original_columns = ncols\n\n if not names:\n columns = [list(range(ncols))]\n columns = self._handle_usecols(columns, columns[0], ncols)\n elif self.usecols is None or len(names) >= ncols:\n columns = self._handle_usecols([names], names, ncols)\n num_original_columns = len(names)\n elif not callable(self.usecols) and len(names) != len(self.usecols):\n raise ValueError(\n "Number of passed names did not match number of "\n "header fields in the file"\n )\n else:\n # Ignore output but set used columns.\n columns = [names]\n self._handle_usecols(columns, 
columns[0], ncols)\n\n return columns, num_original_columns, unnamed_cols\n\n @cache_readonly\n def _header_line(self):\n # Store line for reuse in _get_index_name\n if self.header is not None:\n return None\n\n try:\n line = self._buffered_line()\n except StopIteration as err:\n if not self.names:\n raise EmptyDataError("No columns to parse from file") from err\n\n line = self.names[:]\n return line\n\n def _handle_usecols(\n self,\n columns: list[list[Scalar | None]],\n usecols_key: list[Scalar | None],\n num_original_columns: int,\n ) -> list[list[Scalar | None]]:\n """\n Sets self._col_indices\n\n usecols_key is used if there are string usecols.\n """\n col_indices: set[int] | list[int]\n if self.usecols is not None:\n if callable(self.usecols):\n col_indices = self._evaluate_usecols(self.usecols, usecols_key)\n elif any(isinstance(u, str) for u in self.usecols):\n if len(columns) > 1:\n raise ValueError(\n "If using multiple headers, usecols must be integers."\n )\n col_indices = []\n\n for col in self.usecols:\n if isinstance(col, str):\n try:\n col_indices.append(usecols_key.index(col))\n except ValueError:\n self._validate_usecols_names(self.usecols, usecols_key)\n else:\n col_indices.append(col)\n else:\n missing_usecols = [\n col for col in self.usecols if col >= num_original_columns\n ]\n if missing_usecols:\n raise ParserError(\n "Defining usecols with out-of-bounds indices is not allowed. 
"\n f"{missing_usecols} are out-of-bounds.",\n )\n col_indices = self.usecols\n\n columns = [\n [n for i, n in enumerate(column) if i in col_indices]\n for column in columns\n ]\n self._col_indices = sorted(col_indices)\n return columns\n\n def _buffered_line(self) -> list[Scalar]:\n """\n Return a line from buffer, filling buffer if required.\n """\n if len(self.buf) > 0:\n return self.buf[0]\n else:\n return self._next_line()\n\n def _check_for_bom(self, first_row: list[Scalar]) -> list[Scalar]:\n """\n Checks whether the file begins with the BOM character.\n If it does, remove it. In addition, if there is quoting\n in the field subsequent to the BOM, remove it as well\n because it technically takes place at the beginning of\n the name, not the middle of it.\n """\n # first_row will be a list, so we need to check\n # that that list is not empty before proceeding.\n if not first_row:\n return first_row\n\n # The first element of this row is the one that could have the\n # BOM that we want to remove. 
Check that the first element is a\n # string before proceeding.\n if not isinstance(first_row[0], str):\n return first_row\n\n # Check that the string is not empty, as that would\n # obviously not have a BOM at the start of it.\n if not first_row[0]:\n return first_row\n\n # Since the string is non-empty, check that it does\n # in fact begin with a BOM.\n first_elt = first_row[0][0]\n if first_elt != _BOM:\n return first_row\n\n first_row_bom = first_row[0]\n new_row: str\n\n if len(first_row_bom) > 1 and first_row_bom[1] == self.quotechar:\n start = 2\n quote = first_row_bom[1]\n end = first_row_bom[2:].index(quote) + 2\n\n # Extract the data between the quotation marks\n new_row = first_row_bom[start:end]\n\n # Extract any remaining data after the second\n # quotation mark.\n if len(first_row_bom) > end + 1:\n new_row += first_row_bom[end + 1 :]\n\n else:\n # No quotation so just remove BOM from first element\n new_row = first_row_bom[1:]\n\n new_row_list: list[Scalar] = [new_row]\n return new_row_list + first_row[1:]\n\n def _is_line_empty(self, line: list[Scalar]) -> bool:\n """\n Check if a line is empty or not.\n\n Parameters\n ----------\n line : str, array-like\n The line of data to check.\n\n Returns\n -------\n boolean : Whether or not the line is empty.\n """\n return not line or all(not x for x in line)\n\n def _next_line(self) -> list[Scalar]:\n if isinstance(self.data, list):\n while self.skipfunc(self.pos):\n if self.pos >= len(self.data):\n break\n self.pos += 1\n\n while True:\n try:\n line = self._check_comments([self.data[self.pos]])[0]\n self.pos += 1\n # either uncommented or blank to begin with\n if not self.skip_blank_lines and (\n self._is_line_empty(self.data[self.pos - 1]) or line\n ):\n break\n if self.skip_blank_lines:\n ret = self._remove_empty_lines([line])\n if ret:\n line = ret[0]\n break\n except IndexError:\n raise StopIteration\n else:\n while self.skipfunc(self.pos):\n self.pos += 1\n # assert for mypy, data is Iterator[str] or 
None, would error in next\n assert self.data is not None\n next(self.data)\n\n while True:\n orig_line = self._next_iter_line(row_num=self.pos + 1)\n self.pos += 1\n\n if orig_line is not None:\n line = self._check_comments([orig_line])[0]\n\n if self.skip_blank_lines:\n ret = self._remove_empty_lines([line])\n\n if ret:\n line = ret[0]\n break\n elif self._is_line_empty(orig_line) or line:\n break\n\n # This was the first line of the file,\n # which could contain the BOM at the\n # beginning of it.\n if self.pos == 1:\n line = self._check_for_bom(line)\n\n self.line_pos += 1\n self.buf.append(line)\n return line\n\n def _alert_malformed(self, msg: str, row_num: int) -> None:\n """\n Alert a user about a malformed row, depending on value of\n `self.on_bad_lines` enum.\n\n If `self.on_bad_lines` is ERROR, the alert will be `ParserError`.\n If `self.on_bad_lines` is WARN, the alert will be printed out.\n\n Parameters\n ----------\n msg: str\n The error message to display.\n row_num: int\n The row number where the parsing error occurred.\n Because this row number is displayed, we 1-index,\n even though we 0-index internally.\n """\n if self.on_bad_lines == self.BadLineHandleMethod.ERROR:\n raise ParserError(msg)\n if self.on_bad_lines == self.BadLineHandleMethod.WARN:\n warnings.warn(\n f"Skipping line {row_num}: {msg}\n",\n ParserWarning,\n stacklevel=find_stack_level(),\n )\n\n def _next_iter_line(self, row_num: int) -> list[Scalar] | None:\n """\n Wrapper around iterating through `self.data` (CSV source).\n\n When a CSV error is raised, we check for specific\n error messages that allow us to customize the\n error message displayed to the user.\n\n Parameters\n ----------\n row_num: int\n The row number of the line being parsed.\n """\n try:\n # assert for mypy, data is Iterator[str] or None, would error in next\n assert self.data is not None\n line = next(self.data)\n # for mypy\n assert isinstance(line, list)\n return line\n except csv.Error as e:\n if 
self.on_bad_lines in (\n self.BadLineHandleMethod.ERROR,\n self.BadLineHandleMethod.WARN,\n ):\n msg = str(e)\n\n if "NULL byte" in msg or "line contains NUL" in msg:\n msg = (\n "NULL byte detected. This byte "\n "cannot be processed in Python's "\n "native csv library at the moment, "\n "so please pass in engine='c' instead"\n )\n\n if self.skipfooter > 0:\n reason = (\n "Error could possibly be due to "\n "parsing errors in the skipped footer rows "\n "(the skipfooter keyword is only applied "\n "after Python's csv library has parsed "\n "all rows)."\n )\n msg += ". " + reason\n\n self._alert_malformed(msg, row_num)\n return None\n\n def _check_comments(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:\n if self.comment is None:\n return lines\n ret = []\n for line in lines:\n rl = []\n for x in line:\n if (\n not isinstance(x, str)\n or self.comment not in x\n or x in self.na_values\n ):\n rl.append(x)\n else:\n x = x[: x.find(self.comment)]\n if len(x) > 0:\n rl.append(x)\n break\n ret.append(rl)\n return ret\n\n def _remove_empty_lines(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:\n """\n Iterate through the lines and remove any that are\n either empty or contain only one whitespace value\n\n Parameters\n ----------\n lines : list of list of Scalars\n The array of lines that we are to filter.\n\n Returns\n -------\n filtered_lines : list of list of Scalars\n The same array of lines with the "empty" ones removed.\n """\n # Remove empty lines and lines with only one whitespace value\n ret = [\n line\n for line in lines\n if (\n len(line) > 1\n or len(line) == 1\n and (not isinstance(line[0], str) or line[0].strip())\n )\n ]\n return ret\n\n def _check_thousands(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:\n if self.thousands is None:\n return lines\n\n return self._search_replace_num_columns(\n lines=lines, search=self.thousands, replace=""\n )\n\n def _search_replace_num_columns(\n self, lines: list[list[Scalar]], search: str, 
replace: str\n ) -> list[list[Scalar]]:\n ret = []\n for line in lines:\n rl = []\n for i, x in enumerate(line):\n if (\n not isinstance(x, str)\n or search not in x\n or i in self._no_thousands_columns\n or not self.num.search(x.strip())\n ):\n rl.append(x)\n else:\n rl.append(x.replace(search, replace))\n ret.append(rl)\n return ret\n\n def _check_decimal(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:\n if self.decimal == parser_defaults["decimal"]:\n return lines\n\n return self._search_replace_num_columns(\n lines=lines, search=self.decimal, replace="."\n )\n\n def _clear_buffer(self) -> None:\n self.buf = []\n\n def _get_index_name(\n self,\n ) -> tuple[Sequence[Hashable] | None, list[Hashable], list[Hashable]]:\n """\n Try several cases to get lines:\n\n 0) There are headers on row 0 and row 1 and their\n total summed lengths equals the length of the next line.\n Treat row 0 as columns and row 1 as indices\n 1) Look for implicit index: there are more columns\n on row 1 than row 0. 
If this is true, assume that row\n 1 lists index columns and row 0 lists normal columns.\n 2) Get index from the columns if it was listed.\n """\n columns: Sequence[Hashable] = self.orig_names\n orig_names = list(columns)\n columns = list(columns)\n\n line: list[Scalar] | None\n if self._header_line is not None:\n line = self._header_line\n else:\n try:\n line = self._next_line()\n except StopIteration:\n line = None\n\n next_line: list[Scalar] | None\n try:\n next_line = self._next_line()\n except StopIteration:\n next_line = None\n\n # implicitly index_col=0 b/c 1 fewer column names\n implicit_first_cols = 0\n if line is not None:\n # leave it 0, #2442\n # Case 1\n # error: Cannot determine type of 'index_col'\n index_col = self.index_col # type: ignore[has-type]\n if index_col is not False:\n implicit_first_cols = len(line) - self.num_original_columns\n\n # Case 0\n if (\n next_line is not None\n and self.header is not None\n and index_col is not False\n ):\n if len(next_line) == len(line) + self.num_original_columns:\n # column and index names on diff rows\n self.index_col = list(range(len(line)))\n self.buf = self.buf[1:]\n\n for c in reversed(line):\n columns.insert(0, c)\n\n # Update list of original names to include all indices.\n orig_names = list(columns)\n self.num_original_columns = len(columns)\n return line, orig_names, columns\n\n if implicit_first_cols > 0:\n # Case 1\n self._implicit_index = True\n if self.index_col is None:\n self.index_col = list(range(implicit_first_cols))\n\n index_name = None\n\n else:\n # Case 2\n (index_name, _, self.index_col) = self._clean_index_names(\n columns, self.index_col\n )\n\n return index_name, orig_names, columns\n\n def _rows_to_cols(self, content: list[list[Scalar]]) -> list[np.ndarray]:\n col_len = self.num_original_columns\n\n if self._implicit_index:\n col_len += len(self.index_col)\n\n max_len = max(len(row) for row in content)\n\n # Check that there are no rows with too many\n # elements in their row 
(rows with too few\n # elements are padded with NaN).\n # error: Non-overlapping identity check (left operand type: "List[int]",\n # right operand type: "Literal[False]")\n if (\n max_len > col_len\n and self.index_col is not False # type: ignore[comparison-overlap]\n and self.usecols is None\n ):\n footers = self.skipfooter if self.skipfooter else 0\n bad_lines = []\n\n iter_content = enumerate(content)\n content_len = len(content)\n content = []\n\n for i, _content in iter_content:\n actual_len = len(_content)\n\n if actual_len > col_len:\n if callable(self.on_bad_lines):\n new_l = self.on_bad_lines(_content)\n if new_l is not None:\n content.append(new_l)\n elif self.on_bad_lines in (\n self.BadLineHandleMethod.ERROR,\n self.BadLineHandleMethod.WARN,\n ):\n row_num = self.pos - (content_len - i + footers)\n bad_lines.append((row_num, actual_len))\n\n if self.on_bad_lines == self.BadLineHandleMethod.ERROR:\n break\n else:\n content.append(_content)\n\n for row_num, actual_len in bad_lines:\n msg = (\n f"Expected {col_len} fields in line {row_num + 1}, saw "\n f"{actual_len}"\n )\n if (\n self.delimiter\n and len(self.delimiter) > 1\n and self.quoting != csv.QUOTE_NONE\n ):\n # see gh-13374\n reason = (\n "Error could possibly be due to quotes being "\n "ignored when a multi-char delimiter is used."\n )\n msg += ". 
" + reason\n\n self._alert_malformed(msg, row_num + 1)\n\n # see gh-13320\n zipped_content = list(lib.to_object_array(content, min_width=col_len).T)\n\n if self.usecols:\n assert self._col_indices is not None\n col_indices = self._col_indices\n\n if self._implicit_index:\n zipped_content = [\n a\n for i, a in enumerate(zipped_content)\n if (\n i < len(self.index_col)\n or i - len(self.index_col) in col_indices\n )\n ]\n else:\n zipped_content = [\n a for i, a in enumerate(zipped_content) if i in col_indices\n ]\n return zipped_content\n\n def _get_lines(self, rows: int | None = None) -> list[list[Scalar]]:\n lines = self.buf\n new_rows = None\n\n # already fetched some number\n if rows is not None:\n # we already have the lines in the buffer\n if len(self.buf) >= rows:\n new_rows, self.buf = self.buf[:rows], self.buf[rows:]\n\n # need some lines\n else:\n rows -= len(self.buf)\n\n if new_rows is None:\n if isinstance(self.data, list):\n if self.pos > len(self.data):\n raise StopIteration\n if rows is None:\n new_rows = self.data[self.pos :]\n new_pos = len(self.data)\n else:\n new_rows = self.data[self.pos : self.pos + rows]\n new_pos = self.pos + rows\n\n new_rows = self._remove_skipped_rows(new_rows)\n lines.extend(new_rows)\n self.pos = new_pos\n\n else:\n new_rows = []\n try:\n if rows is not None:\n row_index = 0\n row_ct = 0\n offset = self.pos if self.pos is not None else 0\n while row_ct < rows:\n # assert for mypy, data is Iterator[str] or None, would\n # error in next\n assert self.data is not None\n new_row = next(self.data)\n if not self.skipfunc(offset + row_index):\n row_ct += 1\n row_index += 1\n new_rows.append(new_row)\n\n len_new_rows = len(new_rows)\n new_rows = self._remove_skipped_rows(new_rows)\n lines.extend(new_rows)\n else:\n rows = 0\n\n while True:\n next_row = self._next_iter_line(row_num=self.pos + rows + 1)\n rows += 1\n\n if next_row is not None:\n new_rows.append(next_row)\n len_new_rows = len(new_rows)\n\n except StopIteration:\n 
len_new_rows = len(new_rows)\n new_rows = self._remove_skipped_rows(new_rows)\n lines.extend(new_rows)\n if len(lines) == 0:\n raise\n self.pos += len_new_rows\n\n self.buf = []\n else:\n lines = new_rows\n\n if self.skipfooter:\n lines = lines[: -self.skipfooter]\n\n lines = self._check_comments(lines)\n if self.skip_blank_lines:\n lines = self._remove_empty_lines(lines)\n lines = self._check_thousands(lines)\n return self._check_decimal(lines)\n\n def _remove_skipped_rows(self, new_rows: list[list[Scalar]]) -> list[list[Scalar]]:\n if self.skiprows:\n return [\n row for i, row in enumerate(new_rows) if not self.skipfunc(i + self.pos)\n ]\n return new_rows\n\n def _set_no_thousand_columns(self) -> set[int]:\n no_thousands_columns: set[int] = set()\n if self.columns and self.parse_dates:\n assert self._col_indices is not None\n no_thousands_columns = self._set_noconvert_dtype_columns(\n self._col_indices, self.columns\n )\n if self.columns and self.dtype:\n assert self._col_indices is not None\n for i, col in zip(self._col_indices, self.columns):\n if not isinstance(self.dtype, dict) and not is_numeric_dtype(\n self.dtype\n ):\n no_thousands_columns.add(i)\n if (\n isinstance(self.dtype, dict)\n and col in self.dtype\n and (\n not is_numeric_dtype(self.dtype[col])\n or is_bool_dtype(self.dtype[col])\n )\n ):\n no_thousands_columns.add(i)\n return no_thousands_columns\n\n\nclass FixedWidthReader(abc.Iterator):\n """\n A reader of fixed-width lines.\n """\n\n def __init__(\n self,\n f: IO[str] | ReadCsvBuffer[str],\n colspecs: list[tuple[int, int]] | Literal["infer"],\n delimiter: str | None,\n comment: str | None,\n skiprows: set[int] | None = None,\n infer_nrows: int = 100,\n ) -> None:\n self.f = f\n self.buffer: Iterator | None = None\n self.delimiter = "\r\n" + delimiter if delimiter else "\n\r\t "\n self.comment = comment\n if colspecs == "infer":\n self.colspecs = self.detect_colspecs(\n infer_nrows=infer_nrows, skiprows=skiprows\n )\n else:\n self.colspecs = 
colspecs\n\n if not isinstance(self.colspecs, (tuple, list)):\n raise TypeError(\n "column specifications must be a list or tuple, "\n f"input was a {type(colspecs).__name__}"\n )\n\n for colspec in self.colspecs:\n if not (\n isinstance(colspec, (tuple, list))\n and len(colspec) == 2\n and isinstance(colspec[0], (int, np.integer, type(None)))\n and isinstance(colspec[1], (int, np.integer, type(None)))\n ):\n raise TypeError(\n "Each column specification must be "\n "2 element tuple or list of integers"\n )\n\n def get_rows(self, infer_nrows: int, skiprows: set[int] | None = None) -> list[str]:\n """\n Read rows from self.f, skipping as specified.\n\n We distinguish buffer_rows (the first <= infer_nrows\n lines) from the rows returned to detect_colspecs\n because it's simpler to leave the other locations\n with skiprows logic alone than to modify them to\n deal with the fact we skipped some rows here as\n well.\n\n Parameters\n ----------\n infer_nrows : int\n Number of rows to read from self.f, not counting\n rows that are skipped.\n skiprows: set, optional\n Indices of rows to skip.\n\n Returns\n -------\n detect_rows : list of str\n A list containing the rows to read.\n\n """\n if skiprows is None:\n skiprows = set()\n buffer_rows = []\n detect_rows = []\n for i, row in enumerate(self.f):\n if i not in skiprows:\n detect_rows.append(row)\n buffer_rows.append(row)\n if len(detect_rows) >= infer_nrows:\n break\n self.buffer = iter(buffer_rows)\n return detect_rows\n\n def detect_colspecs(\n self, infer_nrows: int = 100, skiprows: set[int] | None = None\n ) -> list[tuple[int, int]]:\n # Regex escape the delimiters\n delimiters = "".join([rf"\{x}" for x in self.delimiter])\n pattern = re.compile(f"([^{delimiters}]+)")\n rows = self.get_rows(infer_nrows, skiprows)\n if not rows:\n raise EmptyDataError("No rows from which to infer column width")\n max_len = max(map(len, rows))\n mask = np.zeros(max_len + 1, dtype=int)\n if self.comment is not None:\n rows = 
[row.partition(self.comment)[0] for row in rows]\n for row in rows:\n for m in pattern.finditer(row):\n mask[m.start() : m.end()] = 1\n shifted = np.roll(mask, 1)\n shifted[0] = 0\n edges = np.where((mask ^ shifted) == 1)[0]\n edge_pairs = list(zip(edges[::2], edges[1::2]))\n return edge_pairs\n\n def __next__(self) -> list[str]:\n # Argument 1 to "next" has incompatible type "Union[IO[str],\n # ReadCsvBuffer[str]]"; expected "SupportsNext[str]"\n if self.buffer is not None:\n try:\n line = next(self.buffer)\n except StopIteration:\n self.buffer = None\n line = next(self.f) # type: ignore[arg-type]\n else:\n line = next(self.f) # type: ignore[arg-type]\n # Note: 'colspecs' is a sequence of half-open intervals.\n return [line[from_:to].strip(self.delimiter) for (from_, to) in self.colspecs]\n\n\nclass FixedWidthFieldParser(PythonParser):\n """\n Specialization that Converts fixed-width fields into DataFrames.\n See PythonParser for details.\n """\n\n def __init__(self, f: ReadCsvBuffer[str], **kwds) -> None:\n # Support iterators, convert to a list.\n self.colspecs = kwds.pop("colspecs")\n self.infer_nrows = kwds.pop("infer_nrows")\n PythonParser.__init__(self, f, **kwds)\n\n def _make_reader(self, f: IO[str] | ReadCsvBuffer[str]) -> FixedWidthReader:\n return FixedWidthReader(\n f,\n self.colspecs,\n self.delimiter,\n self.comment,\n self.skiprows,\n self.infer_nrows,\n )\n\n def _remove_empty_lines(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:\n """\n Returns the list of lines without the empty ones. 
With fixed-width\n fields, empty lines become arrays of empty strings.\n\n See PythonParser._remove_empty_lines.\n """\n return [\n line\n for line in lines\n if any(not isinstance(e, str) or e.strip() for e in line)\n ]\n\n\ndef count_empty_vals(vals) -> int:\n return sum(1 for v in vals if v == "" or v is None)\n\n\ndef _validate_skipfooter_arg(skipfooter: int) -> int:\n """\n Validate the 'skipfooter' parameter.\n\n Checks whether 'skipfooter' is a non-negative integer.\n Raises a ValueError if that is not the case.\n\n Parameters\n ----------\n skipfooter : non-negative integer\n The number of rows to skip at the end of the file.\n\n Returns\n -------\n validated_skipfooter : non-negative integer\n The original input if the validation succeeds.\n\n Raises\n ------\n ValueError : 'skipfooter' was not a non-negative integer.\n """\n if not is_integer(skipfooter):\n raise ValueError("skipfooter must be an integer")\n\n if skipfooter < 0:\n raise ValueError("skipfooter cannot be negative")\n\n # Incompatible return value type (got "Union[int, integer[Any]]", expected "int")\n return skipfooter # type: ignore[return-value]\n
.venv\Lib\site-packages\pandas\io\parsers\python_parser.py
python_parser.py
Python
48,456
0.95
0.192502
0.082343
python-kit
601
2023-10-03T22:08:36.688675
Apache-2.0
false
2ad6fbd95f06f46abc83af2bfff3a62f
"""\nModule contains tools for processing files into DataFrames or other objects\n\nGH#48849 provides a convenient way of deprecating keyword arguments\n"""\nfrom __future__ import annotations\n\nfrom collections import (\n abc,\n defaultdict,\n)\nimport csv\nimport sys\nfrom textwrap import fill\nfrom typing import (\n IO,\n TYPE_CHECKING,\n Any,\n Callable,\n Literal,\n NamedTuple,\n TypedDict,\n overload,\n)\nimport warnings\n\nimport numpy as np\n\nfrom pandas._config import using_copy_on_write\n\nfrom pandas._libs import lib\nfrom pandas._libs.parsers import STR_NA_VALUES\nfrom pandas.errors import (\n AbstractMethodError,\n ParserWarning,\n)\nfrom pandas.util._decorators import Appender\nfrom pandas.util._exceptions import find_stack_level\nfrom pandas.util._validators import check_dtype_backend\n\nfrom pandas.core.dtypes.common import (\n is_file_like,\n is_float,\n is_hashable,\n is_integer,\n is_list_like,\n pandas_dtype,\n)\n\nfrom pandas import Series\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.indexes.api import RangeIndex\nfrom pandas.core.shared_docs import _shared_docs\n\nfrom pandas.io.common import (\n IOHandles,\n get_handle,\n stringify_path,\n validate_header_arg,\n)\nfrom pandas.io.parsers.arrow_parser_wrapper import ArrowParserWrapper\nfrom pandas.io.parsers.base_parser import (\n ParserBase,\n is_index_col,\n parser_defaults,\n)\nfrom pandas.io.parsers.c_parser_wrapper import CParserWrapper\nfrom pandas.io.parsers.python_parser import (\n FixedWidthFieldParser,\n PythonParser,\n)\n\nif TYPE_CHECKING:\n from collections.abc import (\n Hashable,\n Iterable,\n Mapping,\n Sequence,\n )\n from types import TracebackType\n\n from pandas._typing import (\n CompressionOptions,\n CSVEngine,\n DtypeArg,\n DtypeBackend,\n FilePath,\n IndexLabel,\n ReadCsvBuffer,\n Self,\n StorageOptions,\n UsecolsArgType,\n )\n_doc_read_csv_and_table = (\n r"""\n{summary}\n\nAlso supports optionally iterating or breaking of the file\ninto 
chunks.\n\nAdditional help can be found in the online docs for\n`IO Tools <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.\n\nParameters\n----------\nfilepath_or_buffer : str, path object or file-like object\n Any valid string path is acceptable. The string could be a URL. Valid\n URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is\n expected. A local file could be: file://localhost/path/to/table.csv.\n\n If you want to pass in a path object, pandas accepts any ``os.PathLike``.\n\n By file-like object, we refer to objects with a ``read()`` method, such as\n a file handle (e.g. via builtin ``open`` function) or ``StringIO``.\nsep : str, default {_default_sep}\n Character or regex pattern to treat as the delimiter. If ``sep=None``, the\n C engine cannot automatically detect\n the separator, but the Python parsing engine can, meaning the latter will\n be used and automatically detect the separator from only the first valid\n row of the file by Python's builtin sniffer tool, ``csv.Sniffer``.\n In addition, separators longer than 1 character and different from\n ``'\s+'`` will be interpreted as regular expressions and will also force\n the use of the Python parsing engine. Note that regex delimiters are prone\n to ignoring quoted data. Regex example: ``'\r\t'``.\ndelimiter : str, optional\n Alias for ``sep``.\nheader : int, Sequence of int, 'infer' or None, default 'infer'\n Row number(s) containing column labels and marking the start of the\n data (zero-indexed). Default behavior is to infer the column names: if no ``names``\n are passed the behavior is identical to ``header=0`` and column\n names are inferred from the first line of the file, if column\n names are passed explicitly to ``names`` then the behavior is identical to\n ``header=None``. Explicitly pass ``header=0`` to be able to\n replace existing names. 
The header can be a list of integers that\n specify row locations for a :class:`~pandas.MultiIndex` on the columns\n e.g. ``[0, 1, 3]``. Intervening rows that are not specified will be\n skipped (e.g. 2 in this example is skipped). Note that this\n parameter ignores commented lines and empty lines if\n ``skip_blank_lines=True``, so ``header=0`` denotes the first line of\n data rather than the first line of the file.\nnames : Sequence of Hashable, optional\n Sequence of column labels to apply. If the file contains a header row,\n then you should explicitly pass ``header=0`` to override the column names.\n Duplicates in this list are not allowed.\nindex_col : Hashable, Sequence of Hashable or False, optional\n Column(s) to use as row label(s), denoted either by column labels or column\n indices. If a sequence of labels or indices is given, :class:`~pandas.MultiIndex`\n will be formed for the row labels.\n\n Note: ``index_col=False`` can be used to force pandas to *not* use the first\n column as the index, e.g., when you have a malformed file with delimiters at\n the end of each line.\nusecols : Sequence of Hashable or Callable, optional\n Subset of columns to select, denoted either by column labels or column indices.\n If list-like, all elements must either\n be positional (i.e. integer indices into the document columns) or strings\n that correspond to column names provided either by the user in ``names`` or\n inferred from the document header row(s). If ``names`` are given, the document\n header row(s) are not taken into account. 
For example, a valid list-like\n ``usecols`` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.\n Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.\n To instantiate a :class:`~pandas.DataFrame` from ``data`` with element order\n preserved use ``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]``\n for columns in ``['foo', 'bar']`` order or\n ``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``\n for ``['bar', 'foo']`` order.\n\n If callable, the callable function will be evaluated against the column\n names, returning names where the callable function evaluates to ``True``. An\n example of a valid callable argument would be ``lambda x: x.upper() in\n ['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster\n parsing time and lower memory usage.\ndtype : dtype or dict of {{Hashable : dtype}}, optional\n Data type(s) to apply to either the whole dataset or individual columns.\n E.g., ``{{'a': np.float64, 'b': np.int32, 'c': 'Int64'}}``\n Use ``str`` or ``object`` together with suitable ``na_values`` settings\n to preserve and not interpret ``dtype``.\n If ``converters`` are specified, they will be applied INSTEAD\n of ``dtype`` conversion.\n\n .. versionadded:: 1.5.0\n\n Support for ``defaultdict`` was added. Specify a ``defaultdict`` as input where\n the default determines the ``dtype`` of the columns which are not explicitly\n listed.\nengine : {{'c', 'python', 'pyarrow'}}, optional\n Parser engine to use. The C and pyarrow engines are faster, while the python engine\n is currently more feature-complete. Multithreading is currently only supported by\n the pyarrow engine.\n\n .. versionadded:: 1.4.0\n\n The 'pyarrow' engine was added as an *experimental* engine, and some features\n are unsupported, or may not work correctly, with this engine.\nconverters : dict of {{Hashable : Callable}}, optional\n Functions for converting values in specified columns. 
Keys can either\n be column labels or column indices.\ntrue_values : list, optional\n Values to consider as ``True`` in addition to case-insensitive variants of 'True'.\nfalse_values : list, optional\n Values to consider as ``False`` in addition to case-insensitive variants of 'False'.\nskipinitialspace : bool, default False\n Skip spaces after delimiter.\nskiprows : int, list of int or Callable, optional\n Line numbers to skip (0-indexed) or number of lines to skip (``int``)\n at the start of the file.\n\n If callable, the callable function will be evaluated against the row\n indices, returning ``True`` if the row should be skipped and ``False`` otherwise.\n An example of a valid callable argument would be ``lambda x: x in [0, 2]``.\nskipfooter : int, default 0\n Number of lines at bottom of file to skip (Unsupported with ``engine='c'``).\nnrows : int, optional\n Number of rows of file to read. Useful for reading pieces of large files.\nna_values : Hashable, Iterable of Hashable or dict of {{Hashable : Iterable}}, optional\n Additional strings to recognize as ``NA``/``NaN``. If ``dict`` passed, specific\n per-column ``NA`` values. 
By default the following values are interpreted as\n ``NaN``: " """\n + fill('", "'.join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")\n + """ ".\n\nkeep_default_na : bool, default True\n Whether or not to include the default ``NaN`` values when parsing the data.\n Depending on whether ``na_values`` is passed in, the behavior is as follows:\n\n * If ``keep_default_na`` is ``True``, and ``na_values`` are specified, ``na_values``\n is appended to the default ``NaN`` values used for parsing.\n * If ``keep_default_na`` is ``True``, and ``na_values`` are not specified, only\n the default ``NaN`` values are used for parsing.\n * If ``keep_default_na`` is ``False``, and ``na_values`` are specified, only\n the ``NaN`` values specified ``na_values`` are used for parsing.\n * If ``keep_default_na`` is ``False``, and ``na_values`` are not specified, no\n strings will be parsed as ``NaN``.\n\n Note that if ``na_filter`` is passed in as ``False``, the ``keep_default_na`` and\n ``na_values`` parameters will be ignored.\nna_filter : bool, default True\n Detect missing value markers (empty strings and the value of ``na_values``). In\n data without any ``NA`` values, passing ``na_filter=False`` can improve the\n performance of reading a large file.\nverbose : bool, default False\n Indicate number of ``NA`` values placed in non-numeric columns.\n\n .. deprecated:: 2.2.0\nskip_blank_lines : bool, default True\n If ``True``, skip over blank lines rather than interpreting as ``NaN`` values.\nparse_dates : bool, list of Hashable, list of lists or dict of {{Hashable : list}}, \\ndefault False\n The behavior is as follows:\n\n * ``bool``. If ``True`` -> try parsing the index. Note: Automatically set to\n ``True`` if ``date_format`` or ``date_parser`` arguments have been passed.\n * ``list`` of ``int`` or names. e.g. If ``[1, 2, 3]`` -> try parsing columns 1, 2, 3\n each as a separate date column.\n * ``list`` of ``list``. e.g. 
If ``[[1, 3]]`` -> combine columns 1 and 3 and parse\n as a single date column. Values are joined with a space before parsing.\n * ``dict``, e.g. ``{{'foo' : [1, 3]}}`` -> parse columns 1, 3 as date and call\n result 'foo'. Values are joined with a space before parsing.\n\n If a column or index cannot be represented as an array of ``datetime``,\n say because of an unparsable value or a mixture of timezones, the column\n or index will be returned unaltered as an ``object`` data type. For\n non-standard ``datetime`` parsing, use :func:`~pandas.to_datetime` after\n :func:`~pandas.read_csv`.\n\n Note: A fast-path exists for iso8601-formatted dates.\ninfer_datetime_format : bool, default False\n If ``True`` and ``parse_dates`` is enabled, pandas will attempt to infer the\n format of the ``datetime`` strings in the columns, and if it can be inferred,\n switch to a faster method of parsing them. In some cases this can increase\n the parsing speed by 5-10x.\n\n .. deprecated:: 2.0.0\n A strict version of this argument is now the default, passing it has no effect.\n\nkeep_date_col : bool, default False\n If ``True`` and ``parse_dates`` specifies combining multiple columns then\n keep the original columns.\ndate_parser : Callable, optional\n Function to use for converting a sequence of string columns to an array of\n ``datetime`` instances. The default uses ``dateutil.parser.parser`` to do the\n conversion. pandas will try to call ``date_parser`` in three different ways,\n advancing to the next if an exception occurs: 1) Pass one or more arrays\n (as defined by ``parse_dates``) as arguments; 2) concatenate (row-wise) the\n string values from the columns defined by ``parse_dates`` into a single array\n and pass that; and 3) call ``date_parser`` once for each row using one or\n more strings (corresponding to the columns defined by ``parse_dates``) as\n arguments.\n\n .. 
deprecated:: 2.0.0\n Use ``date_format`` instead, or read in as ``object`` and then apply\n :func:`~pandas.to_datetime` as-needed.\ndate_format : str or dict of column -> format, optional\n Format to use for parsing dates when used in conjunction with ``parse_dates``.\n The strftime to parse time, e.g. :const:`"%d/%m/%Y"`. See\n `strftime documentation\n <https://docs.python.org/3/library/datetime.html\n #strftime-and-strptime-behavior>`_ for more information on choices, though\n note that :const:`"%f"` will parse all the way up to nanoseconds.\n You can also pass:\n\n - "ISO8601", to parse any `ISO8601 <https://en.wikipedia.org/wiki/ISO_8601>`_\n time string (not necessarily in exactly the same format);\n - "mixed", to infer the format for each element individually. This is risky,\n and you should probably use it along with `dayfirst`.\n\n .. versionadded:: 2.0.0\ndayfirst : bool, default False\n DD/MM format dates, international and European format.\ncache_dates : bool, default True\n If ``True``, use a cache of unique, converted dates to apply the ``datetime``\n conversion. May produce significant speed-up when parsing duplicate\n date strings, especially ones with timezone offsets.\n\niterator : bool, default False\n Return ``TextFileReader`` object for iteration or getting chunks with\n ``get_chunk()``.\nchunksize : int, optional\n Number of lines to read from the file per chunk. Passing a value will cause the\n function to return a ``TextFileReader`` object for iteration.\n See the `IO Tools docs\n <https://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_\n for more information on ``iterator`` and ``chunksize``.\n\n{decompression_options}\n\n .. 
versionchanged:: 1.4.0 Zstandard support.\n\nthousands : str (length 1), optional\n Character acting as the thousands separator in numerical values.\ndecimal : str (length 1), default '.'\n Character to recognize as decimal point (e.g., use ',' for European data).\nlineterminator : str (length 1), optional\n Character used to denote a line break. Only valid with C parser.\nquotechar : str (length 1), optional\n Character used to denote the start and end of a quoted item. Quoted\n items can include the ``delimiter`` and it will be ignored.\nquoting : {{0 or csv.QUOTE_MINIMAL, 1 or csv.QUOTE_ALL, 2 or csv.QUOTE_NONNUMERIC, \\n3 or csv.QUOTE_NONE}}, default csv.QUOTE_MINIMAL\n Control field quoting behavior per ``csv.QUOTE_*`` constants. Default is\n ``csv.QUOTE_MINIMAL`` (i.e., 0) which implies that only fields containing special\n characters are quoted (e.g., characters defined in ``quotechar``, ``delimiter``,\n or ``lineterminator``.\ndoublequote : bool, default True\n When ``quotechar`` is specified and ``quoting`` is not ``QUOTE_NONE``, indicate\n whether or not to interpret two consecutive ``quotechar`` elements INSIDE a\n field as a single ``quotechar`` element.\nescapechar : str (length 1), optional\n Character used to escape other characters.\ncomment : str (length 1), optional\n Character indicating that the remainder of line should not be parsed.\n If found at the beginning\n of a line, the line will be ignored altogether. This parameter must be a\n single character. Like empty lines (as long as ``skip_blank_lines=True``),\n fully commented lines are ignored by the parameter ``header`` but not by\n ``skiprows``. For example, if ``comment='#'``, parsing\n ``#empty\\na,b,c\\n1,2,3`` with ``header=0`` will result in ``'a,b,c'`` being\n treated as the header.\nencoding : str, optional, default 'utf-8'\n Encoding to use for UTF when reading/writing (ex. ``'utf-8'``). 
`List of Python\n standard encodings\n <https://docs.python.org/3/library/codecs.html#standard-encodings>`_ .\n\nencoding_errors : str, optional, default 'strict'\n How encoding errors are treated. `List of possible values\n <https://docs.python.org/3/library/codecs.html#error-handlers>`_ .\n\n .. versionadded:: 1.3.0\n\ndialect : str or csv.Dialect, optional\n If provided, this parameter will override values (default or not) for the\n following parameters: ``delimiter``, ``doublequote``, ``escapechar``,\n ``skipinitialspace``, ``quotechar``, and ``quoting``. If it is necessary to\n override values, a ``ParserWarning`` will be issued. See ``csv.Dialect``\n documentation for more details.\non_bad_lines : {{'error', 'warn', 'skip'}} or Callable, default 'error'\n Specifies what to do upon encountering a bad line (a line with too many fields).\n Allowed values are :\n\n - ``'error'``, raise an Exception when a bad line is encountered.\n - ``'warn'``, raise a warning when a bad line is encountered and skip that line.\n - ``'skip'``, skip bad lines without raising or warning when they are encountered.\n\n .. versionadded:: 1.3.0\n\n .. versionadded:: 1.4.0\n\n - Callable, function with signature\n ``(bad_line: list[str]) -> list[str] | None`` that will process a single\n bad line. ``bad_line`` is a list of strings split by the ``sep``.\n If the function returns ``None``, the bad line will be ignored.\n If the function returns a new ``list`` of strings with more elements than\n expected, a ``ParserWarning`` will be emitted while dropping extra elements.\n Only supported when ``engine='python'``\n\n .. versionchanged:: 2.2.0\n\n - Callable, function with signature\n as described in `pyarrow documentation\n <https://arrow.apache.org/docs/python/generated/pyarrow.csv.ParseOptions.html\n #pyarrow.csv.ParseOptions.invalid_row_handler>`_ when ``engine='pyarrow'``\n\ndelim_whitespace : bool, default False\n Specifies whether or not whitespace (e.g. 
``' '`` or ``'\\t'``) will be\n used as the ``sep`` delimiter. Equivalent to setting ``sep='\\s+'``. If this option\n is set to ``True``, nothing should be passed in for the ``delimiter``\n parameter.\n\n .. deprecated:: 2.2.0\n Use ``sep="\\s+"`` instead.\nlow_memory : bool, default True\n Internally process the file in chunks, resulting in lower memory use\n while parsing, but possibly mixed type inference. To ensure no mixed\n types either set ``False``, or specify the type with the ``dtype`` parameter.\n Note that the entire file is read into a single :class:`~pandas.DataFrame`\n regardless, use the ``chunksize`` or ``iterator`` parameter to return the data in\n chunks. (Only valid with C parser).\nmemory_map : bool, default False\n If a filepath is provided for ``filepath_or_buffer``, map the file object\n directly onto memory and access the data directly from there. Using this\n option can improve performance because there is no longer any I/O overhead.\nfloat_precision : {{'high', 'legacy', 'round_trip'}}, optional\n Specifies which converter the C engine should use for floating-point\n values. The options are ``None`` or ``'high'`` for the ordinary converter,\n ``'legacy'`` for the original lower precision pandas converter, and\n ``'round_trip'`` for the round-trip converter.\n\n{storage_options}\n\ndtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'\n Back-end data type applied to the resultant :class:`DataFrame`\n (still experimental). Behaviour is as follows:\n\n * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`\n (default).\n * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`\n DataFrame.\n\n .. 
versionadded:: 2.0\n\nReturns\n-------\nDataFrame or TextFileReader\n A comma-separated values (csv) file is returned as two-dimensional\n data structure with labeled axes.\n\nSee Also\n--------\nDataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.\n{see_also_func_name} : {see_also_func_summary}\nread_fwf : Read a table of fixed-width formatted lines into DataFrame.\n\nExamples\n--------\n>>> pd.{func_name}('data.csv') # doctest: +SKIP\n"""\n)\n\n\nclass _C_Parser_Defaults(TypedDict):\n delim_whitespace: Literal[False]\n na_filter: Literal[True]\n low_memory: Literal[True]\n memory_map: Literal[False]\n float_precision: None\n\n\n_c_parser_defaults: _C_Parser_Defaults = {\n "delim_whitespace": False,\n "na_filter": True,\n "low_memory": True,\n "memory_map": False,\n "float_precision": None,\n}\n\n\nclass _Fwf_Defaults(TypedDict):\n colspecs: Literal["infer"]\n infer_nrows: Literal[100]\n widths: None\n\n\n_fwf_defaults: _Fwf_Defaults = {"colspecs": "infer", "infer_nrows": 100, "widths": None}\n_c_unsupported = {"skipfooter"}\n_python_unsupported = {"low_memory", "float_precision"}\n_pyarrow_unsupported = {\n "skipfooter",\n "float_precision",\n "chunksize",\n "comment",\n "nrows",\n "thousands",\n "memory_map",\n "dialect",\n "delim_whitespace",\n "quoting",\n "lineterminator",\n "converters",\n "iterator",\n "dayfirst",\n "verbose",\n "skipinitialspace",\n "low_memory",\n}\n\n\nclass _DeprecationConfig(NamedTuple):\n default_value: Any\n msg: str | None\n\n\n@overload\ndef validate_integer(name: str, val: None, min_val: int = ...) -> None:\n ...\n\n\n@overload\ndef validate_integer(name: str, val: float, min_val: int = ...) -> int:\n ...\n\n\n@overload\ndef validate_integer(name: str, val: int | None, min_val: int = ...) 
-> int | None:\n ...\n\n\ndef validate_integer(\n name: str, val: int | float | None, min_val: int = 0\n) -> int | None:\n """\n Checks whether the 'name' parameter for parsing is either\n an integer OR float that can SAFELY be cast to an integer\n without losing accuracy. Raises a ValueError if that is\n not the case.\n\n Parameters\n ----------\n name : str\n Parameter name (used for error reporting)\n val : int or float\n The value to check\n min_val : int\n Minimum allowed value (val < min_val will result in a ValueError)\n """\n if val is None:\n return val\n\n msg = f"'{name:s}' must be an integer >={min_val:d}"\n if is_float(val):\n if int(val) != val:\n raise ValueError(msg)\n val = int(val)\n elif not (is_integer(val) and val >= min_val):\n raise ValueError(msg)\n\n return int(val)\n\n\ndef _validate_names(names: Sequence[Hashable] | None) -> None:\n """\n Raise ValueError if the `names` parameter contains duplicates or has an\n invalid data type.\n\n Parameters\n ----------\n names : array-like or None\n An array containing a list of the names used for the output DataFrame.\n\n Raises\n ------\n ValueError\n If names are not unique or are not ordered (e.g. 
set).\n """\n if names is not None:\n if len(names) != len(set(names)):\n raise ValueError("Duplicate names are not allowed.")\n if not (\n is_list_like(names, allow_sets=False) or isinstance(names, abc.KeysView)\n ):\n raise ValueError("Names should be an ordered collection.")\n\n\ndef _read(\n filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], kwds\n) -> DataFrame | TextFileReader:\n """Generic reader of line files."""\n # if we pass a date_parser and parse_dates=False, we should not parse the\n # dates GH#44366\n if kwds.get("parse_dates", None) is None:\n if (\n kwds.get("date_parser", lib.no_default) is lib.no_default\n and kwds.get("date_format", None) is None\n ):\n kwds["parse_dates"] = False\n else:\n kwds["parse_dates"] = True\n\n # Extract some of the arguments (pass chunksize on).\n iterator = kwds.get("iterator", False)\n chunksize = kwds.get("chunksize", None)\n if kwds.get("engine") == "pyarrow":\n if iterator:\n raise ValueError(\n "The 'iterator' option is not supported with the 'pyarrow' engine"\n )\n\n if chunksize is not None:\n raise ValueError(\n "The 'chunksize' option is not supported with the 'pyarrow' engine"\n )\n else:\n chunksize = validate_integer("chunksize", chunksize, 1)\n\n nrows = kwds.get("nrows", None)\n\n # Check for duplicates in names.\n _validate_names(kwds.get("names", None))\n\n # Create the parser.\n parser = TextFileReader(filepath_or_buffer, **kwds)\n\n if chunksize or iterator:\n return parser\n\n with parser:\n return parser.read(nrows)\n\n\n# iterator=True -> TextFileReader\n@overload\ndef read_csv(\n filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],\n *,\n sep: str | None | lib.NoDefault = ...,\n delimiter: str | None | lib.NoDefault = ...,\n header: int | Sequence[int] | None | Literal["infer"] = ...,\n names: Sequence[Hashable] | None | lib.NoDefault = ...,\n index_col: IndexLabel | Literal[False] | None = ...,\n usecols: UsecolsArgType = ...,\n dtype: DtypeArg | None 
= ...,\n engine: CSVEngine | None = ...,\n converters: Mapping[Hashable, Callable] | None = ...,\n true_values: list | None = ...,\n false_values: list | None = ...,\n skipinitialspace: bool = ...,\n skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,\n skipfooter: int = ...,\n nrows: int | None = ...,\n na_values: Hashable\n | Iterable[Hashable]\n | Mapping[Hashable, Iterable[Hashable]]\n | None = ...,\n na_filter: bool = ...,\n verbose: bool | lib.NoDefault = ...,\n skip_blank_lines: bool = ...,\n parse_dates: bool | Sequence[Hashable] | None = ...,\n infer_datetime_format: bool | lib.NoDefault = ...,\n keep_date_col: bool | lib.NoDefault = ...,\n date_parser: Callable | lib.NoDefault = ...,\n date_format: str | dict[Hashable, str] | None = ...,\n dayfirst: bool = ...,\n cache_dates: bool = ...,\n iterator: Literal[True],\n chunksize: int | None = ...,\n compression: CompressionOptions = ...,\n thousands: str | None = ...,\n decimal: str = ...,\n lineterminator: str | None = ...,\n quotechar: str = ...,\n quoting: int = ...,\n doublequote: bool = ...,\n escapechar: str | None = ...,\n comment: str | None = ...,\n encoding: str | None = ...,\n encoding_errors: str | None = ...,\n dialect: str | csv.Dialect | None = ...,\n on_bad_lines=...,\n delim_whitespace: bool | lib.NoDefault = ...,\n low_memory: bool = ...,\n memory_map: bool = ...,\n float_precision: Literal["high", "legacy"] | None = ...,\n storage_options: StorageOptions = ...,\n dtype_backend: DtypeBackend | lib.NoDefault = ...,\n) -> TextFileReader:\n ...\n\n\n# chunksize=int -> TextFileReader\n@overload\ndef read_csv(\n filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],\n *,\n sep: str | None | lib.NoDefault = ...,\n delimiter: str | None | lib.NoDefault = ...,\n header: int | Sequence[int] | None | Literal["infer"] = ...,\n names: Sequence[Hashable] | None | lib.NoDefault = ...,\n index_col: IndexLabel | Literal[False] | None = ...,\n usecols: UsecolsArgType = 
...,
    dtype: DtypeArg | None = ...,
    engine: CSVEngine | None = ...,
    converters: Mapping[Hashable, Callable] | None = ...,
    true_values: list | None = ...,
    false_values: list | None = ...,
    skipinitialspace: bool = ...,
    skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
    skipfooter: int = ...,
    nrows: int | None = ...,
    na_values: Hashable
    | Iterable[Hashable]
    | Mapping[Hashable, Iterable[Hashable]]
    | None = ...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool | lib.NoDefault = ...,
    skip_blank_lines: bool = ...,
    parse_dates: bool | Sequence[Hashable] | None = ...,
    infer_datetime_format: bool | lib.NoDefault = ...,
    keep_date_col: bool | lib.NoDefault = ...,
    date_parser: Callable | lib.NoDefault = ...,
    date_format: str | dict[Hashable, str] | None = ...,
    dayfirst: bool = ...,
    cache_dates: bool = ...,
    iterator: bool = ...,
    chunksize: int,
    compression: CompressionOptions = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    lineterminator: str | None = ...,
    quotechar: str = ...,
    quoting: int = ...,
    doublequote: bool = ...,
    escapechar: str | None = ...,
    comment: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    dialect: str | csv.Dialect | None = ...,
    on_bad_lines=...,
    delim_whitespace: bool | lib.NoDefault = ...,
    low_memory: bool = ...,
    memory_map: bool = ...,
    float_precision: Literal["high", "legacy"] | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
) -> TextFileReader:
    ...


# default case -> DataFrame
# (iterator=False and chunksize=None: the whole file is parsed eagerly, so
# the type checker can narrow the return type to DataFrame)
@overload
def read_csv(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    sep: str | None | lib.NoDefault = ...,
    delimiter: str | None | lib.NoDefault = ...,
    header: int | Sequence[int] | None | Literal["infer"] = ...,
    names: Sequence[Hashable] | None | lib.NoDefault = ...,
    index_col: IndexLabel | Literal[False] | None = ...,
    usecols: UsecolsArgType = ...,
    dtype: DtypeArg | None = ...,
    engine: CSVEngine | None = ...,
    converters: Mapping[Hashable, Callable] | None = ...,
    true_values: list | None = ...,
    false_values: list | None = ...,
    skipinitialspace: bool = ...,
    skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
    skipfooter: int = ...,
    nrows: int | None = ...,
    na_values: Hashable
    | Iterable[Hashable]
    | Mapping[Hashable, Iterable[Hashable]]
    | None = ...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool | lib.NoDefault = ...,
    skip_blank_lines: bool = ...,
    parse_dates: bool | Sequence[Hashable] | None = ...,
    infer_datetime_format: bool | lib.NoDefault = ...,
    keep_date_col: bool | lib.NoDefault = ...,
    date_parser: Callable | lib.NoDefault = ...,
    date_format: str | dict[Hashable, str] | None = ...,
    dayfirst: bool = ...,
    cache_dates: bool = ...,
    iterator: Literal[False] = ...,
    chunksize: None = ...,
    compression: CompressionOptions = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    lineterminator: str | None = ...,
    quotechar: str = ...,
    quoting: int = ...,
    doublequote: bool = ...,
    escapechar: str | None = ...,
    comment: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    dialect: str | csv.Dialect | None = ...,
    on_bad_lines=...,
    delim_whitespace: bool | lib.NoDefault = ...,
    low_memory: bool = ...,
    memory_map: bool = ...,
    float_precision: Literal["high", "legacy"] | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
) -> DataFrame:
    ...


# Unions -> DataFrame | TextFileReader
# (iterator/chunksize only known as bool / int | None: the return type cannot
# be narrowed, so callers get the full DataFrame | TextFileReader union)
@overload
def read_csv(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    sep: str | None | lib.NoDefault = ...,
    delimiter: str | None | lib.NoDefault = ...,
    header: int | Sequence[int] | None | Literal["infer"] = ...,
    names: Sequence[Hashable] | None | lib.NoDefault = ...,
    index_col: IndexLabel | Literal[False] | None = ...,
    usecols: UsecolsArgType = ...,
    dtype: DtypeArg | None = ...,
    engine: CSVEngine | None = ...,
    converters: Mapping[Hashable, Callable] | None = ...,
    true_values: list | None = ...,
    false_values: list | None = ...,
    skipinitialspace: bool = ...,
    skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
    skipfooter: int = ...,
    nrows: int | None = ...,
    na_values: Hashable
    | Iterable[Hashable]
    | Mapping[Hashable, Iterable[Hashable]]
    | None = ...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool | lib.NoDefault = ...,
    skip_blank_lines: bool = ...,
    parse_dates: bool | Sequence[Hashable] | None = ...,
    infer_datetime_format: bool | lib.NoDefault = ...,
    keep_date_col: bool | lib.NoDefault = ...,
    date_parser: Callable | lib.NoDefault = ...,
    date_format: str | dict[Hashable, str] | None = ...,
    dayfirst: bool = ...,
    cache_dates: bool = ...,
    iterator: bool = ...,
    chunksize: int | None = ...,
    compression: CompressionOptions = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    lineterminator: str | None = ...,
    quotechar: str = ...,
    quoting: int = ...,
    doublequote: bool = ...,
    escapechar: str | None = ...,
    comment: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    dialect: str | csv.Dialect | None = ...,
    on_bad_lines=...,
    delim_whitespace: bool | lib.NoDefault = ...,
    low_memory: bool = ...,
    memory_map: bool = ...,
    float_precision: Literal["high", "legacy"] | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
) -> DataFrame | TextFileReader:
    ...


@Appender(
    _doc_read_csv_and_table.format(
        func_name="read_csv",
        summary="Read a comma-separated values (csv) file into DataFrame.",
        see_also_func_name="read_table",
        see_also_func_summary="Read general delimited file into DataFrame.",
_default_sep="','",
        storage_options=_shared_docs["storage_options"],
        decompression_options=_shared_docs["decompression_options"]
        % "filepath_or_buffer",
    )
)
def read_csv(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    sep: str | None | lib.NoDefault = lib.no_default,
    delimiter: str | None | lib.NoDefault = None,
    # Column and Index Locations and Names
    header: int | Sequence[int] | None | Literal["infer"] = "infer",
    names: Sequence[Hashable] | None | lib.NoDefault = lib.no_default,
    index_col: IndexLabel | Literal[False] | None = None,
    usecols: UsecolsArgType = None,
    # General Parsing Configuration
    dtype: DtypeArg | None = None,
    engine: CSVEngine | None = None,
    converters: Mapping[Hashable, Callable] | None = None,
    true_values: list | None = None,
    false_values: list | None = None,
    skipinitialspace: bool = False,
    skiprows: list[int] | int | Callable[[Hashable], bool] | None = None,
    skipfooter: int = 0,
    nrows: int | None = None,
    # NA and Missing Data Handling
    na_values: Hashable
    | Iterable[Hashable]
    | Mapping[Hashable, Iterable[Hashable]]
    | None = None,
    keep_default_na: bool = True,
    na_filter: bool = True,
    verbose: bool | lib.NoDefault = lib.no_default,
    skip_blank_lines: bool = True,
    # Datetime Handling
    parse_dates: bool | Sequence[Hashable] | None = None,
    infer_datetime_format: bool | lib.NoDefault = lib.no_default,
    keep_date_col: bool | lib.NoDefault = lib.no_default,
    date_parser: Callable | lib.NoDefault = lib.no_default,
    date_format: str | dict[Hashable, str] | None = None,
    dayfirst: bool = False,
    cache_dates: bool = True,
    # Iteration
    iterator: bool = False,
    chunksize: int | None = None,
    # Quoting, Compression, and File Format
    compression: CompressionOptions = "infer",
    thousands: str | None = None,
    decimal: str = ".",
    lineterminator: str | None = None,
    quotechar: str = '"',
    quoting: int = csv.QUOTE_MINIMAL,
    doublequote: bool = True,
    escapechar: str | None = None,
    comment: str | None = None,
    encoding: str | None = None,
    encoding_errors: str | None = "strict",
    dialect: str | csv.Dialect | None = None,
    # Error Handling
    on_bad_lines: str = "error",
    # Internal
    delim_whitespace: bool | lib.NoDefault = lib.no_default,
    low_memory: bool = _c_parser_defaults["low_memory"],
    memory_map: bool = False,
    float_precision: Literal["high", "legacy"] | None = None,
    storage_options: StorageOptions | None = None,
    dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
) -> DataFrame | TextFileReader:
    # Runtime entry point for ``read_csv``.  The user-facing docstring is
    # attached by the ``@Appender`` decorator above, so there is no literal
    # docstring here.  The body only (1) warns about / normalizes deprecated
    # keywords, (2) snapshots the remaining keywords via ``locals()``, and
    # (3) delegates to ``_read``, which returns a DataFrame, or a
    # TextFileReader when iterator/chunksize request chunked parsing.

    # Deprecated keyword: warn only when the caller passed it explicitly
    # (i.e. the lib.no_default sentinel was replaced), otherwise fall back
    # to the historical default.
    if keep_date_col is not lib.no_default:
        # GH#55569
        warnings.warn(
            "The 'keep_date_col' keyword in pd.read_csv is deprecated and "
            "will be removed in a future version. Explicitly remove unwanted "
            "columns after parsing instead.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
    else:
        keep_date_col = False

    # Nested sequences / list-valued dict entries in parse_dates are
    # deprecated; plain hashable column labels remain supported.
    if lib.is_list_like(parse_dates):
        # GH#55569
        depr = False
        # error: Item "bool" of "bool | Sequence[Hashable] | None" has no
        # attribute "__iter__" (not iterable)
        if not all(is_hashable(x) for x in parse_dates):  # type: ignore[union-attr]
            depr = True
        elif isinstance(parse_dates, dict) and any(
            lib.is_list_like(x) for x in parse_dates.values()
        ):
            depr = True
        if depr:
            warnings.warn(
                "Support for nested sequences for 'parse_dates' in pd.read_csv "
                "is deprecated. Combine the desired columns with pd.to_datetime "
                "after parsing instead.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )

    if infer_datetime_format is not lib.no_default:
        warnings.warn(
            "The argument 'infer_datetime_format' is deprecated and will "
            "be removed in a future version. "
            "A strict version of it is now the default, see "
            "https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. "
            "You can safely remove this argument.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )

    if delim_whitespace is not lib.no_default:
        # GH#55569
        warnings.warn(
            "The 'delim_whitespace' keyword in pd.read_csv is deprecated and "
            "will be removed in a future version. Use ``sep='\\s+'`` instead",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
    else:
        delim_whitespace = False

    if verbose is not lib.no_default:
        # GH#55569
        warnings.warn(
            "The 'verbose' keyword in pd.read_csv is deprecated and "
            "will be removed in a future version.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
    else:
        verbose = False

    # Snapshot every parameter into a kwds dict for the parser; this is why
    # no helper locals may be introduced above without care.
    # NOTE(review): ``depr`` (defined above when parse_dates is list-like) is
    # captured here as well; unknown keys appear to be ignored downstream --
    # confirm before relying on that.
    # locals() should never be modified
    kwds = locals().copy()
    # filepath_or_buffer is passed positionally to _read; sep is folded into
    # kwds_defaults below.
    del kwds["filepath_or_buffer"]
    del kwds["sep"]

    kwds_defaults = _refine_defaults_read(
        dialect,
        delimiter,
        delim_whitespace,
        engine,
        sep,
        on_bad_lines,
        names,
        defaults={"delimiter": ","},
        dtype_backend=dtype_backend,
    )
    kwds.update(kwds_defaults)

    return _read(filepath_or_buffer, kwds)


# iterator=True -> TextFileReader
@overload
def read_table(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    sep: str | None | lib.NoDefault = ...,
    delimiter: str | None | lib.NoDefault = ...,
    header: int | Sequence[int] | None | Literal["infer"] = ...,
    names: Sequence[Hashable] | None | lib.NoDefault = ...,
    index_col: IndexLabel | Literal[False] | None = ...,
    usecols: UsecolsArgType = ...,
    dtype: DtypeArg | None = ...,
    engine: CSVEngine | None = ...,
    converters: Mapping[Hashable, Callable] | None = ...,
    true_values: list | None = ...,
    false_values: list | None = ...,
    skipinitialspace: bool = ...,
    skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
    skipfooter: int = ...,
    nrows: int | None = ...,
    na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool | lib.NoDefault = ...,
skip_blank_lines: bool = ...,\n parse_dates: bool | Sequence[Hashable] = ...,\n infer_datetime_format: bool | lib.NoDefault = ...,\n keep_date_col: bool | lib.NoDefault = ...,\n date_parser: Callable | lib.NoDefault = ...,\n date_format: str | dict[Hashable, str] | None = ...,\n dayfirst: bool = ...,\n cache_dates: bool = ...,\n iterator: Literal[True],\n chunksize: int | None = ...,\n compression: CompressionOptions = ...,\n thousands: str | None = ...,\n decimal: str = ...,\n lineterminator: str | None = ...,\n quotechar: str = ...,\n quoting: int = ...,\n doublequote: bool = ...,\n escapechar: str | None = ...,\n comment: str | None = ...,\n encoding: str | None = ...,\n encoding_errors: str | None = ...,\n dialect: str | csv.Dialect | None = ...,\n on_bad_lines=...,\n delim_whitespace: bool = ...,\n low_memory: bool = ...,\n memory_map: bool = ...,\n float_precision: str | None = ...,\n storage_options: StorageOptions = ...,\n dtype_backend: DtypeBackend | lib.NoDefault = ...,\n) -> TextFileReader:\n ...\n\n\n# chunksize=int -> TextFileReader\n@overload\ndef read_table(\n filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],\n *,\n sep: str | None | lib.NoDefault = ...,\n delimiter: str | None | lib.NoDefault = ...,\n header: int | Sequence[int] | None | Literal["infer"] = ...,\n names: Sequence[Hashable] | None | lib.NoDefault = ...,\n index_col: IndexLabel | Literal[False] | None = ...,\n usecols: UsecolsArgType = ...,\n dtype: DtypeArg | None = ...,\n engine: CSVEngine | None = ...,\n converters: Mapping[Hashable, Callable] | None = ...,\n true_values: list | None = ...,\n false_values: list | None = ...,\n skipinitialspace: bool = ...,\n skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,\n skipfooter: int = ...,\n nrows: int | None = ...,\n na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ...,\n keep_default_na: bool = ...,\n na_filter: bool = ...,\n verbose: bool | lib.NoDefault = ...,\n 
skip_blank_lines: bool = ...,\n parse_dates: bool | Sequence[Hashable] = ...,\n infer_datetime_format: bool | lib.NoDefault = ...,\n keep_date_col: bool | lib.NoDefault = ...,\n date_parser: Callable | lib.NoDefault = ...,\n date_format: str | dict[Hashable, str] | None = ...,\n dayfirst: bool = ...,\n cache_dates: bool = ...,\n iterator: bool = ...,\n chunksize: int,\n compression: CompressionOptions = ...,\n thousands: str | None = ...,\n decimal: str = ...,\n lineterminator: str | None = ...,\n quotechar: str = ...,\n quoting: int = ...,\n doublequote: bool = ...,\n escapechar: str | None = ...,\n comment: str | None = ...,\n encoding: str | None = ...,\n encoding_errors: str | None = ...,\n dialect: str | csv.Dialect | None = ...,\n on_bad_lines=...,\n delim_whitespace: bool = ...,\n low_memory: bool = ...,\n memory_map: bool = ...,\n float_precision: str | None = ...,\n storage_options: StorageOptions = ...,\n dtype_backend: DtypeBackend | lib.NoDefault = ...,\n) -> TextFileReader:\n ...\n\n\n# default -> DataFrame\n@overload\ndef read_table(\n filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],\n *,\n sep: str | None | lib.NoDefault = ...,\n delimiter: str | None | lib.NoDefault = ...,\n header: int | Sequence[int] | None | Literal["infer"] = ...,\n names: Sequence[Hashable] | None | lib.NoDefault = ...,\n index_col: IndexLabel | Literal[False] | None = ...,\n usecols: UsecolsArgType = ...,\n dtype: DtypeArg | None = ...,\n engine: CSVEngine | None = ...,\n converters: Mapping[Hashable, Callable] | None = ...,\n true_values: list | None = ...,\n false_values: list | None = ...,\n skipinitialspace: bool = ...,\n skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,\n skipfooter: int = ...,\n nrows: int | None = ...,\n na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ...,\n keep_default_na: bool = ...,\n na_filter: bool = ...,\n verbose: bool | lib.NoDefault = ...,\n skip_blank_lines: bool = ...,\n 
parse_dates: bool | Sequence[Hashable] = ...,\n infer_datetime_format: bool | lib.NoDefault = ...,\n keep_date_col: bool | lib.NoDefault = ...,\n date_parser: Callable | lib.NoDefault = ...,\n date_format: str | dict[Hashable, str] | None = ...,\n dayfirst: bool = ...,\n cache_dates: bool = ...,\n iterator: Literal[False] = ...,\n chunksize: None = ...,\n compression: CompressionOptions = ...,\n thousands: str | None = ...,\n decimal: str = ...,\n lineterminator: str | None = ...,\n quotechar: str = ...,\n quoting: int = ...,\n doublequote: bool = ...,\n escapechar: str | None = ...,\n comment: str | None = ...,\n encoding: str | None = ...,\n encoding_errors: str | None = ...,\n dialect: str | csv.Dialect | None = ...,\n on_bad_lines=...,\n delim_whitespace: bool = ...,\n low_memory: bool = ...,\n memory_map: bool = ...,\n float_precision: str | None = ...,\n storage_options: StorageOptions = ...,\n dtype_backend: DtypeBackend | lib.NoDefault = ...,\n) -> DataFrame:\n ...\n\n\n# Unions -> DataFrame | TextFileReader\n@overload\ndef read_table(\n filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],\n *,\n sep: str | None | lib.NoDefault = ...,\n delimiter: str | None | lib.NoDefault = ...,\n header: int | Sequence[int] | None | Literal["infer"] = ...,\n names: Sequence[Hashable] | None | lib.NoDefault = ...,\n index_col: IndexLabel | Literal[False] | None = ...,\n usecols: UsecolsArgType = ...,\n dtype: DtypeArg | None = ...,\n engine: CSVEngine | None = ...,\n converters: Mapping[Hashable, Callable] | None = ...,\n true_values: list | None = ...,\n false_values: list | None = ...,\n skipinitialspace: bool = ...,\n skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,\n skipfooter: int = ...,\n nrows: int | None = ...,\n na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ...,\n keep_default_na: bool = ...,\n na_filter: bool = ...,\n verbose: bool | lib.NoDefault = ...,\n skip_blank_lines: bool = ...,\n parse_dates: 
bool | Sequence[Hashable] = ...,\n infer_datetime_format: bool | lib.NoDefault = ...,\n keep_date_col: bool | lib.NoDefault = ...,\n date_parser: Callable | lib.NoDefault = ...,\n date_format: str | dict[Hashable, str] | None = ...,\n dayfirst: bool = ...,\n cache_dates: bool = ...,\n iterator: bool = ...,\n chunksize: int | None = ...,\n compression: CompressionOptions = ...,\n thousands: str | None = ...,\n decimal: str = ...,\n lineterminator: str | None = ...,\n quotechar: str = ...,\n quoting: int = ...,\n doublequote: bool = ...,\n escapechar: str | None = ...,\n comment: str | None = ...,\n encoding: str | None = ...,\n encoding_errors: str | None = ...,\n dialect: str | csv.Dialect | None = ...,\n on_bad_lines=...,\n delim_whitespace: bool = ...,\n low_memory: bool = ...,\n memory_map: bool = ...,\n float_precision: str | None = ...,\n storage_options: StorageOptions = ...,\n dtype_backend: DtypeBackend | lib.NoDefault = ...,\n) -> DataFrame | TextFileReader:\n ...\n\n\n@Appender(\n _doc_read_csv_and_table.format(\n func_name="read_table",\n summary="Read general delimited file into DataFrame.",\n see_also_func_name="read_csv",\n see_also_func_summary=(\n "Read a comma-separated values (csv) file into DataFrame."\n ),\n _default_sep=r"'\\t' (tab-stop)",\n storage_options=_shared_docs["storage_options"],\n decompression_options=_shared_docs["decompression_options"]\n % "filepath_or_buffer",\n )\n)\ndef read_table(\n filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],\n *,\n sep: str | None | lib.NoDefault = lib.no_default,\n delimiter: str | None | lib.NoDefault = None,\n # Column and Index Locations and Names\n header: int | Sequence[int] | None | Literal["infer"] = "infer",\n names: Sequence[Hashable] | None | lib.NoDefault = lib.no_default,\n index_col: IndexLabel | Literal[False] | None = None,\n usecols: UsecolsArgType = None,\n # General Parsing Configuration\n dtype: DtypeArg | None = None,\n engine: CSVEngine | None = None,\n 
converters: Mapping[Hashable, Callable] | None = None,\n true_values: list | None = None,\n false_values: list | None = None,\n skipinitialspace: bool = False,\n skiprows: list[int] | int | Callable[[Hashable], bool] | None = None,\n skipfooter: int = 0,\n nrows: int | None = None,\n # NA and Missing Data Handling\n na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = None,\n keep_default_na: bool = True,\n na_filter: bool = True,\n verbose: bool | lib.NoDefault = lib.no_default,\n skip_blank_lines: bool = True,\n # Datetime Handling\n parse_dates: bool | Sequence[Hashable] = False,\n infer_datetime_format: bool | lib.NoDefault = lib.no_default,\n keep_date_col: bool | lib.NoDefault = lib.no_default,\n date_parser: Callable | lib.NoDefault = lib.no_default,\n date_format: str | dict[Hashable, str] | None = None,\n dayfirst: bool = False,\n cache_dates: bool = True,\n # Iteration\n iterator: bool = False,\n chunksize: int | None = None,\n # Quoting, Compression, and File Format\n compression: CompressionOptions = "infer",\n thousands: str | None = None,\n decimal: str = ".",\n lineterminator: str | None = None,\n quotechar: str = '"',\n quoting: int = csv.QUOTE_MINIMAL,\n doublequote: bool = True,\n escapechar: str | None = None,\n comment: str | None = None,\n encoding: str | None = None,\n encoding_errors: str | None = "strict",\n dialect: str | csv.Dialect | None = None,\n # Error Handling\n on_bad_lines: str = "error",\n # Internal\n delim_whitespace: bool | lib.NoDefault = lib.no_default,\n low_memory: bool = _c_parser_defaults["low_memory"],\n memory_map: bool = False,\n float_precision: str | None = None,\n storage_options: StorageOptions | None = None,\n dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,\n) -> DataFrame | TextFileReader:\n if keep_date_col is not lib.no_default:\n # GH#55569\n warnings.warn(\n "The 'keep_date_col' keyword in pd.read_table is deprecated and "\n "will be removed in a future version. 
Explicitly remove unwanted "\n "columns after parsing instead.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n else:\n keep_date_col = False\n\n # error: Item "bool" of "bool | Sequence[Hashable]" has no attribute "__iter__"\n if lib.is_list_like(parse_dates) and not all(is_hashable(x) for x in parse_dates): # type: ignore[union-attr]\n # GH#55569\n warnings.warn(\n "Support for nested sequences for 'parse_dates' in pd.read_table "\n "is deprecated. Combine the desired columns with pd.to_datetime "\n "after parsing instead.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n if infer_datetime_format is not lib.no_default:\n warnings.warn(\n "The argument 'infer_datetime_format' is deprecated and will "\n "be removed in a future version. "\n "A strict version of it is now the default, see "\n "https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. "\n "You can safely remove this argument.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n\n if delim_whitespace is not lib.no_default:\n # GH#55569\n warnings.warn(\n "The 'delim_whitespace' keyword in pd.read_table is deprecated and "\n "will be removed in a future version. 
Use ``sep='\\s+'`` instead",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n else:\n delim_whitespace = False\n\n if verbose is not lib.no_default:\n # GH#55569\n warnings.warn(\n "The 'verbose' keyword in pd.read_table is deprecated and "\n "will be removed in a future version.",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n else:\n verbose = False\n\n # locals() should never be modified\n kwds = locals().copy()\n del kwds["filepath_or_buffer"]\n del kwds["sep"]\n\n kwds_defaults = _refine_defaults_read(\n dialect,\n delimiter,\n delim_whitespace,\n engine,\n sep,\n on_bad_lines,\n names,\n defaults={"delimiter": "\t"},\n dtype_backend=dtype_backend,\n )\n kwds.update(kwds_defaults)\n\n return _read(filepath_or_buffer, kwds)\n\n\n@overload\ndef read_fwf(\n filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],\n *,\n colspecs: Sequence[tuple[int, int]] | str | None = ...,\n widths: Sequence[int] | None = ...,\n infer_nrows: int = ...,\n dtype_backend: DtypeBackend | lib.NoDefault = ...,\n iterator: Literal[True],\n chunksize: int | None = ...,\n **kwds,\n) -> TextFileReader:\n ...\n\n\n@overload\ndef read_fwf(\n filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],\n *,\n colspecs: Sequence[tuple[int, int]] | str | None = ...,\n widths: Sequence[int] | None = ...,\n infer_nrows: int = ...,\n dtype_backend: DtypeBackend | lib.NoDefault = ...,\n iterator: bool = ...,\n chunksize: int,\n **kwds,\n) -> TextFileReader:\n ...\n\n\n@overload\ndef read_fwf(\n filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],\n *,\n colspecs: Sequence[tuple[int, int]] | str | None = ...,\n widths: Sequence[int] | None = ...,\n infer_nrows: int = ...,\n dtype_backend: DtypeBackend | lib.NoDefault = ...,\n iterator: Literal[False] = ...,\n chunksize: None = ...,\n **kwds,\n) -> DataFrame:\n ...\n\n\ndef read_fwf(\n filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],\n *,\n colspecs: 
Sequence[tuple[int, int]] | str | None = "infer",\n widths: Sequence[int] | None = None,\n infer_nrows: int = 100,\n dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,\n iterator: bool = False,\n chunksize: int | None = None,\n **kwds,\n) -> DataFrame | TextFileReader:\n r"""\n Read a table of fixed-width formatted lines into DataFrame.\n\n Also supports optionally iterating or breaking of the file\n into chunks.\n\n Additional help can be found in the `online docs for IO Tools\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.\n\n Parameters\n ----------\n filepath_or_buffer : str, path object, or file-like object\n String, path object (implementing ``os.PathLike[str]``), or file-like\n object implementing a text ``read()`` function.The string could be a URL.\n Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is\n expected. A local file could be:\n ``file://localhost/path/to/table.csv``.\n colspecs : list of tuple (int, int) or 'infer'. optional\n A list of tuples giving the extents of the fixed-width\n fields of each line as half-open intervals (i.e., [from, to[ ).\n String value 'infer' can be used to instruct the parser to try\n detecting the column specifications from the first 100 rows of\n the data which are not being skipped via skiprows (default='infer').\n widths : list of int, optional\n A list of field widths which can be used instead of 'colspecs' if\n the intervals are contiguous.\n infer_nrows : int, default 100\n The number of rows to consider when letting the parser determine the\n `colspecs`.\n dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'\n Back-end data type applied to the resultant :class:`DataFrame`\n (still experimental). Behaviour is as follows:\n\n * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`\n (default).\n * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`\n DataFrame.\n\n .. 
versionadded:: 2.0\n\n **kwds : optional\n Optional keyword arguments can be passed to ``TextFileReader``.\n\n Returns\n -------\n DataFrame or TextFileReader\n A comma-separated values (csv) file is returned as two-dimensional\n data structure with labeled axes.\n\n See Also\n --------\n DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.\n read_csv : Read a comma-separated values (csv) file into DataFrame.\n\n Examples\n --------\n >>> pd.read_fwf('data.csv') # doctest: +SKIP\n """\n # Check input arguments.\n if colspecs is None and widths is None:\n raise ValueError("Must specify either colspecs or widths")\n if colspecs not in (None, "infer") and widths is not None:\n raise ValueError("You must specify only one of 'widths' and 'colspecs'")\n\n # Compute 'colspecs' from 'widths', if specified.\n if widths is not None:\n colspecs, col = [], 0\n for w in widths:\n colspecs.append((col, col + w))\n col += w\n\n # for mypy\n assert colspecs is not None\n\n # GH#40830\n # Ensure length of `colspecs` matches length of `names`\n names = kwds.get("names")\n if names is not None:\n if len(names) != len(colspecs) and colspecs != "infer":\n # need to check len(index_col) as it might contain\n # unnamed indices, in which case it's name is not required\n len_index = 0\n if kwds.get("index_col") is not None:\n index_col: Any = kwds.get("index_col")\n if index_col is not False:\n if not is_list_like(index_col):\n len_index = 1\n else:\n len_index = len(index_col)\n if kwds.get("usecols") is None and len(names) + len_index != len(colspecs):\n # If usecols is used colspec may be longer than names\n raise ValueError("Length of colspecs must match length of names")\n\n kwds["colspecs"] = colspecs\n kwds["infer_nrows"] = infer_nrows\n kwds["engine"] = "python-fwf"\n kwds["iterator"] = iterator\n kwds["chunksize"] = chunksize\n\n check_dtype_backend(dtype_backend)\n kwds["dtype_backend"] = dtype_backend\n return _read(filepath_or_buffer, kwds)\n\n\nclass 
TextFileReader(abc.Iterator):\n """\n\n Passed dialect overrides any of the related parser options\n\n """\n\n def __init__(\n self,\n f: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str] | list,\n engine: CSVEngine | None = None,\n **kwds,\n ) -> None:\n if engine is not None:\n engine_specified = True\n else:\n engine = "python"\n engine_specified = False\n self.engine = engine\n self._engine_specified = kwds.get("engine_specified", engine_specified)\n\n _validate_skipfooter(kwds)\n\n dialect = _extract_dialect(kwds)\n if dialect is not None:\n if engine == "pyarrow":\n raise ValueError(\n "The 'dialect' option is not supported with the 'pyarrow' engine"\n )\n kwds = _merge_with_dialect_properties(dialect, kwds)\n\n if kwds.get("header", "infer") == "infer":\n kwds["header"] = 0 if kwds.get("names") is None else None\n\n self.orig_options = kwds\n\n # miscellanea\n self._currow = 0\n\n options = self._get_options_with_defaults(engine)\n options["storage_options"] = kwds.get("storage_options", None)\n\n self.chunksize = options.pop("chunksize", None)\n self.nrows = options.pop("nrows", None)\n\n self._check_file_or_buffer(f, engine)\n self.options, self.engine = self._clean_options(options, engine)\n\n if "has_index_names" in kwds:\n self.options["has_index_names"] = kwds["has_index_names"]\n\n self.handles: IOHandles | None = None\n self._engine = self._make_engine(f, self.engine)\n\n def close(self) -> None:\n if self.handles is not None:\n self.handles.close()\n self._engine.close()\n\n def _get_options_with_defaults(self, engine: CSVEngine) -> dict[str, Any]:\n kwds = self.orig_options\n\n options = {}\n default: object | None\n\n for argname, default in parser_defaults.items():\n value = kwds.get(argname, default)\n\n # see gh-12935\n if (\n engine == "pyarrow"\n and argname in _pyarrow_unsupported\n and value != default\n and value != getattr(value, "value", default)\n ):\n raise ValueError(\n f"The {repr(argname)} option is not supported with the "\n 
f"'pyarrow' engine"\n )\n options[argname] = value\n\n for argname, default in _c_parser_defaults.items():\n if argname in kwds:\n value = kwds[argname]\n\n if engine != "c" and value != default:\n # TODO: Refactor this logic, its pretty convoluted\n if "python" in engine and argname not in _python_unsupported:\n pass\n elif "pyarrow" in engine and argname not in _pyarrow_unsupported:\n pass\n else:\n raise ValueError(\n f"The {repr(argname)} option is not supported with the "\n f"{repr(engine)} engine"\n )\n else:\n value = default\n options[argname] = value\n\n if engine == "python-fwf":\n for argname, default in _fwf_defaults.items():\n options[argname] = kwds.get(argname, default)\n\n return options\n\n def _check_file_or_buffer(self, f, engine: CSVEngine) -> None:\n # see gh-16530\n if is_file_like(f) and engine != "c" and not hasattr(f, "__iter__"):\n # The C engine doesn't need the file-like to have the "__iter__"\n # attribute. However, the Python engine needs "__iter__(...)"\n # when iterating through such an object, meaning it\n # needs to have that attribute\n raise ValueError(\n "The 'python' engine cannot iterate through this file buffer."\n )\n\n def _clean_options(\n self, options: dict[str, Any], engine: CSVEngine\n ) -> tuple[dict[str, Any], CSVEngine]:\n result = options.copy()\n\n fallback_reason = None\n\n # C engine not supported yet\n if engine == "c":\n if options["skipfooter"] > 0:\n fallback_reason = "the 'c' engine does not support skipfooter"\n engine = "python"\n\n sep = options["delimiter"]\n delim_whitespace = options["delim_whitespace"]\n\n if sep is None and not delim_whitespace:\n if engine in ("c", "pyarrow"):\n fallback_reason = (\n f"the '{engine}' engine does not support "\n "sep=None with delim_whitespace=False"\n )\n engine = "python"\n elif sep is not None and len(sep) > 1:\n if engine == "c" and sep == r"\s+":\n result["delim_whitespace"] = True\n del result["delimiter"]\n elif engine not in ("python", "python-fwf"):\n # 
wait until regex engine integrated\n fallback_reason = (\n f"the '{engine}' engine does not support "\n "regex separators (separators > 1 char and "\n r"different from '\s+' are interpreted as regex)"\n )\n engine = "python"\n elif delim_whitespace:\n if "python" in engine:\n result["delimiter"] = r"\s+"\n elif sep is not None:\n encodeable = True\n encoding = sys.getfilesystemencoding() or "utf-8"\n try:\n if len(sep.encode(encoding)) > 1:\n encodeable = False\n except UnicodeDecodeError:\n encodeable = False\n if not encodeable and engine not in ("python", "python-fwf"):\n fallback_reason = (\n f"the separator encoded in {encoding} "\n f"is > 1 char long, and the '{engine}' engine "\n "does not support such separators"\n )\n engine = "python"\n\n quotechar = options["quotechar"]\n if quotechar is not None and isinstance(quotechar, (str, bytes)):\n if (\n len(quotechar) == 1\n and ord(quotechar) > 127\n and engine not in ("python", "python-fwf")\n ):\n fallback_reason = (\n "ord(quotechar) > 127, meaning the "\n "quotechar is larger than one byte, "\n f"and the '{engine}' engine does not support such quotechars"\n )\n engine = "python"\n\n if fallback_reason and self._engine_specified:\n raise ValueError(fallback_reason)\n\n if engine == "c":\n for arg in _c_unsupported:\n del result[arg]\n\n if "python" in engine:\n for arg in _python_unsupported:\n if fallback_reason and result[arg] != _c_parser_defaults.get(arg):\n raise ValueError(\n "Falling back to the 'python' engine because "\n f"{fallback_reason}, but this causes {repr(arg)} to be "\n "ignored as it is not supported by the 'python' engine."\n )\n del result[arg]\n\n if fallback_reason:\n warnings.warn(\n (\n "Falling back to the 'python' engine because "\n f"{fallback_reason}; you can avoid this warning by specifying "\n "engine='python'."\n ),\n ParserWarning,\n stacklevel=find_stack_level(),\n )\n\n index_col = options["index_col"]\n names = options["names"]\n converters = options["converters"]\n 
na_values = options["na_values"]\n skiprows = options["skiprows"]\n\n validate_header_arg(options["header"])\n\n if index_col is True:\n raise ValueError("The value of index_col couldn't be 'True'")\n if is_index_col(index_col):\n if not isinstance(index_col, (list, tuple, np.ndarray)):\n index_col = [index_col]\n result["index_col"] = index_col\n\n names = list(names) if names is not None else names\n\n # type conversion-related\n if converters is not None:\n if not isinstance(converters, dict):\n raise TypeError(\n "Type converters must be a dict or subclass, "\n f"input was a {type(converters).__name__}"\n )\n else:\n converters = {}\n\n # Converting values to NA\n keep_default_na = options["keep_default_na"]\n floatify = engine != "pyarrow"\n na_values, na_fvalues = _clean_na_values(\n na_values, keep_default_na, floatify=floatify\n )\n\n # handle skiprows; this is internally handled by the\n # c-engine, so only need for python and pyarrow parsers\n if engine == "pyarrow":\n if not is_integer(skiprows) and skiprows is not None:\n # pyarrow expects skiprows to be passed as an integer\n raise ValueError(\n "skiprows argument must be an integer when using "\n "engine='pyarrow'"\n )\n else:\n if is_integer(skiprows):\n skiprows = list(range(skiprows))\n if skiprows is None:\n skiprows = set()\n elif not callable(skiprows):\n skiprows = set(skiprows)\n\n # put stuff back\n result["names"] = names\n result["converters"] = converters\n result["na_values"] = na_values\n result["na_fvalues"] = na_fvalues\n result["skiprows"] = skiprows\n\n return result, engine\n\n def __next__(self) -> DataFrame:\n try:\n return self.get_chunk()\n except StopIteration:\n self.close()\n raise\n\n def _make_engine(\n self,\n f: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str] | list | IO,\n engine: CSVEngine = "c",\n ) -> ParserBase:\n mapping: dict[str, type[ParserBase]] = {\n "c": CParserWrapper,\n "python": PythonParser,\n "pyarrow": ArrowParserWrapper,\n "python-fwf": 
FixedWidthFieldParser,\n }\n if engine not in mapping:\n raise ValueError(\n f"Unknown engine: {engine} (valid options are {mapping.keys()})"\n )\n if not isinstance(f, list):\n # open file here\n is_text = True\n mode = "r"\n if engine == "pyarrow":\n is_text = False\n mode = "rb"\n elif (\n engine == "c"\n and self.options.get("encoding", "utf-8") == "utf-8"\n and isinstance(stringify_path(f), str)\n ):\n # c engine can decode utf-8 bytes, adding TextIOWrapper makes\n # the c-engine especially for memory_map=True far slower\n is_text = False\n if "b" not in mode:\n mode += "b"\n self.handles = get_handle(\n f,\n mode,\n encoding=self.options.get("encoding", None),\n compression=self.options.get("compression", None),\n memory_map=self.options.get("memory_map", False),\n is_text=is_text,\n errors=self.options.get("encoding_errors", "strict"),\n storage_options=self.options.get("storage_options", None),\n )\n assert self.handles is not None\n f = self.handles.handle\n\n elif engine != "python":\n msg = f"Invalid file path or buffer object type: {type(f)}"\n raise ValueError(msg)\n\n try:\n return mapping[engine](f, **self.options)\n except Exception:\n if self.handles is not None:\n self.handles.close()\n raise\n\n def _failover_to_python(self) -> None:\n raise AbstractMethodError(self)\n\n def read(self, nrows: int | None = None) -> DataFrame:\n if self.engine == "pyarrow":\n try:\n # error: "ParserBase" has no attribute "read"\n df = self._engine.read() # type: ignore[attr-defined]\n except Exception:\n self.close()\n raise\n else:\n nrows = validate_integer("nrows", nrows)\n try:\n # error: "ParserBase" has no attribute "read"\n (\n index,\n columns,\n col_dict,\n ) = self._engine.read( # type: ignore[attr-defined]\n nrows\n )\n except Exception:\n self.close()\n raise\n\n if index is None:\n if col_dict:\n # Any column is actually fine:\n new_rows = len(next(iter(col_dict.values())))\n index = RangeIndex(self._currow, self._currow + new_rows)\n else:\n new_rows 
= 0\n else:\n new_rows = len(index)\n\n if hasattr(self, "orig_options"):\n dtype_arg = self.orig_options.get("dtype", None)\n else:\n dtype_arg = None\n\n if isinstance(dtype_arg, dict):\n dtype = defaultdict(lambda: None) # type: ignore[var-annotated]\n dtype.update(dtype_arg)\n elif dtype_arg is not None and pandas_dtype(dtype_arg) in (\n np.str_,\n np.object_,\n ):\n dtype = defaultdict(lambda: dtype_arg)\n else:\n dtype = None\n\n if dtype is not None:\n new_col_dict = {}\n for k, v in col_dict.items():\n d = (\n dtype[k]\n if pandas_dtype(dtype[k]) in (np.str_, np.object_)\n else None\n )\n new_col_dict[k] = Series(v, index=index, dtype=d, copy=False)\n else:\n new_col_dict = col_dict\n\n df = DataFrame(\n new_col_dict,\n columns=columns,\n index=index,\n copy=not using_copy_on_write(),\n )\n\n self._currow += new_rows\n return df\n\n def get_chunk(self, size: int | None = None) -> DataFrame:\n if size is None:\n size = self.chunksize\n if self.nrows is not None:\n if self._currow >= self.nrows:\n raise StopIteration\n size = min(size, self.nrows - self._currow)\n return self.read(nrows=size)\n\n def __enter__(self) -> Self:\n return self\n\n def __exit__(\n self,\n exc_type: type[BaseException] | None,\n exc_value: BaseException | None,\n traceback: TracebackType | None,\n ) -> None:\n self.close()\n\n\ndef TextParser(*args, **kwds) -> TextFileReader:\n """\n Converts lists of lists/tuples into DataFrames with proper type inference\n and optional (e.g. string to datetime) conversion. Also enables iterating\n lazily over chunks of large files\n\n Parameters\n ----------\n data : file-like object or list\n delimiter : separator character to use\n dialect : str or csv.Dialect instance, optional\n Ignored if delimiter is longer than 1 character\n names : sequence, default\n header : int, default 0\n Row to use to parse column labels. Defaults to the first row. 
Prior\n rows will be discarded\n index_col : int or list, optional\n Column or columns to use as the (possibly hierarchical) index\n has_index_names: bool, default False\n True if the cols defined in index_col have an index name and are\n not in the header.\n na_values : scalar, str, list-like, or dict, optional\n Additional strings to recognize as NA/NaN.\n keep_default_na : bool, default True\n thousands : str, optional\n Thousands separator\n comment : str, optional\n Comment out remainder of line\n parse_dates : bool, default False\n keep_date_col : bool, default False\n date_parser : function, optional\n\n .. deprecated:: 2.0.0\n date_format : str or dict of column -> format, default ``None``\n\n .. versionadded:: 2.0.0\n skiprows : list of integers\n Row numbers to skip\n skipfooter : int\n Number of line at bottom of file to skip\n converters : dict, optional\n Dict of functions for converting values in certain columns. Keys can\n either be integers or column labels, values are functions that take one\n input argument, the cell (not column) content, and return the\n transformed content.\n encoding : str, optional\n Encoding to use for UTF when reading/writing (ex. 'utf-8')\n float_precision : str, optional\n Specifies which converter the C engine should use for floating-point\n values. The options are `None` or `high` for the ordinary converter,\n `legacy` for the original lower precision pandas converter, and\n `round_trip` for the round-trip converter.\n """\n kwds["engine"] = "python"\n return TextFileReader(*args, **kwds)\n\n\ndef _clean_na_values(na_values, keep_default_na: bool = True, floatify: bool = True):\n na_fvalues: set | dict\n if na_values is None:\n if keep_default_na:\n na_values = STR_NA_VALUES\n else:\n na_values = set()\n na_fvalues = set()\n elif isinstance(na_values, dict):\n old_na_values = na_values.copy()\n na_values = {} # Prevent aliasing.\n\n # Convert the values in the na_values dictionary\n # into array-likes for further use. 
This is also\n # where we append the default NaN values, provided\n # that `keep_default_na=True`.\n for k, v in old_na_values.items():\n if not is_list_like(v):\n v = [v]\n\n if keep_default_na:\n v = set(v) | STR_NA_VALUES\n\n na_values[k] = v\n na_fvalues = {k: _floatify_na_values(v) for k, v in na_values.items()}\n else:\n if not is_list_like(na_values):\n na_values = [na_values]\n na_values = _stringify_na_values(na_values, floatify)\n if keep_default_na:\n na_values = na_values | STR_NA_VALUES\n\n na_fvalues = _floatify_na_values(na_values)\n\n return na_values, na_fvalues\n\n\ndef _floatify_na_values(na_values):\n # create float versions of the na_values\n result = set()\n for v in na_values:\n try:\n v = float(v)\n if not np.isnan(v):\n result.add(v)\n except (TypeError, ValueError, OverflowError):\n pass\n return result\n\n\ndef _stringify_na_values(na_values, floatify: bool):\n """return a stringified and numeric for these values"""\n result: list[str | float] = []\n for x in na_values:\n result.append(str(x))\n result.append(x)\n try:\n v = float(x)\n\n # we are like 999 here\n if v == int(v):\n v = int(v)\n result.append(f"{v}.0")\n result.append(str(v))\n\n if floatify:\n result.append(v)\n except (TypeError, ValueError, OverflowError):\n pass\n if floatify:\n try:\n result.append(int(x))\n except (TypeError, ValueError, OverflowError):\n pass\n return set(result)\n\n\ndef _refine_defaults_read(\n dialect: str | csv.Dialect | None,\n delimiter: str | None | lib.NoDefault,\n delim_whitespace: bool,\n engine: CSVEngine | None,\n sep: str | None | lib.NoDefault,\n on_bad_lines: str | Callable,\n names: Sequence[Hashable] | None | lib.NoDefault,\n defaults: dict[str, Any],\n dtype_backend: DtypeBackend | lib.NoDefault,\n):\n """Validate/refine default values of input parameters of read_csv, read_table.\n\n Parameters\n ----------\n dialect : str or csv.Dialect\n If provided, this parameter will override values (default or not) for the\n following 
parameters: `delimiter`, `doublequote`, `escapechar`,\n `skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to\n override values, a ParserWarning will be issued. See csv.Dialect\n documentation for more details.\n delimiter : str or object\n Alias for sep.\n delim_whitespace : bool\n Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be\n used as the sep. Equivalent to setting ``sep='\\s+'``. If this option\n is set to True, nothing should be passed in for the ``delimiter``\n parameter.\n\n .. deprecated:: 2.2.0\n Use ``sep="\\s+"`` instead.\n engine : {{'c', 'python'}}\n Parser engine to use. The C engine is faster while the python engine is\n currently more feature-complete.\n sep : str or object\n A delimiter provided by the user (str) or a sentinel value, i.e.\n pandas._libs.lib.no_default.\n on_bad_lines : str, callable\n An option for handling bad lines or a sentinel value(None).\n names : array-like, optional\n List of column names to use. If the file contains a header row,\n then you should explicitly pass ``header=0`` to override the column names.\n Duplicates in this list are not allowed.\n defaults: dict\n Default values of input parameters.\n\n Returns\n -------\n kwds : dict\n Input parameters with correct values.\n\n Raises\n ------\n ValueError :\n If a delimiter was specified with ``sep`` (or ``delimiter``) and\n ``delim_whitespace=True``.\n """\n # fix types for sep, delimiter to Union(str, Any)\n delim_default = defaults["delimiter"]\n kwds: dict[str, Any] = {}\n # gh-23761\n #\n # When a dialect is passed, it overrides any of the overlapping\n # parameters passed in directly. 
We don't want to warn if the\n # default parameters were passed in (since it probably means\n # that the user didn't pass them in explicitly in the first place).\n #\n # "delimiter" is the annoying corner case because we alias it to\n # "sep" before doing comparison to the dialect values later on.\n # Thus, we need a flag to indicate that we need to "override"\n # the comparison to dialect values by checking if default values\n # for BOTH "delimiter" and "sep" were provided.\n if dialect is not None:\n kwds["sep_override"] = delimiter is None and (\n sep is lib.no_default or sep == delim_default\n )\n\n if delimiter and (sep is not lib.no_default):\n raise ValueError("Specified a sep and a delimiter; you can only specify one.")\n\n kwds["names"] = None if names is lib.no_default else names\n\n # Alias sep -> delimiter.\n if delimiter is None:\n delimiter = sep\n\n if delim_whitespace and (delimiter is not lib.no_default):\n raise ValueError(\n "Specified a delimiter with both sep and "\n "delim_whitespace=True; you can only specify one."\n )\n\n if delimiter == "\n":\n raise ValueError(\n r"Specified \n as separator or delimiter. This forces the python engine "\n "which does not accept a line terminator. 
Hence it is not allowed to use "\n "the line terminator as separator.",\n )\n\n if delimiter is lib.no_default:\n # assign default separator value\n kwds["delimiter"] = delim_default\n else:\n kwds["delimiter"] = delimiter\n\n if engine is not None:\n kwds["engine_specified"] = True\n else:\n kwds["engine"] = "c"\n kwds["engine_specified"] = False\n\n if on_bad_lines == "error":\n kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.ERROR\n elif on_bad_lines == "warn":\n kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.WARN\n elif on_bad_lines == "skip":\n kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.SKIP\n elif callable(on_bad_lines):\n if engine not in ["python", "pyarrow"]:\n raise ValueError(\n "on_bad_line can only be a callable function "\n "if engine='python' or 'pyarrow'"\n )\n kwds["on_bad_lines"] = on_bad_lines\n else:\n raise ValueError(f"Argument {on_bad_lines} is invalid for on_bad_lines")\n\n check_dtype_backend(dtype_backend)\n\n kwds["dtype_backend"] = dtype_backend\n\n return kwds\n\n\ndef _extract_dialect(kwds: dict[str, Any]) -> csv.Dialect | None:\n """\n Extract concrete csv dialect instance.\n\n Returns\n -------\n csv.Dialect or None\n """\n if kwds.get("dialect") is None:\n return None\n\n dialect = kwds["dialect"]\n if dialect in csv.list_dialects():\n dialect = csv.get_dialect(dialect)\n\n _validate_dialect(dialect)\n\n return dialect\n\n\nMANDATORY_DIALECT_ATTRS = (\n "delimiter",\n "doublequote",\n "escapechar",\n "skipinitialspace",\n "quotechar",\n "quoting",\n)\n\n\ndef _validate_dialect(dialect: csv.Dialect) -> None:\n """\n Validate csv dialect instance.\n\n Raises\n ------\n ValueError\n If incorrect dialect is provided.\n """\n for param in MANDATORY_DIALECT_ATTRS:\n if not hasattr(dialect, param):\n raise ValueError(f"Invalid dialect {dialect} provided")\n\n\ndef _merge_with_dialect_properties(\n dialect: csv.Dialect,\n defaults: dict[str, Any],\n) -> dict[str, Any]:\n """\n Merge default kwargs in TextFileReader 
with dialect parameters.\n\n Parameters\n ----------\n dialect : csv.Dialect\n Concrete csv dialect. See csv.Dialect documentation for more details.\n defaults : dict\n Keyword arguments passed to TextFileReader.\n\n Returns\n -------\n kwds : dict\n Updated keyword arguments, merged with dialect parameters.\n """\n kwds = defaults.copy()\n\n for param in MANDATORY_DIALECT_ATTRS:\n dialect_val = getattr(dialect, param)\n\n parser_default = parser_defaults[param]\n provided = kwds.get(param, parser_default)\n\n # Messages for conflicting values between the dialect\n # instance and the actual parameters provided.\n conflict_msgs = []\n\n # Don't warn if the default parameter was passed in,\n # even if it conflicts with the dialect (gh-23761).\n if provided not in (parser_default, dialect_val):\n msg = (\n f"Conflicting values for '{param}': '{provided}' was "\n f"provided, but the dialect specifies '{dialect_val}'. "\n "Using the dialect-specified value."\n )\n\n # Annoying corner case for not warning about\n # conflicts between dialect and delimiter parameter.\n # Refer to the outer "_read_" function for more info.\n if not (param == "delimiter" and kwds.pop("sep_override", False)):\n conflict_msgs.append(msg)\n\n if conflict_msgs:\n warnings.warn(\n "\n\n".join(conflict_msgs), ParserWarning, stacklevel=find_stack_level()\n )\n kwds[param] = dialect_val\n return kwds\n\n\ndef _validate_skipfooter(kwds: dict[str, Any]) -> None:\n """\n Check whether skipfooter is compatible with other kwargs in TextFileReader.\n\n Parameters\n ----------\n kwds : dict\n Keyword arguments passed to TextFileReader.\n\n Raises\n ------\n ValueError\n If skipfooter is not compatible with other parameters.\n """\n if kwds.get("skipfooter"):\n if kwds.get("iterator") or kwds.get("chunksize"):\n raise ValueError("'skipfooter' not supported for iteration")\n if kwds.get("nrows"):\n raise ValueError("'skipfooter' not supported with 'nrows'")\n
.venv\Lib\site-packages\pandas\io\parsers\readers.py
readers.py
Python
87,157
0.75
0.12757
0.063267
node-utils
6
2024-09-23T04:56:16.238429
BSD-3-Clause
false
6081e92b13a850a748077dacef9ce8df
from pandas.io.parsers.readers import (\n TextFileReader,\n TextParser,\n read_csv,\n read_fwf,\n read_table,\n)\n\n__all__ = ["TextFileReader", "TextParser", "read_csv", "read_fwf", "read_table"]\n
.venv\Lib\site-packages\pandas\io\parsers\__init__.py
__init__.py
Python
204
0.85
0
0
node-utils
573
2024-06-23T00:23:53.995461
MIT
false
0d5da962fceb47f4dd2ce7f635a26446
\n\n
.venv\Lib\site-packages\pandas\io\parsers\__pycache__\arrow_parser_wrapper.cpython-313.pyc
arrow_parser_wrapper.cpython-313.pyc
Other
12,937
0.95
0.02069
0
vue-tools
204
2023-12-12T08:59:36.187456
BSD-3-Clause
false
2b501385b7622dc8077609b82378a8c6
\n\n
.venv\Lib\site-packages\pandas\io\parsers\__pycache__\base_parser.cpython-313.pyc
base_parser.cpython-313.pyc
Other
54,078
0.95
0.042781
0.011472
node-utils
19
2024-12-09T11:37:17.113484
Apache-2.0
false
9ead159ac33a7ab21e3786a79926c2b4
\n\n
.venv\Lib\site-packages\pandas\io\parsers\__pycache__\c_parser_wrapper.cpython-313.pyc
c_parser_wrapper.cpython-313.pyc
Other
15,079
0.95
0
0.017857
react-lib
739
2025-06-28T12:57:06.015331
Apache-2.0
false
da84b77110acda71439d528686a426cc
\n\n
.venv\Lib\site-packages\pandas\io\parsers\__pycache__\python_parser.cpython-313.pyc
python_parser.cpython-313.pyc
Other
50,940
0.95
0.024742
0.004425
vue-tools
954
2024-11-21T23:40:41.717277
MIT
false
37a2ab07c550a3dcd1f50cb90856a05e
\n\n
.venv\Lib\site-packages\pandas\io\parsers\__pycache__\readers.cpython-313.pyc
readers.cpython-313.pyc
Other
83,022
0.75
0.098933
0.022556
react-lib
823
2023-12-04T23:18:14.312372
GPL-3.0
false
b010066624766ad86bb135627426dbfe
\n\n
.venv\Lib\site-packages\pandas\io\parsers\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
371
0.7
0
0
react-lib
918
2023-08-03T17:50:30.817959
Apache-2.0
false
612f1423be665ae0ef1e0eb23827d364
"""\nRead SAS7BDAT files\n\nBased on code written by Jared Hobbs:\n https://bitbucket.org/jaredhobbs/sas7bdat\n\nSee also:\n https://github.com/BioStatMatt/sas7bdat\n\nPartial documentation of the file format:\n https://cran.r-project.org/package=sas7bdat/vignettes/sas7bdat.pdf\n\nReference for binary data compression:\n http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm\n"""\nfrom __future__ import annotations\n\nfrom collections import abc\nfrom datetime import (\n datetime,\n timedelta,\n)\nimport sys\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\n\nfrom pandas._config import get_option\n\nfrom pandas._libs.byteswap import (\n read_double_with_byteswap,\n read_float_with_byteswap,\n read_uint16_with_byteswap,\n read_uint32_with_byteswap,\n read_uint64_with_byteswap,\n)\nfrom pandas._libs.sas import (\n Parser,\n get_subheader_index,\n)\nfrom pandas._libs.tslibs.conversion import cast_from_unit_vectorized\nfrom pandas.errors import EmptyDataError\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Timestamp,\n isna,\n)\n\nfrom pandas.io.common import get_handle\nimport pandas.io.sas.sas_constants as const\nfrom pandas.io.sas.sasreader import ReaderBase\n\nif TYPE_CHECKING:\n from pandas._typing import (\n CompressionOptions,\n FilePath,\n ReadBuffer,\n )\n\n\n_unix_origin = Timestamp("1970-01-01")\n_sas_origin = Timestamp("1960-01-01")\n\n\ndef _parse_datetime(sas_datetime: float, unit: str):\n if isna(sas_datetime):\n return pd.NaT\n\n if unit == "s":\n return datetime(1960, 1, 1) + timedelta(seconds=sas_datetime)\n\n elif unit == "d":\n return datetime(1960, 1, 1) + timedelta(days=sas_datetime)\n\n else:\n raise ValueError("unit must be 'd' or 's'")\n\n\ndef _convert_datetimes(sas_datetimes: pd.Series, unit: str) -> pd.Series:\n """\n Convert to Timestamp if possible, otherwise to datetime.datetime.\n SAS float64 lacks precision for more than ms resolution so the fit\n to datetime.datetime is 
ok.\n\n Parameters\n ----------\n sas_datetimes : {Series, Sequence[float]}\n Dates or datetimes in SAS\n unit : {'d', 's'}\n "d" if the floats represent dates, "s" for datetimes\n\n Returns\n -------\n Series\n Series of datetime64 dtype or datetime.datetime.\n """\n td = (_sas_origin - _unix_origin).as_unit("s")\n if unit == "s":\n millis = cast_from_unit_vectorized(\n sas_datetimes._values, unit="s", out_unit="ms"\n )\n dt64ms = millis.view("M8[ms]") + td\n return pd.Series(dt64ms, index=sas_datetimes.index, copy=False)\n else:\n vals = np.array(sas_datetimes, dtype="M8[D]") + td\n return pd.Series(vals, dtype="M8[s]", index=sas_datetimes.index, copy=False)\n\n\nclass _Column:\n col_id: int\n name: str | bytes\n label: str | bytes\n format: str | bytes\n ctype: bytes\n length: int\n\n def __init__(\n self,\n col_id: int,\n # These can be bytes when convert_header_text is False\n name: str | bytes,\n label: str | bytes,\n format: str | bytes,\n ctype: bytes,\n length: int,\n ) -> None:\n self.col_id = col_id\n self.name = name\n self.label = label\n self.format = format\n self.ctype = ctype\n self.length = length\n\n\n# SAS7BDAT represents a SAS data file in SAS7BDAT format.\nclass SAS7BDATReader(ReaderBase, abc.Iterator):\n """\n Read SAS files in SAS7BDAT format.\n\n Parameters\n ----------\n path_or_buf : path name or buffer\n Name of SAS file or file-like object pointing to SAS file\n contents.\n index : column identifier, defaults to None\n Column to use as index.\n convert_dates : bool, defaults to True\n Attempt to convert dates to Pandas datetime values. Note that\n some rarely used SAS date formats may be unsupported.\n blank_missing : bool, defaults to True\n Convert empty strings to missing values (SAS uses blanks to\n indicate missing character variables).\n chunksize : int, defaults to None\n Return SAS7BDATReader object for iterations, returns chunks\n with given number of lines.\n encoding : str, 'infer', defaults to None\n String encoding acc. 
to Python standard encodings,\n encoding='infer' tries to detect the encoding from the file header,\n encoding=None will leave the data in binary format.\n convert_text : bool, defaults to True\n If False, text variables are left as raw bytes.\n convert_header_text : bool, defaults to True\n If False, header text, including column names, are left as raw\n bytes.\n """\n\n _int_length: int\n _cached_page: bytes | None\n\n def __init__(\n self,\n path_or_buf: FilePath | ReadBuffer[bytes],\n index=None,\n convert_dates: bool = True,\n blank_missing: bool = True,\n chunksize: int | None = None,\n encoding: str | None = None,\n convert_text: bool = True,\n convert_header_text: bool = True,\n compression: CompressionOptions = "infer",\n ) -> None:\n self.index = index\n self.convert_dates = convert_dates\n self.blank_missing = blank_missing\n self.chunksize = chunksize\n self.encoding = encoding\n self.convert_text = convert_text\n self.convert_header_text = convert_header_text\n\n self.default_encoding = "latin-1"\n self.compression = b""\n self.column_names_raw: list[bytes] = []\n self.column_names: list[str | bytes] = []\n self.column_formats: list[str | bytes] = []\n self.columns: list[_Column] = []\n\n self._current_page_data_subheader_pointers: list[tuple[int, int]] = []\n self._cached_page = None\n self._column_data_lengths: list[int] = []\n self._column_data_offsets: list[int] = []\n self._column_types: list[bytes] = []\n\n self._current_row_in_file_index = 0\n self._current_row_on_page_index = 0\n self._current_row_in_file_index = 0\n\n self.handles = get_handle(\n path_or_buf, "rb", is_text=False, compression=compression\n )\n\n self._path_or_buf = self.handles.handle\n\n # Same order as const.SASIndex\n self._subheader_processors = [\n self._process_rowsize_subheader,\n self._process_columnsize_subheader,\n self._process_subheader_counts,\n self._process_columntext_subheader,\n self._process_columnname_subheader,\n self._process_columnattributes_subheader,\n 
self._process_format_subheader,\n self._process_columnlist_subheader,\n None, # Data\n ]\n\n try:\n self._get_properties()\n self._parse_metadata()\n except Exception:\n self.close()\n raise\n\n def column_data_lengths(self) -> np.ndarray:\n """Return a numpy int64 array of the column data lengths"""\n return np.asarray(self._column_data_lengths, dtype=np.int64)\n\n def column_data_offsets(self) -> np.ndarray:\n """Return a numpy int64 array of the column offsets"""\n return np.asarray(self._column_data_offsets, dtype=np.int64)\n\n def column_types(self) -> np.ndarray:\n """\n Returns a numpy character array of the column types:\n s (string) or d (double)\n """\n return np.asarray(self._column_types, dtype=np.dtype("S1"))\n\n def close(self) -> None:\n self.handles.close()\n\n def _get_properties(self) -> None:\n # Check magic number\n self._path_or_buf.seek(0)\n self._cached_page = self._path_or_buf.read(288)\n if self._cached_page[0 : len(const.magic)] != const.magic:\n raise ValueError("magic number mismatch (not a SAS file?)")\n\n # Get alignment information\n buf = self._read_bytes(const.align_1_offset, const.align_1_length)\n if buf == const.u64_byte_checker_value:\n self.U64 = True\n self._int_length = 8\n self._page_bit_offset = const.page_bit_offset_x64\n self._subheader_pointer_length = const.subheader_pointer_length_x64\n else:\n self.U64 = False\n self._page_bit_offset = const.page_bit_offset_x86\n self._subheader_pointer_length = const.subheader_pointer_length_x86\n self._int_length = 4\n buf = self._read_bytes(const.align_2_offset, const.align_2_length)\n if buf == const.align_1_checker_value:\n align1 = const.align_2_value\n else:\n align1 = 0\n\n # Get endianness information\n buf = self._read_bytes(const.endianness_offset, const.endianness_length)\n if buf == b"\x01":\n self.byte_order = "<"\n self.need_byteswap = sys.byteorder == "big"\n else:\n self.byte_order = ">"\n self.need_byteswap = sys.byteorder == "little"\n\n # Get encoding information\n 
buf = self._read_bytes(const.encoding_offset, const.encoding_length)[0]\n if buf in const.encoding_names:\n self.inferred_encoding = const.encoding_names[buf]\n if self.encoding == "infer":\n self.encoding = self.inferred_encoding\n else:\n self.inferred_encoding = f"unknown (code={buf})"\n\n # Timestamp is epoch 01/01/1960\n epoch = datetime(1960, 1, 1)\n x = self._read_float(\n const.date_created_offset + align1, const.date_created_length\n )\n self.date_created = epoch + pd.to_timedelta(x, unit="s")\n x = self._read_float(\n const.date_modified_offset + align1, const.date_modified_length\n )\n self.date_modified = epoch + pd.to_timedelta(x, unit="s")\n\n self.header_length = self._read_uint(\n const.header_size_offset + align1, const.header_size_length\n )\n\n # Read the rest of the header into cached_page.\n buf = self._path_or_buf.read(self.header_length - 288)\n self._cached_page += buf\n # error: Argument 1 to "len" has incompatible type "Optional[bytes]";\n # expected "Sized"\n if len(self._cached_page) != self.header_length: # type: ignore[arg-type]\n raise ValueError("The SAS7BDAT file appears to be truncated.")\n\n self._page_length = self._read_uint(\n const.page_size_offset + align1, const.page_size_length\n )\n\n def __next__(self) -> DataFrame:\n da = self.read(nrows=self.chunksize or 1)\n if da.empty:\n self.close()\n raise StopIteration\n return da\n\n # Read a single float of the given width (4 or 8).\n def _read_float(self, offset: int, width: int):\n assert self._cached_page is not None\n if width == 4:\n return read_float_with_byteswap(\n self._cached_page, offset, self.need_byteswap\n )\n elif width == 8:\n return read_double_with_byteswap(\n self._cached_page, offset, self.need_byteswap\n )\n else:\n self.close()\n raise ValueError("invalid float width")\n\n # Read a single unsigned integer of the given width (1, 2, 4 or 8).\n def _read_uint(self, offset: int, width: int) -> int:\n assert self._cached_page is not None\n if width == 1:\n 
return self._read_bytes(offset, 1)[0]\n elif width == 2:\n return read_uint16_with_byteswap(\n self._cached_page, offset, self.need_byteswap\n )\n elif width == 4:\n return read_uint32_with_byteswap(\n self._cached_page, offset, self.need_byteswap\n )\n elif width == 8:\n return read_uint64_with_byteswap(\n self._cached_page, offset, self.need_byteswap\n )\n else:\n self.close()\n raise ValueError("invalid int width")\n\n def _read_bytes(self, offset: int, length: int):\n assert self._cached_page is not None\n if offset + length > len(self._cached_page):\n self.close()\n raise ValueError("The cached page is too small.")\n return self._cached_page[offset : offset + length]\n\n def _read_and_convert_header_text(self, offset: int, length: int) -> str | bytes:\n return self._convert_header_text(\n self._read_bytes(offset, length).rstrip(b"\x00 ")\n )\n\n def _parse_metadata(self) -> None:\n done = False\n while not done:\n self._cached_page = self._path_or_buf.read(self._page_length)\n if len(self._cached_page) <= 0:\n break\n if len(self._cached_page) != self._page_length:\n raise ValueError("Failed to read a meta data page from the SAS file.")\n done = self._process_page_meta()\n\n def _process_page_meta(self) -> bool:\n self._read_page_header()\n pt = const.page_meta_types + [const.page_amd_type, const.page_mix_type]\n if self._current_page_type in pt:\n self._process_page_metadata()\n is_data_page = self._current_page_type == const.page_data_type\n is_mix_page = self._current_page_type == const.page_mix_type\n return bool(\n is_data_page\n or is_mix_page\n or self._current_page_data_subheader_pointers != []\n )\n\n def _read_page_header(self) -> None:\n bit_offset = self._page_bit_offset\n tx = const.page_type_offset + bit_offset\n self._current_page_type = (\n self._read_uint(tx, const.page_type_length) & const.page_type_mask2\n )\n tx = const.block_count_offset + bit_offset\n self._current_page_block_count = self._read_uint(tx, const.block_count_length)\n tx = 
const.subheader_count_offset + bit_offset\n self._current_page_subheaders_count = self._read_uint(\n tx, const.subheader_count_length\n )\n\n def _process_page_metadata(self) -> None:\n bit_offset = self._page_bit_offset\n\n for i in range(self._current_page_subheaders_count):\n offset = const.subheader_pointers_offset + bit_offset\n total_offset = offset + self._subheader_pointer_length * i\n\n subheader_offset = self._read_uint(total_offset, self._int_length)\n total_offset += self._int_length\n\n subheader_length = self._read_uint(total_offset, self._int_length)\n total_offset += self._int_length\n\n subheader_compression = self._read_uint(total_offset, 1)\n total_offset += 1\n\n subheader_type = self._read_uint(total_offset, 1)\n\n if (\n subheader_length == 0\n or subheader_compression == const.truncated_subheader_id\n ):\n continue\n\n subheader_signature = self._read_bytes(subheader_offset, self._int_length)\n subheader_index = get_subheader_index(subheader_signature)\n subheader_processor = self._subheader_processors[subheader_index]\n\n if subheader_processor is None:\n f1 = subheader_compression in (const.compressed_subheader_id, 0)\n f2 = subheader_type == const.compressed_subheader_type\n if self.compression and f1 and f2:\n self._current_page_data_subheader_pointers.append(\n (subheader_offset, subheader_length)\n )\n else:\n self.close()\n raise ValueError(\n f"Unknown subheader signature {subheader_signature}"\n )\n else:\n subheader_processor(subheader_offset, subheader_length)\n\n def _process_rowsize_subheader(self, offset: int, length: int) -> None:\n int_len = self._int_length\n lcs_offset = offset\n lcp_offset = offset\n if self.U64:\n lcs_offset += 682\n lcp_offset += 706\n else:\n lcs_offset += 354\n lcp_offset += 378\n\n self.row_length = self._read_uint(\n offset + const.row_length_offset_multiplier * int_len,\n int_len,\n )\n self.row_count = self._read_uint(\n offset + const.row_count_offset_multiplier * int_len,\n int_len,\n )\n 
self.col_count_p1 = self._read_uint(\n offset + const.col_count_p1_multiplier * int_len, int_len\n )\n self.col_count_p2 = self._read_uint(\n offset + const.col_count_p2_multiplier * int_len, int_len\n )\n mx = const.row_count_on_mix_page_offset_multiplier * int_len\n self._mix_page_row_count = self._read_uint(offset + mx, int_len)\n self._lcs = self._read_uint(lcs_offset, 2)\n self._lcp = self._read_uint(lcp_offset, 2)\n\n def _process_columnsize_subheader(self, offset: int, length: int) -> None:\n int_len = self._int_length\n offset += int_len\n self.column_count = self._read_uint(offset, int_len)\n if self.col_count_p1 + self.col_count_p2 != self.column_count:\n print(\n f"Warning: column count mismatch ({self.col_count_p1} + "\n f"{self.col_count_p2} != {self.column_count})\n"\n )\n\n # Unknown purpose\n def _process_subheader_counts(self, offset: int, length: int) -> None:\n pass\n\n def _process_columntext_subheader(self, offset: int, length: int) -> None:\n offset += self._int_length\n text_block_size = self._read_uint(offset, const.text_block_size_length)\n\n buf = self._read_bytes(offset, text_block_size)\n cname_raw = buf[0:text_block_size].rstrip(b"\x00 ")\n self.column_names_raw.append(cname_raw)\n\n if len(self.column_names_raw) == 1:\n compression_literal = b""\n for cl in const.compression_literals:\n if cl in cname_raw:\n compression_literal = cl\n self.compression = compression_literal\n offset -= self._int_length\n\n offset1 = offset + 16\n if self.U64:\n offset1 += 4\n\n buf = self._read_bytes(offset1, self._lcp)\n compression_literal = buf.rstrip(b"\x00")\n if compression_literal == b"":\n self._lcs = 0\n offset1 = offset + 32\n if self.U64:\n offset1 += 4\n buf = self._read_bytes(offset1, self._lcp)\n self.creator_proc = buf[0 : self._lcp]\n elif compression_literal == const.rle_compression:\n offset1 = offset + 40\n if self.U64:\n offset1 += 4\n buf = self._read_bytes(offset1, self._lcp)\n self.creator_proc = buf[0 : self._lcp]\n elif 
self._lcs > 0:\n self._lcp = 0\n offset1 = offset + 16\n if self.U64:\n offset1 += 4\n buf = self._read_bytes(offset1, self._lcs)\n self.creator_proc = buf[0 : self._lcp]\n if hasattr(self, "creator_proc"):\n self.creator_proc = self._convert_header_text(self.creator_proc)\n\n def _process_columnname_subheader(self, offset: int, length: int) -> None:\n int_len = self._int_length\n offset += int_len\n column_name_pointers_count = (length - 2 * int_len - 12) // 8\n for i in range(column_name_pointers_count):\n text_subheader = (\n offset\n + const.column_name_pointer_length * (i + 1)\n + const.column_name_text_subheader_offset\n )\n col_name_offset = (\n offset\n + const.column_name_pointer_length * (i + 1)\n + const.column_name_offset_offset\n )\n col_name_length = (\n offset\n + const.column_name_pointer_length * (i + 1)\n + const.column_name_length_offset\n )\n\n idx = self._read_uint(\n text_subheader, const.column_name_text_subheader_length\n )\n col_offset = self._read_uint(\n col_name_offset, const.column_name_offset_length\n )\n col_len = self._read_uint(col_name_length, const.column_name_length_length)\n\n name_raw = self.column_names_raw[idx]\n cname = name_raw[col_offset : col_offset + col_len]\n self.column_names.append(self._convert_header_text(cname))\n\n def _process_columnattributes_subheader(self, offset: int, length: int) -> None:\n int_len = self._int_length\n column_attributes_vectors_count = (length - 2 * int_len - 12) // (int_len + 8)\n for i in range(column_attributes_vectors_count):\n col_data_offset = (\n offset + int_len + const.column_data_offset_offset + i * (int_len + 8)\n )\n col_data_len = (\n offset\n + 2 * int_len\n + const.column_data_length_offset\n + i * (int_len + 8)\n )\n col_types = (\n offset + 2 * int_len + const.column_type_offset + i * (int_len + 8)\n )\n\n x = self._read_uint(col_data_offset, int_len)\n self._column_data_offsets.append(x)\n\n x = self._read_uint(col_data_len, const.column_data_length_length)\n 
self._column_data_lengths.append(x)\n\n x = self._read_uint(col_types, const.column_type_length)\n self._column_types.append(b"d" if x == 1 else b"s")\n\n def _process_columnlist_subheader(self, offset: int, length: int) -> None:\n # unknown purpose\n pass\n\n def _process_format_subheader(self, offset: int, length: int) -> None:\n int_len = self._int_length\n text_subheader_format = (\n offset + const.column_format_text_subheader_index_offset + 3 * int_len\n )\n col_format_offset = offset + const.column_format_offset_offset + 3 * int_len\n col_format_len = offset + const.column_format_length_offset + 3 * int_len\n text_subheader_label = (\n offset + const.column_label_text_subheader_index_offset + 3 * int_len\n )\n col_label_offset = offset + const.column_label_offset_offset + 3 * int_len\n col_label_len = offset + const.column_label_length_offset + 3 * int_len\n\n x = self._read_uint(\n text_subheader_format, const.column_format_text_subheader_index_length\n )\n format_idx = min(x, len(self.column_names_raw) - 1)\n\n format_start = self._read_uint(\n col_format_offset, const.column_format_offset_length\n )\n format_len = self._read_uint(col_format_len, const.column_format_length_length)\n\n label_idx = self._read_uint(\n text_subheader_label, const.column_label_text_subheader_index_length\n )\n label_idx = min(label_idx, len(self.column_names_raw) - 1)\n\n label_start = self._read_uint(\n col_label_offset, const.column_label_offset_length\n )\n label_len = self._read_uint(col_label_len, const.column_label_length_length)\n\n label_names = self.column_names_raw[label_idx]\n column_label = self._convert_header_text(\n label_names[label_start : label_start + label_len]\n )\n format_names = self.column_names_raw[format_idx]\n column_format = self._convert_header_text(\n format_names[format_start : format_start + format_len]\n )\n current_column_number = len(self.columns)\n\n col = _Column(\n current_column_number,\n self.column_names[current_column_number],\n 
column_label,\n column_format,\n self._column_types[current_column_number],\n self._column_data_lengths[current_column_number],\n )\n\n self.column_formats.append(column_format)\n self.columns.append(col)\n\n def read(self, nrows: int | None = None) -> DataFrame:\n if (nrows is None) and (self.chunksize is not None):\n nrows = self.chunksize\n elif nrows is None:\n nrows = self.row_count\n\n if len(self._column_types) == 0:\n self.close()\n raise EmptyDataError("No columns to parse from file")\n\n if nrows > 0 and self._current_row_in_file_index >= self.row_count:\n return DataFrame()\n\n nrows = min(nrows, self.row_count - self._current_row_in_file_index)\n\n nd = self._column_types.count(b"d")\n ns = self._column_types.count(b"s")\n\n self._string_chunk = np.empty((ns, nrows), dtype=object)\n self._byte_chunk = np.zeros((nd, 8 * nrows), dtype=np.uint8)\n\n self._current_row_in_chunk_index = 0\n p = Parser(self)\n p.read(nrows)\n\n rslt = self._chunk_to_dataframe()\n if self.index is not None:\n rslt = rslt.set_index(self.index)\n\n return rslt\n\n def _read_next_page(self):\n self._current_page_data_subheader_pointers = []\n self._cached_page = self._path_or_buf.read(self._page_length)\n if len(self._cached_page) <= 0:\n return True\n elif len(self._cached_page) != self._page_length:\n self.close()\n msg = (\n "failed to read complete page from file (read "\n f"{len(self._cached_page):d} of {self._page_length:d} bytes)"\n )\n raise ValueError(msg)\n\n self._read_page_header()\n if self._current_page_type in const.page_meta_types:\n self._process_page_metadata()\n\n if self._current_page_type not in const.page_meta_types + [\n const.page_data_type,\n const.page_mix_type,\n ]:\n return self._read_next_page()\n\n return False\n\n def _chunk_to_dataframe(self) -> DataFrame:\n n = self._current_row_in_chunk_index\n m = self._current_row_in_file_index\n ix = range(m - n, m)\n rslt = {}\n\n js, jb = 0, 0\n infer_string = get_option("future.infer_string")\n for j in 
range(self.column_count):\n name = self.column_names[j]\n\n if self._column_types[j] == b"d":\n col_arr = self._byte_chunk[jb, :].view(dtype=self.byte_order + "d")\n rslt[name] = pd.Series(col_arr, dtype=np.float64, index=ix, copy=False)\n if self.convert_dates:\n if self.column_formats[j] in const.sas_date_formats:\n rslt[name] = _convert_datetimes(rslt[name], "d")\n elif self.column_formats[j] in const.sas_datetime_formats:\n rslt[name] = _convert_datetimes(rslt[name], "s")\n jb += 1\n elif self._column_types[j] == b"s":\n rslt[name] = pd.Series(self._string_chunk[js, :], index=ix, copy=False)\n if self.convert_text and (self.encoding is not None):\n rslt[name] = self._decode_string(rslt[name].str)\n if infer_string:\n rslt[name] = rslt[name].astype("str")\n\n js += 1\n else:\n self.close()\n raise ValueError(f"unknown column type {repr(self._column_types[j])}")\n\n df = DataFrame(rslt, columns=self.column_names, index=ix, copy=False)\n return df\n\n def _decode_string(self, b):\n return b.decode(self.encoding or self.default_encoding)\n\n def _convert_header_text(self, b: bytes) -> str | bytes:\n if self.convert_header_text:\n return self._decode_string(b)\n else:\n return b\n
.venv\Lib\site-packages\pandas\io\sas\sas7bdat.py
sas7bdat.py
Python
27,730
0.95
0.119423
0.023041
node-utils
958
2023-07-17T22:38:39.536245
MIT
false
62a5a74e9f5e32ecbcfe11fdc2c21de5
"""\nRead SAS sas7bdat or xport files.\n"""\nfrom __future__ import annotations\n\nfrom abc import (\n ABC,\n abstractmethod,\n)\nfrom typing import (\n TYPE_CHECKING,\n overload,\n)\n\nfrom pandas.util._decorators import doc\n\nfrom pandas.core.shared_docs import _shared_docs\n\nfrom pandas.io.common import stringify_path\n\nif TYPE_CHECKING:\n from collections.abc import Hashable\n from types import TracebackType\n\n from pandas._typing import (\n CompressionOptions,\n FilePath,\n ReadBuffer,\n Self,\n )\n\n from pandas import DataFrame\n\n\nclass ReaderBase(ABC):\n """\n Protocol for XportReader and SAS7BDATReader classes.\n """\n\n @abstractmethod\n def read(self, nrows: int | None = None) -> DataFrame:\n ...\n\n @abstractmethod\n def close(self) -> None:\n ...\n\n def __enter__(self) -> Self:\n return self\n\n def __exit__(\n self,\n exc_type: type[BaseException] | None,\n exc_value: BaseException | None,\n traceback: TracebackType | None,\n ) -> None:\n self.close()\n\n\n@overload\ndef read_sas(\n filepath_or_buffer: FilePath | ReadBuffer[bytes],\n *,\n format: str | None = ...,\n index: Hashable | None = ...,\n encoding: str | None = ...,\n chunksize: int = ...,\n iterator: bool = ...,\n compression: CompressionOptions = ...,\n) -> ReaderBase:\n ...\n\n\n@overload\ndef read_sas(\n filepath_or_buffer: FilePath | ReadBuffer[bytes],\n *,\n format: str | None = ...,\n index: Hashable | None = ...,\n encoding: str | None = ...,\n chunksize: None = ...,\n iterator: bool = ...,\n compression: CompressionOptions = ...,\n) -> DataFrame | ReaderBase:\n ...\n\n\n@doc(decompression_options=_shared_docs["decompression_options"] % "filepath_or_buffer")\ndef read_sas(\n filepath_or_buffer: FilePath | ReadBuffer[bytes],\n *,\n format: str | None = None,\n index: Hashable | None = None,\n encoding: str | None = None,\n chunksize: int | None = None,\n iterator: bool = False,\n compression: CompressionOptions = "infer",\n) -> DataFrame | ReaderBase:\n """\n Read SAS files 
stored as either XPORT or SAS7BDAT format files.\n\n Parameters\n ----------\n filepath_or_buffer : str, path object, or file-like object\n String, path object (implementing ``os.PathLike[str]``), or file-like\n object implementing a binary ``read()`` function. The string could be a URL.\n Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is\n expected. A local file could be:\n ``file://localhost/path/to/table.sas7bdat``.\n format : str {{'xport', 'sas7bdat'}} or None\n If None, file format is inferred from file extension. If 'xport' or\n 'sas7bdat', uses the corresponding format.\n index : identifier of index column, defaults to None\n Identifier of column that should be used as index of the DataFrame.\n encoding : str, default is None\n Encoding for text data. If None, text data are stored as raw bytes.\n chunksize : int\n Read file `chunksize` lines at a time, returns iterator.\n iterator : bool, defaults to False\n If True, returns an iterator for reading the file incrementally.\n {decompression_options}\n\n Returns\n -------\n DataFrame if iterator=False and chunksize=None, else SAS7BDATReader\n or XportReader\n\n Examples\n --------\n >>> df = pd.read_sas("sas_data.sas7bdat") # doctest: +SKIP\n """\n if format is None:\n buffer_error_msg = (\n "If this is a buffer object rather "\n "than a string name, you must specify a format string"\n )\n filepath_or_buffer = stringify_path(filepath_or_buffer)\n if not isinstance(filepath_or_buffer, str):\n raise ValueError(buffer_error_msg)\n fname = filepath_or_buffer.lower()\n if ".xpt" in fname:\n format = "xport"\n elif ".sas7bdat" in fname:\n format = "sas7bdat"\n else:\n raise ValueError(\n f"unable to infer format of SAS file from filename: {repr(fname)}"\n )\n\n reader: ReaderBase\n if format.lower() == "xport":\n from pandas.io.sas.sas_xport import XportReader\n\n reader = XportReader(\n filepath_or_buffer,\n index=index,\n encoding=encoding,\n chunksize=chunksize,\n 
compression=compression,\n )\n elif format.lower() == "sas7bdat":\n from pandas.io.sas.sas7bdat import SAS7BDATReader\n\n reader = SAS7BDATReader(\n filepath_or_buffer,\n index=index,\n encoding=encoding,\n chunksize=chunksize,\n compression=compression,\n )\n else:\n raise ValueError("unknown SAS format")\n\n if iterator or chunksize:\n return reader\n\n with reader:\n return reader.read()\n
.venv\Lib\site-packages\pandas\io\sas\sasreader.py
sasreader.py
Python
4,885
0.95
0.106742
0.019868
python-kit
750
2025-05-15T20:29:40.642078
GPL-3.0
false
6647ccafde587c47dbee9b705e935322
from __future__ import annotations\n\nfrom typing import Final\n\nmagic: Final = (\n b"\x00\x00\x00\x00\x00\x00\x00\x00"\n b"\x00\x00\x00\x00\xc2\xea\x81\x60"\n b"\xb3\x14\x11\xcf\xbd\x92\x08\x00"\n b"\x09\xc7\x31\x8c\x18\x1f\x10\x11"\n)\n\nalign_1_checker_value: Final = b"3"\nalign_1_offset: Final = 32\nalign_1_length: Final = 1\nalign_1_value: Final = 4\nu64_byte_checker_value: Final = b"3"\nalign_2_offset: Final = 35\nalign_2_length: Final = 1\nalign_2_value: Final = 4\nendianness_offset: Final = 37\nendianness_length: Final = 1\nplatform_offset: Final = 39\nplatform_length: Final = 1\nencoding_offset: Final = 70\nencoding_length: Final = 1\ndataset_offset: Final = 92\ndataset_length: Final = 64\nfile_type_offset: Final = 156\nfile_type_length: Final = 8\ndate_created_offset: Final = 164\ndate_created_length: Final = 8\ndate_modified_offset: Final = 172\ndate_modified_length: Final = 8\nheader_size_offset: Final = 196\nheader_size_length: Final = 4\npage_size_offset: Final = 200\npage_size_length: Final = 4\npage_count_offset: Final = 204\npage_count_length: Final = 4\nsas_release_offset: Final = 216\nsas_release_length: Final = 8\nsas_server_type_offset: Final = 224\nsas_server_type_length: Final = 16\nos_version_number_offset: Final = 240\nos_version_number_length: Final = 16\nos_maker_offset: Final = 256\nos_maker_length: Final = 16\nos_name_offset: Final = 272\nos_name_length: Final = 16\npage_bit_offset_x86: Final = 16\npage_bit_offset_x64: Final = 32\nsubheader_pointer_length_x86: Final = 12\nsubheader_pointer_length_x64: Final = 24\npage_type_offset: Final = 0\npage_type_length: Final = 2\nblock_count_offset: Final = 2\nblock_count_length: Final = 2\nsubheader_count_offset: Final = 4\nsubheader_count_length: Final = 2\npage_type_mask: Final = 0x0F00\n# Keep "page_comp_type" bits\npage_type_mask2: Final = 0xF000 | page_type_mask\npage_meta_type: Final = 0x0000\npage_data_type: Final = 0x0100\npage_mix_type: Final = 0x0200\npage_amd_type: Final = 
0x0400\npage_meta2_type: Final = 0x4000\npage_comp_type: Final = 0x9000\npage_meta_types: Final = [page_meta_type, page_meta2_type]\nsubheader_pointers_offset: Final = 8\ntruncated_subheader_id: Final = 1\ncompressed_subheader_id: Final = 4\ncompressed_subheader_type: Final = 1\ntext_block_size_length: Final = 2\nrow_length_offset_multiplier: Final = 5\nrow_count_offset_multiplier: Final = 6\ncol_count_p1_multiplier: Final = 9\ncol_count_p2_multiplier: Final = 10\nrow_count_on_mix_page_offset_multiplier: Final = 15\ncolumn_name_pointer_length: Final = 8\ncolumn_name_text_subheader_offset: Final = 0\ncolumn_name_text_subheader_length: Final = 2\ncolumn_name_offset_offset: Final = 2\ncolumn_name_offset_length: Final = 2\ncolumn_name_length_offset: Final = 4\ncolumn_name_length_length: Final = 2\ncolumn_data_offset_offset: Final = 8\ncolumn_data_length_offset: Final = 8\ncolumn_data_length_length: Final = 4\ncolumn_type_offset: Final = 14\ncolumn_type_length: Final = 1\ncolumn_format_text_subheader_index_offset: Final = 22\ncolumn_format_text_subheader_index_length: Final = 2\ncolumn_format_offset_offset: Final = 24\ncolumn_format_offset_length: Final = 2\ncolumn_format_length_offset: Final = 26\ncolumn_format_length_length: Final = 2\ncolumn_label_text_subheader_index_offset: Final = 28\ncolumn_label_text_subheader_index_length: Final = 2\ncolumn_label_offset_offset: Final = 30\ncolumn_label_offset_length: Final = 2\ncolumn_label_length_offset: Final = 32\ncolumn_label_length_length: Final = 2\nrle_compression: Final = b"SASYZCRL"\nrdc_compression: Final = b"SASYZCR2"\n\ncompression_literals: Final = [rle_compression, rdc_compression]\n\n# Incomplete list of encodings, using SAS nomenclature:\n# https://support.sas.com/documentation/onlinedoc/dfdmstudio/2.6/dmpdmsug/Content/dfU_Encodings_SAS.html\n# corresponding to the Python documentation of standard encodings\n# https://docs.python.org/3/library/codecs.html#standard-encodings\nencoding_names: Final = {\n 20: 
"utf-8",\n 29: "latin1",\n 30: "latin2",\n 31: "latin3",\n 32: "latin4",\n 33: "cyrillic",\n 34: "arabic",\n 35: "greek",\n 36: "hebrew",\n 37: "latin5",\n 38: "latin6",\n 39: "cp874",\n 40: "latin9",\n 41: "cp437",\n 42: "cp850",\n 43: "cp852",\n 44: "cp857",\n 45: "cp858",\n 46: "cp862",\n 47: "cp864",\n 48: "cp865",\n 49: "cp866",\n 50: "cp869",\n 51: "cp874",\n # 52: "", # not found\n # 53: "", # not found\n # 54: "", # not found\n 55: "cp720",\n 56: "cp737",\n 57: "cp775",\n 58: "cp860",\n 59: "cp863",\n 60: "cp1250",\n 61: "cp1251",\n 62: "cp1252",\n 63: "cp1253",\n 64: "cp1254",\n 65: "cp1255",\n 66: "cp1256",\n 67: "cp1257",\n 68: "cp1258",\n 118: "cp950",\n # 119: "", # not found\n 123: "big5",\n 125: "gb2312",\n 126: "cp936",\n 134: "euc_jp",\n 136: "cp932",\n 138: "shift_jis",\n 140: "euc-kr",\n 141: "cp949",\n 227: "latin8",\n # 228: "", # not found\n # 229: "" # not found\n}\n\n\nclass SASIndex:\n row_size_index: Final = 0\n column_size_index: Final = 1\n subheader_counts_index: Final = 2\n column_text_index: Final = 3\n column_name_index: Final = 4\n column_attributes_index: Final = 5\n format_and_label_index: Final = 6\n column_list_index: Final = 7\n data_subheader_index: Final = 8\n\n\nsubheader_signature_to_index: Final = {\n b"\xF7\xF7\xF7\xF7": SASIndex.row_size_index,\n b"\x00\x00\x00\x00\xF7\xF7\xF7\xF7": SASIndex.row_size_index,\n b"\xF7\xF7\xF7\xF7\x00\x00\x00\x00": SASIndex.row_size_index,\n b"\xF7\xF7\xF7\xF7\xFF\xFF\xFB\xFE": SASIndex.row_size_index,\n b"\xF6\xF6\xF6\xF6": SASIndex.column_size_index,\n b"\x00\x00\x00\x00\xF6\xF6\xF6\xF6": SASIndex.column_size_index,\n b"\xF6\xF6\xF6\xF6\x00\x00\x00\x00": SASIndex.column_size_index,\n b"\xF6\xF6\xF6\xF6\xFF\xFF\xFB\xFE": SASIndex.column_size_index,\n b"\x00\xFC\xFF\xFF": SASIndex.subheader_counts_index,\n b"\xFF\xFF\xFC\x00": SASIndex.subheader_counts_index,\n b"\x00\xFC\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.subheader_counts_index,\n b"\xFF\xFF\xFF\xFF\xFF\xFF\xFC\x00": 
SASIndex.subheader_counts_index,\n b"\xFD\xFF\xFF\xFF": SASIndex.column_text_index,\n b"\xFF\xFF\xFF\xFD": SASIndex.column_text_index,\n b"\xFD\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_text_index,\n b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD": SASIndex.column_text_index,\n b"\xFF\xFF\xFF\xFF": SASIndex.column_name_index,\n b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_name_index,\n b"\xFC\xFF\xFF\xFF": SASIndex.column_attributes_index,\n b"\xFF\xFF\xFF\xFC": SASIndex.column_attributes_index,\n b"\xFC\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_attributes_index,\n b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFC": SASIndex.column_attributes_index,\n b"\xFE\xFB\xFF\xFF": SASIndex.format_and_label_index,\n b"\xFF\xFF\xFB\xFE": SASIndex.format_and_label_index,\n b"\xFE\xFB\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.format_and_label_index,\n b"\xFF\xFF\xFF\xFF\xFF\xFF\xFB\xFE": SASIndex.format_and_label_index,\n b"\xFE\xFF\xFF\xFF": SASIndex.column_list_index,\n b"\xFF\xFF\xFF\xFE": SASIndex.column_list_index,\n b"\xFE\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_list_index,\n b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE": SASIndex.column_list_index,\n}\n\n\n# List of frequently used SAS date and datetime formats\n# http://support.sas.com/documentation/cdl/en/etsug/60372/HTML/default/viewer.htm#etsug_intervals_sect009.htm\n# https://github.com/epam/parso/blob/master/src/main/java/com/epam/parso/impl/SasFileConstants.java\nsas_date_formats: Final = (\n "DATE",\n "DAY",\n "DDMMYY",\n "DOWNAME",\n "JULDAY",\n "JULIAN",\n "MMDDYY",\n "MMYY",\n "MMYYC",\n "MMYYD",\n "MMYYP",\n "MMYYS",\n "MMYYN",\n "MONNAME",\n "MONTH",\n "MONYY",\n "QTR",\n "QTRR",\n "NENGO",\n "WEEKDATE",\n "WEEKDATX",\n "WEEKDAY",\n "WEEKV",\n "WORDDATE",\n "WORDDATX",\n "YEAR",\n "YYMM",\n "YYMMC",\n "YYMMD",\n "YYMMP",\n "YYMMS",\n "YYMMN",\n "YYMON",\n "YYMMDD",\n "YYQ",\n "YYQC",\n "YYQD",\n "YYQP",\n "YYQS",\n "YYQN",\n "YYQR",\n "YYQRC",\n "YYQRD",\n "YYQRP",\n "YYQRS",\n "YYQRN",\n "YYMMDDP",\n "YYMMDDC",\n "E8601DA",\n 
"YYMMDDN",\n "MMDDYYC",\n "MMDDYYS",\n "MMDDYYD",\n "YYMMDDS",\n "B8601DA",\n "DDMMYYN",\n "YYMMDDD",\n "DDMMYYB",\n "DDMMYYP",\n "MMDDYYP",\n "YYMMDDB",\n "MMDDYYN",\n "DDMMYYC",\n "DDMMYYD",\n "DDMMYYS",\n "MINGUO",\n)\n\nsas_datetime_formats: Final = (\n "DATETIME",\n "DTWKDATX",\n "B8601DN",\n "B8601DT",\n "B8601DX",\n "B8601DZ",\n "B8601LX",\n "E8601DN",\n "E8601DT",\n "E8601DX",\n "E8601DZ",\n "E8601LX",\n "DATEAMPM",\n "DTDATE",\n "DTMONYY",\n "DTMONYY",\n "DTWKDATX",\n "DTYEAR",\n "TOD",\n "MDYAMPM",\n)\n
.venv\Lib\site-packages\pandas\io\sas\sas_constants.py
sas_constants.py
Python
8,719
0.95
0.003226
0.04698
awesome-app
470
2024-09-01T08:31:42.084380
Apache-2.0
false
4e3323a024532e467d441e4e7bc8bb05
"""\nRead a SAS XPort format file into a Pandas DataFrame.\n\nBased on code from Jack Cushman (github.com/jcushman/xport).\n\nThe file format is defined here:\n\nhttps://support.sas.com/content/dam/SAS/support/en/technical-papers/record-layout-of-a-sas-version-5-or-6-data-set-in-sas-transport-xport-format.pdf\n"""\nfrom __future__ import annotations\n\nfrom collections import abc\nfrom datetime import datetime\nimport struct\nfrom typing import TYPE_CHECKING\nimport warnings\n\nimport numpy as np\n\nfrom pandas.util._decorators import Appender\nfrom pandas.util._exceptions import find_stack_level\n\nimport pandas as pd\n\nfrom pandas.io.common import get_handle\nfrom pandas.io.sas.sasreader import ReaderBase\n\nif TYPE_CHECKING:\n from pandas._typing import (\n CompressionOptions,\n DatetimeNaTType,\n FilePath,\n ReadBuffer,\n )\n_correct_line1 = (\n "HEADER RECORD*******LIBRARY HEADER RECORD!!!!!!!"\n "000000000000000000000000000000 "\n)\n_correct_header1 = (\n "HEADER RECORD*******MEMBER HEADER RECORD!!!!!!!000000000000000001600000000"\n)\n_correct_header2 = (\n "HEADER RECORD*******DSCRPTR HEADER RECORD!!!!!!!"\n "000000000000000000000000000000 "\n)\n_correct_obs_header = (\n "HEADER RECORD*******OBS HEADER RECORD!!!!!!!"\n "000000000000000000000000000000 "\n)\n_fieldkeys = [\n "ntype",\n "nhfun",\n "field_length",\n "nvar0",\n "name",\n "label",\n "nform",\n "nfl",\n "num_decimals",\n "nfj",\n "nfill",\n "niform",\n "nifl",\n "nifd",\n "npos",\n "_",\n]\n\n\n_base_params_doc = """\\nParameters\n----------\nfilepath_or_buffer : str or file-like object\n Path to SAS file or object implementing binary read method."""\n\n_params2_doc = """\\nindex : identifier of index column\n Identifier of column that should be used as index of the DataFrame.\nencoding : str\n Encoding for text data.\nchunksize : int\n Read file `chunksize` lines at a time, returns iterator."""\n\n_format_params_doc = """\\nformat : str\n File format, only `xport` is currently 
supported."""\n\n_iterator_doc = """\\niterator : bool, default False\n Return XportReader object for reading file incrementally."""\n\n\n_read_sas_doc = f"""Read a SAS file into a DataFrame.\n\n{_base_params_doc}\n{_format_params_doc}\n{_params2_doc}\n{_iterator_doc}\n\nReturns\n-------\nDataFrame or XportReader\n\nExamples\n--------\nRead a SAS Xport file:\n\n>>> df = pd.read_sas('filename.XPT')\n\nRead a Xport file in 10,000 line chunks:\n\n>>> itr = pd.read_sas('filename.XPT', chunksize=10000)\n>>> for chunk in itr:\n>>> do_something(chunk)\n\n"""\n\n_xport_reader_doc = f"""\\nClass for reading SAS Xport files.\n\n{_base_params_doc}\n{_params2_doc}\n\nAttributes\n----------\nmember_info : list\n Contains information about the file\nfields : list\n Contains information about the variables in the file\n"""\n\n_read_method_doc = """\\nRead observations from SAS Xport file, returning as data frame.\n\nParameters\n----------\nnrows : int\n Number of rows to read from data file; if None, read whole\n file.\n\nReturns\n-------\nA DataFrame.\n"""\n\n\ndef _parse_date(datestr: str) -> DatetimeNaTType:\n """Given a date in xport format, return Python date."""\n try:\n # e.g. "16FEB11:10:07:55"\n return datetime.strptime(datestr, "%d%b%y:%H:%M:%S")\n except ValueError:\n return pd.NaT\n\n\ndef _split_line(s: str, parts):\n """\n Parameters\n ----------\n s: str\n Fixed-length string to split\n parts: list of (name, length) pairs\n Used to break up string, name '_' will be filtered from output.\n\n Returns\n -------\n Dict of name:contents of string at given location.\n """\n out = {}\n start = 0\n for name, length in parts:\n out[name] = s[start : start + length].strip()\n start += length\n del out["_"]\n return out\n\n\ndef _handle_truncated_float_vec(vec, nbytes):\n # This feature is not well documented, but some SAS XPORT files\n # have 2-7 byte "truncated" floats. 
To read these truncated\n # floats, pad them with zeros on the right to make 8 byte floats.\n #\n # References:\n # https://github.com/jcushman/xport/pull/3\n # The R "foreign" library\n\n if nbytes != 8:\n vec1 = np.zeros(len(vec), np.dtype("S8"))\n dtype = np.dtype(f"S{nbytes},S{8 - nbytes}")\n vec2 = vec1.view(dtype=dtype)\n vec2["f0"] = vec\n return vec2\n\n return vec\n\n\ndef _parse_float_vec(vec):\n """\n Parse a vector of float values representing IBM 8 byte floats into\n native 8 byte floats.\n """\n dtype = np.dtype(">u4,>u4")\n vec1 = vec.view(dtype=dtype)\n xport1 = vec1["f0"]\n xport2 = vec1["f1"]\n\n # Start by setting first half of ieee number to first half of IBM\n # number sans exponent\n ieee1 = xport1 & 0x00FFFFFF\n\n # The fraction bit to the left of the binary point in the ieee\n # format was set and the number was shifted 0, 1, 2, or 3\n # places. This will tell us how to adjust the ibm exponent to be a\n # power of 2 ieee exponent and how to shift the fraction bits to\n # restore the correct magnitude.\n shift = np.zeros(len(vec), dtype=np.uint8)\n shift[np.where(xport1 & 0x00200000)] = 1\n shift[np.where(xport1 & 0x00400000)] = 2\n shift[np.where(xport1 & 0x00800000)] = 3\n\n # shift the ieee number down the correct number of places then\n # set the second half of the ieee number to be the second half\n # of the ibm number shifted appropriately, ored with the bits\n # from the first half that would have been shifted in if we\n # could shift a double. All we are worried about are the low\n # order 3 bits of the first half since we're only shifting by\n # 1, 2, or 3.\n ieee1 >>= shift\n ieee2 = (xport2 >> shift) | ((xport1 & 0x00000007) << (29 + (3 - shift)))\n\n # clear the 1 bit to the left of the binary point\n ieee1 &= 0xFFEFFFFF\n\n # set the exponent of the ieee number to be the actual exponent\n # plus the shift count + 1023. Or this into the first half of the\n # ieee number. 
The ibm exponent is excess 64 but is adjusted by 65\n # since during conversion to ibm format the exponent is\n # incremented by 1 and the fraction bits left 4 positions to the\n # right of the radix point. (had to add >> 24 because C treats &\n # 0x7f as 0x7f000000 and Python doesn't)\n ieee1 |= ((((((xport1 >> 24) & 0x7F) - 65) << 2) + shift + 1023) << 20) | (\n xport1 & 0x80000000\n )\n\n ieee = np.empty((len(ieee1),), dtype=">u4,>u4")\n ieee["f0"] = ieee1\n ieee["f1"] = ieee2\n ieee = ieee.view(dtype=">f8")\n ieee = ieee.astype("f8")\n\n return ieee\n\n\nclass XportReader(ReaderBase, abc.Iterator):\n __doc__ = _xport_reader_doc\n\n def __init__(\n self,\n filepath_or_buffer: FilePath | ReadBuffer[bytes],\n index=None,\n encoding: str | None = "ISO-8859-1",\n chunksize: int | None = None,\n compression: CompressionOptions = "infer",\n ) -> None:\n self._encoding = encoding\n self._lines_read = 0\n self._index = index\n self._chunksize = chunksize\n\n self.handles = get_handle(\n filepath_or_buffer,\n "rb",\n encoding=encoding,\n is_text=False,\n compression=compression,\n )\n self.filepath_or_buffer = self.handles.handle\n\n try:\n self._read_header()\n except Exception:\n self.close()\n raise\n\n def close(self) -> None:\n self.handles.close()\n\n def _get_row(self):\n return self.filepath_or_buffer.read(80).decode()\n\n def _read_header(self) -> None:\n self.filepath_or_buffer.seek(0)\n\n # read file header\n line1 = self._get_row()\n if line1 != _correct_line1:\n if "**COMPRESSED**" in line1:\n # this was created with the PROC CPORT method and can't be read\n # https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.5/movefile/p1bm6aqp3fw4uin1hucwh718f6kp.htm\n raise ValueError(\n "Header record indicates a CPORT file, which is not readable."\n )\n raise ValueError("Header record is not an XPORT file.")\n\n line2 = self._get_row()\n fif = [["prefix", 24], ["version", 8], ["OS", 8], ["_", 24], ["created", 16]]\n file_info = _split_line(line2, fif)\n if 
file_info["prefix"] != "SAS SAS SASLIB":\n raise ValueError("Header record has invalid prefix.")\n file_info["created"] = _parse_date(file_info["created"])\n self.file_info = file_info\n\n line3 = self._get_row()\n file_info["modified"] = _parse_date(line3[:16])\n\n # read member header\n header1 = self._get_row()\n header2 = self._get_row()\n headflag1 = header1.startswith(_correct_header1)\n headflag2 = header2 == _correct_header2\n if not (headflag1 and headflag2):\n raise ValueError("Member header not found")\n # usually 140, could be 135\n fieldnamelength = int(header1[-5:-2])\n\n # member info\n mem = [\n ["prefix", 8],\n ["set_name", 8],\n ["sasdata", 8],\n ["version", 8],\n ["OS", 8],\n ["_", 24],\n ["created", 16],\n ]\n member_info = _split_line(self._get_row(), mem)\n mem = [["modified", 16], ["_", 16], ["label", 40], ["type", 8]]\n member_info.update(_split_line(self._get_row(), mem))\n member_info["modified"] = _parse_date(member_info["modified"])\n member_info["created"] = _parse_date(member_info["created"])\n self.member_info = member_info\n\n # read field names\n types = {1: "numeric", 2: "char"}\n fieldcount = int(self._get_row()[54:58])\n datalength = fieldnamelength * fieldcount\n # round up to nearest 80\n if datalength % 80:\n datalength += 80 - datalength % 80\n fielddata = self.filepath_or_buffer.read(datalength)\n fields = []\n obs_length = 0\n while len(fielddata) >= fieldnamelength:\n # pull data for one field\n fieldbytes, fielddata = (\n fielddata[:fieldnamelength],\n fielddata[fieldnamelength:],\n )\n\n # rest at end gets ignored, so if field is short, pad out\n # to match struct pattern below\n fieldbytes = fieldbytes.ljust(140)\n\n fieldstruct = struct.unpack(">hhhh8s40s8shhh2s8shhl52s", fieldbytes)\n field = dict(zip(_fieldkeys, fieldstruct))\n del field["_"]\n field["ntype"] = types[field["ntype"]]\n fl = field["field_length"]\n if field["ntype"] == "numeric" and ((fl < 2) or (fl > 8)):\n msg = f"Floating field width {fl} is not 
between 2 and 8."\n raise TypeError(msg)\n\n for k, v in field.items():\n try:\n field[k] = v.strip()\n except AttributeError:\n pass\n\n obs_length += field["field_length"]\n fields += [field]\n\n header = self._get_row()\n if not header == _correct_obs_header:\n raise ValueError("Observation header not found.")\n\n self.fields = fields\n self.record_length = obs_length\n self.record_start = self.filepath_or_buffer.tell()\n\n self.nobs = self._record_count()\n self.columns = [x["name"].decode() for x in self.fields]\n\n # Setup the dtype.\n dtypel = [\n ("s" + str(i), "S" + str(field["field_length"]))\n for i, field in enumerate(self.fields)\n ]\n dtype = np.dtype(dtypel)\n self._dtype = dtype\n\n def __next__(self) -> pd.DataFrame:\n return self.read(nrows=self._chunksize or 1)\n\n def _record_count(self) -> int:\n """\n Get number of records in file.\n\n This is maybe suboptimal because we have to seek to the end of\n the file.\n\n Side effect: returns file position to record_start.\n """\n self.filepath_or_buffer.seek(0, 2)\n total_records_length = self.filepath_or_buffer.tell() - self.record_start\n\n if total_records_length % 80 != 0:\n warnings.warn(\n "xport file may be corrupted.",\n stacklevel=find_stack_level(),\n )\n\n if self.record_length > 80:\n self.filepath_or_buffer.seek(self.record_start)\n return total_records_length // self.record_length\n\n self.filepath_or_buffer.seek(-80, 2)\n last_card_bytes = self.filepath_or_buffer.read(80)\n last_card = np.frombuffer(last_card_bytes, dtype=np.uint64)\n\n # 8 byte blank\n ix = np.flatnonzero(last_card == 2314885530818453536)\n\n if len(ix) == 0:\n tail_pad = 0\n else:\n tail_pad = 8 * len(ix)\n\n self.filepath_or_buffer.seek(self.record_start)\n\n return (total_records_length - tail_pad) // self.record_length\n\n def get_chunk(self, size: int | None = None) -> pd.DataFrame:\n """\n Reads lines from Xport file and returns as dataframe\n\n Parameters\n ----------\n size : int, defaults to None\n Number of 
lines to read. If None, reads whole file.\n\n Returns\n -------\n DataFrame\n """\n if size is None:\n size = self._chunksize\n return self.read(nrows=size)\n\n def _missing_double(self, vec):\n v = vec.view(dtype="u1,u1,u2,u4")\n miss = (v["f1"] == 0) & (v["f2"] == 0) & (v["f3"] == 0)\n miss1 = (\n ((v["f0"] >= 0x41) & (v["f0"] <= 0x5A))\n | (v["f0"] == 0x5F)\n | (v["f0"] == 0x2E)\n )\n miss &= miss1\n return miss\n\n @Appender(_read_method_doc)\n def read(self, nrows: int | None = None) -> pd.DataFrame:\n if nrows is None:\n nrows = self.nobs\n\n read_lines = min(nrows, self.nobs - self._lines_read)\n read_len = read_lines * self.record_length\n if read_len <= 0:\n self.close()\n raise StopIteration\n raw = self.filepath_or_buffer.read(read_len)\n data = np.frombuffer(raw, dtype=self._dtype, count=read_lines)\n\n df_data = {}\n for j, x in enumerate(self.columns):\n vec = data["s" + str(j)]\n ntype = self.fields[j]["ntype"]\n if ntype == "numeric":\n vec = _handle_truncated_float_vec(vec, self.fields[j]["field_length"])\n miss = self._missing_double(vec)\n v = _parse_float_vec(vec)\n v[miss] = np.nan\n elif self.fields[j]["ntype"] == "char":\n v = [y.rstrip() for y in vec]\n\n if self._encoding is not None:\n v = [y.decode(self._encoding) for y in v]\n\n df_data.update({x: v})\n df = pd.DataFrame(df_data)\n\n if self._index is None:\n df.index = pd.Index(range(self._lines_read, self._lines_read + read_lines))\n else:\n df = df.set_index(self._index)\n\n self._lines_read += read_lines\n\n return df\n
.venv\Lib\site-packages\pandas\io\sas\sas_xport.py
sas_xport.py
Python
15,134
0.95
0.100394
0.103365
node-utils
269
2025-04-26T07:10:15.781276
GPL-3.0
false
f8a2f09553df3305ab95386dbb4889ec
from pandas.io.sas.sasreader import read_sas\n\n__all__ = ["read_sas"]\n
.venv\Lib\site-packages\pandas\io\sas\__init__.py
__init__.py
Python
69
0.65
0
0
react-lib
748
2024-05-30T18:03:20.825151
BSD-3-Clause
false
24190ae2b13b328594a39898fb18f050
\n\n
.venv\Lib\site-packages\pandas\io\sas\__pycache__\sas7bdat.cpython-313.pyc
sas7bdat.cpython-313.pyc
Other
36,099
0.8
0.016216
0.011364
vue-tools
567
2024-07-11T13:11:28.721178
GPL-3.0
false
6ef11c680fb32c0ec1c0897de0654f45
\n\n
.venv\Lib\site-packages\pandas\io\sas\__pycache__\sasreader.cpython-313.pyc
sasreader.cpython-313.pyc
Other
5,830
0.95
0.042017
0
awesome-app
576
2025-03-11T05:35:15.904390
GPL-3.0
false
47ed0dcd2a18045195220f0882677d70
\n\n
.venv\Lib\site-packages\pandas\io\sas\__pycache__\sas_constants.cpython-313.pyc
sas_constants.cpython-313.pyc
Other
10,059
0.8
0
0
awesome-app
723
2024-07-14T11:30:05.189115
BSD-3-Clause
false
34d343d8a120df263238c175e0ec9a68
\n\n
.venv\Lib\site-packages\pandas\io\sas\__pycache__\sas_xport.cpython-313.pyc
sas_xport.cpython-313.pyc
Other
17,277
0.8
0.020243
0
node-utils
617
2023-12-15T20:13:07.032505
BSD-3-Clause
false
74a357530882311543af1b3a8a383777
\n\n
.venv\Lib\site-packages\pandas\io\sas\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
274
0.7
0
0
python-kit
713
2024-12-20T11:48:55.815380
MIT
false
87b43844c2717e4a05aa540c9648a555
\n\n
.venv\Lib\site-packages\pandas\io\__pycache__\api.cpython-313.pyc
api.cpython-313.pyc
Other
1,381
0.8
0
0
node-utils
213
2024-11-03T02:38:03.858820
BSD-3-Clause
false
c40ba92b1ec891b8085dec41b8e6c19c
\n\n
.venv\Lib\site-packages\pandas\io\__pycache__\clipboards.cpython-313.pyc
clipboards.cpython-313.pyc
Other
6,701
0.95
0.076336
0.026786
react-lib
362
2024-12-05T22:30:07.667839
GPL-3.0
false
1a4f5730708631c422a30ecb53fbf2ca
\n\n
.venv\Lib\site-packages\pandas\io\__pycache__\common.cpython-313.pyc
common.cpython-313.pyc
Other
41,185
0.95
0.031599
0.002088
react-lib
771
2025-02-21T15:09:38.745429
BSD-3-Clause
false
0ceb4da224a9cb5b74336b64d1aa0e62
\n\n
.venv\Lib\site-packages\pandas\io\__pycache__\feather_format.cpython-313.pyc
feather_format.cpython-313.pyc
Other
4,644
0.95
0.042105
0.034884
vue-tools
794
2024-10-28T17:52:40.556214
BSD-3-Clause
false
dc0ea22666864d107306488fde8c68cc
\n\n
.venv\Lib\site-packages\pandas\io\__pycache__\gbq.cpython-313.pyc
gbq.cpython-313.pyc
Other
8,962
0.95
0.084746
0.019608
vue-tools
54
2024-06-24T23:06:54.844975
BSD-3-Clause
false
2cb33b724a82e8e132faad3672e474f3
\n\n
.venv\Lib\site-packages\pandas\io\__pycache__\html.cpython-313.pyc
html.cpython-313.pyc
Other
44,222
0.95
0.075255
0.026786
node-utils
232
2024-12-22T13:16:09.661582
BSD-3-Clause
false
af5648e5660b0664e1fe80654401f59c
\n\n
.venv\Lib\site-packages\pandas\io\__pycache__\orc.cpython-313.pyc
orc.cpython-313.pyc
Other
8,490
0.95
0.058511
0.041176
react-lib
882
2025-03-19T23:55:04.068474
MIT
false
c2068082c41bdd8abec529ea7bd4ca12
\n\n
.venv\Lib\site-packages\pandas\io\__pycache__\parquet.cpython-313.pyc
parquet.cpython-313.pyc
Other
23,661
0.95
0.066832
0.013624
python-kit
995
2024-04-09T19:57:25.221633
BSD-3-Clause
false
06ce9374ec47469e046ff2a429eeaa35
\n\n
.venv\Lib\site-packages\pandas\io\__pycache__\pickle.cpython-313.pyc
pickle.cpython-313.pyc
Other
6,497
0.95
0.024242
0
python-kit
292
2024-11-04T20:33:36.056871
MIT
false
276954d509463c7147f3caa7e445fc94
\n\n
.venv\Lib\site-packages\pandas\io\__pycache__\spss.cpython-313.pyc
spss.cpython-313.pyc
Other
2,593
0.95
0.051724
0.040816
node-utils
415
2025-02-20T22:13:36.076745
Apache-2.0
false
abc2532b4e6126f0df199c9b9c6f8726
\n\n
.venv\Lib\site-packages\pandas\io\__pycache__\sql.cpython-313.pyc
sql.cpython-313.pyc
Other
103,746
0.75
0.058646
0.024104
python-kit
58
2023-09-02T07:17:07.176045
BSD-3-Clause
false
e5eddb2f28549bc1cecaecbf606c7f9d
\n\n
.venv\Lib\site-packages\pandas\io\__pycache__\xml.cpython-313.pyc
xml.cpython-313.pyc
Other
42,032
0.95
0.073759
0.037643
node-utils
570
2023-12-18T22:54:13.328534
BSD-3-Clause
false
a0e55cfd0cc0f1804f687862ea336f02