diff --git a/.gitattributes b/.gitattributes index 13bb8b0726ff3b236c052b361e1725a8953f0081..adc8cb4a3cf57162c642bf83ac0da78d25cec0bc 100644 --- a/.gitattributes +++ b/.gitattributes @@ -103,3 +103,4 @@ parrot/lib/libtinfow.so filter=lfs diff=lfs merge=lfs -text parrot/lib/libtinfow.so.6 filter=lfs diff=lfs merge=lfs -text parrot/lib/python3.10/site-packages/google/protobuf/__pycache__/descriptor_pb2.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text parrot/lib/libtinfo.so.6 filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/aiohttp/_websocket.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/parrot/lib/python3.10/site-packages/aiohttp/_websocket.cpython-310-x86_64-linux-gnu.so b/parrot/lib/python3.10/site-packages/aiohttp/_websocket.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..90c3ae126c630fc4e85cde22b723f8576bea053c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/aiohttp/_websocket.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19a0dab6411e2b9fa88818a2b4486b6e07153b7620d23ffe1a0dba194d3718b0 +size 234048 diff --git a/parrot/lib/python3.10/site-packages/pyarrow/parquet/__init__.py b/parrot/lib/python3.10/site-packages/pyarrow/parquet/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..134f3c097ef004f83fdc8e24e5cb45166c17577e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/parquet/__init__.py @@ -0,0 +1,20 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# flake8: noqa + +from .core import * diff --git a/parrot/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59fb1824464886615068d16ed1481077ea36aaf4 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/core.cpython-310.pyc b/parrot/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0129f2d6585cb3bdaec6095c6bb55f60f719be4d Binary files /dev/null and b/parrot/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/core.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/encryption.cpython-310.pyc b/parrot/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/encryption.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..674d1f3f91fbeaddc4c5427d1fee24f767b7f69a Binary files /dev/null and b/parrot/lib/python3.10/site-packages/pyarrow/parquet/__pycache__/encryption.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/pyarrow/parquet/core.py b/parrot/lib/python3.10/site-packages/pyarrow/parquet/core.py new file mode 100644 index 0000000000000000000000000000000000000000..6ca6f7089e75c203ab1850a740042193ae0a9ac4 --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/pyarrow/parquet/core.py @@ -0,0 +1,2375 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +from collections import defaultdict +from contextlib import nullcontext +from functools import reduce + +import inspect +import json +import os +import re +import operator +import warnings + +import pyarrow as pa + +try: + import pyarrow._parquet as _parquet +except ImportError as exc: + raise ImportError( + "The pyarrow installation is not built with support " + f"for the Parquet file format ({str(exc)})" + ) from None + +from pyarrow._parquet import (ParquetReader, Statistics, # noqa + FileMetaData, RowGroupMetaData, + ColumnChunkMetaData, + ParquetSchema, ColumnSchema, + ParquetLogicalType, + FileEncryptionProperties, + FileDecryptionProperties, + SortingColumn) +from pyarrow.fs import (LocalFileSystem, FileSystem, FileType, + _resolve_filesystem_and_path, _ensure_filesystem) +from pyarrow.util import guid, _is_path_like, _stringify_path, _deprecate_api + + +def _check_contains_null(val): + if isinstance(val, bytes): + for byte in val: + if isinstance(byte, bytes): + compare_to = chr(0) + else: + compare_to = 0 + if byte == compare_to: + return True + elif isinstance(val, str): + return '\x00' in 
val + return False + + +def _check_filters(filters, check_null_strings=True): + """ + Check if filters are well-formed. + """ + if filters is not None: + if len(filters) == 0 or any(len(f) == 0 for f in filters): + raise ValueError("Malformed filters") + if isinstance(filters[0][0], str): + # We have encountered the situation where we have one nesting level + # too few: + # We have [(,,), ..] instead of [[(,,), ..]] + filters = [filters] + if check_null_strings: + for conjunction in filters: + for col, op, val in conjunction: + if ( + isinstance(val, list) and + all(_check_contains_null(v) for v in val) or + _check_contains_null(val) + ): + raise NotImplementedError( + "Null-terminated binary strings are not supported " + "as filter values." + ) + return filters + + +_DNF_filter_doc = """Predicates are expressed using an ``Expression`` or using + the disjunctive normal form (DNF), like ``[[('x', '=', 0), ...], ...]``. + DNF allows arbitrary boolean logical combinations of single column predicates. + The innermost tuples each describe a single column predicate. The list of inner + predicates is interpreted as a conjunction (AND), forming a more selective and + multiple column predicate. Finally, the most outer list combines these filters + as a disjunction (OR). + + Predicates may also be passed as List[Tuple]. This form is interpreted + as a single conjunction. To express OR in predicates, one must + use the (preferred) List[List[Tuple]] notation. + + Each tuple has format: (``key``, ``op``, ``value``) and compares the + ``key`` with the ``value``. + The supported ``op`` are: ``=`` or ``==``, ``!=``, ``<``, ``>``, ``<=``, + ``>=``, ``in`` and ``not in``. If the ``op`` is ``in`` or ``not in``, the + ``value`` must be a collection such as a ``list``, a ``set`` or a + ``tuple``. + + Examples: + + Using the ``Expression`` API: + + .. 
code-block:: python + + import pyarrow.compute as pc + pc.field('x') = 0 + pc.field('y').isin(['a', 'b', 'c']) + ~pc.field('y').isin({'a', 'b'}) + + Using the DNF format: + + .. code-block:: python + + ('x', '=', 0) + ('y', 'in', ['a', 'b', 'c']) + ('z', 'not in', {'a','b'}) + + """ + + +def filters_to_expression(filters): + """ + Check if filters are well-formed and convert to an ``Expression``. + + Parameters + ---------- + filters : List[Tuple] or List[List[Tuple]] + + Notes + ----- + See internal ``pyarrow._DNF_filter_doc`` attribute for more details. + + Examples + -------- + + >>> filters_to_expression([('foo', '==', 'bar')]) + + + Returns + ------- + pyarrow.compute.Expression + An Expression representing the filters + """ + import pyarrow.dataset as ds + + if isinstance(filters, ds.Expression): + return filters + + filters = _check_filters(filters, check_null_strings=False) + + def convert_single_predicate(col, op, val): + field = ds.field(col) + + if op == "=" or op == "==": + return field == val + elif op == "!=": + return field != val + elif op == '<': + return field < val + elif op == '>': + return field > val + elif op == '<=': + return field <= val + elif op == '>=': + return field >= val + elif op == 'in': + return field.isin(val) + elif op == 'not in': + return ~field.isin(val) + else: + raise ValueError( + '"{0}" is not a valid operator in predicates.'.format( + (col, op, val))) + + disjunction_members = [] + + for conjunction in filters: + conjunction_members = [ + convert_single_predicate(col, op, val) + for col, op, val in conjunction + ] + + disjunction_members.append(reduce(operator.and_, conjunction_members)) + + return reduce(operator.or_, disjunction_members) + + +_filters_to_expression = _deprecate_api( + "_filters_to_expression", "filters_to_expression", + filters_to_expression, "10.0.0", DeprecationWarning) + + +# ---------------------------------------------------------------------- +# Reading a single Parquet file + + +class 
ParquetFile: + """ + Reader interface for a single Parquet file. + + Parameters + ---------- + source : str, pathlib.Path, pyarrow.NativeFile, or file-like object + Readable source. For passing bytes or buffer-like file containing a + Parquet file, use pyarrow.BufferReader. + metadata : FileMetaData, default None + Use existing metadata object, rather than reading from file. + common_metadata : FileMetaData, default None + Will be used in reads for pandas schema metadata if not found in the + main file's metadata, no other uses at the moment. + read_dictionary : list + List of column names to read directly as DictionaryArray. + memory_map : bool, default False + If the source is a file path, use a memory map to read file, which can + improve performance in some environments. + buffer_size : int, default 0 + If positive, perform read buffering when deserializing individual + column chunks. Otherwise IO calls are unbuffered. + pre_buffer : bool, default False + Coalesce and issue file reads in parallel to improve performance on + high-latency filesystems (e.g. S3). If True, Arrow will use a + background I/O thread pool. + coerce_int96_timestamp_unit : str, default None + Cast timestamps that are stored in INT96 format to a particular + resolution (e.g. 'ms'). Setting to None is equivalent to 'ns' + and therefore INT96 timestamps will be inferred as timestamps + in nanoseconds. + decryption_properties : FileDecryptionProperties, default None + File decryption properties for Parquet Modular Encryption. + thrift_string_size_limit : int, default None + If not None, override the maximum total string size allocated + when decoding Thrift structures. The default limit should be + sufficient for most Parquet files. + thrift_container_size_limit : int, default None + If not None, override the maximum total size of containers allocated + when decoding Thrift structures. The default limit should be + sufficient for most Parquet files. 
+ filesystem : FileSystem, default None + If nothing passed, will be inferred based on path. + Path will try to be found in the local on-disk filesystem otherwise + it will be parsed as an URI to determine the filesystem. + page_checksum_verification : bool, default False + If True, verify the checksum for each page read from the file. + + Examples + -------- + + Generate an example PyArrow Table and write it to Parquet file: + + >>> import pyarrow as pa + >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + + >>> import pyarrow.parquet as pq + >>> pq.write_table(table, 'example.parquet') + + Create a ``ParquetFile`` object from the Parquet file: + + >>> parquet_file = pq.ParquetFile('example.parquet') + + Read the data: + + >>> parquet_file.read() + pyarrow.Table + n_legs: int64 + animal: string + ---- + n_legs: [[2,2,4,4,5,100]] + animal: [["Flamingo","Parrot","Dog","Horse","Brittle stars","Centipede"]] + + Create a ParquetFile object with "animal" column as DictionaryArray: + + >>> parquet_file = pq.ParquetFile('example.parquet', + ... 
read_dictionary=["animal"]) + >>> parquet_file.read() + pyarrow.Table + n_legs: int64 + animal: dictionary + ---- + n_legs: [[2,2,4,4,5,100]] + animal: [ -- dictionary: + ["Flamingo","Parrot",...,"Brittle stars","Centipede"] -- indices: + [0,1,2,3,4,5]] + """ + + def __init__(self, source, *, metadata=None, common_metadata=None, + read_dictionary=None, memory_map=False, buffer_size=0, + pre_buffer=False, coerce_int96_timestamp_unit=None, + decryption_properties=None, thrift_string_size_limit=None, + thrift_container_size_limit=None, filesystem=None, + page_checksum_verification=False): + + self._close_source = getattr(source, 'closed', True) + + filesystem, source = _resolve_filesystem_and_path( + source, filesystem, memory_map=memory_map) + if filesystem is not None: + source = filesystem.open_input_file(source) + self._close_source = True # We opened it here, ensure we close it. + + self.reader = ParquetReader() + self.reader.open( + source, use_memory_map=memory_map, + buffer_size=buffer_size, pre_buffer=pre_buffer, + read_dictionary=read_dictionary, metadata=metadata, + coerce_int96_timestamp_unit=coerce_int96_timestamp_unit, + decryption_properties=decryption_properties, + thrift_string_size_limit=thrift_string_size_limit, + thrift_container_size_limit=thrift_container_size_limit, + page_checksum_verification=page_checksum_verification, + ) + self.common_metadata = common_metadata + self._nested_paths_by_prefix = self._build_nested_paths() + + def __enter__(self): + return self + + def __exit__(self, *args, **kwargs): + self.close() + + def _build_nested_paths(self): + paths = self.reader.column_paths + + result = defaultdict(list) + + for i, path in enumerate(paths): + key = path[0] + rest = path[1:] + while True: + result[key].append(i) + + if not rest: + break + + key = '.'.join((key, rest[0])) + rest = rest[1:] + + return result + + @property + def metadata(self): + """ + Return the Parquet metadata. 
+ """ + return self.reader.metadata + + @property + def schema(self): + """ + Return the Parquet schema, unconverted to Arrow types + """ + return self.metadata.schema + + @property + def schema_arrow(self): + """ + Return the inferred Arrow schema, converted from the whole Parquet + file's schema + + Examples + -------- + Generate an example Parquet file: + + >>> import pyarrow as pa + >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> import pyarrow.parquet as pq + >>> pq.write_table(table, 'example.parquet') + >>> parquet_file = pq.ParquetFile('example.parquet') + + Read the Arrow schema: + + >>> parquet_file.schema_arrow + n_legs: int64 + animal: string + """ + return self.reader.schema_arrow + + @property + def num_row_groups(self): + """ + Return the number of row groups of the Parquet file. + + Examples + -------- + >>> import pyarrow as pa + >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> import pyarrow.parquet as pq + >>> pq.write_table(table, 'example.parquet') + >>> parquet_file = pq.ParquetFile('example.parquet') + + >>> parquet_file.num_row_groups + 1 + """ + return self.reader.num_row_groups + + def close(self, force: bool = False): + if self._close_source or force: + self.reader.close() + + @property + def closed(self) -> bool: + return self.reader.closed + + def read_row_group(self, i, columns=None, use_threads=True, + use_pandas_metadata=False): + """ + Read a single row group from a Parquet file. + + Parameters + ---------- + i : int + Index of the individual row group that we want to read. + columns : list + If not None, only these columns will be read from the row group. A + column name may be a prefix of a nested field, e.g. 'a' will select + 'a.b', 'a.c', and 'a.d.e'. + use_threads : bool, default True + Perform multi-threaded column reads. 
+ use_pandas_metadata : bool, default False + If True and file has custom pandas schema metadata, ensure that + index columns are also loaded. + + Returns + ------- + pyarrow.table.Table + Content of the row group as a table (of columns) + + Examples + -------- + >>> import pyarrow as pa + >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> import pyarrow.parquet as pq + >>> pq.write_table(table, 'example.parquet') + >>> parquet_file = pq.ParquetFile('example.parquet') + + >>> parquet_file.read_row_group(0) + pyarrow.Table + n_legs: int64 + animal: string + ---- + n_legs: [[2,2,4,4,5,100]] + animal: [["Flamingo","Parrot",...,"Brittle stars","Centipede"]] + """ + column_indices = self._get_column_indices( + columns, use_pandas_metadata=use_pandas_metadata) + return self.reader.read_row_group(i, column_indices=column_indices, + use_threads=use_threads) + + def read_row_groups(self, row_groups, columns=None, use_threads=True, + use_pandas_metadata=False): + """ + Read a multiple row groups from a Parquet file. + + Parameters + ---------- + row_groups : list + Only these row groups will be read from the file. + columns : list + If not None, only these columns will be read from the row group. A + column name may be a prefix of a nested field, e.g. 'a' will select + 'a.b', 'a.c', and 'a.d.e'. + use_threads : bool, default True + Perform multi-threaded column reads. + use_pandas_metadata : bool, default False + If True and file has custom pandas schema metadata, ensure that + index columns are also loaded. + + Returns + ------- + pyarrow.table.Table + Content of the row groups as a table (of columns). + + Examples + -------- + >>> import pyarrow as pa + >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... 
"Brittle stars", "Centipede"]}) + >>> import pyarrow.parquet as pq + >>> pq.write_table(table, 'example.parquet') + >>> parquet_file = pq.ParquetFile('example.parquet') + + >>> parquet_file.read_row_groups([0,0]) + pyarrow.Table + n_legs: int64 + animal: string + ---- + n_legs: [[2,2,4,4,5,...,2,4,4,5,100]] + animal: [["Flamingo","Parrot","Dog",...,"Brittle stars","Centipede"]] + """ + column_indices = self._get_column_indices( + columns, use_pandas_metadata=use_pandas_metadata) + return self.reader.read_row_groups(row_groups, + column_indices=column_indices, + use_threads=use_threads) + + def iter_batches(self, batch_size=65536, row_groups=None, columns=None, + use_threads=True, use_pandas_metadata=False): + """ + Read streaming batches from a Parquet file. + + Parameters + ---------- + batch_size : int, default 64K + Maximum number of records to yield per batch. Batches may be + smaller if there aren't enough rows in the file. + row_groups : list + Only these row groups will be read from the file. + columns : list + If not None, only these columns will be read from the file. A + column name may be a prefix of a nested field, e.g. 'a' will select + 'a.b', 'a.c', and 'a.d.e'. + use_threads : boolean, default True + Perform multi-threaded column reads. + use_pandas_metadata : boolean, default False + If True and file has custom pandas schema metadata, ensure that + index columns are also loaded. + + Yields + ------ + pyarrow.RecordBatch + Contents of each batch as a record batch + + Examples + -------- + Generate an example Parquet file: + + >>> import pyarrow as pa + >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> import pyarrow.parquet as pq + >>> pq.write_table(table, 'example.parquet') + >>> parquet_file = pq.ParquetFile('example.parquet') + >>> for i in parquet_file.iter_batches(): + ... print("RecordBatch") + ... print(i.to_pandas()) + ... 
+ RecordBatch + n_legs animal + 0 2 Flamingo + 1 2 Parrot + 2 4 Dog + 3 4 Horse + 4 5 Brittle stars + 5 100 Centipede + """ + if row_groups is None: + row_groups = range(0, self.metadata.num_row_groups) + column_indices = self._get_column_indices( + columns, use_pandas_metadata=use_pandas_metadata) + + batches = self.reader.iter_batches(batch_size, + row_groups=row_groups, + column_indices=column_indices, + use_threads=use_threads) + return batches + + def read(self, columns=None, use_threads=True, use_pandas_metadata=False): + """ + Read a Table from Parquet format. + + Parameters + ---------- + columns : list + If not None, only these columns will be read from the file. A + column name may be a prefix of a nested field, e.g. 'a' will select + 'a.b', 'a.c', and 'a.d.e'. + use_threads : bool, default True + Perform multi-threaded column reads. + use_pandas_metadata : bool, default False + If True and file has custom pandas schema metadata, ensure that + index columns are also loaded. + + Returns + ------- + pyarrow.table.Table + Content of the file as a table (of columns). + + Examples + -------- + Generate an example Parquet file: + + >>> import pyarrow as pa + >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> import pyarrow.parquet as pq + >>> pq.write_table(table, 'example.parquet') + >>> parquet_file = pq.ParquetFile('example.parquet') + + Read a Table: + + >>> parquet_file.read(columns=["animal"]) + pyarrow.Table + animal: string + ---- + animal: [["Flamingo","Parrot",...,"Brittle stars","Centipede"]] + """ + column_indices = self._get_column_indices( + columns, use_pandas_metadata=use_pandas_metadata) + return self.reader.read_all(column_indices=column_indices, + use_threads=use_threads) + + def scan_contents(self, columns=None, batch_size=65536): + """ + Read contents of file for the given columns and batch size. 
+ + Notes + ----- + This function's primary purpose is benchmarking. + The scan is executed on a single thread. + + Parameters + ---------- + columns : list of integers, default None + Select columns to read, if None scan all columns. + batch_size : int, default 64K + Number of rows to read at a time internally. + + Returns + ------- + num_rows : int + Number of rows in file + + Examples + -------- + >>> import pyarrow as pa + >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> import pyarrow.parquet as pq + >>> pq.write_table(table, 'example.parquet') + >>> parquet_file = pq.ParquetFile('example.parquet') + + >>> parquet_file.scan_contents() + 6 + """ + column_indices = self._get_column_indices(columns) + return self.reader.scan_contents(column_indices, + batch_size=batch_size) + + def _get_column_indices(self, column_names, use_pandas_metadata=False): + if column_names is None: + return None + + indices = [] + + for name in column_names: + if name in self._nested_paths_by_prefix: + indices.extend(self._nested_paths_by_prefix[name]) + + if use_pandas_metadata: + file_keyvalues = self.metadata.metadata + common_keyvalues = (self.common_metadata.metadata + if self.common_metadata is not None + else None) + + if file_keyvalues and b'pandas' in file_keyvalues: + index_columns = _get_pandas_index_columns(file_keyvalues) + elif common_keyvalues and b'pandas' in common_keyvalues: + index_columns = _get_pandas_index_columns(common_keyvalues) + else: + index_columns = [] + + if indices is not None and index_columns: + indices += [self.reader.column_name_idx(descr) + for descr in index_columns + if not isinstance(descr, dict)] + + return indices + + +_SPARK_DISALLOWED_CHARS = re.compile('[ ,;{}()\n\t=]') + + +def _sanitized_spark_field_name(name): + return _SPARK_DISALLOWED_CHARS.sub('_', name) + + +def _sanitize_schema(schema, flavor): + if 'spark' in flavor: + 
sanitized_fields = [] + + schema_changed = False + + for field in schema: + name = field.name + sanitized_name = _sanitized_spark_field_name(name) + + if sanitized_name != name: + schema_changed = True + sanitized_field = pa.field(sanitized_name, field.type, + field.nullable, field.metadata) + sanitized_fields.append(sanitized_field) + else: + sanitized_fields.append(field) + + new_schema = pa.schema(sanitized_fields, metadata=schema.metadata) + return new_schema, schema_changed + else: + return schema, False + + +def _sanitize_table(table, new_schema, flavor): + # TODO: This will not handle prohibited characters in nested field names + if 'spark' in flavor: + column_data = [table[i] for i in range(table.num_columns)] + return pa.Table.from_arrays(column_data, schema=new_schema) + else: + return table + + +_parquet_writer_arg_docs = """version : {"1.0", "2.4", "2.6"}, default "2.6" + Determine which Parquet logical types are available for use, whether the + reduced set from the Parquet 1.x.x format or the expanded logical types + added in later format versions. + Files written with version='2.4' or '2.6' may not be readable in all + Parquet implementations, so version='1.0' is likely the choice that + maximizes file compatibility. + UINT32 and some logical types are only available with version '2.4'. + Nanosecond timestamps are only available with version '2.6'. + Other features such as compression algorithms or the new serialized + data page format must be enabled separately (see 'compression' and + 'data_page_version'). +use_dictionary : bool or list, default True + Specify if we should use dictionary encoding in general or only for + some columns. + When encoding the column, if the dictionary size is too large, the + column will fallback to ``PLAIN`` encoding. Specially, ``BOOLEAN`` type + doesn't support dictionary encoding. +compression : str or dict, default 'snappy' + Specify the compression codec, either on a general basis or per-column. 
+ Valid values: {'NONE', 'SNAPPY', 'GZIP', 'BROTLI', 'LZ4', 'ZSTD'}. +write_statistics : bool or list, default True + Specify if we should write statistics in general (default is True) or only + for some columns. +use_deprecated_int96_timestamps : bool, default None + Write timestamps to INT96 Parquet format. Defaults to False unless enabled + by flavor argument. This take priority over the coerce_timestamps option. +coerce_timestamps : str, default None + Cast timestamps to a particular resolution. If omitted, defaults are chosen + depending on `version`. For ``version='1.0'`` and ``version='2.4'``, + nanoseconds are cast to microseconds ('us'), while for + ``version='2.6'`` (the default), they are written natively without loss + of resolution. Seconds are always cast to milliseconds ('ms') by default, + as Parquet does not have any temporal type with seconds resolution. + If the casting results in loss of data, it will raise an exception + unless ``allow_truncated_timestamps=True`` is given. + Valid values: {None, 'ms', 'us'} +allow_truncated_timestamps : bool, default False + Allow loss of data when coercing timestamps to a particular + resolution. E.g. if microsecond or nanosecond data is lost when coercing to + 'ms', do not raise an exception. Passing ``allow_truncated_timestamp=True`` + will NOT result in the truncation exception being ignored unless + ``coerce_timestamps`` is not None. +data_page_size : int, default None + Set a target threshold for the approximate encoded size of data + pages within a column chunk (in bytes). If None, use the default data page + size of 1MByte. +flavor : {'spark'}, default None + Sanitize schema or set other compatibility options to work with + various target systems. +filesystem : FileSystem, default None + If nothing passed, will be inferred from `where` if path-like, else + `where` is already a file-like object so no filesystem is needed. 
+compression_level : int or dict, default None + Specify the compression level for a codec, either on a general basis or + per-column. If None is passed, arrow selects the compression level for + the compression codec in use. The compression level has a different + meaning for each codec, so you have to read the documentation of the + codec you are using. + An exception is thrown if the compression codec does not allow specifying + a compression level. +use_byte_stream_split : bool or list, default False + Specify if the byte_stream_split encoding should be used in general or + only for some columns. If both dictionary and byte_stream_stream are + enabled, then dictionary is preferred. + The byte_stream_split encoding is valid for integer, floating-point + and fixed-size binary data types (including decimals); it should be + combined with a compression codec so as to achieve size reduction. +column_encoding : string or dict, default None + Specify the encoding scheme on a per column basis. + Can only be used when ``use_dictionary`` is set to False, and + cannot be used in combination with ``use_byte_stream_split``. + Currently supported values: {'PLAIN', 'BYTE_STREAM_SPLIT', + 'DELTA_BINARY_PACKED', 'DELTA_LENGTH_BYTE_ARRAY', 'DELTA_BYTE_ARRAY'}. + Certain encodings are only compatible with certain data types. + Please refer to the encodings section of `Reading and writing Parquet + files `_. +data_page_version : {"1.0", "2.0"}, default "1.0" + The serialized Parquet data page format version to write, defaults to + 1.0. This does not impact the file schema logical types and Arrow to + Parquet type casting behavior; for that use the "version" option. +use_compliant_nested_type : bool, default True + Whether to write compliant Parquet nested type (lists) as defined + `here `_, defaults to ``True``. 
+ For ``use_compliant_nested_type=True``, this will write into a list + with 3-level structure where the middle level, named ``list``, + is a repeated group with a single field named ``element``:: + + group (LIST) { + repeated group list { + element; + } + } + + For ``use_compliant_nested_type=False``, this will also write into a list + with 3-level structure, where the name of the single field of the middle + level ``list`` is taken from the element name for nested columns in Arrow, + which defaults to ``item``:: + + group (LIST) { + repeated group list { + item; + } + } +encryption_properties : FileEncryptionProperties, default None + File encryption properties for Parquet Modular Encryption. + If None, no encryption will be done. + The encryption properties can be created using: + ``CryptoFactory.file_encryption_properties()``. +write_batch_size : int, default None + Number of values to write to a page at a time. If None, use the default of + 1024. ``write_batch_size`` is complementary to ``data_page_size``. If pages + are exceeding the ``data_page_size`` due to large column values, lowering + the batch size can help keep page sizes closer to the intended size. +dictionary_pagesize_limit : int, default None + Specify the dictionary page size limit per row group. If None, use the + default 1MB. +store_schema : bool, default True + By default, the Arrow schema is serialized and stored in the Parquet + file metadata (in the "ARROW:schema" key). When reading the file, + if this key is available, it will be used to more faithfully recreate + the original Arrow data. For example, for tz-aware timestamp columns + it will restore the timezone (Parquet only stores the UTC values without + timezone), or columns with duration type will be restored from the int64 + Parquet column. +write_page_index : bool, default False + Whether to write a page index in general for all columns. 
+    Writing statistics to the page index disables the old method of writing
+    statistics to each data page header. The page index makes statistics-based
+    filtering more efficient than the page header, as it gathers all the
+    statistics for a Parquet file in a single place, avoiding scattered I/O.
+    Note that the page index is not yet used on the read side by PyArrow.
+write_page_checksum : bool, default False
+    Whether to write page checksums in general for all columns.
+    Page checksums enable detection of data corruption, which might occur during
+    transmission or in storage.
+sorting_columns : Sequence of SortingColumn, default None
+    Specify the sort order of the data being written. The writer does not sort
+    the data nor does it verify that the data is sorted. The sort order is
+    written to the row group metadata, which can then be used by readers.
+store_decimal_as_integer : bool, default False
+    Allow decimals with 1 <= precision <= 18 to be stored as integers.
+    In Parquet, DECIMAL can be stored in any of the following physical types:
+    - int32: for 1 <= precision <= 9.
+    - int64: for 10 <= precision <= 18.
+    - fixed_len_byte_array: precision is limited by the array size.
+      Length n can store <= floor(log_10(2^(8*n - 1) - 1)) base-10 digits.
+    - binary: precision is unlimited. The minimum number of bytes to store the
+      unscaled value is used.
+
+    By default, this is DISABLED and all decimal types annotate fixed_len_byte_array.
+    When enabled, the writer will use the following physical types to store decimals:
+    - int32: for 1 <= precision <= 9.
+    - int64: for 10 <= precision <= 18.
+    - fixed_len_byte_array: for precision > 18.
+
+    As a consequence, decimal columns stored in integer types are more compact.
+"""
+
+_parquet_writer_example_doc = """\
+Generate an example PyArrow Table and RecordBatch:
+
+>>> import pyarrow as pa
+>>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100],
+...                   'animal': ["Flamingo", "Parrot", "Dog", "Horse",
+...
"Brittle stars", "Centipede"]}) +>>> batch = pa.record_batch([[2, 2, 4, 4, 5, 100], +... ["Flamingo", "Parrot", "Dog", "Horse", +... "Brittle stars", "Centipede"]], +... names=['n_legs', 'animal']) + +create a ParquetWriter object: + +>>> import pyarrow.parquet as pq +>>> writer = pq.ParquetWriter('example.parquet', table.schema) + +and write the Table into the Parquet file: + +>>> writer.write_table(table) +>>> writer.close() + +>>> pq.read_table('example.parquet').to_pandas() + n_legs animal +0 2 Flamingo +1 2 Parrot +2 4 Dog +3 4 Horse +4 5 Brittle stars +5 100 Centipede + +create a ParquetWriter object for the RecordBatch: + +>>> writer2 = pq.ParquetWriter('example2.parquet', batch.schema) + +and write the RecordBatch into the Parquet file: + +>>> writer2.write_batch(batch) +>>> writer2.close() + +>>> pq.read_table('example2.parquet').to_pandas() + n_legs animal +0 2 Flamingo +1 2 Parrot +2 4 Dog +3 4 Horse +4 5 Brittle stars +5 100 Centipede +""" + + +class ParquetWriter: + + __doc__ = """ +Class for incrementally building a Parquet file for Arrow tables. + +Parameters +---------- +where : path or file-like object +schema : pyarrow.Schema +{} +writer_engine_version : unused +**options : dict + If options contains a key `metadata_collector` then the + corresponding value is assumed to be a list (or any object with + `.append` method) that will be filled with the file metadata instance + of the written file. 
+ +Examples +-------- +{} +""".format(_parquet_writer_arg_docs, _parquet_writer_example_doc) + + def __init__(self, where, schema, filesystem=None, + flavor=None, + version='2.6', + use_dictionary=True, + compression='snappy', + write_statistics=True, + use_deprecated_int96_timestamps=None, + compression_level=None, + use_byte_stream_split=False, + column_encoding=None, + writer_engine_version=None, + data_page_version='1.0', + use_compliant_nested_type=True, + encryption_properties=None, + write_batch_size=None, + dictionary_pagesize_limit=None, + store_schema=True, + write_page_index=False, + write_page_checksum=False, + sorting_columns=None, + store_decimal_as_integer=False, + **options): + if use_deprecated_int96_timestamps is None: + # Use int96 timestamps for Spark + if flavor is not None and 'spark' in flavor: + use_deprecated_int96_timestamps = True + else: + use_deprecated_int96_timestamps = False + + self.flavor = flavor + if flavor is not None: + schema, self.schema_changed = _sanitize_schema(schema, flavor) + else: + self.schema_changed = False + + self.schema = schema + self.where = where + + # If we open a file using a filesystem, store file handle so we can be + # sure to close it when `self.close` is called. + self.file_handle = None + + filesystem, path = _resolve_filesystem_and_path(where, filesystem) + if filesystem is not None: + # ARROW-10480: do not auto-detect compression. While + # a filename like foo.parquet.gz is nonconforming, it + # shouldn't implicitly apply compression. 
+ sink = self.file_handle = filesystem.open_output_stream( + path, compression=None) + else: + sink = where + self._metadata_collector = options.pop('metadata_collector', None) + engine_version = 'V2' + self.writer = _parquet.ParquetWriter( + sink, schema, + version=version, + compression=compression, + use_dictionary=use_dictionary, + write_statistics=write_statistics, + use_deprecated_int96_timestamps=use_deprecated_int96_timestamps, + compression_level=compression_level, + use_byte_stream_split=use_byte_stream_split, + column_encoding=column_encoding, + writer_engine_version=engine_version, + data_page_version=data_page_version, + use_compliant_nested_type=use_compliant_nested_type, + encryption_properties=encryption_properties, + write_batch_size=write_batch_size, + dictionary_pagesize_limit=dictionary_pagesize_limit, + store_schema=store_schema, + write_page_index=write_page_index, + write_page_checksum=write_page_checksum, + sorting_columns=sorting_columns, + store_decimal_as_integer=store_decimal_as_integer, + **options) + self.is_open = True + + def __del__(self): + if getattr(self, 'is_open', False): + self.close() + + def __enter__(self): + return self + + def __exit__(self, *args, **kwargs): + self.close() + # return false since we want to propagate exceptions + return False + + def write(self, table_or_batch, row_group_size=None): + """ + Write RecordBatch or Table to the Parquet file. + + Parameters + ---------- + table_or_batch : {RecordBatch, Table} + row_group_size : int, default None + Maximum number of rows in each written row group. If None, + the row group size will be the minimum of the input + table or batch length and 1024 * 1024. 
+ """ + if isinstance(table_or_batch, pa.RecordBatch): + self.write_batch(table_or_batch, row_group_size) + elif isinstance(table_or_batch, pa.Table): + self.write_table(table_or_batch, row_group_size) + else: + raise TypeError(type(table_or_batch)) + + def write_batch(self, batch, row_group_size=None): + """ + Write RecordBatch to the Parquet file. + + Parameters + ---------- + batch : RecordBatch + row_group_size : int, default None + Maximum number of rows in written row group. If None, the + row group size will be the minimum of the RecordBatch + size and 1024 * 1024. If set larger than 64Mi then 64Mi + will be used instead. + """ + table = pa.Table.from_batches([batch], batch.schema) + self.write_table(table, row_group_size) + + def write_table(self, table, row_group_size=None): + """ + Write Table to the Parquet file. + + Parameters + ---------- + table : Table + row_group_size : int, default None + Maximum number of rows in each written row group. If None, + the row group size will be the minimum of the Table size + and 1024 * 1024. If set larger than 64Mi then 64Mi will + be used instead. + + """ + if self.schema_changed: + table = _sanitize_table(table, self.schema, self.flavor) + assert self.is_open + + if not table.schema.equals(self.schema, check_metadata=False): + msg = ('Table schema does not match schema used to create file: ' + '\ntable:\n{!s} vs. \nfile:\n{!s}' + .format(table.schema, self.schema)) + raise ValueError(msg) + + self.writer.write_table(table, row_group_size=row_group_size) + + def close(self): + """ + Close the connection to the Parquet file. + """ + if self.is_open: + self.writer.close() + self.is_open = False + if self._metadata_collector is not None: + self._metadata_collector.append(self.writer.metadata) + if self.file_handle is not None: + self.file_handle.close() + + def add_key_value_metadata(self, key_value_metadata): + """ + Add key-value metadata to the file. + This will overwrite any existing metadata with the same key. 
+ + Parameters + ---------- + key_value_metadata : dict + Keys and values must be string-like / coercible to bytes. + """ + assert self.is_open + self.writer.add_key_value_metadata(key_value_metadata) + + +def _get_pandas_index_columns(keyvalues): + return (json.loads(keyvalues[b'pandas'].decode('utf8')) + ['index_columns']) + + +EXCLUDED_PARQUET_PATHS = {'_SUCCESS'} + + +_read_docstring_common = """\ +read_dictionary : list, default None + List of names or column paths (for nested types) to read directly + as DictionaryArray. Only supported for BYTE_ARRAY storage. To read + a flat column as dictionary-encoded pass the column name. For + nested types, you must pass the full column "path", which could be + something like level1.level2.list.item. Refer to the Parquet + file's schema to obtain the paths. +memory_map : bool, default False + If the source is a file path, use a memory map to read file, which can + improve performance in some environments. +buffer_size : int, default 0 + If positive, perform read buffering when deserializing individual + column chunks. Otherwise IO calls are unbuffered. +partitioning : pyarrow.dataset.Partitioning or str or list of str, \ +default "hive" + The partitioning scheme for a partitioned dataset. The default of "hive" + assumes directory names with key=value pairs like "/year=2009/month=11". + In addition, a scheme like "/2009/11" is also supported, in which case + you need to specify the field names or a full schema. See the + ``pyarrow.dataset.partitioning()`` function for more details.""" + + +_parquet_dataset_example = """\ +Generate an example PyArrow Table and write it to a partitioned dataset: + +>>> import pyarrow as pa +>>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021], +... 'n_legs': [2, 2, 4, 4, 5, 100], +... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", +... "Brittle stars", "Centipede"]}) +>>> import pyarrow.parquet as pq +>>> pq.write_to_dataset(table, root_path='dataset_v2', +... 
partition_cols=['year']) + +create a ParquetDataset object from the dataset source: + +>>> dataset = pq.ParquetDataset('dataset_v2/') + +and read the data: + +>>> dataset.read().to_pandas() + n_legs animal year +0 5 Brittle stars 2019 +1 2 Flamingo 2020 +2 4 Dog 2021 +3 100 Centipede 2021 +4 2 Parrot 2022 +5 4 Horse 2022 + +create a ParquetDataset object with filter: + +>>> dataset = pq.ParquetDataset('dataset_v2/', +... filters=[('n_legs','=',4)]) +>>> dataset.read().to_pandas() + n_legs animal year +0 4 Dog 2021 +1 4 Horse 2022 +""" + + +class ParquetDataset: + __doc__ = """ +Encapsulates details of reading a complete Parquet dataset possibly +consisting of multiple files and partitions in subdirectories. + +Parameters +---------- +path_or_paths : str or List[str] + A directory name, single file name, or list of file names. +filesystem : FileSystem, default None + If nothing passed, will be inferred based on path. + Path will try to be found in the local on-disk filesystem otherwise + it will be parsed as an URI to determine the filesystem. +schema : pyarrow.parquet.Schema + Optionally provide the Schema for the Dataset, in which case it will + not be inferred from the source. +filters : pyarrow.compute.Expression or List[Tuple] or List[List[Tuple]], default None + Rows which do not match the filter predicate will be removed from scanned + data. Partition keys embedded in a nested directory structure will be + exploited to avoid loading files at all if they contain no matching rows. + Within-file level filtering and different partitioning schemes are supported. + + {1} +{0} +ignore_prefixes : list, optional + Files matching any of these prefixes will be ignored by the + discovery process. + This is matched to the basename of a path. + By default this is ['.', '_']. + Note that discovery happens only if a directory is passed as source. 
+pre_buffer : bool, default True + Coalesce and issue file reads in parallel to improve performance on + high-latency filesystems (e.g. S3, GCS). If True, Arrow will use a + background I/O thread pool. If using a filesystem layer that itself + performs readahead (e.g. fsspec's S3FS), disable readahead for best + results. Set to False if you want to prioritize minimal memory usage + over maximum speed. +coerce_int96_timestamp_unit : str, default None + Cast timestamps that are stored in INT96 format to a particular resolution + (e.g. 'ms'). Setting to None is equivalent to 'ns' and therefore INT96 + timestamps will be inferred as timestamps in nanoseconds. +decryption_properties : FileDecryptionProperties or None + File-level decryption properties. + The decryption properties can be created using + ``CryptoFactory.file_decryption_properties()``. +thrift_string_size_limit : int, default None + If not None, override the maximum total string size allocated + when decoding Thrift structures. The default limit should be + sufficient for most Parquet files. +thrift_container_size_limit : int, default None + If not None, override the maximum total size of containers allocated + when decoding Thrift structures. The default limit should be + sufficient for most Parquet files. +page_checksum_verification : bool, default False + If True, verify the page checksum for each page read from the file. +use_legacy_dataset : bool, optional + Deprecated and has no effect from PyArrow version 15.0.0. 
+ +Examples +-------- +{2} +""".format(_read_docstring_common, _DNF_filter_doc, _parquet_dataset_example) + + def __init__(self, path_or_paths, filesystem=None, schema=None, *, filters=None, + read_dictionary=None, memory_map=False, buffer_size=None, + partitioning="hive", ignore_prefixes=None, pre_buffer=True, + coerce_int96_timestamp_unit=None, + decryption_properties=None, thrift_string_size_limit=None, + thrift_container_size_limit=None, + page_checksum_verification=False, + use_legacy_dataset=None): + + if use_legacy_dataset is not None: + warnings.warn( + "Passing 'use_legacy_dataset' is deprecated as of pyarrow 15.0.0 " + "and will be removed in a future version.", + FutureWarning, stacklevel=2) + + import pyarrow.dataset as ds + + # map format arguments + read_options = { + "pre_buffer": pre_buffer, + "coerce_int96_timestamp_unit": coerce_int96_timestamp_unit, + "thrift_string_size_limit": thrift_string_size_limit, + "thrift_container_size_limit": thrift_container_size_limit, + "page_checksum_verification": page_checksum_verification, + } + if buffer_size: + read_options.update(use_buffered_stream=True, + buffer_size=buffer_size) + if read_dictionary is not None: + read_options.update(dictionary_columns=read_dictionary) + + if decryption_properties is not None: + read_options.update(decryption_properties=decryption_properties) + + self._filter_expression = None + if filters is not None: + self._filter_expression = filters_to_expression(filters) + + # map old filesystems to new one + if filesystem is not None: + filesystem = _ensure_filesystem( + filesystem, use_mmap=memory_map) + elif filesystem is None and memory_map: + # if memory_map is specified, assume local file system (string + # path can in principle be URI for any filesystem) + filesystem = LocalFileSystem(use_mmap=memory_map) + + # This needs to be checked after _ensure_filesystem, because that + # handles the case of an fsspec LocalFileSystem + if ( + hasattr(path_or_paths, "__fspath__") and + 
filesystem is not None and + not isinstance(filesystem, LocalFileSystem) + ): + raise TypeError( + "Path-like objects with __fspath__ must only be used with " + f"local file systems, not {type(filesystem)}" + ) + + # check for single fragment dataset or dataset directory + single_file = None + self._base_dir = None + if not isinstance(path_or_paths, list): + if _is_path_like(path_or_paths): + path_or_paths = _stringify_path(path_or_paths) + if filesystem is None: + # path might be a URI describing the FileSystem as well + try: + filesystem, path_or_paths = FileSystem.from_uri( + path_or_paths) + except ValueError: + filesystem = LocalFileSystem(use_mmap=memory_map) + finfo = filesystem.get_file_info(path_or_paths) + if finfo.type == FileType.Directory: + self._base_dir = path_or_paths + else: + single_file = path_or_paths + + parquet_format = ds.ParquetFileFormat(**read_options) + + if single_file is not None: + fragment = parquet_format.make_fragment(single_file, filesystem) + + self._dataset = ds.FileSystemDataset( + [fragment], schema=schema or fragment.physical_schema, + format=parquet_format, + filesystem=fragment.filesystem + ) + return + + # check partitioning to enable dictionary encoding + if partitioning == "hive": + partitioning = ds.HivePartitioning.discover( + infer_dictionary=True) + + self._dataset = ds.dataset(path_or_paths, filesystem=filesystem, + schema=schema, format=parquet_format, + partitioning=partitioning, + ignore_prefixes=ignore_prefixes) + + def equals(self, other): + if not isinstance(other, ParquetDataset): + raise TypeError('`other` must be an instance of ParquetDataset') + + return (self.schema == other.schema and + self._dataset.format == other._dataset.format and + self.filesystem == other.filesystem and + # self.fragments == other.fragments and + self.files == other.files) + + def __eq__(self, other): + try: + return self.equals(other) + except TypeError: + return NotImplemented + + @property + def schema(self): + """ + Schema of 
the Dataset. + + Examples + -------- + Generate an example dataset: + + >>> import pyarrow as pa + >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021], + ... 'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> import pyarrow.parquet as pq + >>> pq.write_to_dataset(table, root_path='dataset_v2_schema', + ... partition_cols=['year']) + >>> dataset = pq.ParquetDataset('dataset_v2_schema/') + + Read the schema: + + >>> dataset.schema + n_legs: int64 + animal: string + year: dictionary + """ + return self._dataset.schema + + def read(self, columns=None, use_threads=True, use_pandas_metadata=False): + """ + Read (multiple) Parquet files as a single pyarrow.Table. + + Parameters + ---------- + columns : List[str] + Names of columns to read from the dataset. The partition fields + are not automatically included. + use_threads : bool, default True + Perform multi-threaded column reads. + use_pandas_metadata : bool, default False + If True and file has custom pandas schema metadata, ensure that + index columns are also loaded. + + Returns + ------- + pyarrow.Table + Content of the file as a table (of columns). + + Examples + -------- + Generate an example dataset: + + >>> import pyarrow as pa + >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021], + ... 'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> import pyarrow.parquet as pq + >>> pq.write_to_dataset(table, root_path='dataset_v2_read', + ... 
partition_cols=['year']) + >>> dataset = pq.ParquetDataset('dataset_v2_read/') + + Read the dataset: + + >>> dataset.read(columns=["n_legs"]) + pyarrow.Table + n_legs: int64 + ---- + n_legs: [[5],[2],[4,100],[2,4]] + """ + # if use_pandas_metadata, we need to include index columns in the + # column selection, to be able to restore those in the pandas DataFrame + metadata = self.schema.metadata or {} + + if use_pandas_metadata: + # if the dataset schema metadata itself doesn't have pandas + # then try to get this from common file (for backwards compat) + if b"pandas" not in metadata: + common_metadata = self._get_common_pandas_metadata() + if common_metadata: + metadata = common_metadata + + if columns is not None and use_pandas_metadata: + if metadata and b'pandas' in metadata: + # RangeIndex can be represented as dict instead of column name + index_columns = [ + col for col in _get_pandas_index_columns(metadata) + if not isinstance(col, dict) + ] + columns = ( + list(columns) + list(set(index_columns) - set(columns)) + ) + + table = self._dataset.to_table( + columns=columns, filter=self._filter_expression, + use_threads=use_threads + ) + + # if use_pandas_metadata, restore the pandas metadata (which gets + # lost if doing a specific `columns` selection in to_table) + if use_pandas_metadata: + if metadata and b"pandas" in metadata: + new_metadata = table.schema.metadata or {} + new_metadata.update({b"pandas": metadata[b"pandas"]}) + table = table.replace_schema_metadata(new_metadata) + + return table + + def _get_common_pandas_metadata(self): + + if not self._base_dir: + return None + + metadata = None + for name in ["_common_metadata", "_metadata"]: + metadata_path = os.path.join(str(self._base_dir), name) + finfo = self.filesystem.get_file_info(metadata_path) + if finfo.is_file: + pq_meta = read_metadata( + metadata_path, filesystem=self.filesystem) + metadata = pq_meta.metadata + if metadata and b'pandas' in metadata: + break + + return metadata + + def 
read_pandas(self, **kwargs): + """ + Read dataset including pandas metadata, if any. Other arguments passed + through to :func:`read`, see docstring for further details. + + Parameters + ---------- + **kwargs : optional + Additional options for :func:`read` + + Examples + -------- + Generate an example parquet file: + + >>> import pyarrow as pa + >>> import pandas as pd + >>> df = pd.DataFrame({'year': [2020, 2022, 2021, 2022, 2019, 2021], + ... 'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> table = pa.Table.from_pandas(df) + >>> import pyarrow.parquet as pq + >>> pq.write_table(table, 'table_V2.parquet') + >>> dataset = pq.ParquetDataset('table_V2.parquet') + + Read the dataset with pandas metadata: + + >>> dataset.read_pandas(columns=["n_legs"]) + pyarrow.Table + n_legs: int64 + ---- + n_legs: [[2,2,4,4,5,100]] + + >>> dataset.read_pandas(columns=["n_legs"]).schema.pandas_metadata + {'index_columns': [{'kind': 'range', 'name': None, 'start': 0, ...} + """ + return self.read(use_pandas_metadata=True, **kwargs) + + @property + def fragments(self): + """ + A list of the Dataset source fragments or pieces with absolute + file paths. + + Examples + -------- + Generate an example dataset: + + >>> import pyarrow as pa + >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021], + ... 'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + >>> import pyarrow.parquet as pq + >>> pq.write_to_dataset(table, root_path='dataset_v2_fragments', + ... partition_cols=['year']) + >>> dataset = pq.ParquetDataset('dataset_v2_fragments/') + + List the fragments: + + >>> dataset.fragments + [>> import pyarrow as pa + >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021], + ... 'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... 
"Brittle stars", "Centipede"]}) + >>> import pyarrow.parquet as pq + >>> pq.write_to_dataset(table, root_path='dataset_v2_files', + ... partition_cols=['year']) + >>> dataset = pq.ParquetDataset('dataset_v2_files/') + + List the files: + + >>> dataset.files + ['dataset_v2_files/year=2019/...-0.parquet', ... + """ + return self._dataset.files + + @property + def filesystem(self): + """ + The filesystem type of the Dataset source. + """ + return self._dataset.filesystem + + @property + def partitioning(self): + """ + The partitioning of the Dataset source, if discovered. + """ + return self._dataset.partitioning + + +_read_table_docstring = """ +{0} + +Parameters +---------- +source : str, pyarrow.NativeFile, or file-like object + If a string passed, can be a single file name or directory name. For + file-like objects, only read a single file. Use pyarrow.BufferReader to + read a file contained in a bytes or buffer-like object. +columns : list + If not None, only these columns will be read from the file. A column + name may be a prefix of a nested field, e.g. 'a' will select 'a.b', + 'a.c', and 'a.d.e'. If empty, no columns will be read. Note + that the table will still have the correct num_rows set despite having + no columns. +use_threads : bool, default True + Perform multi-threaded column reads. +schema : Schema, optional + Optionally provide the Schema for the parquet dataset, in which case it + will not be inferred from the source. +{1} +filesystem : FileSystem, default None + If nothing passed, will be inferred based on path. + Path will try to be found in the local on-disk filesystem otherwise + it will be parsed as an URI to determine the filesystem. +filters : pyarrow.compute.Expression or List[Tuple] or List[List[Tuple]], default None + Rows which do not match the filter predicate will be removed from scanned + data. Partition keys embedded in a nested directory structure will be + exploited to avoid loading files at all if they contain no matching rows. 
+ Within-file level filtering and different partitioning schemes are supported. + + {3} +use_legacy_dataset : bool, optional + Deprecated and has no effect from PyArrow version 15.0.0. +ignore_prefixes : list, optional + Files matching any of these prefixes will be ignored by the + discovery process. + This is matched to the basename of a path. + By default this is ['.', '_']. + Note that discovery happens only if a directory is passed as source. +pre_buffer : bool, default True + Coalesce and issue file reads in parallel to improve performance on + high-latency filesystems (e.g. S3). If True, Arrow will use a + background I/O thread pool. If using a filesystem layer that itself + performs readahead (e.g. fsspec's S3FS), disable readahead for best + results. +coerce_int96_timestamp_unit : str, default None + Cast timestamps that are stored in INT96 format to a particular + resolution (e.g. 'ms'). Setting to None is equivalent to 'ns' + and therefore INT96 timestamps will be inferred as timestamps + in nanoseconds. +decryption_properties : FileDecryptionProperties or None + File-level decryption properties. + The decryption properties can be created using + ``CryptoFactory.file_decryption_properties()``. +thrift_string_size_limit : int, default None + If not None, override the maximum total string size allocated + when decoding Thrift structures. The default limit should be + sufficient for most Parquet files. +thrift_container_size_limit : int, default None + If not None, override the maximum total size of containers allocated + when decoding Thrift structures. The default limit should be + sufficient for most Parquet files. +page_checksum_verification : bool, default False + If True, verify the checksum for each page read from the file. 
+ +Returns +------- +{2} + +{4} +""" + +_read_table_example = """\ + +Examples +-------- + +Generate an example PyArrow Table and write it to a partitioned dataset: + +>>> import pyarrow as pa +>>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021], +... 'n_legs': [2, 2, 4, 4, 5, 100], +... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", +... "Brittle stars", "Centipede"]}) +>>> import pyarrow.parquet as pq +>>> pq.write_to_dataset(table, root_path='dataset_name_2', +... partition_cols=['year']) + +Read the data: + +>>> pq.read_table('dataset_name_2').to_pandas() + n_legs animal year +0 5 Brittle stars 2019 +1 2 Flamingo 2020 +2 4 Dog 2021 +3 100 Centipede 2021 +4 2 Parrot 2022 +5 4 Horse 2022 + + +Read only a subset of columns: + +>>> pq.read_table('dataset_name_2', columns=["n_legs", "animal"]) +pyarrow.Table +n_legs: int64 +animal: string +---- +n_legs: [[5],[2],[4,100],[2,4]] +animal: [["Brittle stars"],["Flamingo"],["Dog","Centipede"],["Parrot","Horse"]] + +Read a subset of columns and read one column as DictionaryArray: + +>>> pq.read_table('dataset_name_2', columns=["n_legs", "animal"], +... read_dictionary=["animal"]) +pyarrow.Table +n_legs: int64 +animal: dictionary +---- +n_legs: [[5],[2],[4,100],[2,4]] +animal: [ -- dictionary: +["Brittle stars"] -- indices: +[0], -- dictionary: +["Flamingo"] -- indices: +[0], -- dictionary: +["Dog","Centipede"] -- indices: +[0,1], -- dictionary: +["Parrot","Horse"] -- indices: +[0,1]] + +Read the table with filter: + +>>> pq.read_table('dataset_name_2', columns=["n_legs", "animal"], +... 
filters=[('n_legs','<',4)]).to_pandas() + n_legs animal +0 2 Flamingo +1 2 Parrot + +Read data from a single Parquet file: + +>>> pq.write_table(table, 'example.parquet') +>>> pq.read_table('dataset_name_2').to_pandas() + n_legs animal year +0 5 Brittle stars 2019 +1 2 Flamingo 2020 +2 4 Dog 2021 +3 100 Centipede 2021 +4 2 Parrot 2022 +5 4 Horse 2022 +""" + + +def read_table(source, *, columns=None, use_threads=True, + schema=None, use_pandas_metadata=False, read_dictionary=None, + memory_map=False, buffer_size=0, partitioning="hive", + filesystem=None, filters=None, use_legacy_dataset=None, + ignore_prefixes=None, pre_buffer=True, + coerce_int96_timestamp_unit=None, + decryption_properties=None, thrift_string_size_limit=None, + thrift_container_size_limit=None, + page_checksum_verification=False): + + if use_legacy_dataset is not None: + warnings.warn( + "Passing 'use_legacy_dataset' is deprecated as of pyarrow 15.0.0 " + "and will be removed in a future version.", + FutureWarning, stacklevel=2) + + try: + dataset = ParquetDataset( + source, + schema=schema, + filesystem=filesystem, + partitioning=partitioning, + memory_map=memory_map, + read_dictionary=read_dictionary, + buffer_size=buffer_size, + filters=filters, + ignore_prefixes=ignore_prefixes, + pre_buffer=pre_buffer, + coerce_int96_timestamp_unit=coerce_int96_timestamp_unit, + decryption_properties=decryption_properties, + thrift_string_size_limit=thrift_string_size_limit, + thrift_container_size_limit=thrift_container_size_limit, + page_checksum_verification=page_checksum_verification, + ) + except ImportError: + # fall back on ParquetFile for simple cases when pyarrow.dataset + # module is not available + if filters is not None: + raise ValueError( + "the 'filters' keyword is not supported when the " + "pyarrow.dataset module is not available" + ) + if partitioning != "hive": + raise ValueError( + "the 'partitioning' keyword is not supported when the " + "pyarrow.dataset module is not available" + ) + if 
schema is not None: + raise ValueError( + "the 'schema' argument is not supported when the " + "pyarrow.dataset module is not available" + ) + filesystem, path = _resolve_filesystem_and_path(source, filesystem) + if filesystem is not None: + source = filesystem.open_input_file(path) + # TODO test that source is not a directory or a list + dataset = ParquetFile( + source, read_dictionary=read_dictionary, + memory_map=memory_map, buffer_size=buffer_size, + pre_buffer=pre_buffer, + coerce_int96_timestamp_unit=coerce_int96_timestamp_unit, + decryption_properties=decryption_properties, + thrift_string_size_limit=thrift_string_size_limit, + thrift_container_size_limit=thrift_container_size_limit, + page_checksum_verification=page_checksum_verification, + ) + + return dataset.read(columns=columns, use_threads=use_threads, + use_pandas_metadata=use_pandas_metadata) + + +read_table.__doc__ = _read_table_docstring.format( + """Read a Table from Parquet format""", + "\n".join(("""use_pandas_metadata : bool, default False + If True and file has custom pandas schema metadata, ensure that + index columns are also loaded.""", _read_docstring_common)), + """pyarrow.Table + Content of the file as a table (of columns)""", + _DNF_filter_doc, _read_table_example) + + +def read_pandas(source, columns=None, **kwargs): + return read_table( + source, columns=columns, use_pandas_metadata=True, **kwargs + ) + + +read_pandas.__doc__ = _read_table_docstring.format( + 'Read a Table from Parquet format, also reading DataFrame\n' + 'index values if known in the file metadata', + "\n".join((_read_docstring_common, + """**kwargs + additional options for :func:`read_table`""")), + """pyarrow.Table + Content of the file as a Table of Columns, including DataFrame + indexes as columns""", + _DNF_filter_doc, "") + + +def write_table(table, where, row_group_size=None, version='2.6', + use_dictionary=True, compression='snappy', + write_statistics=True, + use_deprecated_int96_timestamps=None, + 
coerce_timestamps=None, + allow_truncated_timestamps=False, + data_page_size=None, flavor=None, + filesystem=None, + compression_level=None, + use_byte_stream_split=False, + column_encoding=None, + data_page_version='1.0', + use_compliant_nested_type=True, + encryption_properties=None, + write_batch_size=None, + dictionary_pagesize_limit=None, + store_schema=True, + write_page_index=False, + write_page_checksum=False, + sorting_columns=None, + store_decimal_as_integer=False, + **kwargs): + # Implementor's note: when adding keywords here / updating defaults, also + # update it in write_to_dataset and _dataset_parquet.pyx ParquetFileWriteOptions + row_group_size = kwargs.pop('chunk_size', row_group_size) + use_int96 = use_deprecated_int96_timestamps + try: + with ParquetWriter( + where, table.schema, + filesystem=filesystem, + version=version, + flavor=flavor, + use_dictionary=use_dictionary, + write_statistics=write_statistics, + coerce_timestamps=coerce_timestamps, + data_page_size=data_page_size, + allow_truncated_timestamps=allow_truncated_timestamps, + compression=compression, + use_deprecated_int96_timestamps=use_int96, + compression_level=compression_level, + use_byte_stream_split=use_byte_stream_split, + column_encoding=column_encoding, + data_page_version=data_page_version, + use_compliant_nested_type=use_compliant_nested_type, + encryption_properties=encryption_properties, + write_batch_size=write_batch_size, + dictionary_pagesize_limit=dictionary_pagesize_limit, + store_schema=store_schema, + write_page_index=write_page_index, + write_page_checksum=write_page_checksum, + sorting_columns=sorting_columns, + store_decimal_as_integer=store_decimal_as_integer, + **kwargs) as writer: + writer.write_table(table, row_group_size=row_group_size) + except Exception: + if _is_path_like(where): + try: + os.remove(_stringify_path(where)) + except os.error: + pass + raise + + +_write_table_example = """\ +Generate an example PyArrow Table: + +>>> import pyarrow as pa 
+>>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100], +... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", +... "Brittle stars", "Centipede"]}) + +and write the Table into Parquet file: + +>>> import pyarrow.parquet as pq +>>> pq.write_table(table, 'example.parquet') + +Defining row group size for the Parquet file: + +>>> pq.write_table(table, 'example.parquet', row_group_size=3) + +Defining row group compression (default is Snappy): + +>>> pq.write_table(table, 'example.parquet', compression='none') + +Defining row group compression and encoding per-column: + +>>> pq.write_table(table, 'example.parquet', +... compression={'n_legs': 'snappy', 'animal': 'gzip'}, +... use_dictionary=['n_legs', 'animal']) + +Defining column encoding per-column: + +>>> pq.write_table(table, 'example.parquet', +... column_encoding={'animal':'PLAIN'}, +... use_dictionary=False) +""" + +write_table.__doc__ = """ +Write a Table to Parquet format. + +Parameters +---------- +table : pyarrow.Table +where : string or pyarrow.NativeFile +row_group_size : int + Maximum number of rows in each written row group. If None, the + row group size will be the minimum of the Table size and + 1024 * 1024. +{} +**kwargs : optional + Additional options for ParquetWriter + +Examples +-------- +{} +""".format(_parquet_writer_arg_docs, _write_table_example) + + +def write_to_dataset(table, root_path, partition_cols=None, + filesystem=None, use_legacy_dataset=None, + schema=None, partitioning=None, + basename_template=None, use_threads=None, + file_visitor=None, existing_data_behavior=None, + **kwargs): + """Wrapper around dataset.write_dataset for writing a Table to + Parquet format by partitions. 
+ For each combination of partition columns and values, + a subdirectories are created in the following + manner: + + root_dir/ + group1=value1 + group2=value1 + .parquet + group2=value2 + .parquet + group1=valueN + group2=value1 + .parquet + group2=valueN + .parquet + + Parameters + ---------- + table : pyarrow.Table + root_path : str, pathlib.Path + The root directory of the dataset. + partition_cols : list, + Column names by which to partition the dataset. + Columns are partitioned in the order they are given. + filesystem : FileSystem, default None + If nothing passed, will be inferred based on path. + Path will try to be found in the local on-disk filesystem otherwise + it will be parsed as an URI to determine the filesystem. + use_legacy_dataset : bool, optional + Deprecated and has no effect from PyArrow version 15.0.0. + schema : Schema, optional + This Schema of the dataset. + partitioning : Partitioning or list[str], optional + The partitioning scheme specified with the + ``pyarrow.dataset.partitioning()`` function or a list of field names. + When providing a list of field names, you can use + ``partitioning_flavor`` to drive which partitioning type should be + used. + basename_template : str, optional + A template string used to generate basenames of written data files. + The token '{i}' will be replaced with an automatically incremented + integer. If not specified, it defaults to "guid-{i}.parquet". + use_threads : bool, default True + Write files in parallel. If enabled, then maximum parallelism will be + used determined by the number of available CPU cores. + file_visitor : function + If set, this function will be called with a WrittenFile instance + for each file created during the call. This object will have both + a path attribute and a metadata attribute. + + The path attribute will be a string containing the path to + the created file. + + The metadata attribute will be the parquet metadata of the file. 
+ This metadata will have the file path attribute set and can be used + to build a _metadata file. The metadata attribute will be None if + the format is not parquet. + + Example visitor which simple collects the filenames created:: + + visited_paths = [] + + def file_visitor(written_file): + visited_paths.append(written_file.path) + + existing_data_behavior : 'overwrite_or_ignore' | 'error' | \ +'delete_matching' + Controls how the dataset will handle data that already exists in + the destination. The default behaviour is 'overwrite_or_ignore'. + + 'overwrite_or_ignore' will ignore any existing data and will + overwrite files with the same name as an output file. Other + existing files will be ignored. This behavior, in combination + with a unique basename_template for each write, will allow for + an append workflow. + + 'error' will raise an error if any data exists in the destination. + + 'delete_matching' is useful when you are writing a partitioned + dataset. The first time each partition directory is encountered + the entire directory will be deleted. This allows you to overwrite + old partitions completely. + **kwargs : dict, + Used as additional kwargs for :func:`pyarrow.dataset.write_dataset` + function for matching kwargs, and remainder to + :func:`pyarrow.dataset.ParquetFileFormat.make_write_options`. + See the docstring of :func:`write_table` and + :func:`pyarrow.dataset.write_dataset` for the available options. + Using `metadata_collector` in kwargs allows one to collect the + file metadata instances of dataset pieces. The file paths in the + ColumnChunkMetaData will be set relative to `root_path`. + + Examples + -------- + Generate an example PyArrow Table: + + >>> import pyarrow as pa + >>> table = pa.table({'year': [2020, 2022, 2021, 2022, 2019, 2021], + ... 'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... 
"Brittle stars", "Centipede"]}) + + and write it to a partitioned dataset: + + >>> import pyarrow.parquet as pq + >>> pq.write_to_dataset(table, root_path='dataset_name_3', + ... partition_cols=['year']) + >>> pq.ParquetDataset('dataset_name_3').files + ['dataset_name_3/year=2019/...-0.parquet', ... + + Write a single Parquet file into the root folder: + + >>> pq.write_to_dataset(table, root_path='dataset_name_4') + >>> pq.ParquetDataset('dataset_name_4/').files + ['dataset_name_4/...-0.parquet'] + """ + if use_legacy_dataset is not None: + warnings.warn( + "Passing 'use_legacy_dataset' is deprecated as of pyarrow 15.0.0 " + "and will be removed in a future version.", + FutureWarning, stacklevel=2) + + metadata_collector = kwargs.pop('metadata_collector', None) + + # Check for conflicting keywords + msg_confl = ( + "The '{1}' argument is not supported. " + "Use only '{0}' instead." + ) + if partition_cols is not None and partitioning is not None: + raise ValueError(msg_confl.format("partitioning", + "partition_cols")) + + if metadata_collector is not None and file_visitor is not None: + raise ValueError(msg_confl.format("file_visitor", + "metadata_collector")) + + import pyarrow.dataset as ds + + # extract write_dataset specific options + # reset assumed to go to make_write_options + write_dataset_kwargs = dict() + for key in inspect.signature(ds.write_dataset).parameters: + if key in kwargs: + write_dataset_kwargs[key] = kwargs.pop(key) + write_dataset_kwargs['max_rows_per_group'] = kwargs.pop( + 'row_group_size', kwargs.pop("chunk_size", None) + ) + + if metadata_collector is not None: + def file_visitor(written_file): + metadata_collector.append(written_file.metadata) + + # map format arguments + parquet_format = ds.ParquetFileFormat() + write_options = parquet_format.make_write_options(**kwargs) + + # map old filesystems to new one + if filesystem is not None: + filesystem = _ensure_filesystem(filesystem) + + if partition_cols: + part_schema = 
table.select(partition_cols).schema + partitioning = ds.partitioning(part_schema, flavor="hive") + + if basename_template is None: + basename_template = guid() + '-{i}.parquet' + + if existing_data_behavior is None: + existing_data_behavior = 'overwrite_or_ignore' + + ds.write_dataset( + table, root_path, filesystem=filesystem, + format=parquet_format, file_options=write_options, schema=schema, + partitioning=partitioning, use_threads=use_threads, + file_visitor=file_visitor, + basename_template=basename_template, + existing_data_behavior=existing_data_behavior, + **write_dataset_kwargs) + return + + +def write_metadata(schema, where, metadata_collector=None, filesystem=None, + **kwargs): + """ + Write metadata-only Parquet file from schema. This can be used with + `write_to_dataset` to generate `_common_metadata` and `_metadata` sidecar + files. + + Parameters + ---------- + schema : pyarrow.Schema + where : string or pyarrow.NativeFile + metadata_collector : list + where to collect metadata information. + filesystem : FileSystem, default None + If nothing passed, will be inferred from `where` if path-like, else + `where` is already a file-like object so no filesystem is needed. + **kwargs : dict, + Additional kwargs for ParquetWriter class. See docstring for + `ParquetWriter` for more information. + + Examples + -------- + Generate example data: + + >>> import pyarrow as pa + >>> table = pa.table({'n_legs': [2, 2, 4, 4, 5, 100], + ... 'animal': ["Flamingo", "Parrot", "Dog", "Horse", + ... "Brittle stars", "Centipede"]}) + + Write a dataset and collect metadata information. + + >>> metadata_collector = [] + >>> import pyarrow.parquet as pq + >>> pq.write_to_dataset( + ... table, 'dataset_metadata', + ... metadata_collector=metadata_collector) + + Write the `_common_metadata` parquet file without row groups statistics. + + >>> pq.write_metadata( + ... 
table.schema, 'dataset_metadata/_common_metadata') + + Write the `_metadata` parquet file with row groups statistics. + + >>> pq.write_metadata( + ... table.schema, 'dataset_metadata/_metadata', + ... metadata_collector=metadata_collector) + """ + filesystem, where = _resolve_filesystem_and_path(where, filesystem) + + if hasattr(where, "seek"): # file-like + cursor_position = where.tell() + + writer = ParquetWriter(where, schema, filesystem, **kwargs) + writer.close() + + if metadata_collector is not None: + # ParquetWriter doesn't expose the metadata until it's written. Write + # it and read it again. + metadata = read_metadata(where, filesystem=filesystem) + if hasattr(where, "seek"): + where.seek(cursor_position) # file-like, set cursor back. + + for m in metadata_collector: + metadata.append_row_groups(m) + if filesystem is not None: + with filesystem.open_output_stream(where) as f: + metadata.write_metadata_file(f) + else: + metadata.write_metadata_file(where) + + +def read_metadata(where, memory_map=False, decryption_properties=None, + filesystem=None): + """ + Read FileMetaData from footer of a single Parquet file. + + Parameters + ---------- + where : str (file path) or file-like object + memory_map : bool, default False + Create memory map when the source is a file path. + decryption_properties : FileDecryptionProperties, default None + Decryption properties for reading encrypted Parquet files. + filesystem : FileSystem, default None + If nothing passed, will be inferred based on path. + Path will try to be found in the local on-disk filesystem otherwise + it will be parsed as an URI to determine the filesystem. + + Returns + ------- + metadata : FileMetaData + The metadata of the Parquet file + + Examples + -------- + >>> import pyarrow as pa + >>> import pyarrow.parquet as pq + >>> table = pa.table({'n_legs': [4, 5, 100], + ... 
'animal': ["Dog", "Brittle stars", "Centipede"]}) + >>> pq.write_table(table, 'example.parquet') + + >>> pq.read_metadata('example.parquet') + + created_by: parquet-cpp-arrow version ... + num_columns: 2 + num_rows: 3 + num_row_groups: 1 + format_version: 2.6 + serialized_size: ... + """ + filesystem, where = _resolve_filesystem_and_path(where, filesystem) + file_ctx = nullcontext() + if filesystem is not None: + file_ctx = where = filesystem.open_input_file(where) + + with file_ctx: + file = ParquetFile(where, memory_map=memory_map, + decryption_properties=decryption_properties) + return file.metadata + + +def read_schema(where, memory_map=False, decryption_properties=None, + filesystem=None): + """ + Read effective Arrow schema from Parquet file metadata. + + Parameters + ---------- + where : str (file path) or file-like object + memory_map : bool, default False + Create memory map when the source is a file path. + decryption_properties : FileDecryptionProperties, default None + Decryption properties for reading encrypted Parquet files. + filesystem : FileSystem, default None + If nothing passed, will be inferred based on path. + Path will try to be found in the local on-disk filesystem otherwise + it will be parsed as an URI to determine the filesystem. + + Returns + ------- + schema : pyarrow.Schema + The schema of the Parquet file + + Examples + -------- + >>> import pyarrow as pa + >>> import pyarrow.parquet as pq + >>> table = pa.table({'n_legs': [4, 5, 100], + ... 
'animal': ["Dog", "Brittle stars", "Centipede"]}) + >>> pq.write_table(table, 'example.parquet') + + >>> pq.read_schema('example.parquet') + n_legs: int64 + animal: string + """ + filesystem, where = _resolve_filesystem_and_path(where, filesystem) + file_ctx = nullcontext() + if filesystem is not None: + file_ctx = where = filesystem.open_input_file(where) + + with file_ctx: + file = ParquetFile( + where, memory_map=memory_map, + decryption_properties=decryption_properties) + return file.schema.to_arrow_schema() + + +__all__ = ( + "ColumnChunkMetaData", + "ColumnSchema", + "FileDecryptionProperties", + "FileEncryptionProperties", + "FileMetaData", + "ParquetDataset", + "ParquetFile", + "ParquetLogicalType", + "ParquetReader", + "ParquetSchema", + "ParquetWriter", + "RowGroupMetaData", + "SortingColumn", + "Statistics", + "read_metadata", + "read_pandas", + "read_schema", + "read_table", + "write_metadata", + "write_table", + "write_to_dataset", + "_filters_to_expression", + "filters_to_expression", +) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/parquet/encryption.py b/parrot/lib/python3.10/site-packages/pyarrow/parquet/encryption.py new file mode 100644 index 0000000000000000000000000000000000000000..df6eed913fa52da8c8c9f497f12953b6178439b6 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/parquet/encryption.py @@ -0,0 +1,23 @@ +# pylint: disable=unused-wildcard-import, unused-import + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +from pyarrow._parquet_encryption import (CryptoFactory, # noqa + EncryptionConfiguration, + DecryptionConfiguration, + KmsConnectionConfig, + KmsClient) diff --git a/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/CMakeLists.txt b/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..ff355e46a4bdf25501536f0851eb1a751359b777 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/CMakeLists.txt @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +arrow_install_all_headers("arrow/python") diff --git a/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/api.h b/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/api.h new file mode 100644 index 0000000000000000000000000000000000000000..a0b13d6d13013cfd0f5f0af9c6a6dcea6ceeaafd --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/api.h @@ -0,0 +1,30 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include "arrow/python/arrow_to_pandas.h" +#include "arrow/python/common.h" +#include "arrow/python/datetime.h" +#include "arrow/python/deserialize.h" +#include "arrow/python/helpers.h" +#include "arrow/python/inference.h" +#include "arrow/python/io.h" +#include "arrow/python/numpy_convert.h" +#include "arrow/python/numpy_to_arrow.h" +#include "arrow/python/python_to_arrow.h" +#include "arrow/python/serialize.h" diff --git a/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/arrow_to_pandas.cc b/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/arrow_to_pandas.cc new file mode 100644 index 0000000000000000000000000000000000000000..734f6263d9990949dd2d1b687392367d11cd66f2 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/arrow_to_pandas.cc @@ -0,0 +1,2645 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// Functions for pandas conversion via NumPy + +#include "arrow/python/arrow_to_pandas.h" +#include "arrow/python/numpy_interop.h" // IWYU pragma: expand + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arrow/array.h" +#include "arrow/buffer.h" +#include "arrow/datum.h" +#include "arrow/status.h" +#include "arrow/table.h" +#include "arrow/type.h" +#include "arrow/type_traits.h" +#include "arrow/util/checked_cast.h" +#include "arrow/util/hashing.h" +#include "arrow/util/int_util.h" +#include "arrow/util/logging.h" +#include "arrow/util/macros.h" +#include "arrow/util/parallel.h" +#include "arrow/visit_type_inline.h" + +#include "arrow/compute/api.h" + +#include "arrow/python/arrow_to_python_internal.h" +#include "arrow/python/common.h" +#include "arrow/python/datetime.h" +#include "arrow/python/decimal.h" +#include "arrow/python/helpers.h" +#include "arrow/python/numpy_convert.h" +#include "arrow/python/numpy_internal.h" +#include "arrow/python/pyarrow.h" +#include "arrow/python/python_to_arrow.h" +#include "arrow/python/type_traits.h" + +namespace arrow { + +class MemoryPool; + +using internal::checked_cast; +using internal::CheckIndexBounds; +using internal::OptionalParallelFor; + +namespace py { +namespace { + +// Fix options for conversion of an inner (child) array. +PandasOptions MakeInnerOptions(PandasOptions options) { + // Make sure conversion of inner dictionary arrays always returns an array, + // not a dict {'indices': array, 'dictionary': array, 'ordered': bool} + options.decode_dictionaries = true; + options.categorical_columns.clear(); + options.strings_to_categorical = false; + + // In ARROW-7723, we found as a result of ARROW-3789 that second + // through microsecond resolution tz-aware timestamps were being promoted to + // use the DATETIME_NANO_TZ conversion path, yielding a datetime64[ns] NumPy + // array in this function. 
PyArray_GETITEM returns datetime.datetime for + // units second through microsecond but PyLong for nanosecond (because + // datetime.datetime does not support nanoseconds). + // We force the object conversion to preserve the value of the timezone. + // Nanoseconds are returned as integers. + options.coerce_temporal_nanoseconds = false; + + return options; +} + +// ---------------------------------------------------------------------- +// PyCapsule code for setting ndarray base to reference C++ object + +struct ArrayCapsule { + std::shared_ptr array; +}; + +struct BufferCapsule { + std::shared_ptr buffer; +}; + +void ArrayCapsule_Destructor(PyObject* capsule) { + delete reinterpret_cast(PyCapsule_GetPointer(capsule, "arrow::Array")); +} + +void BufferCapsule_Destructor(PyObject* capsule) { + delete reinterpret_cast(PyCapsule_GetPointer(capsule, "arrow::Buffer")); +} + +// ---------------------------------------------------------------------- +// pandas 0.x DataFrame conversion internals + +using internal::arrow_traits; +using internal::npy_traits; + +template +struct WrapBytes {}; + +template <> +struct WrapBytes { + static inline PyObject* Wrap(const char* data, int64_t length) { + return PyUnicode_FromStringAndSize(data, length); + } +}; + +template <> +struct WrapBytes { + static inline PyObject* Wrap(const char* data, int64_t length) { + return PyUnicode_FromStringAndSize(data, length); + } +}; + +template <> +struct WrapBytes { + static inline PyObject* Wrap(const char* data, int64_t length) { + return PyUnicode_FromStringAndSize(data, length); + } +}; + +template <> +struct WrapBytes { + static inline PyObject* Wrap(const char* data, int64_t length) { + return PyBytes_FromStringAndSize(data, length); + } +}; + +template <> +struct WrapBytes { + static inline PyObject* Wrap(const char* data, int64_t length) { + return PyBytes_FromStringAndSize(data, length); + } +}; + +template <> +struct WrapBytes { + static inline PyObject* Wrap(const char* data, int64_t 
length) { + return PyBytes_FromStringAndSize(data, length); + } +}; + +template <> +struct WrapBytes { + static inline PyObject* Wrap(const char* data, int64_t length) { + return PyBytes_FromStringAndSize(data, length); + } +}; + +static inline bool ListTypeSupported(const DataType& type) { + switch (type.id()) { + case Type::BOOL: + case Type::UINT8: + case Type::INT8: + case Type::UINT16: + case Type::INT16: + case Type::UINT32: + case Type::INT32: + case Type::INT64: + case Type::UINT64: + case Type::HALF_FLOAT: + case Type::FLOAT: + case Type::DOUBLE: + case Type::DECIMAL128: + case Type::DECIMAL256: + case Type::BINARY: + case Type::LARGE_BINARY: + case Type::STRING: + case Type::LARGE_STRING: + case Type::DATE32: + case Type::DATE64: + case Type::STRUCT: + case Type::MAP: + case Type::TIME32: + case Type::TIME64: + case Type::TIMESTAMP: + case Type::DURATION: + case Type::DICTIONARY: + case Type::INTERVAL_MONTH_DAY_NANO: + case Type::NA: // empty list + // The above types are all supported. 
+ return true; + case Type::FIXED_SIZE_LIST: + case Type::LIST: + case Type::LARGE_LIST: + case Type::LIST_VIEW: + case Type::LARGE_LIST_VIEW: { + const auto& list_type = checked_cast(type); + return ListTypeSupported(*list_type.value_type()); + } + case Type::EXTENSION: { + const auto& ext = checked_cast(*type.GetSharedPtr()); + return ListTypeSupported(*(ext.storage_type())); + } + default: + break; + } + return false; +} + +Status CapsulizeArray(const std::shared_ptr& arr, PyObject** out) { + auto capsule = new ArrayCapsule{{arr}}; + *out = PyCapsule_New(reinterpret_cast(capsule), "arrow::Array", + &ArrayCapsule_Destructor); + if (*out == nullptr) { + delete capsule; + RETURN_IF_PYERROR(); + } + return Status::OK(); +} + +Status CapsulizeBuffer(const std::shared_ptr& buffer, PyObject** out) { + auto capsule = new BufferCapsule{{buffer}}; + *out = PyCapsule_New(reinterpret_cast(capsule), "arrow::Buffer", + &BufferCapsule_Destructor); + if (*out == nullptr) { + delete capsule; + RETURN_IF_PYERROR(); + } + return Status::OK(); +} + +Status SetNdarrayBase(PyArrayObject* arr, PyObject* base) { + if (PyArray_SetBaseObject(arr, base) == -1) { + // Error occurred, trust that SetBaseObject sets the error state + Py_XDECREF(base); + RETURN_IF_PYERROR(); + } + return Status::OK(); +} + +Status SetBufferBase(PyArrayObject* arr, const std::shared_ptr& buffer) { + PyObject* base; + RETURN_NOT_OK(CapsulizeBuffer(buffer, &base)); + return SetNdarrayBase(arr, base); +} + +inline void set_numpy_metadata(int type, const DataType* datatype, PyArray_Descr* out) { + auto metadata = + reinterpret_cast(PyDataType_C_METADATA(out)); + if (type == NPY_DATETIME) { + if (datatype->id() == Type::TIMESTAMP) { + const auto& timestamp_type = checked_cast(*datatype); + metadata->meta.base = internal::NumPyFrequency(timestamp_type.unit()); + } else { + DCHECK(false) << "NPY_DATETIME views only supported for Arrow TIMESTAMP types"; + } + } else if (type == NPY_TIMEDELTA) { + 
DCHECK_EQ(datatype->id(), Type::DURATION); + const auto& duration_type = checked_cast(*datatype); + metadata->meta.base = internal::NumPyFrequency(duration_type.unit()); + } +} + +Status PyArray_NewFromPool(int nd, npy_intp* dims, PyArray_Descr* descr, MemoryPool* pool, + PyObject** out) { + // ARROW-6570: Allocate memory from MemoryPool for a couple reasons + // + // * Track allocations + // * Get better performance through custom allocators + int64_t total_size = PyDataType_ELSIZE(descr); + for (int i = 0; i < nd; ++i) { + total_size *= dims[i]; + } + + ARROW_ASSIGN_OR_RAISE(auto buffer, AllocateBuffer(total_size, pool)); + *out = PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims, + /*strides=*/nullptr, + /*data=*/buffer->mutable_data(), + /*flags=*/NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEABLE, + /*obj=*/nullptr); + if (*out == nullptr) { + RETURN_IF_PYERROR(); + // Trust that error set if NULL returned + } + return SetBufferBase(reinterpret_cast(*out), std::move(buffer)); +} + +template +inline const T* GetPrimitiveValues(const Array& arr) { + if (arr.length() == 0) { + return nullptr; + } + const int elsize = arr.type()->byte_width(); + const auto& prim_arr = checked_cast(arr); + return reinterpret_cast(prim_arr.values()->data() + arr.offset() * elsize); +} + +Status MakeNumPyView(std::shared_ptr arr, PyObject* py_ref, int npy_type, int ndim, + npy_intp* dims, PyObject** out) { + PyAcquireGIL lock; + + PyArray_Descr* descr = internal::GetSafeNumPyDtype(npy_type); + set_numpy_metadata(npy_type, arr->type().get(), descr); + PyObject* result = PyArray_NewFromDescr( + &PyArray_Type, descr, ndim, dims, /*strides=*/nullptr, + const_cast(GetPrimitiveValues(*arr)), /*flags=*/0, nullptr); + PyArrayObject* np_arr = reinterpret_cast(result); + if (np_arr == nullptr) { + // Error occurred, trust that error set + return Status::OK(); + } + + PyObject* base; + if (py_ref == nullptr) { + // Capsule will be owned by the ndarray, no incref necessary. 
See + // ARROW-1973 + RETURN_NOT_OK(CapsulizeArray(arr, &base)); + } else { + Py_INCREF(py_ref); + base = py_ref; + } + RETURN_NOT_OK(SetNdarrayBase(np_arr, base)); + + // Do not allow Arrow data to be mutated + PyArray_CLEARFLAGS(np_arr, NPY_ARRAY_WRITEABLE); + *out = result; + return Status::OK(); +} + +class PandasWriter { + public: + enum type { + OBJECT, + UINT8, + INT8, + UINT16, + INT16, + UINT32, + INT32, + UINT64, + INT64, + HALF_FLOAT, + FLOAT, + DOUBLE, + BOOL, + DATETIME_DAY, + DATETIME_SECOND, + DATETIME_MILLI, + DATETIME_MICRO, + DATETIME_NANO, + DATETIME_SECOND_TZ, + DATETIME_MILLI_TZ, + DATETIME_MICRO_TZ, + DATETIME_NANO_TZ, + TIMEDELTA_SECOND, + TIMEDELTA_MILLI, + TIMEDELTA_MICRO, + TIMEDELTA_NANO, + CATEGORICAL, + EXTENSION + }; + + PandasWriter(const PandasOptions& options, int64_t num_rows, int num_columns) + : options_(options), num_rows_(num_rows), num_columns_(num_columns) { + PyAcquireGIL lock; + internal::InitPandasStaticData(); + } + virtual ~PandasWriter() {} + + void SetBlockData(PyObject* arr) { + block_arr_.reset(arr); + block_data_ = + reinterpret_cast(PyArray_DATA(reinterpret_cast(arr))); + } + + /// \brief Either copy or wrap single array to create pandas-compatible array + /// for Series or DataFrame. num_columns_ can only be 1. 
Will try to zero + /// copy if possible (or error if not possible and zero_copy_only=True) + virtual Status TransferSingle(std::shared_ptr data, PyObject* py_ref) = 0; + + /// \brief Copy ChunkedArray into a multi-column block + virtual Status CopyInto(std::shared_ptr data, int64_t rel_placement) = 0; + + Status EnsurePlacementAllocated() { + std::lock_guard guard(allocation_lock_); + if (placement_data_ != nullptr) { + return Status::OK(); + } + PyAcquireGIL lock; + npy_intp placement_dims[1] = {num_columns_}; + PyObject* placement_arr = PyArray_SimpleNew(1, placement_dims, NPY_INT64); + RETURN_IF_PYERROR(); + placement_arr_.reset(placement_arr); + placement_data_ = reinterpret_cast( + PyArray_DATA(reinterpret_cast(placement_arr))); + return Status::OK(); + } + + Status EnsureAllocated() { + std::lock_guard guard(allocation_lock_); + if (block_data_ != nullptr) { + return Status::OK(); + } + RETURN_NOT_OK(Allocate()); + return Status::OK(); + } + + virtual bool CanZeroCopy(const ChunkedArray& data) const { return false; } + + virtual Status Write(std::shared_ptr data, int64_t abs_placement, + int64_t rel_placement) { + RETURN_NOT_OK(EnsurePlacementAllocated()); + if (num_columns_ == 1 && options_.allow_zero_copy_blocks) { + RETURN_NOT_OK(TransferSingle(data, /*py_ref=*/nullptr)); + } else { + RETURN_NOT_OK( + CheckNoZeroCopy("Cannot do zero copy conversion into " + "multi-column DataFrame block")); + RETURN_NOT_OK(EnsureAllocated()); + RETURN_NOT_OK(CopyInto(data, rel_placement)); + } + placement_data_[rel_placement] = abs_placement; + return Status::OK(); + } + + virtual Status GetDataFrameResult(PyObject** out) { + PyObject* result = PyDict_New(); + RETURN_IF_PYERROR(); + + PyObject* block; + RETURN_NOT_OK(GetResultBlock(&block)); + + PyDict_SetItemString(result, "block", block); + PyDict_SetItemString(result, "placement", placement_arr_.obj()); + + RETURN_NOT_OK(AddResultMetadata(result)); + *out = result; + return Status::OK(); + } + + // Caller steals the 
reference to this object + virtual Status GetSeriesResult(PyObject** out) { + RETURN_NOT_OK(MakeBlock1D()); + // Caller owns the object now + *out = block_arr_.detach(); + return Status::OK(); + } + + protected: + virtual Status AddResultMetadata(PyObject* result) { return Status::OK(); } + + Status MakeBlock1D() { + // For Series or for certain DataFrame block types, we need to shape to a + // 1D array when there is only one column + PyAcquireGIL lock; + + DCHECK_EQ(1, num_columns_); + + npy_intp new_dims[1] = {static_cast(num_rows_)}; + PyArray_Dims dims; + dims.ptr = new_dims; + dims.len = 1; + + PyObject* reshaped = PyArray_Newshape( + reinterpret_cast(block_arr_.obj()), &dims, NPY_ANYORDER); + RETURN_IF_PYERROR(); + + // ARROW-8801: Here a PyArrayObject is created that is not being managed by + // any OwnedRef object. This object is then put in the resulting object + // with PyDict_SetItemString, which increments the reference count, so a + // memory leak ensues. There are several ways to fix the memory leak but a + // simple one is to put the reshaped 1D block array in this OwnedRefNoGIL + // so it will be correctly decref'd when this class is destructed. 
+ block_arr_.reset(reshaped); + return Status::OK(); + } + + virtual Status GetResultBlock(PyObject** out) { + *out = block_arr_.obj(); + return Status::OK(); + } + + Status CheckNoZeroCopy(const std::string& message) { + if (options_.zero_copy_only) { + return Status::Invalid(message); + } + return Status::OK(); + } + + Status CheckNotZeroCopyOnly(const ChunkedArray& data) { + if (options_.zero_copy_only) { + return Status::Invalid("Needed to copy ", data.num_chunks(), " chunks with ", + data.null_count(), " nulls, but zero_copy_only was True"); + } + return Status::OK(); + } + + virtual Status Allocate() { + return Status::NotImplemented("Override Allocate in subclasses"); + } + + Status AllocateNDArray(int npy_type, int ndim = 2) { + PyAcquireGIL lock; + + PyObject* block_arr = nullptr; + npy_intp block_dims[2] = {0, 0}; + + if (ndim == 2) { + block_dims[0] = num_columns_; + block_dims[1] = num_rows_; + } else { + block_dims[0] = num_rows_; + } + PyArray_Descr* descr = internal::GetSafeNumPyDtype(npy_type); + if (PyDataType_REFCHK(descr)) { + // ARROW-6876: if the array has refcounted items, let Numpy + // own the array memory so as to decref elements on array destruction + block_arr = PyArray_SimpleNewFromDescr(ndim, block_dims, descr); + RETURN_IF_PYERROR(); + } else { + RETURN_NOT_OK( + PyArray_NewFromPool(ndim, block_dims, descr, options_.pool, &block_arr)); + } + + SetBlockData(block_arr); + return Status::OK(); + } + + void SetDatetimeUnit(NPY_DATETIMEUNIT unit) { + PyAcquireGIL lock; + auto date_dtype = + reinterpret_cast(PyDataType_C_METADATA( + PyArray_DESCR(reinterpret_cast(block_arr_.obj())))); + date_dtype->meta.base = unit; + } + + PandasOptions options_; + + std::mutex allocation_lock_; + + int64_t num_rows_; + int num_columns_; + + OwnedRefNoGIL block_arr_; + uint8_t* block_data_ = nullptr; + + // ndarray + OwnedRefNoGIL placement_arr_; + int64_t* placement_data_ = nullptr; + + private: + ARROW_DISALLOW_COPY_AND_ASSIGN(PandasWriter); +}; + 
+template +inline void ConvertIntegerWithNulls(const PandasOptions& options, + const ChunkedArray& data, OutType* out_values) { + for (int c = 0; c < data.num_chunks(); c++) { + const auto& arr = *data.chunk(c); + const InType* in_values = GetPrimitiveValues(arr); + // Upcast to double, set NaN as appropriate + + for (int i = 0; i < arr.length(); ++i) { + *out_values++ = + arr.IsNull(i) ? static_cast(NAN) : static_cast(in_values[i]); + } + } +} + +template +inline void ConvertIntegerNoNullsSameType(const PandasOptions& options, + const ChunkedArray& data, T* out_values) { + for (int c = 0; c < data.num_chunks(); c++) { + const auto& arr = *data.chunk(c); + if (arr.length() > 0) { + const T* in_values = GetPrimitiveValues(arr); + memcpy(out_values, in_values, sizeof(T) * arr.length()); + out_values += arr.length(); + } + } +} + +template +inline void ConvertIntegerNoNullsCast(const PandasOptions& options, + const ChunkedArray& data, OutType* out_values) { + for (int c = 0; c < data.num_chunks(); c++) { + const auto& arr = *data.chunk(c); + const InType* in_values = GetPrimitiveValues(arr); + for (int64_t i = 0; i < arr.length(); ++i) { + *out_values = in_values[i]; + } + } +} + +template +struct MemoizationTraits { + using Scalar = typename T::c_type; +}; + +template +struct MemoizationTraits> { + // For binary, we memoize string_view as a scalar value to avoid having to + // unnecessarily copy the memory into the memo table data structure + using Scalar = std::string_view; +}; + +// Generic Array -> PyObject** converter that handles object deduplication, if +// requested +template +inline Status ConvertAsPyObjects(const PandasOptions& options, const ChunkedArray& data, + WrapFunction&& wrap_func, PyObject** out_values) { + using ArrayType = typename TypeTraits::ArrayType; + using Scalar = typename MemoizationTraits::Scalar; + + auto convert_chunks = [&](auto&& wrap_func) -> Status { + for (int c = 0; c < data.num_chunks(); c++) { + const auto& arr = 
arrow::internal::checked_cast(*data.chunk(c)); + RETURN_NOT_OK(internal::WriteArrayObjects(arr, wrap_func, out_values)); + out_values += arr.length(); + } + return Status::OK(); + }; + + if (options.deduplicate_objects) { + // GH-40316: only allocate a memo table if deduplication is enabled. + ::arrow::internal::ScalarMemoTable memo_table(options.pool); + std::vector unique_values; + int32_t memo_size = 0; + + auto WrapMemoized = [&](const Scalar& value, PyObject** out_values) { + int32_t memo_index; + RETURN_NOT_OK(memo_table.GetOrInsert(value, &memo_index)); + if (memo_index == memo_size) { + // New entry + RETURN_NOT_OK(wrap_func(value, out_values)); + unique_values.push_back(*out_values); + ++memo_size; + } else { + // Duplicate entry + Py_INCREF(unique_values[memo_index]); + *out_values = unique_values[memo_index]; + } + return Status::OK(); + }; + return convert_chunks(std::move(WrapMemoized)); + } else { + return convert_chunks(std::forward(wrap_func)); + } +} + +Status ConvertStruct(PandasOptions options, const ChunkedArray& data, + PyObject** out_values) { + if (data.num_chunks() == 0) { + return Status::OK(); + } + // ChunkedArray has at least one chunk + auto arr = checked_cast(data.chunk(0).get()); + // Use it to cache the struct type and number of fields for all chunks + int32_t num_fields = arr->num_fields(); + auto array_type = arr->type(); + std::vector fields_data(num_fields * data.num_chunks()); + OwnedRef dict_item; + + // See notes in MakeInnerOptions. + options = MakeInnerOptions(std::move(options)); + // Don't blindly convert because timestamps in lists are handled differently. 
+ options.timestamp_as_object = true; + + for (int c = 0; c < data.num_chunks(); c++) { + auto fields_data_offset = c * num_fields; + auto arr = checked_cast(data.chunk(c).get()); + // Convert the struct arrays first + for (int32_t i = 0; i < num_fields; i++) { + auto field = arr->field(static_cast(i)); + // In case the field is an extension array, use .storage() to convert to Pandas + if (field->type()->id() == Type::EXTENSION) { + const ExtensionArray& arr_ext = checked_cast(*field); + field = arr_ext.storage(); + } + RETURN_NOT_OK(ConvertArrayToPandas(options, field, nullptr, + fields_data[i + fields_data_offset].ref())); + DCHECK(PyArray_Check(fields_data[i + fields_data_offset].obj())); + } + + // Construct a dictionary for each row + const bool has_nulls = data.null_count() > 0; + for (int64_t i = 0; i < arr->length(); ++i) { + if (has_nulls && arr->IsNull(i)) { + Py_INCREF(Py_None); + *out_values = Py_None; + } else { + // Build the new dict object for the row + dict_item.reset(PyDict_New()); + RETURN_IF_PYERROR(); + for (int32_t field_idx = 0; field_idx < num_fields; ++field_idx) { + OwnedRef field_value; + auto name = array_type->field(static_cast(field_idx))->name(); + if (!arr->field(static_cast(field_idx))->IsNull(i)) { + // Value exists in child array, obtain it + auto array = reinterpret_cast( + fields_data[field_idx + fields_data_offset].obj()); + auto ptr = reinterpret_cast(PyArray_GETPTR1(array, i)); + field_value.reset(PyArray_GETITEM(array, ptr)); + RETURN_IF_PYERROR(); + } else { + // Translate the Null to a None + Py_INCREF(Py_None); + field_value.reset(Py_None); + } + // PyDict_SetItemString increments reference count + auto setitem_result = + PyDict_SetItemString(dict_item.obj(), name.c_str(), field_value.obj()); + RETURN_IF_PYERROR(); + DCHECK_EQ(setitem_result, 0); + } + *out_values = dict_item.obj(); + // Grant ownership to the resulting array + Py_INCREF(*out_values); + } + ++out_values; + } + } + return Status::OK(); +} + +Status 
DecodeDictionaries(MemoryPool* pool, const std::shared_ptr& dense_type, + ArrayVector* arrays) { + compute::ExecContext ctx(pool); + compute::CastOptions options; + for (size_t i = 0; i < arrays->size(); ++i) { + ARROW_ASSIGN_OR_RAISE((*arrays)[i], + compute::Cast(*(*arrays)[i], dense_type, options, &ctx)); + } + return Status::OK(); +} + +Status DecodeDictionaries(MemoryPool* pool, const std::shared_ptr& dense_type, + std::shared_ptr* array) { + auto chunks = (*array)->chunks(); + RETURN_NOT_OK(DecodeDictionaries(pool, dense_type, &chunks)); + *array = std::make_shared(std::move(chunks), dense_type); + return Status::OK(); +} + +template +enable_if_list_like ConvertListsLike(PandasOptions options, + const ChunkedArray& data, + PyObject** out_values) { + using ListArrayT = typename TypeTraits::ArrayType; + // Get column of underlying value arrays + ArrayVector value_arrays; + for (int c = 0; c < data.num_chunks(); c++) { + const auto& arr = checked_cast(*data.chunk(c)); + // values() does not account for offsets, so we need to slice into it. + // We can't use Flatten(), because it removes the values behind a null list + // value, and that makes the offsets into original list values and our + // flattened_values array different. 
+ std::shared_ptr flattened_values = arr.values()->Slice( + arr.value_offset(0), arr.value_offset(arr.length()) - arr.value_offset(0)); + if (arr.value_type()->id() == Type::EXTENSION) { + const auto& arr_ext = checked_cast(*flattened_values); + value_arrays.emplace_back(arr_ext.storage()); + } else { + value_arrays.emplace_back(flattened_values); + } + } + + using ListArrayType = typename ListArrayT::TypeClass; + const auto& list_type = checked_cast(*data.type()); + auto value_type = list_type.value_type(); + if (value_type->id() == Type::EXTENSION) { + value_type = checked_cast(*value_type).storage_type(); + } + + auto flat_column = std::make_shared(value_arrays, value_type); + + options = MakeInnerOptions(std::move(options)); + + OwnedRefNoGIL owned_numpy_array; + RETURN_NOT_OK(ConvertChunkedArrayToPandas(options, flat_column, nullptr, + owned_numpy_array.ref())); + PyObject* numpy_array = owned_numpy_array.obj(); + DCHECK(PyArray_Check(numpy_array)); + + int64_t chunk_offset = 0; + for (int c = 0; c < data.num_chunks(); c++) { + const auto& arr = checked_cast(*data.chunk(c)); + const bool has_nulls = data.null_count() > 0; + for (int64_t i = 0; i < arr.length(); ++i) { + if (has_nulls && arr.IsNull(i)) { + Py_INCREF(Py_None); + *out_values = Py_None; + } else { + // Need to subtract value_offset(0) since the original chunk might be a slice + // into another array. 
+ OwnedRef start(PyLong_FromLongLong(arr.value_offset(i) + chunk_offset - + arr.value_offset(0))); + OwnedRef end(PyLong_FromLongLong(arr.value_offset(i + 1) + chunk_offset - + arr.value_offset(0))); + OwnedRef slice(PySlice_New(start.obj(), end.obj(), nullptr)); + + if (ARROW_PREDICT_FALSE(slice.obj() == nullptr)) { + // Fall out of loop, will return from RETURN_IF_PYERROR + break; + } + *out_values = PyObject_GetItem(numpy_array, slice.obj()); + + if (*out_values == nullptr) { + // Fall out of loop, will return from RETURN_IF_PYERROR + break; + } + } + ++out_values; + } + RETURN_IF_PYERROR(); + + chunk_offset += arr.value_offset(arr.length()) - arr.value_offset(0); + } + + return Status::OK(); +} + +// TODO GH-40579: optimize ListView conversion to avoid unnecessary copies +template +enable_if_list_view ConvertListsLike(PandasOptions options, + const ChunkedArray& data, + PyObject** out_values) { + using ListViewArrayType = typename TypeTraits::ArrayType; + using NonViewType = + std::conditional_t; + using NonViewClass = typename TypeTraits::ArrayType; + ArrayVector list_arrays; + for (int c = 0; c < data.num_chunks(); c++) { + const auto& arr = checked_cast(*data.chunk(c)); + ARROW_ASSIGN_OR_RAISE(auto non_view_array, + NonViewClass::FromListView(arr, options.pool)); + list_arrays.emplace_back(non_view_array); + } + auto chunked_array = std::make_shared(list_arrays); + return ConvertListsLike(options, *chunked_array, out_values); +} + +template +Status ConvertMapHelper(F1 resetRow, F2 addPairToRow, F3 stealRow, + const ChunkedArray& data, PyArrayObject* py_keys, + PyArrayObject* py_items, + // needed for null checks in items + const std::vector> item_arrays, + PyObject** out_values) { + OwnedRef key_value; + OwnedRef item_value; + + int64_t chunk_offset = 0; + for (int c = 0; c < data.num_chunks(); ++c) { + const auto& arr = checked_cast(*data.chunk(c)); + const bool has_nulls = data.null_count() > 0; + + // Make a list of key/item pairs for each row in array + 
for (int64_t i = 0; i < arr.length(); ++i) { + if (has_nulls && arr.IsNull(i)) { + Py_INCREF(Py_None); + *out_values = Py_None; + } else { + int64_t entry_offset = arr.value_offset(i); + int64_t num_pairs = arr.value_offset(i + 1) - entry_offset; + + // Build the new list object for the row of Python pairs + RETURN_NOT_OK(resetRow(num_pairs)); + + // Add each key/item pair in the row + for (int64_t j = 0; j < num_pairs; ++j) { + // Get key value, key is non-nullable for a valid row + auto ptr_key = reinterpret_cast( + PyArray_GETPTR1(py_keys, chunk_offset + entry_offset + j)); + key_value.reset(PyArray_GETITEM(py_keys, ptr_key)); + RETURN_IF_PYERROR(); + + if (item_arrays[c]->IsNull(entry_offset + j)) { + // Translate the Null to a None + Py_INCREF(Py_None); + item_value.reset(Py_None); + } else { + // Get valid value from item array + auto ptr_item = reinterpret_cast( + PyArray_GETPTR1(py_items, chunk_offset + entry_offset + j)); + item_value.reset(PyArray_GETITEM(py_items, ptr_item)); + RETURN_IF_PYERROR(); + } + + // Add the key/item pair to the row + RETURN_NOT_OK(addPairToRow(j, key_value, item_value)); + } + + // Pass ownership to the resulting array + *out_values = stealRow(); + } + ++out_values; + } + RETURN_IF_PYERROR(); + + chunk_offset += arr.values()->length(); + } + + return Status::OK(); +} + +// A more helpful error message around TypeErrors that may stem from unhashable keys +Status CheckMapAsPydictsTypeError() { + if (ARROW_PREDICT_TRUE(!PyErr_Occurred())) { + return Status::OK(); + } + if (PyErr_ExceptionMatches(PyExc_TypeError)) { + // Modify the error string directly, so it is re-raised + // with our additional info. + // + // There are not many interesting things happening when this + // is hit. This is intended to only be called directly after + // PyDict_SetItem, where a finite set of errors could occur. 
+ PyObject *type, *value, *traceback; + PyErr_Fetch(&type, &value, &traceback); + std::string message; + RETURN_NOT_OK(internal::PyObject_StdStringStr(value, &message)); + message += + ". If keys are not hashable, then you must use the option " + "[maps_as_pydicts=None (default)]"; + + // resets the error + PyErr_SetString(PyExc_TypeError, message.c_str()); + } + return ConvertPyError(); +} + +Status CheckForDuplicateKeys(bool error_on_duplicate_keys, Py_ssize_t total_dict_len, + Py_ssize_t total_raw_len) { + if (total_dict_len < total_raw_len) { + const char* message = + "[maps_as_pydicts] " + "After conversion of Arrow maps to pydicts, " + "detected data loss due to duplicate keys. " + "Original input length is [%lld], total converted pydict length is [%lld]."; + std::array buf; + std::snprintf(buf.data(), buf.size(), message, total_raw_len, total_dict_len); + + if (error_on_duplicate_keys) { + return Status::UnknownError(buf.data()); + } else { + ARROW_LOG(WARNING) << buf.data(); + } + } + return Status::OK(); +} + +Status ConvertMap(PandasOptions options, const ChunkedArray& data, + PyObject** out_values) { + // Get columns of underlying key/item arrays + std::vector> key_arrays; + std::vector> item_arrays; + for (int c = 0; c < data.num_chunks(); ++c) { + const auto& map_arr = checked_cast(*data.chunk(c)); + key_arrays.emplace_back(map_arr.keys()); + item_arrays.emplace_back(map_arr.items()); + } + + const auto& map_type = checked_cast(*data.type()); + auto key_type = map_type.key_type(); + auto item_type = map_type.item_type(); + + // ARROW-6899: Convert dictionary-encoded children to dense instead of + // failing below. 
A more efficient conversion than this could be done later + if (key_type->id() == Type::DICTIONARY) { + auto dense_type = checked_cast(*key_type).value_type(); + RETURN_NOT_OK(DecodeDictionaries(options.pool, dense_type, &key_arrays)); + key_type = dense_type; + } + if (item_type->id() == Type::DICTIONARY) { + auto dense_type = checked_cast(*item_type).value_type(); + RETURN_NOT_OK(DecodeDictionaries(options.pool, dense_type, &item_arrays)); + item_type = dense_type; + } + + // See notes in MakeInnerOptions. + options = MakeInnerOptions(std::move(options)); + // Don't blindly convert because timestamps in lists are handled differently. + options.timestamp_as_object = true; + + auto flat_keys = std::make_shared(key_arrays, key_type); + auto flat_items = std::make_shared(item_arrays, item_type); + OwnedRefNoGIL owned_numpy_keys; + RETURN_NOT_OK( + ConvertChunkedArrayToPandas(options, flat_keys, nullptr, owned_numpy_keys.ref())); + OwnedRefNoGIL owned_numpy_items; + RETURN_NOT_OK( + ConvertChunkedArrayToPandas(options, flat_items, nullptr, owned_numpy_items.ref())); + PyArrayObject* py_keys = reinterpret_cast(owned_numpy_keys.obj()); + PyArrayObject* py_items = reinterpret_cast(owned_numpy_items.obj()); + + if (options.maps_as_pydicts == MapConversionType::DEFAULT) { + // The default behavior to express an Arrow MAP as a list of [(key, value), ...] 
pairs + OwnedRef list_item; + return ConvertMapHelper( + [&list_item](int64_t num_pairs) { + list_item.reset(PyList_New(num_pairs)); + return CheckPyError(); + }, + [&list_item](int64_t idx, OwnedRef& key_value, OwnedRef& item_value) { + PyList_SET_ITEM(list_item.obj(), idx, + PyTuple_Pack(2, key_value.obj(), item_value.obj())); + return CheckPyError(); + }, + [&list_item] { return list_item.detach(); }, data, py_keys, py_items, item_arrays, + out_values); + } else { + // Use a native pydict + OwnedRef dict_item; + Py_ssize_t total_dict_len{0}; + Py_ssize_t total_raw_len{0}; + + bool error_on_duplicate_keys; + if (options.maps_as_pydicts == MapConversionType::LOSSY) { + error_on_duplicate_keys = false; + } else if (options.maps_as_pydicts == MapConversionType::STRICT_) { + error_on_duplicate_keys = true; + } else { + auto val = std::underlying_type_t(options.maps_as_pydicts); + return Status::UnknownError("Received unknown option for maps_as_pydicts: " + + std::to_string(val)); + } + + auto status = ConvertMapHelper( + [&dict_item, &total_raw_len](int64_t num_pairs) { + total_raw_len += num_pairs; + dict_item.reset(PyDict_New()); + return CheckPyError(); + }, + [&dict_item]([[maybe_unused]] int64_t idx, OwnedRef& key_value, + OwnedRef& item_value) { + auto setitem_result = + PyDict_SetItem(dict_item.obj(), key_value.obj(), item_value.obj()); + ARROW_RETURN_NOT_OK(CheckMapAsPydictsTypeError()); + // returns -1 if there are internal errors around hashing/resizing + return setitem_result == 0 ? Status::OK() + : Status::UnknownError( + "[maps_as_pydicts] " + "Unexpected failure inserting Arrow (key, " + "value) pair into Python dict"); + }, + [&dict_item, &total_dict_len] { + total_dict_len += PyDict_Size(dict_item.obj()); + return dict_item.detach(); + }, + data, py_keys, py_items, item_arrays, out_values); + + ARROW_RETURN_NOT_OK(status); + // If there were no errors generating the pydicts, + // then check if we detected any data loss from duplicate keys. 
+ return CheckForDuplicateKeys(error_on_duplicate_keys, total_dict_len, total_raw_len); + } +} + +template +inline void ConvertNumericNullable(const ChunkedArray& data, InType na_value, + OutType* out_values) { + for (int c = 0; c < data.num_chunks(); c++) { + const auto& arr = *data.chunk(c); + const InType* in_values = GetPrimitiveValues(arr); + + if (arr.null_count() > 0) { + for (int64_t i = 0; i < arr.length(); ++i) { + *out_values++ = arr.IsNull(i) ? na_value : in_values[i]; + } + } else { + memcpy(out_values, in_values, sizeof(InType) * arr.length()); + out_values += arr.length(); + } + } +} + +template +inline void ConvertNumericNullableCast(const ChunkedArray& data, InType na_value, + OutType* out_values) { + for (int c = 0; c < data.num_chunks(); c++) { + const auto& arr = *data.chunk(c); + const InType* in_values = GetPrimitiveValues(arr); + + for (int64_t i = 0; i < arr.length(); ++i) { + *out_values++ = arr.IsNull(i) ? static_cast(na_value) + : static_cast(in_values[i]); + } + } +} + +template +class TypedPandasWriter : public PandasWriter { + public: + using T = typename npy_traits::value_type; + + using PandasWriter::PandasWriter; + + Status TransferSingle(std::shared_ptr data, PyObject* py_ref) override { + if (CanZeroCopy(*data)) { + PyObject* wrapped; + npy_intp dims[2] = {static_cast(num_columns_), + static_cast(num_rows_)}; + RETURN_NOT_OK( + MakeNumPyView(data->chunk(0), py_ref, NPY_TYPE, /*ndim=*/2, dims, &wrapped)); + SetBlockData(wrapped); + return Status::OK(); + } else { + RETURN_NOT_OK(CheckNotZeroCopyOnly(*data)); + RETURN_NOT_OK(EnsureAllocated()); + return CopyInto(data, /*rel_placement=*/0); + } + } + + Status CheckTypeExact(const DataType& type, Type::type expected) { + if (type.id() != expected) { + // TODO(wesm): stringify NumPy / pandas type + return Status::NotImplemented("Cannot write Arrow data of type ", type.ToString()); + } + return Status::OK(); + } + + T* GetBlockColumnStart(int64_t rel_placement) { + return 
reinterpret_cast(block_data_) + rel_placement * num_rows_; + } + + protected: + Status Allocate() override { return AllocateNDArray(NPY_TYPE); } +}; + +struct ObjectWriterVisitor { + const PandasOptions& options; + const ChunkedArray& data; + PyObject** out_values; + + Status Visit(const NullType& type) { + for (int c = 0; c < data.num_chunks(); c++) { + std::shared_ptr arr = data.chunk(c); + + for (int64_t i = 0; i < arr->length(); ++i) { + // All values are null + Py_INCREF(Py_None); + *out_values = Py_None; + ++out_values; + } + } + return Status::OK(); + } + + Status Visit(const BooleanType& type) { + for (int c = 0; c < data.num_chunks(); c++) { + const auto& arr = checked_cast(*data.chunk(c)); + + for (int64_t i = 0; i < arr.length(); ++i) { + if (arr.IsNull(i)) { + Py_INCREF(Py_None); + *out_values++ = Py_None; + } else if (arr.Value(i)) { + // True + Py_INCREF(Py_True); + *out_values++ = Py_True; + } else { + // False + Py_INCREF(Py_False); + *out_values++ = Py_False; + } + } + } + return Status::OK(); + } + + template + enable_if_integer Visit(const Type& type) { + using T = typename Type::c_type; + auto WrapValue = [](T value, PyObject** out) { + *out = std::is_signed::value ? 
PyLong_FromLongLong(value) + : PyLong_FromUnsignedLongLong(value); + RETURN_IF_PYERROR(); + return Status::OK(); + }; + return ConvertAsPyObjects(options, data, WrapValue, out_values); + } + + template + enable_if_t::value || is_binary_view_like_type::value || + is_fixed_size_binary_type::value, + Status> + Visit(const Type& type) { + auto WrapValue = [](const std::string_view& view, PyObject** out) { + *out = WrapBytes::Wrap(view.data(), view.length()); + if (*out == nullptr) { + PyErr_Clear(); + return Status::UnknownError("Wrapping ", view, " failed"); + } + return Status::OK(); + }; + return ConvertAsPyObjects(options, data, WrapValue, out_values); + } + + template + enable_if_date Visit(const Type& type) { + auto WrapValue = [](typename Type::c_type value, PyObject** out) { + RETURN_NOT_OK(internal::PyDate_from_int(value, Type::UNIT, out)); + RETURN_IF_PYERROR(); + return Status::OK(); + }; + return ConvertAsPyObjects(options, data, WrapValue, out_values); + } + + template + enable_if_time Visit(const Type& type) { + const TimeUnit::type unit = type.unit(); + auto WrapValue = [unit](typename Type::c_type value, PyObject** out) { + RETURN_NOT_OK(internal::PyTime_from_int(value, unit, out)); + RETURN_IF_PYERROR(); + return Status::OK(); + }; + return ConvertAsPyObjects(options, data, WrapValue, out_values); + } + + template + enable_if_timestamp Visit(const Type& type) { + const TimeUnit::type unit = type.unit(); + OwnedRef tzinfo; + + auto ConvertTimezoneNaive = [&](typename Type::c_type value, PyObject** out) { + RETURN_NOT_OK(internal::PyDateTime_from_int(value, unit, out)); + RETURN_IF_PYERROR(); + return Status::OK(); + }; + auto ConvertTimezoneAware = [&](typename Type::c_type value, PyObject** out) { + PyObject* naive_datetime; + RETURN_NOT_OK(ConvertTimezoneNaive(value, &naive_datetime)); + + // convert the timezone naive datetime object to timezone aware + // two step conversion of the datetime mimics Python's code: + // 
dt.replace(tzinfo=datetime.timezone.utc).astimezone(tzinfo) + // first step: replacing timezone with timezone.utc (replace method) + OwnedRef args(PyTuple_New(0)); + OwnedRef keywords(PyDict_New()); + PyDict_SetItemString(keywords.obj(), "tzinfo", PyDateTime_TimeZone_UTC); + OwnedRef naive_datetime_replace(PyObject_GetAttrString(naive_datetime, "replace")); + OwnedRef datetime_utc( + PyObject_Call(naive_datetime_replace.obj(), args.obj(), keywords.obj())); + // second step: adjust the datetime to tzinfo timezone (astimezone method) + *out = PyObject_CallMethod(datetime_utc.obj(), "astimezone", "O", tzinfo.obj()); + + // the timezone naive object is no longer required + Py_DECREF(naive_datetime); + RETURN_IF_PYERROR(); + + return Status::OK(); + }; + + if (!type.timezone().empty() && !options.ignore_timezone) { + // convert timezone aware + PyObject* tzobj; + ARROW_ASSIGN_OR_RAISE(tzobj, internal::StringToTzinfo(type.timezone())); + tzinfo.reset(tzobj); + RETURN_IF_PYERROR(); + RETURN_NOT_OK( + ConvertAsPyObjects(options, data, ConvertTimezoneAware, out_values)); + } else { + // convert timezone naive + RETURN_NOT_OK( + ConvertAsPyObjects(options, data, ConvertTimezoneNaive, out_values)); + } + + return Status::OK(); + } + + template + enable_if_t::value, Status> Visit( + const Type& type) { + OwnedRef args(PyTuple_New(0)); + OwnedRef kwargs(PyDict_New()); + RETURN_IF_PYERROR(); + auto to_date_offset = [&](const MonthDayNanoIntervalType::MonthDayNanos& interval, + PyObject** out) { + DCHECK(internal::BorrowPandasDataOffsetType() != nullptr); + // DateOffset objects do not add nanoseconds component to pd.Timestamp. + // as of Pandas 1.3.3 + // (https://github.com/pandas-dev/pandas/issues/43892). + // So convert microseconds and remainder to preserve data + // but give users more expected results. 
+ int64_t microseconds = interval.nanoseconds / 1000; + int64_t nanoseconds; + if (interval.nanoseconds >= 0) { + nanoseconds = interval.nanoseconds % 1000; + } else { + nanoseconds = -((-interval.nanoseconds) % 1000); + } + + PyDict_SetItemString(kwargs.obj(), "months", PyLong_FromLong(interval.months)); + PyDict_SetItemString(kwargs.obj(), "days", PyLong_FromLong(interval.days)); + PyDict_SetItemString(kwargs.obj(), "microseconds", + PyLong_FromLongLong(microseconds)); + PyDict_SetItemString(kwargs.obj(), "nanoseconds", PyLong_FromLongLong(nanoseconds)); + *out = + PyObject_Call(internal::BorrowPandasDataOffsetType(), args.obj(), kwargs.obj()); + RETURN_IF_PYERROR(); + return Status::OK(); + }; + return ConvertAsPyObjects(options, data, to_date_offset, + out_values); + } + + Status Visit(const Decimal128Type& type) { + OwnedRef decimal; + OwnedRef Decimal; + RETURN_NOT_OK(internal::ImportModule("decimal", &decimal)); + RETURN_NOT_OK(internal::ImportFromModule(decimal.obj(), "Decimal", &Decimal)); + PyObject* decimal_constructor = Decimal.obj(); + + for (int c = 0; c < data.num_chunks(); c++) { + const auto& arr = checked_cast(*data.chunk(c)); + + for (int64_t i = 0; i < arr.length(); ++i) { + if (arr.IsNull(i)) { + Py_INCREF(Py_None); + *out_values++ = Py_None; + } else { + *out_values++ = + internal::DecimalFromString(decimal_constructor, arr.FormatValue(i)); + RETURN_IF_PYERROR(); + } + } + } + + return Status::OK(); + } + + Status Visit(const Decimal256Type& type) { + OwnedRef decimal; + OwnedRef Decimal; + RETURN_NOT_OK(internal::ImportModule("decimal", &decimal)); + RETURN_NOT_OK(internal::ImportFromModule(decimal.obj(), "Decimal", &Decimal)); + PyObject* decimal_constructor = Decimal.obj(); + + for (int c = 0; c < data.num_chunks(); c++) { + const auto& arr = checked_cast(*data.chunk(c)); + + for (int64_t i = 0; i < arr.length(); ++i) { + if (arr.IsNull(i)) { + Py_INCREF(Py_None); + *out_values++ = Py_None; + } else { + *out_values++ = + 
internal::DecimalFromString(decimal_constructor, arr.FormatValue(i)); + RETURN_IF_PYERROR(); + } + } + } + + return Status::OK(); + } + + template + enable_if_t::value || is_list_view_type::value, Status> Visit( + const T& type) { + if (!ListTypeSupported(*type.value_type())) { + return Status::NotImplemented( + "Not implemented type for conversion from List to Pandas: ", + type.value_type()->ToString()); + } + return ConvertListsLike(options, data, out_values); + } + + Status Visit(const MapType& type) { return ConvertMap(options, data, out_values); } + + Status Visit(const StructType& type) { + return ConvertStruct(options, data, out_values); + } + + template + enable_if_t::value || + std::is_same::value || + std::is_same::value || + std::is_same::value || + std::is_same::value || + (std::is_base_of::value && + !std::is_same::value) || + std::is_base_of::value, + Status> + Visit(const Type& type) { + return Status::NotImplemented("No implemented conversion to object dtype: ", + type.ToString()); + } +}; + +class ObjectWriter : public TypedPandasWriter { + public: + using TypedPandasWriter::TypedPandasWriter; + Status CopyInto(std::shared_ptr data, int64_t rel_placement) override { + PyAcquireGIL lock; + ObjectWriterVisitor visitor{this->options_, *data, + this->GetBlockColumnStart(rel_placement)}; + return VisitTypeInline(*data->type(), &visitor); + } +}; + +static inline bool IsNonNullContiguous(const ChunkedArray& data) { + return data.num_chunks() == 1 && data.null_count() == 0; +} + +template +class IntWriter : public TypedPandasWriter { + public: + using ArrowType = typename npy_traits::TypeClass; + using TypedPandasWriter::TypedPandasWriter; + + bool CanZeroCopy(const ChunkedArray& data) const override { + return IsNonNullContiguous(data); + } + + Status CopyInto(std::shared_ptr data, int64_t rel_placement) override { + RETURN_NOT_OK(this->CheckTypeExact(*data->type(), ArrowType::type_id)); + ConvertIntegerNoNullsSameType( + this->options_, *data, 
this->GetBlockColumnStart(rel_placement)); + return Status::OK(); + } +}; + +template +class FloatWriter : public TypedPandasWriter { + public: + using ArrowType = typename npy_traits::TypeClass; + using TypedPandasWriter::TypedPandasWriter; + using T = typename ArrowType::c_type; + + bool CanZeroCopy(const ChunkedArray& data) const override { + return IsNonNullContiguous(data) && data.type()->id() == ArrowType::type_id; + } + + Status CopyInto(std::shared_ptr data, int64_t rel_placement) override { + Type::type in_type = data->type()->id(); + auto out_values = this->GetBlockColumnStart(rel_placement); + +#define INTEGER_CASE(IN_TYPE) \ + ConvertIntegerWithNulls(this->options_, *data, out_values); \ + break; + + switch (in_type) { + case Type::UINT8: + INTEGER_CASE(uint8_t); + case Type::INT8: + INTEGER_CASE(int8_t); + case Type::UINT16: + INTEGER_CASE(uint16_t); + case Type::INT16: + INTEGER_CASE(int16_t); + case Type::UINT32: + INTEGER_CASE(uint32_t); + case Type::INT32: + INTEGER_CASE(int32_t); + case Type::UINT64: + INTEGER_CASE(uint64_t); + case Type::INT64: + INTEGER_CASE(int64_t); + case Type::HALF_FLOAT: + ConvertNumericNullableCast(*data, npy_traits::na_sentinel, out_values); + case Type::FLOAT: + ConvertNumericNullableCast(*data, npy_traits::na_sentinel, out_values); + break; + case Type::DOUBLE: + ConvertNumericNullableCast(*data, npy_traits::na_sentinel, out_values); + break; + default: + return Status::NotImplemented("Cannot write Arrow data of type ", + data->type()->ToString(), + " to a Pandas floating point block"); + } + +#undef INTEGER_CASE + + return Status::OK(); + } +}; + +using UInt8Writer = IntWriter; +using Int8Writer = IntWriter; +using UInt16Writer = IntWriter; +using Int16Writer = IntWriter; +using UInt32Writer = IntWriter; +using Int32Writer = IntWriter; +using UInt64Writer = IntWriter; +using Int64Writer = IntWriter; +using Float16Writer = FloatWriter; +using Float32Writer = FloatWriter; +using Float64Writer = FloatWriter; + +class 
BoolWriter : public TypedPandasWriter {
 public:
  using TypedPandasWriter::TypedPandasWriter;

  // Boolean columns are always copied element-by-element (no zero-copy path),
  // so allocate the block up front and delegate to CopyInto.
  Status TransferSingle(std::shared_ptr data, PyObject* py_ref) override {
    RETURN_NOT_OK(
        CheckNoZeroCopy("Zero copy conversions not possible with "
                        "boolean types"));
    RETURN_NOT_OK(EnsureAllocated());
    return CopyInto(data, /*rel_placement=*/0);
  }

  // Unpack Arrow's bitmap-packed booleans into one byte per value.
  // Null slots are not distinguished here: arr.Value(i) is written regardless
  // of validity (nullable booleans are routed to the object writer elsewhere).
  Status CopyInto(std::shared_ptr data, int64_t rel_placement) override {
    RETURN_NOT_OK(this->CheckTypeExact(*data->type(), Type::BOOL));
    auto out_values = this->GetBlockColumnStart(rel_placement);
    for (int c = 0; c < data->num_chunks(); c++) {
      const auto& arr = checked_cast(*data->chunk(c));
      for (int64_t i = 0; i < arr.length(); ++i) {
        *out_values++ = static_cast(arr.Value(i));
      }
    }
    return Status::OK();
  }
};

// ----------------------------------------------------------------------
// Date / timestamp types

// Widen each non-null value by multiplying with the compile-time SHIFT factor
// (e.g. days -> milliseconds); nulls become kPandasTimestampNull.
// NOTE(review): the <T, SHIFT> template parameter list was stripped from this
// dump.
template
inline void ConvertDatetime(const ChunkedArray& data, int64_t* out_values) {
  for (int c = 0; c < data.num_chunks(); c++) {
    const auto& arr = *data.chunk(c);
    const T* in_values = GetPrimitiveValues(arr);

    for (int64_t i = 0; i < arr.length(); ++i) {
      *out_values++ = arr.IsNull(i) ? kPandasTimestampNull
                                    : (static_cast(in_values[i]) * SHIFT);
    }
  }
}

// Inverse of ConvertDatetime: narrow each non-null value by integer division
// with SHIFT (continued in the next chunk).
template
void ConvertDatesShift(const ChunkedArray& data, int64_t* out_values) {
  for (int c = 0; c < data.num_chunks(); c++) {
    const auto& arr = *data.chunk(c);
    const T* in_values = GetPrimitiveValues(arr);
    for (int64_t i = 0; i < arr.length(); ++i) {
      *out_values++ = arr.IsNull(i) ?
kPandasTimestampNull
                                    : static_cast(in_values[i]) / SHIFT;
    }
  }
}

// Writes date columns into a NumPy datetime64[D] block.
class DatetimeDayWriter : public TypedPandasWriter {
 public:
  using TypedPandasWriter::TypedPandasWriter;

  Status CopyInto(std::shared_ptr data, int64_t rel_placement) override {
    int64_t* out_values = this->GetBlockColumnStart(rel_placement);
    const auto& type = checked_cast(*data->type());
    switch (type.unit()) {
      case DateUnit::DAY:
        ConvertDatesShift(*data, out_values);
        break;
      case DateUnit::MILLI:
        // Date64 stores milliseconds; ConvertDatesShift divides back to days.
        ConvertDatesShift(*data, out_values);
        break;
    }
    return Status::OK();
  }

 protected:
  Status Allocate() override {
    RETURN_NOT_OK(this->AllocateNDArray(NPY_DATETIME));
    // Tag the NumPy dtype with day resolution.
    SetDatetimeUnit(NPY_FR_D);
    return Status::OK();
  }
};

// Generic timestamp writer parameterized on a TimeUnit (template parameter
// list stripped from this dump).  Assumes input already has the matching unit.
template
class DatetimeWriter : public TypedPandasWriter {
 public:
  using TypedPandasWriter::TypedPandasWriter;

  // Zero-copy only for single non-null timestamp chunks of the exact unit.
  bool CanZeroCopy(const ChunkedArray& data) const override {
    if (data.type()->id() == Type::TIMESTAMP) {
      const auto& type = checked_cast(*data.type());
      return IsNonNullContiguous(data) && type.unit() == UNIT;
    } else {
      return false;
    }
  }

  Status CopyInto(std::shared_ptr data, int64_t rel_placement) override {
    const auto& ts_type = checked_cast(*data->type());
    DCHECK_EQ(UNIT, ts_type.unit()) << "Should only call instances of this writer "
                                    << "with arrays of the correct unit";
    ConvertNumericNullable(*data, kPandasTimestampNull,
                           this->GetBlockColumnStart(rel_placement));
    return Status::OK();
  }

 protected:
  Status Allocate() override {
    RETURN_NOT_OK(this->AllocateNDArray(NPY_DATETIME));
    SetDatetimeUnit(internal::NumPyFrequency(UNIT));
    return Status::OK();
  }
};

using DatetimeSecondWriter = DatetimeWriter;

// Millisecond writer additionally accepts DATE32/DATE64 input, converting
// days-since-epoch to datetime64[ms] (continued in the next chunk).
class DatetimeMilliWriter : public DatetimeWriter {
 public:
  using DatetimeWriter::DatetimeWriter;

  Status CopyInto(std::shared_ptr data, int64_t rel_placement) override {
    Type::type type = data->type()->id();
    int64_t* out_values =
this->GetBlockColumnStart(rel_placement); + if (type == Type::DATE32) { + // Convert from days since epoch to datetime64[ms] + ConvertDatetime(*data, out_values); + } else if (type == Type::DATE64) { + ConvertNumericNullable(*data, kPandasTimestampNull, out_values); + } else { + const auto& ts_type = checked_cast(*data->type()); + DCHECK_EQ(TimeUnit::MILLI, ts_type.unit()) + << "Should only call instances of this writer " + << "with arrays of the correct unit"; + ConvertNumericNullable(*data, kPandasTimestampNull, out_values); + } + return Status::OK(); + } +}; + +using DatetimeMicroWriter = DatetimeWriter; + +class DatetimeNanoWriter : public DatetimeWriter { + public: + using DatetimeWriter::DatetimeWriter; + + Status CopyInto(std::shared_ptr data, int64_t rel_placement) override { + Type::type type = data->type()->id(); + int64_t* out_values = this->GetBlockColumnStart(rel_placement); + compute::ExecContext ctx(options_.pool); + compute::CastOptions options; + if (options_.safe_cast) { + options = compute::CastOptions::Safe(); + } else { + options = compute::CastOptions::Unsafe(); + } + Datum out; + auto target_type = timestamp(TimeUnit::NANO); + + if (type == Type::DATE32) { + // Convert from days since epoch to datetime64[ns] + ConvertDatetime(*data, out_values); + } else if (type == Type::DATE64) { + // Date64Type is millisecond timestamp stored as int64_t + // TODO(wesm): Do we want to make sure to zero out the milliseconds? 
+ ConvertDatetime(*data, out_values); + } else if (type == Type::TIMESTAMP) { + const auto& ts_type = checked_cast(*data->type()); + + if (ts_type.unit() == TimeUnit::NANO) { + ConvertNumericNullable(*data, kPandasTimestampNull, out_values); + } else if (ts_type.unit() == TimeUnit::MICRO || ts_type.unit() == TimeUnit::MILLI || + ts_type.unit() == TimeUnit::SECOND) { + ARROW_ASSIGN_OR_RAISE(out, compute::Cast(data, target_type, options, &ctx)); + ConvertNumericNullable(*out.chunked_array(), kPandasTimestampNull, + out_values); + } else { + return Status::NotImplemented("Unsupported time unit"); + } + } else { + return Status::NotImplemented("Cannot write Arrow data of type ", + data->type()->ToString(), + " to a Pandas datetime block."); + } + return Status::OK(); + } +}; + +template +class DatetimeTZWriter : public BASE { + public: + DatetimeTZWriter(const PandasOptions& options, const std::string& timezone, + int64_t num_rows) + : BASE(options, num_rows, 1), timezone_(timezone) {} + + protected: + Status GetResultBlock(PyObject** out) override { + RETURN_NOT_OK(this->MakeBlock1D()); + *out = this->block_arr_.obj(); + return Status::OK(); + } + + Status AddResultMetadata(PyObject* result) override { + PyObject* py_tz = PyUnicode_FromStringAndSize( + timezone_.c_str(), static_cast(timezone_.size())); + RETURN_IF_PYERROR(); + PyDict_SetItemString(result, "timezone", py_tz); + Py_DECREF(py_tz); + return Status::OK(); + } + + private: + std::string timezone_; +}; + +using DatetimeSecondTZWriter = DatetimeTZWriter; +using DatetimeMilliTZWriter = DatetimeTZWriter; +using DatetimeMicroTZWriter = DatetimeTZWriter; +using DatetimeNanoTZWriter = DatetimeTZWriter; + +template +class TimedeltaWriter : public TypedPandasWriter { + public: + using TypedPandasWriter::TypedPandasWriter; + + Status AllocateTimedelta(int ndim) { + RETURN_NOT_OK(this->AllocateNDArray(NPY_TIMEDELTA, ndim)); + SetDatetimeUnit(internal::NumPyFrequency(UNIT)); + return Status::OK(); + } + + bool 
CanZeroCopy(const ChunkedArray& data) const override {
    // Zero-copy only for single non-null duration chunks of the exact unit.
    const auto& type = checked_cast(*data.type());
    return IsNonNullContiguous(data) && type.unit() == UNIT;
  }

  Status CopyInto(std::shared_ptr data, int64_t rel_placement) override {
    const auto& type = checked_cast(*data->type());
    DCHECK_EQ(UNIT, type.unit()) << "Should only call instances of this writer "
                                 << "with arrays of the correct unit";
    ConvertNumericNullable(*data, kPandasTimestampNull,
                           this->GetBlockColumnStart(rel_placement));
    return Status::OK();
  }

 protected:
  Status Allocate() override { return AllocateTimedelta(2); }
};

using TimedeltaSecondWriter = TimedeltaWriter;
using TimedeltaMilliWriter = TimedeltaWriter;
using TimedeltaMicroWriter = TimedeltaWriter;

// Nanosecond timedelta writer: unlike the generic TimedeltaWriter it accepts
// durations of any unit and rescales them to nanoseconds via ConvertDatetime
// (the stripped template arguments carried the unit multipliers).
class TimedeltaNanoWriter : public TimedeltaWriter {
 public:
  using TimedeltaWriter::TimedeltaWriter;

  Status CopyInto(std::shared_ptr data, int64_t rel_placement) override {
    Type::type type = data->type()->id();
    int64_t* out_values = this->GetBlockColumnStart(rel_placement);
    if (type == Type::DURATION) {
      const auto& ts_type = checked_cast(*data->type());
      if (ts_type.unit() == TimeUnit::NANO) {
        // Same unit: copy with null -> kPandasTimestampNull, no scaling.
        ConvertNumericNullable(*data, kPandasTimestampNull, out_values);
      } else if (ts_type.unit() == TimeUnit::MICRO) {
        ConvertDatetime(*data, out_values);
      } else if (ts_type.unit() == TimeUnit::MILLI) {
        ConvertDatetime(*data, out_values);
      } else if (ts_type.unit() == TimeUnit::SECOND) {
        ConvertDatetime(*data, out_values);
      } else {
        return Status::NotImplemented("Unsupported time unit");
      }
    } else {
      return Status::NotImplemented("Cannot write Arrow data of type ",
                                    data->type()->ToString(),
                                    " to a Pandas timedelta block.");
    }
    return Status::OK();
  }
};

// Build an empty array of `type`, used as the dictionary when a categorical
// column has no chunks (continued in the next chunk).
Status MakeZeroLengthArray(const std::shared_ptr& type,
                           std::shared_ptr* out) {
  std::unique_ptr builder;
  RETURN_NOT_OK(MakeBuilder(default_memory_pool(), type, &builder));
  RETURN_NOT_OK(builder->Resize(0));
  return
builder->Finish(out);
}

// True when the chunks of a dictionary column do not all share one dictionary,
// in which case indices must be remapped onto a unified dictionary.
bool NeedDictionaryUnification(const ChunkedArray& data) {
  if (data.num_chunks() < 2) {
    return false;
  }
  const auto& arr_first = checked_cast(*data.chunk(0));
  for (int c = 1; c < data.num_chunks(); c++) {
    const auto& arr = checked_cast(*data.chunk(c));
    if (!(arr_first.dictionary()->Equals(arr.dictionary()))) {
      return true;
    }
  }
  return false;
}

// Writer for dictionary (categorical) columns, parameterized on the index
// type (template parameter lists stripped from this dump).  Produces a dict
// of {indices, dictionary, ordered} rather than a plain 2D block, so the
// generic CopyInto path is deliberately unsupported.
template
class CategoricalWriter
    : public TypedPandasWriter::npy_type> {
 public:
  using TRAITS = arrow_traits;
  using ArrayType = typename TypeTraits::ArrayType;
  using T = typename TRAITS::T;

  explicit CategoricalWriter(const PandasOptions& options, int64_t num_rows)
      : TypedPandasWriter(options, num_rows, 1),
        ordered_(false),
        needs_copy_(false) {}

  // Categoricals are always single-column blocks; 2D consolidation is invalid.
  Status CopyInto(std::shared_ptr data, int64_t rel_placement) override {
    return Status::NotImplemented("categorical type");
  }

  // Write the index array and convert the (possibly unified) dictionary to a
  // Python object, recording the `ordered` flag for the result metadata.
  Status TransferSingle(std::shared_ptr data, PyObject* py_ref) override {
    const auto& dict_type = checked_cast(*data->type());
    std::shared_ptr dict;
    if (data->num_chunks() == 0) {
      // no dictionary values => create empty array
      RETURN_NOT_OK(this->AllocateNDArray(TRAITS::npy_type, 1));
      RETURN_NOT_OK(MakeZeroLengthArray(dict_type.value_type(), &dict));
    } else {
      DCHECK_EQ(IndexType::type_id, dict_type.index_type()->id());
      RETURN_NOT_OK(WriteIndices(*data, &dict));
    }

    PyObject* pydict;
    RETURN_NOT_OK(ConvertArrayToPandas(this->options_, dict, nullptr, &pydict));
    dictionary_.reset(pydict);
    ordered_ = dict_type.ordered();
    return Status::OK();
  }

  Status Write(std::shared_ptr data, int64_t abs_placement,
               int64_t rel_placement) override {
    RETURN_NOT_OK(this->EnsurePlacementAllocated());
    RETURN_NOT_OK(TransferSingle(data, /*py_ref=*/nullptr));
    this->placement_data_[rel_placement] = abs_placement;
    return Status::OK();
  }

  Status GetSeriesResult(PyObject** out) override {
    PyAcquireGIL lock;

    PyObject* result = PyDict_New();
RETURN_IF_PYERROR(); + + // Expected single array dictionary layout + PyDict_SetItemString(result, "indices", this->block_arr_.obj()); + RETURN_IF_PYERROR(); + RETURN_NOT_OK(AddResultMetadata(result)); + + *out = result; + return Status::OK(); + } + + protected: + Status AddResultMetadata(PyObject* result) override { + PyDict_SetItemString(result, "dictionary", dictionary_.obj()); + PyObject* py_ordered = ordered_ ? Py_True : Py_False; + Py_INCREF(py_ordered); + PyDict_SetItemString(result, "ordered", py_ordered); + return Status::OK(); + } + + Status WriteIndicesUniform(const ChunkedArray& data) { + RETURN_NOT_OK(this->AllocateNDArray(TRAITS::npy_type, 1)); + T* out_values = reinterpret_cast(this->block_data_); + + for (int c = 0; c < data.num_chunks(); c++) { + const auto& arr = checked_cast(*data.chunk(c)); + const auto& indices = checked_cast(*arr.indices()); + auto values = reinterpret_cast(indices.raw_values()); + + RETURN_NOT_OK(CheckIndexBounds(*indices.data(), arr.dictionary()->length())); + // Null is -1 in CategoricalBlock + for (int i = 0; i < arr.length(); ++i) { + if (indices.IsValid(i)) { + *out_values++ = values[i]; + } else { + *out_values++ = -1; + } + } + } + return Status::OK(); + } + + Status WriteIndicesVarying(const ChunkedArray& data, std::shared_ptr* out_dict) { + // Yield int32 indices to allow for dictionary outgrowing the current index + // type + RETURN_NOT_OK(this->AllocateNDArray(NPY_INT32, 1)); + auto out_values = reinterpret_cast(this->block_data_); + + const auto& dict_type = checked_cast(*data.type()); + + ARROW_ASSIGN_OR_RAISE(auto unifier, DictionaryUnifier::Make(dict_type.value_type(), + this->options_.pool)); + for (int c = 0; c < data.num_chunks(); c++) { + const auto& arr = checked_cast(*data.chunk(c)); + const auto& indices = checked_cast(*arr.indices()); + auto values = reinterpret_cast(indices.raw_values()); + + std::shared_ptr transpose_buffer; + RETURN_NOT_OK(unifier->Unify(*arr.dictionary(), &transpose_buffer)); + + 
auto transpose = reinterpret_cast(transpose_buffer->data()); + int64_t dict_length = arr.dictionary()->length(); + + RETURN_NOT_OK(CheckIndexBounds(*indices.data(), dict_length)); + + // Null is -1 in CategoricalBlock + for (int i = 0; i < arr.length(); ++i) { + if (indices.IsValid(i)) { + *out_values++ = transpose[values[i]]; + } else { + *out_values++ = -1; + } + } + } + + std::shared_ptr unused_type; + return unifier->GetResult(&unused_type, out_dict); + } + + Status WriteIndices(const ChunkedArray& data, std::shared_ptr* out_dict) { + DCHECK_GT(data.num_chunks(), 0); + + // Sniff the first chunk + const auto& arr_first = checked_cast(*data.chunk(0)); + const auto indices_first = std::static_pointer_cast(arr_first.indices()); + + if (data.num_chunks() == 1 && indices_first->null_count() == 0) { + RETURN_NOT_OK( + CheckIndexBounds(*indices_first->data(), arr_first.dictionary()->length())); + + PyObject* wrapped; + npy_intp dims[1] = {static_cast(this->num_rows_)}; + RETURN_NOT_OK(MakeNumPyView(indices_first, /*py_ref=*/nullptr, TRAITS::npy_type, + /*ndim=*/1, dims, &wrapped)); + this->SetBlockData(wrapped); + *out_dict = arr_first.dictionary(); + } else { + RETURN_NOT_OK(this->CheckNotZeroCopyOnly(data)); + if (NeedDictionaryUnification(data)) { + RETURN_NOT_OK(WriteIndicesVarying(data, out_dict)); + } else { + RETURN_NOT_OK(WriteIndicesUniform(data)); + *out_dict = arr_first.dictionary(); + } + } + return Status::OK(); + } + + OwnedRefNoGIL dictionary_; + bool ordered_; + bool needs_copy_; +}; + +class ExtensionWriter : public PandasWriter { + public: + using PandasWriter::PandasWriter; + + Status Allocate() override { + // no-op + return Status::OK(); + } + + Status TransferSingle(std::shared_ptr data, PyObject* py_ref) override { + PyAcquireGIL lock; + PyObject* py_array; + py_array = wrap_chunked_array(data); + py_array_.reset(py_array); + + return Status::OK(); + } + + Status CopyInto(std::shared_ptr data, int64_t rel_placement) override { + return 
TransferSingle(data, nullptr); + } + + Status GetDataFrameResult(PyObject** out) override { + PyAcquireGIL lock; + PyObject* result = PyDict_New(); + RETURN_IF_PYERROR(); + + PyDict_SetItemString(result, "py_array", py_array_.obj()); + PyDict_SetItemString(result, "placement", placement_arr_.obj()); + *out = result; + return Status::OK(); + } + + Status GetSeriesResult(PyObject** out) override { + *out = py_array_.detach(); + return Status::OK(); + } + + protected: + OwnedRefNoGIL py_array_; +}; + +Status MakeWriter(const PandasOptions& options, PandasWriter::type writer_type, + const DataType& type, int64_t num_rows, int num_columns, + std::shared_ptr* writer) { +#define BLOCK_CASE(NAME, TYPE) \ + case PandasWriter::NAME: \ + *writer = std::make_shared(options, num_rows, num_columns); \ + break; + +#define CATEGORICAL_CASE(TYPE) \ + case TYPE::type_id: \ + *writer = std::make_shared>(options, num_rows); \ + break; + +#define TZ_CASE(NAME, TYPE) \ + case PandasWriter::NAME: { \ + const auto& ts_type = checked_cast(type); \ + *writer = std::make_shared(options, ts_type.timezone(), num_rows); \ + } break; + + switch (writer_type) { + case PandasWriter::CATEGORICAL: { + const auto& index_type = *checked_cast(type).index_type(); + switch (index_type.id()) { + CATEGORICAL_CASE(Int8Type); + CATEGORICAL_CASE(Int16Type); + CATEGORICAL_CASE(Int32Type); + CATEGORICAL_CASE(Int64Type); + case Type::UINT8: + case Type::UINT16: + case Type::UINT32: + case Type::UINT64: + return Status::TypeError( + "Converting unsigned dictionary indices to pandas", + " not yet supported, index type: ", index_type.ToString()); + default: + // Unreachable + DCHECK(false); + break; + } + } break; + case PandasWriter::EXTENSION: + *writer = std::make_shared(options, num_rows, num_columns); + break; + BLOCK_CASE(OBJECT, ObjectWriter); + BLOCK_CASE(UINT8, UInt8Writer); + BLOCK_CASE(INT8, Int8Writer); + BLOCK_CASE(UINT16, UInt16Writer); + BLOCK_CASE(INT16, Int16Writer); + BLOCK_CASE(UINT32, 
UInt32Writer); + BLOCK_CASE(INT32, Int32Writer); + BLOCK_CASE(UINT64, UInt64Writer); + BLOCK_CASE(INT64, Int64Writer); + BLOCK_CASE(HALF_FLOAT, Float16Writer); + BLOCK_CASE(FLOAT, Float32Writer); + BLOCK_CASE(DOUBLE, Float64Writer); + BLOCK_CASE(BOOL, BoolWriter); + BLOCK_CASE(DATETIME_DAY, DatetimeDayWriter); + BLOCK_CASE(DATETIME_SECOND, DatetimeSecondWriter); + BLOCK_CASE(DATETIME_MILLI, DatetimeMilliWriter); + BLOCK_CASE(DATETIME_MICRO, DatetimeMicroWriter); + BLOCK_CASE(DATETIME_NANO, DatetimeNanoWriter); + BLOCK_CASE(TIMEDELTA_SECOND, TimedeltaSecondWriter); + BLOCK_CASE(TIMEDELTA_MILLI, TimedeltaMilliWriter); + BLOCK_CASE(TIMEDELTA_MICRO, TimedeltaMicroWriter); + BLOCK_CASE(TIMEDELTA_NANO, TimedeltaNanoWriter); + TZ_CASE(DATETIME_SECOND_TZ, DatetimeSecondTZWriter); + TZ_CASE(DATETIME_MILLI_TZ, DatetimeMilliTZWriter); + TZ_CASE(DATETIME_MICRO_TZ, DatetimeMicroTZWriter); + TZ_CASE(DATETIME_NANO_TZ, DatetimeNanoTZWriter); + default: + return Status::NotImplemented("Unsupported block type"); + } + +#undef BLOCK_CASE +#undef CATEGORICAL_CASE + + return Status::OK(); +} + +static Status GetPandasWriterType(const ChunkedArray& data, const PandasOptions& options, + PandasWriter::type* output_type) { +#define INTEGER_CASE(NAME) \ + *output_type = \ + data.null_count() > 0 \ + ? options.integer_object_nulls ? PandasWriter::OBJECT : PandasWriter::DOUBLE \ + : PandasWriter::NAME; \ + break; + + switch (data.type()->id()) { + case Type::BOOL: + *output_type = data.null_count() > 0 ? 
PandasWriter::OBJECT : PandasWriter::BOOL; + break; + case Type::UINT8: + INTEGER_CASE(UINT8); + case Type::INT8: + INTEGER_CASE(INT8); + case Type::UINT16: + INTEGER_CASE(UINT16); + case Type::INT16: + INTEGER_CASE(INT16); + case Type::UINT32: + INTEGER_CASE(UINT32); + case Type::INT32: + INTEGER_CASE(INT32); + case Type::UINT64: + INTEGER_CASE(UINT64); + case Type::INT64: + INTEGER_CASE(INT64); + case Type::HALF_FLOAT: + *output_type = PandasWriter::HALF_FLOAT; + break; + case Type::FLOAT: + *output_type = PandasWriter::FLOAT; + break; + case Type::DOUBLE: + *output_type = PandasWriter::DOUBLE; + break; + case Type::STRING: // fall through + case Type::LARGE_STRING: // fall through + case Type::STRING_VIEW: // fall through + case Type::BINARY: // fall through + case Type::LARGE_BINARY: + case Type::BINARY_VIEW: + case Type::NA: // fall through + case Type::FIXED_SIZE_BINARY: // fall through + case Type::STRUCT: // fall through + case Type::TIME32: // fall through + case Type::TIME64: // fall through + case Type::DECIMAL128: // fall through + case Type::DECIMAL256: // fall through + case Type::INTERVAL_MONTH_DAY_NANO: // fall through + *output_type = PandasWriter::OBJECT; + break; + case Type::DATE32: + if (options.date_as_object) { + *output_type = PandasWriter::OBJECT; + } else if (options.coerce_temporal_nanoseconds) { + *output_type = PandasWriter::DATETIME_NANO; + } else if (options.to_numpy) { + // Numpy supports Day, but Pandas does not + *output_type = PandasWriter::DATETIME_DAY; + } else { + *output_type = PandasWriter::DATETIME_MILLI; + } + break; + case Type::DATE64: + if (options.date_as_object) { + *output_type = PandasWriter::OBJECT; + } else if (options.coerce_temporal_nanoseconds) { + *output_type = PandasWriter::DATETIME_NANO; + } else { + *output_type = PandasWriter::DATETIME_MILLI; + } + break; + case Type::TIMESTAMP: { + const auto& ts_type = checked_cast(*data.type()); + if (options.timestamp_as_object && ts_type.unit() != TimeUnit::NANO) { + 
// Nanoseconds are never out of bounds for pandas, so in that case + // we don't convert to object + *output_type = PandasWriter::OBJECT; + } else if (options.coerce_temporal_nanoseconds) { + if (!ts_type.timezone().empty()) { + *output_type = PandasWriter::DATETIME_NANO_TZ; + } else { + *output_type = PandasWriter::DATETIME_NANO; + } + } else { + if (!ts_type.timezone().empty()) { + switch (ts_type.unit()) { + case TimeUnit::SECOND: + *output_type = PandasWriter::DATETIME_SECOND_TZ; + break; + case TimeUnit::MILLI: + *output_type = PandasWriter::DATETIME_MILLI_TZ; + break; + case TimeUnit::MICRO: + *output_type = PandasWriter::DATETIME_MICRO_TZ; + break; + case TimeUnit::NANO: + *output_type = PandasWriter::DATETIME_NANO_TZ; + break; + } + } else { + switch (ts_type.unit()) { + case TimeUnit::SECOND: + *output_type = PandasWriter::DATETIME_SECOND; + break; + case TimeUnit::MILLI: + *output_type = PandasWriter::DATETIME_MILLI; + break; + case TimeUnit::MICRO: + *output_type = PandasWriter::DATETIME_MICRO; + break; + case TimeUnit::NANO: + *output_type = PandasWriter::DATETIME_NANO; + break; + } + } + } + } break; + case Type::DURATION: { + const auto& dur_type = checked_cast(*data.type()); + if (options.coerce_temporal_nanoseconds) { + *output_type = PandasWriter::TIMEDELTA_NANO; + } else { + switch (dur_type.unit()) { + case TimeUnit::SECOND: + *output_type = PandasWriter::TIMEDELTA_SECOND; + break; + case TimeUnit::MILLI: + *output_type = PandasWriter::TIMEDELTA_MILLI; + break; + case TimeUnit::MICRO: + *output_type = PandasWriter::TIMEDELTA_MICRO; + break; + case TimeUnit::NANO: + *output_type = PandasWriter::TIMEDELTA_NANO; + break; + } + } + } break; + case Type::FIXED_SIZE_LIST: + case Type::LIST: + case Type::LARGE_LIST: + case Type::LIST_VIEW: + case Type::LARGE_LIST_VIEW: + case Type::MAP: { + auto list_type = std::static_pointer_cast(data.type()); + if (!ListTypeSupported(*list_type->value_type())) { + return Status::NotImplemented("Not implemented type 
for Arrow list to pandas: ", + list_type->value_type()->ToString()); + } + *output_type = PandasWriter::OBJECT; + } break; + case Type::DICTIONARY: + *output_type = PandasWriter::CATEGORICAL; + break; + case Type::EXTENSION: + *output_type = PandasWriter::EXTENSION; + break; + default: + return Status::NotImplemented( + "No known equivalent Pandas block for Arrow data of type ", + data.type()->ToString(), " is known."); + } + return Status::OK(); +} + +// Construct the exact pandas "BlockManager" memory layout +// +// * For each column determine the correct output pandas type +// * Allocate 2D blocks (ncols x nrows) for each distinct data type in output +// * Allocate block placement arrays +// * Write Arrow columns out into each slice of memory; populate block +// * placement arrays as we go +class PandasBlockCreator { + public: + using WriterMap = std::unordered_map>; + + explicit PandasBlockCreator(const PandasOptions& options, FieldVector fields, + ChunkedArrayVector arrays) + : options_(options), fields_(std::move(fields)), arrays_(std::move(arrays)) { + num_columns_ = static_cast(arrays_.size()); + if (num_columns_ > 0) { + num_rows_ = arrays_[0]->length(); + } + column_block_placement_.resize(num_columns_); + } + virtual ~PandasBlockCreator() = default; + + virtual Status Convert(PyObject** out) = 0; + + Status AppendBlocks(const WriterMap& blocks, PyObject* list) { + for (const auto& it : blocks) { + PyObject* item; + RETURN_NOT_OK(it.second->GetDataFrameResult(&item)); + if (PyList_Append(list, item) < 0) { + RETURN_IF_PYERROR(); + } + + // ARROW-1017; PyList_Append increments object refcount + Py_DECREF(item); + } + return Status::OK(); + } + + protected: + PandasOptions options_; + + FieldVector fields_; + ChunkedArrayVector arrays_; + int num_columns_; + int64_t num_rows_; + + // column num -> relative placement within internal block + std::vector column_block_placement_; +}; + +// Helper function for extension chunked arrays +// Constructing a storage 
chunked array of an extension chunked array +std::shared_ptr GetStorageChunkedArray(std::shared_ptr arr) { + auto value_type = checked_cast(*arr->type()).storage_type(); + ArrayVector storage_arrays; + for (int c = 0; c < arr->num_chunks(); c++) { + const auto& arr_ext = checked_cast(*arr->chunk(c)); + storage_arrays.emplace_back(arr_ext.storage()); + } + return std::make_shared(std::move(storage_arrays), value_type); +}; + +// Helper function to decode RunEndEncodedArray +Result> GetDecodedChunkedArray( + std::shared_ptr arr) { + ARROW_ASSIGN_OR_RAISE(Datum decoded, compute::RunEndDecode(arr)); + DCHECK(decoded.is_chunked_array()); + return decoded.chunked_array(); +}; + +class ConsolidatedBlockCreator : public PandasBlockCreator { + public: + using PandasBlockCreator::PandasBlockCreator; + + Status Convert(PyObject** out) override { + column_types_.resize(num_columns_); + RETURN_NOT_OK(CreateBlocks()); + RETURN_NOT_OK(WriteTableToBlocks()); + PyAcquireGIL lock; + + PyObject* result = PyList_New(0); + RETURN_IF_PYERROR(); + + RETURN_NOT_OK(AppendBlocks(blocks_, result)); + RETURN_NOT_OK(AppendBlocks(singleton_blocks_, result)); + + *out = result; + return Status::OK(); + } + + Status GetBlockType(int column_index, PandasWriter::type* out) { + if (options_.extension_columns.count(fields_[column_index]->name())) { + *out = PandasWriter::EXTENSION; + return Status::OK(); + } else { + // In case of an extension array default to the storage type + if (arrays_[column_index]->type()->id() == Type::EXTENSION) { + arrays_[column_index] = GetStorageChunkedArray(arrays_[column_index]); + } + // In case of a RunEndEncodedArray default to the values type + else if (arrays_[column_index]->type()->id() == Type::RUN_END_ENCODED) { + ARROW_ASSIGN_OR_RAISE(arrays_[column_index], + GetDecodedChunkedArray(arrays_[column_index])); + } + return GetPandasWriterType(*arrays_[column_index], options_, out); + } + } + + Status CreateBlocks() { + for (int i = 0; i < num_columns_; ++i) { + 
const DataType& type = *arrays_[i]->type(); + PandasWriter::type output_type; + RETURN_NOT_OK(GetBlockType(i, &output_type)); + + int block_placement = 0; + std::shared_ptr writer; + if (output_type == PandasWriter::CATEGORICAL || + output_type == PandasWriter::DATETIME_SECOND_TZ || + output_type == PandasWriter::DATETIME_MILLI_TZ || + output_type == PandasWriter::DATETIME_MICRO_TZ || + output_type == PandasWriter::DATETIME_NANO_TZ || + output_type == PandasWriter::EXTENSION) { + RETURN_NOT_OK(MakeWriter(options_, output_type, type, num_rows_, + /*num_columns=*/1, &writer)); + singleton_blocks_[i] = writer; + } else { + auto it = block_sizes_.find(output_type); + if (it != block_sizes_.end()) { + block_placement = it->second; + // Increment count + ++it->second; + } else { + // Add key to map + block_sizes_[output_type] = 1; + } + } + column_types_[i] = output_type; + column_block_placement_[i] = block_placement; + } + + // Create normal non-categorical blocks + for (const auto& it : this->block_sizes_) { + PandasWriter::type output_type = static_cast(it.first); + std::shared_ptr block; + RETURN_NOT_OK(MakeWriter(this->options_, output_type, /*unused*/ *null(), num_rows_, + it.second, &block)); + this->blocks_[output_type] = block; + } + return Status::OK(); + } + + Status GetWriter(int i, std::shared_ptr* block) { + PandasWriter::type output_type = this->column_types_[i]; + switch (output_type) { + case PandasWriter::CATEGORICAL: + case PandasWriter::DATETIME_SECOND_TZ: + case PandasWriter::DATETIME_MILLI_TZ: + case PandasWriter::DATETIME_MICRO_TZ: + case PandasWriter::DATETIME_NANO_TZ: + case PandasWriter::EXTENSION: { + auto it = this->singleton_blocks_.find(i); + if (it == this->singleton_blocks_.end()) { + return Status::KeyError("No block allocated"); + } + *block = it->second; + } break; + default: + auto it = this->blocks_.find(output_type); + if (it == this->blocks_.end()) { + return Status::KeyError("No block allocated"); + } + *block = it->second; + 
break; + } + return Status::OK(); + } + + Status WriteTableToBlocks() { + auto WriteColumn = [this](int i) { + std::shared_ptr block; + RETURN_NOT_OK(this->GetWriter(i, &block)); + // ARROW-3789 Use std::move on the array to permit self-destructing + return block->Write(std::move(arrays_[i]), i, this->column_block_placement_[i]); + }; + + return OptionalParallelFor(options_.use_threads, num_columns_, WriteColumn); + } + + private: + // column num -> block type id + std::vector column_types_; + + // block type -> type count + std::unordered_map block_sizes_; + std::unordered_map block_types_; + + // block type -> block + WriterMap blocks_; + + WriterMap singleton_blocks_; +}; + +/// \brief Create blocks for pandas.DataFrame block manager using one block per +/// column strategy. This permits some zero-copy optimizations as well as the +/// ability for the table to "self-destruct" if selected by the user. +class SplitBlockCreator : public PandasBlockCreator { + public: + using PandasBlockCreator::PandasBlockCreator; + + Status GetWriter(int i, std::shared_ptr* writer) { + PandasWriter::type output_type = PandasWriter::OBJECT; + const DataType& type = *arrays_[i]->type(); + if (options_.extension_columns.count(fields_[i]->name())) { + output_type = PandasWriter::EXTENSION; + } else { + // Null count needed to determine output type + RETURN_NOT_OK(GetPandasWriterType(*arrays_[i], options_, &output_type)); + } + return MakeWriter(this->options_, output_type, type, num_rows_, 1, writer); + } + + Status Convert(PyObject** out) override { + PyAcquireGIL lock; + + PyObject* result = PyList_New(0); + RETURN_IF_PYERROR(); + + for (int i = 0; i < num_columns_; ++i) { + std::shared_ptr writer; + RETURN_NOT_OK(GetWriter(i, &writer)); + // ARROW-3789 Use std::move on the array to permit self-destructing + RETURN_NOT_OK(writer->Write(std::move(arrays_[i]), i, /*rel_placement=*/0)); + + PyObject* item; + RETURN_NOT_OK(writer->GetDataFrameResult(&item)); + if (PyList_Append(result, 
item) < 0) { + RETURN_IF_PYERROR(); + } + // PyList_Append increments object refcount + Py_DECREF(item); + } + + *out = result; + return Status::OK(); + } + + private: + std::vector> writers_; +}; + +Status ConvertCategoricals(const PandasOptions& options, ChunkedArrayVector* arrays, + FieldVector* fields) { + std::vector columns_to_encode; + + // For Categorical conversions + auto EncodeColumn = [&](int j) { + int i = columns_to_encode[j]; + if (options.zero_copy_only) { + return Status::Invalid("Need to dictionary encode a column, but ", + "only zero-copy conversions allowed"); + } + compute::ExecContext ctx(options.pool); + ARROW_ASSIGN_OR_RAISE( + Datum out, DictionaryEncode((*arrays)[i], + compute::DictionaryEncodeOptions::Defaults(), &ctx)); + (*arrays)[i] = out.chunked_array(); + (*fields)[i] = (*fields)[i]->WithType((*arrays)[i]->type()); + return Status::OK(); + }; + + if (!options.categorical_columns.empty()) { + for (int i = 0; i < static_cast(arrays->size()); i++) { + if ((*arrays)[i]->type()->id() != Type::DICTIONARY && + options.categorical_columns.count((*fields)[i]->name())) { + columns_to_encode.push_back(i); + } + } + } + if (options.strings_to_categorical) { + for (int i = 0; i < static_cast(arrays->size()); i++) { + if (is_base_binary_like((*arrays)[i]->type()->id())) { + columns_to_encode.push_back(i); + } + } + } + return OptionalParallelFor(options.use_threads, + static_cast(columns_to_encode.size()), EncodeColumn); +} + +} // namespace + +Status ConvertArrayToPandas(const PandasOptions& options, std::shared_ptr arr, + PyObject* py_ref, PyObject** out) { + return ConvertChunkedArrayToPandas( + options, std::make_shared(std::move(arr)), py_ref, out); +} + +Status ConvertChunkedArrayToPandas(const PandasOptions& options, + std::shared_ptr arr, PyObject* py_ref, + PyObject** out) { + if (options.decode_dictionaries && arr->type()->id() == Type::DICTIONARY) { + // XXX we should return an error as below if options.zero_copy_only + // is true, but 
that would break compatibility with existing tests. + const auto& dense_type = + checked_cast(*arr->type()).value_type(); + RETURN_NOT_OK(DecodeDictionaries(options.pool, dense_type, &arr)); + DCHECK_NE(arr->type()->id(), Type::DICTIONARY); + + // The original Python DictionaryArray won't own the memory anymore + // as we actually built a new array when we decoded the DictionaryArray + // thus let the final resulting numpy array own the memory through a Capsule + py_ref = nullptr; + } + + if (options.strings_to_categorical && is_base_binary_like(arr->type()->id())) { + if (options.zero_copy_only) { + return Status::Invalid("Need to dictionary encode a column, but ", + "only zero-copy conversions allowed"); + } + compute::ExecContext ctx(options.pool); + ARROW_ASSIGN_OR_RAISE( + Datum out, + DictionaryEncode(arr, compute::DictionaryEncodeOptions::Defaults(), &ctx)); + arr = out.chunked_array(); + } + + PandasOptions modified_options = options; + modified_options.strings_to_categorical = false; + + // ARROW-7596: We permit the hybrid Series/DataFrame code path to do zero copy + // optimizations that we do not allow in the default case when converting + // Table->DataFrame + modified_options.allow_zero_copy_blocks = true; + + // In case of an extension array default to the storage type + if (arr->type()->id() == Type::EXTENSION) { + arr = GetStorageChunkedArray(arr); + } + // In case of a RunEndEncodedArray decode the array + else if (arr->type()->id() == Type::RUN_END_ENCODED) { + if (options.zero_copy_only) { + return Status::Invalid("Need to dencode a RunEndEncodedArray, but ", + "only zero-copy conversions allowed"); + } + ARROW_ASSIGN_OR_RAISE(arr, GetDecodedChunkedArray(arr)); + + // Because we built a new array when we decoded the RunEndEncodedArray + // the final resulting numpy array should own the memory through a Capsule + py_ref = nullptr; + } + + PandasWriter::type output_type; + RETURN_NOT_OK(GetPandasWriterType(*arr, modified_options, &output_type)); + 
if (options.decode_dictionaries) { + DCHECK_NE(output_type, PandasWriter::CATEGORICAL); + } + + std::shared_ptr writer; + RETURN_NOT_OK(MakeWriter(modified_options, output_type, *arr->type(), arr->length(), + /*num_columns=*/1, &writer)); + RETURN_NOT_OK(writer->TransferSingle(std::move(arr), py_ref)); + return writer->GetSeriesResult(out); +} + +Status ConvertTableToPandas(const PandasOptions& options, std::shared_ptr table, + PyObject** out) { + ChunkedArrayVector arrays = table->columns(); + FieldVector fields = table->fields(); + + // ARROW-3789: allow "self-destructing" by releasing references to columns as + // we convert them to pandas + table = nullptr; + + RETURN_NOT_OK(ConvertCategoricals(options, &arrays, &fields)); + + PandasOptions modified_options = options; + modified_options.strings_to_categorical = false; + modified_options.categorical_columns.clear(); + + if (options.split_blocks) { + modified_options.allow_zero_copy_blocks = true; + SplitBlockCreator helper(modified_options, std::move(fields), std::move(arrays)); + return helper.Convert(out); + } else { + ConsolidatedBlockCreator helper(modified_options, std::move(fields), + std::move(arrays)); + return helper.Convert(out); + } +} + +} // namespace py +} // namespace arrow diff --git a/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/arrow_to_pandas.h b/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/arrow_to_pandas.h new file mode 100644 index 0000000000000000000000000000000000000000..82e0a600513d4abd9bb956053a2a7e94a1033f39 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/arrow_to_pandas.h @@ -0,0 +1,146 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Functions for converting between pandas's NumPy-based data representation +// and Arrow data structures + +#pragma once + +#include "arrow/python/platform.h" + +#include +#include +#include + +#include "arrow/memory_pool.h" +#include "arrow/python/visibility.h" + +namespace arrow { + +class Array; +class ChunkedArray; +class Column; +class DataType; +class MemoryPool; +class Status; +class Table; + +namespace py { + +enum class MapConversionType { + DEFAULT, // convert arrow maps to assoc lists (list of kev-value tuples) in Pandas + LOSSY, // report warnings when lossiness is encountered due to duplicate keys + STRICT_, // raise a Python exception when lossiness is encountered due to duplicate + // keys +}; + +struct PandasOptions { + /// arrow::MemoryPool to use for memory allocations + MemoryPool* pool = default_memory_pool(); + + /// If true, we will convert all string columns to categoricals + bool strings_to_categorical = false; + bool zero_copy_only = false; + bool integer_object_nulls = false; + bool date_as_object = false; + bool timestamp_as_object = false; + bool use_threads = false; + + /// Coerce all date and timestamp to datetime64[ns] + bool coerce_temporal_nanoseconds = false; + + /// Used to maintain backwards compatibility for + /// timezone bugs (see ARROW-9528). Should be removed + /// after Arrow 2.0 release. 
+ bool ignore_timezone = false; + + /// \brief If true, do not create duplicate PyObject versions of equal + /// objects. This only applies to immutable objects like strings or datetime + /// objects + bool deduplicate_objects = false; + + /// \brief For certain data types, a cast is needed in order to store the + /// data in a pandas DataFrame or Series (e.g. timestamps are always stored + /// as nanoseconds in pandas). This option controls whether it is a safe + /// cast or not. + bool safe_cast = true; + + /// \brief If true, create one block per column rather than consolidated + /// blocks (1 per data type). Do zero-copy wrapping when there are no + /// nulls. pandas currently will consolidate the blocks on its own, causing + /// increased memory use, so keep this in mind if you are working on a + /// memory-constrained situation. + bool split_blocks = false; + + /// \brief If true, allow non-writable zero-copy views to be created for + /// single column blocks. This option is also used to provide zero copy for + /// Series data + bool allow_zero_copy_blocks = false; + + /// \brief If true, attempt to deallocate buffers in passed Arrow object if + /// it is the only remaining shared_ptr copy of it. See ARROW-3789 for + /// original context for this feature. Only currently implemented for Table + /// conversions + bool self_destruct = false; + + /// \brief The default behavior (DEFAULT), is to convert Arrow Map arrays to + /// Python association lists (list-of-tuples) in the same order as the Arrow + /// Map, as in [(key1, value1), (key2, value2), ...] + /// If LOSSY or STRICT, convert Arrow Map arrays to native Python dicts. + /// This can change the ordering of (key, value) pairs, and will deduplicate + /// multiple keys, resulting in a possible loss of data. + /// If 'lossy', this key deduplication results in a warning printed + /// when detected. If 'strict', this instead results in an exception + /// being raised when detected. 
+ MapConversionType maps_as_pydicts = MapConversionType::DEFAULT; + + // Used internally for nested arrays. + bool decode_dictionaries = false; + + // Columns that should be casted to categorical + std::unordered_set categorical_columns; + + // Columns that should be passed through to be converted to + // ExtensionArray/Block + std::unordered_set extension_columns; + + // Used internally to decipher between to_numpy() and to_pandas() when + // the expected output differs + bool to_numpy = false; +}; + +ARROW_PYTHON_EXPORT +Status ConvertArrayToPandas(const PandasOptions& options, std::shared_ptr arr, + PyObject* py_ref, PyObject** out); + +ARROW_PYTHON_EXPORT +Status ConvertChunkedArrayToPandas(const PandasOptions& options, + std::shared_ptr col, PyObject* py_ref, + PyObject** out); + +// Convert a whole table as efficiently as possible to a pandas.DataFrame. +// +// The returned Python object is a list of tuples consisting of the exact 2D +// BlockManager structure of the pandas.DataFrame used as of pandas 0.19.x. +// +// tuple item: (indices: ndarray[int32], block: ndarray[TYPE, ndim=2]) +ARROW_PYTHON_EXPORT +Status ConvertTableToPandas(const PandasOptions& options, std::shared_ptr
table, + PyObject** out); + +} // namespace py +} // namespace arrow diff --git a/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/arrow_to_python_internal.h b/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/arrow_to_python_internal.h new file mode 100644 index 0000000000000000000000000000000000000000..514cda320012316b1f9bc04a76c45159dc5bd181 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/arrow_to_python_internal.h @@ -0,0 +1,49 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/array.h" +#include "arrow/python/platform.h" + +namespace arrow { +namespace py { +namespace internal { +// TODO(ARROW-12976): See if we can refactor Pandas ObjectWriter logic +// to the .cc file and move this there as well if we can. + +// Converts array to a sequency of python objects. +template +inline Status WriteArrayObjects(const ArrayType& arr, WriteValue&& write_func, + Assigner out_values) { + // TODO(ARROW-12976): Use visitor here? 
+ const bool has_nulls = arr.null_count() > 0; + for (int64_t i = 0; i < arr.length(); ++i) { + if (has_nulls && arr.IsNull(i)) { + Py_INCREF(Py_None); + *out_values = Py_None; + } else { + RETURN_NOT_OK(write_func(arr.GetView(i), out_values)); + } + ++out_values; + } + return Status::OK(); +} + +} // namespace internal +} // namespace py +} // namespace arrow diff --git a/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/async.h b/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/async.h new file mode 100644 index 0000000000000000000000000000000000000000..1568d21938e6e79e724d957120e68a7576ba9c2a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/async.h @@ -0,0 +1,60 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include + +#include "arrow/python/common.h" +#include "arrow/status.h" +#include "arrow/util/future.h" + +namespace arrow::py { + +/// \brief Bind a Python callback to an arrow::Future. +/// +/// If the Future finishes successfully, py_wrapper is called with its +/// result value and should return a PyObject*. If py_wrapper is successful, +/// py_cb is called with its return value. 
+/// +/// If either the Future or py_wrapper fails, py_cb is called with the +/// associated Python exception. +/// +/// \param future The future to bind to. +/// \param py_cb The Python callback function. Will be passed the result of +/// py_wrapper, or a Python exception if the future failed or one was +/// raised by py_wrapper. +/// \param py_wrapper A function (likely defined in Cython) to convert the C++ +/// result of the future to a Python object. +template +void BindFuture(Future future, PyObject* py_cb, PyWrapper py_wrapper) { + Py_INCREF(py_cb); + OwnedRefNoGIL cb_ref(py_cb); + + auto future_cb = [cb_ref = std::move(cb_ref), + py_wrapper = std::move(py_wrapper)](Result result) { + SafeCallIntoPythonVoid([&]() { + OwnedRef py_value_or_exc{WrapResult(std::move(result), std::move(py_wrapper))}; + Py_XDECREF( + PyObject_CallFunctionObjArgs(cb_ref.obj(), py_value_or_exc.obj(), NULLPTR)); + ARROW_WARN_NOT_OK(CheckPyError(), "Internal error in async call"); + }); + }; + future.AddCallback(std::move(future_cb)); +} + +} // namespace arrow::py diff --git a/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/benchmark.cc b/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/benchmark.cc new file mode 100644 index 0000000000000000000000000000000000000000..6dcc959ed221247eb93a80179e61a1f40a726e29 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/benchmark.cc @@ -0,0 +1,38 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include "arrow/python/benchmark.h" +#include "arrow/python/helpers.h" + +namespace arrow { +namespace py { +namespace benchmark { + +void Benchmark_PandasObjectIsNull(PyObject* list) { + if (!PyList_CheckExact(list)) { + PyErr_SetString(PyExc_TypeError, "expected a list"); + return; + } + Py_ssize_t i, n = PyList_GET_SIZE(list); + for (i = 0; i < n; i++) { + internal::PandasObjectIsNull(PyList_GET_ITEM(list, i)); + } +} + +} // namespace benchmark +} // namespace py +} // namespace arrow diff --git a/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/benchmark.h b/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/benchmark.h new file mode 100644 index 0000000000000000000000000000000000000000..8060dd33722a08eb0935687ea5cb306dbd38a9f0 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/benchmark.h @@ -0,0 +1,36 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#pragma once + +#include "arrow/python/platform.h" + +#include "arrow/python/visibility.h" + +namespace arrow { +namespace py { +namespace benchmark { + +// Micro-benchmark routines for use from ASV + +// Run PandasObjectIsNull() once over every object in *list* +ARROW_PYTHON_EXPORT +void Benchmark_PandasObjectIsNull(PyObject* list); + +} // namespace benchmark +} // namespace py +} // namespace arrow diff --git a/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/common.cc b/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/common.cc new file mode 100644 index 0000000000000000000000000000000000000000..2f44a9122f0247960f5c8331824111ebb1b179a7 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/common.cc @@ -0,0 +1,246 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +#include "arrow/python/common.h" + +#include +#include +#include +#include + +#include "arrow/memory_pool.h" +#include "arrow/status.h" +#include "arrow/util/checked_cast.h" +#include "arrow/util/logging.h" + +#include "arrow/python/helpers.h" + +namespace arrow { + +using internal::checked_cast; + +namespace py { + +static std::mutex memory_pool_mutex; +static MemoryPool* default_python_pool = nullptr; + +void set_default_memory_pool(MemoryPool* pool) { + std::lock_guard guard(memory_pool_mutex); + default_python_pool = pool; +} + +MemoryPool* get_memory_pool() { + std::lock_guard guard(memory_pool_mutex); + if (default_python_pool) { + return default_python_pool; + } else { + return default_memory_pool(); + } +} + +// ---------------------------------------------------------------------- +// PythonErrorDetail + +namespace { + +const char kErrorDetailTypeId[] = "arrow::py::PythonErrorDetail"; + +// Try to match the Python exception type with an appropriate Status code +StatusCode MapPyError(PyObject* exc_type) { + StatusCode code; + + if (PyErr_GivenExceptionMatches(exc_type, PyExc_MemoryError)) { + code = StatusCode::OutOfMemory; + } else if (PyErr_GivenExceptionMatches(exc_type, PyExc_IndexError)) { + code = StatusCode::IndexError; + } else if (PyErr_GivenExceptionMatches(exc_type, PyExc_KeyError)) { + code = StatusCode::KeyError; + } else if (PyErr_GivenExceptionMatches(exc_type, PyExc_TypeError)) { + code = StatusCode::TypeError; + } else if (PyErr_GivenExceptionMatches(exc_type, PyExc_ValueError) || + PyErr_GivenExceptionMatches(exc_type, PyExc_OverflowError)) { + code = StatusCode::Invalid; + } else if (PyErr_GivenExceptionMatches(exc_type, PyExc_EnvironmentError)) { + code = StatusCode::IOError; + } else if (PyErr_GivenExceptionMatches(exc_type, PyExc_NotImplementedError)) { + code = StatusCode::NotImplemented; + } else { + code = 
StatusCode::UnknownError; + } + return code; +} + +// PythonErrorDetail indicates a Python exception was raised. +class PythonErrorDetail : public StatusDetail { + public: + const char* type_id() const override { return kErrorDetailTypeId; } + + std::string ToString() const override { + // This is simple enough not to need the GIL + Result result = FormatImpl(); + + if (result.ok()) { + return result.ValueOrDie(); + } else { + // Fallback to just the exception type + const auto ty = reinterpret_cast(exc_type_.obj()); + return std::string("Python exception: ") + ty->tp_name; + } + } + + void RestorePyError() const { + Py_INCREF(exc_type_.obj()); + Py_INCREF(exc_value_.obj()); + Py_INCREF(exc_traceback_.obj()); + PyErr_Restore(exc_type_.obj(), exc_value_.obj(), exc_traceback_.obj()); + } + + PyObject* exc_type() const { return exc_type_.obj(); } + + PyObject* exc_value() const { return exc_value_.obj(); } + + static std::shared_ptr FromPyError() { + PyObject* exc_type = nullptr; + PyObject* exc_value = nullptr; + PyObject* exc_traceback = nullptr; + + PyErr_Fetch(&exc_type, &exc_value, &exc_traceback); + PyErr_NormalizeException(&exc_type, &exc_value, &exc_traceback); + ARROW_CHECK(exc_type) + << "PythonErrorDetail::FromPyError called without a Python error set"; + DCHECK(PyType_Check(exc_type)); + DCHECK(exc_value); // Ensured by PyErr_NormalizeException, double-check + if (exc_traceback == nullptr) { + // Needed by PyErr_Restore() + Py_INCREF(Py_None); + exc_traceback = Py_None; + } + + std::shared_ptr detail(new PythonErrorDetail); + detail->exc_type_.reset(exc_type); + detail->exc_value_.reset(exc_value); + detail->exc_traceback_.reset(exc_traceback); + return detail; + } + + protected: + Result FormatImpl() const { + PyAcquireGIL lock; + + // Use traceback.format_exception() + OwnedRef traceback_module; + RETURN_NOT_OK(internal::ImportModule("traceback", &traceback_module)); + + OwnedRef fmt_exception; + 
RETURN_NOT_OK(internal::ImportFromModule(traceback_module.obj(), "format_exception", + &fmt_exception)); + + OwnedRef formatted; + formatted.reset(PyObject_CallFunctionObjArgs(fmt_exception.obj(), exc_type_.obj(), + exc_value_.obj(), exc_traceback_.obj(), + NULL)); + RETURN_IF_PYERROR(); + + std::stringstream ss; + ss << "Python exception: "; + Py_ssize_t num_lines = PySequence_Length(formatted.obj()); + RETURN_IF_PYERROR(); + + for (Py_ssize_t i = 0; i < num_lines; ++i) { + Py_ssize_t line_size; + + PyObject* line = PySequence_GetItem(formatted.obj(), i); + RETURN_IF_PYERROR(); + + const char* data = PyUnicode_AsUTF8AndSize(line, &line_size); + RETURN_IF_PYERROR(); + + ss << std::string_view(data, line_size); + } + return ss.str(); + } + + PythonErrorDetail() = default; + + OwnedRefNoGIL exc_type_, exc_value_, exc_traceback_; +}; + +} // namespace + +// ---------------------------------------------------------------------- +// Python exception <-> Status + +Status ConvertPyError(StatusCode code) { + auto detail = PythonErrorDetail::FromPyError(); + if (code == StatusCode::UnknownError) { + code = MapPyError(detail->exc_type()); + } + + std::string message; + RETURN_NOT_OK(internal::PyObject_StdStringStr(detail->exc_value(), &message)); + return Status(code, message, detail); +} + +bool IsPyError(const Status& status) { + if (status.ok()) { + return false; + } + auto detail = status.detail(); + bool result = detail != nullptr && detail->type_id() == kErrorDetailTypeId; + return result; +} + +void RestorePyError(const Status& status) { + ARROW_CHECK(IsPyError(status)); + const auto& detail = checked_cast(*status.detail()); + detail.RestorePyError(); +} + +// ---------------------------------------------------------------------- +// PyBuffer + +PyBuffer::PyBuffer() : Buffer(nullptr, 0) {} + +Status PyBuffer::Init(PyObject* obj) { + if (!PyObject_GetBuffer(obj, &py_buf_, PyBUF_ANY_CONTIGUOUS)) { + data_ = reinterpret_cast(py_buf_.buf); + ARROW_CHECK_NE(data_, nullptr) 
<< "Null pointer in Py_buffer"; + size_ = py_buf_.len; + capacity_ = py_buf_.len; + is_mutable_ = !py_buf_.readonly; + return Status::OK(); + } else { + return ConvertPyError(StatusCode::Invalid); + } +} + +Result> PyBuffer::FromPyObject(PyObject* obj) { + PyBuffer* buf = new PyBuffer(); + std::shared_ptr res(buf); + RETURN_NOT_OK(buf->Init(obj)); + return res; +} + +PyBuffer::~PyBuffer() { + if (data_ != nullptr) { + PyAcquireGIL lock; + PyBuffer_Release(&py_buf_); + } +} + +} // namespace py +} // namespace arrow diff --git a/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/common.h b/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/common.h new file mode 100644 index 0000000000000000000000000000000000000000..4a7886695eadbd70fa6442b1cae88c695f9cd602 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/pyarrow/src/arrow/python/common.h @@ -0,0 +1,458 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include +#include +#include + +#include "arrow/buffer.h" +#include "arrow/python/pyarrow.h" +#include "arrow/python/visibility.h" +#include "arrow/result.h" +#include "arrow/util/macros.h" + +namespace arrow { + +class MemoryPool; +template +class Result; + +namespace py { + +// Convert current Python error to a Status. The Python error state is cleared +// and can be restored with RestorePyError(). +ARROW_PYTHON_EXPORT Status ConvertPyError(StatusCode code = StatusCode::UnknownError); +// Query whether the given Status is a Python error (as wrapped by ConvertPyError()). +ARROW_PYTHON_EXPORT bool IsPyError(const Status& status); +// Restore a Python error wrapped in a Status. +ARROW_PYTHON_EXPORT void RestorePyError(const Status& status); + +// Catch a pending Python exception and return the corresponding Status. +// If no exception is pending, Status::OK() is returned. +inline Status CheckPyError(StatusCode code = StatusCode::UnknownError) { + if (ARROW_PREDICT_TRUE(!PyErr_Occurred())) { + return Status::OK(); + } else { + return ConvertPyError(code); + } +} + +#define RETURN_IF_PYERROR() ARROW_RETURN_NOT_OK(CheckPyError()) + +#define PY_RETURN_IF_ERROR(CODE) ARROW_RETURN_NOT_OK(CheckPyError(CODE)) + +// For Cython, as you can't define template C++ functions in Cython, only use them. +// This function can set a Python exception. It assumes that T has a (cheap) +// default constructor. +template +T GetResultValue(Result result) { + if (ARROW_PREDICT_TRUE(result.ok())) { + return *std::move(result); + } else { + int r = internal::check_status(result.status()); // takes the GIL + assert(r == -1); // should have errored out + ARROW_UNUSED(r); + return {}; + } +} + +/// \brief Wrap a Result and return the corresponding Python object. +/// +/// If the Result is successful, py_wrapper is called with its result value +/// and should return a PyObject*. If py_wrapper is successful (returns +/// a non-NULL value), its return value is returned. 
+/// +/// If either the Result or py_wrapper fails, the associated Python exception +/// is raised and NULL is returned. +// +/// \param result The Result whose value to wrap in a Python object. +/// \param py_wrapper A function (likely defined in Cython) to convert the C++ +/// value of the Result to a Python object. +/// \return A new Python reference, or NULL if an exception occurred +template +PyObject* WrapResult(Result result, PyWrapper&& py_wrapper) { + static_assert(std::is_same_v()))>, + "PyWrapper argument to WrapResult should return a PyObject* " + "when called with a T*"); + Status st = result.status(); + if (st.ok()) { + PyObject* py_value = py_wrapper(result.MoveValueUnsafe()); + st = CheckPyError(); + if (st.ok()) { + return py_value; + } + Py_XDECREF(py_value); // should be null, but who knows + } + // Status is an error, convert it to an exception. + return internal::convert_status(st); +} + +// A RAII-style helper that ensures the GIL is acquired inside a lexical block. +class ARROW_PYTHON_EXPORT PyAcquireGIL { + public: + PyAcquireGIL() : acquired_gil_(false) { acquire(); } + + ~PyAcquireGIL() { release(); } + + void acquire() { + if (!acquired_gil_) { + state_ = PyGILState_Ensure(); + acquired_gil_ = true; + } + } + + // idempotent + void release() { + if (acquired_gil_) { + PyGILState_Release(state_); + acquired_gil_ = false; + } + } + + private: + bool acquired_gil_; + PyGILState_STATE state_; + ARROW_DISALLOW_COPY_AND_ASSIGN(PyAcquireGIL); +}; + +// A RAII-style helper that releases the GIL until the end of a lexical block +class ARROW_PYTHON_EXPORT PyReleaseGIL { + public: + PyReleaseGIL() : ptr_(PyEval_SaveThread(), &unique_ptr_deleter) {} + + private: + static void unique_ptr_deleter(PyThreadState* state) { + if (state) { + PyEval_RestoreThread(state); + } + } + std::unique_ptr ptr_; +}; + +// A helper to call safely into the Python interpreter from arbitrary C++ code. 
+// The GIL is acquired, and the current thread's error status is preserved. +template +auto SafeCallIntoPython(Function&& func) -> decltype(func()) { + PyAcquireGIL lock; + PyObject* exc_type; + PyObject* exc_value; + PyObject* exc_traceback; + PyErr_Fetch(&exc_type, &exc_value, &exc_traceback); + auto maybe_status = std::forward(func)(); + // If the return Status is a "Python error", the current Python error status + // describes the error and shouldn't be clobbered. + if (!IsPyError(::arrow::internal::GenericToStatus(maybe_status)) && + exc_type != NULLPTR) { + PyErr_Restore(exc_type, exc_value, exc_traceback); + } + return maybe_status; +} + +template +auto SafeCallIntoPythonVoid(Function&& func) -> decltype(func()) { + PyAcquireGIL lock; + PyObject* exc_type; + PyObject* exc_value; + PyObject* exc_traceback; + PyErr_Fetch(&exc_type, &exc_value, &exc_traceback); + func(); + if (exc_type != NULLPTR) { + PyErr_Restore(exc_type, exc_value, exc_traceback); + } +} + +// A RAII primitive that DECREFs the underlying PyObject* when it +// goes out of scope. +class ARROW_PYTHON_EXPORT OwnedRef { + public: + OwnedRef() : obj_(NULLPTR) {} + OwnedRef(OwnedRef&& other) : OwnedRef(other.detach()) {} + explicit OwnedRef(PyObject* obj) : obj_(obj) {} + + OwnedRef& operator=(OwnedRef&& other) { + obj_ = other.detach(); + return *this; + } + + ~OwnedRef() { + // GH-38626: destructor may be called after the Python interpreter is finalized. + if (Py_IsInitialized()) { + reset(); + } + } + + void reset(PyObject* obj) { + Py_XDECREF(obj_); + obj_ = obj; + } + + void reset() { reset(NULLPTR); } + + PyObject* detach() { + PyObject* result = obj_; + obj_ = NULLPTR; + return result; + } + + PyObject* obj() const { return obj_; } + + PyObject** ref() { return &obj_; } + + operator bool() const { return obj_ != NULLPTR; } + + private: + ARROW_DISALLOW_COPY_AND_ASSIGN(OwnedRef); + + PyObject* obj_; +}; + +// Same as OwnedRef, but ensures the GIL is taken when it goes out of scope. 
+// This is for situations where the GIL is not always known to be held +// (e.g. if it is released in the middle of a function for performance reasons) +class ARROW_PYTHON_EXPORT OwnedRefNoGIL : public OwnedRef { + public: + OwnedRefNoGIL() : OwnedRef() {} + OwnedRefNoGIL(OwnedRefNoGIL&& other) : OwnedRef(other.detach()) {} + explicit OwnedRefNoGIL(PyObject* obj) : OwnedRef(obj) {} + + ~OwnedRefNoGIL() { + // GH-38626: destructor may be called after the Python interpreter is finalized. + if (Py_IsInitialized() && obj() != NULLPTR) { + PyAcquireGIL lock; + reset(); + } + } +}; + +template