repo
stringclasses 1
value | pull_number
int64 14
6.65k
| instance_id
stringlengths 19
21
| issue_numbers
sequencelengths 1
2
| base_commit
stringlengths 40
40
| patch
stringlengths 505
226k
| test_patch
stringlengths 265
112k
| problem_statement
stringlengths 99
16.2k
| hints_text
stringlengths 0
33.2k
| created_at
stringlengths 20
20
| version
stringlengths 3
4
| environment_setup_commit
stringlengths 40
40
|
|---|---|---|---|---|---|---|---|---|---|---|---|
apache/arrow-rs
| 6,649
|
apache__arrow-rs-6649
|
[
"6648"
] |
7bcc1ad988498def843180c6a4c95f9732f31a4b
|
diff --git a/parquet/src/record/reader.rs b/parquet/src/record/reader.rs
index 1f9128a8b4f..fd6ca7cdd57 100644
--- a/parquet/src/record/reader.rs
+++ b/parquet/src/record/reader.rs
@@ -138,7 +138,17 @@ impl TreeBuilder {
.column_descr_ptr();
let col_reader = row_group_reader.get_column_reader(orig_index)?;
let column = TripletIter::new(col_descr, col_reader, self.batch_size);
- Reader::PrimitiveReader(field, Box::new(column))
+ let reader = Reader::PrimitiveReader(field.clone(), Box::new(column));
+ if repetition == Repetition::REPEATED {
+ Reader::RepeatedReader(
+ field,
+ curr_def_level - 1,
+ curr_rep_level - 1,
+ Box::new(reader),
+ )
+ } else {
+ reader
+ }
} else {
match field.get_basic_info().converted_type() {
// List types
@@ -1688,6 +1698,131 @@ mod tests {
assert_eq!(rows, expected_rows);
}
+ #[test]
+ fn test_tree_reader_handle_primitive_repeated_fields_with_no_annotation() {
+ // In this test the REPEATED fields are primitives
+ let rows = test_file_reader_rows("repeated_primitive_no_list.parquet", None).unwrap();
+ let expected_rows = vec![
+ row![
+ (
+ "Int32_list".to_string(),
+ Field::ListInternal(make_list([0, 1, 2, 3].map(Field::Int).to_vec()))
+ ),
+ (
+ "String_list".to_string(),
+ Field::ListInternal(make_list(
+ ["foo", "zero", "one", "two"]
+ .map(|s| Field::Str(s.to_string()))
+ .to_vec()
+ ))
+ ),
+ (
+ "group_of_lists".to_string(),
+ group![
+ (
+ "Int32_list_in_group".to_string(),
+ Field::ListInternal(make_list([0, 1, 2, 3].map(Field::Int).to_vec()))
+ ),
+ (
+ "String_list_in_group".to_string(),
+ Field::ListInternal(make_list(
+ ["foo", "zero", "one", "two"]
+ .map(|s| Field::Str(s.to_string()))
+ .to_vec()
+ ))
+ )
+ ]
+ )
+ ],
+ row![
+ (
+ "Int32_list".to_string(),
+ Field::ListInternal(make_list(vec![]))
+ ),
+ (
+ "String_list".to_string(),
+ Field::ListInternal(make_list(
+ ["three"].map(|s| Field::Str(s.to_string())).to_vec()
+ ))
+ ),
+ (
+ "group_of_lists".to_string(),
+ group![
+ (
+ "Int32_list_in_group".to_string(),
+ Field::ListInternal(make_list(vec![]))
+ ),
+ (
+ "String_list_in_group".to_string(),
+ Field::ListInternal(make_list(
+ ["three"].map(|s| Field::Str(s.to_string())).to_vec()
+ ))
+ )
+ ]
+ )
+ ],
+ row![
+ (
+ "Int32_list".to_string(),
+ Field::ListInternal(make_list(vec![Field::Int(4)]))
+ ),
+ (
+ "String_list".to_string(),
+ Field::ListInternal(make_list(
+ ["four"].map(|s| Field::Str(s.to_string())).to_vec()
+ ))
+ ),
+ (
+ "group_of_lists".to_string(),
+ group![
+ (
+ "Int32_list_in_group".to_string(),
+ Field::ListInternal(make_list(vec![Field::Int(4)]))
+ ),
+ (
+ "String_list_in_group".to_string(),
+ Field::ListInternal(make_list(
+ ["four"].map(|s| Field::Str(s.to_string())).to_vec()
+ ))
+ )
+ ]
+ )
+ ],
+ row![
+ (
+ "Int32_list".to_string(),
+ Field::ListInternal(make_list([5, 6, 7, 8].map(Field::Int).to_vec()))
+ ),
+ (
+ "String_list".to_string(),
+ Field::ListInternal(make_list(
+ ["five", "six", "seven", "eight"]
+ .map(|s| Field::Str(s.to_string()))
+ .to_vec()
+ ))
+ ),
+ (
+ "group_of_lists".to_string(),
+ group![
+ (
+ "Int32_list_in_group".to_string(),
+ Field::ListInternal(make_list([5, 6, 7, 8].map(Field::Int).to_vec()))
+ ),
+ (
+ "String_list_in_group".to_string(),
+ Field::ListInternal(make_list(
+ ["five", "six", "seven", "eight"]
+ .map(|s| Field::Str(s.to_string()))
+ .to_vec()
+ ))
+ )
+ ]
+ )
+ ],
+ ];
+ assert_eq!(rows, expected_rows);
+ }
+
fn test_file_reader_rows(file_name: &str, schema: Option<Type>) -> Result<Vec<Row>> {
let file = get_test_file(file_name);
let file_reader: Box<dyn FileReader> = Box::new(SerializedFileReader::new(file)?);
|
diff --git a/parquet-testing b/parquet-testing
index 50af3d8ce20..550368ca77b 160000
--- a/parquet-testing
+++ b/parquet-testing
@@ -1,1 +1,1 @@
-Subproject commit 50af3d8ce206990d81014b1862e5ce7380dc3e08
+Subproject commit 550368ca77b97231efead39251a96bd6f8f08c6e
|
Primitive REPEATED fields not contained in LIST annotated groups aren't read as lists by record reader
**Describe the bug**
Primitive REPEATED fields not contained in LIST annotated groups should be read as lists according to the format but aren't.
**To Reproduce**
<!--
Steps to reproduce the behavior:
-->
**Expected behavior**
<!--
A clear and concise description of what you expected to happen.
-->
**Additional context**
<!--
Add any other context about the problem here.
-->
|
2024-10-29T23:41:15Z
|
53.2
|
7bcc1ad988498def843180c6a4c95f9732f31a4b
|
|
apache/arrow-rs
| 6,453
|
apache__arrow-rs-6453
|
[
"6282"
] |
f41c258246cd4bd9d89228cded9ed54dbd00faff
|
diff --git a/arrow-flight/examples/flight_sql_server.rs b/arrow-flight/examples/flight_sql_server.rs
index 81afecf85625..dd3a3943dd95 100644
--- a/arrow-flight/examples/flight_sql_server.rs
+++ b/arrow-flight/examples/flight_sql_server.rs
@@ -19,6 +19,7 @@ use arrow_flight::sql::server::PeekableFlightDataStream;
use arrow_flight::sql::DoPutPreparedStatementResult;
use base64::prelude::BASE64_STANDARD;
use base64::Engine;
+use core::str;
use futures::{stream, Stream, TryStreamExt};
use once_cell::sync::Lazy;
use prost::Message;
@@ -168,7 +169,7 @@ impl FlightSqlService for FlightSqlServiceImpl {
let bytes = BASE64_STANDARD
.decode(base64)
.map_err(|e| status!("authorization not decodable", e))?;
- let str = String::from_utf8(bytes).map_err(|e| status!("authorization not parsable", e))?;
+ let str = str::from_utf8(&bytes).map_err(|e| status!("authorization not parsable", e))?;
let parts: Vec<_> = str.split(':').collect();
let (user, pass) = match parts.as_slice() {
[user, pass] => (user, pass),
diff --git a/arrow-flight/src/bin/flight_sql_client.rs b/arrow-flight/src/bin/flight_sql_client.rs
index c334b95a9a96..8f0618f495bc 100644
--- a/arrow-flight/src/bin/flight_sql_client.rs
+++ b/arrow-flight/src/bin/flight_sql_client.rs
@@ -26,6 +26,7 @@ use arrow_flight::{
};
use arrow_schema::Schema;
use clap::{Parser, Subcommand};
+use core::str;
use futures::TryStreamExt;
use tonic::{
metadata::MetadataMap,
diff --git a/arrow-flight/src/decode.rs b/arrow-flight/src/decode.rs
index 5561f256ce01..7bafc384306b 100644
--- a/arrow-flight/src/decode.rs
+++ b/arrow-flight/src/decode.rs
@@ -388,11 +388,14 @@ struct FlightStreamState {
/// FlightData and the decoded payload (Schema, RecordBatch), if any
#[derive(Debug)]
pub struct DecodedFlightData {
+ /// The original FlightData message
pub inner: FlightData,
+ /// The decoded payload
pub payload: DecodedPayload,
}
impl DecodedFlightData {
+ /// Create a new DecodedFlightData with no payload
pub fn new_none(inner: FlightData) -> Self {
Self {
inner,
@@ -400,6 +403,7 @@ impl DecodedFlightData {
}
}
+ /// Create a new DecodedFlightData with a [`Schema`] payload
pub fn new_schema(inner: FlightData, schema: SchemaRef) -> Self {
Self {
inner,
@@ -407,6 +411,7 @@ impl DecodedFlightData {
}
}
+ /// Create a new [`DecodedFlightData`] with a [`RecordBatch`] payload
pub fn new_record_batch(inner: FlightData, batch: RecordBatch) -> Self {
Self {
inner,
@@ -414,7 +419,7 @@ impl DecodedFlightData {
}
}
- /// return the metadata field of the inner flight data
+ /// Return the metadata field of the inner flight data
pub fn app_metadata(&self) -> Bytes {
self.inner.app_metadata.clone()
}
diff --git a/arrow-flight/src/encode.rs b/arrow-flight/src/encode.rs
index 59fa8afd58d5..55bc9240321d 100644
--- a/arrow-flight/src/encode.rs
+++ b/arrow-flight/src/encode.rs
@@ -144,6 +144,7 @@ impl Default for FlightDataEncoderBuilder {
}
impl FlightDataEncoderBuilder {
+ /// Create a new [`FlightDataEncoderBuilder`].
pub fn new() -> Self {
Self::default()
}
@@ -1403,7 +1404,7 @@ mod tests {
let input_rows = batch.num_rows();
let split = split_batch_for_grpc_response(batch.clone(), max_flight_data_size_bytes);
- let sizes: Vec<_> = split.iter().map(|batch| batch.num_rows()).collect();
+ let sizes: Vec<_> = split.iter().map(RecordBatch::num_rows).collect();
let output_rows: usize = sizes.iter().sum();
assert_eq!(sizes, expected_sizes, "mismatch for {batch:?}");
diff --git a/arrow-flight/src/error.rs b/arrow-flight/src/error.rs
index ba979ca9f7a6..499706e1ede7 100644
--- a/arrow-flight/src/error.rs
+++ b/arrow-flight/src/error.rs
@@ -37,6 +37,7 @@ pub enum FlightError {
}
impl FlightError {
+ /// Generate a new `FlightError::ProtocolError` variant.
pub fn protocol(message: impl Into<String>) -> Self {
Self::ProtocolError(message.into())
}
@@ -98,6 +99,7 @@ impl From<FlightError> for tonic::Status {
}
}
+/// Result type for the Apache Arrow Flight crate
pub type Result<T> = std::result::Result<T, FlightError>;
#[cfg(test)]
diff --git a/arrow-flight/src/lib.rs b/arrow-flight/src/lib.rs
index 64e3ba01c5bd..9f18416c06ec 100644
--- a/arrow-flight/src/lib.rs
+++ b/arrow-flight/src/lib.rs
@@ -37,6 +37,7 @@
//!
//! [Flight SQL]: https://arrow.apache.org/docs/format/FlightSql.html
#![allow(rustdoc::invalid_html_tags)]
+#![warn(missing_docs)]
use arrow_ipc::{convert, writer, writer::EncodedData, writer::IpcWriteOptions};
use arrow_schema::{ArrowError, Schema};
@@ -52,6 +53,8 @@ type ArrowResult<T> = std::result::Result<T, ArrowError>;
#[allow(clippy::all)]
mod gen {
+ // Since this file is auto-generated, we suppress all warnings
+ #![allow(missing_docs)]
include!("arrow.flight.protocol.rs");
}
@@ -125,6 +128,7 @@ use flight_descriptor::DescriptorType;
/// SchemaAsIpc represents a pairing of a `Schema` with IpcWriteOptions
pub struct SchemaAsIpc<'a> {
+ /// Data type representing a schema and its IPC write options
pub pair: (&'a Schema, &'a IpcWriteOptions),
}
@@ -684,6 +688,7 @@ impl PollInfo {
}
impl<'a> SchemaAsIpc<'a> {
+ /// Create a new `SchemaAsIpc` from a `Schema` and `IpcWriteOptions`
pub fn new(schema: &'a Schema, options: &'a IpcWriteOptions) -> Self {
SchemaAsIpc {
pair: (schema, options),
diff --git a/arrow-flight/src/sql/client.rs b/arrow-flight/src/sql/client.rs
index ef52aa27ef50..e45e505b2b61 100644
--- a/arrow-flight/src/sql/client.rs
+++ b/arrow-flight/src/sql/client.rs
@@ -695,9 +695,11 @@ fn flight_error_to_arrow_error(err: FlightError) -> ArrowError {
}
}
-// A polymorphic structure to natively represent different types of data contained in `FlightData`
+/// A polymorphic structure to natively represent different types of data contained in `FlightData`
pub enum ArrowFlightData {
+ /// A record batch
RecordBatch(RecordBatch),
+ /// A schema
Schema(Schema),
}
diff --git a/arrow-flight/src/sql/metadata/sql_info.rs b/arrow-flight/src/sql/metadata/sql_info.rs
index 97304d3c872d..2ea30df7fc2f 100644
--- a/arrow-flight/src/sql/metadata/sql_info.rs
+++ b/arrow-flight/src/sql/metadata/sql_info.rs
@@ -331,7 +331,7 @@ impl SqlInfoUnionBuilder {
///
/// Servers constuct - usually static - [`SqlInfoData`] via the [`SqlInfoDataBuilder`],
/// and build responses using [`CommandGetSqlInfo::into_builder`]
-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq, Default)]
pub struct SqlInfoDataBuilder {
/// Use BTreeMap to ensure the values are sorted by value as
/// to make output consistent
@@ -341,17 +341,10 @@ pub struct SqlInfoDataBuilder {
infos: BTreeMap<u32, SqlInfoValue>,
}
-impl Default for SqlInfoDataBuilder {
- fn default() -> Self {
- Self::new()
- }
-}
-
impl SqlInfoDataBuilder {
+ /// Create a new SQL info builder
pub fn new() -> Self {
- Self {
- infos: BTreeMap::new(),
- }
+ Self::default()
}
/// register the specific sql metadata item
diff --git a/arrow-flight/src/sql/metadata/xdbc_info.rs b/arrow-flight/src/sql/metadata/xdbc_info.rs
index 2e635d3037bc..485bedaebfb0 100644
--- a/arrow-flight/src/sql/metadata/xdbc_info.rs
+++ b/arrow-flight/src/sql/metadata/xdbc_info.rs
@@ -41,24 +41,43 @@ use crate::sql::{CommandGetXdbcTypeInfo, Nullable, Searchable, XdbcDataType, Xdb
/// Data structure representing type information for xdbc types.
#[derive(Debug, Clone, Default)]
pub struct XdbcTypeInfo {
+ /// The name of the type
pub type_name: String,
+ /// The data type of the type
pub data_type: XdbcDataType,
+ /// The column size of the type
pub column_size: Option<i32>,
+ /// The prefix of the type
pub literal_prefix: Option<String>,
+ /// The suffix of the type
pub literal_suffix: Option<String>,
+ /// The create parameters of the type
pub create_params: Option<Vec<String>>,
+ /// The nullability of the type
pub nullable: Nullable,
+ /// Whether the type is case sensitive
pub case_sensitive: bool,
+ /// Whether the type is searchable
pub searchable: Searchable,
+ /// Whether the type is unsigned
pub unsigned_attribute: Option<bool>,
+ /// Whether the type has fixed precision and scale
pub fixed_prec_scale: bool,
+ /// Whether the type is auto-incrementing
pub auto_increment: Option<bool>,
+ /// The local type name of the type
pub local_type_name: Option<String>,
+ /// The minimum scale of the type
pub minimum_scale: Option<i32>,
+ /// The maximum scale of the type
pub maximum_scale: Option<i32>,
+ /// The SQL data type of the type
pub sql_data_type: XdbcDataType,
+ /// The optional datetime subcode of the type
pub datetime_subcode: Option<XdbcDatetimeSubcode>,
+ /// The number precision radix of the type
pub num_prec_radix: Option<i32>,
+ /// The interval precision of the type
pub interval_precision: Option<i32>,
}
@@ -93,16 +112,6 @@ impl XdbcTypeInfoData {
}
}
-pub struct XdbcTypeInfoDataBuilder {
- infos: Vec<XdbcTypeInfo>,
-}
-
-impl Default for XdbcTypeInfoDataBuilder {
- fn default() -> Self {
- Self::new()
- }
-}
-
/// A builder for [`XdbcTypeInfoData`] which is used to create [`CommandGetXdbcTypeInfo`] responses.
///
/// # Example
@@ -138,6 +147,16 @@ impl Default for XdbcTypeInfoDataBuilder {
/// // to access the underlying record batch
/// let batch = info_list.record_batch(None);
/// ```
+pub struct XdbcTypeInfoDataBuilder {
+ infos: Vec<XdbcTypeInfo>,
+}
+
+impl Default for XdbcTypeInfoDataBuilder {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
impl XdbcTypeInfoDataBuilder {
/// Create a new instance of [`XdbcTypeInfoDataBuilder`].
pub fn new() -> Self {
diff --git a/arrow-flight/src/sql/mod.rs b/arrow-flight/src/sql/mod.rs
index 453f608d353a..94bb96a4f852 100644
--- a/arrow-flight/src/sql/mod.rs
+++ b/arrow-flight/src/sql/mod.rs
@@ -43,9 +43,11 @@ use bytes::Bytes;
use paste::paste;
use prost::Message;
+#[allow(clippy::all)]
mod gen {
- #![allow(clippy::all)]
#![allow(rustdoc::unportable_markdown)]
+ // Since this file is auto-generated, we suppress all warnings
+ #![allow(missing_docs)]
include!("arrow.flight.protocol.sql.rs");
}
@@ -163,7 +165,9 @@ macro_rules! prost_message_ext {
/// ```
#[derive(Clone, Debug, PartialEq)]
pub enum Command {
- $($name($name),)*
+ $(
+ #[doc = concat!(stringify!($name), "variant")]
+ $name($name),)*
/// Any message that is not any FlightSQL command.
Unknown(Any),
@@ -297,10 +301,12 @@ pub struct Any {
}
impl Any {
+ /// Checks whether the message is of type `M`
pub fn is<M: ProstMessageExt>(&self) -> bool {
M::type_url() == self.type_url
}
+ /// Unpacks the contents of the message if it is of type `M`
pub fn unpack<M: ProstMessageExt>(&self) -> Result<Option<M>, ArrowError> {
if !self.is::<M>() {
return Ok(None);
@@ -310,6 +316,7 @@ impl Any {
Ok(Some(m))
}
+ /// Packs a message into an [`Any`] message
pub fn pack<M: ProstMessageExt>(message: &M) -> Result<Any, ArrowError> {
Ok(message.as_any())
}
diff --git a/arrow-flight/src/utils.rs b/arrow-flight/src/utils.rs
index 37d7ff9e7293..f6129ddfe248 100644
--- a/arrow-flight/src/utils.rs
+++ b/arrow-flight/src/utils.rs
@@ -160,9 +160,12 @@ pub fn batches_to_flight_data(
dictionaries.extend(encoded_dictionaries.into_iter().map(Into::into));
flight_data.push(encoded_batch.into());
}
- let mut stream = vec![schema_flight_data];
+
+ let mut stream = Vec::with_capacity(1 + dictionaries.len() + flight_data.len());
+
+ stream.push(schema_flight_data);
stream.extend(dictionaries);
stream.extend(flight_data);
- let flight_data: Vec<_> = stream.into_iter().collect();
+ let flight_data = stream;
Ok(flight_data)
}
diff --git a/arrow-ipc/src/convert.rs b/arrow-ipc/src/convert.rs
index 52c6a0d614d0..eef236529e10 100644
--- a/arrow-ipc/src/convert.rs
+++ b/arrow-ipc/src/convert.rs
@@ -133,6 +133,7 @@ pub fn schema_to_fb(schema: &Schema) -> FlatBufferBuilder<'_> {
IpcSchemaEncoder::new().schema_to_fb(schema)
}
+/// Push a key-value metadata into a FlatBufferBuilder and return [WIPOffset]
pub fn metadata_to_fb<'a>(
fbb: &mut FlatBufferBuilder<'a>,
metadata: &HashMap<String, String>,
@@ -152,7 +153,7 @@ pub fn metadata_to_fb<'a>(
fbb.create_vector(&custom_metadata)
}
-#[deprecated(since = "54.0.0", note = "Use `IpcSchemaConverter`.")]
+/// Adds a [Schema] to a flatbuffer and returns the offset
pub fn schema_to_fb_offset<'a>(
fbb: &mut FlatBufferBuilder<'a>,
schema: &Schema,
diff --git a/arrow-ipc/src/lib.rs b/arrow-ipc/src/lib.rs
index 4f35ffb60a9f..dde137153964 100644
--- a/arrow-ipc/src/lib.rs
+++ b/arrow-ipc/src/lib.rs
@@ -19,6 +19,7 @@
//!
//! [Arrow IPC Format]: https://arrow.apache.org/docs/format/Columnar.html#serialization-and-interprocess-communication-ipc
+#![warn(missing_docs)]
pub mod convert;
pub mod reader;
pub mod writer;
@@ -31,6 +32,7 @@ mod compression;
#[allow(clippy::redundant_static_lifetimes)]
#[allow(clippy::redundant_field_names)]
#[allow(non_camel_case_types)]
+#[allow(missing_docs)] // Because this is autogenerated
pub mod gen;
pub use self::gen::File::*;
diff --git a/arrow-ipc/src/writer.rs b/arrow-ipc/src/writer.rs
index b5cf20ef337f..f9256b4e8175 100644
--- a/arrow-ipc/src/writer.rs
+++ b/arrow-ipc/src/writer.rs
@@ -60,7 +60,7 @@ pub struct IpcWriteOptions {
/// Compression, if desired. Will result in a runtime error
/// if the corresponding feature is not enabled
batch_compression_type: Option<crate::CompressionType>,
- /// Flag indicating whether the writer should preserver the dictionary IDs defined in the
+ /// Flag indicating whether the writer should preserve the dictionary IDs defined in the
/// schema or generate unique dictionary IDs internally during encoding.
///
/// Defaults to `true`
@@ -135,6 +135,8 @@ impl IpcWriteOptions {
}
}
+ /// Return whether the writer is configured to preserve the dictionary IDs
+ /// defined in the schema
pub fn preserve_dict_id(&self) -> bool {
self.preserve_dict_id
}
@@ -200,6 +202,11 @@ impl Default for IpcWriteOptions {
pub struct IpcDataGenerator {}
impl IpcDataGenerator {
+ /// Converts a schema to an IPC message along with `dictionary_tracker`
+ /// and returns it encoded inside [EncodedData] as a flatbuffer
+ ///
+ /// Preferred method over [IpcDataGenerator::schema_to_bytes] since it's
+ /// deprecated since Arrow v54.0.0
pub fn schema_to_bytes_with_dictionary_tracker(
&self,
schema: &Schema,
@@ -234,6 +241,7 @@ impl IpcDataGenerator {
since = "54.0.0",
note = "Use `schema_to_bytes_with_dictionary_tracker` instead. This function signature of `schema_to_bytes_with_dictionary_tracker` in the next release."
)]
+ /// Converts a schema to an IPC message and returns it encoded inside [EncodedData] as a flatbuffer
pub fn schema_to_bytes(&self, schema: &Schema, write_options: &IpcWriteOptions) -> EncodedData {
let mut fbb = FlatBufferBuilder::new();
let schema = {
@@ -951,6 +959,7 @@ impl<W: Write> FileWriter<W> {
})
}
+ /// Adds a key-value pair to the [FileWriter]'s custom metadata
pub fn write_metadata(&mut self, key: impl Into<String>, value: impl Into<String>) {
self.custom_metadata.insert(key.into(), value.into());
}
diff --git a/arrow-json/src/writer.rs b/arrow-json/src/writer.rs
index 86d2e88d99f0..d973206ccf74 100644
--- a/arrow-json/src/writer.rs
+++ b/arrow-json/src/writer.rs
@@ -397,6 +397,7 @@ where
#[cfg(test)]
mod tests {
+ use core::str;
use std::fs::{read_to_string, File};
use std::io::{BufReader, Seek};
use std::sync::Arc;
@@ -1111,7 +1112,7 @@ mod tests {
}
}
- let result = String::from_utf8(buf).unwrap();
+ let result = str::from_utf8(&buf).unwrap();
let expected = read_to_string(test_file).unwrap();
for (r, e) in result.lines().zip(expected.lines()) {
let mut expected_json = serde_json::from_str::<Value>(e).unwrap();
@@ -1150,7 +1151,7 @@ mod tests {
fn json_writer_empty() {
let mut writer = ArrayWriter::new(vec![] as Vec<u8>);
writer.finish().unwrap();
- assert_eq!(String::from_utf8(writer.into_inner()).unwrap(), "");
+ assert_eq!(str::from_utf8(&writer.into_inner()).unwrap(), "");
}
#[test]
@@ -1279,7 +1280,7 @@ mod tests {
writer.write(&batch).unwrap();
}
- let result = String::from_utf8(buf).unwrap();
+ let result = str::from_utf8(&buf).unwrap();
let expected = read_to_string(test_file).unwrap();
for (r, e) in result.lines().zip(expected.lines()) {
let mut expected_json = serde_json::from_str::<Value>(e).unwrap();
@@ -1321,7 +1322,7 @@ mod tests {
writer.write_batches(&batches).unwrap();
}
- let result = String::from_utf8(buf).unwrap();
+ let result = str::from_utf8(&buf).unwrap();
let expected = read_to_string(test_file).unwrap();
// result is eq to 2 same batches
let expected = format!("{expected}\n{expected}");
diff --git a/arrow-schema/src/field.rs b/arrow-schema/src/field.rs
index fc4852a3d37d..b532ea8616b6 100644
--- a/arrow-schema/src/field.rs
+++ b/arrow-schema/src/field.rs
@@ -610,14 +610,14 @@ mod test {
#[test]
fn test_new_with_string() {
// Fields should allow owned Strings to support reuse
- let s = String::from("c1");
+ let s = "c1";
Field::new(s, DataType::Int64, false);
}
#[test]
fn test_new_dict_with_string() {
// Fields should allow owned Strings to support reuse
- let s = String::from("c1");
+ let s = "c1";
Field::new_dict(s, DataType::Int64, false, 4, false);
}
diff --git a/object_store/src/aws/builder.rs b/object_store/src/aws/builder.rs
index 75acb73e56a9..c52c3f8dfbd7 100644
--- a/object_store/src/aws/builder.rs
+++ b/object_store/src/aws/builder.rs
@@ -44,7 +44,6 @@ static DEFAULT_METADATA_ENDPOINT: &str = "http://169.254.169.254";
/// A specialized `Error` for object store-related errors
#[derive(Debug, Snafu)]
-#[allow(missing_docs)]
enum Error {
#[snafu(display("Missing bucket name"))]
MissingBucketName,
diff --git a/object_store/src/aws/client.rs b/object_store/src/aws/client.rs
index 6fe4889db176..7034a372e95f 100644
--- a/object_store/src/aws/client.rs
+++ b/object_store/src/aws/client.rs
@@ -65,7 +65,6 @@ const USER_DEFINED_METADATA_HEADER_PREFIX: &str = "x-amz-meta-";
/// A specialized `Error` for object store-related errors
#[derive(Debug, Snafu)]
-#[allow(missing_docs)]
pub(crate) enum Error {
#[snafu(display("Error performing DeleteObjects request: {}", source))]
DeleteObjectsRequest { source: crate::client::retry::Error },
diff --git a/object_store/src/aws/resolve.rs b/object_store/src/aws/resolve.rs
index 12c9f26d220b..4c7489316b6c 100644
--- a/object_store/src/aws/resolve.rs
+++ b/object_store/src/aws/resolve.rs
@@ -21,7 +21,6 @@ use snafu::{ensure, OptionExt, ResultExt, Snafu};
/// A specialized `Error` for object store-related errors
#[derive(Debug, Snafu)]
-#[allow(missing_docs)]
enum Error {
#[snafu(display("Bucket '{}' not found", bucket))]
BucketNotFound { bucket: String },
diff --git a/object_store/src/azure/builder.rs b/object_store/src/azure/builder.rs
index 35cedeafc049..1c4589ba1ec6 100644
--- a/object_store/src/azure/builder.rs
+++ b/object_store/src/azure/builder.rs
@@ -46,7 +46,6 @@ const MSI_ENDPOINT_ENV_KEY: &str = "IDENTITY_ENDPOINT";
/// A specialized `Error` for Azure builder-related errors
#[derive(Debug, Snafu)]
-#[allow(missing_docs)]
enum Error {
#[snafu(display("Unable parse source url. Url: {}, Error: {}", url, source))]
UnableToParseUrl {
diff --git a/object_store/src/azure/client.rs b/object_store/src/azure/client.rs
index 04990515543a..06d3fb5c8678 100644
--- a/object_store/src/azure/client.rs
+++ b/object_store/src/azure/client.rs
@@ -60,7 +60,6 @@ static TAGS_HEADER: HeaderName = HeaderName::from_static("x-ms-tags");
/// A specialized `Error` for object store-related errors
#[derive(Debug, Snafu)]
-#[allow(missing_docs)]
pub(crate) enum Error {
#[snafu(display("Error performing get request {}: {}", path, source))]
GetRequest {
diff --git a/object_store/src/client/get.rs b/object_store/src/client/get.rs
index 0fef5785c052..ae6a8d9deaae 100644
--- a/object_store/src/client/get.rs
+++ b/object_store/src/client/get.rs
@@ -96,7 +96,6 @@ impl ContentRange {
/// A specialized `Error` for get-related errors
#[derive(Debug, Snafu)]
-#[allow(missing_docs)]
enum GetResultError {
#[snafu(context(false))]
Header {
diff --git a/object_store/src/lib.rs b/object_store/src/lib.rs
index 8820983b2025..a0d83eb0b6dd 100644
--- a/object_store/src/lib.rs
+++ b/object_store/src/lib.rs
@@ -1224,78 +1224,116 @@ pub type Result<T, E = Error> = std::result::Result<T, E>;
/// A specialized `Error` for object store-related errors
#[derive(Debug, Snafu)]
-#[allow(missing_docs)]
#[non_exhaustive]
pub enum Error {
+ /// A fallback error type when no variant matches
#[snafu(display("Generic {} error: {}", store, source))]
Generic {
+ /// The store this error originated from
store: &'static str,
+ /// The wrapped error
source: Box<dyn std::error::Error + Send + Sync + 'static>,
},
+ /// Error when the object is not found at given location
#[snafu(display("Object at location {} not found: {}", path, source))]
NotFound {
+ /// The path to file
path: String,
+ /// The wrapped error
source: Box<dyn std::error::Error + Send + Sync + 'static>,
},
+ /// Error for invalid path
#[snafu(
display("Encountered object with invalid path: {}", source),
context(false)
)]
- InvalidPath { source: path::Error },
+ InvalidPath {
+ /// The wrapped error
+ source: path::Error,
+ },
+ /// Error when `tokio::spawn` failed
#[snafu(display("Error joining spawned task: {}", source), context(false))]
- JoinError { source: tokio::task::JoinError },
+ JoinError {
+ /// The wrapped error
+ source: tokio::task::JoinError,
+ },
+ /// Error when the attempted operation is not supported
#[snafu(display("Operation not supported: {}", source))]
NotSupported {
+ /// The wrapped error
source: Box<dyn std::error::Error + Send + Sync + 'static>,
},
+ /// Error when the object already exists
#[snafu(display("Object at location {} already exists: {}", path, source))]
AlreadyExists {
+ /// The path to the
path: String,
+ /// The wrapped error
source: Box<dyn std::error::Error + Send + Sync + 'static>,
},
+ /// Error when the required conditions failed for the operation
#[snafu(display("Request precondition failure for path {}: {}", path, source))]
Precondition {
+ /// The path to the file
path: String,
+ /// The wrapped error
source: Box<dyn std::error::Error + Send + Sync + 'static>,
},
+ /// Error when the object at the location isn't modified
#[snafu(display("Object at location {} not modified: {}", path, source))]
NotModified {
+ /// The path to the file
path: String,
+ /// The wrapped error
source: Box<dyn std::error::Error + Send + Sync + 'static>,
},
+ /// Error when an operation is not implemented
#[snafu(display("Operation not yet implemented."))]
NotImplemented,
+ /// Error when the used credentials don't have enough permission
+ /// to perform the requested operation
#[snafu(display(
"The operation lacked the necessary privileges to complete for path {}: {}",
path,
source
))]
PermissionDenied {
+ /// The path to the file
path: String,
+ /// The wrapped error
source: Box<dyn std::error::Error + Send + Sync + 'static>,
},
+ /// Error when the used credentials lack valid authentication
#[snafu(display(
"The operation lacked valid authentication credentials for path {}: {}",
path,
source
))]
Unauthenticated {
+ /// The path to the file
path: String,
+ /// The wrapped error
source: Box<dyn std::error::Error + Send + Sync + 'static>,
},
+ /// Error when a configuration key is invalid for the store used
#[snafu(display("Configuration key: '{}' is not valid for store '{}'.", key, store))]
- UnknownConfigurationKey { store: &'static str, key: String },
+ UnknownConfigurationKey {
+ /// The object store used
+ store: &'static str,
+ /// The configuration key used
+ key: String,
+ },
}
impl From<Error> for std::io::Error {
diff --git a/object_store/src/local.rs b/object_store/src/local.rs
index db4b4b05031e..ac10f332d743 100644
--- a/object_store/src/local.rs
+++ b/object_store/src/local.rs
@@ -44,7 +44,6 @@ use crate::{
/// A specialized `Error` for filesystem object store-related errors
#[derive(Debug, Snafu)]
-#[allow(missing_docs)]
pub(crate) enum Error {
#[snafu(display("File size for {} did not fit in a usize: {}", path, source))]
FileSizeOverflowedUsize {
diff --git a/object_store/src/memory.rs b/object_store/src/memory.rs
index 0d72983b0495..b458bdddfbf5 100644
--- a/object_store/src/memory.rs
+++ b/object_store/src/memory.rs
@@ -38,7 +38,6 @@ use crate::{GetOptions, PutPayload};
/// A specialized `Error` for in-memory object store-related errors
#[derive(Debug, Snafu)]
-#[allow(missing_docs)]
enum Error {
#[snafu(display("No data in memory found. Location: {path}"))]
NoDataInMemory { path: String },
diff --git a/object_store/src/path/mod.rs b/object_store/src/path/mod.rs
index 59e08e2eaba9..4c9bb5f05186 100644
--- a/object_store/src/path/mod.rs
+++ b/object_store/src/path/mod.rs
@@ -36,32 +36,57 @@ pub use parts::{InvalidPart, PathPart};
/// Error returned by [`Path::parse`]
#[derive(Debug, Snafu)]
-#[allow(missing_docs)]
#[non_exhaustive]
pub enum Error {
+ /// Error when there's an empty segment between two slashes `/` in the path
#[snafu(display("Path \"{}\" contained empty path segment", path))]
- EmptySegment { path: String },
+ EmptySegment {
+ /// The source path
+ path: String,
+ },
+ /// Error when an invalid segment is encountered in the given path
#[snafu(display("Error parsing Path \"{}\": {}", path, source))]
- BadSegment { path: String, source: InvalidPart },
+ BadSegment {
+ /// The source path
+ path: String,
+ /// The part containing the error
+ source: InvalidPart,
+ },
+ /// Error when path cannot be canonicalized
#[snafu(display("Failed to canonicalize path \"{}\": {}", path.display(), source))]
Canonicalize {
+ /// The source path
path: std::path::PathBuf,
+ /// The underlying error
source: std::io::Error,
},
+ /// Error when the path is not a valid URL
#[snafu(display("Unable to convert path \"{}\" to URL", path.display()))]
- InvalidPath { path: std::path::PathBuf },
+ InvalidPath {
+ /// The source path
+ path: std::path::PathBuf,
+ },
+ /// Error when a path contains non-unicode characters
#[snafu(display("Path \"{}\" contained non-unicode characters: {}", path, source))]
NonUnicode {
+ /// The source path
path: String,
+ /// The underlying `UTF8Error`
source: std::str::Utf8Error,
},
+ /// Error when the a path doesn't start with given prefix
#[snafu(display("Path {} does not start with prefix {}", path, prefix))]
- PrefixMismatch { path: String, prefix: String },
+ PrefixMismatch {
+ /// The source path
+ path: String,
+ /// The mismatched prefix
+ prefix: String,
+ },
}
/// A parsed path representation that can be safely written to object storage
diff --git a/parquet/src/compression.rs b/parquet/src/compression.rs
index edf675f1302a..ccc060250af4 100644
--- a/parquet/src/compression.rs
+++ b/parquet/src/compression.rs
@@ -298,6 +298,35 @@ mod gzip_codec {
pub use gzip_codec::*;
/// Represents a valid gzip compression level.
+///
+/// Defaults to 6.
+///
+/// * 0: least compression
+/// * 9: most compression (that other software can read)
+/// * 10: most compression (incompatible with other software, see below)
+/// #### WARNING:
+/// Level 10 compression can offer smallest file size,
+/// but Parquet files created with it will not be readable
+/// by other "standard" paquet readers.
+///
+/// Do **NOT** use level 10 if you need other software to
+/// be able to read the files. Read below for details.
+///
+/// ### IMPORTANT:
+/// There's often confusion about the compression levels in `flate2` vs `arrow`
+/// as highlighted in issue [#1011](https://github.com/apache/arrow-rs/issues/6282).
+///
+/// `flate2` supports two compression backends: `miniz_oxide` and `zlib`.
+///
+/// - `zlib` supports levels from 0 to 9.
+/// - `miniz_oxide` supports levels from 0 to 10.
+///
+/// `arrow` uses `flate` with `rust_backend` feature,
+/// which provides `miniz_oxide` as the backend.
+/// Therefore 0-10 levels are supported.
+///
+/// `flate2` documents this behavior properly with
+/// [this commit](https://github.com/rust-lang/flate2-rs/pull/430).
#[derive(Debug, Eq, PartialEq, Hash, Clone, Copy)]
pub struct GzipLevel(u32);
|
diff --git a/arrow-integration-test/src/lib.rs b/arrow-integration-test/src/lib.rs
index d1486fd5a153..ea5b545f2e81 100644
--- a/arrow-integration-test/src/lib.rs
+++ b/arrow-integration-test/src/lib.rs
@@ -21,6 +21,7 @@
//!
//! This is not a canonical format, but provides a human-readable way of verifying language implementations
+#![warn(missing_docs)]
use arrow_buffer::{IntervalDayTime, IntervalMonthDayNano, ScalarBuffer};
use hex::decode;
use num::BigInt;
@@ -49,8 +50,11 @@ pub use schema::*;
/// See <https://github.com/apache/arrow/blob/master/docs/source/format/Integration.rst#json-test-data-format>
#[derive(Deserialize, Serialize, Debug)]
pub struct ArrowJson {
+ /// The Arrow schema for JSON file
pub schema: ArrowJsonSchema,
+ /// The `RecordBatch`es in the JSON file
pub batches: Vec<ArrowJsonBatch>,
+ /// The dictionaries in the JSON file
#[serde(skip_serializing_if = "Option::is_none")]
pub dictionaries: Option<Vec<ArrowJsonDictionaryBatch>>,
}
@@ -60,7 +64,9 @@ pub struct ArrowJson {
/// Fields are left as JSON `Value` as they vary by `DataType`
#[derive(Deserialize, Serialize, Debug)]
pub struct ArrowJsonSchema {
+ /// An array of JSON fields
pub fields: Vec<ArrowJsonField>,
+ /// An array of metadata key-value pairs
#[serde(skip_serializing_if = "Option::is_none")]
pub metadata: Option<Vec<HashMap<String, String>>>,
}
@@ -68,13 +74,20 @@ pub struct ArrowJsonSchema {
/// Fields are left as JSON `Value` as they vary by `DataType`
#[derive(Deserialize, Serialize, Debug)]
pub struct ArrowJsonField {
+ /// The name of the field
pub name: String,
+ /// The data type of the field,
+ /// can be any valid JSON value
#[serde(rename = "type")]
pub field_type: Value,
+ /// Whether the field is nullable
pub nullable: bool,
+ /// The children fields
pub children: Vec<ArrowJsonField>,
+ /// The dictionary for the field
#[serde(skip_serializing_if = "Option::is_none")]
pub dictionary: Option<ArrowJsonFieldDictionary>,
+ /// The metadata for the field, if any
#[serde(skip_serializing_if = "Option::is_none")]
pub metadata: Option<Value>,
}
@@ -115,20 +128,28 @@ impl From<&Field> for ArrowJsonField {
}
}
+/// Represents a dictionary-encoded field in the Arrow JSON format
#[derive(Deserialize, Serialize, Debug)]
pub struct ArrowJsonFieldDictionary {
+ /// A unique identifier for the dictionary
pub id: i64,
+ /// The type of the dictionary index
#[serde(rename = "indexType")]
pub index_type: DictionaryIndexType,
+ /// Whether the dictionary is ordered
#[serde(rename = "isOrdered")]
pub is_ordered: bool,
}
+/// Type of an index for a dictionary-encoded field in the Arrow JSON format
#[derive(Deserialize, Serialize, Debug)]
pub struct DictionaryIndexType {
+ /// The name of the dictionary index type
pub name: String,
+ /// Whether the dictionary index type is signed
#[serde(rename = "isSigned")]
pub is_signed: bool,
+ /// The bit width of the dictionary index type
#[serde(rename = "bitWidth")]
pub bit_width: i64,
}
@@ -137,6 +158,7 @@ pub struct DictionaryIndexType {
#[derive(Deserialize, Serialize, Debug, Clone)]
pub struct ArrowJsonBatch {
count: usize,
+ /// The columns in the record batch
pub columns: Vec<ArrowJsonColumn>,
}
@@ -144,7 +166,9 @@ pub struct ArrowJsonBatch {
#[derive(Deserialize, Serialize, Debug, Clone)]
#[allow(non_snake_case)]
pub struct ArrowJsonDictionaryBatch {
+ /// The unique identifier for the dictionary
pub id: i64,
+ /// The data for the dictionary
pub data: ArrowJsonBatch,
}
@@ -152,15 +176,21 @@ pub struct ArrowJsonDictionaryBatch {
#[derive(Deserialize, Serialize, Clone, Debug)]
pub struct ArrowJsonColumn {
name: String,
+ /// The number of elements in the column
pub count: usize,
+ /// The validity bitmap to determine null values
#[serde(rename = "VALIDITY")]
pub validity: Option<Vec<u8>>,
+ /// The data values in the column
#[serde(rename = "DATA")]
pub data: Option<Vec<Value>>,
+ /// The offsets for variable-sized data types
#[serde(rename = "OFFSET")]
pub offset: Option<Vec<Value>>, // leaving as Value as 64-bit offsets are strings
+ /// The type id for union types
#[serde(rename = "TYPE_ID")]
pub type_id: Option<Vec<i8>>,
+ /// The children columns for nested types
pub children: Option<Vec<ArrowJsonColumn>>,
}
@@ -189,6 +219,7 @@ impl ArrowJson {
Ok(true)
}
+ /// Convert the stored dictionaries to `Vec[RecordBatch]`
pub fn get_record_batches(&self) -> Result<Vec<RecordBatch>> {
let schema = self.schema.to_arrow_schema()?;
@@ -275,6 +306,7 @@ impl ArrowJsonField {
}
}
+/// Generates a [`RecordBatch`] from an Arrow JSON batch, given a schema
pub fn record_batch_from_json(
schema: &Schema,
json_batch: ArrowJsonBatch,
@@ -877,6 +909,7 @@ pub fn array_from_json(
}
}
+/// Construct a [`DictionaryArray`] from a partially typed JSON column
pub fn dictionary_array_from_json(
field: &Field,
json_col: ArrowJsonColumn,
@@ -965,6 +998,7 @@ fn create_null_buf(json_col: &ArrowJsonColumn) -> Buffer {
}
impl ArrowJsonBatch {
+ /// Convert a [`RecordBatch`] to an [`ArrowJsonBatch`]
pub fn from_batch(batch: &RecordBatch) -> ArrowJsonBatch {
let mut json_batch = ArrowJsonBatch {
count: batch.num_rows(),
diff --git a/arrow-integration-testing/src/flight_client_scenarios/auth_basic_proto.rs b/arrow-integration-testing/src/flight_client_scenarios/auth_basic_proto.rs
index 376e31e15553..34c3c7706df5 100644
--- a/arrow-integration-testing/src/flight_client_scenarios/auth_basic_proto.rs
+++ b/arrow-integration-testing/src/flight_client_scenarios/auth_basic_proto.rs
@@ -15,6 +15,8 @@
// specific language governing permissions and limitations
// under the License.
+//! Scenario for testing basic auth.
+
use crate::{AUTH_PASSWORD, AUTH_USERNAME};
use arrow_flight::{flight_service_client::FlightServiceClient, BasicAuth, HandshakeRequest};
@@ -27,6 +29,7 @@ type Result<T = (), E = Error> = std::result::Result<T, E>;
type Client = FlightServiceClient<tonic::transport::Channel>;
+/// Run a scenario that tests basic auth.
pub async fn run_scenario(host: &str, port: u16) -> Result {
let url = format!("http://{host}:{port}");
let mut client = FlightServiceClient::connect(url).await?;
diff --git a/arrow-integration-testing/src/flight_client_scenarios/integration_test.rs b/arrow-integration-testing/src/flight_client_scenarios/integration_test.rs
index 1a6c4e28a76b..c8289ff446a0 100644
--- a/arrow-integration-testing/src/flight_client_scenarios/integration_test.rs
+++ b/arrow-integration-testing/src/flight_client_scenarios/integration_test.rs
@@ -15,6 +15,8 @@
// specific language governing permissions and limitations
// under the License.
+//! Integration tests for the Flight client.
+
use crate::open_json_file;
use std::collections::HashMap;
@@ -40,6 +42,7 @@ type Result<T = (), E = Error> = std::result::Result<T, E>;
type Client = FlightServiceClient<tonic::transport::Channel>;
+/// Run a scenario that uploads data to a Flight server and then downloads it back
pub async fn run_scenario(host: &str, port: u16, path: &str) -> Result {
let url = format!("http://{host}:{port}");
diff --git a/arrow-integration-testing/src/flight_client_scenarios/middleware.rs b/arrow-integration-testing/src/flight_client_scenarios/middleware.rs
index 3b71edf446a3..b826ad456055 100644
--- a/arrow-integration-testing/src/flight_client_scenarios/middleware.rs
+++ b/arrow-integration-testing/src/flight_client_scenarios/middleware.rs
@@ -15,6 +15,8 @@
// specific language governing permissions and limitations
// under the License.
+//! Scenario for testing middleware.
+
use arrow_flight::{
flight_descriptor::DescriptorType, flight_service_client::FlightServiceClient, FlightDescriptor,
};
@@ -24,6 +26,7 @@ use tonic::{Request, Status};
type Error = Box<dyn std::error::Error + Send + Sync + 'static>;
type Result<T = (), E = Error> = std::result::Result<T, E>;
+/// Run a scenario that tests middleware.
pub async fn run_scenario(host: &str, port: u16) -> Result {
let url = format!("http://{host}:{port}");
let conn = tonic::transport::Endpoint::new(url)?.connect().await?;
diff --git a/arrow-integration-testing/src/flight_client_scenarios.rs b/arrow-integration-testing/src/flight_client_scenarios/mod.rs
similarity index 93%
rename from arrow-integration-testing/src/flight_client_scenarios.rs
rename to arrow-integration-testing/src/flight_client_scenarios/mod.rs
index 66cced5f4c2e..c5794433764a 100644
--- a/arrow-integration-testing/src/flight_client_scenarios.rs
+++ b/arrow-integration-testing/src/flight_client_scenarios/mod.rs
@@ -15,6 +15,8 @@
// specific language governing permissions and limitations
// under the License.
+//! Collection of utilities for testing the Flight client.
+
pub mod auth_basic_proto;
pub mod integration_test;
pub mod middleware;
diff --git a/arrow-integration-testing/src/flight_server_scenarios/auth_basic_proto.rs b/arrow-integration-testing/src/flight_server_scenarios/auth_basic_proto.rs
index 20d868953664..5462e5bd674b 100644
--- a/arrow-integration-testing/src/flight_server_scenarios/auth_basic_proto.rs
+++ b/arrow-integration-testing/src/flight_server_scenarios/auth_basic_proto.rs
@@ -15,6 +15,8 @@
// specific language governing permissions and limitations
// under the License.
+//! Basic auth test for the Flight server.
+
use std::pin::Pin;
use std::sync::Arc;
@@ -35,6 +37,7 @@ use prost::Message;
use crate::{AUTH_PASSWORD, AUTH_USERNAME};
+/// Run a scenario that tests basic auth.
pub async fn scenario_setup(port: u16) -> Result {
let service = AuthBasicProtoScenarioImpl {
username: AUTH_USERNAME.into(),
@@ -52,6 +55,7 @@ pub async fn scenario_setup(port: u16) -> Result {
Ok(())
}
+/// Scenario for testing basic auth.
#[derive(Clone)]
pub struct AuthBasicProtoScenarioImpl {
username: Arc<str>,
diff --git a/arrow-integration-testing/src/flight_server_scenarios/integration_test.rs b/arrow-integration-testing/src/flight_server_scenarios/integration_test.rs
index 76eb9d880199..0c58fae93df5 100644
--- a/arrow-integration-testing/src/flight_server_scenarios/integration_test.rs
+++ b/arrow-integration-testing/src/flight_server_scenarios/integration_test.rs
@@ -15,6 +15,9 @@
// specific language governing permissions and limitations
// under the License.
+//! Integration tests for the Flight server.
+
+use core::str;
use std::collections::HashMap;
use std::pin::Pin;
use std::sync::Arc;
@@ -42,6 +45,7 @@ type TonicStream<T> = Pin<Box<dyn Stream<Item = T> + Send + Sync + 'static>>;
type Error = Box<dyn std::error::Error + Send + Sync + 'static>;
type Result<T = (), E = Error> = std::result::Result<T, E>;
+/// Run a scenario that tests integration testing.
pub async fn scenario_setup(port: u16) -> Result {
let addr = super::listen_on(port).await?;
@@ -65,6 +69,7 @@ struct IntegrationDataset {
chunks: Vec<RecordBatch>,
}
+/// Flight service implementation for integration testing
#[derive(Clone, Default)]
pub struct FlightServiceImpl {
server_location: String,
@@ -100,13 +105,13 @@ impl FlightService for FlightServiceImpl {
) -> Result<Response<Self::DoGetStream>, Status> {
let ticket = request.into_inner();
- let key = String::from_utf8(ticket.ticket.to_vec())
+ let key = str::from_utf8(&ticket.ticket)
.map_err(|e| Status::invalid_argument(format!("Invalid ticket: {e:?}")))?;
let uploaded_chunks = self.uploaded_chunks.lock().await;
let flight = uploaded_chunks
- .get(&key)
+ .get(key)
.ok_or_else(|| Status::not_found(format!("Could not find flight. {key}")))?;
let options = arrow::ipc::writer::IpcWriteOptions::default();
diff --git a/arrow-integration-testing/src/flight_server_scenarios/middleware.rs b/arrow-integration-testing/src/flight_server_scenarios/middleware.rs
index e8d9c521bb99..6685d45dffac 100644
--- a/arrow-integration-testing/src/flight_server_scenarios/middleware.rs
+++ b/arrow-integration-testing/src/flight_server_scenarios/middleware.rs
@@ -15,6 +15,8 @@
// specific language governing permissions and limitations
// under the License.
+//! Middleware test for the Flight server.
+
use std::pin::Pin;
use arrow_flight::{
@@ -31,6 +33,7 @@ type TonicStream<T> = Pin<Box<dyn Stream<Item = T> + Send + Sync + 'static>>;
type Error = Box<dyn std::error::Error + Send + Sync + 'static>;
type Result<T = (), E = Error> = std::result::Result<T, E>;
+/// Run a scenario that tests middleware.
pub async fn scenario_setup(port: u16) -> Result {
let service = MiddlewareScenarioImpl {};
let svc = FlightServiceServer::new(service);
@@ -44,6 +47,7 @@ pub async fn scenario_setup(port: u16) -> Result {
Ok(())
}
+/// Middleware interceptor for testing
#[derive(Clone, Default)]
pub struct MiddlewareScenarioImpl {}
diff --git a/arrow-integration-testing/src/flight_server_scenarios.rs b/arrow-integration-testing/src/flight_server_scenarios/mod.rs
similarity index 91%
rename from arrow-integration-testing/src/flight_server_scenarios.rs
rename to arrow-integration-testing/src/flight_server_scenarios/mod.rs
index 48d4e6045684..3833e1c6335c 100644
--- a/arrow-integration-testing/src/flight_server_scenarios.rs
+++ b/arrow-integration-testing/src/flight_server_scenarios/mod.rs
@@ -15,6 +15,7 @@
// specific language governing permissions and limitations
// under the License.
+//! Collection of utilities for testing the Flight server.
use std::net::SocketAddr;
use arrow_flight::{FlightEndpoint, Location, Ticket};
@@ -27,6 +28,7 @@ pub mod middleware;
type Error = Box<dyn std::error::Error + Send + Sync + 'static>;
type Result<T = (), E = Error> = std::result::Result<T, E>;
+/// Listen on a port and return the address
pub async fn listen_on(port: u16) -> Result<SocketAddr> {
let addr: SocketAddr = format!("0.0.0.0:{port}").parse()?;
@@ -36,6 +38,7 @@ pub async fn listen_on(port: u16) -> Result<SocketAddr> {
Ok(addr)
}
+/// Create a FlightEndpoint with a ticket and location
pub fn endpoint(ticket: &str, location_uri: impl Into<String>) -> FlightEndpoint {
FlightEndpoint {
ticket: Some(Ticket {
diff --git a/arrow-integration-testing/src/lib.rs b/arrow-integration-testing/src/lib.rs
index 4ce7b06a1888..ba8e3876c3e3 100644
--- a/arrow-integration-testing/src/lib.rs
+++ b/arrow-integration-testing/src/lib.rs
@@ -17,6 +17,7 @@
//! Common code used in the integration test binaries
+#![warn(missing_docs)]
use serde_json::Value;
use arrow::array::{Array, StructArray};
@@ -42,7 +43,9 @@ pub const AUTH_PASSWORD: &str = "flight";
pub mod flight_client_scenarios;
pub mod flight_server_scenarios;
+/// An Arrow file in JSON format
pub struct ArrowFile {
+ /// The schema of the file
pub schema: Schema,
// we can evolve this into a concrete Arrow type
// this is temporarily not being read from
@@ -51,12 +54,14 @@ pub struct ArrowFile {
}
impl ArrowFile {
+ /// Read a single [RecordBatch] from the file
pub fn read_batch(&self, batch_num: usize) -> Result<RecordBatch> {
let b = self.arrow_json["batches"].get(batch_num).unwrap();
let json_batch: ArrowJsonBatch = serde_json::from_value(b.clone()).unwrap();
record_batch_from_json(&self.schema, json_batch, Some(&self.dictionaries))
}
+ /// Read all [RecordBatch]es from the file
pub fn read_batches(&self) -> Result<Vec<RecordBatch>> {
self.arrow_json["batches"]
.as_array()
@@ -70,7 +75,7 @@ impl ArrowFile {
}
}
-// Canonicalize the names of map fields in a schema
+/// Canonicalize the names of map fields in a schema
pub fn canonicalize_schema(schema: &Schema) -> Schema {
let fields = schema
.fields()
@@ -107,6 +112,7 @@ pub fn canonicalize_schema(schema: &Schema) -> Schema {
Schema::new(fields).with_metadata(schema.metadata().clone())
}
+/// Read an Arrow file in JSON format
pub fn open_json_file(json_name: &str) -> Result<ArrowFile> {
let json_file = File::open(json_name)?;
let reader = BufReader::new(json_file);
@@ -157,10 +163,7 @@ pub fn read_gzip_json(version: &str, path: &str) -> ArrowJson {
arrow_json
}
-//
-// C Data Integration entrypoints
-//
-
+/// C Data Integration entrypoint to export the schema from a JSON file
fn cdata_integration_export_schema_from_json(
c_json_name: *const i8,
out: *mut FFI_ArrowSchema,
@@ -173,6 +176,7 @@ fn cdata_integration_export_schema_from_json(
Ok(())
}
+/// C Data Integration entrypoint to export a batch from a JSON file
fn cdata_integration_export_batch_from_json(
c_json_name: *const i8,
batch_num: c_int,
@@ -263,6 +267,7 @@ pub unsafe extern "C" fn arrow_rs_free_error(c_error: *mut i8) {
}
}
+/// A C-ABI for exporting an Arrow schema from a JSON file
#[no_mangle]
pub extern "C" fn arrow_rs_cdata_integration_export_schema_from_json(
c_json_name: *const i8,
@@ -272,6 +277,7 @@ pub extern "C" fn arrow_rs_cdata_integration_export_schema_from_json(
result_to_c_error(&r)
}
+/// A C-ABI to compare an Arrow schema against a JSON file
#[no_mangle]
pub extern "C" fn arrow_rs_cdata_integration_import_schema_and_compare_to_json(
c_json_name: *const i8,
@@ -281,6 +287,7 @@ pub extern "C" fn arrow_rs_cdata_integration_import_schema_and_compare_to_json(
result_to_c_error(&r)
}
+/// A C-ABI for exporting a RecordBatch from a JSON file
#[no_mangle]
pub extern "C" fn arrow_rs_cdata_integration_export_batch_from_json(
c_json_name: *const i8,
@@ -291,6 +298,7 @@ pub extern "C" fn arrow_rs_cdata_integration_export_batch_from_json(
result_to_c_error(&r)
}
+/// A C-ABI to compare a RecordBatch against a JSON file
#[no_mangle]
pub extern "C" fn arrow_rs_cdata_integration_import_batch_and_compare_to_json(
c_json_name: *const i8,
diff --git a/arrow/tests/array_cast.rs b/arrow/tests/array_cast.rs
index 0fd89cc2bff4..8f86cbeab717 100644
--- a/arrow/tests/array_cast.rs
+++ b/arrow/tests/array_cast.rs
@@ -179,7 +179,7 @@ fn test_can_cast_types() {
/// Create instances of arrays with varying types for cast tests
fn get_arrays_of_all_types() -> Vec<ArrayRef> {
- let tz_name = String::from("+08:00");
+ let tz_name = "+08:00";
let binary_data: Vec<&[u8]> = vec![b"foo", b"bar"];
vec![
Arc::new(BinaryArray::from(binary_data.clone())),
@@ -238,9 +238,9 @@ fn get_arrays_of_all_types() -> Vec<ArrayRef> {
Arc::new(TimestampMillisecondArray::from(vec![1000, 2000])),
Arc::new(TimestampMicrosecondArray::from(vec![1000, 2000])),
Arc::new(TimestampNanosecondArray::from(vec![1000, 2000])),
- Arc::new(TimestampSecondArray::from(vec![1000, 2000]).with_timezone(tz_name.clone())),
- Arc::new(TimestampMillisecondArray::from(vec![1000, 2000]).with_timezone(tz_name.clone())),
- Arc::new(TimestampMicrosecondArray::from(vec![1000, 2000]).with_timezone(tz_name.clone())),
+ Arc::new(TimestampSecondArray::from(vec![1000, 2000]).with_timezone(tz_name)),
+ Arc::new(TimestampMillisecondArray::from(vec![1000, 2000]).with_timezone(tz_name)),
+ Arc::new(TimestampMicrosecondArray::from(vec![1000, 2000]).with_timezone(tz_name)),
Arc::new(TimestampNanosecondArray::from(vec![1000, 2000]).with_timezone(tz_name)),
Arc::new(Date32Array::from(vec![1000, 2000])),
Arc::new(Date64Array::from(vec![1000, 2000])),
|
What is the highest compression level in gzip?
**Which part is this question about**
What is the highest compression level in gzip?
**Describe your question**
I see from other sources, including `flate2`, the highest compression level for gzip is 9 instead of 10. If we pass 10, it should be accepted by parquet but rejected by flate2. Am I getting misunderstanding somewhere?
```rust
impl CompressionLevel<u32> for GzipLevel {
const MINIMUM_LEVEL: u32 = 0;
const MAXIMUM_LEVEL: u32 = 10;
}
```
|
It seems flate2 documentation is wrong.
```rust
/// Returns an integer representing the compression level, typically on a
/// scale of 0-9
pub fn level(&self) -> u32 {
self.0
}
```
But internally, inside `DeflateBackend::make` they have `debug_assert!(level.level() <= 10);`. Using compression level up to 10 works fine but panics for bigger values.
@JakkuSakura After discussing with `flate2` maintainers, I've confirmed that it's actually a documentation issue, but there's a slight caveat as well. `flate2` supports both `miniz` and `zlib` backends, the former enabled by default.
For consistency with zlib (which supports up to 9), the documentation states the compression range as `0-9`, but 10 is supported miniz, enabled by default. If the backend is switched to `zlib`, then an attempt to use a compression level 10 will cause a panic.
I've opened a [pull request in flate2](https://github.com/rust-lang/flate2-rs/pull/427) to explicitly mention this in the docs, so that there's no confusion around this behavior. I hope this resolves your query?
One more thing to add here. Parquet (and the entire arrow project) uses `flate2` with `rust_backend` feature enabled. Which uses `miniz` backend, thereby supporting level 10 compression, aka `UBER COMPRESSION`. Flate2 still chooses to call 9 as the `best` level of compression because with 10 we might run into performance issues on the user's device.
The PR I created in `flate2` is merged. So the docs should mention this caveat very soon hopefully. But behaviorally speaking, using level = 10 in parquet shouldn't be a problem at all. Discretion is advised when using `flate2` separately.
@alamb @tustvold what do you think? And should we close this?
Maybe we can add a note to the arrow documentation with a link to flate2 and close this issuse?
Sounds like a good idea. I'll make this a part of #37 exercise itself.
@alamb @tustvold have a look please before I add anything to the docs.
https://github.com/rust-lang/flate2-rs/pull/427#issuecomment-2377460294 and https://github.com/rust-lang/flate2-rs/pull/429
> @alamb @tustvold have a look please before I add anything to the docs.
>
> [rust-lang/flate2-rs#427 (comment)](https://github.com/rust-lang/flate2-rs/pull/427#issuecomment-2377460294) and [rust-lang/flate2-rs#429](https://github.com/rust-lang/flate2-rs/pull/429)
I recommend linking to the docs added in https://github.com/rust-lang/flate2-rs/pull/430 -- they are pretty clear to me. Basically we can say the max is 10 but offer the caveat that nothing else will be able to read the parquet files
|
2024-09-25T04:27:02Z
|
53.0
|
f41c258246cd4bd9d89228cded9ed54dbd00faff
|
apache/arrow-rs
| 6,368
|
apache__arrow-rs-6368
|
[
"6366"
] |
0491294828a6480959ba3983355b415abbaf1174
|
diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml
index 1937fafe3a62..41edc1bb194e 100644
--- a/.github/workflows/integration.yml
+++ b/.github/workflows/integration.yml
@@ -48,7 +48,6 @@ on:
- arrow/**
jobs:
-
integration:
name: Archery test With other arrows
runs-on: ubuntu-latest
@@ -118,9 +117,9 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- rust: [ stable ]
- # PyArrow 13 was the last version prior to introduction to Arrow PyCapsules
- pyarrow: [ "13", "14" ]
+ rust: [stable]
+ # PyArrow 15 was the first version to introduce StringView/BinaryView support
+ pyarrow: ["15", "16", "17"]
steps:
- uses: actions/checkout@v4
with:
diff --git a/arrow-array/src/ffi.rs b/arrow-array/src/ffi.rs
index 1d76ed62d365..a28b3f746115 100644
--- a/arrow-array/src/ffi.rs
+++ b/arrow-array/src/ffi.rs
@@ -193,6 +193,13 @@ fn bit_width(data_type: &DataType, i: usize) -> Result<usize> {
"The datatype \"{data_type:?}\" expects 3 buffers, but requested {i}. Please verify that the C data interface is correctly implemented."
)))
}
+ // Variable-sized views: have 3 or more buffers.
+ // Buffer 1 are the u128 views
+ // Buffers 2...N-1 are u8 byte buffers
+ (DataType::Utf8View, 1) | (DataType::BinaryView,1) => u128::BITS as _,
+ (DataType::Utf8View, _) | (DataType::BinaryView, _) => {
+ u8::BITS as _
+ }
// type ids. UnionArray doesn't have null bitmap so buffer index begins with 0.
(DataType::Union(_, _), 0) => i8::BITS as _,
// Only DenseUnion has 2nd buffer
@@ -300,7 +307,7 @@ impl<'a> ImportedArrowArray<'a> {
};
let data_layout = layout(&self.data_type);
- let buffers = self.buffers(data_layout.can_contain_null_mask)?;
+ let buffers = self.buffers(data_layout.can_contain_null_mask, data_layout.variadic)?;
let null_bit_buffer = if data_layout.can_contain_null_mask {
self.null_bit_buffer()
@@ -373,13 +380,30 @@ impl<'a> ImportedArrowArray<'a> {
/// returns all buffers, as organized by Rust (i.e. null buffer is skipped if it's present
/// in the spec of the type)
- fn buffers(&self, can_contain_null_mask: bool) -> Result<Vec<Buffer>> {
+ fn buffers(&self, can_contain_null_mask: bool, variadic: bool) -> Result<Vec<Buffer>> {
// + 1: skip null buffer
let buffer_begin = can_contain_null_mask as usize;
- (buffer_begin..self.array.num_buffers())
- .map(|index| {
- let len = self.buffer_len(index, &self.data_type)?;
+ let buffer_end = self.array.num_buffers() - usize::from(variadic);
+
+ let variadic_buffer_lens = if variadic {
+ // Each views array has 1 (optional) null buffer, 1 views buffer, 1 lengths buffer.
+ // Rest are variadic.
+ let num_variadic_buffers =
+ self.array.num_buffers() - (2 + usize::from(can_contain_null_mask));
+ if num_variadic_buffers == 0 {
+ &[]
+ } else {
+ let lengths = self.array.buffer(self.array.num_buffers() - 1);
+ // SAFETY: is lengths is non-null, then it must be valid for up to num_variadic_buffers.
+ unsafe { std::slice::from_raw_parts(lengths.cast::<i64>(), num_variadic_buffers) }
+ }
+ } else {
+ &[]
+ };
+ (buffer_begin..buffer_end)
+ .map(|index| {
+ let len = self.buffer_len(index, variadic_buffer_lens, &self.data_type)?;
match unsafe { create_buffer(self.owner.clone(), self.array, index, len) } {
Some(buf) => Ok(buf),
None if len == 0 => {
@@ -399,7 +423,12 @@ impl<'a> ImportedArrowArray<'a> {
/// Rust implementation uses fixed-sized buffers, which require knowledge of their `len`.
/// for variable-sized buffers, such as the second buffer of a stringArray, we need
/// to fetch offset buffer's len to build the second buffer.
- fn buffer_len(&self, i: usize, dt: &DataType) -> Result<usize> {
+ fn buffer_len(
+ &self,
+ i: usize,
+ variadic_buffer_lengths: &[i64],
+ dt: &DataType,
+ ) -> Result<usize> {
// Special handling for dictionary type as we only care about the key type in the case.
let data_type = match dt {
DataType::Dictionary(key_data_type, _) => key_data_type.as_ref(),
@@ -430,7 +459,7 @@ impl<'a> ImportedArrowArray<'a> {
}
// the len of the data buffer (buffer 2) equals the last value of the offset buffer (buffer 1)
- let len = self.buffer_len(1, dt)?;
+ let len = self.buffer_len(1, variadic_buffer_lengths, dt)?;
// first buffer is the null buffer => add(1)
// we assume that pointer is aligned for `i32`, as Utf8 uses `i32` offsets.
#[allow(clippy::cast_ptr_alignment)]
@@ -444,7 +473,7 @@ impl<'a> ImportedArrowArray<'a> {
}
// the len of the data buffer (buffer 2) equals the last value of the offset buffer (buffer 1)
- let len = self.buffer_len(1, dt)?;
+ let len = self.buffer_len(1, variadic_buffer_lengths, dt)?;
// first buffer is the null buffer => add(1)
// we assume that pointer is aligned for `i64`, as Large uses `i64` offsets.
#[allow(clippy::cast_ptr_alignment)]
@@ -452,6 +481,16 @@ impl<'a> ImportedArrowArray<'a> {
// get last offset
(unsafe { *offset_buffer.add(len / size_of::<i64>() - 1) }) as usize
}
+ // View types: these have variadic buffers.
+ // Buffer 1 is the views buffer, which stores 1 u128 per length of the array.
+ // Buffers 2..N-1 are the buffers holding the byte data. Their lengths are variable.
+ // Buffer N is of length (N - 2) and stores i64 containing the lengths of buffers 2..N-1
+ (DataType::Utf8View, 1) | (DataType::BinaryView, 1) => {
+ std::mem::size_of::<u128>() * length
+ }
+ (DataType::Utf8View, i) | (DataType::BinaryView, i) => {
+ variadic_buffer_lengths[i - 2] as usize
+ }
// buffer len of primitive types
_ => {
let bits = bit_width(data_type, i)?;
@@ -1229,18 +1268,18 @@ mod tests_from_ffi {
use arrow_data::ArrayData;
use arrow_schema::{DataType, Field};
- use crate::types::Int32Type;
+ use super::{ImportedArrowArray, Result};
+ use crate::builder::GenericByteViewBuilder;
+ use crate::types::{BinaryViewType, ByteViewType, Int32Type, StringViewType};
use crate::{
array::{
Array, BooleanArray, DictionaryArray, FixedSizeBinaryArray, FixedSizeListArray,
Int32Array, Int64Array, StringArray, StructArray, UInt32Array, UInt64Array,
},
ffi::{from_ffi, FFI_ArrowArray, FFI_ArrowSchema},
- make_array, ArrayRef, ListArray,
+ make_array, ArrayRef, GenericByteViewArray, ListArray,
};
- use super::{ImportedArrowArray, Result};
-
fn test_round_trip(expected: &ArrayData) -> Result<()> {
// here we export the array
let array = FFI_ArrowArray::new(expected);
@@ -1453,8 +1492,8 @@ mod tests_from_ffi {
owner: &array,
};
- let offset_buf_len = imported_array.buffer_len(1, &imported_array.data_type)?;
- let data_buf_len = imported_array.buffer_len(2, &imported_array.data_type)?;
+ let offset_buf_len = imported_array.buffer_len(1, &[], &imported_array.data_type)?;
+ let data_buf_len = imported_array.buffer_len(2, &[], &imported_array.data_type)?;
assert_eq!(offset_buf_len, 4);
assert_eq!(data_buf_len, 0);
@@ -1472,6 +1511,18 @@ mod tests_from_ffi {
StringArray::from(array)
}
+ fn roundtrip_byte_view_array<T: ByteViewType>(
+ array: GenericByteViewArray<T>,
+ ) -> GenericByteViewArray<T> {
+ let data = array.into_data();
+
+ let array = FFI_ArrowArray::new(&data);
+ let schema = FFI_ArrowSchema::try_from(data.data_type()).unwrap();
+
+ let array = unsafe { from_ffi(array, &schema) }.unwrap();
+ GenericByteViewArray::<T>::from(array)
+ }
+
fn extend_array(array: &dyn Array) -> ArrayRef {
let len = array.len();
let data = array.to_data();
@@ -1551,4 +1602,93 @@ mod tests_from_ffi {
&imported
);
}
+
+ /// Helper trait to allow us to use easily strings as either BinaryViewType::Native or
+ /// StringViewType::Native scalars.
+ trait NativeFromStr {
+ fn from_str(value: &str) -> &Self;
+ }
+
+ impl NativeFromStr for str {
+ fn from_str(value: &str) -> &Self {
+ value
+ }
+ }
+
+ impl NativeFromStr for [u8] {
+ fn from_str(value: &str) -> &Self {
+ value.as_bytes()
+ }
+ }
+
+ #[test]
+ fn test_round_trip_byte_view() {
+ fn test_case<T>()
+ where
+ T: ByteViewType,
+ T::Native: NativeFromStr,
+ {
+ macro_rules! run_test_case {
+ ($array:expr) => {{
+ // round-trip through C Data Interface
+ let len = $array.len();
+ let imported = roundtrip_byte_view_array($array);
+ assert_eq!(imported.len(), len);
+
+ let copied = extend_array(&imported);
+ assert_eq!(
+ copied
+ .as_any()
+ .downcast_ref::<GenericByteViewArray<T>>()
+ .unwrap(),
+ &imported
+ );
+ }};
+ }
+
+ // Empty test case.
+ let empty = GenericByteViewBuilder::<T>::new().finish();
+ run_test_case!(empty);
+
+ // All inlined strings test case.
+ let mut all_inlined = GenericByteViewBuilder::<T>::new();
+ all_inlined.append_value(T::Native::from_str("inlined1"));
+ all_inlined.append_value(T::Native::from_str("inlined2"));
+ all_inlined.append_value(T::Native::from_str("inlined3"));
+ let all_inlined = all_inlined.finish();
+ assert_eq!(all_inlined.data_buffers().len(), 0);
+ run_test_case!(all_inlined);
+
+ // some inlined + non-inlined, 1 variadic buffer.
+ let mixed_one_variadic = {
+ let mut builder = GenericByteViewBuilder::<T>::new();
+ builder.append_value(T::Native::from_str("inlined"));
+ let block_id =
+ builder.append_block(Buffer::from("non-inlined-string-buffer".as_bytes()));
+ builder.try_append_view(block_id, 0, 25).unwrap();
+ builder.finish()
+ };
+ assert_eq!(mixed_one_variadic.data_buffers().len(), 1);
+ run_test_case!(mixed_one_variadic);
+
+ // inlined + non-inlined, 2 variadic buffers.
+ let mixed_two_variadic = {
+ let mut builder = GenericByteViewBuilder::<T>::new();
+ builder.append_value(T::Native::from_str("inlined"));
+ let block_id =
+ builder.append_block(Buffer::from("non-inlined-string-buffer".as_bytes()));
+ builder.try_append_view(block_id, 0, 25).unwrap();
+
+ let block_id = builder
+ .append_block(Buffer::from("another-non-inlined-string-buffer".as_bytes()));
+ builder.try_append_view(block_id, 0, 33).unwrap();
+ builder.finish()
+ };
+ assert_eq!(mixed_two_variadic.data_buffers().len(), 2);
+ run_test_case!(mixed_two_variadic);
+ }
+
+ test_case::<StringViewType>();
+ test_case::<BinaryViewType>();
+ }
}
diff --git a/arrow-buffer/src/buffer/immutable.rs b/arrow-buffer/src/buffer/immutable.rs
index 7cd3552215f8..fef2f8008b2a 100644
--- a/arrow-buffer/src/buffer/immutable.rs
+++ b/arrow-buffer/src/buffer/immutable.rs
@@ -203,7 +203,9 @@ impl Buffer {
pub fn advance(&mut self, offset: usize) {
assert!(
offset <= self.length,
- "the offset of the new Buffer cannot exceed the existing length"
+ "the offset of the new Buffer cannot exceed the existing length: offset={} length={}",
+ offset,
+ self.length
);
self.length -= offset;
// Safety:
@@ -221,7 +223,8 @@ impl Buffer {
pub fn slice_with_length(&self, offset: usize, length: usize) -> Self {
assert!(
offset.saturating_add(length) <= self.length,
- "the offset of the new Buffer cannot exceed the existing length"
+ "the offset of the new Buffer cannot exceed the existing length: slice offset={offset} length={length} selflen={}",
+ self.length
);
// Safety:
// offset + length <= self.length
diff --git a/arrow-data/src/ffi.rs b/arrow-data/src/ffi.rs
index 3345595fac19..cd283d32662f 100644
--- a/arrow-data/src/ffi.rs
+++ b/arrow-data/src/ffi.rs
@@ -20,7 +20,7 @@
use crate::bit_mask::set_bits;
use crate::{layout, ArrayData};
use arrow_buffer::buffer::NullBuffer;
-use arrow_buffer::{Buffer, MutableBuffer};
+use arrow_buffer::{Buffer, MutableBuffer, ScalarBuffer};
use arrow_schema::DataType;
use std::ffi::c_void;
@@ -121,7 +121,7 @@ impl FFI_ArrowArray {
pub fn new(data: &ArrayData) -> Self {
let data_layout = layout(data.data_type());
- let buffers = if data_layout.can_contain_null_mask {
+ let mut buffers = if data_layout.can_contain_null_mask {
// * insert the null buffer at the start
// * make all others `Option<Buffer>`.
std::iter::once(align_nulls(data.offset(), data.nulls()))
@@ -132,7 +132,7 @@ impl FFI_ArrowArray {
};
// `n_buffers` is the number of buffers by the spec.
- let n_buffers = {
+ let mut n_buffers = {
data_layout.buffers.len() + {
// If the layout has a null buffer by Arrow spec.
// Note that even the array doesn't have a null buffer because it has
@@ -141,10 +141,22 @@ impl FFI_ArrowArray {
}
} as i64;
+ if data_layout.variadic {
+ // Save the lengths of all variadic buffers into a new buffer.
+ // The first buffer is `views`, and the rest are variadic.
+ let mut data_buffers_lengths = Vec::new();
+ for buffer in data.buffers().iter().skip(1) {
+ data_buffers_lengths.push(buffer.len() as i64);
+ n_buffers += 1;
+ }
+
+ buffers.push(Some(ScalarBuffer::from(data_buffers_lengths).into_inner()));
+ n_buffers += 1;
+ }
+
let buffers_ptr = buffers
.iter()
.flat_map(|maybe_buffer| match maybe_buffer {
- // note that `raw_data` takes into account the buffer's offset
Some(b) => Some(b.as_ptr() as *const c_void),
// This is for null buffer. We only put a null pointer for
// null buffer if by spec it can contain null mask.
diff --git a/arrow/src/pyarrow.rs b/arrow/src/pyarrow.rs
index 336398cbf22f..a7b593799835 100644
--- a/arrow/src/pyarrow.rs
+++ b/arrow/src/pyarrow.rs
@@ -354,7 +354,7 @@ impl FromPyArrow for RecordBatch {
validate_pycapsule(array_capsule, "arrow_array")?;
let schema_ptr = unsafe { schema_capsule.reference::<FFI_ArrowSchema>() };
- let ffi_array = unsafe { FFI_ArrowArray::from_raw(array_capsule.pointer() as _) };
+ let ffi_array = unsafe { FFI_ArrowArray::from_raw(array_capsule.pointer().cast()) };
let array_data = unsafe { ffi::from_ffi(ffi_array, schema_ptr) }.map_err(to_py_err)?;
if !matches!(array_data.data_type(), DataType::Struct(_)) {
return Err(PyTypeError::new_err(
|
diff --git a/arrow/tests/pyarrow.rs b/arrow/tests/pyarrow.rs
index a1c365c31798..d9ebd0daa1cd 100644
--- a/arrow/tests/pyarrow.rs
+++ b/arrow/tests/pyarrow.rs
@@ -18,6 +18,8 @@
use arrow::array::{ArrayRef, Int32Array, StringArray};
use arrow::pyarrow::{FromPyArrow, ToPyArrow};
use arrow::record_batch::RecordBatch;
+use arrow_array::builder::{BinaryViewBuilder, StringViewBuilder};
+use arrow_array::{Array, BinaryViewArray, StringViewArray};
use pyo3::Python;
use std::sync::Arc;
@@ -27,7 +29,9 @@ fn test_to_pyarrow() {
let a: ArrayRef = Arc::new(Int32Array::from(vec![1, 2]));
let b: ArrayRef = Arc::new(StringArray::from(vec!["a", "b"]));
- let input = RecordBatch::try_from_iter(vec![("a", a), ("b", b)]).unwrap();
+ // The "very long string" will not be inlined, and force the creation of a data buffer.
+ let c: ArrayRef = Arc::new(StringViewArray::from(vec!["short", "a very long string"]));
+ let input = RecordBatch::try_from_iter(vec![("a", a), ("b", b), ("c", c)]).unwrap();
println!("input: {:?}", input);
let res = Python::with_gil(|py| {
@@ -40,3 +44,66 @@ fn test_to_pyarrow() {
assert_eq!(input, res);
}
+
+#[test]
+fn test_to_pyarrow_byte_view() {
+ pyo3::prepare_freethreaded_python();
+
+ for num_variadic_buffers in 0..=2 {
+ let string_view: ArrayRef = Arc::new(string_view_column(num_variadic_buffers));
+ let binary_view: ArrayRef = Arc::new(binary_view_column(num_variadic_buffers));
+
+ let input = RecordBatch::try_from_iter(vec![
+ ("string_view", string_view),
+ ("binary_view", binary_view),
+ ])
+ .unwrap();
+
+ println!("input: {:?}", input);
+ let res = Python::with_gil(|py| {
+ let py_input = input.to_pyarrow(py)?;
+ let records = RecordBatch::from_pyarrow_bound(py_input.bind(py))?;
+ let py_records = records.to_pyarrow(py)?;
+ RecordBatch::from_pyarrow_bound(py_records.bind(py))
+ })
+ .unwrap();
+
+ assert_eq!(input, res);
+ }
+}
+
+fn binary_view_column(num_variadic_buffers: usize) -> BinaryViewArray {
+ let long_scalar = b"but soft what light through yonder window breaks".as_slice();
+ let mut builder = BinaryViewBuilder::new().with_fixed_block_size(long_scalar.len() as u32);
+ // Make sure there is at least one non-inlined value.
+ builder.append_value("inlined".as_bytes());
+
+ for _ in 0..num_variadic_buffers {
+ builder.append_value(long_scalar);
+ }
+
+ let result = builder.finish();
+
+ assert_eq!(result.data_buffers().len(), num_variadic_buffers);
+ assert_eq!(result.len(), num_variadic_buffers + 1);
+
+ result
+}
+
+fn string_view_column(num_variadic_buffers: usize) -> StringViewArray {
+ let long_scalar = "but soft what light through yonder window breaks";
+ let mut builder = StringViewBuilder::new().with_fixed_block_size(long_scalar.len() as u32);
+ // Make sure there is at least one non-inlined value.
+ builder.append_value("inlined");
+
+ for _ in 0..num_variadic_buffers {
+ builder.append_value(long_scalar);
+ }
+
+ let result = builder.finish();
+
+ assert_eq!(result.data_buffers().len(), num_variadic_buffers);
+ assert_eq!(result.len(), num_variadic_buffers + 1);
+
+ result
+}
|
Exporting Binary/Utf8View from arrow-rs to pyarrow fails
**Describe the bug**
Exporting binaryview arrow to pyarrow fails with
`Expected at least 3 buffers for imported type binary_view, ArrowArray struct has 2`
**To Reproduce**
Construct binaryview array and export it over c data interface to pyarrow
**Expected behavior**
Export should succeed without error
**Additional context**
I think there's ambiguity in the spec https://github.com/apache/arrow/issues/43989. However, regardless of that issue it seems that export binaryview array has to do a bit extra work to produce buffer lengths
|
2024-09-06T21:16:12Z
|
53.0
|
f41c258246cd4bd9d89228cded9ed54dbd00faff
|
|
apache/arrow-rs
| 6,332
|
apache__arrow-rs-6332
|
[
"6331"
] |
d4be752ef54ee30198d0aa1abd3838188482e992
|
diff --git a/arrow-flight/src/bin/flight_sql_client.rs b/arrow-flight/src/bin/flight_sql_client.rs
index 296efc1c308e..c334b95a9a96 100644
--- a/arrow-flight/src/bin/flight_sql_client.rs
+++ b/arrow-flight/src/bin/flight_sql_client.rs
@@ -20,7 +20,10 @@ use std::{sync::Arc, time::Duration};
use anyhow::{bail, Context, Result};
use arrow_array::{ArrayRef, Datum, RecordBatch, StringArray};
use arrow_cast::{cast_with_options, pretty::pretty_format_batches, CastOptions};
-use arrow_flight::{sql::client::FlightSqlServiceClient, FlightInfo};
+use arrow_flight::{
+ sql::{client::FlightSqlServiceClient, CommandGetDbSchemas, CommandGetTables},
+ FlightInfo,
+};
use arrow_schema::Schema;
use clap::{Parser, Subcommand};
use futures::TryStreamExt;
@@ -111,6 +114,51 @@ struct Args {
/// Different available commands.
#[derive(Debug, Subcommand)]
enum Command {
+ /// Get catalogs.
+ Catalogs,
+ /// Get db schemas for a catalog.
+ DbSchemas {
+ /// Name of a catalog.
+ ///
+ /// Required.
+ catalog: String,
+ /// Specifies a filter pattern for schemas to search for.
+ /// When no schema_filter is provided, the pattern will not be used to narrow the search.
+ /// In the pattern string, two special characters can be used to denote matching rules:
+ /// - "%" means to match any substring with 0 or more characters.
+ /// - "_" means to match any one character.
+ #[clap(short, long)]
+ db_schema_filter: Option<String>,
+ },
+ /// Get tables for a catalog.
+ Tables {
+ /// Name of a catalog.
+ ///
+ /// Required.
+ catalog: String,
+ /// Specifies a filter pattern for schemas to search for.
+ /// When no schema_filter is provided, the pattern will not be used to narrow the search.
+ /// In the pattern string, two special characters can be used to denote matching rules:
+ /// - "%" means to match any substring with 0 or more characters.
+ /// - "_" means to match any one character.
+ #[clap(short, long)]
+ db_schema_filter: Option<String>,
+ /// Specifies a filter pattern for tables to search for.
+ /// When no table_filter is provided, all tables matching other filters are searched.
+ /// In the pattern string, two special characters can be used to denote matching rules:
+ /// - "%" means to match any substring with 0 or more characters.
+ /// - "_" means to match any one character.
+ #[clap(short, long)]
+ table_filter: Option<String>,
+ /// Specifies a filter of table types which must match.
+ /// The table types depend on vendor/implementation. It is usually used to separate tables from views or system tables.
+ /// TABLE, VIEW, and SYSTEM TABLE are commonly supported.
+ #[clap(long)]
+ table_types: Vec<String>,
+ },
+ /// Get table types.
+ TableTypes,
+
/// Execute given statement.
StatementQuery {
/// SQL query.
@@ -150,6 +198,36 @@ async fn main() -> Result<()> {
.context("setup client")?;
let flight_info = match args.cmd {
+ Command::Catalogs => client.get_catalogs().await.context("get catalogs")?,
+ Command::DbSchemas {
+ catalog,
+ db_schema_filter,
+ } => client
+ .get_db_schemas(CommandGetDbSchemas {
+ catalog: Some(catalog),
+ db_schema_filter_pattern: db_schema_filter,
+ })
+ .await
+ .context("get db schemas")?,
+ Command::Tables {
+ catalog,
+ db_schema_filter,
+ table_filter,
+ table_types,
+ } => client
+ .get_tables(CommandGetTables {
+ catalog: Some(catalog),
+ db_schema_filter_pattern: db_schema_filter,
+ table_name_filter_pattern: table_filter,
+ table_types,
+ // Schema is returned as ipc encoded bytes.
+ // We do not support returning the schema as there is no trivial mechanism
+ // to display the information to the user.
+ include_schema: false,
+ })
+ .await
+ .context("get tables")?,
+ Command::TableTypes => client.get_table_types().await.context("get table types")?,
Command::StatementQuery { query } => client
.execute(query, None)
.await
diff --git a/arrow-flight/src/sql/metadata/mod.rs b/arrow-flight/src/sql/metadata/mod.rs
index 1e9881ffa70e..fd71149a3180 100644
--- a/arrow-flight/src/sql/metadata/mod.rs
+++ b/arrow-flight/src/sql/metadata/mod.rs
@@ -33,6 +33,7 @@
mod catalogs;
mod db_schemas;
mod sql_info;
+mod table_types;
mod tables;
mod xdbc_info;
diff --git a/arrow-flight/src/sql/metadata/table_types.rs b/arrow-flight/src/sql/metadata/table_types.rs
new file mode 100644
index 000000000000..54cfe6fe27a7
--- /dev/null
+++ b/arrow-flight/src/sql/metadata/table_types.rs
@@ -0,0 +1,158 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! [`GetTableTypesBuilder`] for building responses to [`CommandGetTableTypes`] queries.
+//!
+//! [`CommandGetTableTypes`]: crate::sql::CommandGetTableTypes
+
+use std::sync::Arc;
+
+use arrow_array::{builder::StringBuilder, ArrayRef, RecordBatch};
+use arrow_schema::{DataType, Field, Schema, SchemaRef};
+use arrow_select::take::take;
+use once_cell::sync::Lazy;
+
+use crate::error::*;
+use crate::sql::CommandGetTableTypes;
+
+use super::lexsort_to_indices;
+
+/// A builder for a [`CommandGetTableTypes`] response.
+///
+/// Builds rows like this:
+///
+/// * table_type: utf8,
+#[derive(Default)]
+pub struct GetTableTypesBuilder {
+ // array builder for table types
+ table_type: StringBuilder,
+}
+
+impl CommandGetTableTypes {
+ /// Create a builder suitable for constructing a response
+ pub fn into_builder(self) -> GetTableTypesBuilder {
+ self.into()
+ }
+}
+
+impl From<CommandGetTableTypes> for GetTableTypesBuilder {
+ fn from(_value: CommandGetTableTypes) -> Self {
+ Self::new()
+ }
+}
+
+impl GetTableTypesBuilder {
+ /// Create a new instance of [`GetTableTypesBuilder`]
+ pub fn new() -> Self {
+ Self {
+ table_type: StringBuilder::new(),
+ }
+ }
+
+ /// Append a row
+ pub fn append(&mut self, table_type: impl AsRef<str>) {
+ self.table_type.append_value(table_type);
+ }
+
+ /// builds a `RecordBatch` with the correct schema for a `CommandGetTableTypes` response
+ pub fn build(self) -> Result<RecordBatch> {
+ let schema = self.schema();
+ let Self { mut table_type } = self;
+
+ // Make the arrays
+ let table_type = table_type.finish();
+
+ let batch = RecordBatch::try_new(schema, vec![Arc::new(table_type) as ArrayRef])?;
+
+ // Order filtered results by table_type
+ let indices = lexsort_to_indices(batch.columns());
+ let columns = batch
+ .columns()
+ .iter()
+ .map(|c| take(c, &indices, None))
+ .collect::<std::result::Result<Vec<_>, _>>()?;
+
+ Ok(RecordBatch::try_new(batch.schema(), columns)?)
+ }
+
+ /// Return the schema of the RecordBatch that will be returned
+ /// from [`CommandGetTableTypes`]
+ pub fn schema(&self) -> SchemaRef {
+ get_table_types_schema()
+ }
+}
+
+fn get_table_types_schema() -> SchemaRef {
+ Arc::clone(&GET_TABLE_TYPES_SCHEMA)
+}
+
+/// The schema for [`CommandGetTableTypes`].
+static GET_TABLE_TYPES_SCHEMA: Lazy<SchemaRef> = Lazy::new(|| {
+ Arc::new(Schema::new(vec![Field::new(
+ "table_type",
+ DataType::Utf8,
+ false,
+ )]))
+});
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use arrow_array::StringArray;
+
+ fn get_ref_batch() -> RecordBatch {
+ RecordBatch::try_new(
+ get_table_types_schema(),
+ vec![Arc::new(StringArray::from(vec![
+ "a_table_type",
+ "b_table_type",
+ "c_table_type",
+ "d_table_type",
+ ])) as ArrayRef],
+ )
+ .unwrap()
+ }
+
+ #[test]
+ fn test_table_types_are_sorted() {
+ let ref_batch = get_ref_batch();
+
+ let mut builder = GetTableTypesBuilder::new();
+ builder.append("b_table_type");
+ builder.append("a_table_type");
+ builder.append("d_table_type");
+ builder.append("c_table_type");
+ let schema_batch = builder.build().unwrap();
+
+ assert_eq!(schema_batch, ref_batch)
+ }
+
+ #[test]
+ fn test_builder_from_query() {
+ let ref_batch = get_ref_batch();
+ let query = CommandGetTableTypes {};
+
+ let mut builder = query.into_builder();
+ builder.append("a_table_type");
+ builder.append("b_table_type");
+ builder.append("c_table_type");
+ builder.append("d_table_type");
+ let schema_batch = builder.build().unwrap();
+
+ assert_eq!(schema_batch, ref_batch)
+ }
+}
|
diff --git a/arrow-flight/tests/flight_sql_client_cli.rs b/arrow-flight/tests/flight_sql_client_cli.rs
index 168015d07e2d..6e1f6142c8b6 100644
--- a/arrow-flight/tests/flight_sql_client_cli.rs
+++ b/arrow-flight/tests/flight_sql_client_cli.rs
@@ -23,10 +23,12 @@ use crate::common::fixture::TestFixture;
use arrow_array::{ArrayRef, Int64Array, RecordBatch, StringArray};
use arrow_flight::{
decode::FlightRecordBatchStream,
+ encode::FlightDataEncoderBuilder,
flight_service_server::{FlightService, FlightServiceServer},
sql::{
server::{FlightSqlService, PeekableFlightDataStream},
ActionCreatePreparedStatementRequest, ActionCreatePreparedStatementResult, Any,
+ CommandGetCatalogs, CommandGetDbSchemas, CommandGetTableTypes, CommandGetTables,
CommandPreparedStatementQuery, CommandStatementQuery, DoPutPreparedStatementResult,
ProstMessageExt, SqlInfo,
},
@@ -85,6 +87,205 @@ async fn test_simple() {
);
}
+#[tokio::test]
+async fn test_get_catalogs() {
+ let test_server = FlightSqlServiceImpl::default();
+ let fixture = TestFixture::new(test_server.service()).await;
+ let addr = fixture.addr;
+
+ let stdout = tokio::task::spawn_blocking(move || {
+ Command::cargo_bin("flight_sql_client")
+ .unwrap()
+ .env_clear()
+ .env("RUST_BACKTRACE", "1")
+ .env("RUST_LOG", "warn")
+ .arg("--host")
+ .arg(addr.ip().to_string())
+ .arg("--port")
+ .arg(addr.port().to_string())
+ .arg("catalogs")
+ .assert()
+ .success()
+ .get_output()
+ .stdout
+ .clone()
+ })
+ .await
+ .unwrap();
+
+ fixture.shutdown_and_wait().await;
+
+ assert_eq!(
+ std::str::from_utf8(&stdout).unwrap().trim(),
+ "+--------------+\
+ \n| catalog_name |\
+ \n+--------------+\
+ \n| catalog_a |\
+ \n| catalog_b |\
+ \n+--------------+",
+ );
+}
+
+#[tokio::test]
+async fn test_get_db_schemas() {
+ let test_server = FlightSqlServiceImpl::default();
+ let fixture = TestFixture::new(test_server.service()).await;
+ let addr = fixture.addr;
+
+ let stdout = tokio::task::spawn_blocking(move || {
+ Command::cargo_bin("flight_sql_client")
+ .unwrap()
+ .env_clear()
+ .env("RUST_BACKTRACE", "1")
+ .env("RUST_LOG", "warn")
+ .arg("--host")
+ .arg(addr.ip().to_string())
+ .arg("--port")
+ .arg(addr.port().to_string())
+ .arg("db-schemas")
+ .arg("catalog_a")
+ .assert()
+ .success()
+ .get_output()
+ .stdout
+ .clone()
+ })
+ .await
+ .unwrap();
+
+ fixture.shutdown_and_wait().await;
+
+ assert_eq!(
+ std::str::from_utf8(&stdout).unwrap().trim(),
+ "+--------------+----------------+\
+ \n| catalog_name | db_schema_name |\
+ \n+--------------+----------------+\
+ \n| catalog_a | schema_1 |\
+ \n| catalog_a | schema_2 |\
+ \n+--------------+----------------+",
+ );
+}
+
+#[tokio::test]
+async fn test_get_tables() {
+ let test_server = FlightSqlServiceImpl::default();
+ let fixture = TestFixture::new(test_server.service()).await;
+ let addr = fixture.addr;
+
+ let stdout = tokio::task::spawn_blocking(move || {
+ Command::cargo_bin("flight_sql_client")
+ .unwrap()
+ .env_clear()
+ .env("RUST_BACKTRACE", "1")
+ .env("RUST_LOG", "warn")
+ .arg("--host")
+ .arg(addr.ip().to_string())
+ .arg("--port")
+ .arg(addr.port().to_string())
+ .arg("tables")
+ .arg("catalog_a")
+ .assert()
+ .success()
+ .get_output()
+ .stdout
+ .clone()
+ })
+ .await
+ .unwrap();
+
+ fixture.shutdown_and_wait().await;
+
+ assert_eq!(
+ std::str::from_utf8(&stdout).unwrap().trim(),
+ "+--------------+----------------+------------+------------+\
+ \n| catalog_name | db_schema_name | table_name | table_type |\
+ \n+--------------+----------------+------------+------------+\
+ \n| catalog_a | schema_1 | table_1 | TABLE |\
+ \n| catalog_a | schema_2 | table_2 | VIEW |\
+ \n+--------------+----------------+------------+------------+",
+ );
+}
+#[tokio::test]
+async fn test_get_tables_db_filter() {
+ let test_server = FlightSqlServiceImpl::default();
+ let fixture = TestFixture::new(test_server.service()).await;
+ let addr = fixture.addr;
+
+ let stdout = tokio::task::spawn_blocking(move || {
+ Command::cargo_bin("flight_sql_client")
+ .unwrap()
+ .env_clear()
+ .env("RUST_BACKTRACE", "1")
+ .env("RUST_LOG", "warn")
+ .arg("--host")
+ .arg(addr.ip().to_string())
+ .arg("--port")
+ .arg(addr.port().to_string())
+ .arg("tables")
+ .arg("catalog_a")
+ .arg("--db-schema-filter")
+ .arg("schema_2")
+ .assert()
+ .success()
+ .get_output()
+ .stdout
+ .clone()
+ })
+ .await
+ .unwrap();
+
+ fixture.shutdown_and_wait().await;
+
+ assert_eq!(
+ std::str::from_utf8(&stdout).unwrap().trim(),
+ "+--------------+----------------+------------+------------+\
+ \n| catalog_name | db_schema_name | table_name | table_type |\
+ \n+--------------+----------------+------------+------------+\
+ \n| catalog_a | schema_2 | table_2 | VIEW |\
+ \n+--------------+----------------+------------+------------+",
+ );
+}
+
+#[tokio::test]
+async fn test_get_tables_types() {
+ let test_server = FlightSqlServiceImpl::default();
+ let fixture = TestFixture::new(test_server.service()).await;
+ let addr = fixture.addr;
+
+ let stdout = tokio::task::spawn_blocking(move || {
+ Command::cargo_bin("flight_sql_client")
+ .unwrap()
+ .env_clear()
+ .env("RUST_BACKTRACE", "1")
+ .env("RUST_LOG", "warn")
+ .arg("--host")
+ .arg(addr.ip().to_string())
+ .arg("--port")
+ .arg(addr.port().to_string())
+ .arg("table-types")
+ .assert()
+ .success()
+ .get_output()
+ .stdout
+ .clone()
+ })
+ .await
+ .unwrap();
+
+ fixture.shutdown_and_wait().await;
+
+ assert_eq!(
+ std::str::from_utf8(&stdout).unwrap().trim(),
+ "+--------------+\
+ \n| table_type |\
+ \n+--------------+\
+ \n| SYSTEM_TABLE |\
+ \n| TABLE |\
+ \n| VIEW |\
+ \n+--------------+",
+ );
+}
+
const PREPARED_QUERY: &str = "SELECT * FROM table WHERE field = $1";
const PREPARED_STATEMENT_HANDLE: &str = "prepared_statement_handle";
const UPDATED_PREPARED_STATEMENT_HANDLE: &str = "updated_prepared_statement_handle";
@@ -278,6 +479,84 @@ impl FlightSqlService for FlightSqlServiceImpl {
Ok(resp)
}
+ async fn get_flight_info_catalogs(
+ &self,
+ query: CommandGetCatalogs,
+ request: Request<FlightDescriptor>,
+ ) -> Result<Response<FlightInfo>, Status> {
+ let flight_descriptor = request.into_inner();
+ let ticket = Ticket {
+ ticket: query.as_any().encode_to_vec().into(),
+ };
+ let endpoint = FlightEndpoint::new().with_ticket(ticket);
+
+ let flight_info = FlightInfo::new()
+ .try_with_schema(&query.into_builder().schema())
+ .unwrap()
+ .with_endpoint(endpoint)
+ .with_descriptor(flight_descriptor);
+
+ Ok(Response::new(flight_info))
+ }
+ async fn get_flight_info_schemas(
+ &self,
+ query: CommandGetDbSchemas,
+ request: Request<FlightDescriptor>,
+ ) -> Result<Response<FlightInfo>, Status> {
+ let flight_descriptor = request.into_inner();
+ let ticket = Ticket {
+ ticket: query.as_any().encode_to_vec().into(),
+ };
+ let endpoint = FlightEndpoint::new().with_ticket(ticket);
+
+ let flight_info = FlightInfo::new()
+ .try_with_schema(&query.into_builder().schema())
+ .unwrap()
+ .with_endpoint(endpoint)
+ .with_descriptor(flight_descriptor);
+
+ Ok(Response::new(flight_info))
+ }
+
+ async fn get_flight_info_tables(
+ &self,
+ query: CommandGetTables,
+ request: Request<FlightDescriptor>,
+ ) -> Result<Response<FlightInfo>, Status> {
+ let flight_descriptor = request.into_inner();
+ let ticket = Ticket {
+ ticket: query.as_any().encode_to_vec().into(),
+ };
+ let endpoint = FlightEndpoint::new().with_ticket(ticket);
+
+ let flight_info = FlightInfo::new()
+ .try_with_schema(&query.into_builder().schema())
+ .unwrap()
+ .with_endpoint(endpoint)
+ .with_descriptor(flight_descriptor);
+
+ Ok(Response::new(flight_info))
+ }
+
+ async fn get_flight_info_table_types(
+ &self,
+ query: CommandGetTableTypes,
+ request: Request<FlightDescriptor>,
+ ) -> Result<Response<FlightInfo>, Status> {
+ let flight_descriptor = request.into_inner();
+ let ticket = Ticket {
+ ticket: query.as_any().encode_to_vec().into(),
+ };
+ let endpoint = FlightEndpoint::new().with_ticket(ticket);
+
+ let flight_info = FlightInfo::new()
+ .try_with_schema(&query.into_builder().schema())
+ .unwrap()
+ .with_endpoint(endpoint)
+ .with_descriptor(flight_descriptor);
+
+ Ok(Response::new(flight_info))
+ }
async fn get_flight_info_statement(
&self,
query: CommandStatementQuery,
@@ -309,6 +588,109 @@ impl FlightSqlService for FlightSqlServiceImpl {
Ok(resp)
}
+ async fn do_get_catalogs(
+ &self,
+ query: CommandGetCatalogs,
+ _request: Request<Ticket>,
+ ) -> Result<Response<<Self as FlightService>::DoGetStream>, Status> {
+ let mut builder = query.into_builder();
+ for catalog_name in ["catalog_a", "catalog_b"] {
+ builder.append(catalog_name);
+ }
+ let schema = builder.schema();
+ let batch = builder.build();
+ let stream = FlightDataEncoderBuilder::new()
+ .with_schema(schema)
+ .build(futures::stream::once(async { batch }))
+ .map_err(Status::from);
+ Ok(Response::new(Box::pin(stream)))
+ }
+
+ async fn do_get_schemas(
+ &self,
+ query: CommandGetDbSchemas,
+ _request: Request<Ticket>,
+ ) -> Result<Response<<Self as FlightService>::DoGetStream>, Status> {
+ let mut builder = query.into_builder();
+ for (catalog_name, schema_name) in [
+ ("catalog_a", "schema_1"),
+ ("catalog_a", "schema_2"),
+ ("catalog_b", "schema_3"),
+ ] {
+ builder.append(catalog_name, schema_name);
+ }
+
+ let schema = builder.schema();
+ let batch = builder.build();
+ let stream = FlightDataEncoderBuilder::new()
+ .with_schema(schema)
+ .build(futures::stream::once(async { batch }))
+ .map_err(Status::from);
+ Ok(Response::new(Box::pin(stream)))
+ }
+
+ async fn do_get_tables(
+ &self,
+ query: CommandGetTables,
+ _request: Request<Ticket>,
+ ) -> Result<Response<<Self as FlightService>::DoGetStream>, Status> {
+ let mut builder = query.into_builder();
+ for (catalog_name, schema_name, table_name, table_type, schema) in [
+ (
+ "catalog_a",
+ "schema_1",
+ "table_1",
+ "TABLE",
+ Arc::new(Schema::empty()),
+ ),
+ (
+ "catalog_a",
+ "schema_2",
+ "table_2",
+ "VIEW",
+ Arc::new(Schema::empty()),
+ ),
+ (
+ "catalog_b",
+ "schema_3",
+ "table_3",
+ "TABLE",
+ Arc::new(Schema::empty()),
+ ),
+ ] {
+ builder
+ .append(catalog_name, schema_name, table_name, table_type, &schema)
+ .unwrap();
+ }
+
+ let schema = builder.schema();
+ let batch = builder.build();
+ let stream = FlightDataEncoderBuilder::new()
+ .with_schema(schema)
+ .build(futures::stream::once(async { batch }))
+ .map_err(Status::from);
+ Ok(Response::new(Box::pin(stream)))
+ }
+
+ async fn do_get_table_types(
+ &self,
+ query: CommandGetTableTypes,
+ _request: Request<Ticket>,
+ ) -> Result<Response<<Self as FlightService>::DoGetStream>, Status> {
+ let mut builder = query.into_builder();
+ for table_type in ["TABLE", "VIEW", "SYSTEM_TABLE"] {
+ builder.append(table_type);
+ }
+
+ let schema = builder.schema();
+ let batch = builder.build();
+ let stream = FlightDataEncoderBuilder::new()
+ .with_schema(schema)
+ .build(futures::stream::once(async { batch }))
+ .map_err(Status::from);
+ Ok(Response::new(Box::pin(stream)))
+ }
+
async fn do_put_prepared_statement_query(
&self,
_query: CommandPreparedStatementQuery,
|
Add Catalog DB Schema subcommands to `flight_sql_client`
**Is your feature request related to a problem or challenge? Please describe what you are trying to do.**
When using the `flight_sql_client` it can be helpful to interrogate the Flight SQL server for information about available catalogs, db schemas and tables.
**Describe the solution you'd like**
Adding subcommands to the CLI that mirror the various FlightSQL commands about catalogs, schemas and tables.
**Describe alternatives you've considered**
We could support `SHOW ...` variant queries and map them to the FlightSQL commands, however this is likely not as portable as a direct mapping of CLI subcommand to FlightSQL command.
**Additional context**
I have PR in progress.
|
2024-08-29T19:02:56Z
|
53.0
|
f41c258246cd4bd9d89228cded9ed54dbd00faff
|
|
apache/arrow-rs
| 6,328
|
apache__arrow-rs-6328
|
[
"6179"
] |
678517018ddfd21b202a94df13b06dfa1ab8a378
|
diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index a1644ee49b8d..1b65c5057de1 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -100,6 +100,13 @@ jobs:
run: rustup component add rustfmt
- name: Format arrow
run: cargo fmt --all -- --check
+ - name: Format parquet
+ # Many modules in parquet are skipped, so check parquet separately. If this check fails, run:
+ # cargo fmt -p parquet -- --config skip_children=true `find ./parquet -name "*.rs" \! -name format.rs`
+ # from the top level arrow-rs directory and check in the result.
+ # https://github.com/apache/arrow-rs/issues/6179
+ working-directory: parquet
+ run: cargo fmt -p parquet -- --check --config skip_children=true `find . -name "*.rs" \! -name format.rs`
- name: Format object_store
working-directory: object_store
run: cargo fmt --all -- --check
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a53604d0f6ae..e0adc18a9a60 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -160,6 +160,13 @@ PR be sure to run the following and check for lint issues:
cargo +stable fmt --all -- --check
```
+Note that currently the above will not check all source files in the parquet crate. To check all
+parquet files run the following from the top-level `arrow-rs` directory:
+
+```bash
+cargo fmt -p parquet -- --check --config skip_children=true `find . -name "*.rs" \! -name format.rs`
+```
+
## Breaking Changes
Our [release schedule] allows breaking API changes only in major releases.
diff --git a/parquet/src/arrow/array_reader/empty_array.rs b/parquet/src/arrow/array_reader/empty_array.rs
index 51673f2f8cf2..9aba08b4e853 100644
--- a/parquet/src/arrow/array_reader/empty_array.rs
+++ b/parquet/src/arrow/array_reader/empty_array.rs
@@ -17,9 +17,9 @@
use crate::arrow::array_reader::ArrayReader;
use crate::errors::Result;
-use arrow_schema::{DataType as ArrowType, Fields};
use arrow_array::{ArrayRef, StructArray};
use arrow_data::ArrayDataBuilder;
+use arrow_schema::{DataType as ArrowType, Fields};
use std::any::Any;
use std::sync::Arc;
diff --git a/parquet/src/arrow/array_reader/fixed_size_list_array.rs b/parquet/src/arrow/array_reader/fixed_size_list_array.rs
index 4cf68a06601c..75099d018fc9 100644
--- a/parquet/src/arrow/array_reader/fixed_size_list_array.rs
+++ b/parquet/src/arrow/array_reader/fixed_size_list_array.rs
@@ -138,7 +138,7 @@ impl ArrayReader for FixedSizeListArrayReader {
"Encountered misaligned row with length {} (expected length {})",
row_len,
self.fixed_size
- ))
+ ));
}
row_len = 0;
@@ -226,9 +226,7 @@ mod tests {
use super::*;
use crate::arrow::{
array_reader::{test_util::InMemoryArrayReader, ListArrayReader},
- arrow_reader::{
- ArrowReaderBuilder, ArrowReaderOptions, ParquetRecordBatchReader,
- },
+ arrow_reader::{ArrowReaderBuilder, ArrowReaderOptions, ParquetRecordBatchReader},
ArrowWriter,
};
use arrow::datatypes::{Field, Int32Type};
@@ -279,10 +277,7 @@ mod tests {
let mut list_array_reader = FixedSizeListArrayReader::new(
Box::new(item_array_reader),
3,
- ArrowType::FixedSizeList(
- Arc::new(Field::new("item", ArrowType::Int32, true)),
- 3,
- ),
+ ArrowType::FixedSizeList(Arc::new(Field::new("item", ArrowType::Int32, true)), 3),
2,
1,
true,
@@ -328,10 +323,7 @@ mod tests {
let mut list_array_reader = FixedSizeListArrayReader::new(
Box::new(item_array_reader),
2,
- ArrowType::FixedSizeList(
- Arc::new(Field::new("item", ArrowType::Int32, true)),
- 2,
- ),
+ ArrowType::FixedSizeList(Arc::new(Field::new("item", ArrowType::Int32, true)), 2),
1,
1,
false,
@@ -354,14 +346,10 @@ mod tests {
// [[4, 5]],
// [[null, null]],
// ]
- let l2_type = ArrowType::FixedSizeList(
- Arc::new(Field::new("item", ArrowType::Int32, true)),
- 2,
- );
- let l1_type = ArrowType::FixedSizeList(
- Arc::new(Field::new("item", l2_type.clone(), false)),
- 1,
- );
+ let l2_type =
+ ArrowType::FixedSizeList(Arc::new(Field::new("item", ArrowType::Int32, true)), 2);
+ let l1_type =
+ ArrowType::FixedSizeList(Arc::new(Field::new("item", l2_type.clone(), false)), 1);
let array = PrimitiveArray::<Int32Type>::from(vec![
None,
@@ -413,14 +401,8 @@ mod tests {
Some(vec![0, 0, 2, 0, 2, 0, 0, 2, 0, 2]),
);
- let l2 = FixedSizeListArrayReader::new(
- Box::new(item_array_reader),
- 2,
- l2_type,
- 4,
- 2,
- false,
- );
+ let l2 =
+ FixedSizeListArrayReader::new(Box::new(item_array_reader), 2, l2_type, 4, 2, false);
let mut l1 = FixedSizeListArrayReader::new(Box::new(l2), 1, l1_type, 3, 1, true);
let expected_1 = expected.slice(0, 2);
@@ -454,10 +436,7 @@ mod tests {
let mut list_array_reader = FixedSizeListArrayReader::new(
Box::new(item_array_reader),
0,
- ArrowType::FixedSizeList(
- Arc::new(Field::new("item", ArrowType::Int32, true)),
- 0,
- ),
+ ArrowType::FixedSizeList(Arc::new(Field::new("item", ArrowType::Int32, true)), 0),
2,
1,
true,
@@ -473,8 +452,7 @@ mod tests {
#[test]
fn test_nested_var_list() {
// [[[1, null, 3], null], [[4], []], [[5, 6], [null, null]], null]
- let mut builder =
- FixedSizeListBuilder::new(ListBuilder::new(Int32Builder::new()), 2);
+ let mut builder = FixedSizeListBuilder::new(ListBuilder::new(Int32Builder::new()), 2);
builder.values().append_value([Some(1), None, Some(3)]);
builder.values().append_null();
builder.append(true);
@@ -503,12 +481,9 @@ mod tests {
None,
]));
- let inner_type =
- ArrowType::List(Arc::new(Field::new("item", ArrowType::Int32, true)));
- let list_type = ArrowType::FixedSizeList(
- Arc::new(Field::new("item", inner_type.clone(), true)),
- 2,
- );
+ let inner_type = ArrowType::List(Arc::new(Field::new("item", ArrowType::Int32, true)));
+ let list_type =
+ ArrowType::FixedSizeList(Arc::new(Field::new("item", inner_type.clone(), true)), 2);
let item_array_reader = InMemoryArrayReader::new(
ArrowType::Int32,
@@ -517,22 +492,11 @@ mod tests {
Some(vec![0, 2, 2, 1, 0, 1, 0, 2, 1, 2, 0]),
);
- let inner_array_reader = ListArrayReader::<i32>::new(
- Box::new(item_array_reader),
- inner_type,
- 4,
- 2,
- true,
- );
+ let inner_array_reader =
+ ListArrayReader::<i32>::new(Box::new(item_array_reader), inner_type, 4, 2, true);
- let mut list_array_reader = FixedSizeListArrayReader::new(
- Box::new(inner_array_reader),
- 2,
- list_type,
- 2,
- 1,
- true,
- );
+ let mut list_array_reader =
+ FixedSizeListArrayReader::new(Box::new(inner_array_reader), 2, list_type, 2, 1, true);
let actual = list_array_reader.next_batch(1024).unwrap();
let actual = actual
.as_any()
@@ -564,21 +528,13 @@ mod tests {
);
// [null, 2, 3, null, 5]
- let primitive = PrimitiveArray::<Int32Type>::from_iter(vec![
- None,
- Some(2),
- Some(3),
- None,
- Some(5),
- ]);
+ let primitive =
+ PrimitiveArray::<Int32Type>::from_iter(vec![None, Some(2), Some(3), None, Some(5)]);
let schema = Arc::new(Schema::new(vec![
Field::new(
"list",
- ArrowType::FixedSizeList(
- Arc::new(Field::new("item", ArrowType::Int32, true)),
- 4,
- ),
+ ArrowType::FixedSizeList(Arc::new(Field::new("item", ArrowType::Int32, true)), 4),
true,
),
Field::new("primitive", ArrowType::Int32, true),
@@ -643,10 +599,7 @@ mod tests {
let schema = Arc::new(Schema::new(vec![Field::new(
"list",
- ArrowType::FixedSizeList(
- Arc::new(Field::new("item", ArrowType::Int32, true)),
- 4,
- ),
+ ArrowType::FixedSizeList(Arc::new(Field::new("item", ArrowType::Int32, true)), 4),
true,
)]));
diff --git a/parquet/src/arrow/array_reader/list_array.rs b/parquet/src/arrow/array_reader/list_array.rs
index e1752f30cea8..ebff3286bed5 100644
--- a/parquet/src/arrow/array_reader/list_array.rs
+++ b/parquet/src/arrow/array_reader/list_array.rs
@@ -125,8 +125,7 @@ impl<OffsetSize: OffsetSizeTrait> ArrayReader for ListArrayReader<OffsetSize> {
// lists, and for consistency we do the same for nulls.
// The output offsets for the computed ListArray
- let mut list_offsets: Vec<OffsetSize> =
- Vec::with_capacity(next_batch_array.len() + 1);
+ let mut list_offsets: Vec<OffsetSize> = Vec::with_capacity(next_batch_array.len() + 1);
// The validity mask of the computed ListArray if nullable
let mut validity = self
@@ -270,9 +269,7 @@ mod tests {
GenericListArray::<OffsetSize>::DATA_TYPE_CONSTRUCTOR(field)
}
- fn downcast<OffsetSize: OffsetSizeTrait>(
- array: &ArrayRef,
- ) -> &'_ GenericListArray<OffsetSize> {
+ fn downcast<OffsetSize: OffsetSizeTrait>(array: &ArrayRef) -> &'_ GenericListArray<OffsetSize> {
array
.as_any()
.downcast_ref::<GenericListArray<OffsetSize>>()
@@ -383,18 +380,12 @@ mod tests {
Some(vec![0, 3, 2, 2, 2, 1, 1, 1, 1, 3, 3, 2, 3, 3, 2, 0, 0, 0]),
);
- let l3 = ListArrayReader::<OffsetSize>::new(
- Box::new(item_array_reader),
- l3_type,
- 5,
- 3,
- true,
- );
+ let l3 =
+ ListArrayReader::<OffsetSize>::new(Box::new(item_array_reader), l3_type, 5, 3, true);
let l2 = ListArrayReader::<OffsetSize>::new(Box::new(l3), l2_type, 3, 2, false);
- let mut l1 =
- ListArrayReader::<OffsetSize>::new(Box::new(l2), l1_type, 2, 1, true);
+ let mut l1 = ListArrayReader::<OffsetSize>::new(Box::new(l2), l1_type, 2, 1, true);
let expected_1 = expected.slice(0, 2);
let expected_2 = expected.slice(2, 2);
@@ -560,8 +551,7 @@ mod tests {
.unwrap();
writer.close().unwrap();
- let file_reader: Arc<dyn FileReader> =
- Arc::new(SerializedFileReader::new(file).unwrap());
+ let file_reader: Arc<dyn FileReader> = Arc::new(SerializedFileReader::new(file).unwrap());
let file_metadata = file_reader.metadata().file_metadata();
let schema = file_metadata.schema_descr();
@@ -573,8 +563,7 @@ mod tests {
)
.unwrap();
- let mut array_reader =
- build_array_reader(fields.as_ref(), &mask, &file_reader).unwrap();
+ let mut array_reader = build_array_reader(fields.as_ref(), &mask, &file_reader).unwrap();
let batch = array_reader.next_batch(100).unwrap();
assert_eq!(batch.data_type(), array_reader.get_data_type());
@@ -584,9 +573,7 @@ mod tests {
"table_info",
ArrowType::List(Arc::new(Field::new(
"table_info",
- ArrowType::Struct(
- vec![Field::new("name", ArrowType::Binary, false)].into()
- ),
+ ArrowType::Struct(vec![Field::new("name", ArrowType::Binary, false)].into()),
false
))),
false
diff --git a/parquet/src/arrow/array_reader/map_array.rs b/parquet/src/arrow/array_reader/map_array.rs
index 9bfc047322a7..4bdec602ba4f 100644
--- a/parquet/src/arrow/array_reader/map_array.rs
+++ b/parquet/src/arrow/array_reader/map_array.rs
@@ -184,21 +184,19 @@ mod tests {
map_builder.append(true).expect("adding map entry");
// Create record batch
- let batch =
- RecordBatch::try_new(Arc::new(schema), vec![Arc::new(map_builder.finish())])
- .expect("create record batch");
+ let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(map_builder.finish())])
+ .expect("create record batch");
// Write record batch to file
let mut buffer = Vec::with_capacity(1024);
- let mut writer = ArrowWriter::try_new(&mut buffer, batch.schema(), None)
- .expect("creat file writer");
+ let mut writer =
+ ArrowWriter::try_new(&mut buffer, batch.schema(), None).expect("creat file writer");
writer.write(&batch).expect("writing file");
writer.close().expect("close writer");
// Read file
let reader = Bytes::from(buffer);
- let record_batch_reader =
- ParquetRecordBatchReader::try_new(reader, 1024).unwrap();
+ let record_batch_reader = ParquetRecordBatchReader::try_new(reader, 1024).unwrap();
for maybe_record_batch in record_batch_reader {
let record_batch = maybe_record_batch.expect("Getting current batch");
let col = record_batch.column(0);
diff --git a/parquet/src/arrow/array_reader/struct_array.rs b/parquet/src/arrow/array_reader/struct_array.rs
index 4af194774bfb..fb2f2f8928b9 100644
--- a/parquet/src/arrow/array_reader/struct_array.rs
+++ b/parquet/src/arrow/array_reader/struct_array.rs
@@ -112,10 +112,10 @@ impl ArrayReader for StructArrayReader {
.collect::<Result<Vec<_>>>()?;
// check that array child data has same size
- let children_array_len =
- children_array.first().map(|arr| arr.len()).ok_or_else(|| {
- general_err!("Struct array reader should have at least one child!")
- })?;
+ let children_array_len = children_array
+ .first()
+ .map(|arr| arr.len())
+ .ok_or_else(|| general_err!("Struct array reader should have at least one child!"))?;
let all_children_len_eq = children_array
.iter()
@@ -169,8 +169,7 @@ impl ArrayReader for StructArrayReader {
return Err(general_err!("Failed to decode level data for struct array"));
}
- array_data_builder =
- array_data_builder.null_bit_buffer(Some(bitmap_builder.into()));
+ array_data_builder = array_data_builder.null_bit_buffer(Some(bitmap_builder.into()));
}
let array_data = unsafe { array_data_builder.build_unchecked() };
@@ -282,13 +281,12 @@ mod tests {
// null,
// ]
- let expected_l =
- Arc::new(ListArray::from_iter_primitive::<Int32Type, _, _>(vec![
- Some(vec![Some(1), Some(2), None]),
- Some(vec![]),
- None,
- None,
- ]));
+ let expected_l = Arc::new(ListArray::from_iter_primitive::<Int32Type, _, _>(vec![
+ Some(vec![Some(1), Some(2), None]),
+ Some(vec![]),
+ None,
+ None,
+ ]));
let validity = Buffer::from([0b00000111]);
let struct_fields = vec![(
diff --git a/parquet/src/arrow/schema/complex.rs b/parquet/src/arrow/schema/complex.rs
index a5f6d242ef91..e487feabb848 100644
--- a/parquet/src/arrow/schema/complex.rs
+++ b/parquet/src/arrow/schema/complex.rs
@@ -41,7 +41,7 @@ pub struct ParquetField {
/// i.e. guaranteed to be > 0 for an element of list type
pub rep_level: i16,
/// The level at which this field is fully defined,
- /// i.e. guaranteed to be > 0 for a nullable type or child of a
+ /// i.e. guaranteed to be > 0 for a nullable type or child of a
/// nullable type
pub def_level: i16,
/// Whether this field is nullable
@@ -64,11 +64,7 @@ impl ParquetField {
rep_level: self.rep_level,
def_level: self.def_level,
nullable: false,
- arrow_type: DataType::List(Arc::new(Field::new(
- name,
- self.arrow_type.clone(),
- false,
- ))),
+ arrow_type: DataType::List(Arc::new(Field::new(name, self.arrow_type.clone(), false))),
field_type: ParquetFieldType::Group {
children: vec![self],
},
@@ -289,7 +285,7 @@ impl Visitor {
match map_key.get_basic_info().repetition() {
Repetition::REPEATED => {
- return Err(arrow_err!("Map keys cannot be repeated"));
+ return Err(arrow_err!("Map keys cannot be repeated"));
}
Repetition::REQUIRED | Repetition::OPTIONAL => {
// Relaxed check for having repetition REQUIRED as there exists
@@ -317,10 +313,7 @@ impl Visitor {
(Some(field), Some(&*fields[0]), Some(&*fields[1]), *sorted)
}
d => {
- return Err(arrow_err!(
- "Map data type should contain struct got {}",
- d
- ));
+ return Err(arrow_err!("Map data type should contain struct got {}", d));
}
},
Some(d) => {
@@ -416,9 +409,7 @@ impl Visitor {
let (def_level, nullable) = match list_type.get_basic_info().repetition() {
Repetition::REQUIRED => (context.def_level, false),
Repetition::OPTIONAL => (context.def_level + 1, true),
- Repetition::REPEATED => {
- return Err(arrow_err!("List type cannot be repeated"))
- }
+ Repetition::REPEATED => return Err(arrow_err!("List type cannot be repeated")),
};
let arrow_field = match &context.data_type {
@@ -542,11 +533,7 @@ impl Visitor {
///
/// The resulting [`Field`] will have the type dictated by `field`, a name
/// dictated by the `parquet_type`, and any metadata from `arrow_hint`
-fn convert_field(
- parquet_type: &Type,
- field: &ParquetField,
- arrow_hint: Option<&Field>,
-) -> Field {
+fn convert_field(parquet_type: &Type, field: &ParquetField, arrow_hint: Option<&Field>) -> Field {
let name = parquet_type.name();
let data_type = field.arrow_type.clone();
let nullable = field.nullable;
@@ -568,11 +555,14 @@ fn convert_field(
let basic_info = parquet_type.get_basic_info();
if basic_info.has_id() {
let mut meta = HashMap::with_capacity(1);
- meta.insert(PARQUET_FIELD_ID_META_KEY.to_string(), basic_info.id().to_string());
+ meta.insert(
+ PARQUET_FIELD_ID_META_KEY.to_string(),
+ basic_info.id().to_string(),
+ );
ret.set_metadata(meta);
}
ret
- },
+ }
}
}
diff --git a/parquet/src/arrow/schema/mod.rs b/parquet/src/arrow/schema/mod.rs
index a3528b6c8adb..fab8966952b2 100644
--- a/parquet/src/arrow/schema/mod.rs
+++ b/parquet/src/arrow/schema/mod.rs
@@ -1483,11 +1483,25 @@ mod tests {
),
Field::new("date", DataType::Date32, true),
Field::new("time_milli", DataType::Time32(TimeUnit::Millisecond), true),
- Field::new("time_milli_utc", DataType::Time32(TimeUnit::Millisecond), true)
- .with_metadata(HashMap::from_iter(vec![("adjusted_to_utc".to_string(), "".to_string())])),
+ Field::new(
+ "time_milli_utc",
+ DataType::Time32(TimeUnit::Millisecond),
+ true,
+ )
+ .with_metadata(HashMap::from_iter(vec![(
+ "adjusted_to_utc".to_string(),
+ "".to_string(),
+ )])),
Field::new("time_micro", DataType::Time64(TimeUnit::Microsecond), true),
- Field::new("time_micro_utc", DataType::Time64(TimeUnit::Microsecond), true)
- .with_metadata(HashMap::from_iter(vec![("adjusted_to_utc".to_string(), "".to_string())])),
+ Field::new(
+ "time_micro_utc",
+ DataType::Time64(TimeUnit::Microsecond),
+ true,
+ )
+ .with_metadata(HashMap::from_iter(vec![(
+ "adjusted_to_utc".to_string(),
+ "".to_string(),
+ )])),
Field::new(
"ts_milli",
DataType::Timestamp(TimeUnit::Millisecond, None),
diff --git a/parquet/src/compression.rs b/parquet/src/compression.rs
index 10560210e4e8..edf675f1302a 100644
--- a/parquet/src/compression.rs
+++ b/parquet/src/compression.rs
@@ -150,35 +150,47 @@ pub fn create_codec(codec: CodecType, _options: &CodecOptions) -> Result<Option<
CodecType::BROTLI(level) => {
#[cfg(any(feature = "brotli", test))]
return Ok(Some(Box::new(BrotliCodec::new(level))));
- Err(ParquetError::General("Disabled feature at compile time: brotli".into()))
- },
+ Err(ParquetError::General(
+ "Disabled feature at compile time: brotli".into(),
+ ))
+ }
CodecType::GZIP(level) => {
#[cfg(any(feature = "flate2", test))]
return Ok(Some(Box::new(GZipCodec::new(level))));
- Err(ParquetError::General("Disabled feature at compile time: flate2".into()))
- },
+ Err(ParquetError::General(
+ "Disabled feature at compile time: flate2".into(),
+ ))
+ }
CodecType::SNAPPY => {
#[cfg(any(feature = "snap", test))]
return Ok(Some(Box::new(SnappyCodec::new())));
- Err(ParquetError::General("Disabled feature at compile time: snap".into()))
- },
+ Err(ParquetError::General(
+ "Disabled feature at compile time: snap".into(),
+ ))
+ }
CodecType::LZ4 => {
#[cfg(any(feature = "lz4", test))]
return Ok(Some(Box::new(LZ4HadoopCodec::new(
_options.backward_compatible_lz4,
))));
- Err(ParquetError::General("Disabled feature at compile time: lz4".into()))
- },
+ Err(ParquetError::General(
+ "Disabled feature at compile time: lz4".into(),
+ ))
+ }
CodecType::ZSTD(level) => {
#[cfg(any(feature = "zstd", test))]
return Ok(Some(Box::new(ZSTDCodec::new(level))));
- Err(ParquetError::General("Disabled feature at compile time: zstd".into()))
- },
+ Err(ParquetError::General(
+ "Disabled feature at compile time: zstd".into(),
+ ))
+ }
CodecType::LZ4_RAW => {
#[cfg(any(feature = "lz4", test))]
return Ok(Some(Box::new(LZ4RawCodec::new())));
- Err(ParquetError::General("Disabled feature at compile time: lz4".into()))
- },
+ Err(ParquetError::General(
+ "Disabled feature at compile time: lz4".into(),
+ ))
+ }
CodecType::UNCOMPRESSED => Ok(None),
_ => Err(nyi_err!("The codec type {} is not supported yet", codec)),
}
diff --git a/parquet/src/encodings/encoding/dict_encoder.rs b/parquet/src/encodings/encoding/dict_encoder.rs
index 98283b574ebb..8efb845219d3 100644
--- a/parquet/src/encodings/encoding/dict_encoder.rs
+++ b/parquet/src/encodings/encoding/dict_encoder.rs
@@ -148,7 +148,6 @@ impl<T: DataType> DictEncoder<T> {
fn bit_width(&self) -> u8 {
num_required_bits(self.num_entries().saturating_sub(1) as u64)
}
-
}
impl<T: DataType> Encoder<T> for DictEncoder<T> {
diff --git a/parquet/src/encodings/levels.rs b/parquet/src/encodings/levels.rs
index 8d4e48ba16bd..6f662b614fca 100644
--- a/parquet/src/encodings/levels.rs
+++ b/parquet/src/encodings/levels.rs
@@ -27,11 +27,7 @@ use crate::util::bit_util::{ceil, num_required_bits, BitWriter};
/// repetition/definition level and number of total buffered values (includes null
/// values).
#[inline]
-pub fn max_buffer_size(
- encoding: Encoding,
- max_level: i16,
- num_buffered_values: usize,
-) -> usize {
+pub fn max_buffer_size(encoding: Encoding, max_level: i16, num_buffered_values: usize) -> usize {
let bit_width = num_required_bits(max_level as u64);
match encoding {
Encoding::RLE => RleEncoder::max_buffer_size(bit_width, num_buffered_values),
diff --git a/parquet/src/encodings/rle.rs b/parquet/src/encodings/rle.rs
index 97a122941f17..e1ca8cd745e3 100644
--- a/parquet/src/encodings/rle.rs
+++ b/parquet/src/encodings/rle.rs
@@ -199,13 +199,9 @@ impl RleEncoder {
/// internal writer.
#[inline]
pub fn flush(&mut self) {
- if self.bit_packed_count > 0
- || self.repeat_count > 0
- || self.num_buffered_values > 0
- {
+ if self.bit_packed_count > 0 || self.repeat_count > 0 || self.num_buffered_values > 0 {
let all_repeat = self.bit_packed_count == 0
- && (self.repeat_count == self.num_buffered_values
- || self.num_buffered_values == 0);
+ && (self.repeat_count == self.num_buffered_values || self.num_buffered_values == 0);
if self.repeat_count > 0 && all_repeat {
self.flush_rle_run();
} else {
@@ -250,11 +246,8 @@ impl RleEncoder {
// Write the indicator byte to the reserved position in `bit_writer`
let num_groups = self.bit_packed_count / 8;
let indicator_byte = ((num_groups << 1) | 1) as u8;
- self.bit_writer.put_aligned_offset(
- indicator_byte,
- 1,
- self.indicator_byte_pos as usize,
- );
+ self.bit_writer
+ .put_aligned_offset(indicator_byte, 1, self.indicator_byte_pos as usize);
self.indicator_byte_pos = -1;
self.bit_packed_count = 0;
}
@@ -288,9 +281,7 @@ impl RleEncoder {
/// return the estimated memory size of this encoder.
pub(crate) fn estimated_memory_size(&self) -> usize {
- self.bit_writer.estimated_memory_size()
- + std::mem::size_of::<Self>()
-
+ self.bit_writer.estimated_memory_size() + std::mem::size_of::<Self>()
}
}
@@ -384,12 +375,10 @@ impl RleDecoder {
let mut values_read = 0;
while values_read < buffer.len() {
if self.rle_left > 0 {
- let num_values =
- cmp::min(buffer.len() - values_read, self.rle_left as usize);
+ let num_values = cmp::min(buffer.len() - values_read, self.rle_left as usize);
for i in 0..num_values {
- let repeated_value = T::try_from_le_slice(
- &self.current_value.as_mut().unwrap().to_ne_bytes(),
- )?;
+ let repeated_value =
+ T::try_from_le_slice(&self.current_value.as_mut().unwrap().to_ne_bytes())?;
buffer[values_read + i] = repeated_value;
}
self.rle_left -= num_values as u32;
@@ -397,8 +386,7 @@ impl RleDecoder {
} else if self.bit_packed_left > 0 {
let mut num_values =
cmp::min(buffer.len() - values_read, self.bit_packed_left as usize);
- let bit_reader =
- self.bit_reader.as_mut().expect("bit_reader should be set");
+ let bit_reader = self.bit_reader.as_mut().expect("bit_reader should be set");
num_values = bit_reader.get_batch::<T>(
&mut buffer[values_read..values_read + num_values],
@@ -424,15 +412,13 @@ impl RleDecoder {
let mut values_skipped = 0;
while values_skipped < num_values {
if self.rle_left > 0 {
- let num_values =
- cmp::min(num_values - values_skipped, self.rle_left as usize);
+ let num_values = cmp::min(num_values - values_skipped, self.rle_left as usize);
self.rle_left -= num_values as u32;
values_skipped += num_values;
} else if self.bit_packed_left > 0 {
let mut num_values =
cmp::min(num_values - values_skipped, self.bit_packed_left as usize);
- let bit_reader =
- self.bit_reader.as_mut().expect("bit_reader should be set");
+ let bit_reader = self.bit_reader.as_mut().expect("bit_reader should be set");
num_values = bit_reader.skip(num_values, self.bit_width as usize);
if num_values == 0 {
@@ -467,8 +453,7 @@ impl RleDecoder {
let index_buf = self.index_buf.get_or_insert_with(|| Box::new([0; 1024]));
if self.rle_left > 0 {
- let num_values =
- cmp::min(max_values - values_read, self.rle_left as usize);
+ let num_values = cmp::min(max_values - values_read, self.rle_left as usize);
let dict_idx = self.current_value.unwrap() as usize;
for i in 0..num_values {
buffer[values_read + i].clone_from(&dict[dict_idx]);
@@ -476,8 +461,7 @@ impl RleDecoder {
self.rle_left -= num_values as u32;
values_read += num_values;
} else if self.bit_packed_left > 0 {
- let bit_reader =
- self.bit_reader.as_mut().expect("bit_reader should be set");
+ let bit_reader = self.bit_reader.as_mut().expect("bit_reader should be set");
loop {
let to_read = index_buf
@@ -489,10 +473,8 @@ impl RleDecoder {
break;
}
- let num_values = bit_reader.get_batch::<i32>(
- &mut index_buf[..to_read],
- self.bit_width as usize,
- );
+ let num_values = bit_reader
+ .get_batch::<i32>(&mut index_buf[..to_read], self.bit_width as usize);
if num_values == 0 {
// Handle writers which truncate the final block
self.bit_packed_left = 0;
@@ -708,14 +690,10 @@ mod tests {
decoder.set_data(data.into());
let mut buffer = vec![""; 12];
let expected = vec![
- "ddd", "eee", "fff", "ddd", "eee", "fff", "ddd", "eee", "fff", "eee", "fff",
- "fff",
+ "ddd", "eee", "fff", "ddd", "eee", "fff", "ddd", "eee", "fff", "eee", "fff", "fff",
];
- let result = decoder.get_batch_with_dict::<&str>(
- dict.as_slice(),
- buffer.as_mut_slice(),
- 12,
- );
+ let result =
+ decoder.get_batch_with_dict::<&str>(dict.as_slice(), buffer.as_mut_slice(), 12);
assert!(result.is_ok());
assert_eq!(buffer, expected);
}
@@ -1042,8 +1020,7 @@ mod tests {
for _ in 0..niters {
values.clear();
let rng = thread_rng();
- let seed_vec: Vec<u8> =
- rng.sample_iter::<u8, _>(&Standard).take(seed_len).collect();
+ let seed_vec: Vec<u8> = rng.sample_iter::<u8, _>(&Standard).take(seed_len).collect();
let mut seed = [0u8; 32];
seed.copy_from_slice(&seed_vec[0..seed_len]);
let mut gen = rand::rngs::StdRng::from_seed(seed);
diff --git a/parquet/src/util/bit_util.rs b/parquet/src/util/bit_util.rs
index a17202254cd6..062f93270386 100644
--- a/parquet/src/util/bit_util.rs
+++ b/parquet/src/util/bit_util.rs
@@ -1027,7 +1027,10 @@ mod tests {
.collect();
// Generic values used to check against actual values read from `get_batch`.
- let expected_values: Vec<T> = values.iter().map(|v| T::try_from_le_slice(v.as_bytes()).unwrap()).collect();
+ let expected_values: Vec<T> = values
+ .iter()
+ .map(|v| T::try_from_le_slice(v.as_bytes()).unwrap())
+ .collect();
(0..total).for_each(|i| writer.put_value(values[i], num_bits));
diff --git a/parquet/src/util/interner.rs b/parquet/src/util/interner.rs
index f57fc3a71409..a804419b5da7 100644
--- a/parquet/src/util/interner.rs
+++ b/parquet/src/util/interner.rs
@@ -35,7 +35,7 @@ pub trait Storage {
/// Return an estimate of the memory used in this storage, in bytes
#[allow(dead_code)] // not used in parquet_derive, so is dead there
- fn estimated_memory_size(&self) -> usize;
+ fn estimated_memory_size(&self) -> usize;
}
/// A generic value interner supporting various different [`Storage`]
|
diff --git a/parquet/src/util/test_common/file_util.rs b/parquet/src/util/test_common/file_util.rs
index c2dcd677360d..6c031358e795 100644
--- a/parquet/src/util/test_common/file_util.rs
+++ b/parquet/src/util/test_common/file_util.rs
@@ -19,8 +19,7 @@ use std::{fs, path::PathBuf, str::FromStr};
/// Returns path to the test parquet file in 'data' directory
pub fn get_test_path(file_name: &str) -> PathBuf {
- let mut pathbuf =
- PathBuf::from_str(&arrow::util::test_util::parquet_test_data()).unwrap();
+ let mut pathbuf = PathBuf::from_str(&arrow::util::test_util::parquet_test_data()).unwrap();
pathbuf.push(file_name);
pathbuf
}
diff --git a/parquet/src/util/test_common/mod.rs b/parquet/src/util/test_common/mod.rs
index 504219ecae19..8cfc1e6dd423 100644
--- a/parquet/src/util/test_common/mod.rs
+++ b/parquet/src/util/test_common/mod.rs
@@ -21,4 +21,4 @@ pub mod page_util;
pub mod file_util;
#[cfg(test)]
-pub mod rand_gen;
\ No newline at end of file
+pub mod rand_gen;
diff --git a/parquet/src/util/test_common/rand_gen.rs b/parquet/src/util/test_common/rand_gen.rs
index a267c34840c1..ec80d3a593ae 100644
--- a/parquet/src/util/test_common/rand_gen.rs
+++ b/parquet/src/util/test_common/rand_gen.rs
@@ -173,8 +173,7 @@ pub fn make_pages<T: DataType>(
// Generate the current page
- let mut pb =
- DataPageBuilderImpl::new(desc.clone(), num_values_cur_page as u32, use_v2);
+ let mut pb = DataPageBuilderImpl::new(desc.clone(), num_values_cur_page as u32, use_v2);
if max_rep_level > 0 {
pb.add_rep_levels(max_rep_level, &rep_levels[level_range.clone()]);
}
|
Is `cargo fmt` no longer working properly in the parquet crate?
**Which part is this question about**
Code formatter.
**Describe your question**
I've noticed recently that running `cargo fmt` while I'm editing files doesn't always seem to catch problems. Running rustfmt directly will work. For instance, running `cargo fmt` on the parquet crate yields no output.
```
% cargo +stable fmt -p parquet -v -- --check
[bench (2021)] "/Users/seidl/src/arrow-rs/parquet/benches/arrow_reader.rs"
[bench (2021)] "/Users/seidl/src/arrow-rs/parquet/benches/arrow_statistics.rs"
[bench (2021)] "/Users/seidl/src/arrow-rs/parquet/benches/arrow_writer.rs"
[bench (2021)] "/Users/seidl/src/arrow-rs/parquet/benches/compression.rs"
[bench (2021)] "/Users/seidl/src/arrow-rs/parquet/benches/encoding.rs"
[bench (2021)] "/Users/seidl/src/arrow-rs/parquet/benches/metadata.rs"
[example (2021)] "/Users/seidl/src/arrow-rs/parquet/examples/async_read_parquet.rs"
[example (2021)] "/Users/seidl/src/arrow-rs/parquet/examples/read_parquet.rs"
[example (2021)] "/Users/seidl/src/arrow-rs/parquet/examples/read_with_rowgroup.rs"
[example (2021)] "/Users/seidl/src/arrow-rs/parquet/examples/write_parquet.rs"
[bin (2021)] "/Users/seidl/src/arrow-rs/parquet/src/bin/parquet-concat.rs"
[bin (2021)] "/Users/seidl/src/arrow-rs/parquet/src/bin/parquet-fromcsv.rs"
[bin (2021)] "/Users/seidl/src/arrow-rs/parquet/src/bin/parquet-index.rs"
[bin (2021)] "/Users/seidl/src/arrow-rs/parquet/src/bin/parquet-layout.rs"
[bin (2021)] "/Users/seidl/src/arrow-rs/parquet/src/bin/parquet-read.rs"
[bin (2021)] "/Users/seidl/src/arrow-rs/parquet/src/bin/parquet-rewrite.rs"
[bin (2021)] "/Users/seidl/src/arrow-rs/parquet/src/bin/parquet-rowcount.rs"
[bin (2021)] "/Users/seidl/src/arrow-rs/parquet/src/bin/parquet-schema.rs"
[bin (2021)] "/Users/seidl/src/arrow-rs/parquet/src/bin/parquet-show-bloom-filter.rs"
[lib (2021)] "/Users/seidl/src/arrow-rs/parquet/src/lib.rs"
[test (2021)] "/Users/seidl/src/arrow-rs/parquet/tests/arrow_reader/mod.rs"
[test (2021)] "/Users/seidl/src/arrow-rs/parquet/tests/arrow_writer_layout.rs"
rustfmt --edition 2021 --check /Users/seidl/src/arrow-rs/parquet/benches/arrow_reader.rs /Users/seidl/src/arrow-rs/parquet/benches/arrow_statistics.rs /Users/seidl/src/arrow-rs/parquet/benches/arrow_writer.rs /Users/seidl/src/arrow-rs/parquet/benches/compression.rs /Users/seidl/src/arrow-rs/parquet/benches/encoding.rs /Users/seidl/src/arrow-rs/parquet/benches/metadata.rs /Users/seidl/src/arrow-rs/parquet/examples/async_read_parquet.rs /Users/seidl/src/arrow-rs/parquet/examples/read_parquet.rs /Users/seidl/src/arrow-rs/parquet/examples/read_with_rowgroup.rs /Users/seidl/src/arrow-rs/parquet/examples/write_parquet.rs /Users/seidl/src/arrow-rs/parquet/src/bin/parquet-concat.rs /Users/seidl/src/arrow-rs/parquet/src/bin/parquet-fromcsv.rs /Users/seidl/src/arrow-rs/parquet/src/bin/parquet-index.rs /Users/seidl/src/arrow-rs/parquet/src/bin/parquet-layout.rs /Users/seidl/src/arrow-rs/parquet/src/bin/parquet-read.rs /Users/seidl/src/arrow-rs/parquet/src/bin/parquet-rewrite.rs /Users/seidl/src/arrow-rs/parquet/src/bin/parquet-rowcount.rs /Users/seidl/src/arrow-rs/parquet/src/bin/parquet-schema.rs /Users/seidl/src/arrow-rs/parquet/src/bin/parquet-show-bloom-filter.rs /Users/seidl/src/arrow-rs/parquet/src/lib.rs /Users/seidl/src/arrow-rs/parquet/tests/arrow_reader/mod.rs /Users/seidl/src/arrow-rs/parquet/tests/arrow_writer_layout.rs
%
```
But there are files that when run manually do (running the rustfmt command line above with the addition of `parquet/src/compression.rs`):
```
% rustfmt --edition 2021 --check /Users/seidl/src/arrow-rs/parquet/benches/arrow_reader.rs /Users/seidl/src/arrow-rs/parquet/benches/arrow_statistics.rs /Users/seidl/src/arrow-rs/parquet/benches/arrow_writer.rs /Users/seidl/src/arrow-rs/parquet/benches/compression.rs /Users/seidl/src/arrow-rs/parquet/benches/encoding.rs /Users/seidl/src/arrow-rs/parquet/benches/metadata.rs /Users/seidl/src/arrow-rs/parquet/examples/async_read_parquet.rs /Users/seidl/src/arrow-rs/parquet/examples/read_parquet.rs /Users/seidl/src/arrow-rs/parquet/examples/read_with_rowgroup.rs /Users/seidl/src/arrow-rs/parquet/examples/write_parquet.rs /Users/seidl/src/arrow-rs/parquet/src/bin/parquet-concat.rs /Users/seidl/src/arrow-rs/parquet/src/bin/parquet-fromcsv.rs /Users/seidl/src/arrow-rs/parquet/src/bin/parquet-index.rs /Users/seidl/src/arrow-rs/parquet/src/bin/parquet-layout.rs /Users/seidl/src/arrow-rs/parquet/src/bin/parquet-read.rs /Users/seidl/src/arrow-rs/parquet/src/bin/parquet-rewrite.rs /Users/seidl/src/arrow-rs/parquet/src/bin/parquet-rowcount.rs /Users/seidl/src/arrow-rs/parquet/src/bin/parquet-schema.rs /Users/seidl/src/arrow-rs/parquet/src/bin/parquet-show-bloom-filter.rs /Users/seidl/src/arrow-rs/parquet/src/lib.rs /Users/seidl/src/arrow-rs/parquet/tests/arrow_reader/mod.rs /Users/seidl/src/arrow-rs/parquet/tests/arrow_writer_layout.rs /Users/seidl/src/arrow-rs/parquet/src/compression.rs
Diff in /Users/seidl/src/arrow-rs/parquet/src/compression.rs at line 150:
CodecType::BROTLI(level) => {
#[cfg(any(feature = "brotli", test))]
return Ok(Some(Box::new(BrotliCodec::new(level))));
- Err(ParquetError::General("Disabled feature at compile time: brotli".into()))
- },
+ Err(ParquetError::General(
+ "Disabled feature at compile time: brotli".into(),
+ ))
+ }
CodecType::GZIP(level) => {
#[cfg(any(feature = "flate2", test))]
return Ok(Some(Box::new(GZipCodec::new(level))));
Diff in /Users/seidl/src/arrow-rs/parquet/src/compression.rs at line 158:
- Err(ParquetError::General("Disabled feature at compile time: flate2".into()))
- },
+ Err(ParquetError::General(
+ "Disabled feature at compile time: flate2".into(),
+ ))
+ }
CodecType::SNAPPY => {
#[cfg(any(feature = "snap", test))]
return Ok(Some(Box::new(SnappyCodec::new())));
Diff in /Users/seidl/src/arrow-rs/parquet/src/compression.rs at line 163:
- Err(ParquetError::General("Disabled feature at compile time: snap".into()))
- },
+ Err(ParquetError::General(
+ "Disabled feature at compile time: snap".into(),
+ ))
+ }
CodecType::LZ4 => {
#[cfg(any(feature = "lz4", test))]
return Ok(Some(Box::new(LZ4HadoopCodec::new(
Diff in /Users/seidl/src/arrow-rs/parquet/src/compression.rs at line 168:
_options.backward_compatible_lz4,
))));
- Err(ParquetError::General("Disabled feature at compile time: lz4".into()))
- },
+ Err(ParquetError::General(
+ "Disabled feature at compile time: lz4".into(),
+ ))
+ }
CodecType::ZSTD(level) => {
#[cfg(any(feature = "zstd", test))]
return Ok(Some(Box::new(ZSTDCodec::new(level))));
Diff in /Users/seidl/src/arrow-rs/parquet/src/compression.rs at line 175:
- Err(ParquetError::General("Disabled feature at compile time: zstd".into()))
- },
+ Err(ParquetError::General(
+ "Disabled feature at compile time: zstd".into(),
+ ))
+ }
CodecType::LZ4_RAW => {
#[cfg(any(feature = "lz4", test))]
return Ok(Some(Box::new(LZ4RawCodec::new())));
Diff in /Users/seidl/src/arrow-rs/parquet/src/compression.rs at line 180:
- Err(ParquetError::General("Disabled feature at compile time: lz4".into()))
- },
+ Err(ParquetError::General(
+ "Disabled feature at compile time: lz4".into(),
+ ))
+ }
CodecType::UNCOMPRESSED => Ok(None),
_ => Err(nyi_err!("The codec type {} is not supported yet", codec)),
}
%
```
**Additional context**
There are many reports of fmt silently failing, one such is https://github.com/rust-lang/rustfmt/issues/3008.
Has anyone else noticed this, or is it something to do with my setup?
```
% cargo --version
cargo 1.80.0 (376290515 2024-07-16)
% rustfmt --version
rustfmt 1.7.0-stable (05147895 2024-07-21)
```
|
Fascinatingly, I am seeing the same thing
For example I deliberately introduced a formatting issue:
```
andrewlamb@Andrews-MacBook-Pro-2:~/Software/arrow-rs$ git diff
diff --git a/parquet/src/compression.rs b/parquet/src/compression.rs
index 10560210e4e..119af7e156f 100644
--- a/parquet/src/compression.rs
+++ b/parquet/src/compression.rs
@@ -15,6 +15,10 @@
// specific language governing permissions and limitations
// under the License.
+
+
+
+
//! Contains codec interface and supported codec implementations.
//!
//! See [`Compression`](crate::basic::Compression) enum for all available compression
```
And then when I ran fmt it didn't seem to fix it
```shell
andrewlamb@Andrews-MacBook-Pro-2:~/Software/arrow-rs$ cargo fmt
andrewlamb@Andrews-MacBook-Pro-2:~/Software/arrow-rs$ git diff
diff --git a/parquet/src/compression.rs b/parquet/src/compression.rs
index 10560210e4e..119af7e156f 100644
--- a/parquet/src/compression.rs
+++ b/parquet/src/compression.rs
@@ -15,6 +15,10 @@
// specific language governing permissions and limitations
// under the License.
+
+
+
+
//! Contains codec interface and supported codec implementations.
//!
//! See [`Compression`](crate::basic::Compression) enum for all available compression
```
```shell
andrewlamb@Andrews-MacBook-Pro-2:~/Software/arrow-rs$ cargo --version
cargo 1.80.0 (376290515 2024-07-16)
andrewlamb@Andrews-MacBook-Pro-2:~/Software/arrow-rs$ rustfmt --version
rustfmt 1.7.0-stable (05147895 2024-07-21)
andrewlamb@Andrews-MacBook-Pro-2:~/Software/arrow-rs$
```
Interestingly I found that removing this line:
https://github.com/apache/arrow-rs/blob/b72098fee551086b2b4eb4b131aeaf920cf7e4b3/parquet/src/lib.rs#L121
Make `cargo fmt` come back
Though it looks like (I) added that line 3 months ago: https://github.com/apache/arrow-rs/commit/98784bd059653cdc316454380ac93a34d53089fd 🤔
It's the first time for me to know `#[rustfmt::skip]` :laughing:
I tried reverting #5727 and still no output :(
Still tracking this. Here's a list of non-compliant files in the parquet source (`format.rs` is expected).
```
% rustfmt --check -l `find . -name "*.rs"` |sort|uniq
/Users/seidl2/src/arrow-rs/parquet/src/arrow/array_reader/empty_array.rs
/Users/seidl2/src/arrow-rs/parquet/src/arrow/array_reader/fixed_size_list_array.rs
/Users/seidl2/src/arrow-rs/parquet/src/arrow/array_reader/list_array.rs
/Users/seidl2/src/arrow-rs/parquet/src/arrow/array_reader/map_array.rs
/Users/seidl2/src/arrow-rs/parquet/src/arrow/array_reader/struct_array.rs
/Users/seidl2/src/arrow-rs/parquet/src/arrow/schema/complex.rs
/Users/seidl2/src/arrow-rs/parquet/src/arrow/schema/mod.rs
/Users/seidl2/src/arrow-rs/parquet/src/compression.rs
/Users/seidl2/src/arrow-rs/parquet/src/encodings/encoding/dict_encoder.rs
/Users/seidl2/src/arrow-rs/parquet/src/encodings/levels.rs
/Users/seidl2/src/arrow-rs/parquet/src/encodings/rle.rs
/Users/seidl2/src/arrow-rs/parquet/src/format.rs
/Users/seidl2/src/arrow-rs/parquet/src/util/bit_util.rs
/Users/seidl2/src/arrow-rs/parquet/src/util/interner.rs
/Users/seidl2/src/arrow-rs/parquet/src/util/test_common/file_util.rs
/Users/seidl2/src/arrow-rs/parquet/src/util/test_common/mod.rs
/Users/seidl2/src/arrow-rs/parquet/src/util/test_common/rand_gen.rs
```
Ah, so apparently rustfmt will not format modules that are declared in macros, such as those in the `experimental!` macro https://github.com/apache/arrow-rs/blob/a937869f892dc12c4730189e216bf3bd48c2561d/parquet/src/lib.rs#L132-L137
There is an open issue for this https://github.com/rust-lang/rustfmt/issues/3253
One workaround is to tack a list of files to format to the end of the commandline
```
cargo +stable fmt --all --check -- `find parquet -name "*.rs" \! -name format.rs`
```
Perhaps something like this could be added to `.github/workflows/rust.yml`.
Edit: this has also been a problem in DataFusion https://github.com/apache/datafusion/pull/9367. I don't know if a similar solution would work here.
|
2024-08-29T16:36:10Z
|
52.2
|
678517018ddfd21b202a94df13b06dfa1ab8a378
|
apache/arrow-rs
| 6,320
|
apache__arrow-rs-6320
|
[
"6318"
] |
a937869f892dc12c4730189e216bf3bd48c2561d
|
diff --git a/arrow/src/pyarrow.rs b/arrow/src/pyarrow.rs
index 43cdb4fe0919..336398cbf22f 100644
--- a/arrow/src/pyarrow.rs
+++ b/arrow/src/pyarrow.rs
@@ -59,7 +59,7 @@ use std::convert::{From, TryFrom};
use std::ptr::{addr_of, addr_of_mut};
use std::sync::Arc;
-use arrow_array::{RecordBatchIterator, RecordBatchReader, StructArray};
+use arrow_array::{RecordBatchIterator, RecordBatchOptions, RecordBatchReader, StructArray};
use pyo3::exceptions::{PyTypeError, PyValueError};
use pyo3::ffi::Py_uintptr_t;
use pyo3::import_exception;
@@ -361,6 +361,7 @@ impl FromPyArrow for RecordBatch {
"Expected Struct type from __arrow_c_array.",
));
}
+ let options = RecordBatchOptions::default().with_row_count(Some(array_data.len()));
let array = StructArray::from(array_data);
// StructArray does not embed metadata from schema. We need to override
// the output schema with the schema from the capsule.
@@ -371,7 +372,7 @@ impl FromPyArrow for RecordBatch {
0,
"Cannot convert nullable StructArray to RecordBatch, see StructArray documentation"
);
- return RecordBatch::try_new(schema, columns).map_err(to_py_err);
+ return RecordBatch::try_new_with_options(schema, columns, &options).map_err(to_py_err);
}
validate_class("RecordBatch", value)?;
@@ -386,7 +387,14 @@ impl FromPyArrow for RecordBatch {
.map(|a| Ok(make_array(ArrayData::from_pyarrow_bound(&a)?)))
.collect::<PyResult<_>>()?;
- let batch = RecordBatch::try_new(schema, arrays).map_err(to_py_err)?;
+ let row_count = value
+ .getattr("num_rows")
+ .ok()
+ .and_then(|x| x.extract().ok());
+ let options = RecordBatchOptions::default().with_row_count(row_count);
+
+ let batch =
+ RecordBatch::try_new_with_options(schema, arrays, &options).map_err(to_py_err)?;
Ok(batch)
}
}
|
diff --git a/arrow-pyarrow-integration-testing/tests/test_sql.py b/arrow-pyarrow-integration-testing/tests/test_sql.py
index 5320d0a5343e..3b46d5729a1f 100644
--- a/arrow-pyarrow-integration-testing/tests/test_sql.py
+++ b/arrow-pyarrow-integration-testing/tests/test_sql.py
@@ -476,6 +476,29 @@ def test_tensor_array():
del b
+
+def test_empty_recordbatch_with_row_count():
+ """
+ A pyarrow.RecordBatch with no columns but with `num_rows` set.
+
+ `datafusion-python` gets this as the result of a `count(*)` query.
+ """
+
+ # Create an empty schema with no fields
+ batch = pa.RecordBatch.from_pydict({"a": [1, 2, 3, 4]}).select([])
+ num_rows = 4
+ assert batch.num_rows == num_rows
+ assert batch.num_columns == 0
+
+ b = rust.round_trip_record_batch(batch)
+ assert b == batch
+ assert b.schema == batch.schema
+ assert b.schema.metadata == batch.schema.metadata
+
+ assert b.num_rows == batch.num_rows
+
+ del b
+
def test_record_batch_reader():
"""
Python -> Rust -> Python
|
Allow converting empty `pyarrow.RecordBatch` to `arrow::RecordBatch`
**Is your feature request related to a problem or challenge? Please describe what you are trying to do.**
`datafusion-python` currently errors when calling `select count(*) from t` when `t` is a `pyarrow.Dataset`.
The resulting `pyarrow.RecordBatch` contains no rows and has a schema with no columns, but it does have `num_rows` set to the correct number.
**Describe the solution you'd like**
Support was added to arrow-rs in https://github.com/apache/arrow-rs/pull/1552 for a `RecordBatch` with zero columns but non zero row count.
I'd like `impl FromPyArrow for RecordBatch` to use this functionality.
https://github.com/apache/arrow-rs/blob/b711f23a136e0b094a70a4aafb020d4bb9f60619/arrow/src/pyarrow.rs#L334-L392
**Additional Context**
datafusion-python issue: https://github.com/apache/datafusion-python/issues/800
|
2024-08-28T17:30:36Z
|
52.2
|
678517018ddfd21b202a94df13b06dfa1ab8a378
|
|
apache/arrow-rs
| 6,295
|
apache__arrow-rs-6295
|
[
"3577"
] |
8c956a9f9ab26c14072740cce64c2b99cb039b13
|
diff --git a/parquet/src/encodings/rle.rs b/parquet/src/encodings/rle.rs
index 581f14b3c99a..97a122941f17 100644
--- a/parquet/src/encodings/rle.rs
+++ b/parquet/src/encodings/rle.rs
@@ -20,7 +20,6 @@ use std::{cmp, mem::size_of};
use bytes::Bytes;
use crate::errors::{ParquetError, Result};
-use crate::util::bit_util::from_le_slice;
use crate::util::bit_util::{self, BitReader, BitWriter, FromBytes};
/// Rle/Bit-Packing Hybrid Encoding
@@ -356,13 +355,13 @@ impl RleDecoder {
}
let value = if self.rle_left > 0 {
- let rle_value = from_le_slice(
+ let rle_value = T::try_from_le_slice(
&self
.current_value
.as_mut()
.expect("current_value should be Some")
.to_ne_bytes(),
- );
+ )?;
self.rle_left -= 1;
rle_value
} else {
@@ -388,9 +387,9 @@ impl RleDecoder {
let num_values =
cmp::min(buffer.len() - values_read, self.rle_left as usize);
for i in 0..num_values {
- let repeated_value = from_le_slice(
+ let repeated_value = T::try_from_le_slice(
&self.current_value.as_mut().unwrap().to_ne_bytes(),
- );
+ )?;
buffer[values_read + i] = repeated_value;
}
self.rle_left -= num_values as u32;
diff --git a/parquet/src/file/page_index/index.rs b/parquet/src/file/page_index/index.rs
index 0c23e4aa38b8..662ba45621ab 100644
--- a/parquet/src/file/page_index/index.rs
+++ b/parquet/src/file/page_index/index.rs
@@ -23,7 +23,6 @@ use crate::data_type::{AsBytes, ByteArray, FixedLenByteArray, Int96};
use crate::errors::ParquetError;
use crate::file::metadata::LevelHistogram;
use crate::format::{BoundaryOrder, ColumnIndex};
-use crate::util::bit_util::from_le_slice;
use std::fmt::Debug;
/// Typed statistics for one data page
@@ -192,7 +191,7 @@ impl<T: ParquetValueType> NativeIndex<T> {
let indexes = index
.min_values
.iter()
- .zip(index.max_values.into_iter())
+ .zip(index.max_values.iter())
.zip(index.null_pages.into_iter())
.zip(null_counts.into_iter())
.zip(rep_hists.into_iter())
@@ -205,9 +204,10 @@ impl<T: ParquetValueType> NativeIndex<T> {
let (min, max) = if is_null {
(None, None)
} else {
- let min = min.as_slice();
- let max = max.as_slice();
- (Some(from_le_slice::<T>(min)), Some(from_le_slice::<T>(max)))
+ (
+ Some(T::try_from_le_slice(min)?),
+ Some(T::try_from_le_slice(max)?),
+ )
};
Ok(PageIndex {
min,
@@ -321,4 +321,29 @@ mod tests {
assert_eq!(page_index.repetition_level_histogram(), None);
assert_eq!(page_index.definition_level_histogram(), None);
}
+
+ #[test]
+ fn test_invalid_column_index() {
+ let column_index = ColumnIndex {
+ null_pages: vec![true, false],
+ min_values: vec![
+ vec![],
+ vec![], // this shouldn't be empty as null_pages[1] is false
+ ],
+ max_values: vec![
+ vec![],
+ vec![], // this shouldn't be empty as null_pages[1] is false
+ ],
+ null_counts: None,
+ repetition_level_histograms: None,
+ definition_level_histograms: None,
+ boundary_order: BoundaryOrder::UNORDERED,
+ };
+
+ let err = NativeIndex::<i32>::try_new(column_index).unwrap_err();
+ assert_eq!(
+ err.to_string(),
+ "Parquet error: error converting value, expected 4 bytes got 0"
+ );
+ }
}
diff --git a/parquet/src/file/serialized_reader.rs b/parquet/src/file/serialized_reader.rs
index 0a3e51931867..b8ee4001a99c 100644
--- a/parquet/src/file/serialized_reader.rs
+++ b/parquet/src/file/serialized_reader.rs
@@ -781,7 +781,6 @@ mod tests {
use crate::file::writer::SerializedFileWriter;
use crate::record::RowAccessor;
use crate::schema::parser::parse_message_type;
- use crate::util::bit_util::from_le_slice;
use crate::util::test_common::file_util::{get_test_file, get_test_path};
use super::*;
@@ -1537,8 +1536,8 @@ mod tests {
assert_eq!(row_group_index.indexes.len(), page_size);
assert_eq!(row_group_index.boundary_order, boundary_order);
row_group_index.indexes.iter().all(|x| {
- x.min.as_ref().unwrap() >= &from_le_slice::<T>(min_max.0)
- && x.max.as_ref().unwrap() <= &from_le_slice::<T>(min_max.1)
+ x.min.as_ref().unwrap() >= &T::try_from_le_slice(min_max.0).unwrap()
+ && x.max.as_ref().unwrap() <= &T::try_from_le_slice(min_max.1).unwrap()
});
}
diff --git a/parquet/src/file/statistics.rs b/parquet/src/file/statistics.rs
index 680c75d6b2e5..854900f1edb9 100644
--- a/parquet/src/file/statistics.rs
+++ b/parquet/src/file/statistics.rs
@@ -47,7 +47,7 @@ use crate::basic::Type;
use crate::data_type::private::ParquetValueType;
use crate::data_type::*;
use crate::errors::{ParquetError, Result};
-use crate::util::bit_util::from_le_slice;
+use crate::util::bit_util::FromBytes;
pub(crate) mod private {
use super::*;
@@ -186,14 +186,18 @@ pub fn from_thrift(
// INT96 statistics may not be correct, because comparison is signed
// byte-wise, not actual timestamps. It is recommended to ignore
// min/max statistics for INT96 columns.
- let min = min.map(|data| {
+ let min = if let Some(data) = min {
assert_eq!(data.len(), 12);
- from_le_slice::<Int96>(&data)
- });
- let max = max.map(|data| {
+ Some(Int96::try_from_le_slice(&data)?)
+ } else {
+ None
+ };
+ let max = if let Some(data) = max {
assert_eq!(data.len(), 12);
- from_le_slice::<Int96>(&data)
- });
+ Some(Int96::try_from_le_slice(&data)?)
+ } else {
+ None
+ };
Statistics::int96(min, max, distinct_count, null_count, old_format)
}
Type::FLOAT => Statistics::float(
diff --git a/parquet/src/util/bit_util.rs b/parquet/src/util/bit_util.rs
index adbf45014c9d..a17202254cd6 100644
--- a/parquet/src/util/bit_util.rs
+++ b/parquet/src/util/bit_util.rs
@@ -23,12 +23,6 @@ use crate::data_type::{AsBytes, ByteArray, FixedLenByteArray, Int96};
use crate::errors::{ParquetError, Result};
use crate::util::bit_pack::{unpack16, unpack32, unpack64, unpack8};
-#[inline]
-pub fn from_le_slice<T: FromBytes>(bs: &[u8]) -> T {
- // TODO: propagate the error (#3577)
- T::try_from_le_slice(bs).unwrap()
-}
-
#[inline]
fn array_from_slice<const N: usize>(bs: &[u8]) -> Result<[u8; N]> {
// Need to slice as may be called with zero-padded values
@@ -91,15 +85,22 @@ unsafe impl FromBytes for Int96 {
type Buffer = [u8; 12];
fn try_from_le_slice(b: &[u8]) -> Result<Self> {
- Ok(Self::from_le_bytes(array_from_slice(b)?))
+ let bs: [u8; 12] = array_from_slice(b)?;
+ let mut i = Int96::new();
+ i.set_data(
+ u32::try_from_le_slice(&bs[0..4])?,
+ u32::try_from_le_slice(&bs[4..8])?,
+ u32::try_from_le_slice(&bs[8..12])?,
+ );
+ Ok(i)
}
fn from_le_bytes(bs: Self::Buffer) -> Self {
let mut i = Int96::new();
i.set_data(
- from_le_slice(&bs[0..4]),
- from_le_slice(&bs[4..8]),
- from_le_slice(&bs[8..12]),
+ u32::try_from_le_slice(&bs[0..4]).unwrap(),
+ u32::try_from_le_slice(&bs[4..8]).unwrap(),
+ u32::try_from_le_slice(&bs[8..12]).unwrap(),
);
i
}
@@ -438,7 +439,7 @@ impl BitReader {
}
// TODO: better to avoid copying here
- Some(from_le_slice(v.as_bytes()))
+ T::try_from_le_slice(v.as_bytes()).ok()
}
/// Read multiple values from their packed representation where each element is represented
@@ -1026,7 +1027,7 @@ mod tests {
.collect();
// Generic values used to check against actual values read from `get_batch`.
- let expected_values: Vec<T> = values.iter().map(|v| from_le_slice(v.as_bytes())).collect();
+ let expected_values: Vec<T> = values.iter().map(|v| T::try_from_le_slice(v.as_bytes()).unwrap()).collect();
(0..total).for_each(|i| writer.put_value(values[i], num_bits));
|
diff --git a/parquet/tests/arrow_reader/bad_data.rs b/parquet/tests/arrow_reader/bad_data.rs
index 6e325f119710..cbd5d4d3b29e 100644
--- a/parquet/tests/arrow_reader/bad_data.rs
+++ b/parquet/tests/arrow_reader/bad_data.rs
@@ -134,3 +134,28 @@ fn read_file(name: &str) -> Result<usize, ParquetError> {
}
Ok(num_rows)
}
+
+#[cfg(feature = "async")]
+#[tokio::test]
+async fn bad_metadata_err() {
+ use bytes::Bytes;
+ use parquet::arrow::async_reader::MetadataLoader;
+
+ let metadata_buffer = Bytes::from_static(include_bytes!("bad_raw_metadata.bin"));
+
+ let metadata_length = metadata_buffer.len();
+
+ let mut reader = std::io::Cursor::new(&metadata_buffer);
+ let mut loader = MetadataLoader::load(&mut reader, metadata_length, None)
+ .await
+ .unwrap();
+ loader.load_page_index(false, false).await.unwrap();
+ loader.load_page_index(false, true).await.unwrap();
+
+ let err = loader.load_page_index(true, false).await.unwrap_err();
+
+ assert_eq!(
+ err.to_string(),
+ "Parquet error: error converting value, expected 4 bytes got 0"
+ );
+}
diff --git a/parquet/tests/arrow_reader/bad_raw_metadata.bin b/parquet/tests/arrow_reader/bad_raw_metadata.bin
new file mode 100644
index 000000000000..47f9aa1c092b
Binary files /dev/null and b/parquet/tests/arrow_reader/bad_raw_metadata.bin differ
|
Don't Panic on Invalid Parquet Statistics
**Is your feature request related to a problem or challenge? Please describe what you are trying to do.**
<!--
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
(This section helps Arrow developers understand the context and *why* for this feature, in addition to the *what*)
-->
Invalid statistics will currently result in the parquet reader panicking, we should instead return an error
**Describe the solution you'd like**
<!--
A clear and concise description of what you want to happen.
-->
**Describe alternatives you've considered**
<!--
A clear and concise description of any alternative solutions or features you've considered.
-->
**Additional context**
<!--
Add any other context or screenshots about the feature request here.
-->
|
2024-08-23T19:10:39Z
|
52.2
|
678517018ddfd21b202a94df13b06dfa1ab8a378
|
|
apache/arrow-rs
| 6,290
|
apache__arrow-rs-6290
|
[
"6289"
] |
ebcc4a585136cd1d9696c38c41f71c9ced181f57
|
diff --git a/parquet/Cargo.toml b/parquet/Cargo.toml
index b97b2a571646..1d38e67a0f02 100644
--- a/parquet/Cargo.toml
+++ b/parquet/Cargo.toml
@@ -68,6 +68,7 @@ twox-hash = { version = "1.6", default-features = false }
paste = { version = "1.0" }
half = { version = "2.1", default-features = false, features = ["num-traits"] }
sysinfo = { version = "0.31.2", optional = true, default-features = false, features = ["system"] }
+crc32fast = { version = "1.4.2", optional = true, default-features = false }
[dev-dependencies]
base64 = { version = "0.22", default-features = false, features = ["std"] }
@@ -117,6 +118,8 @@ object_store = ["dep:object_store", "async"]
zstd = ["dep:zstd", "zstd-sys"]
# Display memory in example/write_parquet.rs
sysinfo = ["dep:sysinfo"]
+# Verify 32-bit CRC checksum when decoding parquet pages
+crc = ["dep:crc32fast"]
[[example]]
name = "read_parquet"
diff --git a/parquet/README.md b/parquet/README.md
index 0360d15db14f..a0441ee6026d 100644
--- a/parquet/README.md
+++ b/parquet/README.md
@@ -60,6 +60,7 @@ The `parquet` crate provides the following features which may be enabled in your
- `zstd` (default) - support for parquet using `zstd` compression
- `snap` (default) - support for parquet using `snappy` compression
- `cli` - parquet [CLI tools](https://github.com/apache/arrow-rs/tree/master/parquet/src/bin)
+- `crc` - enables functionality to automatically verify checksums of each page (if present) when decoding
- `experimental` - Experimental APIs which may change, even between minor releases
## Parquet Feature Status
@@ -82,4 +83,4 @@ The `parquet` crate provides the following features which may be enabled in your
## License
-Licensed under the Apache License, Version 2.0: http://www.apache.org/licenses/LICENSE-2.0.
+Licensed under the Apache License, Version 2.0: <http://www.apache.org/licenses/LICENSE-2.0>.
diff --git a/parquet/src/file/serialized_reader.rs b/parquet/src/file/serialized_reader.rs
index 6fb0f78c1613..b253b73a4fa0 100644
--- a/parquet/src/file/serialized_reader.rs
+++ b/parquet/src/file/serialized_reader.rs
@@ -390,6 +390,15 @@ pub(crate) fn decode_page(
physical_type: Type,
decompressor: Option<&mut Box<dyn Codec>>,
) -> Result<Page> {
+ // Verify the 32-bit CRC checksum of the page
+ #[cfg(feature = "crc")]
+ if let Some(expected_crc) = page_header.crc {
+ let crc = crc32fast::hash(&buffer);
+ if crc != expected_crc as u32 {
+ return Err(general_err!("Page CRC checksum mismatch"));
+ }
+ }
+
// When processing data page v2, depending on enabled compression for the
// page, we should account for uncompressed data ('offset') of
// repetition and definition levels.
|
diff --git a/parquet/tests/arrow_reader/checksum.rs b/parquet/tests/arrow_reader/checksum.rs
new file mode 100644
index 000000000000..c60908d8b95d
--- /dev/null
+++ b/parquet/tests/arrow_reader/checksum.rs
@@ -0,0 +1,73 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! This file contains an end to end test for verifying checksums when reading parquet files.
+
+use std::path::PathBuf;
+
+use arrow::util::test_util::parquet_test_data;
+use parquet::arrow::arrow_reader::ArrowReaderBuilder;
+
+#[test]
+fn test_datapage_v1_corrupt_checksum() {
+ let errors = read_file_batch_errors("datapage_v1-corrupt-checksum.parquet");
+ assert_eq!(errors, [
+ Err("Parquet argument error: Parquet error: Page CRC checksum mismatch".to_string()),
+ Ok(()),
+ Ok(()),
+ Err("Parquet argument error: Parquet error: Page CRC checksum mismatch".to_string()),
+ Err("Parquet argument error: Parquet error: Not all children array length are the same!".to_string())
+ ]);
+}
+
+#[test]
+fn test_datapage_v1_uncompressed_checksum() {
+ let errors = read_file_batch_errors("datapage_v1-uncompressed-checksum.parquet");
+ assert_eq!(errors, [Ok(()), Ok(()), Ok(()), Ok(()), Ok(())]);
+}
+
+#[test]
+fn test_datapage_v1_snappy_compressed_checksum() {
+ let errors = read_file_batch_errors("datapage_v1-snappy-compressed-checksum.parquet");
+ assert_eq!(errors, [Ok(()), Ok(()), Ok(()), Ok(()), Ok(())]);
+}
+
+#[test]
+fn test_plain_dict_uncompressed_checksum() {
+ let errors = read_file_batch_errors("plain-dict-uncompressed-checksum.parquet");
+ assert_eq!(errors, [Ok(())]);
+}
+#[test]
+fn test_rle_dict_snappy_checksum() {
+ let errors = read_file_batch_errors("rle-dict-snappy-checksum.parquet");
+ assert_eq!(errors, [Ok(())]);
+}
+
+/// Reads a file and returns a vector with one element per record batch.
+/// The record batch data is replaced with () and errors are stringified.
+fn read_file_batch_errors(name: &str) -> Vec<Result<(), String>> {
+ let path = PathBuf::from(parquet_test_data()).join(name);
+ println!("Reading file: {:?}", path);
+ let file = std::fs::File::open(&path).unwrap();
+ let reader = ArrowReaderBuilder::try_new(file).unwrap().build().unwrap();
+ reader
+ .map(|x| match x {
+ Ok(_) => Ok(()),
+ Err(e) => Err(e.to_string()),
+ })
+ .collect()
+}
diff --git a/parquet/tests/arrow_reader/mod.rs b/parquet/tests/arrow_reader/mod.rs
index cc4c8f3c977b..0e6783583cd5 100644
--- a/parquet/tests/arrow_reader/mod.rs
+++ b/parquet/tests/arrow_reader/mod.rs
@@ -36,6 +36,8 @@ use std::sync::Arc;
use tempfile::NamedTempFile;
mod bad_data;
+#[cfg(feature = "crc")]
+mod checksum;
mod statistics;
// returns a struct array with columns "int32_col", "float32_col" and "float64_col" with the specified values
|
Optionally verify 32-bit CRC checksum when decoding parquet pages
Currently the PageHeader::crc is never used
|
2024-08-22T23:28:20Z
|
53.0
|
f41c258246cd4bd9d89228cded9ed54dbd00faff
|
|
apache/arrow-rs
| 6,269
|
apache__arrow-rs-6269
|
[
"6268"
] |
acdd27a66ac7b5e07816dc648db00532110fb89a
|
diff --git a/parquet_derive/src/lib.rs b/parquet_derive/src/lib.rs
index 16b6a6699e2d..9c93e2cca978 100644
--- a/parquet_derive/src/lib.rs
+++ b/parquet_derive/src/lib.rs
@@ -146,10 +146,10 @@ pub fn parquet_record_writer(input: proc_macro::TokenStream) -> proc_macro::Toke
/// Derive flat, simple RecordReader implementations. Works by parsing
/// a struct tagged with `#[derive(ParquetRecordReader)]` and emitting
/// the correct writing code for each field of the struct. Column readers
-/// are generated in the order they are defined.
+/// are generated by matching names in the schema to the names in the struct.
///
-/// It is up to the programmer to keep the order of the struct
-/// fields lined up with the schema.
+/// It is up to the programmer to ensure the names in the struct
+/// fields line up with the schema.
///
/// Example:
///
@@ -189,7 +189,6 @@ pub fn parquet_record_reader(input: proc_macro::TokenStream) -> proc_macro::Toke
let field_names: Vec<_> = fields.iter().map(|f| f.ident.clone()).collect();
let reader_snippets: Vec<proc_macro2::TokenStream> =
field_infos.iter().map(|x| x.reader_snippet()).collect();
- let i: Vec<_> = (0..reader_snippets.len()).collect();
let derived_for = input.ident;
let generics = input.generics;
@@ -206,6 +205,12 @@ pub fn parquet_record_reader(input: proc_macro::TokenStream) -> proc_macro::Toke
let mut row_group_reader = row_group_reader;
+ // key: parquet file column name, value: column index
+ let mut name_to_index = std::collections::HashMap::new();
+ for (idx, col) in row_group_reader.metadata().schema_descr().columns().iter().enumerate() {
+ name_to_index.insert(col.name().to_string(), idx);
+ }
+
for _ in 0..num_records {
self.push(#derived_for {
#(
@@ -218,7 +223,14 @@ pub fn parquet_record_reader(input: proc_macro::TokenStream) -> proc_macro::Toke
#(
{
- if let Ok(mut column_reader) = row_group_reader.get_column_reader(#i) {
+ let idx: usize = match name_to_index.get(stringify!(#field_names)) {
+ Some(&col_idx) => col_idx,
+ None => {
+ let error_msg = format!("column name '{}' is not found in parquet file!", stringify!(#field_names));
+ return Err(::parquet::errors::ParquetError::General(error_msg));
+ }
+ };
+ if let Ok(mut column_reader) = row_group_reader.get_column_reader(idx) {
#reader_snippets
} else {
return Err(::parquet::errors::ParquetError::General("Failed to get next column".into()))
|
diff --git a/parquet_derive_test/src/lib.rs b/parquet_derive_test/src/lib.rs
index e7c7896cb7f3..2cd69d03d731 100644
--- a/parquet_derive_test/src/lib.rs
+++ b/parquet_derive_test/src/lib.rs
@@ -73,9 +73,9 @@ struct APartiallyCompleteRecord {
struct APartiallyOptionalRecord {
pub bool: bool,
pub string: String,
- pub maybe_i16: Option<i16>,
- pub maybe_i32: Option<i32>,
- pub maybe_u64: Option<u64>,
+ pub i16: Option<i16>,
+ pub i32: Option<i32>,
+ pub u64: Option<u64>,
pub isize: isize,
pub float: f32,
pub double: f64,
@@ -85,6 +85,22 @@ struct APartiallyOptionalRecord {
pub byte_vec: Vec<u8>,
}
+// This struct removes several fields from the "APartiallyCompleteRecord",
+// and it shuffles the fields.
+// we should still be able to load it from APartiallyCompleteRecord parquet file
+#[derive(PartialEq, ParquetRecordReader, Debug)]
+struct APrunedRecord {
+ pub bool: bool,
+ pub string: String,
+ pub byte_vec: Vec<u8>,
+ pub float: f32,
+ pub double: f64,
+ pub i16: i16,
+ pub i32: i32,
+ pub u64: u64,
+ pub isize: isize,
+}
+
#[cfg(test)]
mod tests {
use super::*;
@@ -240,12 +256,12 @@ mod tests {
#[test]
fn test_parquet_derive_read_optional_but_valid_column() {
let file = get_temp_file("test_parquet_derive_read_optional", &[]);
- let drs: Vec<APartiallyOptionalRecord> = vec![APartiallyOptionalRecord {
+ let drs = vec![APartiallyOptionalRecord {
bool: true,
string: "a string".into(),
- maybe_i16: Some(-45),
- maybe_i32: Some(456),
- maybe_u64: Some(4563424),
+ i16: Some(-45),
+ i32: Some(456),
+ u64: Some(4563424),
isize: -365,
float: 3.5,
double: f64::NAN,
@@ -273,9 +289,57 @@ mod tests {
let mut row_group = reader.get_row_group(0).unwrap();
out.read_from_row_group(&mut *row_group, 1).unwrap();
- assert_eq!(drs[0].maybe_i16.unwrap(), out[0].i16);
- assert_eq!(drs[0].maybe_i32.unwrap(), out[0].i32);
- assert_eq!(drs[0].maybe_u64.unwrap(), out[0].u64);
+ assert_eq!(drs[0].i16.unwrap(), out[0].i16);
+ assert_eq!(drs[0].i32.unwrap(), out[0].i32);
+ assert_eq!(drs[0].u64.unwrap(), out[0].u64);
+ }
+
+ #[test]
+ fn test_parquet_derive_read_pruned_and_shuffled_columns() {
+ let file = get_temp_file("test_parquet_derive_read_pruned", &[]);
+ let drs = vec![APartiallyCompleteRecord {
+ bool: true,
+ string: "a string".into(),
+ i16: -45,
+ i32: 456,
+ u64: 4563424,
+ isize: -365,
+ float: 3.5,
+ double: f64::NAN,
+ now: chrono::Utc::now().naive_local(),
+ date: chrono::naive::NaiveDate::from_ymd_opt(2015, 3, 14).unwrap(),
+ uuid: uuid::Uuid::new_v4(),
+ byte_vec: vec![0x65, 0x66, 0x67],
+ }];
+
+ let generated_schema = drs.as_slice().schema().unwrap();
+
+ let props = Default::default();
+ let mut writer =
+ SerializedFileWriter::new(file.try_clone().unwrap(), generated_schema, props).unwrap();
+
+ let mut row_group = writer.next_row_group().unwrap();
+ drs.as_slice().write_to_row_group(&mut row_group).unwrap();
+ row_group.close().unwrap();
+ writer.close().unwrap();
+
+ use parquet::file::{reader::FileReader, serialized_reader::SerializedFileReader};
+ let reader = SerializedFileReader::new(file).unwrap();
+ let mut out: Vec<APrunedRecord> = Vec::new();
+
+ let mut row_group = reader.get_row_group(0).unwrap();
+ out.read_from_row_group(&mut *row_group, 1).unwrap();
+
+ assert_eq!(drs[0].bool, out[0].bool);
+ assert_eq!(drs[0].string, out[0].string);
+ assert_eq!(drs[0].byte_vec, out[0].byte_vec);
+ assert_eq!(drs[0].float, out[0].float);
+ assert!(drs[0].double.is_nan());
+ assert!(out[0].double.is_nan());
+ assert_eq!(drs[0].i16, out[0].i16);
+ assert_eq!(drs[0].i32, out[0].i32);
+ assert_eq!(drs[0].u64, out[0].u64);
+ assert_eq!(drs[0].isize, out[0].isize);
}
/// Returns file handle for a temp file in 'target' directory with a provided content
|
parquet_derive: support reading selected columns from parquet file
# Feature Description
I'm effectively using `parquet_derive` in my project, and I found that there are two inconvenient constraints:
1. The `ParquetRecordReader` enforces the struct to organize fields exactly in the **same order** in the parquet file.
2. The `ParquetRecordReader` enforces the struct to parse **all fields** in the parquet file. "all" might be exaggerating, but it is what happens if you want to get the last column, even only the last column.
As describe in its document:
> Derive flat, simple RecordReader implementations. Works by parsing a struct tagged with #[derive(ParquetRecordReader)] and emitting the correct writing code for each field of the struct. Column readers are generated in the order they are defined.
In my use cases (and I believe these are common requests), user should be able to read pruned parquet file, and they should have the freedom to re-organize fields' ordering in decoded struct.
# My Solution
I introduced a `HashMap` to map field name to its index. Of course, it assumes field name is unique, and this is always true since the current `parquet_derive` macro is applied to a flat struct without nesting.
# Pros and Cons
Obviously removing those two constraints makes `parquet_derive` a more handy tool.
But it has some implied changes:
- previously, since the `ParquetRecordReader` relies only on the index of fields, it allows that a field is named as `abc` to implicitly rename itself to `bcd` in the encoded struct. After this change, user must guarantee that the field name in `ParquetRecordReader` to exist in parquet columns.
- I think it is more intuitive and more natural to constrain the "field name" rather than "index", if we use `ParquetRecordReader` to derive a decoder macro.
- allowing reading partial parquet file may improve the performance for some users, but introducing a `HashMap` in the parser may slowdown the function a bit.
- when the `num_records` in a single parsing call is large enough, the cost of `HashMap` lookup is negligible.
Both implied changes seem to have a more positive impact than negative impact. Please review if this is a reasonable feature request.
|
2024-08-18T14:39:49Z
|
52.2
|
678517018ddfd21b202a94df13b06dfa1ab8a378
|
|
apache/arrow-rs
| 6,204
|
apache__arrow-rs-6204
|
[
"6203"
] |
db239e5b3aa05985b0149187c8b93b88e2285b48
| "diff --git a/parquet/benches/arrow_reader.rs b/parquet/benches/arrow_reader.rs\nindex 814e75c249bf.(...TRUNCATED)
| "diff --git a/parquet/src/util/test_common/page_util.rs b/parquet/src/util/test_common/page_util.rs\(...TRUNCATED)
| "Add benchmarks for `BYTE_STREAM_SPLIT` encoded Parquet `FIXED_LEN_BYTE_ARRAY` data\n**Is your featu(...TRUNCATED)
|
2024-08-06T22:53:02Z
|
52.2
|
678517018ddfd21b202a94df13b06dfa1ab8a378
|
End of preview. Expand
in Data Studio
README.md exists but content is empty.
- Downloads last month
- 2