repo
stringclasses 1
value | pull_number
int64 443
6.37k
| instance_id
stringlengths 20
21
| issue_numbers
sequencelengths 1
1
| base_commit
stringlengths 40
40
| patch
stringlengths 602
65k
| test_patch
stringlengths 543
17.6k
| problem_statement
stringlengths 495
4.62k
| hints_text
stringlengths 0
5.6k
| created_at
stringlengths 20
20
| version
stringlengths 3
4
| environment_setup_commit
stringlengths 40
40
| FAIL_TO_PASS
sequencelengths 1
5
| PASS_TO_PASS
sequencelengths 2
67
| FAIL_TO_FAIL
sequencelengths 0
0
| PASS_TO_FAIL
sequencelengths 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
apache/arrow-rs
| 4,351
|
apache__arrow-rs-4351
|
[
"4350"
] |
008cf9c27424d581a67ba97f338a22b6eace9cc1
|
diff --git a/object_store/Cargo.toml b/object_store/Cargo.toml
index 28bf29f7f1e0..4002a1865fa6 100644
--- a/object_store/Cargo.toml
+++ b/object_store/Cargo.toml
@@ -75,3 +75,7 @@ tempfile = "3.1.0"
futures-test = "0.3"
rand = "0.8"
hyper = { version = "0.14.24", features = ["server"] }
+
+[[test]]
+name = "get_range_file"
+path = "tests/get_range_file.rs"
diff --git a/object_store/src/lib.rs b/object_store/src/lib.rs
index 98bbb7adceb9..864cabc4a8c0 100644
--- a/object_store/src/lib.rs
+++ b/object_store/src/lib.rs
@@ -359,10 +359,20 @@ pub trait ObjectStore: std::fmt::Display + Send + Sync + Debug + 'static {
/// in the given byte range
async fn get_range(&self, location: &Path, range: Range<usize>) -> Result<Bytes> {
let options = GetOptions {
- range: Some(range),
+ range: Some(range.clone()),
..Default::default()
};
- self.get_opts(location, options).await?.bytes().await
+ // Temporary until GetResult::File supports range (#4352)
+ match self.get_opts(location, options).await? {
+ GetResult::Stream(s) => collect_bytes(s, None).await,
+ #[cfg(not(target_arch = "wasm32"))]
+ GetResult::File(mut file, path) => {
+ maybe_spawn_blocking(move || local::read_range(&mut file, &path, range))
+ .await
+ }
+ #[cfg(target_arch = "wasm32")]
+ _ => unimplemented!("File IO not implemented on wasm32."),
+ }
}
/// Return the bytes that are stored at the specified location
diff --git a/object_store/src/local.rs b/object_store/src/local.rs
index 6039f8dbadf3..ffff6a5739d5 100644
--- a/object_store/src/local.rs
+++ b/object_store/src/local.rs
@@ -863,7 +863,7 @@ impl AsyncWrite for LocalUpload {
}
}
-fn read_range(file: &mut File, path: &PathBuf, range: Range<usize>) -> Result<Bytes> {
+pub(crate) fn read_range(file: &mut File, path: &PathBuf, range: Range<usize>) -> Result<Bytes> {
let to_read = range.end - range.start;
file.seek(SeekFrom::Start(range.start as u64))
.context(SeekSnafu { path })?;
|
diff --git a/object_store/tests/get_range_file.rs b/object_store/tests/get_range_file.rs
new file mode 100644
index 000000000000..f926e3b07f2a
--- /dev/null
+++ b/object_store/tests/get_range_file.rs
@@ -0,0 +1,116 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! Tests the default implementation of get_range handles GetResult::File correctly (#4350)
+
+use async_trait::async_trait;
+use bytes::Bytes;
+use futures::stream::BoxStream;
+use object_store::local::LocalFileSystem;
+use object_store::path::Path;
+use object_store::{
+ GetOptions, GetResult, ListResult, MultipartId, ObjectMeta, ObjectStore,
+};
+use std::fmt::Formatter;
+use tempfile::tempdir;
+use tokio::io::AsyncWrite;
+
+#[derive(Debug)]
+struct MyStore(LocalFileSystem);
+
+impl std::fmt::Display for MyStore {
+ fn fmt(&self, _: &mut Formatter<'_>) -> std::fmt::Result {
+ todo!()
+ }
+}
+
+#[async_trait]
+impl ObjectStore for MyStore {
+ async fn put(&self, path: &Path, data: Bytes) -> object_store::Result<()> {
+ self.0.put(path, data).await
+ }
+
+ async fn put_multipart(
+ &self,
+ _: &Path,
+ ) -> object_store::Result<(MultipartId, Box<dyn AsyncWrite + Unpin + Send>)> {
+ todo!()
+ }
+
+ async fn abort_multipart(
+ &self,
+ _: &Path,
+ _: &MultipartId,
+ ) -> object_store::Result<()> {
+ todo!()
+ }
+
+ async fn get_opts(
+ &self,
+ location: &Path,
+ options: GetOptions,
+ ) -> object_store::Result<GetResult> {
+ self.0.get_opts(location, options).await
+ }
+
+ async fn head(&self, _: &Path) -> object_store::Result<ObjectMeta> {
+ todo!()
+ }
+
+ async fn delete(&self, _: &Path) -> object_store::Result<()> {
+ todo!()
+ }
+
+ async fn list(
+ &self,
+ _: Option<&Path>,
+ ) -> object_store::Result<BoxStream<'_, object_store::Result<ObjectMeta>>> {
+ todo!()
+ }
+
+ async fn list_with_delimiter(
+ &self,
+ _: Option<&Path>,
+ ) -> object_store::Result<ListResult> {
+ todo!()
+ }
+
+ async fn copy(&self, _: &Path, _: &Path) -> object_store::Result<()> {
+ todo!()
+ }
+
+ async fn copy_if_not_exists(&self, _: &Path, _: &Path) -> object_store::Result<()> {
+ todo!()
+ }
+}
+
+#[tokio::test]
+async fn test_get_range() {
+ let tmp = tempdir().unwrap();
+ let store = MyStore(LocalFileSystem::new_with_prefix(tmp.path()).unwrap());
+ let path = Path::from("foo");
+
+ let expected = Bytes::from_static(b"hello world");
+ store.put(&path, expected.clone()).await.unwrap();
+ let fetched = store.get(&path).await.unwrap().bytes().await.unwrap();
+ assert_eq!(expected, fetched);
+
+ for range in [0..10, 3..5, 0..expected.len()] {
+ let data = store.get_range(&path, range.clone()).await.unwrap();
+ assert_eq!(&data[..], &expected[range])
+ }
+}
|
Default ObjectStore::get_range Doesn't Apply Range to GetResult::File
**Describe the bug**
<!--
A clear and concise description of what the bug is.
-->
The default implementation of `ObjectStore::get_range` added in #4212 incorrectly handles if `GetResult::File` is returned, instead returning the entire byte range. This is incorrect
**To Reproduce**
<!--
Steps to reproduce the behavior:
-->
**Expected behavior**
<!--
A clear and concise description of what you expected to happen.
-->
**Additional context**
<!--
Add any other context about the problem here.
-->
|
2023-06-02T16:36:39Z
|
40.0
|
008cf9c27424d581a67ba97f338a22b6eace9cc1
|
[
"test_get_range"
] |
[
"src/lib.rs - (line 154)",
"src/lib.rs - (line 53)",
"src/lib.rs - (line 178)",
"src/lib.rs - (line 106)",
"src/limit.rs - limit::LimitStore (line 39)",
"src/path/mod.rs - path::Path (line 123)",
"src/path/mod.rs - path::Path (line 102)",
"src/path/mod.rs - path::Path (line 112)"
] |
[] |
[] |
|
apache/arrow-rs
| 4,343
|
apache__arrow-rs-4343
|
[
"4324"
] |
795259502d8d19f1e929d8ebf1b2819b6ab145c4
|
diff --git a/arrow-data/src/transform/mod.rs b/arrow-data/src/transform/mod.rs
index c74875072233..f4b2b46d1723 100644
--- a/arrow-data/src/transform/mod.rs
+++ b/arrow-data/src/transform/mod.rs
@@ -53,7 +53,7 @@ struct _MutableArrayData<'a> {
pub null_count: usize,
pub len: usize,
- pub null_buffer: MutableBuffer,
+ pub null_buffer: Option<MutableBuffer>,
// arrow specification only allows up to 3 buffers (2 ignoring the nulls above).
// Thus, we place them in the stack to avoid bound checks and greater data locality.
@@ -63,6 +63,12 @@ struct _MutableArrayData<'a> {
}
impl<'a> _MutableArrayData<'a> {
+ fn null_buffer(&mut self) -> &mut MutableBuffer {
+ self.null_buffer
+ .as_mut()
+ .expect("MutableArrayData not nullable")
+ }
+
fn freeze(self, dictionary: Option<ArrayData>) -> ArrayDataBuilder {
let buffers = into_buffers(&self.data_type, self.buffer1, self.buffer2);
@@ -77,10 +83,13 @@ impl<'a> _MutableArrayData<'a> {
}
};
- let nulls = (self.null_count > 0).then(|| {
- let bools = BooleanBuffer::new(self.null_buffer.into(), 0, self.len);
- unsafe { NullBuffer::new_unchecked(bools, self.null_count) }
- });
+ let nulls = self
+ .null_buffer
+ .map(|nulls| {
+ let bools = BooleanBuffer::new(nulls.into(), 0, self.len);
+ unsafe { NullBuffer::new_unchecked(bools, self.null_count) }
+ })
+ .filter(|n| n.null_count() > 0);
ArrayDataBuilder::new(self.data_type)
.offset(0)
@@ -95,22 +104,25 @@ fn build_extend_null_bits(array: &ArrayData, use_nulls: bool) -> ExtendNullBits
if let Some(nulls) = array.nulls() {
let bytes = nulls.validity();
Box::new(move |mutable, start, len| {
- utils::resize_for_bits(&mut mutable.null_buffer, mutable.len + len);
+ let mutable_len = mutable.len;
+ let out = mutable.null_buffer();
+ utils::resize_for_bits(out, mutable_len + len);
mutable.null_count += set_bits(
- mutable.null_buffer.as_slice_mut(),
+ out.as_slice_mut(),
bytes,
- mutable.len,
+ mutable_len,
nulls.offset() + start,
len,
);
})
} else if use_nulls {
Box::new(|mutable, _, len| {
- utils::resize_for_bits(&mut mutable.null_buffer, mutable.len + len);
- let write_data = mutable.null_buffer.as_slice_mut();
- let offset = mutable.len;
+ let mutable_len = mutable.len;
+ let out = mutable.null_buffer();
+ utils::resize_for_bits(out, mutable_len + len);
+ let write_data = out.as_slice_mut();
(0..len).for_each(|i| {
- bit_util::set_bit(write_data, offset + i);
+ bit_util::set_bit(write_data, mutable_len + i);
});
})
} else {
@@ -555,13 +567,10 @@ impl<'a> MutableArrayData<'a> {
.map(|array| build_extend_null_bits(array, use_nulls))
.collect();
- let null_buffer = if use_nulls {
+ let null_buffer = use_nulls.then(|| {
let null_bytes = bit_util::ceil(array_capacity, 8);
MutableBuffer::from_len_zeroed(null_bytes)
- } else {
- // create 0 capacity mutable buffer with the intention that it won't be used
- MutableBuffer::with_capacity(0)
- };
+ });
let extend_values = match &data_type {
DataType::Dictionary(_, _) => {
@@ -624,13 +633,18 @@ impl<'a> MutableArrayData<'a> {
}
/// Extends this [MutableArrayData] with null elements, disregarding the bound arrays
+ ///
+ /// # Panics
+ ///
+ /// Panics if [`MutableArrayData`] not created with `use_nulls` or nullable source arrays
+ ///
pub fn extend_nulls(&mut self, len: usize) {
- // TODO: null_buffer should probably be extended here as well
- // otherwise is_valid() could later panic
- // add test to confirm
+ self.data.len += len;
+ let bit_len = bit_util::ceil(self.data.len, 8);
+ let nulls = self.data.null_buffer();
+ nulls.resize(bit_len, 0);
self.data.null_count += len;
(self.extend_nulls)(&mut self.data, len);
- self.data.len += len;
}
/// Returns the current length
|
diff --git a/arrow/tests/array_transform.rs b/arrow/tests/array_transform.rs
index 40938c80f4c3..ebbadc00aecd 100644
--- a/arrow/tests/array_transform.rs
+++ b/arrow/tests/array_transform.rs
@@ -922,6 +922,29 @@ fn test_fixed_size_binary_append() {
assert_eq!(result, expected);
}
+#[test]
+fn test_extend_nulls() {
+ let int = Int32Array::from(vec![1, 2, 3, 4]).into_data();
+ let mut mutable = MutableArrayData::new(vec![&int], true, 4);
+ mutable.extend(0, 2, 3);
+ mutable.extend_nulls(2);
+
+ let data = mutable.freeze();
+ data.validate_full().unwrap();
+ let out = Int32Array::from(data);
+
+ assert_eq!(out.null_count(), 2);
+ assert_eq!(out.iter().collect::<Vec<_>>(), vec![Some(3), None, None]);
+}
+
+#[test]
+#[should_panic(expected = "MutableArrayData not nullable")]
+fn test_extend_nulls_panic() {
+ let int = Int32Array::from(vec![1, 2, 3, 4]).into_data();
+ let mut mutable = MutableArrayData::new(vec![&int], false, 4);
+ mutable.extend_nulls(2);
+}
+
/*
// this is an old test used on a meanwhile removed dead code
// that is still useful when `MutableArrayData` supports fixed-size lists.
|
concat_batches panics with total_len <= bit_len assertion for records with lists
**Describe the bug**
`concat`, used by `concat_batches`, does not appear to allocate sufficient `capacities` when constructing the `MutableArrayData`. Concatenating records that contain lists of structs results in the following panic:
```
assertion failed: total_len <= bit_len
thread 'concat_test' panicked at 'assertion failed: total_len <= bit_len', /Users/x/.cargo/registry/src/index.crates.io-6f17d22bba15001f/arrow-buffer-40.0.0/src/buffer/boolean.rs:55:9
stack backtrace:
0: rust_begin_unwind
at /rustc/84c898d65adf2f39a5a98507f1fe0ce10a2b8dbc/library/std/src/panicking.rs:579:5
1: core::panicking::panic_fmt
at /rustc/84c898d65adf2f39a5a98507f1fe0ce10a2b8dbc/library/core/src/panicking.rs:64:14
2: core::panicking::panic
at /rustc/84c898d65adf2f39a5a98507f1fe0ce10a2b8dbc/library/core/src/panicking.rs:114:5
3: arrow_buffer::buffer::boolean::BooleanBuffer::new
at /Users/x/.cargo/registry/src/index.crates.io-6f17d22bba15001f/arrow-buffer-40.0.0/src/buffer/boolean.rs:55:9
4: arrow_data::transform::_MutableArrayData::freeze::{{closure}}
at /Users/x/.cargo/registry/src/index.crates.io-6f17d22bba15001f/arrow-data-40.0.0/src/transform/mod.rs:81:25
5: core::bool::<impl bool>::then
at /rustc/84c898d65adf2f39a5a98507f1fe0ce10a2b8dbc/library/core/src/bool.rs:71:24
6: arrow_data::transform::_MutableArrayData::freeze
at /Users/x/.cargo/registry/src/index.crates.io-6f17d22bba15001f/arrow-data-40.0.0/src/transform/mod.rs:80:21
7: arrow_data::transform::MutableArrayData::freeze
at /Users/x/.cargo/registry/src/index.crates.io-6f17d22bba15001f/arrow-data-40.0.0/src/transform/mod.rs:656:18
8: arrow_data::transform::_MutableArrayData::freeze
at /Users/x/.cargo/registry/src/index.crates.io-6f17d22bba15001f/arrow-data-40.0.0/src/transform/mod.rs:74:37
9: arrow_data::transform::MutableArrayData::freeze
at /Users/x/.cargo/registry/src/index.crates.io-6f17d22bba15001f/arrow-data-40.0.0/src/transform/mod.rs:656:18
10: arrow_data::transform::_MutableArrayData::freeze
at /Users/x/.cargo/registry/src/index.crates.io-6f17d22bba15001f/arrow-data-40.0.0/src/transform/mod.rs:74:37
11: arrow_data::transform::MutableArrayData::freeze
at /Users/x/.cargo/registry/src/index.crates.io-6f17d22bba15001f/arrow-data-40.0.0/src/transform/mod.rs:656:18
12: arrow_data::transform::_MutableArrayData::freeze
at /Users/x/.cargo/registry/src/index.crates.io-6f17d22bba15001f/arrow-data-40.0.0/src/transform/mod.rs:74:37
13: arrow_data::transform::MutableArrayData::freeze
at /Users/x/.cargo/registry/src/index.crates.io-6f17d22bba15001f/arrow-data-40.0.0/src/transform/mod.rs:656:18
```
**To Reproduce**
Call `concat_batches` with `RecordBatch`s that contain lists of structs (on the order of 20–50 structs in the list per `RecordBatch`). If I modify [the capacity calculation in concat](https://github.com/apache/arrow-rs/blob/c295b172b37902d5fa41ef275ff5b86caf9fde75/arrow-select/src/concat.rs#L76-L82) to add a constant factor for lists, the error does not occur:
```rust
let capacity = match d {
DataType::Utf8 => binary_capacity::<Utf8Type>(arrays),
DataType::LargeUtf8 => binary_capacity::<LargeUtf8Type>(arrays),
DataType::Binary => binary_capacity::<BinaryType>(arrays),
DataType::LargeBinary => binary_capacity::<LargeBinaryType>(arrays),
DataType::List(_) => {
Capacities::Array(arrays.iter().map(|a| a.len()).sum::<usize>() + 500) // <- 500 added here
}
_ => Capacities::Array(arrays.iter().map(|a| a.len()).sum()),
};
```
**Expected behavior**
No panics when concatenating `RecordBatch`s with lists.
**Additional context**
Reproduced with Arrow versions 37–40.
|
Possibly related to https://github.com/apache/arrow-rs/issues/1230. The error would suggest that the validity buffer is not the correct length. I'll take a look in a bit, MutableArrayData is overdue some TLC in this regard (#1225)
Are you able to share a reproducer for this?
Sure, see https://github.com/ElementalCognition/arrow-bug.
Thank you, very helpful. It looks like this is https://github.com/apache/arrow-rs/issues/1230, which I will fix now
As a happy accident https://github.com/apache/arrow-rs/pull/4333 fixed your reproducer as it removed the use of extend_nulls when appending structs
|
2023-06-02T07:51:43Z
|
40.0
|
008cf9c27424d581a67ba97f338a22b6eace9cc1
|
[
"test_extend_nulls_panic - should panic"
] |
[
"arrow/src/lib.rs - (line 140)",
"arrow/src/lib.rs - (line 121)",
"arrow/src/lib.rs - (line 253)",
"arrow/src/lib.rs - (line 63)",
"arrow/src/lib.rs - (line 227)",
"arrow/src/lib.rs - (line 282)",
"arrow/src/util/string_writer.rs - util::string_writer (line 25)",
"arrow/src/lib.rs - (line 97)",
"arrow/src/lib.rs - (line 78)",
"arrow/src/lib.rs - (line 161)",
"arrow/src/lib.rs - (line 198)",
"test_binary_fixed_sized_offsets",
"test_bool",
"test_decimal",
"test_decimal_null_offset_nulls",
"test_extend_nulls",
"test_decimal_offset",
"test_fixed_size_binary_append",
"test_dictionary",
"test_list_append",
"test_list_null_offset",
"test_list_nulls_append",
"test_list_of_strings_append",
"test_map_nulls_append",
"test_multiple_with_nulls",
"test_null",
"test_primitive",
"test_primitive_null_offset",
"test_primitive_null_offset_nulls",
"test_primitive_offset",
"test_string_null_offset_nulls",
"test_string_offsets",
"test_struct",
"test_struct_many",
"test_struct_nulls",
"test_struct_offset",
"test_variable_sized_offsets",
"test_variable_sized_nulls"
] |
[] |
[] |
apache/arrow-rs
| 4,327
|
apache__arrow-rs-4327
|
[
"3680"
] |
30196d89bfab698c50bcde6c304f0599011a1100
|
diff --git a/parquet/src/column/writer/mod.rs b/parquet/src/column/writer/mod.rs
index 310519f4a39c..fc5e29b03256 100644
--- a/parquet/src/column/writer/mod.rs
+++ b/parquet/src/column/writer/mod.rs
@@ -308,6 +308,17 @@ impl<'a, E: ColumnValueEncoder> GenericColumnWriter<'a, E> {
max: Option<&E::T>,
distinct_count: Option<u64>,
) -> Result<usize> {
+ // Check if number of definition levels is the same as number of repetition levels.
+ if let (Some(def), Some(rep)) = (def_levels, rep_levels) {
+ if def.len() != rep.len() {
+ return Err(general_err!(
+ "Inconsistent length of definition and repetition levels: {} != {}",
+ def.len(),
+ rep.len()
+ ));
+ }
+ }
+
// We check for DataPage limits only after we have inserted the values. If a user
// writes a large number of values, the DataPage size can be well above the limit.
//
@@ -323,10 +334,6 @@ impl<'a, E: ColumnValueEncoder> GenericColumnWriter<'a, E> {
None => values.len(),
};
- // Find out number of batches to process.
- let write_batch_size = self.props.write_batch_size();
- let num_batches = num_levels / write_batch_size;
-
// If only computing chunk-level statistics compute them here, page-level statistics
// are computed in [`Self::write_mini_batch`] and used to update chunk statistics in
// [`Self::add_data_page`]
@@ -374,27 +381,28 @@ impl<'a, E: ColumnValueEncoder> GenericColumnWriter<'a, E> {
let mut values_offset = 0;
let mut levels_offset = 0;
- for _ in 0..num_batches {
+ let base_batch_size = self.props.write_batch_size();
+ while levels_offset < num_levels {
+ let mut end_offset = num_levels.min(levels_offset + base_batch_size);
+
+ // Split at record boundary
+ if let Some(r) = rep_levels {
+ while end_offset < r.len() && r[end_offset] != 0 {
+ end_offset += 1;
+ }
+ }
+
values_offset += self.write_mini_batch(
values,
values_offset,
value_indices,
- write_batch_size,
- def_levels.map(|lv| &lv[levels_offset..levels_offset + write_batch_size]),
- rep_levels.map(|lv| &lv[levels_offset..levels_offset + write_batch_size]),
+ end_offset - levels_offset,
+ def_levels.map(|lv| &lv[levels_offset..end_offset]),
+ rep_levels.map(|lv| &lv[levels_offset..end_offset]),
)?;
- levels_offset += write_batch_size;
+ levels_offset = end_offset;
}
- values_offset += self.write_mini_batch(
- values,
- values_offset,
- value_indices,
- num_levels - levels_offset,
- def_levels.map(|lv| &lv[levels_offset..]),
- rep_levels.map(|lv| &lv[levels_offset..]),
- )?;
-
// Return total number of values processed.
Ok(values_offset)
}
@@ -522,18 +530,6 @@ impl<'a, E: ColumnValueEncoder> GenericColumnWriter<'a, E> {
def_levels: Option<&[i16]>,
rep_levels: Option<&[i16]>,
) -> Result<usize> {
- // Check if number of definition levels is the same as number of repetition
- // levels.
- if let (Some(def), Some(rep)) = (def_levels, rep_levels) {
- if def.len() != rep.len() {
- return Err(general_err!(
- "Inconsistent length of definition and repetition levels: {} != {}",
- def.len(),
- rep.len()
- ));
- }
- }
-
// Process definition levels and determine how many values to write.
let values_to_write = if self.descr.max_def_level() > 0 {
let levels = def_levels.ok_or_else(|| {
@@ -569,6 +565,13 @@ impl<'a, E: ColumnValueEncoder> GenericColumnWriter<'a, E> {
)
})?;
+ if !levels.is_empty() && levels[0] != 0 {
+ return Err(general_err!(
+ "Write must start at a record boundary, got non-zero repetition level of {}",
+ levels[0]
+ ));
+ }
+
// Count the occasions where we start a new row
for &level in levels {
self.page_metrics.num_buffered_rows += (level == 0) as u32
@@ -2255,6 +2258,7 @@ mod tests {
let mut buf: Vec<i16> = Vec::new();
let rep_levels = if max_rep_level > 0 {
random_numbers_range(max_size, 0, max_rep_level + 1, &mut buf);
+ buf[0] = 0; // Must start on record boundary
Some(&buf[..])
} else {
None
|
diff --git a/parquet/tests/arrow_writer_layout.rs b/parquet/tests/arrow_writer_layout.rs
index 142112b7b686..3142c8c52063 100644
--- a/parquet/tests/arrow_writer_layout.rs
+++ b/parquet/tests/arrow_writer_layout.rs
@@ -19,6 +19,7 @@
use arrow::array::{Int32Array, StringArray};
use arrow::record_batch::RecordBatch;
+use arrow_array::builder::{Int32Builder, ListBuilder};
use bytes::Bytes;
use parquet::arrow::arrow_reader::{ArrowReaderOptions, ParquetRecordBatchReaderBuilder};
use parquet::arrow::ArrowWriter;
@@ -502,3 +503,45 @@ fn test_string() {
},
});
}
+
+#[test]
+fn test_list() {
+ let mut list = ListBuilder::new(Int32Builder::new());
+ for _ in 0..200 {
+ let values = list.values();
+ for i in 0..8 {
+ values.append_value(i);
+ }
+ list.append(true);
+ }
+ let array = Arc::new(list.finish()) as _;
+
+ let batch = RecordBatch::try_from_iter([("col", array)]).unwrap();
+ let props = WriterProperties::builder()
+ .set_dictionary_enabled(false)
+ .set_data_page_row_count_limit(20)
+ .set_write_batch_size(3)
+ .build();
+
+ // Test rows not split across pages
+ do_test(LayoutTest {
+ props,
+ batches: vec![batch],
+ layout: Layout {
+ row_groups: vec![RowGroup {
+ columns: vec![ColumnChunk {
+ pages: (0..10)
+ .map(|_| Page {
+ rows: 20,
+ page_header_size: 34,
+ compressed_size: 672,
+ encoding: Encoding::PLAIN,
+ page_type: PageType::DATA_PAGE,
+ })
+ .collect(),
+ dictionary_page: None,
+ }],
+ }],
+ },
+ });
+}
|
Should Parquet pages begin on the start of a row?
**Which part is this question about**
Parquet writer
<!--
Is it code base, library api, documentation or some other part?
-->
**Describe your question**
In #1777 it was brought up [here](https://github.com/apache/arrow-rs/issues/1777#issuecomment-1147686956) that the Parquet spec seems to require that pages begin on record boundaries when writing offset indices. Additionally, the same can be said for V2 page headers (see comment in the parquet-format [thrift](https://github.com/apache/parquet-format/blob/5205dc7b7c0b910ea6af33cadbd2963c0c47c726/src/main/thrift/parquet.thrift#L564) file). It appears that this reasoning was rejected, and the Parquet writer continues to write files where rows can span multiple pages. I'm wondering if this should still be considered a bug given how difficult finding individual rows is made with this behavior in place.
<!--
A clear and concise description of what the question is.
-->
**Additional context**
I've been working with the cuDF Parquet reader, and files with large nested rows can create havoc when rows span pages. Parquet-mr appears to hew to the "pages start with R=0" rule.
<!--
Add any other context about the problem here.
-->
|
I'm not sure it is a bug per se, but I definitely think the APIs shouldn't do it unilaterally as they currently do.
I would support making `GenericColumnWriter::write_batch_internal` call `write_mini_batch` treating the `WriterProperties::write_batch_size` as being a number of rows, as opposed to levels. Tbh this is probably what most people assume it does anyway. I'll try to get something up today
This should mean that if the user writes in a whole number of rows, the rows won't be split across multiple pages. Handling if the user writes partial rows would be significantly more complicated, and I think we can safely punt on it.
Edit: Ran out of time today, will do first thing tomorrow. Will try to ensure this makes the release slated for tomorrow, but will depend on reviews
> Will try to ensure this makes the release slated for tomorrow, but will depend on reviews
No rush, this might want some thought. One thing to consider is that this can lead to very uneven page sizes if the data is very "lumpy". That's the price to pay when using V2 headers or page indexes, but you may want to consider keeping the current behavior when neither of those is requested.
|
2023-05-31T19:17:10Z
|
40.0
|
008cf9c27424d581a67ba97f338a22b6eace9cc1
|
[
"test_list"
] |
[
"parquet/src/file/mod.rs - file (line 63) - compile",
"parquet/src/record/api.rs - record::api::Row::get_column_iter (line 62) - compile",
"parquet/src/file/mod.rs - file (line 80) - compile",
"parquet/src/file/mod.rs - file (line 29) - compile",
"parquet/src/column/mod.rs - column (line 38) - compile",
"parquet/src/schema/types.rs - schema::types::ColumnPath::append (line 687)",
"parquet/src/file/properties.rs - file::properties (line 55)",
"parquet/src/schema/types.rs - schema::types::ColumnPath::string (line 676)",
"parquet/src/file/statistics.rs - file::statistics (line 23)",
"parquet/src/arrow/mod.rs - arrow (line 52)",
"parquet/src/arrow/arrow_reader/selection.rs - arrow::arrow_reader::selection::RowSelection (line 63)",
"parquet/src/file/properties.rs - file::properties (line 22)",
"parquet/src/schema/parser.rs - schema::parser (line 24)",
"parquet/src/schema/printer.rs - schema::printer (line 23)",
"parquet/src/schema/mod.rs - schema (line 22)",
"parquet/src/record/api.rs - record::api::RowFormatter (line 140)",
"parquet/src/arrow/mod.rs - arrow (line 27)",
"parquet/src/arrow/arrow_writer/mod.rs - arrow::arrow_writer::ArrowWriter (line 64)",
"parquet/src/arrow/mod.rs - arrow (line 65)",
"test_primitive",
"test_string"
] |
[] |
[] |
apache/arrow-rs
| 5,439
|
apache__arrow-rs-5439
|
[
"5438"
] |
ef5c45cf4186a8124da5a1603ebdbc09ef9928fc
|
diff --git a/arrow-flight/src/error.rs b/arrow-flight/src/error.rs
index e054883e965d..ba979ca9f7a6 100644
--- a/arrow-flight/src/error.rs
+++ b/arrow-flight/src/error.rs
@@ -49,17 +49,24 @@ impl FlightError {
impl std::fmt::Display for FlightError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- // TODO better format / error
- write!(f, "{self:?}")
+ match self {
+ FlightError::Arrow(source) => write!(f, "Arrow error: {}", source),
+ FlightError::NotYetImplemented(desc) => write!(f, "Not yet implemented: {}", desc),
+ FlightError::Tonic(source) => write!(f, "Tonic error: {}", source),
+ FlightError::ProtocolError(desc) => write!(f, "Protocol error: {}", desc),
+ FlightError::DecodeError(desc) => write!(f, "Decode error: {}", desc),
+ FlightError::ExternalError(source) => write!(f, "External error: {}", source),
+ }
}
}
impl Error for FlightError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
- if let Self::ExternalError(e) = self {
- Some(e.as_ref())
- } else {
- None
+ match self {
+ FlightError::Arrow(source) => Some(source),
+ FlightError::Tonic(source) => Some(source),
+ FlightError::ExternalError(source) => Some(source.as_ref()),
+ _ => None,
}
}
}
diff --git a/arrow-schema/src/error.rs b/arrow-schema/src/error.rs
index b7bf8d6e12a6..d9a0f3452c86 100644
--- a/arrow-schema/src/error.rs
+++ b/arrow-schema/src/error.rs
@@ -114,10 +114,10 @@ impl Display for ArrowError {
impl Error for ArrowError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
- if let Self::ExternalError(e) = self {
- Some(e.as_ref())
- } else {
- None
+ match self {
+ ArrowError::ExternalError(source) => Some(source.as_ref()),
+ ArrowError::IoError(_, source) => Some(source),
+ _ => None,
}
}
}
|
diff --git a/arrow-flight/tests/client.rs b/arrow-flight/tests/client.rs
index 9e19bce92338..47565334cb63 100644
--- a/arrow-flight/tests/client.rs
+++ b/arrow-flight/tests/client.rs
@@ -935,7 +935,7 @@ async fn test_cancel_flight_info_error_no_response() {
assert_eq!(
err.to_string(),
- "ProtocolError(\"Received no response for cancel_flight_info call\")"
+ "Protocol error: Received no response for cancel_flight_info call"
);
// server still got the request
let expected_request = Action::new("CancelFlightInfo", request.encode_to_vec());
@@ -985,7 +985,7 @@ async fn test_renew_flight_endpoint_error_no_response() {
assert_eq!(
err.to_string(),
- "ProtocolError(\"Received no response for renew_flight_endpoint call\")"
+ "Protocol error: Received no response for renew_flight_endpoint call"
);
// server still got the request
let expected_request = Action::new("RenewFlightEndpoint", request.encode_to_vec());
diff --git a/arrow-flight/tests/encode_decode.rs b/arrow-flight/tests/encode_decode.rs
index f4741d743e57..789233b918d0 100644
--- a/arrow-flight/tests/encode_decode.rs
+++ b/arrow-flight/tests/encode_decode.rs
@@ -57,7 +57,7 @@ async fn test_error() {
let result: Result<Vec<_>, _> = decode_stream.try_collect().await;
let result = result.unwrap_err();
- assert_eq!(result.to_string(), r#"NotYetImplemented("foo")"#);
+ assert_eq!(result.to_string(), "Not yet implemented: foo");
}
#[tokio::test]
@@ -287,7 +287,7 @@ async fn test_mismatched_record_batch_schema() {
let err = result.unwrap_err();
assert_eq!(
err.to_string(),
- "Arrow(InvalidArgumentError(\"number of columns(1) must match number of fields(2) in schema\"))"
+ "Arrow error: Invalid argument error: number of columns(1) must match number of fields(2) in schema"
);
}
@@ -312,7 +312,7 @@ async fn test_chained_streams_batch_decoder() {
let err = result.unwrap_err();
assert_eq!(
err.to_string(),
- "ProtocolError(\"Unexpectedly saw multiple Schema messages in FlightData stream\")"
+ "Protocol error: Unexpectedly saw multiple Schema messages in FlightData stream"
);
}
|
Refine `Display` implementation for `FlightError`
**Is your feature request related to a problem or challenge? Please describe what you are trying to do.**
There's a `TODO` for a better `std::fmt::Display` implementation on `FlightError`. Currently it forwards to `std::fmt::Debug`, which does not appear to be a good practice as errors should describe themselves with friendly messages provided by `Display`.
https://github.com/apache/arrow-rs/blob/ef5c45cf4186a8124da5a1603ebdbc09ef9928fc/arrow-flight/src/error.rs#L50-L55
**Describe the solution you'd like**
Match the variants of the error and specify different prompts like what we did for `ArrowError`.
https://github.com/apache/arrow-rs/blob/ef5c45cf4186a8124da5a1603ebdbc09ef9928fc/arrow-schema/src/error.rs#L79-L87
**Describe alternatives you've considered**
Derive the implementation with `thiserror`. The code can be more concise with the cost of introducing a new build-time dependency.
**Additional context**
A better practice to implement `Display` for errors is **NOT** to include the error source. AWS SDK has adopted this as described in https://github.com/awslabs/aws-sdk-rust/issues/657. However, this could be considered as a breaking change as many developers have not realize that one should leverage something like [`std::error::Report`](https://doc.rust-lang.org/stable/std/error/struct.Report.html) to get the error sources printed.
|
2024-02-27T07:27:28Z
|
50.0
|
82fc0df73ab97e239ce5c748a05c57ce582f3d5d
|
[
"test_cancel_flight_info_error_no_response",
"test_renew_flight_endpoint_error_no_response",
"test_chained_streams_batch_decoder",
"test_error",
"test_mismatched_record_batch_schema"
] |
[
"arrow-flight/src/client.rs - client::FlightClient::cancel_flight_info (line 609) - compile",
"arrow-flight/src/client.rs - client::FlightClient::get_schema (line 497) - compile",
"arrow-flight/src/client.rs - client::FlightClient::do_action (line 564) - compile",
"arrow-flight/src/client.rs - client::FlightClient (line 49) - compile",
"arrow-flight/src/client.rs - client::FlightClient::list_flights (line 454) - compile",
"arrow-flight/src/client.rs - client::FlightClient::do_get (line 182) - compile",
"arrow-flight/src/client.rs - client::FlightClient::renew_flight_endpoint (line 647) - compile",
"arrow-flight/src/client.rs - client::FlightClient::poll_flight_info (line 283) - compile",
"arrow-flight/src/client.rs - client::FlightClient::get_flight_info (line 227) - compile",
"arrow-flight/src/decode.rs - decode::FlightRecordBatchStream (line 39) - compile",
"arrow-flight/src/client.rs - client::FlightClient::list_actions (line 529) - compile",
"arrow-flight/src/client.rs - client::FlightClient::do_put (line 335) - compile",
"arrow-flight/src/client.rs - client::FlightClient::do_exchange (line 404) - compile",
"arrow-flight/src/encode.rs - encode::FlightDataEncoderBuilder (line 46) - compile",
"arrow-flight/src/encode.rs - encode::FlightDataEncoderBuilder (line 74)",
"arrow-flight/src/lib.rs - FlightData::new (line 461)",
"arrow-flight/src/lib.rs - FlightInfo::new (line 536)",
"arrow-flight/src/lib.rs - FlightEndpoint::new (line 761)",
"arrow-flight/src/lib.rs - PollInfo::new (line 634)",
"arrow-flight/src/lib.rs - Ticket::new (line 745)",
"test_cancel_flight_info",
"test_do_get_error",
"test_do_action_error",
"test_do_action",
"test_do_exchange_error_stream",
"test_do_put_error_stream_server",
"test_do_put_error_client",
"test_do_put_error_client_and_server",
"test_do_exchange_error",
"test_do_action_error_in_stream",
"test_do_put",
"test_do_get_error_in_record_batch_stream",
"test_do_put_error_server",
"test_get_flight_info_error",
"test_get_schema_error",
"test_list_flights_error",
"test_poll_flight_info",
"test_get_schema",
"test_list_actions",
"test_get_flight_info",
"test_handshake_error",
"test_list_flights",
"test_list_actions_error_in_stream",
"test_list_actions_error",
"test_handshake",
"test_list_flights_error_in_stream",
"test_poll_flight_info_error",
"test_renew_flight_endpoint",
"test_do_get",
"test_do_exchange",
"test_empty",
"test_app_metadata",
"test_empty_batch",
"test_dictionary_one",
"test_chained_streams_data_decoder",
"test_max_message_size",
"test_dictionary_many",
"test_primitive_empty",
"test_primitive_many",
"test_mismatched_schema_message",
"test_schema_metadata",
"test_with_flight_descriptor",
"test_zero_batches_schema_specified",
"test_zero_batches_no_schema",
"test_zero_batches_dictionary_schema_specified",
"test_primitive_one",
"test_max_message_size_fuzz"
] |
[] |
[] |
|
apache/arrow-rs
| 443
|
apache__arrow-rs-443
|
[
"349"
] |
0c0077697e55eb154dbfcf3127a3f39e63be2df8
|
diff --git a/parquet/src/data_type.rs b/parquet/src/data_type.rs
index aa1def3db977..f97df3cdaf59 100644
--- a/parquet/src/data_type.rs
+++ b/parquet/src/data_type.rs
@@ -661,8 +661,15 @@ pub(crate) mod private {
_: &mut W,
bit_writer: &mut BitWriter,
) -> Result<()> {
+ if bit_writer.bytes_written() + values.len() / 8 >= bit_writer.capacity() {
+ bit_writer.extend(256);
+ }
for value in values {
- bit_writer.put_value(*value as u64, 1);
+ if !bit_writer.put_value(*value as u64, 1) {
+ return Err(ParquetError::EOF(
+ "unable to put boolean value".to_string(),
+ ));
+ }
}
Ok(())
}
diff --git a/parquet/src/util/bit_util.rs b/parquet/src/util/bit_util.rs
index 8dfb63122bcc..45cfe2b6d48f 100644
--- a/parquet/src/util/bit_util.rs
+++ b/parquet/src/util/bit_util.rs
@@ -223,6 +223,20 @@ impl BitWriter {
}
}
+ /// Extend buffer size
+ #[inline]
+ pub fn extend(&mut self, increment: usize) {
+ self.max_bytes += increment;
+ let extra = vec![0; increment];
+ self.buffer.extend(extra);
+ }
+
+ /// Report buffer size
+ #[inline]
+ pub fn capacity(&mut self) -> usize {
+ self.max_bytes
+ }
+
/// Consumes and returns the current buffer.
#[inline]
pub fn consume(mut self) -> Vec<u8> {
|
diff --git a/parquet/tests/boolean_writer.rs b/parquet/tests/boolean_writer.rs
new file mode 100644
index 000000000000..b9d757e71a8e
--- /dev/null
+++ b/parquet/tests/boolean_writer.rs
@@ -0,0 +1,100 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use parquet::column::writer::ColumnWriter;
+use parquet::file::properties::WriterProperties;
+use parquet::file::reader::FileReader;
+use parquet::file::serialized_reader::SerializedFileReader;
+use parquet::file::writer::FileWriter;
+use parquet::file::writer::SerializedFileWriter;
+use parquet::schema::parser::parse_message_type;
+use std::fs;
+use std::path::Path;
+use std::sync::{mpsc, Arc};
+use std::thread;
+use std::time::Duration;
+
+#[test]
+fn it_writes_data_without_hanging() {
+ let path = Path::new("it_writes_data_without_hanging.parquet");
+
+ let message_type = "
+ message BooleanType {
+ REQUIRED BOOLEAN DIM0;
+ }
+";
+ let schema = Arc::new(parse_message_type(message_type).expect("parse schema"));
+ let props = Arc::new(WriterProperties::builder().build());
+ let file = fs::File::create(&path).expect("create file");
+ let mut writer =
+ SerializedFileWriter::new(file, schema, props).expect("create parquet writer");
+ for _group in 0..1 {
+ let mut row_group_writer = writer.next_row_group().expect("get row group writer");
+ let values: Vec<i64> = vec![0; 2049];
+ let my_bool_values: Vec<bool> = values
+ .iter()
+ .enumerate()
+ .map(|(count, _x)| count % 2 == 0)
+ .collect();
+ while let Some(mut col_writer) =
+ row_group_writer.next_column().expect("next column")
+ {
+ match col_writer {
+ ColumnWriter::BoolColumnWriter(ref mut typed_writer) => {
+ typed_writer
+ .write_batch(&my_bool_values, None, None)
+ .expect("writing bool column");
+ }
+ _ => {
+ panic!("only test boolean values");
+ }
+ }
+ row_group_writer
+ .close_column(col_writer)
+ .expect("close column");
+ }
+ let rg_md = row_group_writer.close().expect("close row group");
+ println!("total rows written: {}", rg_md.num_rows());
+ writer
+ .close_row_group(row_group_writer)
+ .expect("close row groups");
+ }
+ writer.close().expect("close writer");
+
+ let bytes = fs::read(&path).expect("read file");
+ assert_eq!(&bytes[0..4], &[b'P', b'A', b'R', b'1']);
+
+ // Now that we have written our data and are happy with it, make
+ // sure we can read it back in < 5 seconds...
+ let (sender, receiver) = mpsc::channel();
+ let _t = thread::spawn(move || {
+ let file = fs::File::open(&Path::new("it_writes_data_without_hanging.parquet"))
+ .expect("open file");
+ let reader = SerializedFileReader::new(file).expect("get serialized reader");
+ let iter = reader.get_row_iter(None).expect("get iterator");
+ for record in iter {
+ println!("reading: {}", record);
+ }
+ println!("finished reading");
+ if let Ok(()) = sender.send(true) {}
+ });
+ assert_ne!(
+ Err(mpsc::RecvTimeoutError::Timeout),
+ receiver.recv_timeout(Duration::from_millis(5000))
+ );
+ fs::remove_file("it_writes_data_without_hanging.parquet").expect("remove file");
+}
|
parquet reading hangs when row_group contains more than 2048 rows of data
**Describe the bug**
Reading an apparently valid parquet file (which can be read by java tools such as parquet-tools) from any rust program will hang. CPU load goes to 100%. Reproduced on both 4.0.0 and 4.1.0. rustc: 1.51.0
**To Reproduce**
Create a parquet file with at least 1 row group (e.g.: 1). Each row group must have > 2048 rows (e.g.: 2049). Run a (rust) program to read the file and it will hang when visiting the 2048th row. Java program (parquet-tools) reads with no issue.
This test program can be used to produce a file that can then be read using parquet-read to reproduce:
```
#[test]
fn it_writes_data() {
let path = Path::new("sample.parquet");
let message_type = "
message ItHangs {
REQUIRED INT64 DIM0;
REQUIRED DOUBLE DIM1;
REQUIRED BYTE_ARRAY DIM2;
REQUIRED BOOLEAN DIM3;
}
";
let schema = Arc::new(parse_message_type(message_type).unwrap());
let props = Arc::new(WriterProperties::builder().build());
let file = fs::File::create(&path).unwrap();
let mut writer = SerializedFileWriter::new(file, schema, props).unwrap();
for _group in 0..1 {
let mut row_group_writer = writer.next_row_group().unwrap();
let values: Vec<i64> = vec![0; 2049];
let my_values: Vec<i64> = values
.iter()
.enumerate()
.map(|(count, _x)| count.try_into().unwrap())
.collect();
let my_double_values: Vec<f64> = values
.iter()
.enumerate()
.map(|(count, _x)| count as f64)
.collect();
let my_bool_values: Vec<bool> = values
.iter()
.enumerate()
.map(|(count, _x)| count % 2 == 0)
.collect();
let my_ba_values: Vec<ByteArray> = values
.iter()
.enumerate()
.map(|(count, _x)| {
let s = format!("{}", count);
ByteArray::from(s.as_ref())
})
.collect();
while let Some(mut col_writer) = row_group_writer.next_column().expect("next column") {
match col_writer {
// ... write values to a column writer
// You can also use `get_typed_column_writer` method to extract typed writer.
ColumnWriter::Int64ColumnWriter(ref mut typed_writer) => {
typed_writer
.write_batch(&my_values, None, None)
.expect("writing int column");
}
ColumnWriter::DoubleColumnWriter(ref mut typed_writer) => {
typed_writer
.write_batch(&my_double_values, None, None)
.expect("writing double column");
}
ColumnWriter::BoolColumnWriter(ref mut typed_writer) => {
typed_writer
.write_batch(&my_bool_values, None, None)
.expect("writing bool column");
}
ColumnWriter::ByteArrayColumnWriter(ref mut typed_writer) => {
typed_writer
.write_batch(&my_ba_values, None, None)
.expect("writing bytes column");
}
_ => {
println!("huh:!");
}
}
row_group_writer
.close_column(col_writer)
.expect("close column");
}
let rg_md = row_group_writer.close().expect("close row group");
println!("total rows written: {}", rg_md.num_rows());
writer
.close_row_group(row_group_writer)
.expect("close row groups");
}
writer.close().expect("close writer");
let bytes = fs::read(&path).unwrap();
assert_eq!(&bytes[0..4], &[b'P', b'A', b'R', b'1']);
}
```
**Expected behavior**
The read will complete without hanging.
**Additional context**
My development system is Mac OS X, so only tested on OS X.
rustup reports:
active toolchain
----------------
1.51.0-x86_64-apple-darwin (default)
rustc 1.51.0 (2fd73fabe 2021-03-23)
|
Thanks for the report @garyanaplan !
yw.
Extra Info: It happens with debug or release builds and I reproduced it with 1.51.0 on a linux system.
I've also just encountered it. Common element with this reproduction is BOOLEAN field. It worked without BOOLEAN as well.
After quick investigation of the looping code, I've found something suspect, but it's just about naming - not sure if it's an actual bug.
This function returns something initialized as input's length and called `values_to_read`: https://github.com/apache/arrow-rs/blob/0f55b828883b3b3afda43ae404b130d374e6f1a1/parquet/src/util/bit_util.rs#L588
Meanwhile the calling site (which I can't find on GitHub, because admittedly I'm using an older version - will add it later) assigns the return value to `values_read`.
Btw it loops because after reading 2048 values, this returned value is 0.
Yep. If I update my test to remove BOOLEAN from the schema, the problem goes away. I've done some digging around today and noticed that it looks like the problem might lie in the generation of the file.
I previously reported that parquet-tools dump <file> would happily process the file, however I trimmed down the example to just include BOOLEAN field in schema and increased the number of rows in the group and noted the following when dumping:
`value 2039: R:0 D:0 V:true
value 2040: R:0 D:0 V:false
value 2041: R:0 D:0 V:true
value 2042: R:0 D:0 V:false
value 2043: R:0 D:0 V:true
value 2044: R:0 D:0 V:false
value 2045: R:0 D:0 V:true
value 2046: R:0 D:0 V:false
value 2047: R:0 D:0 V:true
value 2048: R:0 D:0 V:false
value 2049: R:0 D:0 V:false
value 2050: R:0 D:0 V:false
value 2051: R:0 D:0 V:false
value 2052: R:0 D:0 V:false
value 2053: R:0 D:0 V:false
value 2054: R:0 D:0 V:false
value 2055: R:0 D:0 V:false
`
All the values after 2048 are false and they continue to be false until the end of the file.
It looks like the generated input file is invalid, so I'm going to poke around there a little next.
More poking reveals that PlainEncoder has a bit_writer with a hard-coded size of 256 (big enough to hold 2048 bits...).
`src/encodings/encoding.rs: line bit_writer: BitWriter::new(256),
`
If you adjust that value up or down you trip the error at different times. So, that looks like it's a contributing factor. I'm now trying to understand the logic around buffer flushing and re-use. Feel, like I'm getting close to the root cause.
Looks like that hard-coded value (256) in the bit-writer is the root cause. When writing, if we try to put > 2048 boolean values, then the writer just "ignores" the writes. This is caused by the fact that bool encoder silently ignores calls to put_value that return false.
I have a fix for this which works by extending the size of the BitWriter (in 256 byte) increments and also checks the return of put_value in BoolType::encode() and raises an error if the call fails.
Can anyone comment on this approach?
(diff attached)
[a.diff.txt](https://github.com/apache/arrow-rs/files/6631262/a.diff.txt)
@garyanaplan -- I think the best way to get feedback on the approach would be to open a pull request
Yeah. I'm not really happy with it, because I don't love the special handling for Booleans via the BitWriter. Just growing the buffer indefinitely seems "wrong", but I think any other kind of fix would be much more extensive/intrusive.
I'll file the PR and see what feedback I get.
|
2021-06-10T13:13:10Z
|
0.3
|
4c7d4189e72901a78fb4f4250c11421241dd9e13
|
[
"it_writes_data_without_hanging"
] |
[
"parquet/src/compression.rs - compression (line 25) - compile",
"parquet/src/file/mod.rs - file (line 29) - compile",
"parquet/src/arrow/mod.rs - arrow (line 25) - compile",
"parquet/src/file/mod.rs - file (line 64) - compile",
"parquet/src/file/mod.rs - file (line 81) - compile",
"parquet/src/record/api.rs - record::api::Row::get_column_iter (line 62) - compile",
"parquet/src/column/mod.rs - column (line 38) - compile",
"parquet/src/file/properties.rs - file::properties (line 22)",
"parquet/src/file/statistics.rs - file::statistics (line 23)",
"parquet/src/schema/types.rs - schema::types::ColumnPath::string (line 663)",
"parquet/src/schema/mod.rs - schema (line 22)",
"parquet/src/schema/printer.rs - schema::printer (line 23)",
"parquet/src/schema/parser.rs - schema::parser (line 24)",
"parquet/src/schema/types.rs - schema::types::ColumnPath::append (line 674)"
] |
[] |
[] |
apache/arrow-rs
| 4,201
|
apache__arrow-rs-4201
|
[
"1936"
] |
378a9fcc9ee31fff4a9a13f5de5a326dc449541e
|
diff --git a/arrow-cast/src/cast.rs b/arrow-cast/src/cast.rs
index 37fede0a6fe0..2b286bfa9119 100644
--- a/arrow-cast/src/cast.rs
+++ b/arrow-cast/src/cast.rs
@@ -35,7 +35,7 @@
//! assert_eq!(7.0, c.value(2));
//! ```
-use chrono::{NaiveTime, TimeZone, Timelike, Utc};
+use chrono::{NaiveTime, Offset, TimeZone, Timelike, Utc};
use std::cmp::Ordering;
use std::sync::Arc;
@@ -1770,7 +1770,7 @@ pub fn cast_with_options(
tz.clone(),
)),
- (Timestamp(from_unit, _), Timestamp(to_unit, to_tz)) => {
+ (Timestamp(from_unit, from_tz), Timestamp(to_unit, to_tz)) => {
let array = cast_with_options(array, &Int64, cast_options)?;
let time_array = array.as_primitive::<Int64Type>();
let from_size = time_unit_multiple(from_unit);
@@ -1792,8 +1792,52 @@ pub fn cast_with_options(
}
}
};
+ // Normalize timezone
+ let adjusted = match (from_tz, to_tz) {
+ // Only this case needs to be adjusted because we're casting from
+ // unknown time offset to some time offset, we want the time to be
+ // unchanged.
+ //
+ // i.e. Timestamp('2001-01-01T00:00', None) -> Timestamp('2001-01-01T00:00', '+0700')
+ (None, Some(to_tz)) => {
+ let to_tz: Tz = to_tz.parse()?;
+ match to_unit {
+ TimeUnit::Second => {
+ adjust_timestamp_to_timezone::<TimestampSecondType>(
+ converted,
+ &to_tz,
+ cast_options,
+ )?
+ }
+ TimeUnit::Millisecond => {
+ adjust_timestamp_to_timezone::<TimestampMillisecondType>(
+ converted,
+ &to_tz,
+ cast_options,
+ )?
+ }
+ TimeUnit::Microsecond => {
+ adjust_timestamp_to_timezone::<TimestampMicrosecondType>(
+ converted,
+ &to_tz,
+ cast_options,
+ )?
+ }
+ TimeUnit::Nanosecond => {
+ adjust_timestamp_to_timezone::<TimestampNanosecondType>(
+ converted,
+ &to_tz,
+ cast_options,
+ )?
+ }
+ }
+ }
+ _ => {
+ converted
+ }
+ };
Ok(make_timestamp_array(
- &converted,
+ &adjusted,
to_unit.clone(),
to_tz.clone(),
))
@@ -3005,6 +3049,30 @@ fn cast_string_to_month_day_nano_interval<Offset: OffsetSizeTrait>(
Ok(Arc::new(interval_array) as ArrayRef)
}
+fn adjust_timestamp_to_timezone<T: ArrowTimestampType>(
+ array: PrimitiveArray<Int64Type>,
+ to_tz: &Tz,
+ cast_options: &CastOptions,
+) -> Result<PrimitiveArray<Int64Type>, ArrowError> {
+ let adjust = |o| {
+ let local = as_datetime::<T>(o)?;
+ let offset = to_tz.offset_from_local_datetime(&local).single()?;
+ T::make_value(local - offset.fix())
+ };
+ let adjusted = if cast_options.safe {
+ array.unary_opt::<_, Int64Type>(adjust)
+ } else {
+ array.try_unary::<_, Int64Type, _>(|o| {
+ adjust(o).ok_or_else(|| {
+ ArrowError::CastError(
+ "Cannot cast timezone to different timezone".to_string(),
+ )
+ })
+ })?
+ };
+ Ok(adjusted)
+}
+
/// Casts Utf8 to Boolean
fn cast_utf8_to_boolean<OffsetSize>(
from: &dyn Array,
@@ -5978,6 +6046,83 @@ mod tests {
assert!(b.is_err());
}
+ // Cast Timestamp(_, None) -> Timestamp(_, Some(timezone))
+ #[test]
+ fn test_cast_timestamp_with_timezone_1() {
+ let string_array: Arc<dyn Array> = Arc::new(StringArray::from(vec![
+ Some("2000-01-01T00:00:00.123456789"),
+ Some("2010-01-01T00:00:00.123456789"),
+ None,
+ ]));
+ let to_type = DataType::Timestamp(TimeUnit::Nanosecond, None);
+ let timestamp_array = cast(&string_array, &to_type).unwrap();
+
+ let to_type = DataType::Timestamp(TimeUnit::Microsecond, Some("+0700".into()));
+ let timestamp_array = cast(&timestamp_array, &to_type).unwrap();
+
+ let string_array = cast(&timestamp_array, &DataType::Utf8).unwrap();
+ let result = string_array.as_string::<i32>();
+ assert_eq!("2000-01-01T00:00:00.123456+07:00", result.value(0));
+ assert_eq!("2010-01-01T00:00:00.123456+07:00", result.value(1));
+ assert!(result.is_null(2));
+ }
+
+ // Cast Timestamp(_, Some(timezone)) -> Timestamp(_, None)
+ #[test]
+ fn test_cast_timestamp_with_timezone_2() {
+ let string_array: Arc<dyn Array> = Arc::new(StringArray::from(vec![
+ Some("2000-01-01T07:00:00.123456789"),
+ Some("2010-01-01T07:00:00.123456789"),
+ None,
+ ]));
+ let to_type = DataType::Timestamp(TimeUnit::Millisecond, Some("+0700".into()));
+ let timestamp_array = cast(&string_array, &to_type).unwrap();
+
+ // Check intermediate representation is correct
+ let string_array = cast(&timestamp_array, &DataType::Utf8).unwrap();
+ let result = string_array.as_string::<i32>();
+ assert_eq!("2000-01-01T07:00:00.123+07:00", result.value(0));
+ assert_eq!("2010-01-01T07:00:00.123+07:00", result.value(1));
+ assert!(result.is_null(2));
+
+ let to_type = DataType::Timestamp(TimeUnit::Nanosecond, None);
+ let timestamp_array = cast(&timestamp_array, &to_type).unwrap();
+
+ let string_array = cast(&timestamp_array, &DataType::Utf8).unwrap();
+ let result = string_array.as_string::<i32>();
+ assert_eq!("2000-01-01T00:00:00.123", result.value(0));
+ assert_eq!("2010-01-01T00:00:00.123", result.value(1));
+ assert!(result.is_null(2));
+ }
+
+ // Cast Timestamp(_, Some(timezone)) -> Timestamp(_, Some(timezone))
+ #[test]
+ fn test_cast_timestamp_with_timezone_3() {
+ let string_array: Arc<dyn Array> = Arc::new(StringArray::from(vec![
+ Some("2000-01-01T07:00:00.123456789"),
+ Some("2010-01-01T07:00:00.123456789"),
+ None,
+ ]));
+ let to_type = DataType::Timestamp(TimeUnit::Microsecond, Some("+0700".into()));
+ let timestamp_array = cast(&string_array, &to_type).unwrap();
+
+ // Check intermediate representation is correct
+ let string_array = cast(&timestamp_array, &DataType::Utf8).unwrap();
+ let result = string_array.as_string::<i32>();
+ assert_eq!("2000-01-01T07:00:00.123456+07:00", result.value(0));
+ assert_eq!("2010-01-01T07:00:00.123456+07:00", result.value(1));
+ assert!(result.is_null(2));
+
+ let to_type = DataType::Timestamp(TimeUnit::Second, Some("-08:00".into()));
+ let timestamp_array = cast(&timestamp_array, &to_type).unwrap();
+
+ let string_array = cast(&timestamp_array, &DataType::Utf8).unwrap();
+ let result = string_array.as_string::<i32>();
+ assert_eq!("1999-12-31T16:00:00-08:00", result.value(0));
+ assert_eq!("2009-12-31T16:00:00-08:00", result.value(1));
+ assert!(result.is_null(2));
+ }
+
#[test]
fn test_cast_date64_to_timestamp() {
let array =
|
diff --git a/arrow/tests/array_cast.rs b/arrow/tests/array_cast.rs
index bf7e7a326efc..43dc6dd0eb0a 100644
--- a/arrow/tests/array_cast.rs
+++ b/arrow/tests/array_cast.rs
@@ -18,6 +18,7 @@
use arrow_array::builder::{
PrimitiveDictionaryBuilder, StringDictionaryBuilder, UnionBuilder,
};
+use arrow_array::cast::AsArray;
use arrow_array::types::{
ArrowDictionaryKeyType, Decimal128Type, Decimal256Type, Int16Type, Int32Type,
Int64Type, Int8Type, TimestampMicrosecondType, UInt16Type, UInt32Type, UInt64Type,
@@ -64,6 +65,97 @@ fn test_cast_timestamp_to_string() {
assert!(c.is_null(2));
}
+// See: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones for list of valid
+// timezones
+
+// Cast Timestamp(_, None) -> Timestamp(_, Some(timezone))
+#[test]
+fn test_cast_timestamp_with_timezone_daylight_1() {
+ let string_array: Arc<dyn Array> = Arc::new(StringArray::from(vec![
+ // This is winter in New York so daylight saving is not in effect
+ // UTC offset is -05:00
+ Some("2000-01-01T00:00:00.123456789"),
+ // This is summer in New York so daylight saving is in effect
+ // UTC offset is -04:00
+ Some("2010-07-01T00:00:00.123456789"),
+ None,
+ ]));
+ let to_type = DataType::Timestamp(TimeUnit::Nanosecond, None);
+ let timestamp_array = cast(&string_array, &to_type).unwrap();
+
+ let to_type =
+ DataType::Timestamp(TimeUnit::Microsecond, Some("America/New_York".into()));
+ let timestamp_array = cast(&timestamp_array, &to_type).unwrap();
+
+ let string_array = cast(&timestamp_array, &DataType::Utf8).unwrap();
+ let result = string_array.as_string::<i32>();
+ assert_eq!("2000-01-01T00:00:00.123456-05:00", result.value(0));
+ assert_eq!("2010-07-01T00:00:00.123456-04:00", result.value(1));
+ assert!(result.is_null(2));
+}
+
+// Cast Timestamp(_, Some(timezone)) -> Timestamp(_, None)
+#[test]
+fn test_cast_timestamp_with_timezone_daylight_2() {
+ let string_array: Arc<dyn Array> = Arc::new(StringArray::from(vec![
+ Some("2000-01-01T07:00:00.123456789"),
+ Some("2010-07-01T07:00:00.123456789"),
+ None,
+ ]));
+ let to_type =
+ DataType::Timestamp(TimeUnit::Millisecond, Some("America/New_York".into()));
+ let timestamp_array = cast(&string_array, &to_type).unwrap();
+
+ // Check intermediate representation is correct
+ let string_array = cast(&timestamp_array, &DataType::Utf8).unwrap();
+ let result = string_array.as_string::<i32>();
+ assert_eq!("2000-01-01T07:00:00.123-05:00", result.value(0));
+ assert_eq!("2010-07-01T07:00:00.123-04:00", result.value(1));
+ assert!(result.is_null(2));
+
+ let to_type = DataType::Timestamp(TimeUnit::Nanosecond, None);
+ let timestamp_array = cast(&timestamp_array, &to_type).unwrap();
+
+ let string_array = cast(&timestamp_array, &DataType::Utf8).unwrap();
+ let result = string_array.as_string::<i32>();
+ assert_eq!("2000-01-01T12:00:00.123", result.value(0));
+ assert_eq!("2010-07-01T11:00:00.123", result.value(1));
+ assert!(result.is_null(2));
+}
+
+// Cast Timestamp(_, Some(timezone)) -> Timestamp(_, Some(timezone))
+#[test]
+fn test_cast_timestamp_with_timezone_daylight_3() {
+ let string_array: Arc<dyn Array> = Arc::new(StringArray::from(vec![
+ // Winter in New York, summer in Sydney
+ // UTC offset is -05:00 (New York) and +11:00 (Sydney)
+ Some("2000-01-01T00:00:00.123456789"),
+ // Summer in New York, winter in Sydney
+ // UTC offset is -04:00 (New York) and +10:00 (Sydney)
+ Some("2010-07-01T00:00:00.123456789"),
+ None,
+ ]));
+ let to_type =
+ DataType::Timestamp(TimeUnit::Microsecond, Some("America/New_York".into()));
+ let timestamp_array = cast(&string_array, &to_type).unwrap();
+
+ // Check intermediate representation is correct
+ let string_array = cast(&timestamp_array, &DataType::Utf8).unwrap();
+ let result = string_array.as_string::<i32>();
+ assert_eq!("2000-01-01T00:00:00.123456-05:00", result.value(0));
+ assert_eq!("2010-07-01T00:00:00.123456-04:00", result.value(1));
+ assert!(result.is_null(2));
+
+ let to_type = DataType::Timestamp(TimeUnit::Second, Some("Australia/Sydney".into()));
+ let timestamp_array = cast(&timestamp_array, &to_type).unwrap();
+
+ let string_array = cast(&timestamp_array, &DataType::Utf8).unwrap();
+ let result = string_array.as_string::<i32>();
+ assert_eq!("2000-01-01T16:00:00+11:00", result.value(0));
+ assert_eq!("2010-07-01T14:00:00+10:00", result.value(1));
+ assert!(result.is_null(2));
+}
+
#[test]
#[cfg_attr(miri, ignore)] // running forever
fn test_can_cast_types() {
|
Cast Kernel Ignores Timezone
**Is your feature request related to a problem or challenge? Please describe what you are trying to do.**
The beginnings of timezone support were added in #824, however, this is currently ignored by the cast kernel
**Describe the solution you'd like**
Timezones should be correctly handled by the cast kernel
**Describe alternatives you've considered**
We could not support timezones
**Additional context**
Noticed whilst investigating #1932
|
I'd like to work on this. And I think `fmt` of timestamp array cannot ignore timezone too.
Could you assign it to me? @tustvold
I would recommend writing up the expected behaviour first, as timezone handling is notoriously messy, and once we have consensus we can move forward with implementing that.
FYI @avantgardnerio @waitingkuo
@tustvold thank you for pinging me, i'm working on these things now as well.
@doki23 it would be great if you could help ❤️
You could find some other related issues here apache/arrow-datafusion#3148 (search timezone)
some hints that might help
1
https://github.com/apache/arrow-rs/blob/3bf6eb98ceb3962e1d9419da6dc93e609f7893e6/arrow/src/compute/kernels/cast.rs#L1284
to make casting function consider timezone, we have to fix the second `_` identifier and check whether it's `None` or `Some`
2
https://github.com/apache/arrow-rs/blob/3bf6eb98ceb3962e1d9419da6dc93e609f7893e6/arrow/src/array/array_primitive.rs#L209
while using fmt to print, we first convert it to `NaiveDateTime` (from chrono-rs) which contains no timezone info so that you could only see timestamp without timezone
@doki23 are you planning to draft the proposal?
> @doki23 are you planning to draft the proposal?
sure, plz wait me hours
We consider tz only if from_type and to_type both needs it. For example, ignoring tz is ok when we cast ts to i64, because i64 array doesn't care about timezone.
So, there're 2 situations:
1. ts is from_type, and the to_type needs tz.
2. ts is to_type, and the from_type has tz.
I noticed that timestamp array is always treat as `PrimitiveArray`. We can't get tz from `ArrowPrimitiveType::DATA_TYPE`, because there're only utc ts definitions like:
```rust
make_type!(
TimestampSecondType,
i64,
DataType::Timestamp(TimeUnit::Second, None)
);
```
So, we may need a `TimestampBuilder` in place of `TimestampSecondArray::from_xxx` which can realize the timezone. And it has a valid `ArrayData::data_type`.
what's expected behavior for casting timestamp with timezone to timestamp without time zone?
e.g. if the timestamp with timezone is `1970-01-01T08:00:00+08:00` (note that the timestamp underline is 0), what's the casted result? `1970-01-01T08:00:00` (underline timestamp as 28800000000000) or `1970-01-01T00:00:00` (underline timestamp as 0)?
i recommend that listing these ambiguous cases and your proposed behavior so we could discuss
btw i tested it on pyarrow, it simply changes the datatype but not change the underline timestamp (`1970-01-01T08:00:00+08:00` becomes `1970-01-01T00:00:00`
> what's expected behavior for casting timestamp with timezone to timestamp without time zone?
The intuitive answer would be to convert it to UTC. I think postgres effectively casts it to server (local) time.
I believe this section of the arrow schema definition is relevant - https://github.com/apache/arrow-rs/blob/master/format/Schema.fbs#L280
In particular
> One possibility is to assume that the original timestamp values are relative to the epoch of the timezone being set; timestamp values should then adjusted to the Unix epoch
Given this is the only possibility enumerated in the schema, I feel this is probably the one we should follow unless people feel strongly otherwise. My 2 cents is that anything relying on the local timezone of the system is best avoided if at all possible, it just feels fragile and error-prone.
> local timezone of the system
Yes - these RecordBatches could be part of Flights, yes? In which case the whole point is to send them around to different computers that may be in different timezones, so it kind of forces our hand here.
And if we are doing it this way in arrow where we don't have the luxury of following postgres, maybe this is also where we break postgres compatibility in DataFusion. Just because postgres did it wrong doesn't mean we should follow...
> what's expected behavior for casting timestamp with timezone to timestamp without time zone?
Tz has no affect to the value of timestamp, it's just used for display.
> what's expected behavior for casting timestamp with timezone to timestamp without time zone?
The specification states
> /// However, if a Timestamp column has no timezone value, changing it to a
> /// non-empty value requires to think about the desired semantics.
> /// One possibility is to assume that the original timestamp values are
> /// relative to the epoch of the timezone being set; timestamp values should
> /// then adjusted to the Unix epoch (for example, changing the timezone from
> /// empty to "Europe/Paris" would require converting the timestamp values
> /// from "Europe/Paris" to "UTC", which seems counter-intuitive but is
> /// nevertheless correct).
As stated above, given this is the only possibility enumerated I think we should follow this. The inverse operation, i.e. removing a timezone, I would therefore expect to do the reverse i.e. `1970-01-01 01:00:01 +01:00` would become `1970-01-01 01:00:01`. This is consistent with both postgres and chrono.
thank you @tustvold , didn't aware this spec before
This looks good to me
btw, i think `with_timezone_opt` already covered another possibility - keep the underline i64 value and change the metadata
Yeah let's do the "hard" operation in the cast kernel, and if people don't like it, they can perform a metadata-only cast using`with_timezone_opt` :+1:
https://github.com/apache/arrow-rs/issues/3794 is related to this, and implements the necessary machinery for timezone aware parsing of strings
|
2023-05-11T11:37:13Z
|
39.0
|
378a9fcc9ee31fff4a9a13f5de5a326dc449541e
|
[
"test_cast_timestamp_with_timezone_daylight_1"
] |
[
"arrow/src/lib.rs - (line 121)",
"arrow/src/lib.rs - (line 140)",
"arrow/src/lib.rs - (line 253)",
"arrow/src/lib.rs - (line 282)",
"arrow/src/lib.rs - (line 78)",
"arrow/src/util/string_writer.rs - util::string_writer (line 25)",
"arrow/src/lib.rs - (line 63)",
"arrow/src/lib.rs - (line 97)",
"arrow/src/lib.rs - (line 227)",
"arrow/src/lib.rs - (line 161)",
"arrow/src/lib.rs - (line 198)",
"test_cast_timestamp_to_string",
"test_pretty_format_timestamp_second_with_incorrect_fixed_offset_timezone",
"test_cast_timestamp_with_timezone_daylight_3",
"test_pretty_format_timestamp_second_with_non_utc_timezone",
"test_cast_timestamp_with_timezone_daylight_2",
"test_pretty_format_timestamp_second_with_unknown_timezone",
"test_timestamp_cast_utf8",
"test_pretty_format_timestamp_second_with_utc_timezone",
"test_can_cast_types"
] |
[] |
[] |
apache/arrow-rs
| 5,717
|
apache__arrow-rs-5717
|
[
"5716"
] |
a126d5097b71273428fba68d1c430f3d4beee684
|
diff --git a/parquet_derive/src/parquet_field.rs b/parquet_derive/src/parquet_field.rs
index 9fff76c42d1d..f99ea3e0356c 100644
--- a/parquet_derive/src/parquet_field.rs
+++ b/parquet_derive/src/parquet_field.rs
@@ -239,7 +239,8 @@ impl Field {
/// because this parsing logic is not sophisticated enough for definition
/// levels beyond 2.
///
- /// `Option` types and references not supported
+ /// `Option` types and references not supported, but the column itself can be nullable
+ /// (i.e., def_level==1), as long as the values are all valid.
pub fn reader_snippet(&self) -> proc_macro2::TokenStream {
let ident = &self.ident;
let column_reader = self.ty.column_reader();
@@ -248,7 +249,13 @@ impl Field {
let write_batch_expr = quote! {
let mut vals = Vec::new();
if let #column_reader(mut typed) = column_reader {
- typed.read_records(num_records, None, None, &mut vals)?;
+ let mut definition_levels = Vec::new();
+ let (total_num, valid_num, decoded_num) = typed.read_records(
+ num_records, Some(&mut definition_levels), None, &mut vals)?;
+ if valid_num != decoded_num {
+ panic!("Support only valid records, found {} null records in column type {}",
+ decoded_num - valid_num, stringify!{#ident});
+ }
} else {
panic!("Schema and struct disagree on type for {}", stringify!{#ident});
}
@@ -876,15 +883,21 @@ mod test {
snippet,
(quote! {
{
- let mut vals = Vec::new();
- if let ColumnReader::Int64ColumnReader(mut typed) = column_reader {
- typed.read_records(num_records, None, None, &mut vals)?;
- } else {
- panic!("Schema and struct disagree on type for {}", stringify!{ counter });
- }
- for (i, r) in &mut records[..num_records].iter_mut().enumerate() {
- r.counter = vals[i] as usize;
- }
+ let mut vals = Vec::new();
+ if let ColumnReader::Int64ColumnReader(mut typed) = column_reader {
+ let mut definition_levels = Vec::new();
+ let (total_num, valid_num, decoded_num) = typed.read_records(
+ num_records, Some(&mut definition_levels), None, &mut vals)?;
+ if valid_num != decoded_num {
+ panic!("Support only valid records, found {} null records in column type {}",
+ decoded_num - valid_num, stringify!{counter});
+ }
+ } else {
+ panic!("Schema and struct disagree on type for {}", stringify!{counter});
+ }
+ for (i, r) in &mut records[..num_records].iter_mut().enumerate() {
+ r.counter = vals[i] as usize;
+ }
}
})
.to_string()
@@ -1291,7 +1304,13 @@ mod test {
{
let mut vals = Vec::new();
if let ColumnReader::Int64ColumnReader(mut typed) = column_reader {
- typed.read_records(num_records, None, None, &mut vals)?;
+ let mut definition_levels = Vec::new();
+ let (total_num, valid_num, decoded_num) = typed.read_records(
+ num_records, Some(&mut definition_levels), None, &mut vals)?;
+ if valid_num != decoded_num {
+ panic!("Support only valid records, found {} null records in column type {}",
+ decoded_num - valid_num, stringify!{henceforth});
+ }
} else {
panic!("Schema and struct disagree on type for {}", stringify!{ henceforth });
}
@@ -1359,7 +1378,13 @@ mod test {
{
let mut vals = Vec::new();
if let ColumnReader::Int32ColumnReader(mut typed) = column_reader {
- typed.read_records(num_records, None, None, &mut vals)?;
+ let mut definition_levels = Vec::new();
+ let (total_num, valid_num, decoded_num) = typed.read_records(
+ num_records, Some(&mut definition_levels), None, &mut vals)?;
+ if valid_num != decoded_num {
+ panic!("Support only valid records, found {} null records in column type {}",
+ decoded_num - valid_num, stringify!{henceforth});
+ }
} else {
panic!("Schema and struct disagree on type for {}", stringify!{ henceforth });
}
@@ -1427,7 +1452,13 @@ mod test {
{
let mut vals = Vec::new();
if let ColumnReader::FixedLenByteArrayColumnReader(mut typed) = column_reader {
- typed.read_records(num_records, None, None, &mut vals)?;
+ let mut definition_levels = Vec::new();
+ let (total_num, valid_num, decoded_num) = typed.read_records(
+ num_records, Some(&mut definition_levels), None, &mut vals)?;
+ if valid_num != decoded_num {
+ panic!("Support only valid records, found {} null records in column type {}",
+ decoded_num - valid_num, stringify!{unique_id});
+ }
} else {
panic!("Schema and struct disagree on type for {}", stringify!{ unique_id });
}
|
diff --git a/parquet_derive_test/src/lib.rs b/parquet_derive_test/src/lib.rs
index 3743c6b55c7c..e168ad5b980a 100644
--- a/parquet_derive_test/src/lib.rs
+++ b/parquet_derive_test/src/lib.rs
@@ -66,6 +66,25 @@ struct APartiallyCompleteRecord {
pub byte_vec: Vec<u8>,
}
+// This struct has OPTIONAL columns
+// If these fields are guaranteed to be valid
+// we can load this struct into APartiallyCompleteRecord
+#[derive(PartialEq, ParquetRecordWriter, Debug)]
+struct APartiallyOptionalRecord {
+ pub bool: bool,
+ pub string: String,
+ pub maybe_i16: Option<i16>,
+ pub maybe_i32: Option<i32>,
+ pub maybe_u64: Option<u64>,
+ pub isize: isize,
+ pub float: f32,
+ pub double: f64,
+ pub now: chrono::NaiveDateTime,
+ pub date: chrono::NaiveDate,
+ pub uuid: uuid::Uuid,
+ pub byte_vec: Vec<u8>,
+}
+
#[cfg(test)]
mod tests {
use super::*;
@@ -218,6 +237,47 @@ mod tests {
assert_eq!(drs[0], out[0]);
}
+ #[test]
+ fn test_parquet_derive_read_optional_but_valid_column() {
+ let file = get_temp_file("test_parquet_derive_read_optional", &[]);
+ let drs: Vec<APartiallyOptionalRecord> = vec![APartiallyOptionalRecord {
+ bool: true,
+ string: "a string".into(),
+ maybe_i16: Some(-45),
+ maybe_i32: Some(456),
+ maybe_u64: Some(4563424),
+ isize: -365,
+ float: 3.5,
+ double: std::f64::NAN,
+ now: chrono::Utc::now().naive_local(),
+ date: chrono::naive::NaiveDate::from_ymd_opt(2015, 3, 14).unwrap(),
+ uuid: uuid::Uuid::new_v4(),
+ byte_vec: vec![0x65, 0x66, 0x67],
+ }];
+
+ let generated_schema = drs.as_slice().schema().unwrap();
+
+ let props = Default::default();
+ let mut writer =
+ SerializedFileWriter::new(file.try_clone().unwrap(), generated_schema, props).unwrap();
+
+ let mut row_group = writer.next_row_group().unwrap();
+ drs.as_slice().write_to_row_group(&mut row_group).unwrap();
+ row_group.close().unwrap();
+ writer.close().unwrap();
+
+ use parquet::file::{reader::FileReader, serialized_reader::SerializedFileReader};
+ let reader = SerializedFileReader::new(file).unwrap();
+ let mut out: Vec<APartiallyCompleteRecord> = Vec::new();
+
+ let mut row_group = reader.get_row_group(0).unwrap();
+ out.read_from_row_group(&mut *row_group, 1).unwrap();
+
+ assert_eq!(drs[0].maybe_i16.unwrap(), out[0].i16);
+ assert_eq!(drs[0].maybe_i32.unwrap(), out[0].i32);
+ assert_eq!(drs[0].maybe_u64.unwrap(), out[0].u64);
+ }
+
/// Returns file handle for a temp file in 'target' directory with a provided content
pub fn get_temp_file(file_name: &str, content: &[u8]) -> fs::File {
// build tmp path to a file in "target/debug/testdata"
|
[parquet_derive] support OPTIONAL (def_level = 1) columns by default
## Problem Description
<!--
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
(This section helps Arrow developers understand the context and *why* for this feature, in addition to the *what*)
-->
I'm working on parquet files written by `pyarrow` (embedded in `pandas`). I came across `parquet_derive` and it avoids boilerplates in my project.
The problem is, it doesn't work on the parquet files that is written by `pandas` with default setup, it throws error information:
```text
Parquet error: must specify definition levels
```
After digging into this, I found that the problem is the parquet file generated by `pyarrow` has def_level=1, i.e., every column, even without a null value, is OPTIONAL.
<img width="677" alt="image" src="https://github.com/apache/arrow-rs/assets/27212391/b6b4cc96-8c53-4d41-9c66-4f802476dd7a">
However, the macro generate code that does not allow definition level, thus it fails to parsing columns with OPTIONAL value, even there is no actual NULL values:
```rust
typed.read_records(num_records, None, None, &mut vals)?;
```
The API it calls is: https://docs.rs/parquet/latest/parquet/column/reader/struct.GenericColumnReader.html#method.read_records .
## My Solution
The solution is straight-forward. I have fixed the problem locally, I'm willing to contribute a pull request, but I don't know if this solution is reasonable in the scope of the whole `arrow` project.
Basically, I think we need to provide definition level in `read_record`:
```rust
typed.read_records(num_records, None /*should use a Some(&mut Vec<i16>)*/, None, &mut vals)?;
```
In one word, with this solution, `parquet_derive` can now handle:
1. (already supported) parquet file with all columns REQUIRED
2. **(new introduced) parquet file with OPTIONAL columns but are always guaranteed to be valid**.
### Pros
- This solution does not break current features
- This solution makes parquet_derive more general in handling parquet files.
It can pass the tests in `parquet_derive_tests`. I also add checks against the parsed records and valid records, to avoid abusing it for columns with NULLs.
### Cons
- It will be slightly slower since it allocates an extra `Vec<i16>` for each column when invoking `read_from_row_group`.
I don't think it is a big deal, though, compared to the inconvenience of not supporting OPTIONAL columns. Moreover, we can make use of the max_def_levels (for REQUIRED column, it is 0) to skip creating the Vec.
|
What happens if you specify the field as `Option` to match the schema?
> What happens if you specify the field as `Option` to match the schema?
I have tried, it does not work, because:
1. the OPTIONAL, REQUIRED, REPEATED tags are generated automatically in the macro.
2. even I workaround 1 by enforce the field to be OPTIONAL, the actual parsing code, i.e.:
```rust
typed.read_records(num_records, None, None, &mut vals)?;
```
is not dynamic and still provide no definition level.
Hmm... My knowledge of parquet_derive is limited, but I would have expected an optional field to map to `Option<T>` on the annotated struct.
It might also be that it simply doesn't support nulls, in which case I'm a little unsure about adding partial support in a manner that might prevent adding such support in future
> Hmm... My knowledge of parquet_derive is limited, but I would have expected an optional field to map to `Option<T>` on the annotated struct.
>
> It might also be that it simply doesn't support nulls, in which case I'm a little unsure about adding partial support in a manner that might prevent adding such support in future
I understand your concern, and that is why I said this in the description:
> but I don't know if this solution is reasonable in the scope of the whole arrow project.
Yes, I agree it is more natural and rigorous to map and OPTIONAL column to `Option<T>` in theory. The pipeline should be like this:
```text
OPTIONAL columns --> struct with Option<T> (by parquet_derive, not implemented) --> struct without Option<T> (by user)
```
However, in practice, when write a parquet file, the default attribute of a column is OPTIONAL (see https://arrow.apache.org/docs/python/generated/pyarrow.field.html), no matter whether there is a NULL. Those APIs are not even exposed to user in higher level APIs. So, most of the parquet files users deal with have OPTIONAL columns everywhere, though values in those columns are in fact all valid. I don't think it is handy for users to declare a struct with all fields in `Option<T>`.
Another point I want to say is, this change DOES NOT mean that the reader support `Option<T>` in struct now, if you derive `ParquetRecordReader` for a struct with `Option<T>`, it fails to compile as before:
```text
help: message: not implemented: Unsupported: Option(...)
```
The only difference is that if a parquet file is written with OPTIONAL columns, but in fact the values in those columns are in fact all valid, the reader should still be able to load the records into a struct WITHOUT `Option<T>`, i.e., the pipeline becomes:
```text
OPTIONAL columns --> struct without Option<T> (by parquet_derive, with checks)
```
This change is only to relax parquet_derive's restriction against parquet input, without introducing risk since checks are done after parsing. If user's input does have NULL values, the parser will panic, like what it is doing now.
To sum up, I think this change does not make things worse or unsafe. I really appreciate your time to review this issue, and even better if you can invite more experts to review it.
> However, in practice, when write a parquet file, the default attribute of a column is OPTIONAL
Is this the case if you set `nullable` to `false`? If so I would probably raise a bug on pyarrow as that is incorrect.
> This change is only to relax parquet_derive's restriction against parquet input, without introducing risk since checks are done after parsing. If user's input does have NULL values, the parser will panic, like what it is doing now.
So long as we don't regress performance for existing workloads I suppose this is an acceptable workaround. I will try to take a look next week at your PR, although I will need to allocate enough time to get up to speed on that crate (there isn't really anyone maintaining it actively anymore).
FWIW reading parquet via the arrow interface will be faster, especially for string columns, but appreciate if you'd rather stick to a row-oriented model
> Is this the case if you set nullable to false? If so I would probably raise a bug on pyarrow as that is incorrect.
`pyarrow` is not the culprit. I believe "convention" is the one to blame. `pandas` has a [to_parquet](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_parquet.html) interface, and it does not accept schema before https://github.com/pandas-dev/pandas/pull/30270 . Even after this improvement, most people use the default schema, which falls in to `OPTIONAL` columns by default (nullable=True in `pyarrow`).
> I will try to take a look next week at your PR, although I will need to allocate enough time to get up to speed on that crate
I really appreciate your time, my use case requires a row-oriented model, and this crate is still very useful to our project, that's why I raise this issue. I'm willing to spend time polishing this pull request.
|
2024-05-04T11:18:22Z
|
51.0
|
30767a687b48d0dbd2e030eef327826c39095123
|
[
"tests::test_parquet_derive_read_optional_but_valid_column"
] |
[
"tests::test_parquet_derive_hello",
"tests::test_parquet_derive_read_write_combined"
] |
[] |
[] |
apache/arrow-rs
| 6,269
|
apache__arrow-rs-6269
|
[
"6268"
] |
acdd27a66ac7b5e07816dc648db00532110fb89a
|
diff --git a/parquet_derive/src/lib.rs b/parquet_derive/src/lib.rs
index 16b6a6699e2d..9c93e2cca978 100644
--- a/parquet_derive/src/lib.rs
+++ b/parquet_derive/src/lib.rs
@@ -146,10 +146,10 @@ pub fn parquet_record_writer(input: proc_macro::TokenStream) -> proc_macro::Toke
/// Derive flat, simple RecordReader implementations. Works by parsing
/// a struct tagged with `#[derive(ParquetRecordReader)]` and emitting
/// the correct writing code for each field of the struct. Column readers
-/// are generated in the order they are defined.
+/// are generated by matching names in the schema to the names in the struct.
///
-/// It is up to the programmer to keep the order of the struct
-/// fields lined up with the schema.
+/// It is up to the programmer to ensure the names in the struct
+/// fields line up with the schema.
///
/// Example:
///
@@ -189,7 +189,6 @@ pub fn parquet_record_reader(input: proc_macro::TokenStream) -> proc_macro::Toke
let field_names: Vec<_> = fields.iter().map(|f| f.ident.clone()).collect();
let reader_snippets: Vec<proc_macro2::TokenStream> =
field_infos.iter().map(|x| x.reader_snippet()).collect();
- let i: Vec<_> = (0..reader_snippets.len()).collect();
let derived_for = input.ident;
let generics = input.generics;
@@ -206,6 +205,12 @@ pub fn parquet_record_reader(input: proc_macro::TokenStream) -> proc_macro::Toke
let mut row_group_reader = row_group_reader;
+ // key: parquet file column name, value: column index
+ let mut name_to_index = std::collections::HashMap::new();
+ for (idx, col) in row_group_reader.metadata().schema_descr().columns().iter().enumerate() {
+ name_to_index.insert(col.name().to_string(), idx);
+ }
+
for _ in 0..num_records {
self.push(#derived_for {
#(
@@ -218,7 +223,14 @@ pub fn parquet_record_reader(input: proc_macro::TokenStream) -> proc_macro::Toke
#(
{
- if let Ok(mut column_reader) = row_group_reader.get_column_reader(#i) {
+ let idx: usize = match name_to_index.get(stringify!(#field_names)) {
+ Some(&col_idx) => col_idx,
+ None => {
+ let error_msg = format!("column name '{}' is not found in parquet file!", stringify!(#field_names));
+ return Err(::parquet::errors::ParquetError::General(error_msg));
+ }
+ };
+ if let Ok(mut column_reader) = row_group_reader.get_column_reader(idx) {
#reader_snippets
} else {
return Err(::parquet::errors::ParquetError::General("Failed to get next column".into()))
|
diff --git a/parquet_derive_test/src/lib.rs b/parquet_derive_test/src/lib.rs
index e7c7896cb7f3..2cd69d03d731 100644
--- a/parquet_derive_test/src/lib.rs
+++ b/parquet_derive_test/src/lib.rs
@@ -73,9 +73,9 @@ struct APartiallyCompleteRecord {
struct APartiallyOptionalRecord {
pub bool: bool,
pub string: String,
- pub maybe_i16: Option<i16>,
- pub maybe_i32: Option<i32>,
- pub maybe_u64: Option<u64>,
+ pub i16: Option<i16>,
+ pub i32: Option<i32>,
+ pub u64: Option<u64>,
pub isize: isize,
pub float: f32,
pub double: f64,
@@ -85,6 +85,22 @@ struct APartiallyOptionalRecord {
pub byte_vec: Vec<u8>,
}
+// This struct removes several fields from the "APartiallyCompleteRecord",
+// and it shuffles the fields.
+// we should still be able to load it from APartiallyCompleteRecord parquet file
+#[derive(PartialEq, ParquetRecordReader, Debug)]
+struct APrunedRecord {
+ pub bool: bool,
+ pub string: String,
+ pub byte_vec: Vec<u8>,
+ pub float: f32,
+ pub double: f64,
+ pub i16: i16,
+ pub i32: i32,
+ pub u64: u64,
+ pub isize: isize,
+}
+
#[cfg(test)]
mod tests {
use super::*;
@@ -240,12 +256,12 @@ mod tests {
#[test]
fn test_parquet_derive_read_optional_but_valid_column() {
let file = get_temp_file("test_parquet_derive_read_optional", &[]);
- let drs: Vec<APartiallyOptionalRecord> = vec![APartiallyOptionalRecord {
+ let drs = vec![APartiallyOptionalRecord {
bool: true,
string: "a string".into(),
- maybe_i16: Some(-45),
- maybe_i32: Some(456),
- maybe_u64: Some(4563424),
+ i16: Some(-45),
+ i32: Some(456),
+ u64: Some(4563424),
isize: -365,
float: 3.5,
double: f64::NAN,
@@ -273,9 +289,57 @@ mod tests {
let mut row_group = reader.get_row_group(0).unwrap();
out.read_from_row_group(&mut *row_group, 1).unwrap();
- assert_eq!(drs[0].maybe_i16.unwrap(), out[0].i16);
- assert_eq!(drs[0].maybe_i32.unwrap(), out[0].i32);
- assert_eq!(drs[0].maybe_u64.unwrap(), out[0].u64);
+ assert_eq!(drs[0].i16.unwrap(), out[0].i16);
+ assert_eq!(drs[0].i32.unwrap(), out[0].i32);
+ assert_eq!(drs[0].u64.unwrap(), out[0].u64);
+ }
+
+ #[test]
+ fn test_parquet_derive_read_pruned_and_shuffled_columns() {
+ let file = get_temp_file("test_parquet_derive_read_pruned", &[]);
+ let drs = vec![APartiallyCompleteRecord {
+ bool: true,
+ string: "a string".into(),
+ i16: -45,
+ i32: 456,
+ u64: 4563424,
+ isize: -365,
+ float: 3.5,
+ double: f64::NAN,
+ now: chrono::Utc::now().naive_local(),
+ date: chrono::naive::NaiveDate::from_ymd_opt(2015, 3, 14).unwrap(),
+ uuid: uuid::Uuid::new_v4(),
+ byte_vec: vec![0x65, 0x66, 0x67],
+ }];
+
+ let generated_schema = drs.as_slice().schema().unwrap();
+
+ let props = Default::default();
+ let mut writer =
+ SerializedFileWriter::new(file.try_clone().unwrap(), generated_schema, props).unwrap();
+
+ let mut row_group = writer.next_row_group().unwrap();
+ drs.as_slice().write_to_row_group(&mut row_group).unwrap();
+ row_group.close().unwrap();
+ writer.close().unwrap();
+
+ use parquet::file::{reader::FileReader, serialized_reader::SerializedFileReader};
+ let reader = SerializedFileReader::new(file).unwrap();
+ let mut out: Vec<APrunedRecord> = Vec::new();
+
+ let mut row_group = reader.get_row_group(0).unwrap();
+ out.read_from_row_group(&mut *row_group, 1).unwrap();
+
+ assert_eq!(drs[0].bool, out[0].bool);
+ assert_eq!(drs[0].string, out[0].string);
+ assert_eq!(drs[0].byte_vec, out[0].byte_vec);
+ assert_eq!(drs[0].float, out[0].float);
+ assert!(drs[0].double.is_nan());
+ assert!(out[0].double.is_nan());
+ assert_eq!(drs[0].i16, out[0].i16);
+ assert_eq!(drs[0].i32, out[0].i32);
+ assert_eq!(drs[0].u64, out[0].u64);
+ assert_eq!(drs[0].isize, out[0].isize);
}
/// Returns file handle for a temp file in 'target' directory with a provided content
|
parquet_derive: support reading selected columns from parquet file
# Feature Description
I'm effectively using `parquet_derive` in my project, and I found that there are two inconvenient constraints:
1. The `ParquetRecordReader` enforces the struct to organize fields exactly in the **same order** in the parquet file.
2. The `ParquetRecordReader` enforces the struct to parse **all fields** in the parquet file. "all" might be exaggerating, but it is what happens if you want to get the last column, even only the last column.
As describe in its document:
> Derive flat, simple RecordReader implementations. Works by parsing a struct tagged with #[derive(ParquetRecordReader)] and emitting the correct writing code for each field of the struct. Column readers are generated in the order they are defined.
In my use cases (and I believe these are common requests), user should be able to read pruned parquet file, and they should have the freedom to re-organize fields' ordering in decoded struct.
# My Solution
I introduced a `HashMap` to map field name to its index. Of course, it assumes field name is unique, and this is always true since the current `parquet_derive` macro is applied to a flat struct without nesting.
# Pros and Cons
Obviously removing those two constraints makes `parquet_derive` a more handy tool.
But it has some implied changes:
- previously, since the `ParquetRecordReader` relies only on the index of fields, it allows that a field is named as `abc` to implicitly rename itself to `bcd` in the encoded struct. After this change, user must guarantee that the field name in `ParquetRecordReader` to exist in parquet columns.
- I think it is more intuitive and more natural to constrain the "field name" rather than "index", if we use `ParquetRecordReader` to derive a decoder macro.
- allowing reading partial parquet file may improve the performance for some users, but introducing a `HashMap` in the parser may slowdown the function a bit.
- when the `num_records` in a single parsing call is large enough, the cost of `HashMap` lookup is negligible.
Both implied changes seem to have a more positive impact than negative impact. Please review if this is a reasonable feature request.
|
2024-08-18T14:39:49Z
|
52.2
|
678517018ddfd21b202a94df13b06dfa1ab8a378
|
[
"tests::test_parquet_derive_read_pruned_and_shuffled_columns"
] |
[
"tests::test_parquet_derive_hello",
"tests::test_parquet_derive_read_write_combined",
"tests::test_parquet_derive_read_optional_but_valid_column"
] |
[] |
[] |
|
apache/arrow-rs
| 2,407
|
apache__arrow-rs-2407
|
[
"2406"
] |
27f4762c8794ef1c5d042933562185980eb85ae5
|
diff --git a/parquet/src/arrow/record_reader/mod.rs b/parquet/src/arrow/record_reader/mod.rs
index 88d45f3d746a..18b4c9e07026 100644
--- a/parquet/src/arrow/record_reader/mod.rs
+++ b/parquet/src/arrow/record_reader/mod.rs
@@ -786,4 +786,186 @@ mod tests {
assert_eq!(record_reader.num_records(), 8);
assert_eq!(record_reader.num_values(), 14);
}
+
+ #[test]
+ fn test_skip_required_records() {
+ // Construct column schema
+ let message_type = "
+ message test_schema {
+ REQUIRED INT32 leaf;
+ }
+ ";
+ let desc = parse_message_type(message_type)
+ .map(|t| SchemaDescriptor::new(Arc::new(t)))
+ .map(|s| s.column(0))
+ .unwrap();
+
+ // Construct record reader
+ let mut record_reader = RecordReader::<Int32Type>::new(desc.clone());
+
+ // First page
+
+ // Records data:
+ // test_schema
+ // leaf: 4
+ // test_schema
+ // leaf: 7
+ // test_schema
+ // leaf: 6
+ // test_schema
+ // left: 3
+ // test_schema
+ // left: 2
+ {
+ let values = [4, 7, 6, 3, 2];
+ let mut pb = DataPageBuilderImpl::new(desc.clone(), 5, true);
+ pb.add_values::<Int32Type>(Encoding::PLAIN, &values);
+ let page = pb.consume();
+
+ let page_reader = Box::new(InMemoryPageReader::new(vec![page]));
+ record_reader.set_page_reader(page_reader).unwrap();
+ assert_eq!(2, record_reader.skip_records(2).unwrap());
+ assert_eq!(0, record_reader.num_records());
+ assert_eq!(0, record_reader.num_values());
+ assert_eq!(3, record_reader.read_records(3).unwrap());
+ assert_eq!(3, record_reader.num_records());
+ assert_eq!(3, record_reader.num_values());
+ }
+
+ // Second page
+
+ // Records data:
+ // test_schema
+ // leaf: 8
+ // test_schema
+ // leaf: 9
+ {
+ let values = [8, 9];
+ let mut pb = DataPageBuilderImpl::new(desc, 2, true);
+ pb.add_values::<Int32Type>(Encoding::PLAIN, &values);
+ let page = pb.consume();
+
+ let page_reader = Box::new(InMemoryPageReader::new(vec![page]));
+ record_reader.set_page_reader(page_reader).unwrap();
+ assert_eq!(2, record_reader.skip_records(10).unwrap());
+ assert_eq!(3, record_reader.num_records());
+ assert_eq!(3, record_reader.num_values());
+ assert_eq!(0, record_reader.read_records(10).unwrap());
+ }
+
+ let mut bb = Int32BufferBuilder::new(3);
+ bb.append_slice(&[6, 3, 2]);
+ let expected_buffer = bb.finish();
+ assert_eq!(expected_buffer, record_reader.consume_record_data());
+ assert_eq!(None, record_reader.consume_def_levels());
+ assert_eq!(None, record_reader.consume_bitmap());
+ }
+
+ #[test]
+ fn test_skip_optional_records() {
+ // Construct column schema
+ let message_type = "
+ message test_schema {
+ OPTIONAL Group test_struct {
+ OPTIONAL INT32 leaf;
+ }
+ }
+ ";
+
+ let desc = parse_message_type(message_type)
+ .map(|t| SchemaDescriptor::new(Arc::new(t)))
+ .map(|s| s.column(0))
+ .unwrap();
+
+ // Construct record reader
+ let mut record_reader = RecordReader::<Int32Type>::new(desc.clone());
+
+ // First page
+
+ // Records data:
+ // test_schema
+ // test_struct
+ // test_schema
+ // test_struct
+ // leaf: 7
+ // test_schema
+ // test_schema
+ // test_struct
+ // leaf: 6
+ // test_schema
+ // test_struct
+ // leaf: 6
+ {
+ let values = [7, 6, 3];
+ //empty, non-empty, empty, non-empty, non-empty
+ let def_levels = [1i16, 2i16, 0i16, 2i16, 2i16];
+ let mut pb = DataPageBuilderImpl::new(desc.clone(), 5, true);
+ pb.add_def_levels(2, &def_levels);
+ pb.add_values::<Int32Type>(Encoding::PLAIN, &values);
+ let page = pb.consume();
+
+ let page_reader = Box::new(InMemoryPageReader::new(vec![page]));
+ record_reader.set_page_reader(page_reader).unwrap();
+ assert_eq!(2, record_reader.skip_records(2).unwrap());
+ assert_eq!(0, record_reader.num_records());
+ assert_eq!(0, record_reader.num_values());
+ assert_eq!(3, record_reader.read_records(3).unwrap());
+ assert_eq!(3, record_reader.num_records());
+ assert_eq!(3, record_reader.num_values());
+ }
+
+ // Second page
+
+ // Records data:
+ // test_schema
+ // test_schema
+ // test_struct
+ // left: 8
+ {
+ let values = [8];
+ //empty, non-empty
+ let def_levels = [0i16, 2i16];
+ let mut pb = DataPageBuilderImpl::new(desc, 2, true);
+ pb.add_def_levels(2, &def_levels);
+ pb.add_values::<Int32Type>(Encoding::PLAIN, &values);
+ let page = pb.consume();
+
+ let page_reader = Box::new(InMemoryPageReader::new(vec![page]));
+ record_reader.set_page_reader(page_reader).unwrap();
+ assert_eq!(2, record_reader.skip_records(10).unwrap());
+ assert_eq!(3, record_reader.num_records());
+ assert_eq!(3, record_reader.num_values());
+ assert_eq!(0, record_reader.read_records(10).unwrap());
+ }
+
+ // Verify result def levels
+ let mut bb = Int16BufferBuilder::new(7);
+ bb.append_slice(&[0i16, 2i16, 2i16]);
+ let expected_def_levels = bb.finish();
+ assert_eq!(
+ Some(expected_def_levels),
+ record_reader.consume_def_levels()
+ );
+
+ // Verify bitmap
+ let expected_valid = &[false, true, true];
+ let expected_buffer = Buffer::from_iter(expected_valid.iter().cloned());
+ let expected_bitmap = Bitmap::from(expected_buffer);
+ assert_eq!(Some(expected_bitmap), record_reader.consume_bitmap());
+
+ // Verify result record data
+ let actual = record_reader.consume_record_data();
+ let actual_values = actual.typed_data::<i32>();
+
+ let expected = &[0, 6, 3];
+ assert_eq!(actual_values.len(), expected.len());
+
+ // Only validate valid values are equal
+ let iter = expected_valid.iter().zip(actual_values).zip(expected);
+ for ((valid, actual), expected) in iter {
+ if *valid {
+ assert_eq!(actual, expected)
+ }
+ }
+ }
}
|
diff --git a/parquet/src/util/test_common/page_util.rs b/parquet/src/util/test_common/page_util.rs
index dffcb2a44e87..243fb6f8b897 100644
--- a/parquet/src/util/test_common/page_util.rs
+++ b/parquet/src/util/test_common/page_util.rs
@@ -24,6 +24,7 @@ use crate::encodings::levels::LevelEncoder;
use crate::errors::Result;
use crate::schema::types::{ColumnDescPtr, SchemaDescPtr};
use crate::util::memory::ByteBufferPtr;
+use std::iter::Peekable;
use std::mem;
pub trait DataPageBuilder {
@@ -127,8 +128,8 @@ impl DataPageBuilder for DataPageBuilderImpl {
encoding: self.encoding.unwrap(),
num_nulls: 0, /* set to dummy value - don't need this when reading
* data page */
- num_rows: self.num_values, /* also don't need this when reading
- * data page */
+ num_rows: self.num_values, /* num_rows only needs in skip_records, now we not support skip REPEATED field,
+ * so we can assume num_values == num_rows */
def_levels_byte_len: self.def_levels_byte_len,
rep_levels_byte_len: self.rep_levels_byte_len,
is_compressed: false,
@@ -149,13 +150,13 @@ impl DataPageBuilder for DataPageBuilderImpl {
/// A utility page reader which stores pages in memory.
pub struct InMemoryPageReader<P: Iterator<Item = Page>> {
- page_iter: P,
+ page_iter: Peekable<P>,
}
impl<P: Iterator<Item = Page>> InMemoryPageReader<P> {
pub fn new(pages: impl IntoIterator<Item = Page, IntoIter = P>) -> Self {
Self {
- page_iter: pages.into_iter(),
+ page_iter: pages.into_iter().peekable(),
}
}
}
@@ -166,11 +167,29 @@ impl<P: Iterator<Item = Page> + Send> PageReader for InMemoryPageReader<P> {
}
fn peek_next_page(&mut self) -> Result<Option<PageMetadata>> {
- unimplemented!()
+ if let Some(x) = self.page_iter.peek() {
+ match x {
+ Page::DataPage { num_values, .. } => Ok(Some(PageMetadata {
+ num_rows: *num_values as usize,
+ is_dict: false,
+ })),
+ Page::DataPageV2 { num_rows, .. } => Ok(Some(PageMetadata {
+ num_rows: *num_rows as usize,
+ is_dict: false,
+ })),
+ Page::DictionaryPage { .. } => Ok(Some(PageMetadata {
+ num_rows: 0,
+ is_dict: true,
+ })),
+ }
+ } else {
+ Ok(None)
+ }
}
fn skip_next_page(&mut self) -> Result<()> {
- unimplemented!()
+ self.page_iter.next();
+ Ok(())
}
}
|
Support `peek_next_page` and `skip_next_page` in `InMemoryPageReader`
**Is your feature request related to a problem or challenge? Please describe what you are trying to do.**
when i was implementing bench using `skip_records` got
```
Benchmarking arrow_array_reader/Int32Array/binary packed skip, mandatory, no NULLs: Warming up for 3.0000 sthread 'main' panicked at 'not implemented', /CLionProjects/github/arrow-rs/parquet/src/util/test_common/page_util.rs:169:9
```
which is unimplemented
**Describe the solution you'd like**
<!--
A clear and concise description of what you want to happen.
-->
**Describe alternatives you've considered**
<!--
A clear and concise description of any alternative solutions or features you've considered.
-->
**Additional context**
<!--
Add any other context or screenshots about the feature request here.
-->
|
2022-08-11T07:39:09Z
|
20.0
|
27f4762c8794ef1c5d042933562185980eb85ae5
|
[
"parquet/src/arrow/mod.rs - arrow (line 55)"
] |
[
"parquet/src/file/mod.rs - file (line 81) - compile",
"parquet/src/arrow/arrow_reader.rs - arrow::arrow_reader::ParquetFileArrowReader::try_new (line 203) - compile",
"parquet/src/file/mod.rs - file (line 64) - compile",
"parquet/src/file/mod.rs - file (line 29) - compile",
"parquet/src/record/api.rs - record::api::Row::get_column_iter (line 62) - compile",
"parquet/src/column/mod.rs - column (line 38) - compile",
"parquet/src/arrow/arrow_writer/mod.rs - arrow::arrow_writer::ArrowWriter (line 54)",
"parquet/src/arrow/mod.rs - arrow (line 27)",
"parquet/src/schema/types.rs - schema::types::ColumnPath::append (line 674)",
"parquet/src/schema/parser.rs - schema::parser (line 24)",
"parquet/src/schema/types.rs - schema::types::ColumnPath::string (line 663)",
"parquet/src/record/api.rs - record::api::RowFormatter (line 140)",
"parquet/src/file/statistics.rs - file::statistics (line 23)",
"parquet/src/schema/mod.rs - schema (line 22)",
"parquet/src/file/properties.rs - file::properties (line 22)",
"parquet/src/schema/printer.rs - schema::printer (line 23)",
"parquet/src/arrow/mod.rs - arrow (line 68)"
] |
[] |
[] |
|
apache/arrow-rs
| 2,377
|
apache__arrow-rs-2377
|
[
"1254"
] |
613b99dcc43ef3af9603fd823f8fe42a801bac19
| "diff --git a/.github/workflows/parquet.yml b/.github/workflows/parquet.yml\nindex d8e09f04ba83..ad6(...TRUNCATED)
| "diff --git a/parquet/src/arrow/array_reader/test_util.rs b/parquet/src/arrow/array_reader/test_util(...TRUNCATED)
| "Fix all clippy lints in parquet crate\n**Describe the bug**\r\nDue to \"historical reasons\" there (...TRUNCATED)
| "I'd like to have a try if no one else has been doing it! \r\nRust Clippy is really interesting!\nTh(...TRUNCATED)
|
2022-08-08T19:09:34Z
|
20.0
|
27f4762c8794ef1c5d042933562185980eb85ae5
|
[
"parquet/src/schema/types.rs - schema::types::ColumnPath::append (line 674)"
] | ["parquet/src/file/mod.rs - file (line 64) - compile","parquet/src/file/mod.rs - file (line 81) - co(...TRUNCATED)
|
[] |
[
"parquet/src/arrow/arrow_writer/mod.rs - arrow::arrow_writer::ArrowWriter (line 55)"
] |
End of preview. Expand
in Data Studio
README.md exists but content is empty.
- Downloads last month
- 1