Use streaming ParquetWriter for single-file output
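reshard() no longer buffers tables and slices them into fixed-size shards driven by --rows-per-shard. It now opens one pq.ParquetWriter per split and appends each input shard as it is read, so peak memory stays around a single input shard and each split is written as one {split}-00000-of-00001.parquet file; the math import and the slicing/remainder bookkeeping are removed.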
reshard_parquet.py  CHANGED  +22 -46
@@ -14,7 +14,6 @@ Usage:
 """
 
 import argparse
-import math
 import os
 from pathlib import Path
 
@@ -29,59 +28,37 @@ def collect_shards(directory: Path, prefix: str) -> list[Path]:
     return sorted(p for p in directory.glob(f"{prefix}-*.parquet"))
 
 
-def reshard(shards: list[Path], split: str,
-    """
-    # Count total rows
+def reshard(shards: list[Path], split: str, out_dir: Path) -> None:
+    """Stream input shards into a single output file using ParquetWriter (low memory)."""
     total_rows = sum(pq.read_metadata(s).num_rows for s in shards)
-
-
-
-
-
-
-
+    print(f" {split}: {total_rows:,} rows across {len(shards)} shards -> 1 output file")
+
+    name = f"{split}-00000-of-00001.parquet"
+    out_path = out_dir / name
+    schema = pq.read_schema(shards[0])
+    writer = pq.ParquetWriter(
+        out_path, schema,
+        compression="zstd",
+        write_page_index=True,
+        use_content_defined_chunking=True,
+    )
+
+    rows_written = 0
     for shard_path in shards:
         table = pq.read_table(shard_path)
-
-
-
-
-            import pyarrow as pa
-            merged = pa.concat_tables(buf_rows)
-
-            # Write exactly rows_per_shard, keep remainder
-            to_write = merged.slice(0, rows_per_shard)
-            remainder = merged.slice(rows_per_shard)
-
-            name = f"{split}-{out_idx:05d}-of-{num_out:05d}.parquet"
-            pq.write_table(to_write, out_dir / name, compression="zstd", row_group_size=200_000, write_page_index=True, use_content_defined_chunking=True)
-            out_idx += 1
-            print(f" wrote {name} ({to_write.num_rows:,} rows)")
-
-            if remainder.num_rows > 0:
-                buf_rows = [remainder]
-                rows_in_buf = remainder.num_rows
-            else:
-                buf_rows = []
-                rows_in_buf = 0
-
-    # Flush remaining
-    if buf_rows:
-        import pyarrow as pa
-        merged = pa.concat_tables(buf_rows)
-        name = f"{split}-{out_idx:05d}-of-{num_out:05d}.parquet"
-        pq.write_table(merged, out_dir / name, compression="zstd", row_group_size=200_000, write_page_index=True, use_content_defined_chunking=True)
-        out_idx += 1
-        print(f" wrote {name} ({merged.num_rows:,} rows)")
+        writer.write_table(table, row_group_size=200_000)
+        rows_written += table.num_rows
+        print(f" appended {shard_path.name} ({rows_written:,}/{total_rows:,} rows)")
+        del table
 
-
+    writer.close()
+    print(f" wrote {name}")
 
 
 def main() -> None:
     parser = argparse.ArgumentParser(description="Re-shard Parquet files into fewer, larger shards")
     parser.add_argument("--input-dir", default=DEFAULT_DIR, help="Directory with existing shards")
     parser.add_argument("--output-dir", default=None, help="Output directory (default: input-dir + '_resharded')")
-    parser.add_argument("--rows-per-shard", type=int, default=1_000_000, help="Rows per output shard")
     args = parser.parse_args()
 
     in_dir = Path(args.input_dir)
@@ -90,13 +67,12 @@ def main() -> None:
 
     print(f"Input: {in_dir}")
     print(f"Output: {out_dir}")
-    print(f"Rows per shard: {args.rows_per_shard:,}")
     print()
 
     for split in ("validation", "train"):
         shards = collect_shards(in_dir, split)
         if shards:
-            reshard(shards, split,
+            reshard(shards, split, out_dir)
 
     print(f"\nDone! Output in {out_dir}")
 
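A quick way to check the result after the script runs is to compare the merged file's metadata against the input shards. A minimal sketch, not part of the commit; it uses only pyarrow, the directory paths are hypothetical, and the output directory follows the script's "input-dir + '_resharded'" default:

# check_reshard.py -- hypothetical verification helper, not part of this commit.
# Confirms each merged file holds exactly the rows of its input shards and
# shows the row-group layout produced by write_table(row_group_size=200_000).
from pathlib import Path

import pyarrow.parquet as pq

in_dir = Path("data/shards")              # hypothetical input directory
out_dir = Path("data/shards_resharded")   # default: input dir + "_resharded"

for split in ("validation", "train"):
    shards = sorted(in_dir.glob(f"{split}-*.parquet"))
    if not shards:
        continue
    expected = sum(pq.read_metadata(s).num_rows for s in shards)

    merged = pq.read_metadata(out_dir / f"{split}-00000-of-00001.parquet")
    assert merged.num_rows == expected, f"{split}: {merged.num_rows} != {expected}"

    groups = [merged.row_group(i).num_rows for i in range(merged.num_row_groups)]
    print(f"{split}: {merged.num_rows:,} rows, {merged.num_row_groups} row groups, largest {max(groups):,}")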