# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "pyarrow>=21.0",  # use_content_defined_chunking needs pyarrow 21+
# ]
# ///
"""Convert parameter-golf docs_selected.jsonl to sharded Parquet files.
Reads the 45GB JSONL (15.3M docs with {"text": ...}), splits into
validation (first 50k) and train (remainder), and writes sharded
Parquet files in data/{split}-XXXXX-of-YYYYY.parquet layout that
the HuggingFace dataset viewer auto-detects.
Usage:
uv run convert_to_parquet.py [--input PATH] [--output-dir PATH] [--rows-per-shard N]
"""
import argparse
import json
import math
import os
from pathlib import Path

import pyarrow as pa
import pyarrow.parquet as pq
DEFAULT_INPUT = os.path.expanduser(
    "~/.cache/huggingface/hub/datasets--willdepueoai--parameter-golf/"
    "snapshots/a85b0e6035c3c94bc23685a07c81a8f3bf89db80/"
    "datasets/docs_selected.jsonl"
)
DEFAULT_OUTPUT = os.path.expanduser("~/parameter-golf/data")

NUM_VAL_DOCS = 50_000
ROWS_PER_SHARD = 50_000

# Single-column schema matching the {"text": ...} records in the JSONL.
SCHEMA = pa.schema([("text", pa.string())])


def write_shard(rows: list[str], path: Path) -> None:
    table = pa.table({"text": rows}, schema=SCHEMA)
    pq.write_table(
        table,
        path,
        compression="zstd",
        row_group_size=200_000,
        # Write the Parquet page index so readers can do page-level pruning.
        write_page_index=True,
        # Content-defined chunking makes data-page boundaries depend on the
        # content itself, which improves chunk-level dedup when shards are
        # re-uploaded (pyarrow 21+).
        use_content_defined_chunking=True,
    )
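

# A minimal sketch (not called by the conversion flow) for sanity-checking a
# written shard via standard pyarrow metadata accessors; the helper name
# inspect_shard is ours, not part of the original script.
def inspect_shard(path: Path) -> None:
    """Print row and row-group counts for one shard (debugging aid)."""
    md = pq.ParquetFile(path).metadata
    print(f"{path.name}: {md.num_rows:,} rows in {md.num_row_groups} row group(s)")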


def flush_split(
    rows: list[str],
    shard_idx: int,
    total_shards: int,
    split: str,
    out_dir: Path,
) -> None:
    name = f"{split}-{shard_idx:05d}-of-{total_shards:05d}.parquet"
    write_shard(rows, out_dir / name)


def count_lines(path: str) -> int:
    """Fast line count without loading the whole file into memory."""
    count = 0
    with open(path, "rb") as f:
        # Read 1 MiB chunks from the unbuffered raw stream and count newlines.
        while chunk := f.raw.read(1 << 20):
            count += chunk.count(b"\n")
    return count
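
# Hedged cross-check: the same number should come back from `wc -l` on the
# input file, since both simply count newline bytes:
#   wc -l ~/.cache/huggingface/.../docs_selected.jsonl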


def main() -> None:
    parser = argparse.ArgumentParser(description="Convert JSONL to sharded Parquet")
    parser.add_argument("--input", default=DEFAULT_INPUT, help="Path to docs_selected.jsonl")
    parser.add_argument("--output-dir", default=DEFAULT_OUTPUT, help="Output directory for parquet shards")
    parser.add_argument("--rows-per-shard", type=int, default=ROWS_PER_SHARD, help="Rows per parquet shard")
    parser.add_argument("--num-val-docs", type=int, default=NUM_VAL_DOCS, help="Number of validation documents (taken from start)")
    args = parser.parse_args()

    input_path = os.path.realpath(args.input)
    out_dir = Path(args.output_dir)
    out_dir.mkdir(parents=True, exist_ok=True)
    rows_per_shard = args.rows_per_shard
    num_val = args.num_val_docs

    print(f"Counting lines in {input_path} ...")
    total_docs = count_lines(input_path)
    num_train = total_docs - num_val
    print(f"Total docs: {total_docs:,} (val: {num_val:,}, train: {num_train:,})")

    # Shard counts are fixed up front so every file name can carry the final
    # -of-YYYYY suffix from the first write onward.
    val_shards = math.ceil(num_val / rows_per_shard)
    train_shards = math.ceil(num_train / rows_per_shard)
    print(f"Shards — val: {val_shards}, train: {train_shards}")

    val_shard_idx = 0
    train_shard_idx = 0
    buf: list[str] = []
    line_no = 0
    with open(input_path, "r") as f:
        for raw_line in f:
            text = json.loads(raw_line)["text"]
            buf.append(text)
            line_no += 1
            if line_no <= num_val:
                # Validation split: flush on a full buffer or at the split boundary.
                if len(buf) == rows_per_shard or line_no == num_val:
                    flush_split(buf, val_shard_idx, val_shards, "validation", out_dir)
                    print(f"  wrote validation shard {val_shard_idx + 1}/{val_shards}")
                    val_shard_idx += 1
                    buf = []
            else:
                # Train split: flush on a full buffer or at end of input.
                if len(buf) == rows_per_shard or line_no == total_docs:
                    flush_split(buf, train_shard_idx, train_shards, "train", out_dir)
                    if (train_shard_idx + 1) % 20 == 0 or line_no == total_docs:
                        print(f"  wrote train shard {train_shard_idx + 1}/{train_shards}")
                    train_shard_idx += 1
                    buf = []

    # Flush any remaining rows (shouldn't happen if the line count is exact).
    if buf and train_shard_idx < train_shards:
        flush_split(buf, train_shard_idx, train_shards, "train", out_dir)
        train_shard_idx += 1  # count the final shard in the summary below
        print(f"  wrote final train shard {train_shard_idx}/{train_shards}")

    print(f"\nDone! Wrote {val_shard_idx} validation + {train_shard_idx} train shards to {out_dir}")


if __name__ == "__main__":
    main()
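

# Hedged usage note (assumption: the data/ directory gets pushed to the
# willdepueoai/parameter-golf dataset repo, whose id is inferred from the
# cache path above). The datasets library recognizes the
# data/{split}-XXXXX-of-YYYYY.parquet layout and exposes both splits:
#
#   from datasets import load_dataset
#   val = load_dataset("willdepueoai/parameter-golf", split="validation")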