Datasets:

Modalities:
Text
Formats:
arrow
Languages:
English
ArXiv:
Libraries:
Datasets
License:
AV / convert_csv_to_arrow.py
AnnaWegmann
Add Thresholding Arrow files, keep CSVs and conversion script
c80cde6
"""
Convert the Thresholding CSV files to Arrow format,
downloading the real files from HuggingFace (bypassing LFS pointers).
"""
from huggingface_hub import hf_hub_download
import pyarrow as pa
import pyarrow.csv as pcsv
from pathlib import Path
# Source dataset repo on the HuggingFace Hub.
REPO = "AnnaWegmann/AV"
# Repo-relative CSV path for each dataset split.
SPLITS = {name: f"thresholding/{name}.csv" for name in ("train", "validation", "test")}
# Convert each split: download the CSV, parse to an Arrow table, and write
# it back out as an Arrow IPC stream file.
for split, csv_repo_path in SPLITS.items():
    print(f"\n--- {split} ---")

    # Download the real CSV content from HuggingFace (hf_hub_download resolves
    # LFS pointer files to the actual payload).
    local_csv = hf_hub_download(REPO, csv_repo_path, repo_type="dataset")
    print(f" Downloaded: {local_csv}")

    # The text columns contain embedded newlines, so the CSV parser must be
    # told not to treat a newline inside a quoted value as a row break.
    parse_opts = pcsv.ParseOptions(newlines_in_values=True)
    table = pcsv.read_csv(local_csv, parse_options=parse_opts)
    print(f" Rows: {table.num_rows}, Cols: {table.column_names}")

    # Write as Arrow IPC streaming format (same layout as the working
    # Contrastive_Learning files). Using the writer as a context manager
    # guarantees the stream footer is written and the writer is closed
    # even if write_table raises.
    out_dir = Path("thresholding") / split
    out_dir.mkdir(parents=True, exist_ok=True)
    out_path = out_dir / "data-00000-of-00001.arrow"
    with open(out_path, "wb") as f, pa.ipc.new_stream(f, table.schema) as writer:
        writer.write_table(table)
    print(f" Wrote: {out_path} ({out_path.stat().st_size:,} bytes)")

print("\nDone! Now delete the old CSV files, update README.md, and push.")