"""
Convert the Thresholding CSV files to Arrow format,
downloading the real files from HuggingFace (bypassing LFS pointers).
"""
from huggingface_hub import hf_hub_download
import pyarrow as pa
import pyarrow.csv as pcsv
from pathlib import Path
# HuggingFace Hub dataset repo and the CSV path for each split.
REPO = "AnnaWegmann/AV"
SPLITS = {
    split: f"thresholding/{split}.csv"
    for split in ("train", "validation", "test")
}
for split, csv_repo_path in SPLITS.items():
    print(f"\n--- {split} ---")
    # Download the real CSV content; hf_hub_download resolves LFS pointer
    # files to the actual blobs, which is the point of this script.
    local_csv = hf_hub_download(REPO, csv_repo_path, repo_type="dataset")
    print(f" Downloaded: {local_csv}")
    # The text columns contain embedded newlines, so the CSV parser must be
    # told to allow newlines inside quoted values.
    parse_opts = pcsv.ParseOptions(newlines_in_values=True)
    table = pcsv.read_csv(local_csv, parse_options=parse_opts)
    print(f" Rows: {table.num_rows}, Cols: {table.column_names}")
    # Write as Arrow IPC streaming format (same as the working Contrastive_Learning files)
    out_dir = Path("thresholding") / split
    out_dir.mkdir(parents=True, exist_ok=True)
    out_path = out_dir / "data-00000-of-00001.arrow"
    # Use context managers for both the file and the IPC writer: the original
    # called writer.close() outside any try/finally, so an exception during
    # write_table left the stream unfinalized (no end-of-stream marker).
    # RecordBatchStreamWriter closes (and finalizes) on __exit__.
    with open(out_path, "wb") as f:
        with pa.ipc.new_stream(f, table.schema) as writer:
            writer.write_table(table)
    print(f" Wrote: {out_path} ({out_path.stat().st_size:,} bytes)")
print("\nDone! Now delete the old CSV files, update README.md, and push.")