Datasets:
Tasks:
Visual Question Answering
Formats:
parquet
Languages:
English
Size:
10K - 100K
ArXiv:
License:
File size: 1,727 Bytes
Commit: d7cb1a3
#!/usr/bin/env python3
# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "fastparquet",
#     "pandas",
#     "pyarrow",
# ]
# ///
"""
Create parquet files for config subsets of the VSI-Bench dataset.

* debiased: all examples not pruned by Iterative Bias Pruning (aka VSI-Bench-Debiased)
* pruned: all examples pruned by Iterative Bias Pruning

> [!NOTE]
> If you do not pass `index=False`, the parquet files will have a `__index_level_0__` column
"""
import pandas as pd
from pathlib import Path


def main() -> None:
    """Split test.jsonl into debiased/pruned parquet files using pruned_ids.txt.

    Reads ``pruned_ids.txt`` (one example id per line) and ``test.jsonl``
    from the script's own directory, then writes two parquet files next to
    them: ``test_debiased.parquet`` (ids NOT in the pruned list) and
    ``test_pruned.parquet`` (ids in the pruned list).
    """
    script_dir = Path(__file__).parent
    pruned_ids_path = script_dir / "pruned_ids.txt"
    test_jsonl_path = script_dir / "test.jsonl"
    pq_debiased_path = script_dir / "test_debiased.parquet"
    pq_pruned_path = script_dir / "test_pruned.parquet"

    print("Creating parquet files...")
    print(f"Loading pruned ids from '{pruned_ids_path}'...")
    # Explicit encoding so id parsing doesn't depend on the platform locale.
    # Strip and drop blank lines so a trailing newline can't add an empty id;
    # a set also gives O(1) membership tests inside `isin` below.
    with open(pruned_ids_path, "r", encoding="utf-8") as f:
        pruned_ids = {line.strip() for line in f if line.strip()}
    print(f" -> Loaded {len(pruned_ids)} pruned ids.")

    print(f"Loading test data from '{test_jsonl_path}'...")
    df = pd.read_json(test_jsonl_path, lines=True)
    print(f" -> Loaded {len(df)} examples.")

    # Compare as strings because pruned_ids.txt is read as text while the
    # JSONL "id" field may deserialize as int.
    df["pruned"] = df["id"].astype(str).isin(pruned_ids)
    print(" -> Added pruned column.")

    # save the debiased and pruned subsets separately to parquet files
    df_debiased = df[~df["pruned"]]
    df_pruned = df[df["pruned"]]
    print(f"Saving debiased examples to '{pq_debiased_path}'...")
    df_debiased.to_parquet(pq_debiased_path, index=False)
    print(f" -> Saved {len(df_debiased)} debiased examples.")
    print(f"Saving pruned examples to '{pq_pruned_path}'...")
    df_pruned.to_parquet(pq_pruned_path, index=False)
    print(f" -> Saved {len(df_pruned)} pruned examples.")
    print("Done.")


if __name__ == "__main__":
    main()
|