| """Stratified sampling of SenseBench dataset (1000 instances). |
| |
| Sampling strategy: |
| - Preserve the original distribution of task types |
| - Preserve the original distribution of image_count (single/paired) |
| - Fixed random seed for reproducibility |
| - Output structure: Perception/{single,paired}, Description/{single,paired} |
| """ |
|
|
| import json |
| import random |
| import shutil |
| from pathlib import Path |
| from collections import Counter |
|
|
SEED = 42       # fixed RNG seed so the sample is reproducible
N_SAMPLE = 1000  # target number of sampled records
SRC_DIR = Path("/home/anxiao/zhongchen/Sensebench/data_hf")  # original dataset root (expects questions.jsonl + images)
DST_DIR = Path("/home/anxiao/zhongchen/Sensebench/codex_code/seleted")  # output root; NOTE(review): "seleted" looks like a typo but is the established path
|
|
|
|
def _stratum(meta):
    """Map a record's ``meta`` dict to its (category, subfolder) stratum.

    Category is "Description" for ``task == "description"``, otherwise
    "Perception"; subfolder is "paired" for ``image_count == "multi"``,
    otherwise "single".
    """
    cat = "Description" if meta["task"] == "description" else "Perception"
    sub = "paired" if meta["image_count"] == "multi" else "single"
    return cat, sub


def main():
    """Draw a stratified sample of N_SAMPLE records and materialize it.

    Reads SRC_DIR/questions.jsonl, samples proportionally per
    (category, subfolder) stratum with a fixed seed, copies the
    referenced images into DST_DIR/{Perception,Description}/{single,paired},
    and writes a rewritten questions.jsonl with relocated image paths.
    """
    random.seed(SEED)

    # Load the full dataset: one JSON record per line.
    with open(SRC_DIR / "questions.jsonl", encoding="utf-8") as f:
        records = [json.loads(line) for line in f]

    total = len(records)
    print(f"Total records: {total}")

    # Group records by stratum to preserve the original distribution.
    groups = {}
    for r in records:
        groups.setdefault(_stratum(r["meta"]), []).append(r)

    print("\nOriginal distribution:")
    for key in sorted(groups):
        print(f"  {key}: {len(groups[key])} ({len(groups[key])/total*100:.1f}%)")

    # Proportional allocation per stratum, with at least one record each.
    sampled = []
    for key in sorted(groups):
        group = groups[key]
        n = max(1, round(len(group) / total * N_SAMPLE))
        sampled.extend(random.sample(group, min(n, len(group))))

    # Rounding may over- or undershoot N_SAMPLE; correct to the exact size.
    if len(sampled) > N_SAMPLE:
        sampled = random.sample(sampled, N_SAMPLE)
    elif len(sampled) < N_SAMPLE:
        # Records are dicts (unhashable), so use identity for membership:
        # every element of `sampled` is taken from `records`, and an
        # id-set lookup avoids the O(n*m) deep-equality scan of
        # `r not in sampled`.
        chosen = {id(r) for r in sampled}
        remaining = [r for r in records if id(r) not in chosen]
        sampled.extend(random.sample(remaining, N_SAMPLE - len(sampled)))

    random.shuffle(sampled)

    # Sanity check: report the sampled distribution next to the original.
    sampled_counter = Counter(_stratum(r["meta"]) for r in sampled)
    print(f"\nSampled: {len(sampled)}")
    print("Sampled distribution:")
    for key in sorted(sampled_counter):
        n = sampled_counter[key]
        print(f"  {key}: {n} ({n/len(sampled)*100:.1f}%)")

    # Create the output directory layout.
    for cat in ["Perception", "Description"]:
        for sub in ["single", "paired"]:
            (DST_DIR / cat / sub).mkdir(parents=True, exist_ok=True)

    # Rewrite image paths and copy each referenced image exactly once.
    # Dedupe on the destination-relative path (not the bare basename):
    # identically named files destined for different {cat}/{sub} folders
    # must each be copied.
    copied = set()
    with open(DST_DIR / "questions.jsonl", "w", encoding="utf-8") as f:
        for r in sampled:
            cat, sub = _stratum(r["meta"])

            new_images = []
            for img_rel in r["images"]:
                img_name = Path(img_rel).name
                new_rel = f"{cat}/{sub}/{img_name}"
                new_images.append(new_rel)

                if new_rel not in copied:
                    src_img = SRC_DIR / img_rel
                    dst_img = DST_DIR / cat / sub / img_name
                    # Best-effort: skip silently if the source is missing
                    # or the destination already exists (e.g. a re-run).
                    if src_img.exists() and not dst_img.exists():
                        shutil.copy2(src_img, dst_img)
                    copied.add(new_rel)

            r["images"] = new_images
            f.write(json.dumps(r, ensure_ascii=False) + "\n")

    # Summarize what landed on disk.
    print("\nOutput:")
    total_size = 0
    for cat in ["Perception", "Description"]:
        for sub in ["single", "paired"]:
            d = DST_DIR / cat / sub
            n = len(list(d.iterdir()))
            s = sum(f.stat().st_size for f in d.iterdir()) / (1024**2)
            total_size += s
            print(f"  {cat}/{sub}: {n} images, {s:.1f} MB")
    print(f"  Total: {total_size:.1f} MB")
    print(f"  Seed: {SEED}")
    print(f"\nOutput: {DST_DIR}")
|
|
|
|
| if __name__ == "__main__": |
| main() |
|
|