"""Export the SAT spatial-relation dataset to local image files plus JSON metadata.

Loads the train/validation parquet splits, writes each example's images into
images/<split>/<NNN>/ subdirectories (1000 examples per subdirectory), and
dumps the remaining fields of each example to <split>_data.json.
"""
from datasets import load_dataset
import io
from PIL import Image
import json
import os
from tqdm import tqdm


def _save_image(img, img_path):
    """Save a single image to img_path.

    The source column is named 'image_bytes', so entries may arrive either as
    decoded PIL Images or as raw encoded bytes — handle both. (NOTE(review):
    the original code assumed PIL Images only, which would crash on raw bytes;
    the io/PIL imports suggest the bytes path was intended — confirm against
    the parquet schema.)
    """
    if isinstance(img, (bytes, bytearray)):
        # Decode raw image bytes before re-saving as JPEG.
        Image.open(io.BytesIO(img)).save(img_path)
    else:
        img.save(img_path)


def process_dataset(dataset, split_name):
    """Write one split's images to disk and dump its metadata to JSON.

    Args:
        dataset: mapping of split name -> iterable of examples, each with
            keys 'image_bytes', 'question', 'answers', 'question_type',
            'correct_answer'.
        split_name: split to process (e.g. 'train' or 'validation').

    Side effects:
        Creates images/<split_name>/<NNN>/ directories, writes one .jpg per
        image, and writes <split_name>_data.json in the working directory.
    """
    processed_data = []
    for i, example in enumerate(tqdm(dataset[split_name])):
        image_paths = []

        # Shard images into subdirectories of 1000 examples each so no
        # single directory grows unmanageably large.
        subdir_num = i // 1000
        subdir_path = os.path.join("images", split_name, f"{subdir_num:03d}")
        os.makedirs(subdir_path, exist_ok=True)

        for j, img in enumerate(example['image_bytes']):
            # Unique filename: split + zero-padded example index + image index.
            img_filename = f"{split_name}_{i:06d}_{j}.jpg"
            img_path = os.path.join(subdir_path, img_filename)
            _save_image(img, img_path)
            image_paths.append(img_path)

        processed_data.append({
            'image': image_paths,
            'question': example['question'],
            'answers': example['answers'],
            'question_type': example['question_type'],
            'correct_answer': example['correct_answer'],
        })

    output_file = f"{split_name}_data.json"
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(processed_data, f, ensure_ascii=False, indent=2)
    print(f"Saved {len(processed_data)} examples to {output_file}")


if __name__ == "__main__":
    # Directory to hold the exported images.
    os.makedirs("images", exist_ok=True)

    dataset = load_dataset(
        "/mnt/dolphinfs/ssd_pool/docker/user/hadoop-mlm-hl/hadoop-mlm/common/spatial_data/spatial_relation/SAT",
        data_files={
            "train": "SAT_train.parquet",
            "validation": "SAT_val.parquet",
        },
        batch_size=128,
    )

    # Process both splits.
    process_dataset(dataset, "train")
    process_dataset(dataset, "validation")