# File size: 1,888 Bytes
# f706a86
from datasets import load_dataset
import io
from PIL import Image
import json
import os
from tqdm import tqdm
# Create the root directory that exported images are written into.
os.makedirs("images", exist_ok=True)
# Load the SAT spatial-relation dataset from local parquet shards;
# the data_files mapping defines the "train" and "validation" splits.
# NOTE(review): batch_size is forwarded to the parquet builder config,
# not to iteration — confirm 128 is intended here.
dataset = load_dataset(
"/mnt/dolphinfs/ssd_pool/docker/user/hadoop-mlm-hl/hadoop-mlm/common/spatial_data/spatial_relation/SAT",
data_files={
"train": "SAT_train.parquet",
"validation": "SAT_val.parquet",
},
batch_size=128,
)
def process_dataset(dataset, split_name):
    """Export one split: decode and save every image, then dump a JSON manifest.

    Args:
        dataset: mapping of split name -> iterable of examples; each example
            carries 'image_bytes' (list of encoded image bytes, or
            already-decoded PIL images), 'question', 'answers',
            'question_type', and 'correct_answer'.
        split_name: key into ``dataset`` ("train" or "validation"); also used
            as a prefix in output file names.

    Side effects:
        Writes JPEGs under images/<split_name>/<NNN>/ and a
        <split_name>_data.json manifest in the current directory.
    """
    processed_data = []
    for i, example in enumerate(tqdm(dataset[split_name])):
        image_paths = []
        # Shard images into subdirectories of 1000 examples each so no
        # single directory grows unmanageably large.
        subdir_path = os.path.join("images", split_name, f"{i // 1000:03d}")
        os.makedirs(subdir_path, exist_ok=True)
        for j, img in enumerate(example['image_bytes']):
            # Unique filename: split + zero-padded example index + image index.
            img_filename = f"{split_name}_{i:06d}_{j}.jpg"
            img_path = os.path.join(subdir_path, img_filename)
            # BUG FIX: 'image_bytes' holds encoded bytes, which have no
            # .save(); decode through PIL first. Already-decoded PIL
            # images pass through unchanged.
            if isinstance(img, (bytes, bytearray)):
                img = Image.open(io.BytesIO(bytes(img)))
            # JPEG cannot store alpha/palette modes; convert before saving
            # to avoid an OSError from PIL.
            if img.mode not in ("RGB", "L"):
                img = img.convert("RGB")
            img.save(img_path)
            image_paths.append(img_path)
        # Assemble the metadata record pointing at the saved image files.
        processed_data.append({
            'image': image_paths,
            'question': example['question'],
            'answers': example['answers'],
            'question_type': example['question_type'],
            'correct_answer': example['correct_answer'],
        })
    # Persist the whole split as a single UTF-8 JSON manifest.
    output_file = f"{split_name}_data.json"
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(processed_data, f, ensure_ascii=False, indent=2)
    print(f"Saved {len(processed_data)} examples to {output_file}")
# Run the export for each split in turn.
for _split in ("train", "validation"):
    process_dataset(dataset, _split)