|
|
from datasets import load_dataset |
|
|
import io |
|
|
from PIL import Image |
|
|
import json |
|
|
import os |
|
|
from tqdm import tqdm |
|
|
|
|
|
|
|
|
# Root directory for all exported images (per-split subdirs are created later).
os.makedirs("images", exist_ok=True)

# Local path to the SAT spatial-relation dataset and its per-split parquet files.
_SAT_PATH = "/mnt/dolphinfs/ssd_pool/docker/user/hadoop-mlm-hl/hadoop-mlm/common/spatial_data/spatial_relation/SAT"
_SAT_FILES = {
    "train": "SAT_train.parquet",
    "validation": "SAT_val.parquet",
}

# NOTE: batch_size is forwarded to the parquet builder config — presumably to
# control the Arrow read batch size; confirm against the datasets version in use.
dataset = load_dataset(
    _SAT_PATH,
    data_files=_SAT_FILES,
    batch_size=128,
)
|
|
|
|
|
def process_dataset(dataset, split_name):
    """Export one dataset split: save its images as JPEGs on disk and write
    the remaining fields (with image file paths) to ``{split_name}_data.json``.

    Args:
        dataset: A mapping of split name -> iterable of example dicts.
            Each example is expected to have keys 'image_bytes', 'question',
            'answers', 'question_type', and 'correct_answer'.
        split_name: Which split to export (e.g. "train" or "validation").
    """
    processed_data = []

    for i, example in enumerate(tqdm(dataset[split_name])):
        image_paths = []

        # Shard images into numbered subdirectories of 1000 examples each
        # to avoid huge flat directories.
        subdir_num = i // 1000
        subdir_path = os.path.join("images", split_name, f"{subdir_num:03d}")
        os.makedirs(subdir_path, exist_ok=True)

        for j, img in enumerate(example['image_bytes']):
            # The field name suggests raw encoded bytes; decode them to a
            # PIL image when needed. Already-decoded PIL images pass through
            # unchanged, so both storage formats are handled.
            if isinstance(img, (bytes, bytearray)):
                img = Image.open(io.BytesIO(img))
            # JPEG cannot encode alpha or palette modes — convert to RGB
            # so .save() below does not raise for RGBA/P images.
            if img.mode not in ("RGB", "L"):
                img = img.convert("RGB")

            img_filename = f"{split_name}_{i:06d}_{j}.jpg"
            img_path = os.path.join(subdir_path, img_filename)
            img.save(img_path)
            image_paths.append(img_path)

        processed_example = {
            'image': image_paths,
            'question': example['question'],
            'answers': example['answers'],
            'question_type': example['question_type'],
            'correct_answer': example['correct_answer'],
        }
        processed_data.append(processed_example)

    # Write all metadata for this split in one JSON file; ensure_ascii=False
    # keeps any non-ASCII question/answer text readable.
    output_file = f"{split_name}_data.json"
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(processed_data, f, ensure_ascii=False, indent=2)

    print(f"Saved {len(processed_data)} examples to {output_file}")
|
|
|
|
|
|
|
|
# Export both splits to disk (images + JSON metadata).
for _split in ("train", "validation"):
    process_dataset(dataset, _split)
|
|
|