"""Load and inspect the SAT spatial-relation dataset from local parquet files."""

from datasets import load_dataset
import io  # kept: handy for decoding `image_bytes` fields interactively — currently unused here
from PIL import Image  # kept: pairs with `io` for Image.open(io.BytesIO(...)) — currently unused here

# Split to inspect. NOTE(review): the DatasetDict keys defined below are
# "train" and "validation" — the previous value "val" would KeyError on
# `dataset[split]`, so it is corrected to match the declared keys.
split = "validation"

# Load both splits from the dataset directory. `data_files` paths are
# resolved relative to the directory given as the first argument.
# NOTE(review): `batch_size` is not a generic `load_dataset` parameter; it is
# forwarded to the inferred parquet builder's config (Arrow read batch size).
# Confirm it is accepted by the installed `datasets` version.
dataset = load_dataset(
    "/mnt/dolphinfs/ssd_pool/docker/user/hadoop-mlm-hl/hadoop-mlm/common/spatial_data/spatial_relation/SAT",
    data_files={
        "train": "SAT_train.parquet",
        "validation": "SAT_val.parquet",
    },
    batch_size=128,
)

# `dataset` is a DatasetDict; pick a split with e.g. dataset["train"].
# Observed per-example schema (from the removed exploratory code):
#   image_bytes     - raw image bytes (decode with PIL.Image.open(io.BytesIO(...)))
#   question        - question text
#   answers         - answer choices
#   correct_answer  - the correct choice
print(dataset)