| from datasets import load_dataset, Dataset, DatasetDict | |
| def main(): | |
| datasets = load_dataset('allenai/qasc') | |
| _datasets = {} | |
| for split in ['train', 'validation']: | |
| data = [] | |
| split_dataset = datasets[split] | |
| for example in split_dataset: | |
| question = str(example['question']).strip() | |
| choices = example['choices']['text'] | |
| answer = example['answerKey'] | |
| answer_index = ord(answer) - ord('A') | |
| data.append({ | |
| 'question': question, | |
| 'choices': choices, | |
| 'answer': answer, | |
| 'answer_index': answer_index, | |
| }) | |
| _datasets[split] = Dataset.from_list(data) | |
| datasets = DatasetDict(_datasets) | |
| datasets.push_to_hub('extraordinarylab/qasc') | |
# Run the conversion only when executed as a script, not on import.
if __name__ == '__main__':
    main()