| import os |
| import json |
| import datasets |
| from datasets import BuilderConfig, Features, ClassLabel, Value, Sequence |
|
|
|
|
# Dataset-card description shown on the Hub. Korean text reads:
# "Korean instruction-learning dataset — GLUE translated into Korean".
# NOTE(review): "변역" looks like a typo for "번역" (translation) — confirm
# before changing, since this is a runtime string.
_DESCRIPTION = """
# 한국어 지시학습 데이터셋
- glue 데이터셋을 한국어로 변역한 데이터셋
"""


# BibTeX entry for the 2023 HCLT paper that introduced this dataset;
# passed to datasets.DatasetInfo below. Runtime string — keep as-is.
_CITATION = """
@inproceedings{KITD,
  title={언어 번역 모델을 통한 한국어 지시 학습 데이터 세트 구축},
  author={임영서, 추현창, 김산, 장진예, 정민영, 신사임},
  booktitle={제 35회 한글 및 한국어 정보처리 학술대회},
  pages={591--595},
  month=oct,
  year={2023}
}
"""
|
|
| |
# Feature schema for the "cola" config: a single sentence with an integer
# label plus the originating per-user record index.
_COLA_FEATURES = Features({
    "data_index_by_user": Value(dtype="int32"),
    "label": Value(dtype="int32"),
    "sentence": Value(dtype="string"),
})
|
|
| def _parsing_cola(file_path): |
| with open(file_path, mode="r") as f: |
| dataset = json.load(f) |
| for _idx, data in enumerate(dataset): |
| _data_index_by_user = data["data_index_by_user"] |
| _label = data["label"] |
| _sentence = data["sentence"] |
|
|
| yield _idx, { |
| "data_index_by_user": _data_index_by_user, |
| "label": _label, |
| "sentence": _sentence |
| } |
|
|
# Feature schema for the "mrpc" config: a sentence pair, an integer label,
# the original GLUE example index and the per-user record index.
_MRPC_FEATURES = Features({
    "data_index_by_user": Value(dtype="int32"),
    "sentence1": Value(dtype="string"),
    "sentence2": Value(dtype="string"),
    "label": Value(dtype="int32"),
    "idx": Value(dtype="int32")
})
|
|
| def _parsing_mrpc(file_path): |
| with open(file_path, mode="r") as f: |
| dataset = json.load(f) |
| for _i, data in enumerate(dataset): |
| _data_index_by_user = data["data_index_by_user"] |
| _sentence1 = data["sentence1"] |
| _sentence2 = data["sentence2"] |
| _label = data["label"] |
| _idx = data["idx"] |
|
|
| yield _i, { |
| "data_index_by_user": _data_index_by_user, |
| "sentence1": _sentence1, |
| "sentence2": _sentence2, |
| "label": _label, |
| "idx": _idx, |
| } |
|
|
# Feature schema for the "qnli" config: a question/sentence pair with an
# integer label and the per-user record index.
_QNLI_FEATURES = Features({
    "data_index_by_user": Value(dtype="int32"),
    "label": Value(dtype="int32"),
    "question": Value(dtype="string"),
    "sentence": Value(dtype="string"),
})
|
|
| def _parsing_qnli(file_path): |
| with open(file_path, mode="r") as f: |
| dataset = json.load(f) |
| for _idx, data in enumerate(dataset): |
| _data_index_by_user = data["data_index_by_user"] |
| _label = data["label"] |
| _question = data["question"] |
| _sentence = data["sentence"] |
|
|
| yield _idx, { |
| "data_index_by_user": _data_index_by_user, |
| "label": _label, |
| "question": _question, |
| "sentence": _sentence, |
| } |
|
|
# Feature schema for the "qqp" config: a question pair, an integer label,
# the original GLUE example index and the per-user record index.
_QQP_FEATURES = Features({
    "data_index_by_user": Value(dtype="int32"),
    "question1": Value(dtype="string"),
    "question2": Value(dtype="string"),
    "label": Value(dtype="int32"),
    "idx": Value(dtype="int32")
})
|
|
| def _parsing_qqp(file_path): |
| with open(file_path, mode="r") as f: |
| dataset = json.load(f) |
| for _i, data in enumerate(dataset): |
| _data_index_by_user = data["data_index_by_user"] |
| _question1 = data["question1"] |
| _question2 = data["question2"] |
| _label = data["label"] |
| _idx = data["idx"] |
|
|
| yield _i, { |
| "data_index_by_user": _data_index_by_user, |
| "question1": _question1, |
| "question2": _question2, |
| "label": _label, |
| "idx": _idx, |
| } |
|
|
# Feature schema for the "wnli" config: a sentence pair, an integer label,
# the original GLUE example index and the per-user record index.
_WNLI_FEATURES = Features({
    "data_index_by_user": Value(dtype="int32"),
    "sentence1": Value(dtype="string"),
    "sentence2": Value(dtype="string"),
    "label": Value(dtype="int32"),
    "idx": Value(dtype="int32")
})
|
|
| def _parsing_wnli(file_path): |
| with open(file_path, mode="r") as f: |
| dataset = json.load(f) |
| for _i, data in enumerate(dataset): |
| _data_index_by_user = data["data_index_by_user"] |
| _sentence1 = data["sentence1"] |
| _sentence2 = data["sentence2"] |
| _label = data["label"] |
| _idx = data["idx"] |
|
|
| yield _i, { |
| "data_index_by_user": _data_index_by_user, |
| "sentence1": _sentence1, |
| "sentence2": _sentence2, |
| "label": _label, |
| "idx": _idx, |
| } |
|
|
class GlueConfig(BuilderConfig):
    """BuilderConfig that bundles the per-task schema and parsing callables.

    Attributes:
        feature: ``datasets.Features`` schema for the task.
        reading_fn: Generator that reads one JSON file and yields examples.
        parsing_fn: Post-processing applied to each yielded example.
        citation: BibTeX citation string for the task.
    """

    def __init__(self, name, feature, reading_fn, parsing_fn, citation, **kwargs):
        super().__init__(name=name, version=datasets.Version("1.0.0"), **kwargs)
        self.feature = feature
        self.reading_fn = reading_fn
        self.parsing_fn = parsing_fn
        self.citation = citation
|
|
class GLUE(datasets.GeneratorBasedBuilder):
    """Builder for the Korean-translated GLUE tasks.

    Data files are expected under the manual/data dir as
    ``<config-name>/<split>.json``; each config wires its own feature
    schema and file-reading generator via :class:`GlueConfig`.
    """

    BUILDER_CONFIGS = [
        GlueConfig(
            name = "cola",
            data_dir = "./glue",
            feature = _COLA_FEATURES,
            reading_fn = _parsing_cola,
            parsing_fn = lambda x:x,
            citation = _CITATION,
        ),
        GlueConfig(
            name = "mrpc",
            data_dir = "./glue",
            feature = _MRPC_FEATURES,
            reading_fn = _parsing_mrpc,
            parsing_fn = lambda x:x,
            citation = _CITATION,
        ),
        GlueConfig(
            name = "qnli",
            data_dir = "./glue",
            feature = _QNLI_FEATURES,
            reading_fn = _parsing_qnli,
            parsing_fn = lambda x:x,
            citation = _CITATION,
        ),
        GlueConfig(
            name = "qqp",
            data_dir = "./glue",
            feature = _QQP_FEATURES,
            reading_fn = _parsing_qqp,
            parsing_fn = lambda x:x,
            citation = _CITATION,
        ),
        GlueConfig(
            name = "wnli",
            data_dir = "./glue",
            feature = _WNLI_FEATURES,
            reading_fn = _parsing_wnli,
            parsing_fn = lambda x:x,
            citation = _CITATION,
        ),
    ]

    def _info(self) -> datasets.DatasetInfo:
        """Returns the dataset metadata."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=self.config.feature,
            # Use the citation stored on the config; previously the module
            # constant was passed directly and the config attribute was
            # never read. All configs currently carry _CITATION, so the
            # produced metadata is unchanged.
            citation=self.config.citation,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Returns SplitGenerators for the task's JSON split files."""

        def _split_path(split_file):
            # All split files live under "<manual_dir>/<config-name>/".
            return os.path.join(
                dl_manager.manual_dir, f"{self.config.name}/{split_file}"
            )

        # NOTE(review): only a train split is wired up for qqp — confirm the
        # validation/test files are genuinely absent for that task.
        if self.config.name == "qqp":
            path_kv = {
                datasets.Split.TRAIN: [_split_path("train.json")],
            }
        else:
            path_kv = {
                datasets.Split.TRAIN: [_split_path("train.json")],
                datasets.Split.VALIDATION: [_split_path("validation.json")],
                datasets.Split.TEST: [_split_path("test.json")],
            }
        return [
            datasets.SplitGenerator(name=k, gen_kwargs={"path_list": v})
            for k, v in path_kv.items()
        ]

    def _generate_examples(self, path_list):
        """Yields (key, example) pairs from every file in *path_list*."""
        for path in path_list:
            try:
                for example in self.config.reading_fn(path):
                    yield self.config.parsing_fn(example)
            except Exception as e:
                # Deliberate best-effort: report a missing/corrupt file but
                # keep generating examples from the remaining files.
                print(e)