| import os |
| import json |
| from glob import glob |
| |
| import datasets |
|
|
| _DESCRIPTION = """\ |
| # KoWoW: Korean Wizard of Wikipedia |
| - WoW(Wizard of Wikipedia) 데이터셋을 한국어로 번역한 데이터셋 |
| |
| ## Data |
| |
| - en: KoWoW En-En, Knowledge-English, Utterance-English |
| - ko: KoWoW Ko-Ko, Knowledge-Korean, Utterance-Korean |
| - ek: KoWoW En-Ko, Knowledge-English, Utterance-Korean |
| - ke: KoWoW Ko-En, Knowledge-Korean, Utterance-English |
| |
| ## Usage |
| ```python |
| import datasets |
| |
| raw_datsets = datasets.load_dataset( |
| "kowow.py", |
| "kowow.ko.random.v1.0", |
| cache_dir="huggingface_datasets", |
| data_dir="data/ko", # choose en, ko, ek, or ke |
| ) |
| |
| ``` |
| |
| """ |
|
|
# BibTeX entry for the KoWoW paper (Kim et al., Findings of EMNLP 2021).
_KOWOW_CITATION = """
@inproceedings{kim-etal-2021-model-cross,
    title = "A Model of Cross-Lingual Knowledge-Grounded Response Generation for Open-Domain Dialogue Systems",
    author = "Kim, San  and
      Jang, Jin Yea  and
      Jung, Minyoung  and
      Shin, Saim",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2021",
    month = nov,
    year = "2021",
    address = "Punta Cana, Dominican Republic",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.findings-emnlp.33",
    doi = "10.18653/v1/2021.findings-emnlp.33",
    pages = "352--365",
    abstract = "Research on open-domain dialogue systems that allow free topics is challenging in the field of natural language processing (NLP). The performance of the dialogue system has been improved recently by the method utilizing dialogue-related knowledge; however, non-English dialogue systems suffer from reproducing the performance of English dialogue systems because securing knowledge in the same language with the dialogue system is relatively difficult. Through experiments with a Korean dialogue system, this paper proves that the performance of a non-English dialogue system can be improved by utilizing English knowledge, highlighting the system uses cross-lingual knowledge. For the experiments, we 1) constructed a Korean version of the Wizard of Wikipedia dataset, 2) built Korean-English T5 (KE-T5), a language model pre-trained with Korean and English corpus, and 3) developed a knowledge-grounded Korean dialogue model based on KE-T5. We observed the performance improvement in the open-domain Korean dialogue model even only English knowledge was given. The experimental results showed that the knowledge inherent in cross-lingual language models can be helpful for generating responses in open dialogue systems.",
}
"""
|
|
# Dataset version (second argument is an optional release description).
_VERSION = datasets.Version('1.0.0', "")


# Sentinel used by Wizard-of-Wikipedia when a wizard turn grounded on no passage.
TOKEN_NOCHOSEN = 'no_passages_used'
# Separators used when flattening (title, passage) / (speaker, utterance)
# pairs into single strings for the v1.1 configs.
# NOTE: "SEPERATOR" (sic) misspelling kept — renaming would touch every usage.
TITLE_PASSAGE_SEPERATOR = "\t"
SPEAKER_UTTERANCE_SEPERATOR = "\t"
|
|
# Schema for a single dialogue turn: who spoke ("wizard"/"apprentice") and
# the utterance text.
_WOW_UTTERANCE_FEATURE = datasets.Features({
    "speaker": datasets.Value("string"),
    "utterance": datasets.Value("string")
})


# Schema for a single knowledge item: the source passage title and one
# sentence taken from that passage.
_WOW_KNOWLEDGE_FEATURE = datasets.Features({
    "title": datasets.Value("string"),
    "passage": datasets.Value("string")
})
|
|
|
|
# Structured example schema used by the v1.0 configs (examples produced by
# `_parsing_wow`): utterances and knowledge are kept as nested records.
_WOW_FEATURE = datasets.Features({
    "id": datasets.Value("string"),
    "chosen_topic": datasets.Value("string"),
    "persona": datasets.Value("string"),
    "wizard_eval": datasets.Value("int32"),
    "speaker": datasets.Value("string"),
    # The wizard turn this example is built around.
    "current_utterance": _WOW_UTTERANCE_FEATURE,
    # All turns preceding the current one.
    "history": datasets.Sequence(_WOW_UTTERANCE_FEATURE),
    # The knowledge sentence the wizard actually grounded on.
    "gold_knowledge": _WOW_KNOWLEDGE_FEATURE,
    # Other retrieved knowledge candidates (distractors).
    "retrieved_passages": datasets.Sequence(_WOW_KNOWLEDGE_FEATURE),
    "candidate_responses": datasets.Sequence(datasets.Value("string")),
})
|
|
# Flattened example schema used by the v1.1 configs (examples produced by
# `_parsing_wow_format`): utterances and knowledge are pre-joined strings
# using the SEPERATOR constants above.
_WOW_FEATURE_FORMAT = datasets.Features({
    "id": datasets.Value("string"),
    "chosen_topic": datasets.Value("string"),
    "persona": datasets.Value("string"),
    "wizard_eval": datasets.Value("int32"),
    "speaker": datasets.Value("string"),
    # "speaker \t text" form of the current wizard turn.
    "current_utterance": datasets.Value("string"),
    # The apprentice turn right before the current one ("" if none).
    "previous_utterance": datasets.Value("string"),
    "history": datasets.Sequence(datasets.Value("string")),
    # "title \t sentence" form of the grounded knowledge.
    "gold_knowledge": datasets.Value("string"),
    # Knowledge distractors for retriever training (gold removed).
    "negative_candidates": datasets.Sequence(datasets.Value("string")),
    "candidate_responses": datasets.Sequence(datasets.Value("string")),
})
|
|
def formatting_context4retriever(title, passage):
    """Join a knowledge title and passage into one retriever-input string."""
    return f"{title} {TITLE_PASSAGE_SEPERATOR} {passage}"
|
|
def get_no_chosen():
    """Return the formatted 'no knowledge chosen' sentinel candidate."""
    return formatting_context4retriever(title=TOKEN_NOCHOSEN, passage=TOKEN_NOCHOSEN)
|
|
def formatting_utterance(speaker, utterance):
    """Join a speaker tag and utterance text into one flattened string."""
    return f"{speaker} {SPEAKER_UTTERANCE_SEPERATOR} {utterance}"
|
|
| def _first_val(dictionary): |
| vals = list(dictionary.values()) |
| if len(vals) > 0: |
| return vals[0] |
| return '' |
|
|
| def _first_key(dictionary): |
| keys = list(dictionary.keys()) |
| if len(keys) > 0: |
| return keys[0] |
| return '' |
|
|
def _get_chosen_title_and_sent(wizard_entry, k_dict):
    """
    Return a nicely extracted title and chosen sentence.

    Follows the ParlAI Wizard-of-Wikipedia convention: `wizard_entry` is a
    wizard turn dict carrying 'checked_passage' / 'checked_sentence', and
    `k_dict` maps passage titles to lists of sentences.

    :return: pair (title, sentence). Both are TOKEN_NOCHOSEN when the wizard
        grounded on nothing; title is '' when the chosen sentence cannot be
        located in any passage of `k_dict` (callers skip such turns).
    """
    # NOTE(review): the fallback default is the *string* 'none', which is
    # truthy — if 'checked_passage' were genuinely absent, _first_val('none')
    # would fail on a str. Presumably the data always has the key; confirm.
    title_dict = wizard_entry.get('checked_passage', 'none')
    sentence_dict = wizard_entry.get('checked_sentence', {})
    title = None
    sentence = None
    if sentence_dict == {}:
        # No checked sentence at all: treat as "no passage used".
        title = sentence = TOKEN_NOCHOSEN
    else:
        sentence = _first_val(sentence_dict)
        if sentence == TOKEN_NOCHOSEN:
            title = TOKEN_NOCHOSEN
        else:
            # '' means "not located yet"; falsy so the caller can skip it.
            title = ''

            # Candidate 1: the passage title the wizard explicitly checked.
            cand_title1 = _first_val(title_dict) if title_dict else ''


            # Candidate 2: recover the title from the checked_sentence key,
            # which is formatted like '<prefix>_<title words>_<index>'.
            cand_title2 = ' '.join(_first_key(sentence_dict).split('_')[1:-1])
            if (
                cand_title1
                and cand_title1 in k_dict
                and sentence in k_dict[cand_title1]
            ):
                title = cand_title1
            elif cand_title2 in k_dict and sentence in k_dict[cand_title2]:
                title = cand_title2
            else:
                # Last resort: linear scan for any passage containing the sentence.
                for t, passage in k_dict.items():
                    if sentence in passage:
                        title = t
                        break

    return title, sentence
|
|
def _parsing_wow(file_path):
    """Yield (key, example) pairs in the structured `_WOW_FEATURE` schema.

    Reads one WoW-style JSON file (a list of dialogues) and emits one
    example per *wizard* turn, pairing it with the dialogue history, the
    gold knowledge sentence, and distractor knowledge candidates.

    :param file_path: path to a split JSON file (e.g. train.json).
    """
    # Close the file handle deterministically instead of relying on GC.
    with open(file_path, "r") as f:
        data = json.load(f)
    name = file_path.split("/")[-1].split(".")[0]

    id_fmt = "{}_{}_{}"
    # `dialog_idx` instead of the original `id`, which shadowed the builtin.
    for dialog_idx, d in enumerate(data):

        _persona = d["persona"]
        _wizard_eval = d["wizard_eval"]

        # True when the wizard speaks first in this dialogue.
        wizard_first = 'wizard' == d["dialog"][0]['speaker'].split("_")[-1].lower()

        _chosen_topic = d.get("chosen_topic", "")
        _chosen_topic_passage = d["chosen_topic_passage"]

        history = []
        dialog = d["dialog"]
        for d_id, _d in enumerate(dialog):

            # Speaker field looks like "<n>_Wizard" / "<n>_Apprentice".
            speaker = _d["speaker"].split("_")[-1].lower()

            history.append({
                "speaker": speaker,
                "utterance": _d["text"]
            })

            # Only wizard turns produce knowledge-grounded examples.
            if speaker != "wizard":
                continue

            _id = id_fmt.format(name, dialog_idx, d_id)

            # Knowledge pool: chosen-topic passage plus passages retrieved
            # for the previous apprentice turn and the wizard's own previous turn.
            knowledge_dict = {_chosen_topic: _chosen_topic_passage}

            apprentice_ret_passages = wizard_ret_passages = {}
            if not wizard_first or d_id > 0:
                apprentice_ret_passages = dialog[d_id - 1]["retrieved_passages"]
            if d_id >= 2:
                wizard_ret_passages = dialog[d_id - 2]["retrieved_passages"]

            for ret_pas in [apprentice_ret_passages, wizard_ret_passages]:
                for pas in ret_pas:
                    for k, v in pas.items():
                        if k not in knowledge_dict:
                            knowledge_dict[k] = v

            gold_title, gold_sentence = _get_chosen_title_and_sent(
                wizard_entry=_d, k_dict=knowledge_dict)

            # '' title means the chosen sentence was not found in the pool.
            if not gold_title:
                continue

            gold_knowledge = {
                "title": gold_title,
                "passage": gold_sentence
            }

            candidates = set()
            for cand_title, passages in knowledge_dict.items():
                for p in passages:
                    candidates.add((cand_title, p))

            # BUG FIX: the original checked `gold_knowledge in candidates`,
            # but `gold_knowledge` is a dict while `candidates` held tuples,
            # so the gold item was never removed (unlike the parallel
            # `_parsing_wow_format`, which does remove it). Remove the gold
            # (title, sentence) pair from the distractors here.
            candidates.discard((gold_title, gold_sentence))

            candidates = [{
                "title": t,
                "passage": p
            } for t, p in candidates]

            if not candidates:
                continue

            current_text = _d.get("text")

            # Drop the ground-truth response from the distractor responses.
            candidate_responses = _d.get("candidate_responses", [])
            if current_text in candidate_responses:
                candidate_responses.remove(current_text)

            yield _id, {
                "id": _id,
                "chosen_topic": _chosen_topic,
                "persona": _persona,
                "wizard_eval": _wizard_eval,
                "speaker": speaker,
                "current_utterance": {
                    "speaker": speaker,
                    "utterance": current_text
                },
                # Everything before the current wizard turn.
                "history": history[:-1],
                "gold_knowledge": gold_knowledge,
                "retrieved_passages": candidates,
                "candidate_responses": candidate_responses
            }
|
|
def _parsing_wow_format(file_path):
    """Yield (key, example) pairs in the flattened `_WOW_FEATURE_FORMAT` schema.

    Same traversal as `_parsing_wow`, but utterances and knowledge are
    flattened to single strings via `formatting_utterance` /
    `formatting_context4retriever`, a `no_passages_used` candidate is added,
    and the previous apprentice utterance is emitted separately.

    :param file_path: path to a split JSON file (e.g. train.json).
    """
    # Close the file handle deterministically instead of relying on GC.
    with open(file_path, "r") as f:
        data = json.load(f)
    name = file_path.split("/")[-1].split(".")[0]

    id_fmt = "{}_{}_{}"
    # `dialog_idx` instead of the original `id`, which shadowed the builtin.
    for dialog_idx, d in enumerate(data):

        _persona = d["persona"]
        _wizard_eval = d["wizard_eval"]

        # True when the wizard speaks first in this dialogue.
        wizard_first = 'wizard' == d["dialog"][0]['speaker'].split("_")[-1].lower()

        _chosen_topic = d.get("chosen_topic", "")
        _chosen_topic_passage = d["chosen_topic_passage"]

        history = []
        dialog = d["dialog"]
        for d_id, _d in enumerate(dialog):

            # Speaker field looks like "<n>_Wizard" / "<n>_Apprentice".
            speaker = _d["speaker"].split("_")[-1].lower()

            history.append(formatting_utterance(speaker, _d["text"]))

            # Only wizard turns produce knowledge-grounded examples.
            if speaker != "wizard":
                continue

            _id = id_fmt.format(name, dialog_idx, d_id)

            # Knowledge pool: chosen-topic passage plus passages retrieved
            # for the previous apprentice turn and the wizard's own previous turn.
            knowledge_dict = {_chosen_topic: _chosen_topic_passage}

            apprentice_entry = None
            apprentice_ret_passages = wizard_ret_passages = {}
            if not wizard_first or d_id > 0:
                apprentice_entry = dialog[d_id - 1]
                apprentice_ret_passages = apprentice_entry["retrieved_passages"]
            if d_id >= 2:
                wizard_ret_passages = dialog[d_id - 2]["retrieved_passages"]

            for ret_pas in [apprentice_ret_passages, wizard_ret_passages]:
                for pas in ret_pas:
                    for k, v in pas.items():
                        if k not in knowledge_dict:
                            knowledge_dict[k] = v

            # Previous apprentice utterance, "" when the wizard opens the dialogue.
            prev_app_ut = apprentice_entry["text"] if apprentice_entry is not None else ""
            if prev_app_ut != "":
                prev_app_ut = formatting_utterance("apprentice", prev_app_ut)

            title, sentence = _get_chosen_title_and_sent(
                wizard_entry=_d, k_dict=knowledge_dict)

            # '' title means the chosen sentence was not found in the pool.
            if not title:
                continue

            gold_knowledge = formatting_context4retriever(title=title, passage=sentence)

            # Negative candidates always include the "no knowledge" sentinel.
            candidates = {get_no_chosen()}
            for cand_title, passages in knowledge_dict.items():
                for p in passages:
                    candidates.add(
                        formatting_context4retriever(title=cand_title, passage=p))

            # Exclude the gold knowledge from the negatives (set discard
            # replaces the side-effecting conditional-expression `remove`).
            candidates.discard(gold_knowledge)
            candidates = list(candidates)

            if not candidates:
                continue

            current_text = _d.get("text")

            # Drop the ground-truth response from the distractor responses.
            candidate_responses = _d.get("candidate_responses", [])
            if current_text in candidate_responses:
                candidate_responses.remove(current_text)

            yield _id, {
                "id": _id,
                "chosen_topic": _chosen_topic,
                "persona": _persona,
                "wizard_eval": _wizard_eval,
                "speaker": speaker,
                "current_utterance": formatting_utterance(speaker, current_text),
                "previous_utterance": prev_app_ut,
                # Drop the current wizard turn and the apprentice turn right
                # before it (the latter is exposed via `previous_utterance`).
                "history": history[:-2],
                "gold_knowledge": gold_knowledge,
                "negative_candidates": candidates,
                "candidate_responses": candidate_responses
            }
| |
|
|
class KOWOWConfig(datasets.BuilderConfig):
    """BuilderConfig for one KoWoW variant.

    Bundles the feature schema, the split-name -> file-pattern mapping,
    and the reading/parsing callables used by the builder.
    """

    def __init__(self,
                 name,
                 feature,
                 data_sp_path,
                 reading_fn,
                 parsing_fn,
                 citation,
                 additional_data_root=None,
                 homepage='https://parl.ai/',
                 split_fn=None,
                 metadata=None,
                 **kwargs):
        super().__init__(name=name, version=_VERSION, **kwargs)
        self.feature = feature                          # datasets.Features schema
        self.data_sp_path = data_sp_path                # split name -> glob pattern
        self.reading_fn = reading_fn                    # file path -> example generator
        self.parsing_fn = parsing_fn                    # per-example post-processing
        self.citation = citation
        self.additional_data_root = additional_data_root
        self.homepage = homepage
        self.split_fn = split_fn
        self.metadata = metadata
| |
class KOWOW(datasets.GeneratorBasedBuilder):
    """DatasetBuilder for the KoWoW dataset.

    Data files are not downloaded automatically: point `data_dir` at the
    directory containing the split JSON files (see `_DESCRIPTION`).
    """

    RELEASE_NOTES = {
        '1.0.0': 'Initial release.',
    }

    def _build_configs():
        """Generate all configs: {v1.0, v1.1} x {en, ko, ek, ke} x {random, topic, all}.

        Replaces 24 near-identical literal KOWOWConfig(...) blocks with one
        loop; the order (v1.0 first, en.random first) is preserved so the
        default config stays 'kowow.en.random.v1.0'.
        """
        # v1.0 -> structured features, v1.1 -> flattened string features.
        variants = (
            ("v1.0", _WOW_FEATURE, _parsing_wow),
            ("v1.1", _WOW_FEATURE_FORMAT, _parsing_wow_format),
        )
        # "all" globs both the random and topic split files.
        split_files = (
            ("random", "valid_random_split.json", "test_random_split.json"),
            ("topic", "valid_topic_split.json", "test_topic_split.json"),
            ("all", "valid*.json", "test*.json"),
        )
        configs = []
        for version, feature, reading_fn in variants:
            for lang in ("en", "ko", "ek", "ke"):
                for split_name, valid_file, test_file in split_files:
                    configs.append(KOWOWConfig(
                        name="kowow.{}.{}.{}".format(lang, split_name, version),
                        feature=feature,
                        data_sp_path={
                            "train": "train.json",
                            "valid": valid_file,
                            "test": test_file
                        },
                        reading_fn=reading_fn,
                        parsing_fn=lambda x: x,
                        citation=_KOWOW_CITATION
                    ))
        return configs

    BUILDER_CONFIGS = _build_configs()
    del _build_configs  # helper is only needed while the class body executes

    MANUAL_DOWNLOAD_INSTRUCTIONS = """

    """

    def _info(self) -> datasets.DatasetInfo:
        """Returns the dataset metadata for the selected config."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=self.config.feature,
            homepage=self.config.homepage,
            citation=self.config.citation,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Returns SplitGenerators.

        Files are resolved by globbing the config's split patterns under
        the manually supplied data directory (`dl_manager.manual_dir`).
        """
        split_kwargs = {
            datasets.Split.TRAIN: glob(os.path.join(dl_manager.manual_dir,
                                                    self.config.data_sp_path["train"])),
            datasets.Split.VALIDATION: glob(os.path.join(dl_manager.manual_dir,
                                                         self.config.data_sp_path["valid"])),
            datasets.Split.TEST: glob(os.path.join(dl_manager.manual_dir,
                                                   self.config.data_sp_path["test"])),
        }

        return [
            datasets.SplitGenerator(
                name=k,
                gen_kwargs={
                    'path_list': v,
                }) for k, v in split_kwargs.items()
        ]

    def _generate_examples(self, path_list, split_fn=None):
        """Yields (key, example) pairs from each file in `path_list`."""
        for file_path in path_list:
            try:
                for example in self.config.reading_fn(file_path):
                    yield self.config.parsing_fn(example)
            except Exception as e:
                # NOTE(review): a failing file is only printed and then
                # skipped; kept as-is to preserve the best-effort behavior,
                # but consider logging or re-raising.
                print(e)