Datasets:

License:
seona committed on
Commit
09eaff0
·
1 Parent(s): b0e6086

Update data and kowow.py

Browse files
Files changed (1) hide show
  1. kowow.py +745 -0
kowow.py ADDED
@@ -0,0 +1,745 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ from glob import glob
4
+
5
+ import datasets
6
+
7
+ _DESCRIPTION = """\
8
+ # KoWoW: Korean Wizard of Wikipedia
9
+ - WoW(Wizard of Wikipedia) 데이터셋을 한국어로 번역한 데이터셋
10
+
11
+ ## Data
12
+
13
+ - en: KoWoW En-En, Knowledge-English, Utterance-English
14
+ - ko: KoWoW Ko-Ko, Knowledge-Korean, Utterance-Korean
15
+ - ek: KoWoW En-Ko, Knowledge-English, Utterance-Korean
16
+ - ke: KoWoW Ko-En, Knowledge-Korean, Utterance-English
17
+
18
+ ## Usage
19
+ ```python
20
+ import datasets
21
+
22
+ raw_datsets = datasets.load_dataset(
23
+ "kowow.py",
24
+ "kowow.ko.random.v1.0",
25
+ cache_dir="huggingface_datasets",
26
+ data_dir="data/ko", # choose en, ko, ek, or ke
27
+ )
28
+
29
+ ```
30
+
31
+ """
32
+
33
# BibTeX entry for the KoWoW paper (Findings of EMNLP 2021); surfaced to
# users through `datasets.DatasetInfo.citation`.
_KOWOW_CITATION = """
@inproceedings{kim-etal-2021-model-cross,
    title = "A Model of Cross-Lingual Knowledge-Grounded Response Generation for Open-Domain Dialogue Systems",
    author = "Kim, San  and
      Jang, Jin Yea  and
      Jung, Minyoung  and
      Shin, Saim",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2021",
    month = nov,
    year = "2021",
    address = "Punta Cana, Dominican Republic",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.findings-emnlp.33",
    doi = "10.18653/v1/2021.findings-emnlp.33",
    pages = "352--365",
    abstract = "Research on open-domain dialogue systems that allow free topics is challenging in the field of natural language processing (NLP). The performance of the dialogue system has been improved recently by the method utilizing dialogue-related knowledge; however, non-English dialogue systems suffer from reproducing the performance of English dialogue systems because securing knowledge in the same language with the dialogue system is relatively difficult. Through experiments with a Korean dialogue system, this paper proves that the performance of a non-English dialogue system can be improved by utilizing English knowledge, highlighting the system uses cross-lingual knowledge. For the experiments, we 1) constructed a Korean version of the Wizard of Wikipedia dataset, 2) built Korean-English T5 (KE-T5), a language model pre-trained with Korean and English corpus, and 3) developed a knowledge-grounded Korean dialogue model based on KE-T5. We observed the performance improvement in the open-domain Korean dialogue model even only English knowledge was given. The experimental results showed that the knowledge inherent in cross-lingual language models can be helpful for generating responses in open dialogue systems.",
}
"""
51
+
52
# Version tag attached to every builder config below.
_VERSION = datasets.Version('1.0.0', "")

# Sentinel used by Wizard of Wikipedia when the wizard grounded a turn on
# no knowledge passage at all.
TOKEN_NOCHOSEN = 'no_passages_used'
# Separators used when flattening (title, passage) / (speaker, utterance)
# pairs into single strings for the v1.1 "format" configs.
# NOTE(review): "SEPERATOR" misspelling kept — these are public module names.
TITLE_PASSAGE_SEPERATOR = "\t"
SPEAKER_UTTERANCE_SEPERATOR = "\t"
57
+
58
# Schema of one dialogue turn: who spoke ("wizard"/"apprentice") and the text.
_WOW_UTTERANCE_FEATURE = datasets.Features({
    "speaker": datasets.Value("string"),
    "utterance": datasets.Value("string")
})

# Schema of one knowledge item: an article title and a passage sentence.
_WOW_KNOWLEDGE_FEATURE = datasets.Features({
    "title": datasets.Value("string"),
    "passage": datasets.Value("string")
})
67
+
68
+
69
# Example schema for the v1.0 configs: utterances and knowledge are kept as
# nested records (produced by `_parsing_wow`).
_WOW_FEATURE = datasets.Features({
    "id": datasets.Value("string"),
    "chosen_topic": datasets.Value("string"),
    "persona": datasets.Value("string"),
    "wizard_eval": datasets.Value("int32"),
    "speaker": datasets.Value("string"),
    "current_utterance": _WOW_UTTERANCE_FEATURE,
    "history": datasets.Sequence(_WOW_UTTERANCE_FEATURE),
    "gold_knowledge": _WOW_KNOWLEDGE_FEATURE,
    "retrieved_passages": datasets.Sequence(_WOW_KNOWLEDGE_FEATURE),
    "candidate_responses": datasets.Sequence(datasets.Value("string")),
})

# Example schema for the v1.1 configs: utterances and knowledge are flattened
# into tab-separated strings (produced by `_parsing_wow_format`).
_WOW_FEATURE_FORMAT = datasets.Features({
    "id": datasets.Value("string"),
    "chosen_topic": datasets.Value("string"),
    "persona": datasets.Value("string"),
    "wizard_eval": datasets.Value("int32"),
    "speaker": datasets.Value("string"),
    "current_utterance": datasets.Value("string"),
    "previous_utterance": datasets.Value("string"),
    "history": datasets.Sequence(datasets.Value("string")),
    "gold_knowledge": datasets.Value("string"),
    "negative_candidates": datasets.Sequence(datasets.Value("string")),
    "candidate_responses": datasets.Sequence(datasets.Value("string")),
})
95
+
96
def formatting_context4retriever(title, passage):
    """Join a knowledge title and passage with the title/passage separator."""
    return " ".join([title, TITLE_PASSAGE_SEPERATOR, passage])
98
+
99
def get_no_chosen():
    """Return the formatted "no knowledge used" sentinel entry."""
    return formatting_context4retriever(
        title=TOKEN_NOCHOSEN, passage=TOKEN_NOCHOSEN)
101
+
102
def formatting_utterance(speaker, utterance):
    """Join a speaker tag and its utterance with the speaker separator."""
    return " ".join([speaker, SPEAKER_UTTERANCE_SEPERATOR, utterance])
104
+
105
+ def _first_val(dictionary):
106
+ vals = list(dictionary.values())
107
+ if len(vals) > 0:
108
+ return vals[0]
109
+ return ''
110
+
111
+ def _first_key(dictionary):
112
+ keys = list(dictionary.keys())
113
+ if len(keys) > 0:
114
+ return keys[0]
115
+ return ''
116
+
117
def _get_chosen_title_and_sent(wizard_entry, k_dict):
    """
    Return a nicely extracted title and chosen sentence for a wizard turn.

    Resolution order: if no sentence was checked, both are TOKEN_NOCHOSEN;
    otherwise try the title from `checked_passage`, then the title embedded
    in the `checked_sentence` key, then a full scan of `k_dict`. When no
    passage contains the sentence, the returned title is '' (falsy), which
    callers use to skip the turn.

    :param wizard_entry: one dialog-turn dict (expects 'checked_passage'
        and 'checked_sentence' keys as in WoW data)
    :param k_dict: mapping of title -> passage sentences for this turn
    :return: pair (title, sentence)
    """
    title_dict = wizard_entry.get('checked_passage', 'none')
    sentence_dict = wizard_entry.get('checked_sentence', {})
    title = None
    sentence = None
    if sentence_dict == {}:
        # Wizard used no knowledge at all for this turn.
        title = sentence = TOKEN_NOCHOSEN
    else:
        sentence = _first_val(sentence_dict)
        if sentence == TOKEN_NOCHOSEN:
            title = TOKEN_NOCHOSEN
        else:
            title = ''
            # cand_title1 is the title from the `checked_passage`
            cand_title1 = _first_val(title_dict) if title_dict else ''
            # cand_title2 is the extracted title of the passage from the
            # sentence dict, which is e.g. `self_Vermont_Syrup_0`
            cand_title2 = ' '.join(_first_key(sentence_dict).split('_')[1:-1])
            if (
                cand_title1
                and cand_title1 in k_dict
                and sentence in k_dict[cand_title1]
            ):
                title = cand_title1
            elif cand_title2 in k_dict and sentence in k_dict[cand_title2]:
                title = cand_title2
            else:  # neither candidate title is the right one
                # Fall back to scanning every passage for the sentence.
                for t, passage in k_dict.items():
                    if sentence in passage:
                        title = t
                        break

    return title, sentence
154
+
155
def _parsing_wow(file_path):
    """Yield structured WoW examples (one per wizard turn) from *file_path*.

    Each yielded item is an ``(example_id, example_dict)`` pair matching
    ``_WOW_FEATURE``: the current wizard utterance, the history up to (but
    excluding) it, the gold knowledge sentence, and the remaining retrieved
    passages as candidates.

    :param file_path: path to a WoW-style JSON file (list of episodes)
    """
    # Close the handle deterministically (the old code leaked it).
    with open(file_path, "r", encoding="utf-8") as f:
        data = json.load(f)
    # Episode-id prefix from the file name, e.g. "train" from "train.json".
    # os.path.basename is portable, unlike splitting on "/".
    name = os.path.basename(file_path).split(".")[0]

    id_fmt = "{}_{}_{}"
    # `episode_id` (was `id`) no longer shadows the builtin.
    for episode_id, d in enumerate(data):

        _persona = d["persona"]
        _wizard_eval = d["wizard_eval"]

        # speaker fields look like "0_Wizard" / "1_Apprentice"
        wizard_first = 'wizard' == d["dialog"][0]['speaker'].split("_")[-1].lower()

        _chosen_topic = d.get("chosen_topic", "")
        _chosen_topic_passage = d["chosen_topic_passage"]

        history = []
        dialog = d["dialog"]
        for d_id, _d in enumerate(dialog):

            # examples are produced only for wizard turns
            speaker = _d["speaker"].split("_")[-1].lower()

            history.append({
                "speaker": speaker,
                "utterance": _d["text"]
            })

            if speaker != "wizard":
                continue

            _id = id_fmt.format(name, episode_id, d_id)

            # Knowledge pool: chosen-topic passage plus passages retrieved
            # for the previous apprentice turn and previous wizard turn.
            knowledge_dict = {_chosen_topic: _chosen_topic_passage}

            apprentice_ret_passages = wizard_ret_passages = {}
            apprentice_entry = wizard_prev_entry = None
            if not wizard_first or d_id > 0:
                apprentice_entry = dialog[d_id - 1]
                apprentice_ret_passages = apprentice_entry["retrieved_passages"]
            if d_id >= 2:
                wizard_prev_entry = dialog[d_id - 2]
                wizard_ret_passages = wizard_prev_entry["retrieved_passages"]

            for ret_pas in [apprentice_ret_passages, wizard_ret_passages]:
                for pas in ret_pas:
                    for k, v in pas.items():
                        if k not in knowledge_dict:
                            knowledge_dict[k] = v

            # gold knowledge actually chosen by the wizard for this turn
            title, sentence = _get_chosen_title_and_sent(wizard_entry=_d, k_dict=knowledge_dict)

            if not title:
                # checked sentence could not be resolved to any passage
                continue

            gold_knowledge = {
                "title": title,
                "passage": sentence
            }

            # candidates: every (title, sentence) pair in the pool.
            # `cand_title` (not `title`) so the gold title is not clobbered.
            candidates = set()
            for cand_title, passage in knowledge_dict.items():
                for p in passage:
                    candidates.add((cand_title, p))

            # BUGFIX: the old code did `candidates.remove(gold_knowledge)`
            # with `gold_knowledge` a dict and `candidates` a list of tuples,
            # so the gold passage was never removed and leaked into
            # `retrieved_passages`. Remove the matching tuple instead
            # (mirrors the working removal in `_parsing_wow_format`).
            candidates.discard((gold_knowledge["title"], gold_knowledge["passage"]))

            candidates = [{
                "title": t,
                "passage": p
            } for t, p in candidates]

            if not candidates:
                continue

            # current utterance
            current_utterance = _d.get("text")

            # candidate responses exist only in test splits; drop the gold one
            candidate_responses = _d.get("candidate_responses", [])
            if current_utterance in candidate_responses:
                candidate_responses.remove(current_utterance)

            current_utterance = {
                "speaker": speaker,
                "utterance": current_utterance
            }

            yield _id, {
                "id": _id,                                   # str
                "chosen_topic": _chosen_topic,               # str
                "persona": _persona,                         # str
                "wizard_eval": _wizard_eval,                 # int
                "speaker": speaker,                          # str
                "current_utterance": current_utterance,      # dict
                "history": history[:-1],                     # list, excludes current turn
                "gold_knowledge": gold_knowledge,            # dict
                "retrieved_passages": candidates,            # list
                "candidate_responses": candidate_responses   # list
            }
263
+
264
def _parsing_wow_format(file_path):
    """Yield flattened WoW examples (one per wizard turn) from *file_path*.

    Each yielded item is an ``(example_id, example_dict)`` pair matching
    ``_WOW_FEATURE_FORMAT``: utterances and knowledge are tab-separated
    strings built with ``formatting_utterance`` and
    ``formatting_context4retriever``.

    :param file_path: path to a WoW-style JSON file (list of episodes)
    """
    # Close the handle deterministically (the old code leaked it).
    with open(file_path, "r", encoding="utf-8") as f:
        data = json.load(f)
    # Episode-id prefix from the file name; os.path.basename is portable.
    name = os.path.basename(file_path).split(".")[0]

    id_fmt = "{}_{}_{}"
    # `episode_id` (was `id`) no longer shadows the builtin.
    for episode_id, d in enumerate(data):

        _persona = d["persona"]
        _wizard_eval = d["wizard_eval"]

        # speaker fields look like "0_Wizard" / "1_Apprentice"
        wizard_first = 'wizard' == d["dialog"][0]['speaker'].split("_")[-1].lower()

        _chosen_topic = d.get("chosen_topic", "")
        _chosen_topic_passage = d["chosen_topic_passage"]

        history = []
        dialog = d["dialog"]
        for d_id, _d in enumerate(dialog):

            # examples are produced only for wizard turns
            # speaker: idx_{Apprentice, Wizard}
            speaker = _d["speaker"].split("_")[-1].lower()

            history.append(formatting_utterance(speaker, _d["text"]))

            if speaker != "wizard":
                continue

            _id = id_fmt.format(name, episode_id, d_id)

            # Knowledge pool: chosen-topic passage plus passages retrieved
            # for the previous apprentice turn and previous wizard turn.
            knowledge_dict = {_chosen_topic: _chosen_topic_passage}

            apprentice_ret_passages = wizard_ret_passages = {}
            apprentice_entry = wizard_prev_entry = None
            if not wizard_first or d_id > 0:
                apprentice_entry = dialog[d_id - 1]
                apprentice_ret_passages = apprentice_entry["retrieved_passages"]
            if d_id >= 2:
                wizard_prev_entry = dialog[d_id - 2]
                wizard_ret_passages = wizard_prev_entry["retrieved_passages"]

            for ret_pas in [apprentice_ret_passages, wizard_ret_passages]:
                for pas in ret_pas:
                    for k, v in pas.items():
                        if k not in knowledge_dict:
                            knowledge_dict[k] = v

            # previous apprentice utterance ("" when the wizard spoke first)
            prev_app_ut = apprentice_entry["text"] if apprentice_entry is not None else ""
            if prev_app_ut != "":
                prev_app_ut = formatting_utterance("apprentice", prev_app_ut)

            # gold knowledge actually chosen by the wizard for this turn
            title, sentence = _get_chosen_title_and_sent(wizard_entry=_d, k_dict=knowledge_dict)

            if not title:
                # checked sentence could not be resolved to any passage
                continue

            gold_knowledge = formatting_context4retriever(title=title, passage=sentence)

            # negative candidates, always including the "no knowledge"
            # sentinel. `cand_title` (not `title`) avoids clobbering the
            # gold title while building the pool.
            candidates = {get_no_chosen()}
            for cand_title, passage in knowledge_dict.items():
                for p in passage:
                    candidates.add(formatting_context4retriever(title=cand_title, passage=p))

            # the gold string must not appear among the negatives
            candidates.discard(gold_knowledge)
            candidates = list(candidates)

            if not candidates:
                continue

            # current utterance
            current_utterance = _d.get("text")

            # candidate responses exist only in test splits; drop the gold one
            candidate_responses = _d.get("candidate_responses", [])
            if current_utterance in candidate_responses:
                candidate_responses.remove(current_utterance)

            current_utterance = formatting_utterance(speaker, current_utterance)

            yield _id, {
                "id": _id,                                   # str
                "chosen_topic": _chosen_topic,               # str
                "persona": _persona,                         # str
                "wizard_eval": _wizard_eval,                 # int
                "speaker": speaker,                          # str
                "current_utterance": current_utterance,      # str
                "previous_utterance": prev_app_ut,           # str
                "history": history[:-2],                     # list, drops prev apprentice + current turn
                "gold_knowledge": gold_knowledge,            # str
                "negative_candidates": candidates,           # list
                "candidate_responses": candidate_responses   # list
            }
368
+
369
+
370
class KOWOWConfig(datasets.BuilderConfig):
    """BuilderConfig for KoWoW.

    Extends the base config with the example schema (``feature``), the
    per-split file patterns (``data_sp_path``), and the reading/parsing
    callables used to generate examples.
    """

    def __init__(self,
                 name,
                 feature,
                 data_sp_path,
                 reading_fn,
                 parsing_fn,
                 citation,
                 additional_data_root=None,
                 homepage='https://parl.ai/',
                 split_fn=None,
                 metadata=None,
                 **kwargs):
        super().__init__(name=name, version=_VERSION, **kwargs)
        # schema and per-split file patterns
        self.feature = feature
        self.data_sp_path = data_sp_path
        # callables that read a file and post-process each example
        self.reading_fn = reading_fn
        self.parsing_fn = parsing_fn
        # metadata passed through to DatasetInfo
        self.citation = citation
        self.homepage = homepage
        # optional extras (unused by the current builder methods)
        self.additional_data_root = additional_data_root
        self.split_fn = split_fn
        self.metadata = metadata
397
+
398
class KOWOW(datasets.GeneratorBasedBuilder):
    """DatasetBuilder for the KoWoW (Korean Wizard of Wikipedia) dataset.

    Configs cover the cross product of format version (v1.0 structured
    records via ``_parsing_wow``, v1.1 flattened strings via
    ``_parsing_wow_format``), language pair (en/ko/ek/ke — selected through
    ``data_dir``), and validation split scheme (random/topic/all).
    Data files must be supplied manually via ``data_dir``.
    """

    RELEASE_NOTES = {
        '1.0.0': 'Initial release.',
    }

    # Per-split file patterns; "all" globs both random and topic files.
    _SPLIT_FILES = {
        "random": {
            "train": "train.json",
            "valid": "valid_random_split.json",
            "test": "test_random_split.json",
        },
        "topic": {
            "train": "train.json",
            "valid": "valid_topic_split.json",
            "test": "test_topic_split.json",
        },
        "all": {
            "train": "train.json",
            "valid": "valid*.json",
            "test": "test*.json",
        },
    }

    # The 24 configs are a pure cross product (2 versions x 4 language pairs
    # x 3 split schemes); generating them replaces ~600 lines of hand-written
    # duplication while producing the exact same names in the same order.
    BUILDER_CONFIGS = []
    for _ver, _feat, _read in (
        ("v1.0", _WOW_FEATURE, _parsing_wow),
        ("v1.1", _WOW_FEATURE_FORMAT, _parsing_wow_format),
    ):
        for _lang in ("en", "ko", "ek", "ke"):
            for _split in ("random", "topic", "all"):
                BUILDER_CONFIGS.append(KOWOWConfig(
                    name="kowow.{}.{}.{}".format(_lang, _split, _ver),
                    feature=_feat,
                    data_sp_path=dict(_SPLIT_FILES[_split]),
                    reading_fn=_read,
                    parsing_fn=lambda x: x,
                    citation=_KOWOW_CITATION,
                ))
    # don't leave loop variables behind as class attributes
    del _ver, _feat, _read, _lang, _split

    MANUAL_DOWNLOAD_INSTRUCTIONS = """

    """

    def _info(self) -> datasets.DatasetInfo:
        """Returns the dataset metadata for the selected config."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=self.config.feature,
            homepage=self.config.homepage,
            citation=self.config.citation,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Returns SplitGenerators, globbing patterns under the manual dir."""
        split_kwargs = {
            datasets.Split.TRAIN: glob(os.path.join(
                dl_manager.manual_dir, self.config.data_sp_path["train"])),
            datasets.Split.VALIDATION: glob(os.path.join(
                dl_manager.manual_dir, self.config.data_sp_path["valid"])),
            datasets.Split.TEST: glob(os.path.join(
                dl_manager.manual_dir, self.config.data_sp_path["test"])),
        }

        return [
            datasets.SplitGenerator(
                name=k,
                gen_kwargs={
                    'path_list': v,
                }) for k, v in split_kwargs.items()
        ]

    def _generate_examples(self, path_list, split_fn=None):
        """Yields (key, example) pairs produced by the config's reading_fn."""
        for file_path in path_list:
            try:
                # reading_fn is already a generator; no iter() wrapper needed
                for example in self.config.reading_fn(file_path):
                    yield self.config.parsing_fn(example)
            except Exception as e:
                # best-effort: skip unreadable files, but say which one failed
                # instead of printing only the bare exception
                print("skipping {}: {}".format(file_path, e))