Afonso Oliveira committed on
Commit
8d2658e
·
1 Parent(s): deb7ec0

Clean scripts

Browse files
.gitattributes CHANGED
@@ -17,3 +17,4 @@
17
  test.zip filter=lfs diff=lfs merge=lfs -text
18
  train.zip filter=lfs diff=lfs merge=lfs -text
19
  validation.zip filter=lfs diff=lfs merge=lfs -text
 
 
17
  test.zip filter=lfs diff=lfs merge=lfs -text
18
  train.zip filter=lfs diff=lfs merge=lfs -text
19
  validation.zip filter=lfs diff=lfs merge=lfs -text
20
+ *.parquet filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  # DailyDialog: A Manually Labelled Multi-turn Dialogue Dataset
2
 
3
  The data is based on the original distribution ([link to original website](http://yanran.li/dailydialog)) ([link to paper](https://aclanthology.org/I17-1099/)).
 
1
+ ---
2
+ configs:
3
+ - config_name: "full"
4
+ data_files:
5
+ - split: "train"
6
+ path: "data/train-*.parquet"
7
+ - split: "validation"
8
+ path: "data/validation-*.parquet"
9
+ - split: "test"
10
+ path: "data/test-*.parquet"
11
+
12
+ dataset_info:
13
+ config_name: "full"
14
+ features:
15
+ - name: "id"
16
+ dtype: "string"
17
+ - name: "acts"
18
+ sequence:
19
+ dtype: "int8"
20
+ - name: "emotions"
21
+ sequence:
22
+ dtype: "int8"
23
+ - name: "utterances"
24
+ sequence:
25
+ dtype: "string"
26
+ splits:
27
+ - name: "train"
28
+ num_bytes: 3489563
29
+ num_examples: 11118
30
+ - name: "validation"
31
+ num_bytes: 333393
32
+ num_examples: 1000
33
+ - name: "test"
34
+ num_bytes: 329284
35
+ num_examples: 1000
36
+ download_size: 4152240
37
+ dataset_size: 4152240
38
+ ---
39
+
40
  # DailyDialog: A Manually Labelled Multi-turn Dialogue Dataset
41
 
42
  The data is based on the original distribution ([link to original website](http://yanran.li/dailydialog)) ([link to paper](https://aclanthology.org/I17-1099/)).
dailydialog.py DELETED
@@ -1,138 +0,0 @@
1
- import io
2
- import json
3
- import zipfile
4
- from itertools import zip_longest
5
- from pathlib import Path
6
-
7
- import datasets
8
- from datasets.features import Sequence
9
-
10
- _SPLITS = {
11
- "train": "./train.zip",
12
- "validation": "./validation.zip",
13
- "test": "./test.zip",
14
- }
15
-
16
- _DESCRIPTION = """
17
- The DailyDialog dataset as provided in the original form with a bit of preprocessing applied to enable fast prototyping.
18
- The splits are as in the original distribution.
19
- """
20
-
21
- _CITATION = """
22
- @inproceedings{li2017dailydialog,
23
- title={DailyDialog: A Manually Labelled Multi-turn Dialogue Dataset},
24
- author={Li, Yanran and Su, Hui and Shen, Xiaoyu and Li, Wenjie and Cao, Ziqiang and Niu, Shuzi},
25
- booktitle={Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 1: Long Papers)},
26
- pages={986--995},
27
- year={2017}
28
- }
29
- """
30
-
31
- _LICENSE = "Like the original DailyDialogue dataset, this dataset is released under the CC BY-NC-SA 4.0."
32
- _HOMEPAGE = "http://yanran.li/dailydialog"
33
-
34
-
35
- class DailyDialog(datasets.GeneratorBasedBuilder):
36
- """
37
- The DailyDialog dataset as provided in the original form with a bit of preprocessing applied to enable fast prototyping.
38
- The splits are as in the original distribution.
39
- """
40
-
41
- VERSION = datasets.Version("1.0.0")
42
- BUILDER_CONFIGS = [
43
- datasets.BuilderConfig(
44
- name="full", version=VERSION, description="The full dataset."
45
- )
46
- ]
47
-
48
- def _info(self):
49
- return datasets.DatasetInfo(
50
- description=_DESCRIPTION,
51
- features=datasets.Features(
52
- {
53
- "id": datasets.Value("string"),
54
- "acts": Sequence(datasets.Value("int8")),
55
- "emotions": Sequence(datasets.Value("int8")),
56
- "utterances": Sequence(datasets.Value("string")),
57
- }
58
- ),
59
- citation=_CITATION,
60
- license=_LICENSE,
61
- homepage=_HOMEPAGE,
62
- )
63
-
64
- def _split_generators(self, dl_manager: datasets.DownloadManager):
65
- dl_paths = dl_manager.download(_SPLITS)
66
- return [
67
- datasets.SplitGenerator(
68
- name=split_name,
69
- # These kwargs will be passed to _generate_examples
70
- gen_kwargs={"data_path": split_path},
71
- )
72
- for split_name, split_path in dl_paths.items()
73
- ]
74
-
75
- def _generate_examples(self, data_path: str):
76
- split_name: str = str(Path(data_path).stem)
77
-
78
- with zipfile.ZipFile(data_path) as zip_file:
79
- files_list = list(map(str, zip_file.namelist()))
80
-
81
- acts_file = next((f for f in files_list if "act" in f.lower()))
82
- emotions_file = next((f for f in files_list if "emotion" in f.lower()))
83
- utterances_file = next(
84
- (
85
- f
86
- for f in files_list
87
- if "act" not in f.lower()
88
- and "emotion" not in f.lower()
89
- and "dialogues" in f.lower()
90
- )
91
- )
92
-
93
- acts_file = io.TextIOWrapper(
94
- zip_file.open(acts_file),
95
- encoding="utf-8",
96
- )
97
- emotions_file = io.TextIOWrapper(
98
- zip_file.open(emotions_file),
99
- encoding="utf-8",
100
- )
101
- utterances_file = io.TextIOWrapper(
102
- zip_file.open(utterances_file),
103
- encoding="utf-8",
104
- )
105
-
106
- sentinel = object()
107
-
108
- misalignments = 0
109
-
110
- for idx, combo in enumerate(
111
- zip_longest(
112
- acts_file, emotions_file, utterances_file, fillvalue=sentinel
113
- )
114
- ):
115
- if sentinel in combo:
116
- raise ValueError("Iterables have different lengths")
117
-
118
- acts, emos, utts = combo
119
-
120
- acts = [int(a.strip()) for a in acts.strip().split(" ")]
121
- emos = [int(a.strip()) for a in emos.strip().split(" ")]
122
- utts = [
123
- a.strip() for a in utts.strip().strip("__eou__").split("__eou__")
124
- ]
125
-
126
- lens = dict(utts_len=len(utts), acts_len=len(acts), emos_len=len(emos))
127
-
128
- assert len(utts) == len(acts), lens
129
- assert len(acts) == len(emos), lens
130
-
131
- item = {
132
- "id": f"{split_name}_{idx}",
133
- "acts": acts,
134
- "emotions": emos,
135
- "utterances": utts,
136
- }
137
-
138
- yield item["id"], item
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
test.zip → data/test-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:cfffc0bab93a3697c2562a9ee35c4533f0de2a93e86a4e4d3ea45cbde4bea00f
3
- size 178601
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:15fa23cff86374c12f4730d103bb5d418ecaf995dc8bc8241952217d61b9612e
3
+ size 329284
train.zip → data/train-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:c5179ab5a9a86a77b9d29114087c6b82cc4cb366abea25a43a0c24be761133f7
3
- size 1935190
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1c5e8d62f79360d4d95d94b727550c788c04f92fc351f127e6bebbafde39cc5a
3
+ size 3489563
validation.zip → data/validation-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:1dbcb00a0b4fe709788ce6604b561ba76557c88fe41f049d7899287838f0ec6a
3
- size 180450
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:db3d0f5f52f03dd6efc5b3b8729fde2cbffada6bb5b1c7e2d6f6caf093463f5a
3
+ size 333393