Datasets:

Modalities:
Text
Libraries:
Datasets
cjziems committed on
Commit
1067923
·
1 Parent(s): 5f73602

Upload wikisql_VALUE.py

Browse files
Files changed (1) hide show
  1. wikisql_VALUE.py +221 -0
wikisql_VALUE.py ADDED
@@ -0,0 +1,221 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """A large crowd-sourced dataset for developing natural language interfaces for relational databases"""
2
+
3
+
4
+ import json
5
+ import os
6
+
7
+ import datasets
8
+
9
+
10
+ _CITATION = """\
11
+ @article{zhongSeq2SQL2017,
12
+ author = {Victor Zhong and
13
+ Caiming Xiong and
14
+ Richard Socher},
15
+ title = {Seq2SQL: Generating Structured Queries from Natural Language using
16
+ Reinforcement Learning},
17
+ journal = {CoRR},
18
+ volume = {abs/1709.00103},
19
+ year = {2017}
20
+ }
21
+ """
22
+
23
+ _DESCRIPTION = """\
24
+ A large crowd-sourced dataset for developing natural language interfaces for relational databases
25
+ """
26
+
27
+ _DATA_URL = "https://huggingface.co/datasets/SALT-NLP/wikisql_VALUE/resolve/main/data.zip"
28
+
29
+ _AGG_OPS = ["", "MAX", "MIN", "COUNT", "SUM", "AVG"]
30
+ _COND_OPS = ["=", ">", "<", "OP"]
31
+
32
+
33
class WikiSQL(datasets.GeneratorBasedBuilder):
    """WikiSQL-VALUE: crowd-sourced text-to-SQL data with dialect-transformed questions.

    Besides the Standard American English (SAE) originals, the data archive
    contains question files rewritten into Appalachian English (AppE),
    Chicano English (ChcE), and Colloquial Singaporean English (CollSgE).
    All dialects share the same per-split table files.
    """

    VERSION = datasets.Version("0.1.0")

    # Dialect tag -> filename infix of the question files inside data.zip.
    # SAE uses the unmodified WikiSQL files, hence the empty infix.
    _DIALECTS = {
        "AppE": "_AppE",
        "ChcE": "_ChcE",
        "CollSgE": "_CollSgE",
        "SAE": "",
    }

    def _info(self):
        """Declare the feature schema shared by every split."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "phase": datasets.Value("int32"),
                    "question": datasets.Value("string"),
                    "table": {
                        "header": datasets.features.Sequence(datasets.Value("string")),
                        "page_title": datasets.Value("string"),
                        "page_id": datasets.Value("string"),
                        "types": datasets.features.Sequence(datasets.Value("string")),
                        "id": datasets.Value("string"),
                        "section_title": datasets.Value("string"),
                        "caption": datasets.Value("string"),
                        # Cell values are normalized to strings in _generate_examples.
                        "rows": datasets.features.Sequence(
                            datasets.features.Sequence(datasets.Value("string"))
                        ),
                        "name": datasets.Value("string"),
                    },
                    "sql": {
                        "human_readable": datasets.Value("string"),
                        "sel": datasets.Value("int32"),
                        "agg": datasets.Value("int32"),
                        "conds": datasets.features.Sequence(
                            {
                                "column_index": datasets.Value("int32"),
                                "operator_index": datasets.Value("int32"),
                                "condition": datasets.Value("string"),
                            }
                        ),
                    },
                }
            ),
            # No canonical (input, target) tuple; used if as_supervised=True
            # in builder.as_dataset, which this dataset does not support.
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage="https://github.com/salesforce/WikiSQL",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Return one SplitGenerator per (dialect, split) pair.

        BUG FIX: the original enumerated splits as ``datasets.Split.AppE.TEST``
        etc., which raises AttributeError — ``datasets.Split`` only defines
        TRAIN/TEST/VALIDATION (and ALL). Custom named splits are built
        explicitly with ``datasets.NamedSplit`` instead.
        """
        dl_dir = dl_manager.download_and_extract(_DATA_URL)
        dl_dir = os.path.join(dl_dir, "data")

        # split name -> filename stem shared by the question and table files
        stems = {"test": "test", "validation": "dev", "train": "train"}
        return [
            datasets.SplitGenerator(
                name=datasets.NamedSplit(f"{dialect}_{split_name}"),
                gen_kwargs={
                    "main_filepath": os.path.join(dl_dir, f"{stem}{infix}.jsonl"),
                    "tables_filepath": os.path.join(dl_dir, f"{stem}.tables.jsonl"),
                },
            )
            for dialect, infix in self._DIALECTS.items()
            for split_name, stem in stems.items()
        ]

    def _convert_to_human_readable(self, sel, agg, columns, conditions):
        """Make SQL query string. Based on https://github.com/salesforce/WikiSQL/blob/c2ed4f9b22db1cc2721805d53e6e76e07e2ccbdc/lib/query.py#L10

        Args:
            sel: index of the selected column.
            agg: index into _AGG_OPS for the aggregation operator.
            columns: list of column names, or None to fall back to ``col<sel>``.
            conditions: iterable of (column_index, operator_index, value) triples.

        Returns:
            A whitespace-normalized ``SELECT ... FROM table [WHERE ...]`` string.
        """
        rep = f"SELECT {_AGG_OPS[agg]} {columns[sel] if columns is not None else f'col{sel}'} FROM table"

        if conditions:
            rep += " WHERE " + " AND ".join([f"{columns[i]} {_COND_OPS[o]} {v}" for i, o, v in conditions])
        # Collapse runs of whitespace (e.g. the double space left by the
        # empty aggregation operator) into single spaces.
        return " ".join(rep.split())

    def _generate_examples(self, main_filepath, tables_filepath):
        """Yield (index, example) pairs, joining each question to its table."""

        # Build dictionary mapping table_id -> table record.
        with open(tables_filepath, encoding="utf-8") as f:
            tables = [json.loads(line) for line in f]
            id_to_tables = {x["id"]: x for x in tables}

        with open(main_filepath, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                row = json.loads(line)
                # Replace the table_id foreign key with the full table record.
                row["table"] = id_to_tables[row["table_id"]]
                del row["table_id"]

                # Handle missing data: these keys are optional in the raw
                # table files but required by the declared feature schema.
                row["table"]["page_title"] = row["table"].get("page_title", "")
                row["table"]["section_title"] = row["table"].get("section_title", "")
                row["table"]["caption"] = row["table"].get("caption", "")
                row["table"]["name"] = row["table"].get("name", "")
                row["table"]["page_id"] = str(row["table"].get("page_id", ""))

                # Fix row types: cells may be numbers in the raw JSON, but the
                # schema declares them as strings.
                row["table"]["rows"] = [[str(e) for e in r] for r in row["table"]["rows"]]

                # Get human-readable version of the SQL query.
                row["sql"]["human_readable"] = self._convert_to_human_readable(
                    row["sql"]["sel"],
                    row["sql"]["agg"],
                    row["table"]["header"],
                    row["sql"]["conds"],
                )

                # Restructure sql->conds:
                # WikiSQL provides a tuple [column_index, operator_index, condition];
                # as 'condition' can have 2 types (float or str) we convert to a
                # dict with a string-typed condition.
                for i in range(len(row["sql"]["conds"])):
                    row["sql"]["conds"][i] = {
                        "column_index": row["sql"]["conds"][i][0],
                        "operator_index": row["sql"]["conds"][i][1],
                        "condition": str(row["sql"]["conds"][i][2]),
                    }
                yield idx, row