Dataset metadata:
Tasks: Question Answering
Modalities: Text
Sub-tasks: extractive-qa
Languages: code
Size: 100K - 1M
License: (value not captured in this extract)
Commit: "Refactor twostep data loading"
Changed file: codequeries.py (+38 lines, -17 lines)
Old version (before the change):

@@ -119,7 +119,7 @@ class Codequeries(datasets.GeneratorBasedBuilder):
 119             "subtokenized_input_sequence", "label_sequence"],
 120         citation=_CODEQUERIES_CITATION,
 121         data_url={
 122 -           "test": "twostep_relevance"
 123         },
 124         url="",
 125     ),
@@ -209,20 +209,41 @@ class Codequeries(datasets.GeneratorBasedBuilder):
 209     assert split == datasets.Split.TEST
 210     logger.info("generating examples from = %s", filepath)
 211
 213     key = 0
 214 -   for ...

(Note: the text of the removed lines — old lines 212 and 214-228 — was not
captured by this extract; only their "-" deletion markers survive, plus the
leading "for" of old line 214.)
--- (empty table rows removed; the updated version of the same two hunks follows) ---
New version (after the change) — indentation reconstructed, since the extract
preserved none of it:

@@ lines 119-125 @@
 119             "subtokenized_input_sequence", "label_sequence"],
 120         citation=_CODEQUERIES_CITATION,
 121         data_url={
 122 +           "test": "twostep_relevance/twostep_relevance_test_"
 123         },
 124         url="",
 125     ),

@@ lines 209-249 @@
 209     assert split == datasets.Split.TEST
 210     logger.info("generating examples from = %s", filepath)
 211
 212 +   if self.config.name == "twostep":
 213         key = 0
 214 +       for i in range(10):
 215 +           with open(filepath + str(i) + '.json', encoding="utf-8") as f:
 216 +               for line in f:
 217 +                   row = json.loads(line)
 218 +
 219 +                   instance_key = str(key) + "_" + row["query_name"] + "_" + row["code_file_path"]
 220 +                   yield instance_key, {
 221 +                       "query_name": row["query_name"],
 222 +                       "context_blocks": row["context_blocks"],
 223 +                       "answer_spans": row["answer_spans"],
 224 +                       "supporting_fact_spans": row["supporting_fact_spans"],
 225 +                       "code_file_path": row["code_file_path"],
 226 +                       "example_type": row["example_type"],
 227 +                       "subtokenized_input_sequence": row["subtokenized_input_sequence"],
 228 +                       "label_sequence": row["label_sequence"],
 229 +                   }
 230 +                   key += 1
 231 +   else:
 232 +       with open(filepath, encoding="utf-8") as f:
 233 +           key = 0
 234 +           for line in f:
 235 +               row = json.loads(line)
 236 +
 237 +               instance_key = str(key) + "_" + row["query_name"] + "_" + row["code_file_path"]
 238 +               yield instance_key, {
 239 +                   "query_name": row["query_name"],
 240 +                   "context_blocks": row["context_blocks"],
 241 +                   "answer_spans": row["answer_spans"],
 242 +                   "supporting_fact_spans": row["supporting_fact_spans"],
 243 +                   "code_file_path": row["code_file_path"],
 244 +                   "example_type": row["example_type"],
 245 +                   "subtokenized_input_sequence": row["subtokenized_input_sequence"],
 246 +                   "label_sequence": row["label_sequence"],
 247 +               }
 248 +               key += 1
 249 +