Datasets:
updated readme
Browse files
README.md
CHANGED
|
@@ -264,6 +264,36 @@ We provide the un-annotated additional data stats below:
|
|
| English-New York | 6,454 |
| **Total** | **55,702** |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
### License

The dataset is distributed under the Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0). The full license text can be found in the accompanying licenses_by-nc-sa_4.0_legalcode.txt file.
|
| English-New York | 6,454 |
| **Total** | **55,702** |
### How to download data

```python
import os
import json
from datasets import load_dataset


dataset_names = ['arabic_qa', 'assamese_in', 'bangla_bd', 'bangla_in', 'english_bd',
                 'english_qa', 'hindi_in', 'nepali_np', 'turkish_tr']
base_dir = "./MNQA/"

for dname in dataset_names:
    output_dir = os.path.join(base_dir, dname)
    # load each language
    dataset = load_dataset("QCRI/MultiNativQA", name=dname)
    # Save the dataset to the specified directory. This will save all splits to the output directory.
    dataset.save_to_disk(output_dir)
    # iterate over splits to save the data into json format
    for split in ['train', 'dev', 'test']:
        data = []
        if split not in dataset:
            continue
        for idx, item in enumerate(dataset[split]):
            data.append(item)
        output_file = os.path.join(output_dir, f"{split}.json")
        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=4)
```
### License

The dataset is distributed under the Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0). The full license text can be found in the accompanying licenses_by-nc-sa_4.0_legalcode.txt file.