Datasets:
Tasks:
Token Classification
Sub-tasks:
named-entity-recognition
Languages:
Hindi
Size:
100K<n<1M
ArXiv:
License:
Commit
·
769be5c
1 Parent(s):
f37d0b9
add json files
Browse files
- HiNER-original.py: +1 −2
HiNER-original.py
CHANGED
|
@@ -89,7 +89,7 @@ class HiNERConfig(datasets.GeneratorBasedBuilder):
|
|
| 89 |
|
| 90 |
return [
|
| 91 |
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
|
| 92 |
-
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["validation"]}),
|
| 93 |
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]})
|
| 94 |
]
|
| 95 |
|
|
@@ -103,6 +103,5 @@ class HiNERConfig(datasets.GeneratorBasedBuilder):
|
|
| 103 |
yield id_, {
|
| 104 |
"id": str(id_),
|
| 105 |
"tokens": object['tokens'],
|
| 106 |
-
# "pos_tags": object['pos_tags'],
|
| 107 |
"ner_tags": object['ner_tags'],
|
| 108 |
}
|
|
|
|
| 89 |
|
| 90 |
return [
|
| 91 |
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
|
| 92 |
+
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["validation"]}),
|
| 93 |
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]})
|
| 94 |
]
|
| 95 |
|
|
|
|
| 103 |
yield id_, {
|
| 104 |
"id": str(id_),
|
| 105 |
"tokens": object['tokens'],
|
|
|
|
| 106 |
"ner_tags": object['ner_tags'],
|
| 107 |
}
|