Update files from the datasets library (from 1.1.3)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.1.3
- dataset_infos.json +0 -0
- xtreme.py +50 -10
dataset_infos.json
CHANGED
The diff for this file is too large to render. See raw diff.
xtreme.py
CHANGED
@@ -361,9 +361,9 @@ _TEXT_FEATURES = {
     "tatoeba": {"source_sentence": "", "target_sentence": "", "source_lang": "", "target_lang": ""},
     "bucc18": {"source_sentence": "", "target_sentence": "", "source_lang": "", "target_lang": ""},
     "PAWS-X": {"sentence1": "sentence1", "sentence2": "sentence2"},
-    "udpos": {"
+    "udpos": {"token": "", "pos_tag": ""},
     "SQuAD": {"id": "id", "title": "title", "context": "context", "question": "question", "answers": "answers"},
-    "PAN-X": {"
+    "PAN-X": {"tokens": "", "ner_tags": "", "lang": ""},
 }
 _DATA_URLS = {
     "tydiqa": "https://storage.googleapis.com/tydiqa/",
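The new "udpos" and "PAN-X" entries above list the columns each config exposes. As a rough, hedged illustration of how such a key-to-empty-string mapping can be turned into a default schema of plain string columns (the variable names here are illustrative, not taken from the script), see the sketch below; the hunk that follows then replaces these defaults for udpos and PAN-X with richer ClassLabel features.

```python
import datasets

# Mapping copied from the new "udpos" entry in the hunk above.
text_features = {"token": "", "pos_tag": ""}

# Default schema: each listed key becomes a plain string column.
features = datasets.Features({key: datasets.Value("string") for key in text_features})
print(features)  # {'token': Value(dtype='string', ...), 'pos_tag': Value(dtype='string', ...)}
```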
@@ -451,11 +451,51 @@ class Xtreme(datasets.GeneratorBasedBuilder):
         if self.config.name == "XNLI":
             features["gold_label"] = datasets.Value("string")
 
+        if self.config.name.startswith("udpos"):
+            features = datasets.Features(
+                {
+                    "token": datasets.Value("string"),
+                    "pos_tag": datasets.features.ClassLabel(
+                        names=[
+                            "ADJ",
+                            "ADP",
+                            "ADV",
+                            "AUX",
+                            "CCONJ",
+                            "DET",
+                            "INTJ",
+                            "NOUN",
+                            "NUM",
+                            "PART",
+                            "PRON",
+                            "PROPN",
+                            "PUNCT",
+                            "SCONJ",
+                            "SYM",
+                            "VERB",
+                            "X",
+                        ]
+                    ),
+                }
+            )
+
         if self.config.name.startswith("PAN-X"):
             features = datasets.Features(
                 {
-                    "
-                    "
+                    "tokens": datasets.Sequence(datasets.Value("string")),
+                    "ner_tags": datasets.Sequence(
+                        datasets.features.ClassLabel(
+                            names=[
+                                "O",
+                                "B-PER",
+                                "I-PER",
+                                "B-ORG",
+                                "I-ORG",
+                                "B-LOC",
+                                "I-LOC",
+                            ]
+                        )
+                    ),
                     "langs": datasets.Sequence(datasets.Value("string")),
                 }
             )
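The hunk above swaps the plain string defaults for ClassLabel features, which store tags as integer ids over a fixed label vocabulary. A minimal sketch (not part of the diff) of how such a ClassLabel maps between tag strings and ids, reusing the POS tag list declared above:

```python
import datasets

pos_tag = datasets.features.ClassLabel(
    names=[
        "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN",
        "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X",
    ]
)
print(pos_tag.str2int("NOUN"))  # 7 -- tags are stored as their index in `names`
print(pos_tag.int2str(0))       # "ADJ"
```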
@@ -885,19 +925,19 @@ class Xtreme(datasets.GeneratorBasedBuilder):
             data = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
             for id_row, row in enumerate(data):
                 if len(row) >= 10 and row[1] != "_":
-                    yield str(id_file) + "_" + str(id_row), {"
+                    yield str(id_file) + "_" + str(id_row), {"token": row[1], "pos_tag": row[3]}
         if self.config.name.startswith("PAN-X"):
             guid_index = 1
             with open(filepath, encoding="utf-8") as f:
-
+                tokens = []
                 ner_tags = []
                 langs = []
                 for line in f:
                     if line.startswith("-DOCSTART-") or line == "" or line == "\n":
-                        if
-                        yield guid_index, {"
+                        if tokens:
+                            yield guid_index, {"tokens": tokens, "ner_tags": ner_tags, "langs": langs}
                         guid_index += 1
-
+                        tokens = []
                         ner_tags = []
                         langs = []
                     else:
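The generator changes above accumulate tokens alongside ner_tags and langs while walking the CoNLL-style PAN-X files, where each line looks like "en:Berlin<TAB>B-LOC". Below is a standalone sketch of that parsing logic, run on an in-memory sample rather than a real data file; the sample lines, the helper name, and the "O" fallback for tag-less lines are illustrative assumptions (the else branch is not shown in this hunk).

```python
def parse_panx(lines):
    """Yield one example per sentence from CoNLL-style 'lang:token<TAB>tag' lines."""
    tokens, ner_tags, langs = [], [], []
    for line in lines:
        if line.startswith("-DOCSTART-") or line.strip() == "":
            if tokens:
                yield {"tokens": tokens, "ner_tags": ner_tags, "langs": langs}
                tokens, ner_tags, langs = [], [], []
        else:
            splits = line.split("\t")
            langs.append(splits[0][:2])   # "en:Berlin" -> "en"
            tokens.append(splits[0][3:])  # "en:Berlin" -> "Berlin"
            # Fallback tag for lines without a tab-separated tag (assumption).
            ner_tags.append(splits[-1].replace("\n", "") if len(splits) > 1 else "O")
    if tokens:  # flush the last sentence if the file does not end with a blank line
        yield {"tokens": tokens, "ner_tags": ner_tags, "langs": langs}


sample = ["en:John\tB-PER\n", "en:lives\tO\n", "en:in\tO\n", "en:Berlin\tB-LOC\n", "\n"]
print(list(parse_panx(sample)))
```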
@@ -905,7 +945,7 @@ class Xtreme(datasets.GeneratorBasedBuilder):
                         splits = line.split("\t")
                         # strip out en: prefix
                         langs.append(splits[0][:2])
-
+                        tokens.append(splits[0][3:])
                         if len(splits) > 1:
                             ner_tags.append(splits[-1].replace("\n", ""))
                         else:
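Taken together, these changes give the udpos and PAN-X configurations proper token/tag schemas. A hedged usage sketch follows; the config name "PAN-X.en" and the "validation" split are assumed from the script's naming scheme, not confirmed by this diff.

```python
from datasets import load_dataset

# Assumed config and split names; adjust to whatever the dataset page lists.
panx_en = load_dataset("xtreme", "PAN-X.en", split="validation")

print(panx_en.features["ner_tags"])  # Sequence of ClassLabel with the 7 IOB2 tags above
print(panx_en[0]["tokens"][:5])
print(panx_en[0]["ner_tags"][:5])    # integer ids; decode with .feature.int2str(...)
```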