Datasets:
Regenerate dataset files with v2.0 template
Browse files
Generated from updated template with v2.0 improvements:
- universal_dependencies.py: MWT support, bug fix, optimizations
- tools/universal_dependencies-2.17: Source generated file
- tools/README-2.17: Source generated file
Changes in universal_dependencies.py:
- Added MWT field to schema (Sequence of {id, form, misc})
- Implemented MWT extraction for tuple IDs
- Fixed bug: filter to syntactic words only (sent_filtered)
- Optimized: reuse tokens variable instead of rebuilding
- All token fields now use sent_filtered (excludes MWTs)
Generated with: ./03_fill_universal_dependencies_tamplate.py -o
Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
- tools/README-2.17 +1 -1
- tools/universal_dependencies-2.17 +31 -10
- universal_dependencies.py +31 -10
tools/README-2.17
CHANGED
|
@@ -12908,4 +12908,4 @@ The `./tools/` are licensed under the [Apache-2.0](https://www.apache.org/licens
|
|
| 12908 |
|
| 12909 |
### Contributions
|
| 12910 |
|
| 12911 |
-
Thanks to [universal-dependencies](https://huggingface.co/universal-dependencies) for [the original of this dataset](https://huggingface.co/datasets/universal-dependencies/universal_dependencies).
|
|
|
|
| 12908 |
|
| 12909 |
### Contributions
|
| 12910 |
|
| 12911 |
+
Thanks to [universal-dependencies](https://huggingface.co/universal-dependencies) for [the original of this dataset](https://huggingface.co/datasets/universal-dependencies/universal_dependencies).
|
tools/universal_dependencies-2.17
CHANGED
|
@@ -1778,6 +1778,13 @@ class UniversalDependencies(datasets.GeneratorBasedBuilder):
|
|
| 1778 |
"deprel": datasets.Sequence(datasets.Value("string")),
|
| 1779 |
"deps": datasets.Sequence(datasets.Value("string")),
|
| 1780 |
"misc": datasets.Sequence(datasets.Value("string")),
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1781 |
}
|
| 1782 |
),
|
| 1783 |
supervised_keys=None,
|
|
@@ -1829,7 +1836,20 @@ class UniversalDependencies(datasets.GeneratorBasedBuilder):
|
|
| 1829 |
else:
|
| 1830 |
idx = id
|
| 1831 |
|
| 1832 |
-
tokens
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1833 |
|
| 1834 |
if "text" in sent.metadata:
|
| 1835 |
txt = sent.metadata["text"]
|
|
@@ -1839,14 +1859,15 @@ class UniversalDependencies(datasets.GeneratorBasedBuilder):
|
|
| 1839 |
yield id, {
|
| 1840 |
"idx": str(idx),
|
| 1841 |
"text": txt,
|
| 1842 |
-
"tokens":
|
| 1843 |
-
"lemmas": [token["lemma"] for token in
|
| 1844 |
-
"upos": [token["upos"] for token in
|
| 1845 |
-
"xpos": [token["xpos"] for token in
|
| 1846 |
-
"feats": [str(token["feats"]) for token in
|
| 1847 |
-
"head": [str(token["head"]) for token in
|
| 1848 |
-
"deprel": [str(token["deprel"]) for token in
|
| 1849 |
-
"deps": [str(token["deps"]) for token in
|
| 1850 |
-
"misc": [str(token["misc"]) for token in
|
|
|
|
| 1851 |
}
|
| 1852 |
id += 1
|
|
|
|
| 1778 |
"deprel": datasets.Sequence(datasets.Value("string")),
|
| 1779 |
"deps": datasets.Sequence(datasets.Value("string")),
|
| 1780 |
"misc": datasets.Sequence(datasets.Value("string")),
|
| 1781 |
+
"mwt": datasets.Sequence(
|
| 1782 |
+
{
|
| 1783 |
+
"id": datasets.Value("string"),
|
| 1784 |
+
"form": datasets.Value("string"),
|
| 1785 |
+
"misc": datasets.Value("string"),
|
| 1786 |
+
}
|
| 1787 |
+
),
|
| 1788 |
}
|
| 1789 |
),
|
| 1790 |
supervised_keys=None,
|
|
|
|
| 1836 |
else:
|
| 1837 |
idx = id
|
| 1838 |
|
| 1839 |
+
# Extract Multi-Word Tokens (MWTs) - tokens with tuple IDs like (1, '-', 2)
|
| 1840 |
+
mwts = []
|
| 1841 |
+
for token in sent:
|
| 1842 |
+
if isinstance(token["id"], tuple): # MWT line (e.g., "1-2")
|
| 1843 |
+
mwts.append({
|
| 1844 |
+
"id": f"{token['id'][0]}-{token['id'][2]}",
|
| 1845 |
+
"form": token["form"],
|
| 1846 |
+
"misc": str(token["misc"]) if token["misc"] else ""
|
| 1847 |
+
})
|
| 1848 |
+
|
| 1849 |
+
# Filter to syntactic words only (exclude MWTs and empty nodes)
|
| 1850 |
+
sent_filtered = sent.filter(id=lambda x: type(x) is int)
|
| 1851 |
+
|
| 1852 |
+
tokens = [token["form"] for token in sent_filtered]
|
| 1853 |
|
| 1854 |
if "text" in sent.metadata:
|
| 1855 |
txt = sent.metadata["text"]
|
|
|
|
| 1859 |
yield id, {
|
| 1860 |
"idx": str(idx),
|
| 1861 |
"text": txt,
|
| 1862 |
+
"tokens": tokens,
|
| 1863 |
+
"lemmas": [token["lemma"] for token in sent_filtered],
|
| 1864 |
+
"upos": [token["upos"] for token in sent_filtered],
|
| 1865 |
+
"xpos": [token["xpos"] for token in sent_filtered],
|
| 1866 |
+
"feats": [str(token["feats"]) for token in sent_filtered],
|
| 1867 |
+
"head": [str(token["head"]) for token in sent_filtered],
|
| 1868 |
+
"deprel": [str(token["deprel"]) for token in sent_filtered],
|
| 1869 |
+
"deps": [str(token["deps"]) for token in sent_filtered],
|
| 1870 |
+
"misc": [str(token["misc"]) for token in sent_filtered],
|
| 1871 |
+
"mwt": mwts,
|
| 1872 |
}
|
| 1873 |
id += 1
|
universal_dependencies.py
CHANGED
|
@@ -1778,6 +1778,13 @@ class UniversalDependencies(datasets.GeneratorBasedBuilder):
|
|
| 1778 |
"deprel": datasets.Sequence(datasets.Value("string")),
|
| 1779 |
"deps": datasets.Sequence(datasets.Value("string")),
|
| 1780 |
"misc": datasets.Sequence(datasets.Value("string")),
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1781 |
}
|
| 1782 |
),
|
| 1783 |
supervised_keys=None,
|
|
@@ -1829,7 +1836,20 @@ class UniversalDependencies(datasets.GeneratorBasedBuilder):
|
|
| 1829 |
else:
|
| 1830 |
idx = id
|
| 1831 |
|
| 1832 |
-
tokens
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1833 |
|
| 1834 |
if "text" in sent.metadata:
|
| 1835 |
txt = sent.metadata["text"]
|
|
@@ -1839,14 +1859,15 @@ class UniversalDependencies(datasets.GeneratorBasedBuilder):
|
|
| 1839 |
yield id, {
|
| 1840 |
"idx": str(idx),
|
| 1841 |
"text": txt,
|
| 1842 |
-
"tokens":
|
| 1843 |
-
"lemmas": [token["lemma"] for token in
|
| 1844 |
-
"upos": [token["upos"] for token in
|
| 1845 |
-
"xpos": [token["xpos"] for token in
|
| 1846 |
-
"feats": [str(token["feats"]) for token in
|
| 1847 |
-
"head": [str(token["head"]) for token in
|
| 1848 |
-
"deprel": [str(token["deprel"]) for token in
|
| 1849 |
-
"deps": [str(token["deps"]) for token in
|
| 1850 |
-
"misc": [str(token["misc"]) for token in
|
|
|
|
| 1851 |
}
|
| 1852 |
id += 1
|
|
|
|
| 1778 |
"deprel": datasets.Sequence(datasets.Value("string")),
|
| 1779 |
"deps": datasets.Sequence(datasets.Value("string")),
|
| 1780 |
"misc": datasets.Sequence(datasets.Value("string")),
|
| 1781 |
+
"mwt": datasets.Sequence(
|
| 1782 |
+
{
|
| 1783 |
+
"id": datasets.Value("string"),
|
| 1784 |
+
"form": datasets.Value("string"),
|
| 1785 |
+
"misc": datasets.Value("string"),
|
| 1786 |
+
}
|
| 1787 |
+
),
|
| 1788 |
}
|
| 1789 |
),
|
| 1790 |
supervised_keys=None,
|
|
|
|
| 1836 |
else:
|
| 1837 |
idx = id
|
| 1838 |
|
| 1839 |
+
# Extract Multi-Word Tokens (MWTs) - tokens with tuple IDs like (1, '-', 2)
|
| 1840 |
+
mwts = []
|
| 1841 |
+
for token in sent:
|
| 1842 |
+
if isinstance(token["id"], tuple): # MWT line (e.g., "1-2")
|
| 1843 |
+
mwts.append({
|
| 1844 |
+
"id": f"{token['id'][0]}-{token['id'][2]}",
|
| 1845 |
+
"form": token["form"],
|
| 1846 |
+
"misc": str(token["misc"]) if token["misc"] else ""
|
| 1847 |
+
})
|
| 1848 |
+
|
| 1849 |
+
# Filter to syntactic words only (exclude MWTs and empty nodes)
|
| 1850 |
+
sent_filtered = sent.filter(id=lambda x: type(x) is int)
|
| 1851 |
+
|
| 1852 |
+
tokens = [token["form"] for token in sent_filtered]
|
| 1853 |
|
| 1854 |
if "text" in sent.metadata:
|
| 1855 |
txt = sent.metadata["text"]
|
|
|
|
| 1859 |
yield id, {
|
| 1860 |
"idx": str(idx),
|
| 1861 |
"text": txt,
|
| 1862 |
+
"tokens": tokens,
|
| 1863 |
+
"lemmas": [token["lemma"] for token in sent_filtered],
|
| 1864 |
+
"upos": [token["upos"] for token in sent_filtered],
|
| 1865 |
+
"xpos": [token["xpos"] for token in sent_filtered],
|
| 1866 |
+
"feats": [str(token["feats"]) for token in sent_filtered],
|
| 1867 |
+
"head": [str(token["head"]) for token in sent_filtered],
|
| 1868 |
+
"deprel": [str(token["deprel"]) for token in sent_filtered],
|
| 1869 |
+
"deps": [str(token["deps"]) for token in sent_filtered],
|
| 1870 |
+
"misc": [str(token["misc"]) for token in sent_filtered],
|
| 1871 |
+
"mwt": mwts,
|
| 1872 |
}
|
| 1873 |
id += 1
|