Alejandro Escontrela committed
Commit: 354f053 · 1 Parent(s): 8420f91

Update mdm_data.py

mdm_data.py CHANGED (+22 -99)
@@ -18,7 +18,7 @@ _DESCRIPTION = """\
 """
 _HOMEPAGE = "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/"
 _LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)"
-_DATA_URL = "https://s3.amazonaws.com/research.metamind.io/wikitext"
+_DATA_URL = "https://huggingface.co/datasets/alescontrela/mdm_data/tree/main"
 
 
 class MDMDataConfig(datasets.BuilderConfig):
@@ -30,7 +30,7 @@ class MDMDataConfig(datasets.BuilderConfig):
           data_url: `string`, url to the dataset (word or raw level)
           **kwargs: keyword arguments forwarded to super.
         """
-        super(WikitextConfig, self).__init__(
+        super(MDMDataConfig, self).__init__(
             version=datasets.Version(
                 "1.0.0",
             ),
@@ -46,26 +46,9 @@ class MDMData(datasets.GeneratorBasedBuilder):
     VERSION = datasets.Version("0.1.0")
     BUILDER_CONFIGS = [
         WikitextConfig(
-            name="wikitext-103-v1",
-            data_url=_DATA_URL + "/" + "wikitext-103-v1.zip",
-            description="Word level dataset. No processing is needed other than replacing newlines with <eos> tokens.",
-        ),
-        WikitextConfig(
-            name="wikitext-2-v1",
-            data_url=_DATA_URL + "/" + "wikitext-2-v1.zip",
-            description="Word level dataset. No processing is needed other than replacing newlines with <eos> tokens.",
-        ),
-        WikitextConfig(
-            name="wikitext-103-raw-v1",
-            data_url=_DATA_URL + "/" + "wikitext-103-raw-v1.zip",
-            description="Raw level dataset: the raw tokens before the addition of <unk> tokens. "
-            "They should only be used for character level work or for creating newly derived datasets.",
-        ),
-        WikitextConfig(
-            name="wikitext-2-raw-v1",
-            data_url=_DATA_URL + "/" + "wikitext-2-raw-v1.zip",
-            description="Raw level dataset: the raw tokens before the addition of <unk> tokens. "
-            "They should only be used for character level work or for creating newly derived datasets.",
+            name="MDM Data",
+            data_url=_DATA_URL + "/" + "HumanML3D.zip",
+            description="Text to motion dataset.",
         ),
     ]
 
@@ -95,83 +78,23 @@ class MDMData(datasets.GeneratorBasedBuilder):
         # TODO(wikitext): Downloads the data and defines the splits
         # dl_manager is a datasets.download.DownloadManager that can be used to
         # download and extract URLs
-        if self.config.name == "wikitext-103-v1":
-            data_file = dl_manager.download_and_extract(self.config.data_url)
-            data_dir = os.path.join(data_file, "wikitext-103")
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TEST,
-                    gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test.tokens"), "split": "test"},
-                ),
-                datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    gen_kwargs={"data_file": os.path.join(data_dir, "wiki.train.tokens"), "split": "train"},
-                ),
-                datasets.SplitGenerator(
-                    name=datasets.Split.VALIDATION,
-                    gen_kwargs={"data_file": os.path.join(data_dir, "wiki.valid.tokens"), "split": "valid"},
-                ),
-            ]
-        else:
-            if self.config.name == "wikitext-103-raw-v1":
-                data_file = dl_manager.download_and_extract(self.config.data_url)
-                data_dir = os.path.join(data_file, "wikitext-103-raw")
-                return [
-                    datasets.SplitGenerator(
-                        name=datasets.Split.TEST,
-                        gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test.raw"), "split": "test"},
-                    ),
-                    datasets.SplitGenerator(
-                        name=datasets.Split.TRAIN,
-                        gen_kwargs={"data_file": os.path.join(data_dir, "wiki.train.raw"), "split": "train"},
-                    ),
-                    datasets.SplitGenerator(
-                        name=datasets.Split.VALIDATION,
-                        gen_kwargs={"data_file": os.path.join(data_dir, "wiki.valid.raw"), "split": "valid"},
-                    ),
-                ]
-            else:
-                if self.config.name == "wikitext-2-raw-v1":
-                    data_file = dl_manager.download_and_extract(self.config.data_url)
-                    data_dir = os.path.join(data_file, "wikitext-2-raw")
-                    return [
-                        datasets.SplitGenerator(
-                            name=datasets.Split.TEST,
-                            gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test.raw"), "split": "test"},
-                        ),
-                        datasets.SplitGenerator(
-                            name=datasets.Split.TRAIN,
-                            gen_kwargs={"data_file": os.path.join(data_dir, "wiki.train.raw"), "split": "train"},
-                        ),
-                        datasets.SplitGenerator(
-                            name=datasets.Split.VALIDATION,
-                            gen_kwargs={"data_file": os.path.join(data_dir, "wiki.valid.raw"), "split": "valid"},
-                        ),
-                    ]
-                else:
-                    if self.config.name == "wikitext-2-v1":
-                        data_file = dl_manager.download_and_extract(self.config.data_url)
-                        data_dir = os.path.join(data_file, "wikitext-2")
-                        return [
-                            datasets.SplitGenerator(
-                                name=datasets.Split.TEST,
-                                gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test.tokens"), "split": "test"},
-                            ),
-                            datasets.SplitGenerator(
-                                name=datasets.Split.TRAIN,
-                                gen_kwargs={
-                                    "data_file": os.path.join(data_dir, "wiki.train.tokens"),
-                                    "split": "train",
-                                },
-                            ),
-                            datasets.SplitGenerator(
-                                name=datasets.Split.VALIDATION,
-                                gen_kwargs={
-                                    "data_file": os.path.join(data_dir, "wiki.valid.tokens"),
-                                    "split": "valid",
-                                },
-                            ),
-                        ]
+        data_file = dl_manager.download_and_extract(self.config.data_url)
+        data_dir = os.path.join(data_file, "HumanML3D.zip")
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test.tokens"), "split": "test"},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={"data_file": os.path.join(data_dir, "wiki.train.tokens"), "split": "train"},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={"data_file": os.path.join(data_dir, "wiki.valid.tokens"), "split": "valid"},
+            ),
+        ]
+
 
     def _generate_examples(self, data_file, split):
 
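For reference, a loading script like this one is normally exercised through datasets.load_dataset. A minimal sketch, assuming the script lives in the alescontrela/mdm_data Hub repo referenced by _DATA_URL and that the "MDM Data" config resolves as defined above (the repo id and config name here are taken from this diff, not verified against the live Hub):

from datasets import load_dataset

# Load the builder defined by mdm_data.py from the Hub repo (hypothetical usage;
# the config name "MDM Data" comes from BUILDER_CONFIGS in the diff above).
ds = load_dataset("alescontrela/mdm_data", name="MDM Data", split="train")
print(ds[0])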