Taejun Kim committed on
Commit
bf5a4b4
·
1 Parent(s): b48eccb

no message

Browse files
Files changed (1) hide show
  1. djmix.py +93 -10
djmix.py CHANGED
@@ -44,22 +44,25 @@ _HOMEPAGE = ""
44
  _LICENSE = ""
45
 
46
  # TODO: Add link to the official dataset URLs here
47
- # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
48
  # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
49
  _URLS = {
50
  "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
51
  "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
52
  }
53
 
 
 
 
54
 
55
  # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
56
  class DJMixDataset(datasets.GeneratorBasedBuilder):
57
  """TODO: Short description of my dataset."""
58
 
59
- VERSION = datasets.Version("1.1.0")
60
 
61
  # This is an example of a dataset with multiple configurations.
62
- # If you don't want/need to define several sub-sets in your dataset,
63
  # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
64
 
65
  # If you need to make complex sub-parts in the datasets with configurable options
@@ -67,16 +70,16 @@ class DJMixDataset(datasets.GeneratorBasedBuilder):
67
  # BUILDER_CONFIG_CLASS = MyBuilderConfig
68
 
69
  # You will be able to load one or the other configurations in the following list with
70
- # data = datasets.load_dataset('my_dataset', 'first_domain')
71
- # data = datasets.load_dataset('my_dataset', 'second_domain')
72
  BUILDER_CONFIGS = [
73
- datasets.BuilderConfig(name="first_domain", version=VERSION,
74
  description="This part of my dataset covers a first domain"),
75
- datasets.BuilderConfig(name="second_domain", version=VERSION,
76
  description="This part of my dataset covers a second domain"),
77
  ]
78
 
79
- DEFAULT_CONFIG_NAME = "first_domain" # It's not mandatory to have a default configuration. Just use one if it make sense.
80
 
81
  def _info(self):
82
  # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
@@ -103,8 +106,8 @@ class DJMixDataset(datasets.GeneratorBasedBuilder):
103
  description=_DESCRIPTION,
104
  # This defines the different columns of the dataset and their types
105
  features=features, # Here we define them above because they are different between the two configurations
106
- # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
107
- # specify them. They'll be used if as_supervised=True in builder.as_dataset.
108
  # supervised_keys=("sentence", "label"),
109
  # Homepage of the dataset for documentation
110
  homepage=_HOMEPAGE,
@@ -121,7 +124,24 @@ class DJMixDataset(datasets.GeneratorBasedBuilder):
121
  # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
122
  # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
123
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
 
 
 
 
 
 
 
 
 
 
 
 
124
  urls = _URLS[self.config.name]
 
 
 
 
 
125
  data_dir = dl_manager.download_and_extract(urls)
126
  return [
127
  datasets.SplitGenerator(
@@ -170,3 +190,66 @@ class DJMixDataset(datasets.GeneratorBasedBuilder):
170
  "option2": data["option2"],
171
  "second_domain_answer": "" if split == "test" else data["second_domain_answer"],
172
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
  _LICENSE = ""
45
 
46
  # TODO: Add link to the official dataset URLs here
47
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
48
  # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
49
  _URLS = {
50
  "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
51
  "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
52
  }
53
 
54
+ # _METADATA_URL = "https://huggingface.co/datasets/taejunkim/djmix/resolve/b961f21ab1a22b12e3154229bb40de9c950f8b26/metadata.json"
55
+ _METADATA_URL = "https://huggingface.co/datasets/taejunkim/djmix/resolve/b48eccb4738ba09b96ea21d0bdb33f29e7be3b3c/metadata.json.gz"
56
+
57
 
58
  # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
59
  class DJMixDataset(datasets.GeneratorBasedBuilder):
60
  """TODO: Short description of my dataset."""
61
 
62
+ VERSION = datasets.Version("1.0.0")
63
 
64
  # This is an example of a dataset with multiple configurations.
65
+ # If you don't want/need to define several sub-sets in your dataset,
66
  # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
67
 
68
  # If you need to make complex sub-parts in the datasets with configurable options
 
70
  # BUILDER_CONFIG_CLASS = MyBuilderConfig
71
 
72
  # You will be able to load one or the other configurations in the following list with
73
+ # data = datasets.load_dataset("my_dataset", "first_domain")
74
+ # data = datasets.load_dataset("my_dataset", "second_domain")
75
  BUILDER_CONFIGS = [
76
+ datasets.BuilderConfig(name="mixes", version=VERSION,
77
  description="This part of my dataset covers a first domain"),
78
+ datasets.BuilderConfig(name="tracks", version=VERSION,
79
  description="This part of my dataset covers a second domain"),
80
  ]
81
 
82
+ DEFAULT_CONFIG_NAME = "first_domain" # It"s not mandatory to have a default configuration. Just use one if it make sense.
83
 
84
  def _info(self):
85
  # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
 
106
  description=_DESCRIPTION,
107
  # This defines the different columns of the dataset and their types
108
  features=features, # Here we define them above because they are different between the two configurations
109
+ # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
110
+ # specify them. They'll be used if as_supervised=True in builder.as_dataset.
111
  # supervised_keys=("sentence", "label"),
112
  # Homepage of the dataset for documentation
113
  homepage=_HOMEPAGE,
 
124
  # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
125
  # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
126
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
127
+
128
+ metadata_path = dl_manager.download(_METADATA_URL)
129
+ metadata_path = dl_manager.extract(metadata_path)
130
+ dl_manager.iter_archive()
131
+ with open(metadata_path) as f:
132
+ metadata = json.load(f)
133
+ download_mixes(metadata, self.cache_dir)
134
+ audio_urls = []
135
+ for mix in metadata:
136
+ audio_urls.append(mix["audio_url"])
137
+ dl_manager.download_custom(mix["audio_url"], youtube_dl_download)
138
+ print("HI!!!!!!!!!!!!!!!!!!!!!!")
139
  urls = _URLS[self.config.name]
140
+
141
+ dl_manager.downloaded_paths
142
+
143
+ dl_manager.download_custom("haha.mp3", custom)
144
+
145
  data_dir = dl_manager.download_and_extract(urls)
146
  return [
147
  datasets.SplitGenerator(
 
190
  "option2": data["option2"],
191
  "second_domain_answer": "" if split == "test" else data["second_domain_answer"],
192
  }
193
+
194
+
195
def youtube_dl_download(src_url, dst_path):
    """Download the audio track of *src_url* as an mp3 saved at *dst_path*.

    Shaped as a ``datasets.DownloadManager.download_custom`` callback:
    takes a source URL and a destination path, returns the destination path.

    Parameters
    ----------
    src_url : str
        URL understood by yt-dlp (e.g. a YouTube link).
    dst_path : str
        Final path of the downloaded mp3 file.

    Returns
    -------
    str
        ``dst_path``, once the file has been downloaded and renamed.
    """
    # Lazy import so the dataset script can be loaded without yt-dlp installed.
    from yt_dlp import YoutubeDL

    params = {
        "format": "bestaudio",
        # yt-dlp appends the source's native extension here; the FFmpeg
        # post-processor below converts the result to "<dst_path>.mp3".
        "outtmpl": f"{dst_path}.%(ext)s",
        "postprocessors": [{  # Extract audio using ffmpeg
            "key": "FFmpegExtractAudio",
            "preferredcodec": "mp3",
        }],
    }
    with YoutubeDL(params) as ydl:
        # download() expects a LIST of URLs; passing a bare string makes
        # youtube-dl-compatible versions iterate it character by character.
        ydl.download([src_url])
    # Strip the ".mp3" suffix added by the post-processor so the file ends
    # up exactly at the path the download manager asked for.
    os.rename(f"{dst_path}.mp3", dst_path)

    return dst_path
211
+
212
+
213
def download_mixes(metadata, cache_dir):
    """Download the audio of every mix in *metadata* into *cache_dir*.

    Each entry of *metadata* must provide an ``"id"`` and an ``"audio_url"``.
    Files are written as ``<cache_dir>/<id>.mp3`` via :func:`download_audio`.

    Download failures are reported and skipped (best effort) so that one
    broken URL does not abort the whole batch.
    """
    # Lazy import so the dataset script can be loaded without yt-dlp installed.
    from yt_dlp.utils import DownloadError

    for mix in metadata:
        audio_path = os.path.join(cache_dir, mix["id"] + ".mp3")
        try:
            download_audio(mix["audio_url"], audio_path)
        except DownloadError as e:
            # Deliberate best-effort: report and continue with the rest.
            print(e)
226
+
227
def download_audio(url, path):
    """Download *url* as an mp3 file at *path* using yt-dlp.

    Parameters
    ----------
    url : str
        URL understood by yt-dlp.
    path : str
        Destination file path; parent directories are created as needed.

    Returns
    -------
    int or None
        ``None`` if *path* already exists (download skipped), otherwise
        yt-dlp's return code (0 on success).
    """
    if os.path.isfile(path):
        # TODO: silence?
        print(f'{path} already exists. Skip downloading.')
        return None

    # Lazy import so the dataset script can be loaded without yt-dlp installed.
    from yt_dlp import YoutubeDL

    params = {
        'format': 'bestaudio',
        'outtmpl': path,
        'postprocessors': [{  # Extract audio using ffmpeg
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'mp3',
        }]
    }
    # Create the destination directory before yt-dlp writes into it; fall
    # back to "." when *path* has no directory component (dirname == "").
    os.makedirs(os.path.dirname(path) or '.', exist_ok=True)
    with YoutubeDL(params) as ydl:
        # download() expects a LIST of URLs, not a single string.
        return ydl.download([url])
246
+
247
+
248
if __name__ == "__main__":
    # Build and cache the "mixes" configuration from the local data directory.
    builder = DJMixDataset(config_name="mixes", data_dir="/data/djmix")
    builder.download_and_prepare()
    # Load the dataset script from this file's own directory.
    script_dir = os.path.dirname(os.path.realpath(__file__))
    datasets.load_dataset(script_dir)