Commit · 208bac9
1 parent: b64310f
Update try.py
try.py CHANGED
@@ -5,6 +5,8 @@ from datasets import DownloadManager, DatasetInfo
 from datasets.data_files import DataFilesDict
 import os
 import json
+from os.path import dirname, basename
+from pathlib import Path
 
 
 # Fill in the configuration here
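The two added imports exist to pull a parent-folder name out of a repo file URL in the next hunk. A minimal sketch of that idiom, with a hypothetical URL:

from os.path import dirname, basename
from pathlib import Path

# Hypothetical resolved-file URL, for illustration only.
url = "https://huggingface.co/datasets/user/repo/resolve/main/train/data.zip"
dname = dirname(url)            # everything up to ".../main/train"
folder = basename(Path(dname))  # "train"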
@@ -99,50 +101,40 @@ class LoadingScriptPractice(datasets.GeneratorBasedBuilder):
         # Fetch the data from the Hugging Face repository
         hfh_dataset_info = HfApi().dataset_info(_NAME, revision=_REVISION, timeout=100.0)
 
-        train_metadata_paths = DataFilesDict.from_hf_repo(
+        metadata_urls = DataFilesDict.from_hf_repo(
             {datasets.Split.TRAIN: ["**"]},
             dataset_info=hfh_dataset_info,
             allowed_extensions=["jsonl", ".jsonl"],
         )
 
-        test_metadata_paths = DataFilesDict.from_hf_repo(
-            {datasets.Split.TEST: ["**"]},
-            dataset_info=hfh_dataset_info,
-            allowed_extensions=["jsonl", ".jsonl"],
-        )
-
         # Get the **.zip URLs as a dict?
-
+        data_urls = DataFilesDict.from_hf_repo(
             {datasets.Split.TRAIN: ["**"]},
             dataset_info=hfh_dataset_info,
             allowed_extensions=["zip", ".zip"],
         )
 
-
-
-
-
-
+        data_paths = dict()
+        for path in data_urls["train"]:
+            dname = dirname(path)
+            folder = basename(Path(dname))
+            data_paths[folder] = path
 
-
-
-
-
-
+        metadata_paths = dict()
+        for path in metadata_urls["train"]:
+            dname = dirname(path)
+            folder = basename(Path(dname))
+            metadata_paths[folder] = path
 
-        split_metadata_paths= {
-            "train" : train_metadata_paths["train"][0],
-            "test" : test_metadata_paths["test"][0]
-        }
 
         gs = []
-        for split, files in
+        for split, files in data_paths.items():
             '''
             split : "train" or "test" or "val"
             files : zip files
             '''
             # Get the list of URLs downloaded from the repository and cached for now
-
+            metadata_path = dl_manager.download_and_extract(metadata_paths[split])
             downloaded_files_path = dl_manager.download(files)
 
             # In the original code, is the content of the zip passed straight to _generate_example as "filepath"?
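A note on the hunk above: DataFilesDict.from_hf_repo resolves the glob patterns against the repo's file listing (here everything under "**", filtered by allowed_extensions) and returns a dict-like mapping from split name to a list of resolved URLs. The two new loops then re-key those URLs by parent-folder name, so the repo's folder layout defines the splits; this replaces the previous hard-coded split_metadata_paths dict. A hedged sketch of the shapes involved (URLs hypothetical):

# data_urls looks roughly like:
#   {"train": [".../train/data.zip", ".../test/data.zip", ...]}
data_paths = dict()
for path in data_urls["train"]:
    folder = basename(Path(dirname(path)))  # e.g. "train", "test"
    data_paths[folder] = path               # note: a later file in the same folder overwrites an earlier one
# => {"train": ".../train/data.zip", "test": ".../test/data.zip"}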
@@ -151,7 +143,7 @@ class LoadingScriptPractice(datasets.GeneratorBasedBuilder):
                     name = split,
                     gen_kwargs={
                         "images" : dl_manager.iter_archive(downloaded_files_path),
-                        "metadata_path":
+                        "metadata_path": metadata_path
                     }
                 )
             )
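For context, a minimal sketch (not this commit's code) of a _generate_examples that would match these gen_kwargs, relying on the script's existing import json and import os; dl_manager.iter_archive streams (path-inside-zip, file-object) pairs, and the "file_name" metadata key is an assumed schema:

def _generate_examples(self, images, metadata_path):
    # Index the downloaded .jsonl by file name (assumed "file_name" field).
    metadata = {}
    with open(metadata_path, encoding="utf-8") as f:
        for line in f:
            record = json.loads(line)
            metadata[record["file_name"]] = record
    # images is dl_manager.iter_archive(...): it yields (path, file object) pairs.
    for idx, (path, fobj) in enumerate(images):
        name = os.path.basename(path)
        if name in metadata:
            yield idx, {"image": {"path": path, "bytes": fobj.read()}, **metadata[name]}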