Datasets:
Upload pm25vision.py with huggingface_hub
Browse files- pm25vision.py +46 -8
pm25vision.py
CHANGED
|
@@ -1,5 +1,7 @@
|
|
|
|
|
| 1 |
import csv
|
| 2 |
import datasets
|
|
|
|
| 3 |
|
| 4 |
|
| 5 |
class PM25Vision(datasets.GeneratorBasedBuilder):
|
|
@@ -17,7 +19,7 @@ class PM25Vision(datasets.GeneratorBasedBuilder):
|
|
| 17 |
"quality_score": datasets.Value("float64"),
|
| 18 |
"downloaded_at": datasets.Value("string"),
|
| 19 |
"pm25": datasets.Value("float64"),
|
| 20 |
-
"
|
| 21 |
"quality": datasets.Value("string"),
|
| 22 |
"pm25_bin": datasets.Value("string"),
|
| 23 |
}
|
|
@@ -26,20 +28,56 @@ class PM25Vision(datasets.GeneratorBasedBuilder):
|
|
| 26 |
)
|
| 27 |
|
| 28 |
def _split_generators(self, dl_manager):
|
|
|
|
|
|
|
|
|
|
| 29 |
|
| 30 |
return [
|
| 31 |
datasets.SplitGenerator(
|
| 32 |
name=datasets.Split.TRAIN,
|
| 33 |
-
gen_kwargs={"
|
| 34 |
),
|
| 35 |
datasets.SplitGenerator(
|
| 36 |
name=datasets.Split.TEST,
|
| 37 |
-
gen_kwargs={"
|
| 38 |
),
|
| 39 |
]
|
| 40 |
|
| 41 |
-
def _generate_examples(self,
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
import csv
|
| 3 |
import datasets
|
| 4 |
+
import tarfile
|
| 5 |
|
| 6 |
|
| 7 |
class PM25Vision(datasets.GeneratorBasedBuilder):
|
|
|
|
| 19 |
"quality_score": datasets.Value("float64"),
|
| 20 |
"downloaded_at": datasets.Value("string"),
|
| 21 |
"pm25": datasets.Value("float64"),
|
| 22 |
+
"image": datasets.Image(), # 图片字段
|
| 23 |
"quality": datasets.Value("string"),
|
| 24 |
"pm25_bin": datasets.Value("string"),
|
| 25 |
}
|
|
|
|
| 28 |
)
|
| 29 |
|
| 30 |
def _split_generators(self, dl_manager):
    """Download the train/test archives and build one SplitGenerator per split.

    Args:
        dl_manager: the `datasets` download manager used to fetch the
            repository files.

    Returns:
        A list of TRAIN and TEST ``SplitGenerator``s; each one forwards its
        local archive path and split name to ``_generate_examples``.
    """
    # Fetch both tarballs up front, keyed by split name.
    archives = {
        "train": dl_manager.download("train.tar.gz"),
        "test": dl_manager.download("test.tar.gz"),
    }
    return [
        datasets.SplitGenerator(
            name=split_enum,
            gen_kwargs={"archive_path": archives[key], "split": key},
        )
        for key, split_enum in (
            ("train", datasets.Split.TRAIN),
            ("test", datasets.Split.TEST),
        )
    ]
|
| 45 |
|
| 46 |
+
def _generate_examples(self, archive_path, split):
|
| 47 |
+
"""
|
| 48 |
+
如果是普通模式:解压 -> 遍历 metadata.csv
|
| 49 |
+
如果是 streaming 模式:直接读取 tar.gz -> 逐个 yield
|
| 50 |
+
"""
|
| 51 |
+
if self.config.streaming:
|
| 52 |
+
# Streaming 模式:用 dl_manager.iter_archive 直接迭代 tar.gz
|
| 53 |
+
with tarfile.open(archive_path, "r:gz") as tar:
|
| 54 |
+
members = {m.name: tar.extractfile(m) for m in tar.getmembers() if m.isfile()}
|
| 55 |
+
|
| 56 |
+
# 找 metadata.csv
|
| 57 |
+
meta_file = None
|
| 58 |
+
for name in members:
|
| 59 |
+
if name.endswith("metadata.csv"):
|
| 60 |
+
meta_file = members[name]
|
| 61 |
+
break
|
| 62 |
+
if meta_file is None:
|
| 63 |
+
raise FileNotFoundError("metadata.csv not found in archive")
|
| 64 |
+
|
| 65 |
+
reader = csv.DictReader(line.decode("utf-8") for line in meta_file)
|
| 66 |
+
for i, row in enumerate(reader):
|
| 67 |
+
image_name = os.path.join("images", row["filename"])
|
| 68 |
+
if image_name in members:
|
| 69 |
+
row["image"] = {"bytes": members[image_name].read(), "path": row["filename"]}
|
| 70 |
+
yield i, row
|
| 71 |
+
|
| 72 |
+
else:
|
| 73 |
+
# 默认模式:先解压,再从磁盘读
|
| 74 |
+
base_dir = datasets.filesystems.extract_archive(archive_path)
|
| 75 |
+
metadata_file = os.path.join(base_dir, "metadata.csv")
|
| 76 |
+
images_dir = os.path.join(base_dir, "images")
|
| 77 |
+
|
| 78 |
+
with open(metadata_file, encoding="utf-8") as f:
|
| 79 |
+
reader = csv.DictReader(f)
|
| 80 |
+
for i, row in enumerate(reader):
|
| 81 |
+
image_path = os.path.join(images_dir, row["filename"])
|
| 82 |
+
row["image"] = image_path
|
| 83 |
+
yield i, row
|