DeadCardassian committed on
Commit
2a041d5
·
verified ·
1 Parent(s): 145a11c

Upload pm25vision.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. pm25vision.py +41 -17
pm25vision.py CHANGED
@@ -1,6 +1,7 @@
1
  import os
2
  import csv
3
  import datasets
 
4
 
5
 
6
  class PM25Vision(datasets.GeneratorBasedBuilder):
@@ -18,7 +19,7 @@ class PM25Vision(datasets.GeneratorBasedBuilder):
18
  "quality_score": datasets.Value("float64"),
19
  "downloaded_at": datasets.Value("string"),
20
  "pm25": datasets.Value("float64"),
21
- "image": datasets.Image(), # 图片字段
22
  "quality": datasets.Value("string"),
23
  "pm25_bin": datasets.Value("string"),
24
  }
@@ -27,29 +28,52 @@ class PM25Vision(datasets.GeneratorBasedBuilder):
27
  )
28
 
29
def _split_generators(self, dl_manager):
    """Download and extract the train/test archives and wire up the splits.

    Each archive is expected to unpack to a directory holding
    ``metadata.csv`` plus an ``images/`` folder, which is what
    ``_generate_examples`` consumes via its ``base_dir`` kwarg.
    """
    split_dirs = {
        split: dl_manager.download_and_extract(archive)
        for split, archive in (
            (datasets.Split.TRAIN, "train.zip"),
            (datasets.Split.TEST, "test.zip"),
        )
    }
    return [
        datasets.SplitGenerator(name=split, gen_kwargs={"base_dir": path})
        for split, path in split_dirs.items()
    ]
44
 
45
def _generate_examples(self, base_dir):
    """Yield ``(index, example)`` pairs from an extracted archive directory.

    Expected layout: ``base_dir/metadata.csv`` describing the samples and
    ``base_dir/images/`` holding the picture files referenced by the CSV's
    ``filename`` column. The image is passed as a path string so the
    ``datasets.Image()`` feature can lazy-load it from disk.
    """
    csv_path = os.path.join(base_dir, "metadata.csv")
    img_root = os.path.join(base_dir, "images")

    with open(csv_path, encoding="utf-8") as handle:
        for idx, record in enumerate(csv.DictReader(handle)):
            record["image"] = os.path.join(img_root, record["filename"])
            yield idx, record
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import os
2
  import csv
3
  import datasets
4
+ import zipfile
5
 
6
 
7
  class PM25Vision(datasets.GeneratorBasedBuilder):
 
19
  "quality_score": datasets.Value("float64"),
20
  "downloaded_at": datasets.Value("string"),
21
  "pm25": datasets.Value("float64"),
22
+ "image": datasets.Image(),
23
  "quality": datasets.Value("string"),
24
  "pm25_bin": datasets.Value("string"),
25
  }
 
28
  )
29
 
30
def _split_generators(self, dl_manager):
    """Download the raw train/test zip archives (no extraction) and build splits.

    The archive path itself is forwarded to ``_generate_examples`` via the
    ``archive_path`` kwarg; that method is responsible for reading members
    out of the zip.
    """
    generators = []
    for split_name, archive in (
        (datasets.Split.TRAIN, "train.zip"),
        (datasets.Split.TEST, "test.zip"),
    ):
        archive_path = dl_manager.download(archive)
        generators.append(
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"archive_path": archive_path},
            )
        )
    return generators
45
 
46
def _generate_examples(self, archive_path):
    """Yield ``(key, example)`` pairs read from a train/test zip archive.

    The archive must contain a ``metadata.csv`` member (anywhere in the
    tree) and an ``images/`` folder whose files are referenced by the CSV's
    ``filename`` column.

    Args:
        archive_path: Local path to the downloaded ``.zip`` archive.

    Raises:
        FileNotFoundError: If no ``metadata.csv`` member exists in the zip.
    """
    # NOTE(review): a standard BuilderConfig has no `streaming` attribute, so
    # the original `self.config.streaming` access raised AttributeError.
    # Default to the non-streaming path when the flag is absent.
    if getattr(self.config, "streaming", False):
        # Streaming mode: read members straight out of the archive.
        with zipfile.ZipFile(archive_path, "r") as z:
            names = z.namelist()
            # First member ending in metadata.csv, preserving archive order.
            meta_name = next(
                (name for name in names if name.endswith("metadata.csv")), None
            )
            if meta_name is None:
                raise FileNotFoundError("metadata.csv not found in archive")

            # Hoisted: the original called z.namelist() once per row (O(n*m));
            # a set gives O(1) membership tests.
            members = set(names)

            with z.open(meta_name) as f:
                reader = csv.DictReader(line.decode("utf-8") for line in f)
                for i, row in enumerate(reader):
                    img_name = f"images/{row['filename']}"
                    if img_name in members:
                        row["image"] = {
                            "bytes": z.read(img_name),
                            "path": row["filename"],
                        }
                    else:
                        # Keep the declared `image` feature present even when
                        # the file is missing, so feature encoding does not
                        # fail on an absent key.
                        row["image"] = None
                    yield i, row
    else:
        # Non-streaming mode: extract once into a cache directory next to the
        # archive, then read from disk. (The original called
        # datasets.filesystems.extract_archive, which is not a public
        # `datasets` API and fails at runtime.)
        base_dir = archive_path + "_extracted"
        if not os.path.isdir(base_dir):
            with zipfile.ZipFile(archive_path, "r") as z:
                z.extractall(base_dir)

        metadata_file = os.path.join(base_dir, "metadata.csv")
        images_dir = os.path.join(base_dir, "images")

        with open(metadata_file, encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for i, row in enumerate(reader):
                # Path string: lets the datasets.Image() feature lazy-load
                # the file from disk instead of materializing bytes here.
                row["image"] = os.path.join(images_dir, row["filename"])
                yield i, row