leonleyang commited on
Commit
b5c5d16
·
verified ·
1 Parent(s): 61b8f33

Create imagenet.py

Browse files
Files changed (1) hide show
  1. imagenet.py +151 -0
imagenet.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import io
import json
import os
import tarfile
import urllib.request

import datasets
import scipy.io
from PIL import Image
8
+
9
+
10
class ImageNet(datasets.GeneratorBasedBuilder):
    """ImageNet (ILSVRC2012) dataset.

    The training archive contains one inner tar per class (named e.g.
    "n01440764.tar"); the mapping from WordNet ID (wnid) to integer label is
    taken from the Keras ``imagenet_class_index.json`` file, whose 1000
    classes are ordered by sorted wnid.

    The validation archive contains all images in a flat layout (filenames
    such as "ILSVRC2012_val_00000001.JPEG").  The devkit ground-truth file
    lists one ILSVRC2012_ID per image, in filename order.  Those IDs follow
    the devkit's own ordering (defined in ``meta.mat``), NOT the sorted-wnid
    ordering of the class index, so they are remapped through ``meta.mat``
    to keep validation labels consistent with training labels.
    """

    VERSION = datasets.Version("1.0.0")

    # URLs for automatic download.
    _TRAIN_URL = "https://www.image-net.org/data/ILSVRC/2012/ILSVRC2012_img_train.tar"
    _VAL_URL = "https://www.image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar"
    _DEVKIT_URL = "https://www.image-net.org/data/ILSVRC/2012/ILSVRC2012_devkit_t12.tar.gz"
    _CLASS_INDEX_URL = "https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json"

    def _class_index(self):
        """Return the Keras class-index mapping, downloading it at most once.

        Returns:
            dict: keys are the strings "0" .. "999"; each value is a
            two-element list ``[wnid, human_readable_name]``.
        """
        # Cache on the instance so _info() and _generate_examples() share
        # a single network fetch instead of re-downloading the JSON.
        if not hasattr(self, "_mapping"):
            with urllib.request.urlopen(self._CLASS_INDEX_URL) as response:
                self._mapping = json.load(response)
        return self._mapping

    def _wnid_to_label(self):
        """Return a dict mapping WordNet ID (e.g. "n01440764") -> label 0-999."""
        mapping = self._class_index()
        return {mapping[str(i)][0]: i for i in range(1000)}

    @staticmethod
    def _decode_image(fileobj):
        """Read one image file object fully and return it as an RGB PIL image."""
        img = Image.open(io.BytesIO(fileobj.read()))
        return img if img.mode == "RGB" else img.convert("RGB")

    def _devkit_val_labels(self, devkit_archive):
        """Extract validation labels from the devkit, in class-index label space.

        Reads two files out of the devkit archive:

        * ``data/meta.mat`` — maps each ILSVRC2012_ID (1-1000, leaf synsets
          only) to its wnid;
        * ``data/ILSVRC2012_validation_ground_truth.txt`` — one ILSVRC2012_ID
          per validation image, in filename order.

        The devkit IDs are converted ID -> wnid -> class-index label so that
        validation labels agree with the training split.

        Args:
            devkit_archive: path to ``ILSVRC2012_devkit_t12.tar.gz``.

        Returns:
            list[int]: labels (0-999) aligned with the sorted validation
            filenames.

        Raises:
            ValueError: if either required file is missing from the devkit.
        """
        with tarfile.open(devkit_archive, "r:*") as devkit_tar:
            meta_member = None
            gt_member = None
            for m in devkit_tar.getmembers():
                if m.name.endswith("meta.mat"):
                    meta_member = m
                elif m.name.endswith("ILSVRC2012_validation_ground_truth.txt"):
                    gt_member = m
            if meta_member is None or gt_member is None:
                raise ValueError(
                    "Could not find meta.mat and/or the ground truth file in the devkit archive."
                )
            # loadmat needs a seekable stream; buffer the member in memory
            # (meta.mat is small).
            meta = scipy.io.loadmat(
                io.BytesIO(devkit_tar.extractfile(meta_member).read()), squeeze_me=True
            )
            # Only leaf synsets (num_children == 0) are the 1000 ILSVRC classes;
            # internal WordNet nodes in meta.mat must be skipped.
            ilsvrc_id_to_wnid = {
                int(s["ILSVRC2012_ID"]): str(s["WNID"])
                for s in meta["synsets"]
                if int(s["num_children"]) == 0
            }
            gt_lines = (
                devkit_tar.extractfile(gt_member).read().decode("utf-8").strip().splitlines()
            )
        wnid_to_label = self._wnid_to_label()
        return [wnid_to_label[ilsvrc_id_to_wnid[int(line)]] for line in gt_lines]

    def _info(self):
        """Build the DatasetInfo, including the 1000 ClassLabel names."""
        mapping = self._class_index()
        # Each name is "<wnid>: <human readable name>", in class-index order.
        class_names = [f"{mapping[str(i)][0]}: {mapping[str(i)][1]}" for i in range(1000)]
        return datasets.DatasetInfo(
            description="ImageNet Large Scale Visual Recognition Challenge 2012 dataset.",
            features=datasets.Features({
                "image": datasets.Image(),
                "label": datasets.ClassLabel(names=class_names),
            }),
            supervised_keys=("image", "label"),
            homepage="http://www.image-net.org/challenges/LSVRC/2012/",
            citation="""@article{ILSVRC15,
Author = {Olga Russakovsky and Jia Deng and Hao Su and Jonathan Krause and Sanjeev Satheesh and Sean Ma and Zhiheng Huang and Andrej Karpathy and Aditya Khosla and Michael Bernstein and Alexander C. Berg and Li Fei-Fei},
Title = {{ImageNet Large Scale Visual Recognition Challenge}},
Year = {2015},
journal   = {International Journal of Computer Vision (IJCV)},
doi = {10.1007/s11263-015-0816-y},
volume={115},
number={3},
pages={211-252}}"""
        )

    def _split_generators(self, dl_manager):
        """Locate (or download) the archives and declare the two splits."""
        # NOTE(review): cluster-specific pre-downloaded copies; the download
        # URLs are used as a fallback when these paths do not exist.
        train_path = "/gpfs/data/shared/imagenet/ILSVRC2012/ILSVRC2012_img_train.tar"
        val_path = "/gpfs/data/shared/imagenet/ILSVRC2012/ILSVRC2012_img_val.tar"

        if not os.path.exists(train_path):
            train_path = dl_manager.download(self._TRAIN_URL)
        if not os.path.exists(val_path):
            val_path = dl_manager.download(self._VAL_URL)

        # The devkit is needed only for validation ground truth.
        devkit_archive = dl_manager.download(self._DEVKIT_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"archive_path": train_path, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "archive_path": val_path,
                    "split": "validation",
                    "devkit_archive": devkit_archive,
                },
            ),
        ]

    def _generate_examples(self, archive_path, split, devkit_archive=None):
        """Yield ``(idx, {"image": PIL.Image, "label": int})`` examples.

        Args:
            archive_path: path to the split's tar archive.
            split: either "train" or "validation".
            devkit_archive: path to the devkit tar.gz (validation only).
        """
        if split == "train":
            wnid_to_label = self._wnid_to_label()
            with tarfile.open(archive_path, "r:*") as train_tar:
                example_idx = 0
                # The training archive nests one tar per class.
                for member in train_tar.getmembers():
                    if not (member.isfile() and member.name.endswith(".tar")):
                        continue
                    # Inner tar names look like "n01440764.tar"; the stem is the wnid.
                    wnid = os.path.splitext(member.name)[0]
                    if wnid not in wnid_to_label:
                        # Skip any unexpected files.
                        continue
                    label = wnid_to_label[wnid]
                    inner_fileobj = train_tar.extractfile(member)
                    with tarfile.open(fileobj=inner_fileobj, mode="r:*") as class_tar:
                        for img_member in class_tar.getmembers():
                            if img_member.isfile():
                                yield example_idx, {
                                    "image": self._decode_image(class_tar.extractfile(img_member)),
                                    "label": label,
                                }
                                example_idx += 1

        elif split == "validation":
            # Devkit IDs are remapped through meta.mat so validation labels
            # live in the same label space as training (sorted-wnid order).
            labels = self._devkit_val_labels(devkit_archive)
            with tarfile.open(archive_path, "r:*") as val_tar:
                # Sort by filename so images align with the ground-truth order.
                members = sorted(
                    (m for m in val_tar.getmembers() if m.isfile()),
                    key=lambda m: m.name,
                )
                if len(labels) != len(members):
                    raise ValueError(
                        "Mismatch between the number of validation images and ground truth labels."
                    )
                for example_idx, (member, label) in enumerate(zip(members, labels)):
                    yield example_idx, {
                        "image": self._decode_image(val_tar.extractfile(member)),
                        "label": label,
                    }