lucasmaes committed on
Commit
ee4ca3f
·
verified ·
1 Parent(s): 29d3391

Create cifar-10-lt.py

Browse files
Files changed (1) hide show
  1. cifar-10-lt.py +231 -0
cifar-10-lt.py ADDED
@@ -0,0 +1,231 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 …
3
+ # (license header unchanged)
4
+
5
+ """CIFAR-10-LT Dataset (HF Datasets 3.6 compatible)"""
6
+
7
+ import os
8
+ import pickle
9
+ from typing import Dict, Iterator, List, Tuple
10
+
11
+ import numpy as np
12
+ import datasets
13
+
14
+ _CITATION = """\
15
+ @TECHREPORT{Krizhevsky09learningmultiple,
16
+ author = {Alex Krizhevsky},
17
+ title = {Learning multiple layers of features from tiny images},
18
+ institution = {},
19
+ year = {2009}
20
+ }
21
+ """
22
+
23
+ _DESCRIPTION = """\
24
+ The CIFAR-10-LT imbalanced dataset is comprised of under 60,000 color images (32×32),
25
+ across 10 classes. The test set has 10,000 images (1,000 per class).
26
+ The training set is imbalanced with exponential factors of 10, 20, 50, 100, or 200.
27
+ """
28
+
29
+ _DATA_URL = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
30
+
31
+ _NAMES = [
32
+ "airplane",
33
+ "automobile",
34
+ "bird",
35
+ "cat",
36
+ "deer",
37
+ "dog",
38
+ "frog",
39
+ "horse",
40
+ "ship",
41
+ "truck",
42
+ ]
43
+
44
+
45
class Cifar10LTConfig(datasets.BuilderConfig):
    """BuilderConfig for CIFAR-10-LT.

    Holds the imbalance parameters that control how the long-tailed
    training subset is carved out of the full CIFAR-10 train split.
    """

    def __init__(
        self,
        imb_type: str,
        imb_factor: float,
        rand_number: int = 0,
        cls_num: int = 10,
        **kwargs,
    ):
        """
        Args:
            imb_type: Imbalance profile, e.g. "exp" or "step".
            imb_factor: Ratio between the tail class size and the head class size.
            rand_number: Seed for the deterministic per-class subsampling RNG.
            cls_num: Number of classes (10 for CIFAR-10).
            **kwargs: Forwarded to ``datasets.BuilderConfig`` (name, description, ...).
        """
        # All configs share the same version string.
        super().__init__(version=datasets.Version("1.0.1"), **kwargs)
        # Normalize types defensively; callers may pass strings or ints.
        self.cls_num = int(cls_num)
        self.rand_number = int(rand_number)
        self.imb_factor = float(imb_factor)
        self.imb_type = str(imb_type)
62
+
63
+
64
class Cifar10(datasets.GeneratorBasedBuilder):
    """CIFAR-10-LT: long-tailed CIFAR-10 built by subsampling the train split.

    The test split is always the full, balanced 10,000-image CIFAR-10 test set.
    The train split keeps an exponentially (or step-wise) decaying number of
    images per class, controlled by the config's ``imb_type``/``imb_factor``.
    """

    DEFAULT_CONFIG_NAME = "r-100"

    # One config per imbalance ratio; 1/r is the tail-to-head size ratio.
    BUILDER_CONFIGS = [
        Cifar10LTConfig(
            name=f"r-{r}",
            description=f"CIFAR-10-LT r={r} (exp)",
            imb_type="exp",
            imb_factor=1 / r,
        )
        for r in (10, 20, 50, 100, 200)
    ]

    def _info(self) -> datasets.DatasetInfo:
        """Dataset metadata: 32x32 RGB images with one of ten class labels."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "img": datasets.Image(),  # uint8 HWC
                    "label": datasets.ClassLabel(names=_NAMES),
                }
            ),
            supervised_keys=("img", "label"),
            homepage="https://www.cs.toronto.edu/~kriz/cifar.html",
            citation=_CITATION,
        )

    # ---------- split planning / index generation ----------

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download/extract once, then plan the LT train subset deterministically."""
        extracted_path = dl_manager.download_and_extract(_DATA_URL)

        # The extractor may return either the archive root (containing
        # "cifar-10-batches-py") or the inner directory itself.
        candidate = os.path.join(extracted_path, "cifar-10-batches-py")
        data_root = candidate if os.path.isdir(candidate) else extracted_path

        # Precompute LT indices for the train split, seeded so every run of the
        # same config selects the same subset.
        train_labels = self._collect_labels_from_dir(data_root, split="train")
        img_num_per_cls = self._get_img_num_per_cls(len(train_labels))
        rs = np.random.RandomState(self.config.rand_number)
        train_indices, _ = self._gen_imbalanced_data(img_num_per_cls, train_labels, rs)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_root": data_root,
                    "split": "train",
                    # Pass a JSON-serializable type; converted to a set in the
                    # generator for O(1) membership tests.
                    "selected_indices": sorted(int(i) for i in train_indices),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_root": data_root, "split": "test", "selected_indices": None},
            ),
        ]

    @staticmethod
    def _batch_files_in_dir(data_root: str, split: str) -> List[str]:
        """Absolute paths of the CIFAR pickle batches for *split*, in CIFAR order.

        Raises:
            ValueError: if *split* is neither "train" nor "test".
        """
        if split == "train":
            batches = [f"data_batch_{i}" for i in range(1, 6)]
        elif split == "test":
            batches = ["test_batch"]
        else:
            raise ValueError(f"Unknown split: {split}")
        return [os.path.join(data_root, b) for b in batches]

    @staticmethod
    def _read_batch(path: str) -> dict:
        """Unpickle one CIFAR batch file ("latin1" handles the Python-2 pickles)."""
        with open(path, "rb") as fo:
            return pickle.load(fo, encoding="latin1")

    def _collect_labels_from_dir(self, data_root: str, split: str) -> List[int]:
        """Read labels across all CIFAR batches for a split (from extracted files)."""
        labels_all: List[int] = []
        for path in self._batch_files_in_dir(data_root, split):
            d = self._read_batch(path)
            # Keys may be str or bytes depending on how the pickle was written.
            labels = d.get("labels", d.get(b"labels"))
            if labels is None:
                raise KeyError(f"'labels' not found in {path}")
            labels_all.extend(labels)
        return labels_all

    def _get_img_num_per_cls(self, data_length: int) -> List[int]:
        """Per-class image budget for the imbalanced train subset.

        "exp": class c keeps img_max * imb_factor**(c / (cls_num - 1)) images,
        so class 0 is the head and the last class is the tail.
        "step": the first half of the classes keep img_max images each, the
        second half keep img_max * imb_factor. Any other value: balanced.
        """
        img_max = data_length / self.config.cls_num  # e.g., 50000 / 10 = 5000
        img_num_per_cls: List[int] = []
        if self.config.imb_type == "exp":
            for cls_idx in range(self.config.cls_num):
                num = img_max * (self.config.imb_factor ** (cls_idx / (self.config.cls_num - 1.0)))
                # Ensure at least one sample per class despite int truncation.
                img_num_per_cls.append(max(1, int(num)))
        elif self.config.imb_type == "step":
            for _ in range(self.config.cls_num // 2):
                img_num_per_cls.append(int(img_max))
            for _ in range(self.config.cls_num // 2):
                img_num_per_cls.append(max(1, int(img_max * self.config.imb_factor)))
        else:
            img_num_per_cls.extend([int(img_max)] * self.config.cls_num)
        return img_num_per_cls

    def _gen_imbalanced_data(
        self, img_num_per_cls: List[int], targets: List[int], rs: np.random.RandomState
    ) -> Tuple[List[int], Dict[int, int]]:
        """Select global indices realizing the per-class budgets.

        Args:
            img_num_per_cls: Budget per class, indexed in sorted-label order.
            targets: Labels of the concatenated train set, in CIFAR order.
            rs: Seeded RNG so the selection is reproducible.

        Returns:
            (selected global indices, per-class count dict).

        Raises:
            ValueError: if the number of distinct labels does not match the
                budget list — ``zip`` would otherwise silently drop classes.
        """
        targets_np = np.asarray(targets, dtype=np.int64)
        classes = np.unique(targets_np)
        # Guard against silent misalignment (e.g. a truncated/corrupt batch).
        if len(classes) != len(img_num_per_cls):
            raise ValueError(
                f"Found {len(classes)} distinct labels but {len(img_num_per_cls)} "
                "per-class budgets; they must match."
            )
        new_indices: List[int] = []
        num_per_cls_dict: Dict[int, int] = {}
        for the_class, the_img_num in zip(classes, img_num_per_cls):
            num_per_cls_dict[int(the_class)] = int(the_img_num)
            idx = np.where(targets_np == the_class)[0]
            rs.shuffle(idx)
            new_indices.extend(idx[:the_img_num].tolist())
        return new_indices, num_per_cls_dict

    # ---------- example generation ----------

    def _generate_examples(self, data_root: str, split: str, selected_indices=None) -> Iterator[Tuple[str, dict]]:
        """
        Yields (key, example) pairs.
        For train: only indices in `selected_indices` are yielded (LT subset).
        For test: all examples are yielded.
        """
        # For quick membership checks; None means "yield everything" (test split).
        selected_set = set(selected_indices) if selected_indices is not None else None

        global_idx = 0  # global index across all batches in CIFAR order
        for path in self._batch_files_in_dir(data_root, split):
            d = self._read_batch(path)

            labels = d.get("labels", d.get(b"labels"))
            data = d.get("data", d.get(b"data"))
            if labels is None or data is None:
                raise KeyError(f"Missing 'labels' or 'data' in {path}")

            base = os.path.basename(path)
            # data rows are (3072,) uint8 packed CHW.
            for i in range(len(labels)):
                # Train split: only yield if selected.
                if selected_set is not None and global_idx not in selected_set:
                    global_idx += 1
                    continue

                # Reshape to HWC uint8 for the Image feature.
                img = np.reshape(data[i], (3, 32, 32)).transpose(1, 2, 0)
                yield f"{base}_{i}", {"img": img, "label": int(labels[i])}
                global_idx += 1