File size: 10,065 Bytes
b96b8b4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fcd820d
 
 
b96b8b4
fcd820d
 
 
 
 
b96b8b4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
"""Build the racing-gears dataset parquet files from raw/ images and labels.

Reads:
    raw/<source>/<label>/*.png        - pre-sorted images (racing-original, mnist)
    raw/<source>/unlabeled/*.png      - extracted frames needing labels
    labels/<source>.csv               - frame range labels (start,end,label)

Writes:
    data/train-00000-of-00001.parquet
    data/validation-00000-of-00001.parquet
    composites/<source>/label_<N>.png - per-label composites for verification

Usage:
    uv run python scripts/build_dataset.py [--val-frac 0.15] [--aug-target 200] [--no-mnist]
"""

import argparse
import csv
import io
import os
import random

import numpy as np
import pandas as pd
from PIL import Image, ImageEnhance


# Fixed seeds so shuffles, augmentation choices, and noise are reproducible.
random.seed(42)
np.random.seed(42)

# All images are resized to this (width, height) before PNG encoding.
TARGET_SIZE = (32, 32)


def img_to_bytes(img: Image.Image) -> bytes:
    """Serialize *img* to PNG-encoded bytes."""
    with io.BytesIO() as buf:
        img.save(buf, format="PNG")
        return buf.getvalue()


def load_labeled_dir(source_dir: str, source_name: str) -> list[dict]:
    """Load images from raw/<source>/<label>/ directory structure."""
    rows: list[dict] = []
    for entry in sorted(os.listdir(source_dir)):
        entry_path = os.path.join(source_dir, entry)
        # Only numeric subdirectories hold pre-sorted images; the "unlabeled"
        # directory is handled separately via the labels CSV path.
        if entry == "unlabeled" or not os.path.isdir(entry_path):
            continue
        try:
            label = int(entry)
        except ValueError:
            # Non-numeric directory name -> not a label directory.
            continue
        png_names = sorted(n for n in os.listdir(entry_path) if n.endswith(".png"))
        for fname in png_names:
            img = Image.open(os.path.join(entry_path, fname)).convert("L").resize(TARGET_SIZE)
            rows.append({
                "image": {"bytes": img_to_bytes(img), "path": None},
                "label": label,
                "source": source_name,
            })
    return rows


def load_from_labels_csv(source_name: str) -> list[dict]:
    """Load unlabeled frames and apply labels from CSV.

    The CSV at labels/<source>.csv holds inclusive (start, end, label)
    frame ranges.  Frames in raw/<source>/unlabeled/ are matched to ranges
    by their sorted position in the directory listing.

    NOTE(review): frame indices come from enumerate() over the sorted
    filenames, not from any frame number embedded in the filename — if
    frames are deleted or renamed, all later labels shift.  Confirm the CSV
    was authored against the same directory contents.
    """
    csv_path = f"labels/{source_name}.csv"
    frames_dir = f"raw/{source_name}/unlabeled"

    # No CSV or no frames directory -> this source has no CSV-labeled data.
    if not os.path.exists(csv_path) or not os.path.exists(frames_dir):
        return []

    # Read label ranges
    ranges = []
    with open(csv_path) as f:
        reader = csv.DictReader(f)
        for row in reader:
            ranges.append((int(row["start"]), int(row["end"]), int(row["label"])))

    # Map frame index to label (ranges are inclusive on both ends; later
    # ranges overwrite earlier ones on overlap)
    frame_labels = {}
    for start, end, label in ranges:
        for i in range(start, end + 1):
            frame_labels[i] = label

    # Load frames
    frames = sorted(f for f in os.listdir(frames_dir) if f.endswith(".png"))
    rows = []
    skipped = 0
    for idx, f in enumerate(frames):
        if idx not in frame_labels:
            skipped += 1
            continue
        img = Image.open(os.path.join(frames_dir, f)).convert("L").resize(TARGET_SIZE)
        rows.append({
            "image": {"bytes": img_to_bytes(img), "path": None},
            "label": frame_labels[idx],
            "source": source_name,
        })

    if skipped:
        print(f"  Warning: {skipped} frames in {source_name} had no label (skipped)")
    return rows


def augment(img: Image.Image) -> Image.Image:
    """Apply one random augmentation pass: shift, brightness, contrast, noise."""
    # Small random translation; pixels shifted in from outside are black.
    shift_x = random.randint(-3, 3)
    shift_y = random.randint(-3, 3)
    img = img.transform(img.size, Image.AFFINE, (1, 0, shift_x, 0, 1, shift_y), fillcolor=0)
    # Independent brightness and contrast jitter.
    img = ImageEnhance.Brightness(img).enhance(random.uniform(0.7, 1.3))
    img = ImageEnhance.Contrast(img).enhance(random.uniform(0.8, 1.2))
    # 30% of the time, add Gaussian pixel noise (sigma=8) and re-clip to [0, 255].
    if random.random() < 0.3:
        pixels = np.array(img, dtype=np.float32)
        pixels += np.random.normal(0, 8, pixels.shape)
        img = Image.fromarray(np.clip(pixels, 0, 255).astype(np.uint8), mode="L")
    return img


def make_composite(images: list[Image.Image], output_path: str, cell: int = 36, max_cols: int = 50):
    """Paste thumbnails onto a grid sheet and save it for visual verification."""
    if not images:
        return
    n_cols = min(max_cols, len(images))
    n_rows = -(-len(images) // n_cols)  # ceiling division
    sheet = Image.new("L", (n_cols * cell, n_rows * cell), 0)
    for i, thumb in enumerate(images):
        x = (i % n_cols) * cell
        y = (i // n_cols) * cell
        sheet.paste(thumb.resize((cell, cell)), (x, y))
    sheet.save(output_path)


def main():
    """Build train/validation parquet splits from raw images and label CSVs.

    Pipeline: load every source under raw/, split racing data and MNIST
    separately (stratified per label), augment under-represented racing
    classes in the training split only, then write HF-compatible parquet
    files plus per-label composite sheets for manual verification.
    """
    parser = argparse.ArgumentParser(description="Build racing-gears dataset")
    parser.add_argument("--val-frac", type=float, default=0.15, help="Validation fraction (default: 0.15)")
    parser.add_argument("--aug-target", type=int, default=200, help="Min racing samples per class after augmentation (default: 200)")
    parser.add_argument("--no-mnist", action="store_true", help="Exclude MNIST data")
    args = parser.parse_args()

    all_rows = []

    # 1. Load pre-sorted sources (racing-original, mnist)
    for source_dir in sorted(os.listdir("raw")):
        source_path = os.path.join("raw", source_dir)
        if not os.path.isdir(source_path):
            continue

        # Check if this source has a labels CSV (unlabeled frames)
        csv_rows = load_from_labels_csv(source_dir)
        if csv_rows:
            print(f"Loaded {len(csv_rows)} labeled frames from {source_dir}")
            all_rows.extend(csv_rows)

        # Check if this source has pre-sorted label dirs
        if source_dir == "mnist" and args.no_mnist:
            print(f"Skipping {source_dir} (--no-mnist)")
            continue
        dir_rows = load_labeled_dir(source_path, source_dir)
        if dir_rows:
            print(f"Loaded {len(dir_rows)} pre-sorted images from {source_dir}")
            all_rows.extend(dir_rows)

    df = pd.DataFrame(all_rows)
    print(f"\nTotal: {len(df)} images")
    print(pd.crosstab(df["label"], df["source"]))

    # 2. Separate racing vs mnist
    # NOTE(review): every non-mnist source is treated as "racing", including
    # any CSV-labeled frame sources — confirm that is intended.
    racing_sources = [s for s in df["source"].unique() if s != "mnist"]
    racing = df[df["source"].isin(racing_sources)].copy()
    mnist = df[df["source"] == "mnist"].copy()

    # 3. Stratified train/val split for racing
    # Each label contributes at least 1 validation sample, even tiny classes.
    racing_train_parts, racing_val_parts = [], []
    for label in sorted(racing["label"].unique()):
        group = racing[racing["label"] == label].sample(frac=1, random_state=42)
        n_val = max(1, int(len(group) * args.val_frac))
        racing_val_parts.append(group.iloc[:n_val])
        racing_train_parts.append(group.iloc[n_val:])

    racing_train = pd.concat(racing_train_parts, ignore_index=True) if racing_train_parts else pd.DataFrame()
    racing_val = pd.concat(racing_val_parts, ignore_index=True) if racing_val_parts else pd.DataFrame()

    # 4. Augment underrepresented racing classes
    # Only the training split is augmented, so validation stays real frames.
    # NOTE(review): if there is no racing data at all, racing_train is an
    # empty DataFrame with no "label" column and this raises KeyError.
    aug_rows = []
    for label in sorted(racing_train["label"].unique()):
        group = racing_train[racing_train["label"] == label]
        n_have = len(group)
        n_need = max(0, args.aug_target - n_have)
        if n_need > 0:
            print(f"  Augmenting label {label}: {n_have} -> {n_have + n_need} (+{n_need})")
            # Decode a random original each time and re-augment it; samples
            # are drawn with replacement from the training split only.
            source_rows = group.to_dict("records")
            for _ in range(n_need):
                row = random.choice(source_rows)
                orig_img = Image.open(io.BytesIO(row["image"]["bytes"])).convert("L")
                aug_img = augment(orig_img)
                aug_rows.append({
                    "image": {"bytes": img_to_bytes(aug_img), "path": None},
                    "label": label,
                    "source": "racing_aug",
                })

    if aug_rows:
        racing_train = pd.concat([racing_train, pd.DataFrame(aug_rows)], ignore_index=True)

    # 5. Stratified split for MNIST
    # MNIST gets a larger per-class validation floor (5) than racing (1).
    if not mnist.empty:
        mnist_train_parts, mnist_val_parts = [], []
        for label in sorted(mnist["label"].unique()):
            group = mnist[mnist["label"] == label].sample(frac=1, random_state=42)
            n_val = max(5, int(len(group) * args.val_frac))
            mnist_val_parts.append(group.iloc[:n_val])
            mnist_train_parts.append(group.iloc[n_val:])
        mnist_train = pd.concat(mnist_train_parts, ignore_index=True)
        mnist_val = pd.concat(mnist_val_parts, ignore_index=True)
    else:
        mnist_train = pd.DataFrame()
        mnist_val = pd.DataFrame()

    # 6. Combine and shuffle
    # NOTE(review): pd.concat raises if both parts of a split are empty
    # (e.g. --no-mnist with no racing data) — acceptable for a build script.
    train_parts = [p for p in [racing_train, mnist_train] if not p.empty]
    val_parts = [p for p in [racing_val, mnist_val] if not p.empty]
    new_train = pd.concat(train_parts, ignore_index=True).sample(frac=1, random_state=42).reset_index(drop=True)
    new_val = pd.concat(val_parts, ignore_index=True).sample(frac=1, random_state=42).reset_index(drop=True)

    print(f"\n=== Final dataset ===")
    print(f"Train: {len(new_train)}, Val: {len(new_val)}")
    print("\nTrain:")
    print(pd.crosstab(new_train["label"], new_train["source"]))
    print("\nVal:")
    print(pd.crosstab(new_val["label"], new_val["source"]))

    # 7. Write parquet with HF Image feature type
    # Imported here so the heavy `datasets` dependency is only required when
    # actually writing output.
    from datasets import Dataset, Image as HFImage

    os.makedirs("data", exist_ok=True)
    for name, split_df in [("train", new_train), ("val", new_val)]:
        ds = Dataset.from_pandas(split_df.reset_index(drop=True))
        # cast_column makes the dict-of-bytes column a proper HF Image feature.
        ds = ds.cast_column("image", HFImage())
        # HF convention names the second split "validation", not "val".
        fname = "train" if name == "train" else "validation"
        ds.to_parquet(f"data/{fname}-00000-of-00001.parquet")
    print(f"\nWritten to data/")

    # 8. Generate per-label composites for verification
    # Augmented samples are skipped: composites exist to eyeball real labels.
    for split_name, split_df in [("train", new_train), ("val", new_val)]:
        for source in sorted(split_df["source"].unique()):
            if source == "racing_aug":
                continue
            comp_dir = f"composites/{source}"
            os.makedirs(comp_dir, exist_ok=True)
            for label in sorted(split_df["label"].unique()):
                subset = split_df[(split_df["source"] == source) & (split_df["label"] == label)]
                if subset.empty:
                    continue
                images = [
                    Image.open(io.BytesIO(row["image"]["bytes"])).convert("L")
                    for _, row in subset.iterrows()
                ]
                make_composite(images, f"{comp_dir}/{split_name}_label_{label}.png")


# Script entry point.
if __name__ == "__main__":
    main()