# racing-gears/scripts/build_dataset.py
# (last change: "fix: build_dataset.py writes HF Image feature type", commit fcd820d)
"""Build the racing-gears dataset parquet files from raw/ images and labels.
Reads:
raw/<source>/<label>/*.png - pre-sorted images (racing-original, mnist)
raw/<source>/unlabeled/*.png - extracted frames needing labels
labels/<source>.csv - frame range labels (start,end,label)
Writes:
data/train-00000-of-00001.parquet
data/validation-00000-of-00001.parquet
composites/<source>/label_<N>.png - per-label composites for verification
Usage:
uv run python scripts/build_dataset.py [--val-frac 0.15] [--aug-target 200] [--no-mnist]
"""
import argparse
import csv
import io
import os
import random
import numpy as np
import pandas as pd
from PIL import Image, ImageEnhance
# Seed both RNGs so augmentation and shuffling are reproducible across runs.
random.seed(42)
np.random.seed(42)
# Every image is converted to grayscale and resized to this (width, height)
# before being stored in the dataset.
TARGET_SIZE = (32, 32)
def img_to_bytes(img: Image.Image) -> bytes:
    """Encode *img* as PNG and return the raw encoded bytes."""
    with io.BytesIO() as out:
        img.save(out, format="PNG")
        return out.getvalue()
def load_labeled_dir(source_dir: str, source_name: str) -> list[dict]:
    """Load images from a raw/<source>/<label>/ directory tree.

    Only subdirectories whose name parses as an integer are treated as
    label folders; "unlabeled" and any non-numeric names are ignored.
    Returns one row dict per PNG: {"image": {...}, "label": int, "source": str}.
    """
    out: list[dict] = []
    for entry in sorted(os.listdir(source_dir)):
        entry_path = os.path.join(source_dir, entry)
        if not os.path.isdir(entry_path) or entry == "unlabeled":
            continue
        try:
            label = int(entry)
        except ValueError:
            # Not a numeric label folder — skip it.
            continue
        png_names = sorted(n for n in os.listdir(entry_path) if n.endswith(".png"))
        for name in png_names:
            image = Image.open(os.path.join(entry_path, name)).convert("L").resize(TARGET_SIZE)
            out.append(
                {
                    "image": {"bytes": img_to_bytes(image), "path": None},
                    "label": label,
                    "source": source_name,
                }
            )
    return out
def load_from_labels_csv(source_name: str) -> list[dict]:
    """Load frames from raw/<source>/unlabeled/ and label them from a CSV.

    The CSV at labels/<source>.csv holds inclusive frame-index ranges as
    (start, end, label) rows; frame index is the position of the file in
    the sorted PNG listing. Frames covered by no range are skipped and
    counted in a warning. Returns [] when either path is missing.
    """
    csv_path = f"labels/{source_name}.csv"
    frames_dir = f"raw/{source_name}/unlabeled"
    if not (os.path.exists(csv_path) and os.path.exists(frames_dir)):
        return []

    # Expand each (start, end, label) range into a frame-index -> label map.
    # Later rows overwrite earlier ones on overlap, matching CSV order.
    frame_labels: dict[int, int] = {}
    with open(csv_path) as fh:
        for entry in csv.DictReader(fh):
            start, end = int(entry["start"]), int(entry["end"])
            value = int(entry["label"])
            for i in range(start, end + 1):
                frame_labels[i] = value

    frame_names = sorted(n for n in os.listdir(frames_dir) if n.endswith(".png"))
    rows: list[dict] = []
    skipped = 0
    for idx, name in enumerate(frame_names):
        label = frame_labels.get(idx)
        if label is None:
            skipped += 1
            continue
        image = Image.open(os.path.join(frames_dir, name)).convert("L").resize(TARGET_SIZE)
        rows.append(
            {
                "image": {"bytes": img_to_bytes(image), "path": None},
                "label": label,
                "source": source_name,
            }
        )
    if skipped:
        print(f" Warning: {skipped} frames in {source_name} had no label (skipped)")
    return rows
def augment(img: Image.Image) -> Image.Image:
    """Return a randomly perturbed copy: shift, brightness, contrast, noise.

    RNG calls happen in a fixed order (shift x, shift y, brightness,
    contrast, noise gate) so results are reproducible under a fixed seed.
    """
    # Translate by up to +/-3 px per axis, filling exposed pixels with black.
    shift_x = random.randint(-3, 3)
    shift_y = random.randint(-3, 3)
    out = img.transform(img.size, Image.AFFINE, (1, 0, shift_x, 0, 1, shift_y), fillcolor=0)
    out = ImageEnhance.Brightness(out).enhance(random.uniform(0.7, 1.3))
    out = ImageEnhance.Contrast(out).enhance(random.uniform(0.8, 1.2))
    # 30% of the time, add mild Gaussian pixel noise (sigma = 8 gray levels).
    if random.random() < 0.3:
        pixels = np.array(out, dtype=np.float32)
        pixels += np.random.normal(0, 8, pixels.shape)
        out = Image.fromarray(np.clip(pixels, 0, 255).astype(np.uint8), mode="L")
    return out
def make_composite(images: list[Image.Image], output_path: str, cell: int = 36, max_cols: int = 50):
    """Tile *images* into a grid sheet and save it to *output_path*.

    Each image is resized to a cell x cell thumbnail; rows wrap after
    max_cols columns. Does nothing for an empty list.
    """
    if not images:
        return
    n_cols = min(max_cols, len(images))
    n_rows = -(-len(images) // n_cols)  # ceiling division
    sheet = Image.new("L", (n_cols * cell, n_rows * cell), 0)
    for pos, thumb in enumerate(images):
        x = (pos % n_cols) * cell
        y = (pos // n_cols) * cell
        sheet.paste(thumb.resize((cell, cell)), (x, y))
    sheet.save(output_path)
def main():
    """Build the train/validation parquet splits and verification composites.

    Pipeline: load every source under raw/ (CSV-labeled frames and/or
    pre-sorted label folders), split racing and MNIST data separately
    (stratified by label), augment underrepresented racing classes in the
    train split only, then write Hugging Face parquet files under data/
    and per-label composite sheets under composites/ for eyeballing.
    """
    parser = argparse.ArgumentParser(description="Build racing-gears dataset")
    parser.add_argument("--val-frac", type=float, default=0.15, help="Validation fraction (default: 0.15)")
    parser.add_argument("--aug-target", type=int, default=200, help="Min racing samples per class after augmentation (default: 200)")
    parser.add_argument("--no-mnist", action="store_true", help="Exclude MNIST data")
    args = parser.parse_args()
    all_rows = []
    # 1. Load pre-sorted sources (racing-original, mnist). A single source
    #    may contribute both CSV-labeled frames and pre-sorted folders.
    for source_dir in sorted(os.listdir("raw")):
        source_path = os.path.join("raw", source_dir)
        if not os.path.isdir(source_path):
            continue
        # Check if this source has a labels CSV (unlabeled frames)
        csv_rows = load_from_labels_csv(source_dir)
        if csv_rows:
            print(f"Loaded {len(csv_rows)} labeled frames from {source_dir}")
            all_rows.extend(csv_rows)
        # Check if this source has pre-sorted label dirs
        if source_dir == "mnist" and args.no_mnist:
            print(f"Skipping {source_dir} (--no-mnist)")
            continue
        dir_rows = load_labeled_dir(source_path, source_dir)
        if dir_rows:
            print(f"Loaded {len(dir_rows)} pre-sorted images from {source_dir}")
            all_rows.extend(dir_rows)
    df = pd.DataFrame(all_rows)
    print(f"\nTotal: {len(df)} images")
    print(pd.crosstab(df["label"], df["source"]))
    # 2. Separate racing vs mnist — they get different split/augment rules.
    racing_sources = [s for s in df["source"].unique() if s != "mnist"]
    racing = df[df["source"].isin(racing_sources)].copy()
    mnist = df[df["source"] == "mnist"].copy()
    # 3. Stratified train/val split for racing: per label, shuffle
    #    deterministically and reserve at least one sample for validation.
    racing_train_parts, racing_val_parts = [], []
    for label in sorted(racing["label"].unique()):
        group = racing[racing["label"] == label].sample(frac=1, random_state=42)
        n_val = max(1, int(len(group) * args.val_frac))
        racing_val_parts.append(group.iloc[:n_val])
        racing_train_parts.append(group.iloc[n_val:])
    racing_train = pd.concat(racing_train_parts, ignore_index=True) if racing_train_parts else pd.DataFrame()
    racing_val = pd.concat(racing_val_parts, ignore_index=True) if racing_val_parts else pd.DataFrame()
    # 4. Augment underrepresented racing classes up to --aug-target.
    #    Train split only, so validation stays purely real frames.
    aug_rows = []
    for label in sorted(racing_train["label"].unique()):
        group = racing_train[racing_train["label"] == label]
        n_have = len(group)
        n_need = max(0, args.aug_target - n_have)
        if n_need > 0:
            print(f" Augmenting label {label}: {n_have} -> {n_have + n_need} (+{n_need})")
            source_rows = group.to_dict("records")
            for _ in range(n_need):
                # Pick a random real sample, decode it, and perturb it.
                row = random.choice(source_rows)
                orig_img = Image.open(io.BytesIO(row["image"]["bytes"])).convert("L")
                aug_img = augment(orig_img)
                aug_rows.append({
                    "image": {"bytes": img_to_bytes(aug_img), "path": None},
                    "label": label,
                    "source": "racing_aug",
                })
    if aug_rows:
        racing_train = pd.concat([racing_train, pd.DataFrame(aug_rows)], ignore_index=True)
    # 5. Stratified split for MNIST (at least 5 validation samples per digit).
    if not mnist.empty:
        mnist_train_parts, mnist_val_parts = [], []
        for label in sorted(mnist["label"].unique()):
            group = mnist[mnist["label"] == label].sample(frac=1, random_state=42)
            n_val = max(5, int(len(group) * args.val_frac))
            mnist_val_parts.append(group.iloc[:n_val])
            mnist_train_parts.append(group.iloc[n_val:])
        mnist_train = pd.concat(mnist_train_parts, ignore_index=True)
        mnist_val = pd.concat(mnist_val_parts, ignore_index=True)
    else:
        mnist_train = pd.DataFrame()
        mnist_val = pd.DataFrame()
    # 6. Combine and shuffle (deterministic via random_state).
    train_parts = [p for p in [racing_train, mnist_train] if not p.empty]
    val_parts = [p for p in [racing_val, mnist_val] if not p.empty]
    new_train = pd.concat(train_parts, ignore_index=True).sample(frac=1, random_state=42).reset_index(drop=True)
    new_val = pd.concat(val_parts, ignore_index=True).sample(frac=1, random_state=42).reset_index(drop=True)
    print(f"\n=== Final dataset ===")
    print(f"Train: {len(new_train)}, Val: {len(new_val)}")
    print("\nTrain:")
    print(pd.crosstab(new_train["label"], new_train["source"]))
    print("\nVal:")
    print(pd.crosstab(new_val["label"], new_val["source"]))
    # 7. Write parquet with HF Image feature type so viewers render
    #    the image column instead of showing raw bytes.
    # NOTE: local import keeps `datasets` optional until write time.
    from datasets import Dataset, Image as HFImage
    os.makedirs("data", exist_ok=True)
    for name, split_df in [("train", new_train), ("val", new_val)]:
        ds = Dataset.from_pandas(split_df.reset_index(drop=True))
        ds = ds.cast_column("image", HFImage())
        # HF convention names the second split "validation", not "val".
        fname = "train" if name == "train" else "validation"
        ds.to_parquet(f"data/{fname}-00000-of-00001.parquet")
    print(f"\nWritten to data/")
    # 8. Generate per-label composites for verification (skip synthetic
    #    augmented rows — only real sources need eyeballing).
    for split_name, split_df in [("train", new_train), ("val", new_val)]:
        for source in sorted(split_df["source"].unique()):
            if source == "racing_aug":
                continue
            comp_dir = f"composites/{source}"
            os.makedirs(comp_dir, exist_ok=True)
            for label in sorted(split_df["label"].unique()):
                subset = split_df[(split_df["source"] == source) & (split_df["label"] == label)]
                if subset.empty:
                    continue
                images = [
                    Image.open(io.BytesIO(row["image"]["bytes"])).convert("L")
                    for _, row in subset.iterrows()
                ]
                make_composite(images, f"{comp_dir}/{split_name}_label_{label}.png")
# Script entry point: run the full dataset build when executed directly.
if __name__ == "__main__":
    main()