"""Clean the dataset by training on MNIST (known-clean labels) + Paul Ricard
(visually verified, very clear digits), then using that model to filter/relabel
all racing data.

Strategy:
1. Train a strong CNN on MNIST + Paul Ricard (both have clean, unambiguous labels)
2. Run inference on ALL training data
3. Keep only samples where the model agrees with the label (or relabel if the model is very confident)
4. Do the same for the validation split
5. Rebuild parquet files and composites

Usage:
    uv run python scripts/relabel_clean.py
"""
|
|
| import numpy as np |
| import pandas as pd |
| import io |
| import os |
|
|
| import torch |
| import torch.nn as nn |
| import torch.optim as optim |
| from torch.utils.data import DataLoader, TensorDataset |
| from PIL import Image |
| from datasets import Dataset, Image as HFImage |
|
|
# One-shot script: bail out immediately if this file is imported as a module,
# so the heavy top-level work below (training, parquet rewrites) never runs.
# NOTE(review): sys.exit raises SystemExit inside the importing process; the
# conventional pattern is `if __name__ == "__main__": main()` — confirm intent.
if __name__ != "__main__":
    import sys

    sys.exit(0)
|
|
# Pick the best available accelerator: CUDA first, then Apple MPS, else CPU.
if torch.cuda.is_available():
    device = torch.device("cuda")
elif torch.backends.mps.is_available():
    device = torch.device("mps")
else:
    device = torch.device("cpu")
print(f"Using device: {device}")
|
|
|
|
class CNN(nn.Module):
    """Small convnet for 10-class grayscale digit classification.

    Three conv/BN/ReLU stages (the first two followed by 2x2 max-pooling,
    the last by a 4x4 adaptive average pool) feed a dropout-regularized
    two-layer MLP head. Input is (N, 1, H, W); output is (N, 10) logits.
    """

    def __init__(self):
        super().__init__()
        # Channel widths double per stage: 32 -> 64 -> 128.
        self.features = nn.Sequential(
            nn.Conv2d(1, 32, 3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(64, 128, 3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.AdaptiveAvgPool2d(4),
        )
        # 128 channels * 4*4 spatial = 2048 flattened features into the head.
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(128 * 16, 256),
            nn.ReLU(),
            nn.Dropout(0.4),
            nn.Linear(256, 10),
        )

    def forward(self, x):
        """Map a (N, 1, H, W) batch to (N, 10) class logits."""
        return self.classifier(self.features(x))
|
|
|
|
def load_images(df):
    """Decode the dataframe's image column into a float32 NCHW array.

    Each row's image["bytes"] is decoded with PIL, converted to single-channel
    grayscale ("L"), and scaled to [0, 1]. Returns shape (N, 1, H, W); all
    images must share the same dimensions (np.stack requires it).
    """
    decoded = [
        np.asarray(
            Image.open(io.BytesIO(row["image"]["bytes"])).convert("L"),
            dtype=np.float32,
        ) / 255.0
        for _, row in df.iterrows()
    ]
    return np.stack(decoded)[:, None, :, :]
|
|
|
|
def train_model(X, y, epochs=40):
    """Train a fresh CNN on (X, y) and return it.

    X: float32 array of shape (N, 1, H, W); y: integer labels of shape (N,).
    Optimizes cross-entropy with Adam (weight decay 1e-4) and cosine LR
    annealing over `epochs`, printing the mean batch loss every 10 epochs.
    """
    net = CNN().to(device)
    optimizer = optim.Adam(net.parameters(), lr=1e-3, weight_decay=1e-4)
    lr_schedule = optim.lr_scheduler.CosineAnnealingLR(optimizer, epochs)
    loss_fn = nn.CrossEntropyLoss()
    dataset = TensorDataset(torch.tensor(X), torch.tensor(y, dtype=torch.long))
    loader = DataLoader(dataset, batch_size=64, shuffle=True)

    net.train()
    for epoch in range(epochs):
        running = 0.0
        for batch_x, batch_y in loader:
            batch_x = batch_x.to(device)
            batch_y = batch_y.to(device)
            optimizer.zero_grad()
            batch_loss = loss_fn(net(batch_x), batch_y)
            batch_loss.backward()
            optimizer.step()
            running += batch_loss.item()
        lr_schedule.step()
        if (epoch + 1) % 10 == 0:
            print(f" epoch {epoch+1}/{epochs} loss={running/len(loader):.4f}")
    return net
|
|
|
|
def predict(model, X, batch_size=512):
    """Return softmax class probabilities for X as a (N, n_classes) numpy array.

    Improvements over the original:
    - Runs inference in mini-batches (default 512) so a large X cannot
      exhaust device memory; chunk results are concatenated, so the output
      is identical to a single full-batch forward pass in eval mode.
    - Infers the target device from the model's own parameters instead of
      relying on the module-level `device` global.

    model: a trained nn.Module producing per-class logits.
    X: float32 array, first axis = samples.
    """
    model.eval()
    target = next(model.parameters()).device
    chunks = []
    with torch.no_grad():
        for start in range(0, len(X), batch_size):
            xb = torch.tensor(X[start:start + batch_size]).to(target)
            chunks.append(torch.softmax(model(xb), dim=1).cpu().numpy())
    # Empty input: return an empty (0, 10) array rather than calling the model.
    return np.concatenate(chunks) if chunks else np.empty((0, 10), dtype=np.float32)
|
|
|
|
| |
print("\n=== Building clean seed dataset ===")
train_df = pd.read_parquet("data/train-00000-of-00001.parquet")
val_df = pd.read_parquet("data/validation-00000-of-00001.parquet")

# The seed set is the union of the two sources whose labels we trust fully.
is_mnist = train_df["source"] == "mnist"
mnist = train_df.loc[is_mnist]
print(f"MNIST: {len(mnist)} samples")

is_ricard = train_df["source"] == "paul-ricard-alpine"
paul_ricard = train_df.loc[is_ricard]
print(f"Paul Ricard: {len(paul_ricard)} samples")

seed = pd.concat([mnist, paul_ricard], ignore_index=True)
X_seed = load_images(seed)
y_seed = seed["label"].values
print(f"Seed dataset: {len(seed)} samples")
|
|
| |
print("\n=== Training 5-model ensemble on seed data ===")
# Bagging: each ensemble member trains on a bootstrap resample of the seed
# set, which decorrelates the models and stabilizes the consensus vote.
models = []
n_seed = len(X_seed)
for member in range(5):
    print(f"Model {member+1}/5:")
    boot_idx = np.random.choice(n_seed, n_seed, replace=True)
    models.append(train_model(X_seed[boot_idx], y_seed[boot_idx]))
|
|
| |
print("\n=== Predicting on all data ===")

for split_name, df in [("train", train_df), ("validation", val_df)]:
    X = load_images(df)
    y = df["label"].values
    sources = df["source"].values

    # Ensemble consensus: mean of the per-model softmax distributions.
    probs = np.zeros((len(X), 10))
    for model in models:
        probs += predict(model, X)
    probs /= len(models)

    preds = probs.argmax(axis=1)
    conf = probs.max(axis=1)

    # Keep a sample unless the ensemble confidently (p > 0.5) disagrees with
    # its label; MNIST rows are trusted unconditionally. Vectorized
    # replacement for the original per-row Python loop — same boolean result.
    # NOTE(review): the module docstring mentions relabeling confident
    # disagreements; only dropping is implemented here.
    keep = (sources == "mnist") | (preds == y) | (conf <= 0.5)

    dropped = (~keep).sum()
    print(f"\n{split_name}: {len(df)} -> {len(df) - dropped} (dropping {dropped})")

    # Per-class report: how many of each label were dropped, and what the
    # ensemble thought those samples actually were.
    for label in range(10):
        mask = y == label
        drop_mask = mask & ~keep
        if drop_mask.sum() > 0:
            pred_dist = pd.Series(preds[drop_mask]).value_counts().to_dict()
            print(f" label={label}: drop {drop_mask.sum()}/{mask.sum()} -> model says {pred_dist}")

    df_clean = df[keep].reset_index(drop=True)
    print(" Final distribution:")
    print(pd.crosstab(df_clean["label"], df_clean["source"]))

    # NOTE(review): this overwrites the input parquet in place — keep a backup
    # if the uncleaned data may be needed again.
    ds = Dataset.from_pandas(df_clean)
    ds = ds.cast_column("image", HFImage())
    ds.to_parquet(f"data/{split_name}-00000-of-00001.parquet")

print("\nDone! Now run: uv run python scripts/make_composites.py")
|
|