# racing-gears/scripts/find_label_issues.py
# (last commit: 259a090 by tobil — "cleanup: remove temp files, fix find_label_issues.py guard")
"""Find mislabeled samples using Cleanlab + a small CNN.
1. Train a simple CNN on the dataset with cross-validation
2. Get out-of-fold predicted probabilities
3. Run Cleanlab to find label issues, outliers, duplicates
Usage:
uv run python scripts/find_label_issues.py
"""
import io
import numpy as np
import pandas as pd
from PIL import Image
from sklearn.model_selection import StratifiedKFold
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
# Guard: this file is a run-only script whose work happens at module top level.
# If it is imported rather than executed, stop before the heavy training code
# below runs.
# NOTE(review): sys.exit() raises SystemExit in the *importing* process and
# will terminate it (not just skip this module) — confirm that is the intended
# behavior for any importer; the conventional alternative is wrapping the
# script body in `if __name__ == "__main__":`.
if __name__ != "__main__":
    import sys
    sys.exit(0)
# --- 1. Load data ---
print("Loading data...")
df = pd.read_parquet("data/train-00000-of-00001.parquet")
# Decode each row's embedded image bytes to grayscale and scale to [0, 1].
decoded = [
    np.array(Image.open(io.BytesIO(row["image"]["bytes"])).convert("L"),
             dtype=np.float32) / 255.0
    for _, row in df.iterrows()
]
# Channel axis inserted for the CNN; assumes images share one size — presumably
# 32x32 per the comment below, TODO confirm against the dataset.
X = np.stack(decoded)[:, np.newaxis, :, :]  # (N, 1, 32, 32)
y = df["label"].values
sources = df["source"].values
print(f"Loaded {len(X)} images, {len(np.unique(y))} classes")
# --- 2. Simple CNN ---
class SmallCNN(nn.Module):
    """Compact convnet: a 3-stage conv trunk feeding a 2-layer MLP head.

    Input is (N, 1, H, W) grayscale; AdaptiveAvgPool2d pins the trunk's
    spatial output at 4x4 regardless of H and W, so the head's input size
    is fixed at 64 * 4 * 4.
    """

    def __init__(self, num_classes=10):
        super().__init__()
        # Layer lists splat into the same Sequential slots as before, so
        # state_dict keys are unchanged.
        trunk = [
            nn.Conv2d(1, 32, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
            nn.Conv2d(64, 64, 3, padding=1), nn.ReLU(), nn.AdaptiveAvgPool2d(4),
        ]
        head = [
            nn.Flatten(),
            nn.Linear(64 * 4 * 4, 128), nn.ReLU(), nn.Dropout(0.3),
            nn.Linear(128, num_classes),
        ]
        self.features = nn.Sequential(*trunk)
        self.classifier = nn.Sequential(*head)

    def forward(self, x):
        feats = self.features(x)
        return self.classifier(feats)
def train_and_predict(X_train, y_train, X_val, epochs=15, batch_size=64):
    """Train a fresh SmallCNN on (X_train, y_train); return probabilities for X_val.

    Args:
        X_train: float array, shape (N, 1, H, W).
        y_train: integer class labels, shape (N,).
        X_val: float array, shape (M, 1, H, W).
        epochs: full passes over the training data.
        batch_size: minibatch size for training and inference.

    Returns:
        (M, num_classes) numpy array of softmax class probabilities.
    """
    # Prefer CUDA, then Apple MPS, then CPU (original checked MPS only).
    if torch.cuda.is_available():
        device = torch.device("cuda")
    elif torch.backends.mps.is_available():
        device = torch.device("mps")
    else:
        device = torch.device("cpu")
    model = SmallCNN().to(device)
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    criterion = nn.CrossEntropyLoss()
    train_ds = TensorDataset(torch.tensor(X_train), torch.tensor(y_train, dtype=torch.long))
    loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True)
    model.train()
    for epoch in range(epochs):
        for xb, yb in loader:
            xb, yb = xb.to(device), yb.to(device)
            optimizer.zero_grad()
            loss = criterion(model(xb), yb)
            loss.backward()
            optimizer.step()
    # Predict on val in batches instead of pushing the whole set to the
    # device at once, bounding device memory use.
    model.eval()
    chunks = []
    with torch.no_grad():
        for start in range(0, len(X_val), batch_size):
            xb = torch.tensor(X_val[start:start + batch_size]).to(device)
            chunks.append(torch.softmax(model(xb), dim=1).cpu())
    return torch.cat(chunks).numpy()
# --- 3. Cross-validated predictions ---
print("\nTraining 5-fold cross-validated CNN...")
n_splits = 5
skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42)
# Out-of-fold probabilities: each row is filled exactly once, by the fold
# that held it out.
pred_probs = np.zeros((len(X), 10))
fold_iter = skf.split(X, y)
for fold, (train_idx, val_idx) in enumerate(fold_iter):
    print(f" Fold {fold + 1}/{n_splits}...")
    pred_probs[val_idx] = train_and_predict(X[train_idx], y[train_idx], X[val_idx])
print(f" OOF accuracy: {(pred_probs.argmax(axis=1) == y).mean():.3f}")
# --- 4. Cleanlab (simple API, no multiprocessing) ---
print("\nRunning Cleanlab find_label_issues...")
from cleanlab.filter import find_label_issues as cli_find
from cleanlab.rank import get_label_quality_scores
label_quality_scores = get_label_quality_scores(y, pred_probs)
# NOTE: with return_indices_ranked_by set, cli_find returns an array of
# flagged sample *indices* ranked worst-first, not a boolean mask —
# despite this variable's name (kept because later sections use it).
issue_mask = cli_find(labels=y, pred_probs=pred_probs, return_indices_ranked_by="self_confidence")
print(f"\n=== {len(issue_mask)} Label Issues Found ===")
# Save full results
issue_set = set(issue_mask)  # O(1) membership; `i in issue_mask` scanned the array per row (O(N^2) total)
results = pd.DataFrame({
    "index": range(len(y)),
    "label": y,
    "predicted": pred_probs.argmax(axis=1),
    "label_score": label_quality_scores,
    "is_label_issue": [i in issue_set for i in range(len(y))],
    "source": sources,
})
results.to_csv("label_issues.csv", index=False)
print("Full results saved to label_issues.csv")  # was an f-string with no placeholders
# Print the 50 worst-ranked flagged samples (indices come back worst-first).
for idx in issue_mask[:50]:
    given, predicted = y[idx], pred_probs[idx].argmax()
    score, src = label_quality_scores[idx], sources[idx]
    print(f" idx={idx:5d} given={given} predicted={predicted} score={score:.4f} source={src}")
# Make composite of worst issues
print("\nGenerating composite of flagged issues...")
if len(issue_mask) > 0:
    from PIL import ImageDraw
    import os
    cell = 48           # pixel size of each tile in the grid
    n_show = min(100, len(issue_mask))
    cols = min(20, n_show)
    rows = (n_show + cols - 1) // cols  # ceil division
    sheet = Image.new("RGB", (cols * cell, rows * cell), (0, 0, 0))
    draw = ImageDraw.Draw(sheet)
    for i, idx in enumerate(issue_mask[:n_show]):
        # Re-decode the original bytes rather than reusing X so the tile
        # shows the raw image, not the normalized array.
        img = Image.open(io.BytesIO(df.iloc[idx]["image"]["bytes"])).convert("L")
        img_rgb = img.resize((cell, cell)).convert("RGB")
        r, c = i // cols, i % cols
        x, y_pos = c * cell, r * cell
        sheet.paste(img_rgb, (x, y_pos))
        given = y[idx]
        predicted = pred_probs[idx].argmax()
        # Red label = given, Green label = predicted
        draw.text((x + 2, y_pos + 2), str(given), fill=(255, 80, 80))
        draw.text((x + 2, y_pos + 14), str(predicted), fill=(80, 255, 80))
    # fix: save() raised FileNotFoundError when the output dir didn't exist
    os.makedirs("composites", exist_ok=True)
    sheet.save("composites/label_issues.png")
    print(" composites/label_issues.png (red=given, green=predicted)")  # was a placeholder-less f-string