"""Find mislabeled samples using Cleanlab + a small CNN.
1. Train a simple CNN on the dataset with cross-validation
2. Get out-of-fold predicted probabilities
3. Run Cleanlab to find label issues, outliers, duplicates
Usage:
uv run python scripts/find_label_issues.py
"""
import io
from pathlib import Path

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from PIL import Image
from sklearn.model_selection import StratifiedKFold
from torch.utils.data import DataLoader, TensorDataset
# This flat script does all its work at module level, so the usual
# `if __name__ == "__main__": main()` guard is inverted: when imported as a
# module, bail out immediately instead of indenting the whole file.
# NOTE(review): sys.exit raises SystemExit in the importer — confirm no
# caller imports this module expecting it to load quietly.
if __name__ != "__main__":
    import sys
    sys.exit(0)
# --- 1. Load data ---
# Decode every image column entry (raw encoded bytes) into a float32
# grayscale array scaled to [0, 1]; a comprehension replaces the manual
# append loop.
print("Loading data...")
df = pd.read_parquet("data/train-00000-of-00001.parquet")
images = [
    np.asarray(
        Image.open(io.BytesIO(row["image"]["bytes"])).convert("L"),
        dtype=np.float32,
    )
    / 255.0
    for _, row in df.iterrows()
]
# np.stack requires all images to share one HxW; the channel axis is added
# for the CNN. Presumably 32x32 per the original comment — not enforced here.
X = np.stack(images)[:, np.newaxis, :, :]  # (N, 1, H, W)
y = df["label"].values
sources = df["source"].values
print(f"Loaded {len(X)} images, {len(np.unique(y))} classes")
# --- 2. Simple CNN ---
class SmallCNN(nn.Module):
    """Compact CNN for small grayscale images.

    Three conv stages (1 -> 32 -> 64 -> 64 channels) ending in an adaptive
    4x4 pool, so the classifier head works regardless of input resolution.
    """

    def __init__(self, num_classes=10):
        super().__init__()
        # Convolutional trunk: two strided reductions via max-pool, then a
        # fixed-size 4x4 spatial summary.
        trunk = [
            nn.Conv2d(1, 32, 3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(64, 64, 3, padding=1),
            nn.ReLU(),
            nn.AdaptiveAvgPool2d(4),
        ]
        self.features = nn.Sequential(*trunk)
        # Two-layer head on the flattened 64*4*4 feature map.
        head = [
            nn.Flatten(),
            nn.Linear(64 * 4 * 4, 128),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(128, num_classes),
        ]
        self.classifier = nn.Sequential(*head)

    def forward(self, x):
        """Return raw class logits for a batch of (N, 1, H, W) images."""
        return self.classifier(self.features(x))
def train_and_predict(X_train, y_train, X_val, epochs=15, batch_size=64):
    """Train a fresh SmallCNN and return softmax probabilities for X_val.

    Args:
        X_train: float32 array of shape (N, 1, H, W).
        y_train: integer class labels, shape (N,).
        X_val: float32 array of shape (M, 1, H, W).
        epochs: number of passes over the training data.
        batch_size: minibatch size for training and inference.

    Returns:
        np.ndarray of shape (M, 10) whose rows sum to 1.
    """
    # Prefer CUDA, then Apple MPS, and fall back to CPU — the original only
    # considered MPS, silently training on CPU on CUDA machines.
    if torch.cuda.is_available():
        device = torch.device("cuda")
    elif torch.backends.mps.is_available():
        device = torch.device("mps")
    else:
        device = torch.device("cpu")
    model = SmallCNN().to(device)
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    criterion = nn.CrossEntropyLoss()
    train_ds = TensorDataset(
        torch.from_numpy(np.ascontiguousarray(X_train)),
        torch.as_tensor(y_train, dtype=torch.long),
    )
    loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True)
    model.train()
    for _ in range(epochs):
        for xb, yb in loader:
            xb, yb = xb.to(device), yb.to(device)
            optimizer.zero_grad()
            loss = criterion(model(xb), yb)
            loss.backward()
            optimizer.step()
    # Predict in minibatches: the original pushed the whole validation set to
    # the device in one tensor, which can exhaust GPU/MPS memory.
    model.eval()
    prob_chunks = []
    with torch.no_grad():
        for start in range(0, len(X_val), batch_size):
            chunk = torch.from_numpy(
                np.ascontiguousarray(X_val[start:start + batch_size])
            ).to(device)
            prob_chunks.append(torch.softmax(model(chunk), dim=1).cpu())
    if not prob_chunks:  # empty validation set
        return np.empty((0, 10), dtype=np.float32)
    return torch.cat(prob_chunks).numpy()
# --- 3. Cross-validated predictions ---
# Out-of-fold probabilities: each sample is scored exactly once, by the model
# that never saw it during training.
print("\nTraining 5-fold cross-validated CNN...")
n_splits = 5
skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42)
pred_probs = np.zeros((len(X), 10))
for fold, (train_idx, val_idx) in enumerate(skf.split(X, y), start=1):
    print(f" Fold {fold}/{n_splits}...")
    pred_probs[val_idx] = train_and_predict(X[train_idx], y[train_idx], X[val_idx])
print(f" OOF accuracy: {(pred_probs.argmax(axis=1) == y).mean():.3f}")
# --- 4. Cleanlab (simple API, no multiprocessing) ---
print("\nRunning Cleanlab find_label_issues...")
from cleanlab.filter import find_label_issues as cli_find
from cleanlab.rank import get_label_quality_scores

# Per-sample quality score in [0, 1]; lower = more likely mislabeled.
label_quality_scores = get_label_quality_scores(y, pred_probs)
# NOTE: with return_indices_ranked_by set, Cleanlab returns an index array
# ranked worst-first — not a boolean mask, despite this variable's name
# (kept for compatibility with the composite section below).
issue_mask = cli_find(labels=y, pred_probs=pred_probs, return_indices_ranked_by="self_confidence")
print(f"\n=== {len(issue_mask)} Label Issues Found ===")
# Save full results. Membership tests go through a set: `i in ndarray` is a
# linear scan, making the original flag column O(n * issues).
issue_set = {int(i) for i in issue_mask}
results = pd.DataFrame({
    "index": range(len(y)),
    "label": y,
    "predicted": pred_probs.argmax(axis=1),
    "label_score": label_quality_scores,
    "is_label_issue": [i in issue_set for i in range(len(y))],
    "source": sources,
})
results.to_csv("label_issues.csv", index=False)
print("Full results saved to label_issues.csv")
# Show the 50 worst offenders (already ranked worst-first by Cleanlab).
for idx in issue_mask[:50]:
    given = y[idx]
    predicted = pred_probs[idx].argmax()
    score = label_quality_scores[idx]
    src = sources[idx]
    print(f" idx={idx:5d} given={given} predicted={predicted} score={score:.4f} source={src}")
# Make composite of worst issues
print("\nGenerating composite of flagged issues...")
if len(issue_mask) > 0:
    from PIL import ImageDraw

    cell = 48
    n_show = min(100, len(issue_mask))
    cols = min(20, n_show)
    rows = (n_show + cols - 1) // cols  # ceiling division
    sheet = Image.new("RGB", (cols * cell, rows * cell), (0, 0, 0))
    draw = ImageDraw.Draw(sheet)
    for i, idx in enumerate(issue_mask[:n_show]):
        img = Image.open(io.BytesIO(df.iloc[idx]["image"]["bytes"])).convert("L")
        img_rgb = img.resize((cell, cell)).convert("RGB")
        r, c = i // cols, i % cols
        # `y` is the global label array, hence y_pos for the pixel coordinate.
        x, y_pos = c * cell, r * cell
        sheet.paste(img_rgb, (x, y_pos))
        given = y[idx]
        predicted = pred_probs[idx].argmax()
        # Red label = given, Green label = predicted
        draw.text((x + 2, y_pos + 2), str(given), fill=(255, 80, 80))
        draw.text((x + 2, y_pos + 14), str(predicted), fill=(80, 255, 80))
    # Image.save does not create directories; without this the script crashed
    # with FileNotFoundError when composites/ was missing.
    Path("composites").mkdir(parents=True, exist_ok=True)
    sheet.save("composites/label_issues.png")
    print(" composites/label_issues.png (red=given, green=predicted)")