"""Iterative dataset cleaning: cross-val clean train 3x, then ensemble-clean val."""
import numpy as np
import pandas as pd
import io
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from PIL import Image
from sklearn.model_selection import StratifiedKFold
from datasets import Dataset, Image as HFImage
# This file is a script, not a library: bail out immediately when imported so
# the heavy module-level training below never runs by accident.
if __name__ != "__main__":
    import sys

    sys.exit(0)
class SmallCNN(nn.Module):
    """Compact CNN mapping (N, 1, H, W) grayscale batches to (N, 10) logits."""

    def __init__(self):
        super().__init__()
        # Three conv stages; the adaptive pool fixes the spatial output at
        # 4x4 regardless of the input resolution.
        self.features = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.AdaptiveAvgPool2d(4),
        )
        # 128 channels * 4 * 4 spatial positions = 2048 flattened features.
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(128 * 16, 256),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(256, 10),
        )

    def forward(self, x):
        """Return raw class logits (softmax is applied by callers)."""
        return self.classifier(self.features(x))
def load_images(df):
    """Decode the dataframe's parquet 'image' column into a float array.

    Each row's ``image`` cell is expected to be a dict holding raw encoded
    bytes under ``"bytes"``; images are converted to single-channel ("L")
    and scaled to [0, 1].  Returns a (N, 1, H, W) float32 ndarray.
    """
    decoded = [
        np.array(
            Image.open(io.BytesIO(row["image"]["bytes"])).convert("L"),
            dtype=np.float32,
        )
        / 255.0
        for _, row in df.iterrows()
    ]
    # Insert the channel axis expected by Conv2d.
    return np.stack(decoded)[:, np.newaxis, :, :]
def crossval_predict(X, y, n_splits=5, epochs=30):
    """Compute out-of-fold class probabilities with stratified K-fold CNNs.

    Each sample's row in the returned (len(X), 10) array comes from a model
    that never saw that sample during training, giving unbiased predictions
    for label-noise detection.
    """
    oof_probs = np.zeros((len(X), 10))
    splitter = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42)
    for fold_idx, (train_idx, holdout_idx) in enumerate(splitter.split(X, y)):
        print(f" fold {fold_idx + 1}/{n_splits}...", flush=True)
        net = SmallCNN()
        optimizer = optim.Adam(net.parameters(), lr=1e-3)
        loss_fn = nn.CrossEntropyLoss()
        batches = DataLoader(
            TensorDataset(
                torch.tensor(X[train_idx]),
                torch.tensor(y[train_idx], dtype=torch.long),
            ),
            batch_size=64,
            shuffle=True,
        )
        net.train()
        for _ in range(epochs):
            for batch_x, batch_y in batches:
                optimizer.zero_grad()
                loss = loss_fn(net(batch_x), batch_y)
                loss.backward()
                optimizer.step()
        net.eval()
        with torch.no_grad():
            logits = net(torch.tensor(X[holdout_idx]))
            oof_probs[holdout_idx] = torch.softmax(logits, dim=1).numpy()
    return oof_probs
def clean_round(df, round_num, conf_threshold=0.6):
    """Run one cross-validation cleaning pass over *df*.

    A sample is dropped when the out-of-fold prediction disagrees with its
    label AND the model's confidence exceeds *conf_threshold* — i.e. the
    ensemble is sure the label is wrong.  Returns ``(cleaned_df, oof_acc)``.
    """
    print(f"\n=== Round {round_num}: cross-val clean ({len(df)} samples) ===", flush=True)
    X = load_images(df)
    y = df["label"].values
    probs = crossval_predict(X, y)
    predicted = probs.argmax(axis=1)
    oof_acc = (predicted == y).mean()
    print(f" OOF accuracy: {oof_acc:.3f}")
    confidence = probs.max(axis=1)
    drop_mask = (predicted != y) & (confidence > conf_threshold)
    print(f" Removing {drop_mask.sum()} samples (conf > {conf_threshold})")
    # Per-class report of what the model thinks the dropped samples really are.
    for cls in range(10):
        cls_drop = (y == cls) & drop_mask
        if cls_drop.sum() > 0:
            dist = pd.Series(predicted[cls_drop]).value_counts().to_dict()
            print(f" label={cls}: drop {cls_drop.sum()} -> model says {dist}")
    return df[~drop_mask].reset_index(drop=True), oof_acc
# === Iterative train cleaning ===
train_df = pd.read_parquet("data/train-00000-of-00001.parquet")
for round_num in range(1, 4):
    train_df, round_acc = clean_round(train_df, round_num)
    # Once the out-of-fold accuracy is this high, remaining disagreements
    # are mostly model error rather than label noise — stop iterating.
    if round_acc > 0.97:
        print(" Accuracy high enough, stopping early")
        break
print(f"\nFinal train: {len(train_df)}")
print(pd.crosstab(train_df["label"], train_df["source"]))
# === Ensemble clean val ===
# The cleaned train set is used to fit 7 bootstrap-resampled models whose
# averaged softmax output then vets the validation labels.
print(f"\n=== Cleaning val with 7-model ensemble ===", flush=True)
X_tr = load_images(train_df)
y_tr = train_df["label"].values
val_df = pd.read_parquet("data/validation-00000-of-00001.parquet")
X_v = load_images(val_df)
y_v = val_df["label"].values
ensemble_probs = np.zeros((len(X_v), 10))
for member in range(7):
    print(f" model {member + 1}/7...", flush=True)
    net = SmallCNN()
    optimizer = optim.Adam(net.parameters(), lr=1e-3)
    loss_fn = nn.CrossEntropyLoss()
    # Bootstrap resample so each ensemble member sees a different view.
    boot_idx = np.random.choice(len(X_tr), len(X_tr), replace=True)
    batches = DataLoader(
        TensorDataset(
            torch.tensor(X_tr[boot_idx]),
            torch.tensor(y_tr[boot_idx], dtype=torch.long),
        ),
        batch_size=64,
        shuffle=True,
    )
    net.train()
    for _ in range(30):
        for batch_x, batch_y in batches:
            optimizer.zero_grad()
            loss_fn(net(batch_x), batch_y).backward()
            optimizer.step()
    net.eval()
    with torch.no_grad():
        ensemble_probs += torch.softmax(net(torch.tensor(X_v)), dim=1).numpy()
ensemble_probs /= 7
ens_pred = ensemble_probs.argmax(axis=1)
ens_conf = ensemble_probs.max(axis=1)
print(f" Val accuracy vs labels: {(ens_pred == y_v).mean():.3f}")
# Drop val rows the ensemble confidently relabels (threshold stricter than
# the 0.6 used for train, since val is cleaned only once).
bad_val = (ens_pred != y_v) & (ens_conf > 0.7)
print(f" Removing {bad_val.sum()} val samples")
for cls in range(10):
    cls_mask = (y_v == cls) & bad_val
    if cls_mask.sum() > 0:
        dist = pd.Series(ens_pred[cls_mask]).value_counts().to_dict()
        print(f" label={cls}: drop {cls_mask.sum()} -> model says {dist}")
val_final = val_df[~bad_val].reset_index(drop=True)
print(f"\nFinal val: {len(val_final)}")
print(pd.crosstab(val_final["label"], val_final["source"]))
# === Write ===
# Overwrite the original parquet shards with the cleaned splits, restoring
# the HF image feature type so the bytes round-trip as images.
for split_name, split_df in [("train", train_df), ("validation", val_final)]:
    dataset = Dataset.from_pandas(split_df.reset_index(drop=True))
    dataset = dataset.cast_column("image", HFImage())
    dataset.to_parquet(f"data/{split_name}-00000-of-00001.parquet")
print("\nDone!")