Datasets:
cleanup: remove temp files, fix find_label_issues.py guard
Browse files
- pyproject.toml +1 -0
- scripts/find_label_issues.py +24 -29
- uv.lock +2 -0
pyproject.toml
CHANGED
|
@@ -7,6 +7,7 @@ requires-python = ">=3.12"
|
|
| 7 |
dependencies = [
|
| 8 |
"cleanlab>=2.9.0",
|
| 9 |
"datasets>=4.8.4",
|
|
|
|
| 10 |
"pandas>=3.0.1",
|
| 11 |
"pillow>=12.1.1",
|
| 12 |
"pyarrow>=23.0.1",
|
|
|
|
| 7 |
dependencies = [
|
| 8 |
"cleanlab>=2.9.0",
|
| 9 |
"datasets>=4.8.4",
|
| 10 |
+
"huggingface-hub>=1.8.0",
|
| 11 |
"pandas>=3.0.1",
|
| 12 |
"pillow>=12.1.1",
|
| 13 |
"pyarrow>=23.0.1",
|
scripts/find_label_issues.py
CHANGED
|
@@ -20,6 +20,10 @@ import torch.optim as optim
|
|
| 20 |
from torch.utils.data import DataLoader, TensorDataset
|
| 21 |
|
| 22 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
# --- 1. Load data ---
|
| 24 |
print("Loading data...")
|
| 25 |
df = pd.read_parquet("data/train-00000-of-00001.parquet")
|
|
@@ -96,56 +100,47 @@ for fold, (train_idx, val_idx) in enumerate(skf.split(X, y)):
|
|
| 96 |
print(f" OOF accuracy: {(pred_probs.argmax(axis=1) == y).mean():.3f}")
|
| 97 |
|
| 98 |
|
| 99 |
-
# --- 4. Cleanlab ---
|
| 100 |
-
print("\nRunning Cleanlab...")
|
| 101 |
-
from cleanlab import Datalab
|
| 102 |
-
|
| 103 |
-
lab = Datalab(
|
| 104 |
-
data={"label": y.tolist(), "source": sources.tolist()},
|
| 105 |
-
label_name="label",
|
| 106 |
-
)
|
| 107 |
-
lab.find_issues(pred_probs=pred_probs)
|
| 108 |
-
|
| 109 |
-
print("\n=== Issue Summary ===")
|
| 110 |
-
print(lab.get_issue_summary())
|
| 111 |
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
label_issues = issues[issues["is_label_issue"]].sort_values("label_score")
|
| 115 |
|
| 116 |
-
print(f"\n=== {len(label_issues)} Label Issues Found ===")
|
| 117 |
-
if len(label_issues) > 0:
|
| 118 |
-
for idx in label_issues.index[:50]:
|
| 119 |
-
given = y[idx]
|
| 120 |
-
predicted = pred_probs[idx].argmax()
|
| 121 |
-
score = issues.loc[idx, "label_score"]
|
| 122 |
-
src = sources[idx]
|
| 123 |
-
print(f" idx={idx:5d} given={given} predicted={predicted} score={score:.4f} source={src}")
|
| 124 |
|
| 125 |
# Save full results
|
| 126 |
results = pd.DataFrame({
|
| 127 |
"index": range(len(y)),
|
| 128 |
"label": y,
|
| 129 |
"predicted": pred_probs.argmax(axis=1),
|
| 130 |
-
"label_score": issues["label_score"],
|
| 131 |
-
"is_label_issue": issues["is_label_issue"],
|
| 132 |
"source": sources,
|
| 133 |
})
|
| 134 |
results.to_csv("label_issues.csv", index=False)
|
| 135 |
-
print(f"Full results saved to label_issues.csv")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 136 |
|
| 137 |
# Make composite of worst issues
|
| 138 |
print("\nGenerating composite of flagged issues...")
|
| 139 |
-
if len(label_issues) > 0:
|
| 140 |
from PIL import ImageDraw
|
| 141 |
cell = 48
|
| 142 |
-
n_show = min(100, len(label_issues))
|
| 143 |
cols = min(20, n_show)
|
| 144 |
rows = (n_show + cols - 1) // cols
|
| 145 |
sheet = Image.new("RGB", (cols * cell, rows * cell), (0, 0, 0))
|
| 146 |
draw = ImageDraw.Draw(sheet)
|
| 147 |
|
| 148 |
-
for i, idx in enumerate(label_issues.index[:n_show]):
|
| 149 |
img = Image.open(io.BytesIO(df.iloc[idx]["image"]["bytes"])).convert("L")
|
| 150 |
img_rgb = img.resize((cell, cell)).convert("RGB")
|
| 151 |
r, c = i // cols, i % cols
|
|
|
|
| 20 |
from torch.utils.data import DataLoader, TensorDataset
|
| 21 |
|
| 22 |
|
| 23 |
+
if __name__ != "__main__":
|
| 24 |
+
import sys
|
| 25 |
+
sys.exit(0)
|
| 26 |
+
|
| 27 |
# --- 1. Load data ---
|
| 28 |
print("Loading data...")
|
| 29 |
df = pd.read_parquet("data/train-00000-of-00001.parquet")
|
|
|
|
| 100 |
print(f" OOF accuracy: {(pred_probs.argmax(axis=1) == y).mean():.3f}")
|
| 101 |
|
| 102 |
|
| 103 |
+
# --- 4. Cleanlab (simple API, no multiprocessing) ---
|
| 104 |
+
print("\nRunning Cleanlab find_label_issues...")
|
| 105 |
+
from cleanlab.filter import find_label_issues as cli_find
|
| 106 |
+
from cleanlab.rank import get_label_quality_scores
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 107 |
|
| 108 |
+
label_quality_scores = get_label_quality_scores(y, pred_probs)
|
| 109 |
+
issue_mask = cli_find(labels=y, pred_probs=pred_probs, return_indices_ranked_by="self_confidence")
|
|
|
|
| 110 |
|
| 111 |
+
print(f"\n=== {len(issue_mask)} Label Issues Found ===")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 112 |
|
| 113 |
# Save full results
|
| 114 |
results = pd.DataFrame({
|
| 115 |
"index": range(len(y)),
|
| 116 |
"label": y,
|
| 117 |
"predicted": pred_probs.argmax(axis=1),
|
| 118 |
+
"label_score": label_quality_scores,
|
| 119 |
+
"is_label_issue": [i in issue_mask for i in range(len(y))],
|
| 120 |
"source": sources,
|
| 121 |
})
|
| 122 |
results.to_csv("label_issues.csv", index=False)
|
| 123 |
+
print(f"Full results saved to label_issues.csv")
|
| 124 |
+
|
| 125 |
+
for idx in issue_mask[:50]:
|
| 126 |
+
given = y[idx]
|
| 127 |
+
predicted = pred_probs[idx].argmax()
|
| 128 |
+
score = label_quality_scores[idx]
|
| 129 |
+
src = sources[idx]
|
| 130 |
+
print(f" idx={idx:5d} given={given} predicted={predicted} score={score:.4f} source={src}")
|
| 131 |
|
| 132 |
# Make composite of worst issues
|
| 133 |
print("\nGenerating composite of flagged issues...")
|
| 134 |
+
if len(issue_mask) > 0:
|
| 135 |
from PIL import ImageDraw
|
| 136 |
cell = 48
|
| 137 |
+
n_show = min(100, len(issue_mask))
|
| 138 |
cols = min(20, n_show)
|
| 139 |
rows = (n_show + cols - 1) // cols
|
| 140 |
sheet = Image.new("RGB", (cols * cell, rows * cell), (0, 0, 0))
|
| 141 |
draw = ImageDraw.Draw(sheet)
|
| 142 |
|
| 143 |
+
for i, idx in enumerate(issue_mask[:n_show]):
|
| 144 |
img = Image.open(io.BytesIO(df.iloc[idx]["image"]["bytes"])).convert("L")
|
| 145 |
img_rgb = img.resize((cell, cell)).convert("RGB")
|
| 146 |
r, c = i // cols, i % cols
|
uv.lock
CHANGED
|
@@ -17,6 +17,7 @@ source = { virtual = "." }
|
|
| 17 |
dependencies = [
|
| 18 |
{ name = "cleanlab" },
|
| 19 |
{ name = "datasets" },
|
|
|
|
| 20 |
{ name = "pandas" },
|
| 21 |
{ name = "pillow" },
|
| 22 |
{ name = "pyarrow" },
|
|
@@ -29,6 +30,7 @@ dependencies = [
|
|
| 29 |
requires-dist = [
|
| 30 |
{ name = "cleanlab", specifier = ">=2.9.0" },
|
| 31 |
{ name = "datasets", specifier = ">=4.8.4" },
|
|
|
|
| 32 |
{ name = "pandas", specifier = ">=3.0.1" },
|
| 33 |
{ name = "pillow", specifier = ">=12.1.1" },
|
| 34 |
{ name = "pyarrow", specifier = ">=23.0.1" },
|
|
|
|
| 17 |
dependencies = [
|
| 18 |
{ name = "cleanlab" },
|
| 19 |
{ name = "datasets" },
|
| 20 |
+
{ name = "huggingface-hub" },
|
| 21 |
{ name = "pandas" },
|
| 22 |
{ name = "pillow" },
|
| 23 |
{ name = "pyarrow" },
|
|
|
|
| 30 |
requires-dist = [
|
| 31 |
{ name = "cleanlab", specifier = ">=2.9.0" },
|
| 32 |
{ name = "datasets", specifier = ">=4.8.4" },
|
| 33 |
+
{ name = "huggingface-hub", specifier = ">=1.8.0" },
|
| 34 |
{ name = "pandas", specifier = ">=3.0.1" },
|
| 35 |
{ name = "pillow", specifier = ">=12.1.1" },
|
| 36 |
{ name = "pyarrow", specifier = ">=23.0.1" },
|