VOIDER committed on
Commit 1e3b83e · verified · 1 Parent(s): 73b6364

Update app.py

Files changed (1)
  1. app.py +108 -116
app.py CHANGED
@@ -1,162 +1,154 @@
 import torch
 import torch.nn as nn
 import clip
 import gradio as gr
-import numpy as np
 from PIL import Image
 from huggingface_hub import hf_hub_download
-import os
-
-# ── Labels ─────────────────────────────────────────────────────────────────────
-# Pony V7 captioning uses 9 aesthetic buckets (worst → best)
-LABELS = [
-    "worst quality",
-    "very bad quality",
-    "bad quality",
-    "low quality",
-    "normal quality",
-    "good quality",
-    "high quality",
-    "best quality",
-    "masterpiece",
-]
-
-# Colour gradient: red → yellow → green
-COLOURS = [
-    "#e74c3c", "#e67e22", "#f39c12",
-    "#d4ac0d", "#a9cce3", "#27ae60",
-    "#1e8449", "#148f77", "#0e6655",
-]
 
-# ── Model ───────────────────────────────────────────────────────────────────────
-class AestheticHead(nn.Module):
-    """Small MLP head that sits on top of frozen CLIP image features."""
-    def __init__(self, in_features: int = 768, num_classes: int = 9):
         super().__init__()
-        self.layers = nn.Sequential(
-            nn.Linear(in_features, 1024),
             nn.ReLU(),
-            nn.Dropout(0.2),
-            nn.Linear(1024, 128),
             nn.ReLU(),
-            nn.Dropout(0.2),
-            nn.Linear(128, num_classes),
         )
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
-        return self.layers(x)
 
 
 DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 print(f"[info] device: {DEVICE}")
 
-# Load CLIP backbone
-print("[info] Loading CLIP ViT-L/14 …")
 clip_model, preprocess = clip.load("ViT-L/14", device=DEVICE)
 clip_model.eval()
 
-# Load aesthetic head
-print("[info] Downloading aesthetic-classifier checkpoint …")
 ckpt_path = hf_hub_download(
     repo_id="purplesmartai/aesthetic-classifier",
     filename="v2.ckpt",
 )
-state_dict = torch.load(ckpt_path, map_location=DEVICE)
-
-# Auto-detect architecture from checkpoint keys
-first_key = next(iter(state_dict))
-# If keys start with 'layers.' it's our AestheticHead; otherwise try to load directly
-if isinstance(state_dict, dict) and not any(k.startswith("layers") for k in state_dict):
-    # Flat state dict — try wrapping in 'layers'
-    new_sd = {"layers." + k if not k.startswith("layers") else k: v for k, v in state_dict.items()}
-    state_dict = new_sd
-
-# Detect input size from first weight tensor
-in_feat = 768  # default ViT-L/14
-for k, v in state_dict.items():
-    if "weight" in k and v.dim() == 2:
-        in_feat = v.shape[1]
-        break
-
-num_classes = len(LABELS)
-model = AestheticHead(in_features=in_feat, num_classes=num_classes).to(DEVICE)
-try:
-    model.load_state_dict(state_dict, strict=True)
-    print("[info] Checkpoint loaded (strict).")
-except RuntimeError:
-    model.load_state_dict(state_dict, strict=False)
-    print("[warn] Checkpoint loaded (non-strict).")
-model.eval()
-
-
-# ── Inference ───────────────────────────────────────────────────────────────────
-@torch.no_grad()
-def classify(image: Image.Image):
-    if image is None:
-        return {}
 
-    # Preprocess & encode with CLIP
-    tensor = preprocess(image).unsqueeze(0).to(DEVICE)
-    features = clip_model.encode_image(tensor).float()
-    features = features / features.norm(dim=-1, keepdim=True)
 
-    # Run head
-    logits = model(features)
-    probs = torch.softmax(logits, dim=-1)[0].cpu().numpy()
 
-    # Top prediction
-    top_idx = int(np.argmax(probs))
 
-    result = {label: float(prob) for label, prob in zip(LABELS, probs)}
-    return result
 
 
-# ── Gradio UI ───────────────────────────────────────────────────────────────────
-EXAMPLES = []
-examples_dir = "examples"
-if os.path.isdir(examples_dir):
-    EXAMPLES = [[os.path.join(examples_dir, f)] for f in os.listdir(examples_dir)
-                if f.lower().endswith((".jpg", ".jpeg", ".png", ".webp"))]
 
 with gr.Blocks(
-    title="Aesthetic Classifier — PurpleSmartAI",
     theme=gr.themes.Soft(primary_hue="purple"),
-    css="""
-    .gradio-container { max-width: 900px !important; margin: auto; }
-    #title { text-align: center; margin-bottom: 0.5rem; }
-    #subtitle { text-align: center; color: #888; margin-bottom: 1.5rem; font-size: 0.95rem; }
-    """,
 ) as demo:
-    gr.Markdown("# 🎨 Aesthetic Classifier", elem_id="title")
     gr.Markdown(
-        "CLIP-based aesthetic quality classifier by **PurpleSmartAI** — "
-        "originally developed for [Pony V7](https://huggingface.co/purplesmartai/aesthetic-classifier) captioning.\n\n"
-        "Upload an image and get a probability distribution across 9 quality tiers.",
-        elem_id="subtitle",
     )
-
     with gr.Row():
         with gr.Column(scale=1):
             img_input = gr.Image(type="pil", label="Input Image", height=340)
-            run_btn = gr.Button("✨ Classify", variant="primary", size="lg")
-
         with gr.Column(scale=1):
-            label_output = gr.Label(
-                num_top_classes=9,
-                label="Aesthetic Score Distribution",
             )
-
-    if EXAMPLES:
-        gr.Examples(examples=EXAMPLES, inputs=img_input, label="Example images")
-
     gr.Markdown(
-        "---\n"
-        "**Model:** [`purplesmartai/aesthetic-classifier`](https://huggingface.co/purplesmartai/aesthetic-classifier) · "
-        "**Backbone:** OpenAI CLIP ViT-L/14"
     )
-
-    run_btn.click(fn=classify, inputs=img_input, outputs=label_output)
-    img_input.change(fn=classify, inputs=img_input, outputs=label_output)
 
 if __name__ == "__main__":
-    demo.launch()
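For readers comparing the two halves of this diff: the deleted head was a 9-way classifier whose softmax distribution fed a gr.Label widget, while the replacement below is a single-output regressor whose raw float is bucketed into a score_N tag. A minimal sketch of the output-shape difference, with dummy tensors standing in for the real heads:

import torch

# Old head (AestheticHead): 9 logits -> softmax -> {label: prob} for gr.Label
logits = torch.randn(1, 9)                # stand-in for AestheticHead(embedding)
probs = torch.softmax(logits, dim=-1)[0]  # one probability per quality tier

# New head (AestheticScorer): a single logit, read directly as a raw float
raw = torch.randn(1, 1).item()            # stand-in for AestheticScorer(embedding)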
 
+import os
 import torch
 import torch.nn as nn
+import numpy as np
 import clip
 import gradio as gr
 from PIL import Image
 from huggingface_hub import hf_hub_download
 
+# ── Model — exactly as in the Pony V7 Captioner notebook ───────────────────────
+class AestheticScorer(nn.Module):
+    def __init__(self, input_size: int = 768):
         super().__init__()
+        self.model = nn.Sequential(
+            nn.Linear(input_size, 1024),
             nn.ReLU(),
+            nn.Dropout(0.5),
+            nn.Linear(1024, 512),
             nn.ReLU(),
+            nn.Dropout(0.3),
+            nn.Linear(512, 1),
         )
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
+        return self.model(x)
 
 
 DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 print(f"[info] device: {DEVICE}")
 
+print("[info] Loading CLIP ViT-L/14 ...")
 clip_model, preprocess = clip.load("ViT-L/14", device=DEVICE)
 clip_model.eval()
 
+print("[info] Downloading aesthetic-classifier checkpoint ...")
 ckpt_path = hf_hub_download(
     repo_id="purplesmartai/aesthetic-classifier",
     filename="v2.ckpt",
 )
+checkpoint_data = torch.load(ckpt_path, map_location=DEVICE)
+state_dict = checkpoint_data["state_dict"]
+# Strip the "model." prefix from keys (same as notebook)
+state_dict = {k.replace("model.", ""): v for k, v in state_dict.items()}
 
+aesthetic_model = AestheticScorer(input_size=768).to(DEVICE)
+aesthetic_model.load_state_dict(state_dict)
+aesthetic_model.eval()
+print("[info] Model ready.")
 
 
+# ── Scoring — identical to notebook ────────────────────────────────────────────
+@torch.no_grad()
+def get_score(image: Image.Image) -> float:
+    """Returns raw float score (typically 0-1 range)."""
+    image_tensor = preprocess(image.convert("RGB")).unsqueeze(0).to(DEVICE)
+    features = clip_model.encode_image(image_tensor).cpu().numpy()
+    norm = np.linalg.norm(features, axis=1, keepdims=True)
+    norm[norm == 0] = 1
+    features = features / norm
+    features_t = torch.tensor(features, dtype=torch.float32, device=DEVICE)
+    raw = aesthetic_model(features_t).item()
+    return raw
+
+
+def raw_to_pony(raw: float) -> int:
+    """Convert raw score to pony score_0...score_9 (same formula as notebook)."""
+    return int(max(0.0, min(0.99, raw)) * 10)
+
+
+# ── Colour palette ─────────────────────────────────────────────────────────────
+SCORE_COLOURS = [
+    "#c0392b", "#e74c3c", "#e67e22", "#f39c12", "#d4ac0d",
+    "#27ae60", "#1e8449", "#148f77", "#0e6655", "#0a4f42",
+]
 
 
+def build_html(raw: float) -> str:
+    pony = raw_to_pony(raw)
+    colour = SCORE_COLOURS[pony]
+
+    tiles_html = ""
+    for i in range(10):
+        active = i == pony
+        bg = SCORE_COLOURS[i] if active else "rgba(255,255,255,0.06)"
+        border = f"2px solid {SCORE_COLOURS[i]}" if active else "2px solid transparent"
+        weight = "700" if active else "400"
+        scale = "scale(1.12)" if active else "scale(1)"
+        opac = "1" if active else "0.45"
+        tiles_html += f"""<div style="background:{bg};border:{border};border-radius:8px;
+            padding:10px 0;text-align:center;font-size:.82rem;font-weight:{weight};color:#fff;
+            transform:{scale};opacity:{opac};transition:all .2s;user-select:none;">score_{i}</div>"""
+
+    bar_w = min(raw, 1.0) * 100
+
+    return f"""
+    <div style="font-family:'Inter',sans-serif;padding:8px 0;">
+      <div style="text-align:center;margin-bottom:20px;">
+        <div style="display:inline-block;background:{colour};color:#fff;border-radius:12px;
+            padding:14px 36px;font-size:2rem;font-weight:800;letter-spacing:.04em;
+            box-shadow:0 4px 20px {colour}66;">score_{pony}</div>
+        <div style="color:#aaa;font-size:.85rem;margin-top:8px;">
+          raw score: <code style="color:#ddd">{raw:.4f}</code>
+        </div>
+      </div>
+      <div style="display:grid;grid-template-columns:repeat(10,1fr);gap:6px;margin-bottom:16px;">
+        {tiles_html}
+      </div>
+      <div style="background:rgba(255,255,255,.1);border-radius:6px;height:8px;overflow:hidden;">
+        <div style="width:{bar_w:.1f}%;height:100%;
+            background:linear-gradient(90deg,#c0392b,#f39c12,#27ae60);
+            border-radius:6px;transition:width .4s;"></div>
+      </div>
+      <div style="display:flex;justify-content:space-between;font-size:.72rem;color:#777;margin-top:4px;">
+        <span>score_0</span><span>score_9</span>
+      </div>
+    </div>"""
+
+
+def classify(image):
+    if image is None:
+        return "<p style='color:#888;text-align:center'>Upload an image to score it.</p>"
+    raw = get_score(image)
+    return build_html(raw)
 
 
+# ── Gradio UI ───────────────────────────────────────────────────────────────────
 with gr.Blocks(
+    title="Aesthetic Classifier - PurpleSmartAI",
     theme=gr.themes.Soft(primary_hue="purple"),
+    css=".gradio-container{max-width:860px!important;margin:auto} #title{text-align:center} #sub{text-align:center;color:#888;font-size:.9rem;margin-bottom:1.5rem}",
 ) as demo:
+    gr.Markdown("# Aesthetic Classifier", elem_id="title")
     gr.Markdown(
+        "CLIP ViT-L/14 regression model by **PurpleSmartAI** for Pony V7 captioning. "
+        "Outputs a **score_0...score_9** tag used directly in training captions.",
+        elem_id="sub",
     )
     with gr.Row():
         with gr.Column(scale=1):
             img_input = gr.Image(type="pil", label="Input Image", height=340)
+            run_btn = gr.Button("Score image", variant="primary", size="lg")
         with gr.Column(scale=1):
+            out_html = gr.HTML(
+                value="<p style='color:#888;text-align:center;padding:40px 0'>Upload an image to see its score.</p>",
             )
     gr.Markdown(
+        "---\n**Model:** [`purplesmartai/aesthetic-classifier`](https://huggingface.co/purplesmartai/aesthetic-classifier)"
+        " · **Backbone:** OpenAI CLIP ViT-L/14"
    )
+    run_btn.click(fn=classify, inputs=img_input, outputs=out_html)
+    img_input.change(fn=classify, inputs=img_input, outputs=out_html)
 
 if __name__ == "__main__":
+    demo.launch()
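A quick check of the bucketing in the new raw_to_pony, since both the tile grid and the colour lookup index by its result (the sample raw values here are illustrative, not model outputs):

def raw_to_pony(raw: float) -> int:
    # Same formula as the new app.py: clamp to [0.0, 0.99], scale by 10, truncate.
    return int(max(0.0, min(0.99, raw)) * 10)

# Illustrative inputs only, not real model outputs:
assert raw_to_pony(-0.3) == 0   # anything below 0.0 clamps to score_0
assert raw_to_pony(0.57) == 5   # each 0.1-wide slice maps to one score_N tag
assert raw_to_pony(0.99) == 9   # 0.99 and above clamp to score_9
assert raw_to_pony(1.70) == 9

The upper clamp at 0.99 is what keeps a raw score at or above 1.0 from producing index 10 and overrunning the 10-entry SCORE_COLOURS list.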