Spaces:
Runtime error
Update app.py
app.py
CHANGED
@@ -1,7 +1,7 @@
  import gradio as gr
  from PIL import Image
- import requests
- from io import BytesIO
  import torch
  from transformers import (
      VisionEncoderDecoderModel,
@@ -10,195 +10,174 @@ from transformers import (
      T5ForConditionalGeneration,
      T5Tokenizer,
  )
- import urllib.parse
- import threading
- import time

  device = torch.device("cpu")

- REWRITER_NAME = "t5-small"

- model = VisionEncoderDecoderModel.from_pretrained(PROCESSOR_NAME).to(device)
- model.eval()

- rewriter = T5ForConditionalGeneration.from_pretrained(
- rewriter.eval()

      try:
          url = url.strip()
          if url.startswith("data:"):
-             header, encoded = url.split(",", 1)
              import base64
-             data =
-             img = Image.open(BytesIO(data)).convert("RGB")
              return img, None
-         img = Image.open(BytesIO(resp.content)).convert("RGB")
-         return img, None
      except Exception as e:
-         return None, f"

      inputs = processor(images=img, return_tensors="pt")
-     return

- def
      if prompt and prompt.strip():
-         instr = f"Expand
      else:
-         instr = f"Expand

      try:
-         status_callback("Expanding caption (step 1/2)...")
-         # Small sleep allows UI update
-         time.sleep(0.1)
-         expanded = expand_with_t5(caption, prompt=prompt, max_len=max_expand_len)
-         status_callback("Finalizing (step 2/2)...")
          time.sleep(0.1)
      except Exception as e:
-         return
-     #

-     """
-     img, err = load_image_from_url(url)
      if err:
-         return None, "",

-     # Map detail_level to rewriter max_len
      detail_map = {"Low": 80, "Medium": 140, "High": 220}

-     # Generate candidates
-     candidates = generate_caption_candidates(img, max_len=max_caption_len, num_beams=beams, num_return_sequences=3, do_sample=do_sample)
-     base = pick_most_detailed(candidates)

-     status = {"text": "Queued for expansion..."}
-     def status_callback(s):
-         status["text"] = s

-     result_container = {"final": base}

      def worker():
-         result_container["final"] = expanded

-     thread = threading.Thread(target=worker, daemon=True)
-     thread.start()

      return img, base, status["text"]

-     # In this simple pattern we re-run a lightweight check by storing results in a global map keyed by URL+prompt
-     # For simplicity in this Space we will re-run expansion synchronously here if needed.
-     # But to avoid redoing heavy work, you can implement a shared cache (omitted for brevity).
-     return "If expansion still running, refresh in a few seconds. Final caption will replace base when ready."

- # Simple endpoint to get final expanded caption synchronously (used when user hits 'Get final caption')
- def get_final_caption(url: str, prompt: str, detail_level: str, max_caption_len: int = 40, beams: int = 2, do_sample: bool = True):
-     img, err = load_image_from_url(url)
      if err:
-         return "",
-     candidates = generate_caption_candidates(img, max_len=max_caption_len, num_beams=beams, num_return_sequences=3, do_sample=do_sample)
-     base = pick_most_detailed(candidates)
      detail_map = {"Low": 80, "Medium": 140, "High": 220}

      try:
-         return
      except Exception as e:
          return base, f"Expand error: {e}"

-     gr.Markdown("## Image Describer - uncensored captions, optional prompt to bias description. Use 'Get final caption' for the detailed expanded output (may take longer).")
      with gr.Row():
-         with gr.Column(
-             url_in = gr.Textbox(label="Image URL
-             prompt_in = gr.Textbox(label="Optional prompt
-         with gr.Column(scale=1):
              img_out = gr.Image(type="pil", label="Image")
-         with gr.Column(
-             caption_out = gr.Textbox(label="Caption
-         return final_caption, status
-     get_final.click(fn=on_get_final, inputs=[url_in, prompt_in, detail_level, max_len, beams, do_sample_chk], outputs=[caption_out, status_txt])

  if __name__ == "__main__":
      demo.launch(server_name="0.0.0.0", server_port=7860)
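The removed comments around the old get_final_caption path mention a shared cache keyed by URL+prompt so the heavy expansion is not redone; the old file omitted it for brevity. A minimal sketch of that idea (hypothetical, not part of either version of app.py) could look like:

# Hypothetical illustration of the cache the removed comments describe;
# neither the old nor the new app.py contains this.
_EXPANSION_CACHE = {}

def cached_expansion(url, prompt, compute):
    """Return the expansion for (url, prompt), computing it only once via compute()."""
    key = (url, prompt or "")
    if key not in _EXPANSION_CACHE:
        _EXPANSION_CACHE[key] = compute()
    return _EXPANSION_CACHE[key]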
# app.py - minimal, CPU-only, high-quality captions
import gradio as gr
from PIL import Image
import requests, urllib.parse, threading, time
from io import BytesIO  # used to wrap downloaded / data-URL image bytes
import torch
from transformers import (
    VisionEncoderDecoderModel,
    ViTImageProcessor,
    AutoTokenizer,
    T5ForConditionalGeneration,
    T5Tokenizer,
)

# -------------------------------------------------
# Device & models (CPU)
# -------------------------------------------------
device = torch.device("cpu")

IMG_MODEL = "nlpconnect/vit-gpt2-image-captioning"
TXT_MODEL = "t5-small"

processor = ViTImageProcessor.from_pretrained(IMG_MODEL)
tokenizer = AutoTokenizer.from_pretrained(IMG_MODEL)
vision = VisionEncoderDecoderModel.from_pretrained(IMG_MODEL).to(device).eval()

rewriter_tok = T5Tokenizer.from_pretrained(TXT_MODEL)
rewriter = T5ForConditionalGeneration.from_pretrained(TXT_MODEL).to(device).eval()

# -------------------------------------------------
# Helpers
# -------------------------------------------------
def load_image(url: str):
    """Return (PIL image, None) or (None, error). Handles http/https and data URLs."""
    try:
        url = url.strip()
        if url.startswith("data:"):
            import base64
            _, data = url.split(",", 1)
            img = Image.open(BytesIO(base64.b64decode(data))).convert("RGB")
            return img, None
        if not urllib.parse.urlsplit(url).scheme:
            return None, "Missing http/https scheme."
        r = requests.get(url, timeout=10, headers={"User-Agent": "duck.ai"})
        r.raise_for_status()
        return Image.open(BytesIO(r.content)).convert("RGB"), None
    except Exception as e:
        return None, f"Load error: {e}"

def generate_base(img: Image.Image, max_len=40, beams=2, sample=False):
    """Return a single 'most detailed' base caption."""
    inputs = processor(images=img, return_tensors="pt")
    pix = inputs.pixel_values.to(device)

    if sample:
        out = vision.generate(
            pix,
            max_length=max_len,
            do_sample=True,
            temperature=0.8,
            top_k=50,
            top_p=0.9,
            num_return_sequences=3,
            early_stopping=True,
        )
    else:
        # ensure num_return <= beams
        out = vision.generate(
            pix,
            max_length=max_len,
            num_beams=beams,
            num_return_sequences=min(3, beams),
            early_stopping=True,
        )
    caps = [tokenizer.decode(o, skip_special_tokens=True).strip() for o in out]
    # pick longest (most detailed)
    return max(caps, key=lambda s: len(s.split()))

def expand_caption(base: str, prompt: str = None, max_len=160):
    """Rich T5 expansion."""
    if prompt and prompt.strip():
        instr = f"Expand using: '{prompt}'. Caption: \"{base}\""
    else:
        instr = f"Expand with rich visual detail. Caption: \"{base}\""

    toks = rewriter_tok(
        instr,
        return_tensors="pt",
        truncation=True,
        padding="max_length",
        max_length=256,
    ).to(device)

    out = rewriter.generate(
        **toks,
        max_length=max_len,
        num_beams=4,
        early_stopping=True,
        no_repeat_ngram_size=3,
    )
    return rewriter_tok.decode(out[0], skip_special_tokens=True).strip()

# -------------------------------------------------
# Async expansion (background thread)
# -------------------------------------------------
def async_expand(base, prompt, max_len, status):
    try:
        status["text"] = "Expanding..."
        time.sleep(0.1)
        result = expand_caption(base, prompt, max_len)
        status["text"] = "Done"
        return result
    except Exception as e:
        status["text"] = f"Error: {e}"
        return base

# -------------------------------------------------
# Gradio callbacks
# -------------------------------------------------
def fast_describe(url, prompt, detail, beams, sample):
    img, err = load_image(url)
    if err:
        return None, "", err

    detail_map = {"Low": 80, "Medium": 140, "High": 220}
    max_expand = detail_map.get(detail, 140)

    base = generate_base(img, beams=beams, sample=sample)
    status = {"text": "Queued..."}

    # Expansion continues in a background thread; the fast path returns the base caption.
    def worker():
        status["final"] = async_expand(base, prompt, max_expand, status)

    threading.Thread(target=worker, daemon=True).start()
    return img, base, status["text"]

def final_caption(url, prompt, detail, beams, sample):
    img, err = load_image(url)
    if err:
        return "", err
    detail_map = {"Low": 80, "Medium": 140, "High": 220}
    max_expand = detail_map.get(detail, 140)

    base = generate_base(img, beams=beams, sample=sample)
    try:
        final = expand_caption(base, prompt, max_expand)
        return final, "Done"
    except Exception as e:
        return base, f"Expand error: {e}"

# -------------------------------------------------
# UI
# -------------------------------------------------
css = "footer {display:none !important;}"
with gr.Blocks(css=css, title="Image Describer (CPU)") as demo:
    gr.Markdown("## Image Describer - fast base caption + optional detailed rewrite")
    with gr.Row():
        with gr.Column():
            url_in = gr.Textbox(label="Image URL / data URL")
            prompt_in = gr.Textbox(label="Optional prompt")
            detail_in = gr.Radio(["Low", "Medium", "High"], value="Medium", label="Detail level")
            beams_in = gr.Slider(1, 4, step=1, value=2, label="Beams (higher = better, slower)")
            sample_in = gr.Checkbox(label="Enable sampling (more diverse)", value=False)
            go_btn = gr.Button("Load & Describe (fast)")
            final_btn = gr.Button("Get final caption (detailed)")
            status_out = gr.Textbox(label="Status", interactive=False)
        with gr.Column():
            img_out = gr.Image(type="pil", label="Image")
        with gr.Column():
            caption_out = gr.Textbox(label="Caption", lines=8)

    go_btn.click(
        fn=fast_describe,
        inputs=[url_in, prompt_in, detail_in, beams_in, sample_in],
        outputs=[img_out, caption_out, status_out],
    )
    final_btn.click(
        fn=final_caption,
        inputs=[url_in, prompt_in, detail_in, beams_in, sample_in],
        outputs=[caption_out, status_out],
    )

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)
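For a quick sanity check of the new helpers outside Gradio, a small driver can call them directly. This is an illustrative sketch only: the image URL is a placeholder, and it assumes app.py is importable from the working directory.

# smoke_test.py - illustrative only; the URL below is a placeholder, not from the Space.
from app import load_image, generate_base, expand_caption

img, err = load_image("https://example.com/sample.jpg")
if err:
    print("load failed:", err)
else:
    base = generate_base(img, beams=2, sample=False)  # fast base caption
    print("base:", base)
    print("expanded:", expand_caption(base, prompt=None, max_len=140))

The imports also imply the Space needs at least gradio, torch, transformers, requests, Pillow, and sentencepiece (required by the slow T5Tokenizer) available in its environment.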