lostfound-hack / app.py
hackerloi45's picture
Fix CLIP2 model issue in app.py
bb08dc6
raw
history blame
6.29 kB
# app.py
import os
import uuid
import io
import base64
from PIL import Image
import gradio as gr
import numpy as np
# CLIP via Sentence-Transformers (text+image to same 512-dim space)
from sentence_transformers import SentenceTransformer
# Gemini (Google) client
from google import genai
# Qdrant client & helpers
from qdrant_client import QdrantClient
from qdrant_client.http.models import VectorParams, Distance, PointStruct
# -------------------------
# CONFIG (reads env vars)
# -------------------------
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
QDRANT_URL = os.environ.get("QDRANT_URL")
QDRANT_API_KEY = os.environ.get("QDRANT_API_KEY")
# -------------------------
# Initialize clients/models
# -------------------------
print("Loading CLIP model (this may take 20-60s the first time)...")
MODEL_ID = "sentence-transformers/clip-ViT-B-32-multilingual-v1"
clip_model = SentenceTransformer(MODEL_ID)
# Gemini client
genai_client = genai.Client(api_key=GEMINI_API_KEY) if GEMINI_API_KEY else None
# Qdrant client
if not QDRANT_URL:
raise RuntimeError("Please set QDRANT_URL environment variable")
qclient = QdrantClient(url=QDRANT_URL, api_key=QDRANT_API_KEY)
COLLECTION = "lost_found_items"
VECTOR_SIZE = 512
if not qclient.collection_exists(COLLECTION):
qclient.create_collection(
collection_name=COLLECTION,
vectors_config=VectorParams(size=VECTOR_SIZE, distance=Distance.COSINE),
)
# -------------------------
# Helpers
# -------------------------
def embed_text(text: str):
return clip_model.encode(text, convert_to_numpy=True)
def embed_image_pil(pil_img: Image.Image):
return clip_model.encode(pil_img, convert_to_numpy=True)
def gen_tags_from_image_file(file_obj) -> str:
"""file_obj can be path or BytesIO"""
if genai_client is None:
return ""
uploaded_file = genai_client.files.upload(file=file_obj)
prompt_text = (
"Give 4 short tags (comma-separated) describing this item in the image. "
"Tags should be short single words or two-word phrases (e.g. 'black backpack', 'water bottle'). "
"Respond only with tags, no extra explanation."
)
response = genai_client.models.generate_content(
model="gemini-2.5-flash",
contents=[prompt_text, uploaded_file],
)
return response.text.strip()
# -------------------------
# App logic: add item
# -------------------------
def add_item(mode: str, uploaded_image, text_description: str):
item_id = str(uuid.uuid4())
payload = {"mode": mode, "text": text_description}
if uploaded_image is not None:
# Save to BytesIO
img_bytes_io = io.BytesIO()
uploaded_image.save(img_bytes_io, format="PNG")
img_bytes_io.seek(0)
# Embed image
vec = embed_image_pil(uploaded_image).tolist()
payload["has_image"] = True
# Generate tags
try:
tags = gen_tags_from_image_file(img_bytes_io)
except Exception:
tags = ""
payload["tags"] = tags
# Store image as base64
img_bytes_io.seek(0)
payload["image_b64"] = base64.b64encode(img_bytes_io.read()).decode("utf-8")
else:
vec = embed_text(text_description).tolist()
payload["has_image"] = False
if genai_client:
try:
resp = genai_client.models.generate_content(
model="gemini-2.5-flash",
contents=f"Give 4 short, comma-separated tags for this item described as: {text_description}. Reply only with tags."
)
payload["tags"] = resp.text.strip()
except Exception:
payload["tags"] = ""
else:
payload["tags"] = ""
# Upsert into Qdrant
point = PointStruct(id=item_id, vector=vec, payload=payload)
qclient.upsert(collection_name=COLLECTION, points=[point], wait=True)
return f"Saved item id: {item_id}\nTags: {payload.get('tags','')}"
# -------------------------
# App logic: search
# -------------------------
def search_items(query_image, query_text, limit: int = 5):
if query_image is not None:
qvec = embed_image_pil(query_image).tolist()
elif query_text:
qvec = embed_text(query_text).tolist()
else:
return "Please provide a query image or text."
hits = qclient.search(collection_name=COLLECTION, query_vector=qvec, limit=limit)
if not hits:
return "No results."
results = []
for h in hits:
payload = h.payload or {}
score = getattr(h, "score", None)
img_html = ""
if payload.get("has_image") and payload.get("image_b64"):
img_html = f'<img src="data:image/png;base64,{payload["image_b64"]}" width="200">'
results.append(
f"{img_html}<br>ID:{h.id}<br>Score:{float(score) if score else 0:.4f}<br>"
f"Mode:{payload.get('mode','')}<br>Tags:{payload.get('tags','')}<br>Text:{payload.get('text','')}"
)
return "<br><br>".join(results)
# -------------------------
# Gradio UI
# -------------------------
with gr.Blocks(title="Lost & Found — Simple Helper") as demo:
gr.Markdown("## Lost & Found Helper — Upload items and search by image or text.")
with gr.Row():
with gr.Column():
mode = gr.Radio(choices=["lost", "found"], value="lost", label="Add as")
upload_img = gr.Image(type="pil", label="Item photo (optional)")
text_desc = gr.Textbox(lines=2, placeholder="Short description", label="Description (optional)")
add_btn = gr.Button("Add item")
add_out = gr.HTML(label="Add result") # Changed to HTML to render images
with gr.Column():
query_img = gr.Image(type="pil", label="Search by image (optional)")
query_text = gr.Textbox(lines=2, label="Search by text (optional)")
search_btn = gr.Button("Search")
search_out = gr.HTML(label="Search results") # HTML to render images
add_btn.click(add_item, inputs=[mode, upload_img, text_desc], outputs=[add_out])
search_btn.click(search_items, inputs=[query_img, query_text], outputs=[search_out])
if __name__ == "__main__":
demo.launch(server_name="0.0.0.0", server_port=7860)