Spaces:
Runtime error
Runtime error
File size: 6,288 Bytes
746bf5b bb08dc6 746bf5b e8736ae 746bf5b e8736ae 746bf5b e8736ae 746bf5b bb08dc6 746bf5b bb08dc6 746bf5b bb08dc6 746bf5b bb08dc6 746bf5b bb08dc6 746bf5b bb08dc6 746bf5b bb08dc6 746bf5b bb08dc6 746bf5b bb08dc6 e8736ae 746bf5b bb08dc6 746bf5b bb08dc6 746bf5b bb08dc6 746bf5b bb08dc6 746bf5b e8736ae bb08dc6 746bf5b bb08dc6 746bf5b bb08dc6 746bf5b bb08dc6 746bf5b e8736ae bb08dc6 746bf5b bb08dc6 746bf5b e8736ae 746bf5b bb08dc6 746bf5b bb08dc6 746bf5b |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 |
# app.py
import os
import uuid
import io
import base64
from PIL import Image
import gradio as gr
import numpy as np
# CLIP via Sentence-Transformers (text+image to same 512-dim space)
from sentence_transformers import SentenceTransformer
# Gemini (Google) client
from google import genai
# Qdrant client & helpers
from qdrant_client import QdrantClient
from qdrant_client.http.models import VectorParams, Distance, PointStruct
# -------------------------
# CONFIG (reads env vars)
# -------------------------
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
QDRANT_URL = os.environ.get("QDRANT_URL")
QDRANT_API_KEY = os.environ.get("QDRANT_API_KEY")
# -------------------------
# Initialize clients/models
# -------------------------
print("Loading CLIP model (this may take 20-60s the first time)...")
MODEL_ID = "sentence-transformers/clip-ViT-B-32-multilingual-v1"
clip_model = SentenceTransformer(MODEL_ID)
# Gemini client
genai_client = genai.Client(api_key=GEMINI_API_KEY) if GEMINI_API_KEY else None
# Qdrant client
if not QDRANT_URL:
raise RuntimeError("Please set QDRANT_URL environment variable")
qclient = QdrantClient(url=QDRANT_URL, api_key=QDRANT_API_KEY)
COLLECTION = "lost_found_items"
VECTOR_SIZE = 512
if not qclient.collection_exists(COLLECTION):
qclient.create_collection(
collection_name=COLLECTION,
vectors_config=VectorParams(size=VECTOR_SIZE, distance=Distance.COSINE),
)
# -------------------------
# Helpers
# -------------------------
def embed_text(text: str):
return clip_model.encode(text, convert_to_numpy=True)
def embed_image_pil(pil_img: Image.Image):
return clip_model.encode(pil_img, convert_to_numpy=True)
def gen_tags_from_image_file(file_obj) -> str:
"""file_obj can be path or BytesIO"""
if genai_client is None:
return ""
uploaded_file = genai_client.files.upload(file=file_obj)
prompt_text = (
"Give 4 short tags (comma-separated) describing this item in the image. "
"Tags should be short single words or two-word phrases (e.g. 'black backpack', 'water bottle'). "
"Respond only with tags, no extra explanation."
)
response = genai_client.models.generate_content(
model="gemini-2.5-flash",
contents=[prompt_text, uploaded_file],
)
return response.text.strip()
# -------------------------
# App logic: add item
# -------------------------
def add_item(mode: str, uploaded_image, text_description: str):
item_id = str(uuid.uuid4())
payload = {"mode": mode, "text": text_description}
if uploaded_image is not None:
# Save to BytesIO
img_bytes_io = io.BytesIO()
uploaded_image.save(img_bytes_io, format="PNG")
img_bytes_io.seek(0)
# Embed image
vec = embed_image_pil(uploaded_image).tolist()
payload["has_image"] = True
# Generate tags
try:
tags = gen_tags_from_image_file(img_bytes_io)
except Exception:
tags = ""
payload["tags"] = tags
# Store image as base64
img_bytes_io.seek(0)
payload["image_b64"] = base64.b64encode(img_bytes_io.read()).decode("utf-8")
else:
vec = embed_text(text_description).tolist()
payload["has_image"] = False
if genai_client:
try:
resp = genai_client.models.generate_content(
model="gemini-2.5-flash",
contents=f"Give 4 short, comma-separated tags for this item described as: {text_description}. Reply only with tags."
)
payload["tags"] = resp.text.strip()
except Exception:
payload["tags"] = ""
else:
payload["tags"] = ""
# Upsert into Qdrant
point = PointStruct(id=item_id, vector=vec, payload=payload)
qclient.upsert(collection_name=COLLECTION, points=[point], wait=True)
return f"Saved item id: {item_id}\nTags: {payload.get('tags','')}"
# -------------------------
# App logic: search
# -------------------------
def search_items(query_image, query_text, limit: int = 5):
if query_image is not None:
qvec = embed_image_pil(query_image).tolist()
elif query_text:
qvec = embed_text(query_text).tolist()
else:
return "Please provide a query image or text."
hits = qclient.search(collection_name=COLLECTION, query_vector=qvec, limit=limit)
if not hits:
return "No results."
results = []
for h in hits:
payload = h.payload or {}
score = getattr(h, "score", None)
img_html = ""
if payload.get("has_image") and payload.get("image_b64"):
img_html = f'<img src="data:image/png;base64,{payload["image_b64"]}" width="200">'
results.append(
f"{img_html}<br>ID:{h.id}<br>Score:{float(score) if score else 0:.4f}<br>"
f"Mode:{payload.get('mode','')}<br>Tags:{payload.get('tags','')}<br>Text:{payload.get('text','')}"
)
return "<br><br>".join(results)
# -------------------------
# Gradio UI
# -------------------------
with gr.Blocks(title="Lost & Found — Simple Helper") as demo:
gr.Markdown("## Lost & Found Helper — Upload items and search by image or text.")
with gr.Row():
with gr.Column():
mode = gr.Radio(choices=["lost", "found"], value="lost", label="Add as")
upload_img = gr.Image(type="pil", label="Item photo (optional)")
text_desc = gr.Textbox(lines=2, placeholder="Short description", label="Description (optional)")
add_btn = gr.Button("Add item")
add_out = gr.HTML(label="Add result") # Changed to HTML to render images
with gr.Column():
query_img = gr.Image(type="pil", label="Search by image (optional)")
query_text = gr.Textbox(lines=2, label="Search by text (optional)")
search_btn = gr.Button("Search")
search_out = gr.HTML(label="Search results") # HTML to render images
add_btn.click(add_item, inputs=[mode, upload_img, text_desc], outputs=[add_out])
search_btn.click(search_items, inputs=[query_img, query_text], outputs=[search_out])
if __name__ == "__main__":
demo.launch(server_name="0.0.0.0", server_port=7860)
|