hackerloi45 committed
Commit bb08dc6 · 1 Parent(s): e8736ae

Fix CLIP2 model issue in app.py

Files changed (1)
  1. app.py +39 -36
app.py CHANGED
@@ -2,6 +2,7 @@
 import os
 import uuid
 import io
+import base64
 from PIL import Image
 import gradio as gr
 import numpy as np
@@ -41,7 +42,6 @@ qclient = QdrantClient(url=QDRANT_URL, api_key=QDRANT_API_KEY)
 COLLECTION = "lost_found_items"
 VECTOR_SIZE = 512
 
-# Create collection if missing
 if not qclient.collection_exists(COLLECTION):
     qclient.create_collection(
         collection_name=COLLECTION,
@@ -52,24 +52,24 @@ if not qclient.collection_exists(COLLECTION):
 # Helpers
 # -------------------------
 def embed_text(text: str):
-    vec = clip_model.encode([text], convert_to_numpy=True)[0]  # wrap in list
-    return vec
+    return clip_model.encode(text, convert_to_numpy=True)
 
 def embed_image_pil(pil_img: Image.Image):
-    vec = clip_model.encode([pil_img], convert_to_numpy=True)[0]  # wrap in list
-    return vec
+    return clip_model.encode(pil_img, convert_to_numpy=True)
 
-def gen_tags_from_image_file(local_path: str) -> str:
-    if not genai_client:
+def gen_tags_from_image_file(file_obj) -> str:
+    """file_obj can be path or BytesIO"""
+    if genai_client is None:
         return ""
-    file_obj = genai_client.files.upload(file=local_path)
+    uploaded_file = genai_client.files.upload(file=file_obj)
     prompt_text = (
         "Give 4 short tags (comma-separated) describing this item in the image. "
-        "Tags should be short single words or two-word phrases. Respond only with tags."
+        "Tags should be short single words or two-word phrases (e.g. 'black backpack', 'water bottle'). "
+        "Respond only with tags, no extra explanation."
     )
     response = genai_client.models.generate_content(
         model="gemini-2.5-flash",
-        contents=[prompt_text, file_obj],
+        contents=[prompt_text, uploaded_file],
     )
     return response.text.strip()
 
@@ -81,21 +81,29 @@ def add_item(mode: str, uploaded_image, text_description: str):
     payload = {"mode": mode, "text": text_description}
 
     if uploaded_image is not None:
-        tmp_path = f"/tmp/{item_id}.png"
-        uploaded_image.save(tmp_path)
+        # Save to BytesIO
+        img_bytes_io = io.BytesIO()
+        uploaded_image.save(img_bytes_io, format="PNG")
+        img_bytes_io.seek(0)
+
+        # Embed image
         vec = embed_image_pil(uploaded_image).tolist()
         payload["has_image"] = True
+
+        # Generate tags
         try:
-            tags = gen_tags_from_image_file(tmp_path)
+            tags = gen_tags_from_image_file(img_bytes_io)
         except Exception:
             tags = ""
         payload["tags"] = tags
-        with open(tmp_path, "rb") as f:
-            b64 = f.read()
-        payload["image_b64"] = True
+
+        # Store image as base64
+        img_bytes_io.seek(0)
+        payload["image_b64"] = base64.b64encode(img_bytes_io.read()).decode("utf-8")
     else:
         vec = embed_text(text_description).tolist()
         payload["has_image"] = False
+
     if genai_client:
         try:
             resp = genai_client.models.generate_content(
@@ -108,6 +116,7 @@ def add_item(mode: str, uploaded_image, text_description: str):
     else:
         payload["tags"] = ""
 
+    # Upsert into Qdrant
     point = PointStruct(id=item_id, vector=vec, payload=payload)
     qclient.upsert(collection_name=COLLECTION, points=[point], wait=True)
 
@@ -119,53 +128,47 @@ def add_item(mode: str, uploaded_image, text_description: str):
 def search_items(query_image, query_text, limit: int = 5):
     if query_image is not None:
         qvec = embed_image_pil(query_image).tolist()
-    elif query_text and query_text.strip():
+    elif query_text:
         qvec = embed_text(query_text).tolist()
     else:
-        return "Please provide a query image or some query text."
+        return "Please provide a query image or text."
 
     hits = qclient.search(collection_name=COLLECTION, query_vector=qvec, limit=limit)
 
+    if not hits:
+        return "No results."
+
     results = []
     for h in hits:
         payload = h.payload or {}
         score = getattr(h, "score", None)
+        img_html = ""
+        if payload.get("has_image") and payload.get("image_b64"):
+            img_html = f'<img src="data:image/png;base64,{payload["image_b64"]}" width="200">'
         results.append(
-            {
-                "id": h.id,
-                "score": float(score) if score else None,
-                "mode": payload.get("mode", ""),
-                "text": payload.get("text", ""),
-                "tags": payload.get("tags", ""),
-                "has_image": payload.get("has_image", False),
-            }
+            f"{img_html}<br>ID:{h.id}<br>Score:{float(score) if score else 0:.4f}<br>"
+            f"Mode:{payload.get('mode','')}<br>Tags:{payload.get('tags','')}<br>Text:{payload.get('text','')}"
         )
 
-    if not results:
-        return "No results."
-    out_lines = [
-        f"id:{r['id']} score:{r['score']:.4f} mode:{r['mode']} tags:{r['tags']} text:{r['text']}"
-        for r in results
-    ]
-    return "\n\n".join(out_lines)
+    return "<br><br>".join(results)
 
 # -------------------------
 # Gradio UI
 # -------------------------
 with gr.Blocks(title="Lost & Found — Simple Helper") as demo:
-    gr.Markdown("## Lost & Found Helper (image/text search)")
+    gr.Markdown("## Lost & Found Helper — Upload items and search by image or text.")
     with gr.Row():
         with gr.Column():
             mode = gr.Radio(choices=["lost", "found"], value="lost", label="Add as")
             upload_img = gr.Image(type="pil", label="Item photo (optional)")
             text_desc = gr.Textbox(lines=2, placeholder="Short description", label="Description (optional)")
             add_btn = gr.Button("Add item")
-            add_out = gr.Textbox(label="Add result", interactive=False)
+            add_out = gr.HTML(label="Add result")  # Changed to HTML to render images
         with gr.Column():
             query_img = gr.Image(type="pil", label="Search by image (optional)")
             query_text = gr.Textbox(lines=2, label="Search by text (optional)")
             search_btn = gr.Button("Search")
-            search_out = gr.Textbox(label="Search results", interactive=False)
+            search_out = gr.HTML(label="Search results")  # HTML to render images
 
     add_btn.click(add_item, inputs=[mode, upload_img, text_desc], outputs=[add_out])
     search_btn.click(search_items, inputs=[query_img, query_text], outputs=[search_out])
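
A minimal sketch (not part of the commit) of the in-memory round trip the updated add_item and search_items rely on: the uploaded PIL image is written to a BytesIO buffer as PNG, base64-encoded into the Qdrant payload, and later rendered back through a data URI in the HTML results. The image created below is only a stand-in for a real upload.

import base64
import io

from PIL import Image

# Stand-in for an uploaded photo (the app receives a PIL image from gr.Image).
img = Image.new("RGB", (64, 64), color="navy")

# 1) PNG bytes in memory instead of a /tmp file
buf = io.BytesIO()
img.save(buf, format="PNG")
buf.seek(0)

# 2) base64 string, as stored under payload["image_b64"]
image_b64 = base64.b64encode(buf.read()).decode("utf-8")

# 3) data-URI <img> tag, as returned to the gr.HTML output
img_html = f'<img src="data:image/png;base64,{image_b64}" width="200">'
print(img_html[:60] + "...")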
 
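A second illustrative sketch: the embedding helpers can drop the list wrapping and [0] indexing because a sentence-transformers CLIP model (which the encode(..., convert_to_numpy=True) calls suggest clip_model is) returns a 1-D vector when given a single string or a single PIL image. The model name below is an assumption; clip-ViT-B-32 is simply a choice consistent with VECTOR_SIZE = 512.

from PIL import Image
from sentence_transformers import SentenceTransformer

# Assumed model: the commit does not show how clip_model is loaded;
# clip-ViT-B-32 is a guess consistent with VECTOR_SIZE = 512.
clip_model = SentenceTransformer("clip-ViT-B-32")

# A single string or a single PIL image yields a 1-D vector directly,
# so the old [0] indexing after a list-wrapped encode is no longer needed.
text_vec = clip_model.encode("black backpack", convert_to_numpy=True)
img_vec = clip_model.encode(Image.new("RGB", (224, 224)), convert_to_numpy=True)
print(text_vec.shape, img_vec.shape)  # expected: (512,) (512,)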