hackerloi45 committed on
Commit
a1501eb
·
1 Parent(s): 7fae8fb

Fix CLIP model issue in app.py

Browse files
Files changed (1) hide show
  1. app.py +39 -35
app.py CHANGED
@@ -31,6 +31,9 @@ print("Loading CLIP model (this may take 20-60s the first time)...")
31
  MODEL_ID = "sentence-transformers/clip-ViT-B-32-multilingual-v1"
32
  clip_model = SentenceTransformer(MODEL_ID)
33
 
 
 
 
34
  genai_client = genai.Client(api_key=GEMINI_API_KEY) if GEMINI_API_KEY else None
35
 
36
  if not QDRANT_URL:
@@ -38,7 +41,6 @@ if not QDRANT_URL:
38
 
39
  qclient = QdrantClient(url=QDRANT_URL, api_key=QDRANT_API_KEY)
40
  COLLECTION = "lost_found_items"
41
- VECTOR_SIZE = 512
42
 
43
  # Create collection if missing
44
  try:
@@ -75,7 +77,8 @@ def gen_tags_from_image_file(image_bytes: io.BytesIO) -> str:
75
  contents=[prompt_text, file_obj],
76
  )
77
  return response.text.strip()
78
- except Exception:
 
79
  return ""
80
 
81
  # -------------------------
@@ -85,38 +88,39 @@ def add_item(mode: str, uploaded_image, text_description: str):
85
  item_id = str(uuid.uuid4())
86
  payload = {"mode": mode, "text": text_description}
87
 
88
- if uploaded_image is not None:
89
- img_bytes = io.BytesIO()
90
- uploaded_image.convert("RGB").save(img_bytes, format="PNG")
91
- img_bytes.seek(0)
 
92
 
93
- vec = embed_image_pil(uploaded_image).tolist()
94
- payload["has_image"] = True
95
 
96
- payload["tags"] = gen_tags_from_image_file(img_bytes)
97
- payload["image_b64"] = base64.b64encode(img_bytes.getvalue()).decode("utf-8")
98
- else:
99
- vec = embed_text(text_description).tolist()
100
- payload["has_image"] = False
101
- if genai_client:
102
- try:
103
- resp = genai_client.models.generate_content(
104
- model="gemini-2.5-flash",
105
- contents=f"Give 4 short, comma-separated tags for this item described as: {text_description}. Reply only with tags."
106
- )
107
- payload["tags"] = resp.text.strip()
108
- except Exception:
109
- payload["tags"] = ""
110
  else:
111
- payload["tags"] = ""
 
 
 
 
 
 
 
 
 
 
 
 
112
 
113
- try:
114
  point = PointStruct(id=item_id, vector=vec, payload=payload)
115
  qclient.upsert(collection_name=COLLECTION, points=[point], wait=True)
116
- except Exception as e:
117
- return f"Error saving to Qdrant: {e}"
118
 
119
- return f"Saved item id: {item_id}\nTags: {payload.get('tags','')}"
 
 
120
 
121
  # -------------------------
122
  # App logic: search
@@ -127,22 +131,22 @@ def search_items(query_image, query_text, limit: int = 5):
127
  elif query_text and len(query_text.strip()) > 0:
128
  qvec = embed_text(query_text).tolist()
129
  else:
130
- return "Please provide a query image or some query text."
131
 
132
  try:
133
  hits = qclient.search(collection_name=COLLECTION, query_vector=qvec, limit=limit)
134
  except Exception as e:
135
- return f"Error querying Qdrant: {e}"
136
 
137
  if not hits:
138
- return "No results."
139
 
140
  results = []
141
  for h in hits:
142
  payload = h.payload or {}
143
  score = getattr(h, "score", None)
144
  results.append(
145
- f"id:{h.id} score:{float(score) if score else None} mode:{payload.get('mode','')} tags:{payload.get('tags','')} text:{payload.get('text','')}"
146
  )
147
  return "\n\n".join(results)
148
 
@@ -150,19 +154,19 @@ def search_items(query_image, query_text, limit: int = 5):
150
  # Gradio UI
151
  # -------------------------
152
  with gr.Blocks(title="Lost & Found β€” Simple Helper") as demo:
153
- gr.Markdown("## Lost & Found Helper (image/text search) β€” upload items, then search by image or text.")
154
  with gr.Row():
155
  with gr.Column():
156
  mode = gr.Radio(choices=["lost", "found"], value="lost", label="Add as")
157
  upload_img = gr.Image(type="pil", label="Item photo (optional)")
158
  text_desc = gr.Textbox(lines=2, placeholder="Short description (e.g. 'black backpack with blue zipper')", label="Description (optional)")
159
- add_btn = gr.Button("Add item")
160
  add_out = gr.Textbox(label="Add result", interactive=False)
161
  with gr.Column():
162
- gr.Markdown("### Search")
163
  query_img = gr.Image(type="pil", label="Search by image (optional)")
164
  query_text = gr.Textbox(lines=2, label="Search by text (optional)")
165
- search_btn = gr.Button("Search")
166
  search_out = gr.Textbox(label="Search results", interactive=False)
167
 
168
  add_btn.click(add_item, inputs=[mode, upload_img, text_desc], outputs=[add_out])
 
31
  MODEL_ID = "sentence-transformers/clip-ViT-B-32-multilingual-v1"
32
  clip_model = SentenceTransformer(MODEL_ID)
33
 
34
+ # Detect correct embedding size dynamically
35
+ VECTOR_SIZE = clip_model.get_sentence_embedding_dimension()
36
+
37
  genai_client = genai.Client(api_key=GEMINI_API_KEY) if GEMINI_API_KEY else None
38
 
39
  if not QDRANT_URL:
 
41
 
42
  qclient = QdrantClient(url=QDRANT_URL, api_key=QDRANT_API_KEY)
43
  COLLECTION = "lost_found_items"
 
44
 
45
  # Create collection if missing
46
  try:
 
77
  contents=[prompt_text, file_obj],
78
  )
79
  return response.text.strip()
80
+ except Exception as e:
81
+ print("Error generating tags:", e)
82
  return ""
83
 
84
  # -------------------------
 
88
  item_id = str(uuid.uuid4())
89
  payload = {"mode": mode, "text": text_description}
90
 
91
+ try:
92
+ if uploaded_image is not None:
93
+ img_bytes = io.BytesIO()
94
+ uploaded_image.convert("RGB").save(img_bytes, format="PNG")
95
+ img_bytes.seek(0)
96
 
97
+ vec = embed_image_pil(uploaded_image).tolist()
98
+ payload["has_image"] = True
99
 
100
+ payload["tags"] = gen_tags_from_image_file(img_bytes)
101
+ payload["image_b64"] = base64.b64encode(img_bytes.getvalue()).decode("utf-8")
 
 
 
 
 
 
 
 
 
 
 
 
102
  else:
103
+ vec = embed_text(text_description).tolist()
104
+ payload["has_image"] = False
105
+ if genai_client:
106
+ try:
107
+ resp = genai_client.models.generate_content(
108
+ model="gemini-2.5-flash",
109
+ contents=f"Give 4 short, comma-separated tags for this item described as: {text_description}. Reply only with tags."
110
+ )
111
+ payload["tags"] = resp.text.strip()
112
+ except Exception:
113
+ payload["tags"] = ""
114
+ else:
115
+ payload["tags"] = ""
116
 
117
+ # Upsert into Qdrant
118
  point = PointStruct(id=item_id, vector=vec, payload=payload)
119
  qclient.upsert(collection_name=COLLECTION, points=[point], wait=True)
 
 
120
 
121
+ return f"βœ… Saved item id: {item_id}\nTags: {payload.get('tags','')}"
122
+ except Exception as e:
123
+ return f"❌ Error saving to Qdrant: {e}"
124
 
125
  # -------------------------
126
  # App logic: search
 
131
  elif query_text and len(query_text.strip()) > 0:
132
  qvec = embed_text(query_text).tolist()
133
  else:
134
+ return "⚠️ Please provide a query image or some query text."
135
 
136
  try:
137
  hits = qclient.search(collection_name=COLLECTION, query_vector=qvec, limit=limit)
138
  except Exception as e:
139
+ return f"❌ Error querying Qdrant: {e}"
140
 
141
  if not hits:
142
+ return "No results found."
143
 
144
  results = []
145
  for h in hits:
146
  payload = h.payload or {}
147
  score = getattr(h, "score", None)
148
  results.append(
149
+ f"id:{h.id} | score:{f'{float(score):.4f}' if score is not None else 'n/a'} | mode:{payload.get('mode','')} | tags:{payload.get('tags','')} | text:{payload.get('text','')}"
150
  )
151
  return "\n\n".join(results)
152
 
 
154
  # Gradio UI
155
  # -------------------------
156
  with gr.Blocks(title="Lost & Found β€” Simple Helper") as demo:
157
+ gr.Markdown("## 🧳 Lost & Found Helper β€” Upload items, then search by image or text.")
158
  with gr.Row():
159
  with gr.Column():
160
  mode = gr.Radio(choices=["lost", "found"], value="lost", label="Add as")
161
  upload_img = gr.Image(type="pil", label="Item photo (optional)")
162
  text_desc = gr.Textbox(lines=2, placeholder="Short description (e.g. 'black backpack with blue zipper')", label="Description (optional)")
163
+ add_btn = gr.Button("βž• Add item")
164
  add_out = gr.Textbox(label="Add result", interactive=False)
165
  with gr.Column():
166
+ gr.Markdown("### πŸ” Search")
167
  query_img = gr.Image(type="pil", label="Search by image (optional)")
168
  query_text = gr.Textbox(lines=2, label="Search by text (optional)")
169
+ search_btn = gr.Button("πŸ”Ž Search")
170
  search_out = gr.Textbox(label="Search results", interactive=False)
171
 
172
  add_btn.click(add_item, inputs=[mode, upload_img, text_desc], outputs=[add_out])