hackerloi45 committed on
Commit
ebbc741
Β·
1 Parent(s): 7712c9d
Files changed (1) hide show
  1. app.py +52 -75
app.py CHANGED
@@ -8,27 +8,23 @@ from qdrant_client.http.models import VectorParams, Distance, PointStruct
8
from sentence_transformers import SentenceTransformer

# ===============================
# Config / Setup
# ===============================
# Directory where uploaded item photos are persisted to disk.
UPLOAD_DIR = "uploaded_images"
os.makedirs(UPLOAD_DIR, exist_ok=True)

COLLECTION = "lost_and_found"

# NOTE(review): an in-memory Qdrant index is lost on restart while the saved
# image files remain on disk — confirm this is intended for the demo.
qclient = QdrantClient(":memory:")

# Load CLIP model
encoder = SentenceTransformer("clip-ViT-B-32")

# Get vector dimension safely: some model configs report None, and older
# library versions may raise, so fall back to probing with a dummy encode.
try:
    VECTOR_SIZE = encoder.get_sentence_embedding_dimension()
    if VECTOR_SIZE is None:
        VECTOR_SIZE = len(encoder.encode(["test"])[0])
except Exception:
    VECTOR_SIZE = len(encoder.encode(["test"])[0])
30
 
31
- # Create collection if not exists
32
  if not qclient.collection_exists(COLLECTION):
33
  qclient.create_collection(
34
  collection_name=COLLECTION,
@@ -36,53 +32,46 @@ if not qclient.collection_exists(COLLECTION):
36
  )
37
 
38
# ===============================
# Encoding function
# ===============================
def encode_data(text=None, image=None):
    """Return a CLIP embedding for an image (PIL object or path) or a text string.

    Image input takes precedence over text; returns None when neither is usable.
    """
    if image is not None:
        if isinstance(image, Image.Image):
            return encoder.encode(image.convert("RGB"))
        if isinstance(image, str):
            # A path on disk: load the file, then embed it.
            return encoder.encode(Image.open(image).convert("RGB"))
    return encoder.encode([text])[0] if text else None
50
 
51
# ===============================
# Add Item
# ===============================
def add_item(text, image, uploader_name, uploader_phone):
    """Store a found item (photo and/or description) in the vector database.

    Returns a human-readable status string for the Gradio UI.
    """
    try:
        img_path = None
        vector = None

        # Prefer the image embedding; fall back to the description text.
        if isinstance(image, Image.Image):
            # Persist the photo first so the payload can reference it.
            img_path = os.path.join(UPLOAD_DIR, f"{uuid.uuid4()}.png")
            image.save(img_path)
            vector = encode_data(image=image)
        elif text:
            vector = encode_data(text=text)

        if vector is None:
            return "❌ Please provide at least an image or text."

        point = PointStruct(
            id=str(uuid.uuid4()),
            vector=np.asarray(vector, dtype=float).tolist(),
            payload={
                "text": text or "",
                "uploader_name": uploader_name or "N/A",
                "uploader_phone": uploader_phone or "N/A",
                "image_path": img_path,
                "has_image": img_path is not None,
            },
        )
        qclient.upsert(collection_name=COLLECTION, points=[point], wait=True)
        return "βœ… Item added to database!"
    except Exception as e:
        # Surface the failure in the status box rather than crashing the UI.
        return f"❌ Error: {e}"
88
 
@@ -91,59 +80,47 @@ def add_item(text, image, uploader_name, uploader_phone):
91
# ===============================
# Search Items
# ===============================
def search_items(text, image, max_results, min_score):
    """Search the collection by text, image, or both.

    When both modalities are given, their unit vectors are averaged into one
    query. Returns (results rendered as text, list of matching image paths).
    """
    try:
        img_vec = None
        text_vec = None

        if isinstance(image, Image.Image):
            img_vec = np.asarray(encode_data(image=image), dtype=float)
        if text and len(text.strip()) > 0:
            text_vec = np.asarray(encode_data(text=text), dtype=float)

        if img_vec is None and text_vec is None:
            return "❌ Provide text or image to search.", []

        if img_vec is not None and text_vec is not None:
            # Combine both queries: normalize each, then average.
            qvec = (
                img_vec / (np.linalg.norm(img_vec) + 1e-12)
                + text_vec / (np.linalg.norm(text_vec) + 1e-12)
            ) / 2
        else:
            qvec = img_vec if img_vec is not None else text_vec

        hits = qclient.search(
            collection_name=COLLECTION,
            query_vector=qvec.tolist(),
            limit=int(max_results),
            score_threshold=float(min_score),
            with_payload=True,
        )

        if not hits:
            return "No matches found.", []

        result_texts = []
        gallery_items = []
        for hit in hits:
            payload = hit.payload or {}
            score_str = f"{getattr(hit, 'score', 0):.3f}"
            finder = payload.get("uploader_name", "N/A") or "N/A"
            phone = payload.get("uploader_phone", "N/A") or "N/A"
            result_texts.append(
                f"id:{hit.id} | score:{score_str} | text:{payload.get('text','')} "
                f"| finder:{finder} ({phone})"
            )

            # Only show images whose file still exists on disk.
            img_path = payload.get("image_path")
            if img_path and os.path.exists(img_path):
                gallery_items.append(img_path)

        return "\n".join(result_texts), gallery_items
    except Exception as e:
        return f"❌ Error: {e}", []
149
 
@@ -171,29 +148,29 @@ def clear_database():
171
# Gradio UI
# ===============================
with gr.Blocks() as demo:
    gr.Markdown("## πŸ”Ž Lost & Found")

    # Tab 1: register a found item (photo and/or description + finder contact).
    with gr.Tab("βž• Add Found Item"):
        text_in = gr.Textbox(label="Description (optional)")
        img_in = gr.Image(type="pil", label="Upload Image (optional)")
        uploader_name = gr.Textbox(label="Finder's name")
        uploader_phone = gr.Textbox(label="Finder's phone")
        add_btn = gr.Button("Add to database")
        add_status = gr.Textbox(label="Status")
        add_btn.click(add_item, inputs=[text_in, img_in, uploader_name, uploader_phone], outputs=[add_status])

    # Tab 2: query by text and/or example image; results shown as text + gallery.
    with gr.Tab("πŸ” Search Lost Item"):
        search_text = gr.Textbox(label="Search by text (optional)")
        search_img = gr.Image(type="pil", label="Search by image (optional)")
        max_results = gr.Slider(1, 20, value=5, step=1, label="Max results")
        min_score = gr.Slider(0.0, 1.0, value=0.75, step=0.01, label="Min similarity score")
        search_btn = gr.Button("Search")
        search_text_out = gr.Textbox(label="Search results (text)")
        search_gallery = gr.Gallery(label="Search Results", columns=2, height="auto")
        search_btn.click(search_items, inputs=[search_text, search_img, max_results, min_score], outputs=[search_text_out, search_gallery])

    # Tab 3: admin-only wipe of the whole collection.
    with gr.Tab("πŸ—‘οΈ Admin"):
        clear_btn = gr.Button("Clear database")
        clear_out = gr.Textbox(label="Status")
        clear_btn.click(clear_database, outputs=[clear_out])
199
 
 
8
from sentence_transformers import SentenceTransformer

# ===============================
# Config
# ===============================
# Directory where uploaded item photos are persisted to disk.
UPLOAD_DIR = "uploaded_images"
os.makedirs(UPLOAD_DIR, exist_ok=True)

COLLECTION = "lost_and_found"

# NOTE(review): an in-memory Qdrant index is lost on restart while the saved
# image files remain on disk — confirm this is intended for the demo.
qclient = QdrantClient(":memory:")

# Load CLIP model
encoder = SentenceTransformer("clip-ViT-B-32")

# Safe vector size: some model configs report None, so probe the model with a
# dummy encode as a fallback.
VECTOR_SIZE = encoder.get_sentence_embedding_dimension()
if VECTOR_SIZE is None:
    VECTOR_SIZE = len(encoder.encode(["test"])[0])
26
 
27
+ # Create collection
28
  if not qclient.collection_exists(COLLECTION):
29
  qclient.create_collection(
30
  collection_name=COLLECTION,
 
32
  )
33
 
34
# ===============================
# Encode function
# ===============================
def encode_text(text: str):
    """Embed *text* with the CLIP model; returns a 1-D float ndarray."""
    embedding = encoder.encode([text])[0]
    return np.asarray(embedding, dtype=float)

def encode_image(img: Image.Image):
    """Embed a PIL image with the CLIP model; returns a float ndarray."""
    rgb = img.convert("RGB")
    return np.asarray(encoder.encode(rgb), dtype=float)
 
 
 
 
42
 
43
# ===============================
# Add Item
# ===============================
def add_item(text, image, uploader_name, uploader_phone):
    """Add a found item (photo and/or description) to the collection.

    Returns a human-readable status string for the Gradio UI.
    """
    try:
        vector = None
        img_path = None

        # Prefer the image embedding; fall back to the description text.
        if isinstance(image, Image.Image):
            # Persist the photo first so the payload can point at it.
            img_path = os.path.join(UPLOAD_DIR, f"{uuid.uuid4()}.png")
            image.save(img_path)
            vector = encode_image(image)
        elif text and text.strip():
            vector = encode_text(text)

        if vector is None:
            return "❌ Please provide either an image or description."

        point = PointStruct(
            id=str(uuid.uuid4()),
            vector=vector.tolist(),
            payload={
                "text": text or "",
                "uploader_name": uploader_name or "N/A",
                "uploader_phone": uploader_phone or "N/A",
                "image_path": img_path,
            },
        )
        qclient.upsert(collection_name=COLLECTION, points=[point], wait=True)
        return "βœ… Item added!"
    except Exception as e:
        # Surface the failure in the status box rather than crashing the UI.
        return f"❌ Error: {e}"
77
 
 
80
# ===============================
def search_items(text, image, max_results, min_score):
    """Search the collection by image (preferred) or text query.

    Returns (results rendered as text, list of matching image paths).
    """
    try:
        # Build the query vector — image takes precedence over text.
        if isinstance(image, Image.Image):
            vector = encode_image(image)
        elif text and text.strip():
            vector = encode_text(text)
        else:
            return "❌ Provide text or image.", []

        results = qclient.search(
            collection_name=COLLECTION,
            query_vector=vector.tolist(),
            limit=int(max_results),
            score_threshold=float(min_score),
            with_payload=True,
        )

        if not results:
            return "No matches found.", []

        text_out = []
        gallery = []
        for hit in results:
            payload = hit.payload or {}
            text_out.append(
                f"id:{hit.id} | score:{getattr(hit, 'score', 0):.3f} | "
                f"text:{payload.get('text','')} | "
                f"finder:{payload.get('uploader_name', 'N/A')} "
                f"({payload.get('uploader_phone', 'N/A')})"
            )

            # Only show images whose file still exists on disk.
            path = payload.get("image_path")
            if path and os.path.exists(path):
                gallery.append(path)

        return "\n".join(text_out), gallery
    except Exception as e:
        return f"❌ Error: {e}", []
126
 
 
148
# Gradio UI
# ===============================
with gr.Blocks() as demo:
    gr.Markdown("## πŸ”Ž Lost & Found System")

    # Tab 1: register a found item (photo and/or description + finder contact).
    with gr.Tab("βž• Add Found Item"):
        text_in = gr.Textbox(label="Description (optional)")
        img_in = gr.Image(type="pil", label="Upload Image (optional)")
        uploader_name = gr.Textbox(label="Finder's Name")
        uploader_phone = gr.Textbox(label="Finder's Phone")
        add_btn = gr.Button("Add to Database")
        add_status = gr.Textbox(label="Status")
        add_btn.click(add_item, inputs=[text_in, img_in, uploader_name, uploader_phone], outputs=[add_status])

    # Tab 2: query by text and/or example image; results shown as text + gallery.
    with gr.Tab("πŸ” Search Lost Item"):
        search_text = gr.Textbox(label="Search by Text (optional)")
        search_img = gr.Image(type="pil", label="Search by Image (optional)")
        max_results = gr.Slider(1, 20, value=5, step=1, label="Max Results")
        min_score = gr.Slider(0.0, 1.0, value=0.3, step=0.01, label="Min Similarity Score")
        search_btn = gr.Button("Search")
        search_text_out = gr.Textbox(label="Search Results (Text)")
        search_gallery = gr.Gallery(label="Search Results", columns=2, height="auto")
        search_btn.click(search_items, inputs=[search_text, search_img, max_results, min_score], outputs=[search_text_out, search_gallery])

    # Tab 3: admin-only wipe of the whole collection.
    with gr.Tab("πŸ—‘οΈ Admin"):
        clear_btn = gr.Button("Clear Database")
        clear_out = gr.Textbox(label="Status")
        clear_btn.click(clear_database, outputs=[clear_out])
176