hackerloi45 committed on
Commit
c559bc7
·
1 Parent(s): 9e00920

image search fixed

Browse files
Files changed (1) hide show
  1. app.py +96 -85
app.py CHANGED
@@ -1,58 +1,63 @@
1
  import os
2
  import uuid
3
  import gradio as gr
 
 
 
4
  from PIL import Image
5
- from qdrant_client import QdrantClient, models
6
- from transformers import CLIPProcessor, CLIPModel
7
 
8
- # ==============================
9
  # Setup
10
- # ==============================
11
  UPLOAD_DIR = "uploaded_images"
12
  os.makedirs(UPLOAD_DIR, exist_ok=True)
13
 
14
- qclient = QdrantClient(":memory:")
15
  COLLECTION = "lost_and_found"
16
 
 
 
 
 
17
  # Create collection
18
  if not qclient.collection_exists(COLLECTION):
19
  qclient.create_collection(
20
- COLLECTION,
21
  vectors_config=models.VectorParams(size=512, distance=models.Distance.COSINE),
22
  )
23
 
24
- # Load CLIP
25
- clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
26
- clip_proc = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
27
-
28
-
29
- # ==============================
30
- # Encode Function
31
- # ==============================
32
  def encode_data(text=None, image=None):
33
- if text:
34
- inputs = clip_proc(text=[text], return_tensors="pt", padding=True)
35
- return clip_model.get_text_features(**inputs).detach().numpy()[0]
36
-
37
- if image:
38
- inputs = clip_proc(images=image, return_tensors="pt")
39
- return clip_model.get_image_features(**inputs).detach().numpy()[0]
40
-
41
- raise ValueError("Need either text or image for encoding")
42
-
43
-
44
- # ==============================
45
- # Add Item (finder uploads)
46
- # ==============================
47
  def add_item(text, image, uploader_name, uploader_phone):
48
  try:
49
  img_path = None
 
 
50
  if image:
51
  img_id = str(uuid.uuid4())
52
  img_path = os.path.join(UPLOAD_DIR, f"{img_id}.png")
53
  image.save(img_path)
54
 
55
- vector = encode_data(text=text if text else None, image=image if image else None)
 
 
 
 
 
 
 
 
56
 
57
  qclient.upsert(
58
  collection_name=COLLECTION,
@@ -65,7 +70,6 @@ def add_item(text, image, uploader_name, uploader_phone):
65
  "uploader_name": uploader_name or "N/A",
66
  "uploader_phone": uploader_phone or "N/A",
67
  "image_path": img_path,
68
- "has_image": bool(image),
69
  },
70
  )
71
  ],
@@ -74,93 +78,100 @@ def add_item(text, image, uploader_name, uploader_phone):
74
  except Exception as e:
75
  return f"❌ Error: {e}"
76
 
77
-
78
- # ==============================
79
- # Search Items (for lost things)
80
- # ==============================
81
  def search_items(text, image, max_results, min_score):
82
  try:
83
- vector = encode_data(text=text if text else None, image=image if image else None)
 
 
 
 
 
 
 
84
 
85
  results = qclient.search(
86
  collection_name=COLLECTION,
87
  query_vector=vector.tolist(),
88
  limit=max_results,
89
  score_threshold=min_score,
90
- with_payload=True,
91
  )
92
 
93
- texts, imgs = [], []
 
 
 
 
94
  for r in results:
95
- p = r.payload
96
- desc = f"id:{r.id} | score:{r.score:.3f} | text:{p.get('text','')}"
97
-
98
- # Show finder details
99
- uploader_name = p.get("uploader_name", "N/A") or "N/A"
100
- uploader_phone = p.get("uploader_phone", "N/A") or "N/A"
101
- desc += f" | finder:{uploader_name} ({uploader_phone})"
102
-
103
- texts.append(desc)
104
- if p.get("has_image") and "image_path" in p:
105
- imgs.append(p["image_path"])
106
- return "\n".join(texts) if texts else "No matches", imgs
107
  except Exception as e:
108
  return f"❌ Error: {e}", []
109
 
110
-
111
- # ==============================
112
  # Delete All
113
- # ==============================
114
- def clear_all():
115
- qclient.recreate_collection(
116
- COLLECTION, vectors_config=models.VectorParams(size=512, distance=models.Distance.COSINE)
 
 
117
  )
118
- return "πŸ—‘οΈ All items cleared."
119
-
120
 
121
- # ==============================
122
  # Gradio UI
123
- # ==============================
124
  with gr.Blocks() as demo:
125
- gr.Markdown("# πŸ”‘ Lost & Found Database")
126
-
127
- with gr.Tab("βž• Add Found Item"):
128
- text_in = gr.Textbox(label="Description (optional)")
129
- img_in = gr.Image(type="pil", label="Upload Image")
130
- uploader_name = gr.Textbox(label="Finder Name")
131
- uploader_phone = gr.Textbox(label="Finder Phone")
 
 
132
  add_btn = gr.Button("Add to Database")
133
- add_out = gr.Textbox(label="Status")
134
 
135
  add_btn.click(
136
  add_item,
137
- inputs=[text_in, img_in, uploader_name, uploader_phone],
138
- outputs=add_out,
139
  )
140
 
141
- with gr.Tab("πŸ” Search Lost Item"):
142
- search_text = gr.Textbox(label="Search by text (optional)")
143
- search_img = gr.Image(type="pil", label="Search by image (optional)")
144
- max_results = gr.Slider(1, 10, value=5, step=1, label="Max results")
145
- min_score = gr.Slider(0.5, 1.0, value=0.8, step=0.01, label="Min similarity threshold")
 
 
146
  search_btn = gr.Button("Search")
147
- search_out = gr.Textbox(label="Search results (text)")
148
- search_gallery = gr.Gallery(label="Search Results")
149
 
150
  search_btn.click(
151
  search_items,
152
  inputs=[search_text, search_img, max_results, min_score],
153
- outputs=[search_out, search_gallery],
154
  )
155
 
156
  with gr.Tab("πŸ—‘οΈ Admin"):
157
- clear_btn = gr.Button("Clear All Items")
158
  clear_out = gr.Textbox(label="Status")
159
- clear_btn.click(clear_all, outputs=clear_out)
160
-
161
 
162
- # ==============================
163
- # Launch
164
- # ==============================
165
- if __name__ == "__main__":
166
- demo.launch()
 
1
  import os
2
  import uuid
3
  import gradio as gr
4
+ import qdrant_client
5
+ from qdrant_client import models
6
+ from sentence_transformers import SentenceTransformer
7
  from PIL import Image
 
 
8
 
9
+ # ===============================
10
  # Setup
11
+ # ===============================
12
  UPLOAD_DIR = "uploaded_images"
13
  os.makedirs(UPLOAD_DIR, exist_ok=True)
14
 
 
15
  COLLECTION = "lost_and_found"
16
 
17
+ # Qdrant client
18
+ qclient = qdrant_client.QdrantClient(":memory:") # In-memory for Hugging Face
19
+ encoder = SentenceTransformer("clip-ViT-B-32")
20
+
21
  # Create collection
22
  if not qclient.collection_exists(COLLECTION):
23
  qclient.create_collection(
24
+ collection_name=COLLECTION,
25
  vectors_config=models.VectorParams(size=512, distance=models.Distance.COSINE),
26
  )
27
 
28
# ===============================
# Encode Function (Text or Image)
# ===============================
def encode_data(text=None, image=None):
    """Return a CLIP embedding for an image (preferred) or a text string.

    Args:
        text: Optional description string; used only when no image is given.
        image: Optional image — either a file path (as passed by add_item)
            or an already-loaded PIL.Image (as passed by search_items).

    Returns:
        A numpy embedding vector, or None when neither input is provided.
    """
    if image is not None:
        # Bug fix: Image.open() only accepts a path/file object, so calling
        # it on a PIL.Image (the search path) raised. Accept both forms.
        img = image if isinstance(image, Image.Image) else Image.open(image)
        return encoder.encode(img.convert("RGB"))
    elif text:
        return encoder.encode([text])[0]
    else:
        return None
38
+
39
+ # ===============================
40
+ # Add Item
41
+ # ===============================
 
 
 
 
42
  def add_item(text, image, uploader_name, uploader_phone):
43
  try:
44
  img_path = None
45
+ vector = None
46
+
47
  if image:
48
  img_id = str(uuid.uuid4())
49
  img_path = os.path.join(UPLOAD_DIR, f"{img_id}.png")
50
  image.save(img_path)
51
 
52
+ # βœ… Always store image embedding if available
53
+ vector = encode_data(image=img_path)
54
+
55
+ elif text:
56
+ # βœ… Fallback: text embedding
57
+ vector = encode_data(text=text)
58
+
59
+ if vector is None:
60
+ return "❌ Please provide an image or text."
61
 
62
  qclient.upsert(
63
  collection_name=COLLECTION,
 
70
  "uploader_name": uploader_name or "N/A",
71
  "uploader_phone": uploader_phone or "N/A",
72
  "image_path": img_path,
 
73
  },
74
  )
75
  ],
 
78
  except Exception as e:
79
  return f"❌ Error: {e}"
80
 
81
# ===============================
# Search Function
# ===============================
def search_items(text, image, max_results, min_score):
    """Look up stored items by image (preferred) or text.

    Returns a tuple of (report string, list of image paths for the gallery).
    """
    try:
        # Image queries take precedence over text queries.
        if image:
            vector = encode_data(image=image)
        elif text:
            vector = encode_data(text=text)
        else:
            vector = None

        if vector is None:
            return "❌ Please provide an image or text.", []

        hits = qclient.search(
            collection_name=COLLECTION,
            query_vector=vector.tolist(),
            limit=max_results,
            score_threshold=min_score,
        )

        if not hits:
            return "No matches found.", []

        # Build one summary line per hit, plus gallery paths for any
        # stored images that still exist on disk.
        report, gallery = [], []
        for hit in hits:
            data = hit.payload
            report.append(
                f"id:{hit.id} | score:{hit.score:.3f} | "
                f"text:{data.get('text','')} | "
                f"finder:{data.get('uploader_name','N/A')} "
                f"({data.get('uploader_phone','N/A')})"
            )
            path = data.get("image_path")
            if path and os.path.exists(path):
                gallery.append(path)

        return "\n".join(report), gallery

    except Exception as e:
        return f"❌ Error: {e}", []
121
 
122
# ===============================
# Delete All
# ===============================
def clear_database():
    """Drop every stored item by recreating the collection from scratch."""
    qclient.delete_collection(COLLECTION)
    qclient.create_collection(
        collection_name=COLLECTION,
        vectors_config=models.VectorParams(
            size=512,
            distance=models.Distance.COSINE,
        ),
    )
    return "πŸ—‘οΈ Database cleared!"
 
132
 
133
# ===============================
# Gradio UI
# ===============================
with gr.Blocks() as demo:
    gr.Markdown("## πŸ—οΈ Lost & Found - Database")

    # Tab 1: finder registers an item (photo and/or description + contact).
    with gr.Tab("βž• Add Item"):
        with gr.Row():
            desc_box = gr.Textbox(label="Description (optional)")
            photo_box = gr.Image(type="pil", label="Upload Image")
        with gr.Row():
            finder_name = gr.Textbox(label="Finder Name")
            finder_phone = gr.Textbox(label="Finder Phone")
        add_button = gr.Button("Add to Database")
        add_status = gr.Textbox(label="Status")

        add_button.click(
            add_item,
            inputs=[desc_box, photo_box, finder_name, finder_phone],
            outputs=add_status,
        )

    # Tab 2: owner searches for a lost item by text or image.
    with gr.Tab("πŸ” Search"):
        with gr.Row():
            query_text = gr.Textbox(label="Search by text (optional)")
            query_image = gr.Image(type="pil", label="Search by image (optional)")
        with gr.Row():
            limit_slider = gr.Slider(1, 10, value=5, step=1, label="Max results")
            score_slider = gr.Slider(0.5, 1.0, value=0.8, step=0.01, label="Min similarity threshold")
        search_button = gr.Button("Search")
        results_box = gr.Textbox(label="Search results (text)")
        results_gallery = gr.Gallery(label="Search Results", columns=2, height="auto")

        search_button.click(
            search_items,
            inputs=[query_text, query_image, limit_slider, score_slider],
            outputs=[results_box, results_gallery],
        )

    # Tab 3: admin wipe of the whole collection.
    with gr.Tab("πŸ—‘οΈ Admin"):
        wipe_button = gr.Button("Clear Database")
        wipe_status = gr.Textbox(label="Status")
        wipe_button.click(clear_database, outputs=wipe_status)


demo.launch()