hackerloi45 committed on
Commit
0c4adc5
·
1 Parent(s): 22ef1d5
Files changed (1) hide show
  1. app.py +25 -6
app.py CHANGED
@@ -1,14 +1,19 @@
1
  import gradio as gr
2
  import uuid
 
3
  from qdrant_client import QdrantClient
4
  from qdrant_client.models import PointStruct
5
  from sentence_transformers import SentenceTransformer
6
  from PIL import Image
7
  import numpy as np
8
 
 
 
 
 
9
  # Connect to Qdrant
10
  COLLECTION = "lost_and_found"
11
- qclient = QdrantClient(":memory:") # use in-memory for demo, replace with host/port for persistence
12
 
13
  # Load CLIP model
14
  model = SentenceTransformer("sentence-transformers/clip-ViT-B-32-multilingual-v1")
@@ -39,9 +44,19 @@ def add_item(mode, text, image, name, phone):
39
  "text": text,
40
  "has_image": image is not None,
41
  }
 
 
 
 
 
 
 
 
 
42
  if mode == "found":
43
  payload["finder_name"] = name
44
  payload["finder_phone"] = phone
 
45
  qclient.upsert(
46
  collection_name=COLLECTION,
47
  points=[PointStruct(id=str(uuid.uuid4()), vector=vector.tolist(), payload=payload)]
@@ -73,8 +88,10 @@ def search_items(query_image, query_text, limit, min_score):
73
  if pl.get("mode") == "found":
74
  info += f" | found by: {pl.get('finder_name','?')} ({pl.get('finder_phone','?')})"
75
  out_texts.append(info)
76
- if pl.get("has_image"):
77
- out_imgs.append(query_image) # just echo search image (or store actual image paths if needed)
 
 
78
  return "\n".join(out_texts) if out_texts else "No matches.", out_imgs
79
  except Exception as e:
80
  return f"❌ Error: {e}", []
@@ -82,11 +99,13 @@ def search_items(query_image, query_text, limit, min_score):
82
  # Clear all images
83
  def clear_all_images():
84
  try:
 
 
 
 
85
  qclient.delete(
86
  collection_name=COLLECTION,
87
- points_selector={
88
- "filter": {"must": [{"key": "has_image", "match": {"value": True}}]}
89
- }
90
  )
91
  return "🗑️ All image items cleared!"
92
  except Exception as e:
 
1
  import gradio as gr
2
  import uuid
3
+ import os
4
  from qdrant_client import QdrantClient
5
  from qdrant_client.models import PointStruct
6
  from sentence_transformers import SentenceTransformer
7
  from PIL import Image
8
  import numpy as np
9
 
10
+ # Create uploads folder
11
+ UPLOAD_DIR = "uploads"
12
+ os.makedirs(UPLOAD_DIR, exist_ok=True)
13
+
14
  # Connect to Qdrant
15
  COLLECTION = "lost_and_found"
16
+ qclient = QdrantClient(":memory:") # for demo, replace with your Qdrant server for persistence
17
 
18
  # Load CLIP model
19
  model = SentenceTransformer("sentence-transformers/clip-ViT-B-32-multilingual-v1")
 
44
  "text": text,
45
  "has_image": image is not None,
46
  }
47
+
48
+ # Save image file if uploaded
49
+ if image is not None:
50
+ img = Image.open(image).convert("RGB")
51
+ fname = f"{uuid.uuid4().hex}.png"
52
+ fpath = os.path.join(UPLOAD_DIR, fname)
53
+ img.save(fpath)
54
+ payload["image_path"] = fpath
55
+
56
  if mode == "found":
57
  payload["finder_name"] = name
58
  payload["finder_phone"] = phone
59
+
60
  qclient.upsert(
61
  collection_name=COLLECTION,
62
  points=[PointStruct(id=str(uuid.uuid4()), vector=vector.tolist(), payload=payload)]
 
88
  if pl.get("mode") == "found":
89
  info += f" | found by: {pl.get('finder_name','?')} ({pl.get('finder_phone','?')})"
90
  out_texts.append(info)
91
+
92
+ if pl.get("image_path"):
93
+ out_imgs.append(pl["image_path"])
94
+
95
  return "\n".join(out_texts) if out_texts else "No matches.", out_imgs
96
  except Exception as e:
97
  return f"❌ Error: {e}", []
 
99
  # Clear all images
100
  def clear_all_images():
101
  try:
102
+ # Clear uploads folder
103
+ for f in os.listdir(UPLOAD_DIR):
104
+ os.remove(os.path.join(UPLOAD_DIR, f))
105
+ # Clear Qdrant
106
  qclient.delete(
107
  collection_name=COLLECTION,
108
+ points_selector={"filter": {"must": [{"key": "has_image", "match": {"value": True}}]}}
 
 
109
  )
110
  return "🗑️ All image items cleared!"
111
  except Exception as e: