hackerloi45 committed
Commit
2b16ad8
·
1 Parent(s): a1501eb

Fix CLIP model issue in app.py

Files changed (1)
  1. app.py +5 -5
app.py CHANGED
@@ -31,7 +31,7 @@ print("Loading CLIP model (this may take 20-60s the first time)...")
 MODEL_ID = "sentence-transformers/clip-ViT-B-32-multilingual-v1"
 clip_model = SentenceTransformer(MODEL_ID)
 
-# Detect correct embedding size dynamically
+# Dynamically detect vector size
 VECTOR_SIZE = clip_model.get_sentence_embedding_dimension()
 
 genai_client = genai.Client(api_key=GEMINI_API_KEY) if GEMINI_API_KEY else None
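Note (not part of the commit): get_sentence_embedding_dimension() reads the output size from the loaded checkpoint, so the Qdrant schema created below always matches the embeddings instead of relying on a hardcoded constant. A minimal sketch of that contract, assuming this sentence-transformers checkpoint:

from sentence_transformers import SentenceTransformer

model = SentenceTransformer("sentence-transformers/clip-ViT-B-32-multilingual-v1")
# The dimension comes from the model config; for this checkpoint it should be 512.
print(model.get_sentence_embedding_dimension())  # 512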
@@ -42,7 +42,7 @@ if not QDRANT_URL:
 qclient = QdrantClient(url=QDRANT_URL, api_key=QDRANT_API_KEY)
 COLLECTION = "lost_found_items"
 
-# Create collection if missing
+# Ensure collection exists with correct vector size
 try:
     if not qclient.collection_exists(COLLECTION):
         qclient.create_collection(
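The create_collection(...) call is cut off at the hunk boundary. A sketch of how such a call typically looks with qdrant-client, wired to the detected VECTOR_SIZE; the URL and the distance metric are assumptions, since neither is visible in this diff:

from qdrant_client import QdrantClient
from qdrant_client.models import VectorParams, Distance

qclient = QdrantClient(url="http://localhost:6333")  # placeholder URL
COLLECTION = "lost_found_items"
VECTOR_SIZE = 512  # would come from clip_model.get_sentence_embedding_dimension()

if not qclient.collection_exists(COLLECTION):
    qclient.create_collection(
        collection_name=COLLECTION,
        # Distance.COSINE is an assumption; the commit does not show the metric.
        vectors_config=VectorParams(size=VECTOR_SIZE, distance=Distance.COSINE),
    )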
@@ -56,11 +56,12 @@ except Exception as e:
 # Helpers
 # -------------------------
 def embed_text(text: str):
-    return clip_model.encode(text, convert_to_numpy=True)
+    return clip_model.encode([text], convert_to_numpy=True)[0]
 
 def embed_image_pil(pil_img: Image.Image):
     pil_img = pil_img.convert("RGB")
-    return clip_model.encode(pil_img, convert_to_numpy=True)
+    np_img = np.array(pil_img)
+    return clip_model.encode([np_img], convert_to_numpy=True)[0]
 
 def gen_tags_from_image_file(image_bytes: io.BytesIO) -> str:
     if genai_client is None:
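Why the helpers now wrap their input in a list and index [0]: with convert_to_numpy=True, encode() on a list returns a 2-D batch of shape (n, dim), so [0] recovers a single 1-D vector, which is what .tolist() and the Qdrant upsert downstream expect. Note also that the new image path calls np.array, which assumes numpy is imported as np in app.py. A quick sketch of the shape contract:

import numpy as np
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("sentence-transformers/clip-ViT-B-32-multilingual-v1")

batch = model.encode(["red backpack with a laptop inside"], convert_to_numpy=True)
print(batch.shape)     # (1, 512): a list input yields a 2-D batch
print(batch[0].shape)  # (512,): indexing [0] gives the single vector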
@@ -96,7 +97,6 @@ def add_item(mode: str, uploaded_image, text_description: str):
 
         vec = embed_image_pil(uploaded_image).tolist()
         payload["has_image"] = True
-
         payload["tags"] = gen_tags_from_image_file(img_bytes)
         payload["image_b64"] = base64.b64encode(img_bytes.getvalue()).decode("utf-8")
     else:
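payload["tags"] comes from gen_tags_from_image_file, whose body is truncated above; only its signature and the genai_client guard are visible. A hedged sketch of what tag generation with the google-genai client could look like; the model name and prompt are assumptions:

import io
from google import genai
from google.genai import types

client = genai.Client(api_key="...")  # placeholder key

def gen_tags(image_bytes: io.BytesIO) -> str:
    # Hypothetical body: the commit only shows the function signature.
    resp = client.models.generate_content(
        model="gemini-2.0-flash",  # assumed model name
        contents=[
            types.Part.from_bytes(data=image_bytes.getvalue(), mime_type="image/png"),
            "List short, comma-separated tags describing this item.",
        ],
    )
    return (resp.text or "").strip()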
 
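The diff stops before the vector and payload are written to Qdrant. For context, a minimal sketch of the kind of upsert that would pair with the code above; the point id scheme is an assumption:

import uuid
from qdrant_client import QdrantClient
from qdrant_client.models import PointStruct

qclient = QdrantClient(url="http://localhost:6333")  # placeholder URL

def store_item(vec: list, payload: dict) -> None:
    # Hypothetical helper: the actual upsert is outside this diff.
    qclient.upsert(
        collection_name="lost_found_items",
        points=[PointStruct(id=str(uuid.uuid4()), vector=vec, payload=payload)],
    )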