import asyncio
import io
import torch
from PIL import Image
from transformers import CLIPProcessor, CLIPModel
from qdrant_client import QdrantClient, models
# Mock the setup
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Device: {device}")
try:
    print("Loading model...")
    model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").to(device)
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    qdrant = QdrantClient(":memory:")
    # Create the collection before searching: the in-memory client raises if
    # it does not exist. Size 512 matches CLIP ViT-B/32 image embeddings;
    # COSINE distance is an assumption that pairs with the normalization below.
    qdrant.create_collection(
        collection_name="booth_items",
        vectors_config=models.VectorParams(size=512, distance=models.Distance.COSINE),
    )
    print("Model loaded.")
    # Create dummy image
    img = Image.new('RGB', (100, 100), color='red')
    # Run logic from search_standalone.py
    print("Processing image...")
    inputs = processor(images=img, return_tensors="pt").to(device)
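    # Note: the processor handles resizing to the model's expected 224x224
    # input and CLIP's pixel normalization, so the dummy 100x100 image is fine.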
    with torch.no_grad():
        features = model.get_image_features(**inputs)
    print(f"Features type: {type(features)}")
    # Test normalization
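    # Scaling to unit length makes dot products equal cosine similarity,
    # the usual convention for CLIP embedding retrieval.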
    features = features / features.norm(p=2, dim=-1, keepdim=True)
    vector = features.cpu().numpy()[0].tolist()
    print(f"Vector length: {len(vector)}")
    # Test Qdrant search
    print("Searching Qdrant...")
    results = qdrant.search(
        collection_name="booth_items",
        query_vector=vector,
        limit=10,
    )
    print("Search complete (empty result expected).")
except Exception as e:
    import traceback
    traceback.print_exc()