# Source scraped from a Hugging Face Space (commit c7ccfa3, 3,435 bytes); page chrome and line-number gutter removed.
import os
from io import BytesIO

import numpy as np
import pinecone
import requests
import torch
from fastapi import FastAPI, UploadFile, File, HTTPException
from PIL import Image
from pydantic import BaseModel
from transformers import AutoProcessor, CLIPModel
# --- FastAPI application -----------------------------------------------------
# Single application instance; all endpoints below register against it.
app = FastAPI(title="Image & Text Search API", version="1.0")
# --- Pinecone vector database ------------------------------------------------
# SECURITY: a real API key was previously hard-coded here, which means it is
# committed to history and must be rotated. Read the key from the environment
# instead of embedding it in source.
PINECONE_API_KEY = os.environ.get("PINECONE_API_KEY", "")
INDEX_NAME = "unsplash-index"

# Fail fast with a clear message rather than a cryptic auth error later.
if not PINECONE_API_KEY:
    raise RuntimeError("PINECONE_API_KEY environment variable is not set.")

pc = pinecone.Pinecone(api_key=PINECONE_API_KEY)
unsplash_index = pc.Index(INDEX_NAME)
# --- CLIP model & processor --------------------------------------------------
# One model id drives both the model and its processor so they cannot drift.
_CLIP_ID = "openai/clip-vit-base-patch32"
model = CLIPModel.from_pretrained(_CLIP_ID)
processor = AutoProcessor.from_pretrained(_CLIP_ID)
# --- Embedding helpers -------------------------------------------------------
def get_text_embedding(text: str) -> list:
    """Embed *text* with CLIP and return the vector as a flat list of floats."""
    tokens = processor(
        text=[text], return_tensors="pt", padding=True, truncation=True
    )
    # Inference only — no autograd graph needed.
    with torch.no_grad():
        features = model.get_text_features(**tokens)
    return features.cpu().numpy().flatten().tolist()
# --- Image embedding ---------------------------------------------------------
def get_image_embedding(image: Image.Image) -> list:
    """Embed a PIL *image* with CLIP and return the vector as a flat float list."""
    batch = processor(images=image, return_tensors="pt")
    # Inference only — no autograd graph needed.
    with torch.no_grad():
        features = model.get_image_features(**batch)
    return features.cpu().numpy().flatten().tolist()
# --- Pinecone similarity search ----------------------------------------------
def search_similar_images(embedding, top_k: int = 10):
    """Query the Unsplash index for the *top_k* nearest neighbours of *embedding*.

    Returns the raw Pinecone match list ([] when nothing matched).
    """
    response = unsplash_index.query(
        vector=embedding,
        top_k=top_k,
        include_metadata=True,
        namespace="image-search-dataset",
    )
    return response.get("matches", [])
# --- Request schema: text-to-image search ------------------------------------
class TextSearchRequest(BaseModel):
    """Body for POST /search/text."""

    query: str  # free-text query to embed and match against the image index
@app.post("/search/text")
async def search_by_text(request: TextSearchRequest):
    """Text-to-image search: embed the query text and return the top matches.

    Raises HTTP 404 when the index returns no matches.
    """
    query_vector = get_text_embedding(request.query)
    matches = search_similar_images(query_vector, top_k=10)
    if not matches:
        raise HTTPException(status_code=404, detail="No matching images found.")
    return {"query": request.query, "results": matches}
# --- API endpoint: image-to-image search -------------------------------------
@app.post("/search/image")
async def search_by_image(file: UploadFile = File(...)):
    """Image-to-image search: embed the uploaded image and return top matches.

    Raises:
        HTTPException 400: the upload is not a decodable image.
        HTTPException 404: the index returned no matches.
    """
    raw = await file.read()
    try:
        image = Image.open(BytesIO(raw)).convert("RGB")
    except Exception as e:
        # A bad upload is a client error, not a server fault — previously this
        # escaped as an unhandled PIL exception and surfaced as a raw 500.
        raise HTTPException(status_code=400, detail=f"Invalid image file: {e}") from e
    embedding = get_image_embedding(image)
    matches = search_similar_images(embedding, top_k=10)
    if not matches:
        raise HTTPException(status_code=404, detail="No similar images found.")
    return {"filename": file.filename, "results": matches}
# --- API endpoint: store an image in Pinecone --------------------------------
@app.post("/store/image")
async def store_image(file: UploadFile = File(...)):
    """Embed the uploaded image and upsert it into the Pinecone index.

    The filename is used as the vector ID, so uploading the same filename
    twice overwrites the earlier entry.

    Raises:
        HTTPException 400: the upload is not a decodable image.
        HTTPException 500: embedding or the Pinecone upsert failed.
    """
    raw = await file.read()
    try:
        image = Image.open(BytesIO(raw)).convert("RGB")
    except Exception as e:
        # Undecodable upload is a client error; the old blanket handler
        # reported it as a 500.
        raise HTTPException(status_code=400, detail=f"Invalid image file: {e}") from e
    try:
        embedding = get_image_embedding(image)
        # NOTE(review): filename collisions silently overwrite — consider a
        # content hash as the ID if distinct uploads must be preserved.
        image_id = file.filename
        unsplash_index.upsert([(image_id, embedding, {"filename": image_id})])
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error processing image: {str(e)}") from e
    return {"message": f"Image {image_id} stored successfully!"}
# --- Health check ------------------------------------------------------------
@app.get("/")
async def health_check():
    """Liveness probe: confirms the API process is up and serving requests."""
    return {"message": "API is running!"}
|