import logging
import os

import torch
from dotenv import load_dotenv
from fastapi import FastAPI, File, HTTPException, UploadFile
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

# Pull environment variables (e.g. the Hugging Face token) from a local .env file.
load_dotenv()


logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = FastAPI(title="Image Embedding API",
              description="Returns CLIP image embeddings via POST")


# Hugging Face access token read from the environment; may be None, which is
# fine for public checkpoints such as openai/clip-vit-large-patch14.
HF_TOKEN = os.getenv('hf_token')

logger.info("Loading CLIP processor and model...")
try:
    # `token` replaces the deprecated `use_auth_token` argument in recent
    # versions of transformers.
    processor = CLIPProcessor.from_pretrained(
        "openai/clip-vit-large-patch14", token=HF_TOKEN)
    clip_model = CLIPModel.from_pretrained(
        "openai/clip-vit-large-patch14", token=HF_TOKEN)
    clip_model.eval()
    logger.info("CLIP model loaded successfully")
except Exception as e:
    logger.error(f"Failed to load CLIP model: {e}")
    raise
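
# Note: the model runs on CPU by default; a GPU deployment could move it with
# clip_model.to("cuda") and send the processed inputs to the same device
# before calling get_image_features.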


@app.get("/")
async def root():
    logger.info("Root endpoint accessed")
    return {"message": "Welcome to the Image Embedding API."}


@app.post("/clip/process")
async def process_image(file: UploadFile = File(...)):
    """Return a CLIP image embedding for the uploaded file."""
    logger.info("Processing image: %s", file.filename)
    try:
        image = Image.open(file.file).convert("RGB")
    except Exception as exc:
        raise HTTPException(status_code=400,
                            detail=f"Could not read image: {exc}")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        embeddings = clip_model.get_image_features(**inputs)
    # Nested list of shape (1, 768) for CLIP ViT-L/14.
    return {"embedding": embeddings.tolist()}