# NOTE: captured from a Hugging Face Space (page status: "Sleeping") — the
# lines below are the Space's application source.
import logging
import os

from dotenv import load_dotenv
from fastapi import FastAPI, File, HTTPException, UploadFile
from PIL import Image
import torch
from transformers import CLIPModel, CLIPProcessor
# Pull configuration (e.g. the Hugging Face token) from a local .env file.
load_dotenv()

# Module-level logger, configured once at import time.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = FastAPI(
    title="Image Embedding API",
    description="Returns CLIP image embeddings via GET",
)

# Hugging Face access token for model downloads; None if the env var is unset.
HF_TOKEN = os.getenv('hf_token')
logger.info("Loading CLIP processor and model...")
try:
    # `token=` replaces the deprecated `use_auth_token=` keyword, which is
    # removed in recent transformers releases. Passing None is fine for
    # public checkpoints.
    processor = CLIPProcessor.from_pretrained(
        "openai/clip-vit-large-patch14", token=HF_TOKEN)
    clip_model = CLIPModel.from_pretrained(
        "openai/clip-vit-large-patch14", token=HF_TOKEN)
    # Inference-only service: switch off dropout / training-mode layers.
    clip_model.eval()
    logger.info("CLIP model loaded successfully")
except Exception as e:
    # Fail fast at import: the service is useless without the model.
    logger.error(f"Failed to load CLIP model: {e}")
    raise
@app.get("/")
async def root():
    """Welcome/health endpoint for the API root.

    Returns:
        dict: a static welcome message.
    """
    # Without the @app.get decorator this handler was never registered
    # with FastAPI, so GET / would 404.
    logger.info("Root endpoint accessed")
    return {"message": "Welcome to the Image Embedding API."}
# NOTE(review): the route decorator was missing, so this handler was never
# reachable. "/embed" is an assumed path — confirm against the original Space.
# File uploads require a request body, hence POST rather than GET.
@app.post("/embed")
async def process_image(file: UploadFile = File(...)):
    """Compute the CLIP image embedding for an uploaded image.

    Args:
        file: multipart-uploaded image file; any format PIL can decode.

    Returns:
        dict: {"embedding": nested list of floats} — the raw
        `get_image_features` output converted via ``tolist()``.

    Raises:
        HTTPException: 400 if the upload cannot be decoded as an image.
    """
    logger.info("Processing image")
    try:
        # Convert to RGB so grayscale/RGBA uploads match CLIP's expected input.
        image = Image.open(file.file).convert("RGB")
    except Exception as exc:
        # A corrupt/non-image upload is a client error, not a server 500.
        raise HTTPException(status_code=400, detail="Invalid image file") from exc
    inputs = processor(images=image, return_tensors="pt")
    # Inference only — no autograd graph needed.
    with torch.no_grad():
        embeddings = clip_model.get_image_features(**inputs)
    return {"embedding": embeddings.tolist()}