import os

# Set multiple environment variables to redirect all caching.
# These must be set before importing sentence_transformers, because
# huggingface_hub reads them when it is first imported.
os.environ["TRANSFORMERS_CACHE"] = "/app/cache"
os.environ["HF_HOME"] = "/app/cache"
os.environ["XDG_CACHE_HOME"] = "/app/cache"  # Additional variable for broader compatibility

from typing import List, Optional

from fastapi import UploadFile
from PIL import Image
from sentence_transformers import SentenceTransformer

# Ensure cache directory exists (redundant with Dockerfile but added for safety)
cache_dir = "/app/cache"
os.makedirs(cache_dir, exist_ok=True)

# Load the CLIP model, pointing it at the custom cache directory explicitly
model = SentenceTransformer("clip-ViT-B-32", cache_folder=cache_dir)
def get_text_embedding(text: str) -> Optional[List[float]]:
    try:
        # Encode the text, then move the tensor to CPU and convert to a plain list
        embedding = model.encode(text, convert_to_tensor=True).cpu().numpy().tolist()
        return embedding
    except Exception as e:
        print(f"Error generating text embedding: {e}")
        return None
def get_image_embedding(image_file: UploadFile) -> Optional[List[float]]:
    try:
        # Normalize the upload to RGB at the 224x224 input size CLIP expects
        image = Image.open(image_file.file).convert("RGB").resize((224, 224))
        embedding = model.encode(image, convert_to_tensor=True).cpu().numpy().tolist()
        return embedding
    except Exception as e:
        print(f"Error generating image embedding: {e}")
        return None
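
# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal FastAPI app wiring the helpers above into endpoints, to show
# how they would be called. The app instance, route paths, and response
# shape are assumptions for illustration, not confirmed by the source.
from fastapi import FastAPI, File, HTTPException

app = FastAPI()

@app.post("/embed/text")
def embed_text(text: str):
    # Delegate to the helper; it returns None if encoding failed.
    embedding = get_text_embedding(text)
    if embedding is None:
        raise HTTPException(status_code=500, detail="Text embedding failed")
    return {"embedding": embedding}

@app.post("/embed/image")
def embed_image(image: UploadFile = File(...)):
    # The uploaded file is passed through unchanged; the helper handles
    # decoding, RGB conversion, and resizing.
    embedding = get_image_embedding(image)
    if embedding is None:
        raise HTTPException(status_code=500, detail="Image embedding failed")
    return {"embedding": embedding}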