from transformers import CLIPModel, CLIPProcessor
import torch
import numpy as np
# Run on GPU when one is visible; otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Load the CLIP model and its processor from the current directory (".") —
# assumes the checkpoint/config files were downloaded alongside this script;
# TODO(review): confirm the deployment bundles the weights in the CWD.
model = CLIPModel.from_pretrained(".").to(device)
processor = CLIPProcessor.from_pretrained(".")
def inference(inputs):
    """Embed the text in ``inputs`` with CLIP and return a nested list.

    Expects ``inputs["text"]`` to hold a non-empty string.  On success the
    return value is ``embedding.tolist()`` — a list containing one
    L2-normalized feature vector.  A missing, ``None``, or empty ``"text"``
    entry yields ``{"error": "No valid input provided"}`` instead.

    Relies on the module-level ``model``, ``processor``, and ``device``.
    """
    text = inputs.get("text")
    if not text:
        # Guard clause: missing key, None, and "" all count as "no input".
        return {"error": "No valid input provided"}
    batch = processor(text=[text], return_tensors="pt", truncation=True).to(device)
    with torch.no_grad():
        features = model.get_text_features(**batch)
    # L2-normalize so downstream cosine similarity reduces to a dot product.
    features = torch.nn.functional.normalize(features, dim=-1)
    return features.cpu().numpy().tolist()