from PIL import Image
from transformers import CLIPProcessor
def _get_clip_processor(model_name):
    """Return a memoized CLIPProcessor for *model_name*.

    `from_pretrained` hits the local cache/network and builds the processor
    from scratch, which is far too expensive to repeat on every image; the
    per-model instances are kept in a function-attribute dict.
    """
    cache = _get_clip_processor.__dict__.setdefault("_cache", {})
    if model_name not in cache:
        cache[model_name] = CLIPProcessor.from_pretrained(model_name)
    return cache[model_name]


def load_and_preprocess_image(image_path, model_name="openai/clip-vit-base-patch32"):
    """Load an image from *image_path* and preprocess it for CLIP.

    Args:
        image_path: Path to an image file readable by PIL.
        model_name: Hugging Face model id whose processor to use
            (default: the original hard-coded CLIP ViT-B/32).

    Returns:
        A ``(inputs, processor)`` tuple where ``inputs`` is the processor's
        output with ``return_tensors="pt"`` (PyTorch tensors) and
        ``processor`` is the (shared, memoized) CLIPProcessor instance.
    """
    # `with` closes the underlying file handle; convert("RGB") forces the
    # pixel data to load first, so the returned copy is safe to use after.
    with Image.open(image_path) as img:
        image = img.convert("RGB")
    processor = _get_clip_processor(model_name)
    inputs = processor(images=image, return_tensors="pt")
    return inputs, processor