from transformers import BlipProcessor, BlipForConditionalGeneration
import torch

# Load the BLIP captioning processor and model once at module load.
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")

def generate_caption(image):
    # Preprocess the image into pixel-value tensors for the model.
    inputs = processor(image, return_tensors="pt")
    with torch.no_grad():
        # Beam search with 5 beams; block repeated bigrams and stop
        # early once every beam has produced an end-of-sequence token.
        out = model.generate(
            **inputs,
            max_length=50,
            num_beams=5,
            early_stopping=True,
            no_repeat_ngram_size=2,
        )
    caption = processor.decode(out[0], skip_special_tokens=True)
    return caption
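
# A minimal usage sketch: open an image with Pillow and caption it.
# The file path "photo.jpg" is hypothetical; substitute any RGB image.
from PIL import Image

image = Image.open("photo.jpg").convert("RGB")
print(generate_caption(image))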