"""Gradio demo: caption an uploaded image with the BLIP base captioning model."""

from functools import lru_cache

import gradio as gr
from PIL import Image
from transformers import BlipForConditionalGeneration, BlipProcessor

# Model card used for both the processor and the generation model.
_MODEL_ID = "Salesforce/blip-image-captioning-base"


@lru_cache(maxsize=1)
def _load_blip():
    """Load the BLIP processor and model exactly once.

    The original code re-instantiated both on every request, forcing a full
    checkpoint load per caption. Caching makes the first call slow (download)
    and every subsequent call fast.
    """
    processor = BlipProcessor.from_pretrained(_MODEL_ID)
    model = BlipForConditionalGeneration.from_pretrained(_MODEL_ID)
    return processor, model


def generate_caption(input_image):
    """Return a natural-language caption for *input_image*.

    Parameters
    ----------
    input_image : PIL.Image.Image or None
        Image supplied by the Gradio widget; ``None`` when the user submits
        without uploading anything.

    Returns
    -------
    str
        The generated caption, or a prompt to upload an image when the
        input is ``None``.
    """
    if input_image is None:
        return "Please upload an image."

    processor, model = _load_blip()

    # BLIP expects 3-channel input; normalize RGBA/grayscale uploads.
    image = input_image.convert("RGB")

    inputs = processor(image, return_tensors="pt")
    outputs = model.generate(**inputs)
    return processor.decode(outputs[0], skip_special_tokens=True)


iface = gr.Interface(
    fn=generate_caption,
    inputs=gr.Image(type="pil"),
    outputs="text",
    title="Image Captioning",
)

if __name__ == "__main__":
    iface.launch()