File size: 662 Bytes
6951dcc
 
807b2d7
bdeb045
db39d96
807b2d7
 
d4f2186
6951dcc
db39d96
 
bdeb045
 
6951dcc
 
db39d96
 
 
6951dcc
 
db39d96
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
import gradio as gr
import os
from transformers import AutoProcessor, AutoModelForImageTextToText

# Hugging Face Hub checkpoint; change in one place to swap models.
MODEL_ID = "Salesforce/blip-image-captioning-base"

# Load the BLIP processor and model once at import time so each request
# pays only for inference, not for loading weights.
processor = AutoProcessor.from_pretrained(MODEL_ID)
model = AutoModelForImageTextToText.from_pretrained(MODEL_ID)

def caption(image):
    """Generate a natural-language caption for an uploaded image.

    Args:
        image: PIL image supplied by the Gradio component (type="pil"),
            or None when the user submits without choosing an image.

    Returns:
        str: the decoded caption, or a prompt asking for an image.
    """
    # Gradio passes None when the form is submitted with no image;
    # without this guard, processor(None, ...) raises.
    if image is None:
        return "Please upload an image."
    inputs = processor(image, return_tensors="pt")
    out = model.generate(**inputs)
    return processor.decode(out[0], skip_special_tokens=True)

# Wire the captioning function into a minimal Gradio UI: a single image
# input (delivered to `caption` as a PIL image because type="pil") and a
# plain-text output for the generated caption.
demo = gr.Interface(
    fn=caption,
    inputs=gr.Image(label="Upload Image", type="pil"), 
    outputs="text"
)

# Start the local Gradio server (blocking call).
demo.launch()