File size: 1,232 Bytes
800c4f5
 
 
48fc669
800c4f5
 
 
48fc669
800c4f5
48fc669
 
 
fa5ba4e
48fc669
429764a
fa5ba4e
48fc669
800c4f5
48fc669
 
fa5ba4e
48fc669
 
 
 
fa5ba4e
 
 
48fc669
800c4f5
48fc669
800c4f5
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
import gradio as gr
from transformers import pipeline

# Load the image-to-text model pipeline.
# NOTE: this downloads the Salesforce/blip-image-captioning-base weights
# from the Hugging Face Hub on first run (later runs use the local cache),
# so module import can take a while and requires network access once.
pipe = pipeline("image-to-text",
                model="Salesforce/blip-image-captioning-base")

# Define the function to generate text from an image.
def launch(image):
    """Generate a text caption for *image* using the BLIP pipeline.

    Parameters
    ----------
    image : PIL.Image.Image
        The uploaded image (Gradio passes it as the single positional
        argument, so renaming the parameter from the builtin-shadowing
        ``input`` is backward-compatible).

    Returns
    -------
    str
        The generated caption text.
    """
    # The pipeline returns a list of predictions shaped like
    # [{"generated_text": "..."}]; take the first one's text.
    predictions = pipe(image)
    return predictions[0]['generated_text']

# Demo inputs: each entry pairs an image path with the caption the model
# typically produces. The caption is informational only — Gradio is handed
# just the image path below.
examples = [
    ["example1.jpeg", "a dog swimming in the ocean"],
    ["example2.png", "a fairy sitting on a tree branch"],
]

# Assemble the Gradio UI around the captioning function.
iface = gr.Interface(
    fn=launch,
    inputs=gr.Image(type='pil'),  # uploaded image arrives as a PIL object
    outputs="text",  # the caption string
    title="Image Captioning with BLIP",
    description="This application uses the BLIP image-captioning model to generate descriptions for the images you upload. "
                "Simply upload an image, and the model will generate a caption describing the content of the image. "
                "You can also try some pre-loaded examples below.",
    examples=[[path] for path, _caption in examples]  # single-input interface: drop the captions
)

# Start the web server and serve the interface.
iface.launch()