pooadi
Initial commit
0f2351b
raw
history blame contribute delete
814 Bytes
from transformers import pipeline
import gradio as gr
# Pre-trained BLIP captioning pipeline; weights are fetched/loaded at startup.
captioner = pipeline(task="image-to-text", model="Salesforce/blip-image-captioning-base")
def get_image_caption(image):
    """Generate a natural-language caption for an uploaded image.

    Parameters
    ----------
    image : PIL.Image.Image | None
        Image supplied by the Gradio `gr.Image(type="pil")` input;
        None when the user has not uploaded anything.

    Returns
    -------
    str
        The model-generated caption, or a user-facing message when no
        image was provided or the model produced no output.
    """
    if image is None:
        return "Please upload an image."
    results = captioner(image)
    # Guard against an empty pipeline result so the UI shows a message
    # instead of an unhandled IndexError.
    if not results:
        return "Could not generate a caption for this image."
    return results[0]["generated_text"]
# Assemble the Gradio UI: image in, caption out, one trigger button.
with gr.Blocks() as image_captioning_app:
    gr.Markdown("## 🖼️ Image Captioning App")

    with gr.Row():
        uploaded_image = gr.Image(type="pil", label="Upload an Image")
        caption_box = gr.Textbox(label="Image Caption")

    caption_btn = gr.Button("Generate Caption")
    caption_btn.click(
        fn=get_image_caption,
        inputs=uploaded_image,
        outputs=caption_box,
    )

image_captioning_app.launch()