import gradio as gr
from transformers import pipeline
import torch
# Check if GPU is available (Spaces provides GPU)
# transformers convention: device=0 selects the first CUDA device, -1 runs on CPU.
device = 0 if torch.cuda.is_available() else -1
# Initialize the model
# NOTE: this downloads/loads BLIP at import time, so the first startup is slow;
# the pipeline object is shared by every request handled below.
captioner = pipeline("image-to-text",
                     model="Salesforce/blip-image-captioning-base",
                     device=device)
def generate_caption(image):
    """Return a BLIP-generated caption for *image*.

    Any failure (bad input, model error) is reported as a plain error
    string rather than raised, so the Gradio UI always gets text back.
    """
    try:
        predictions = captioner(image)
        return predictions[0]['generated_text']
    except Exception as exc:  # surface the problem to the user as text
        return f"Error generating caption: {str(exc)}"
# Create Gradio interface without examples: one image input, one text output,
# wired to generate_caption above.
interface = gr.Interface(
    fn=generate_caption,
    inputs=gr.Image(type="pil"),
    outputs=gr.Textbox(label="Generated Caption"),
    title="Image Caption Generator",
    description="Upload an image and get an AI-generated caption!",
    article="Built using the BLIP image captioning model from Salesforce.",
)

# Launch the app (fix: removed the stray trailing "|" left over from the
# scraped source, which was a SyntaxError).
interface.launch()