import os
import io
import base64

from PIL import Image
from transformers import pipeline
import gradio as gr
# Hugging Face API key from the environment (not used by the local pipeline below)
hf_api_key = os.environ['HF_API_KEY']
# Load the image-to-text pipeline with BLIP model
get_completion = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
def image_to_base64_str(pil_image):
    """Encode a PIL image as a base64 string (handy for JSON payloads or data URLs).

    Note: unused by the local pipeline, which accepts PIL images directly.
    """
    byte_arr = io.BytesIO()
    pil_image.save(byte_arr, format='PNG')
    return base64.b64encode(byte_arr.getvalue()).decode('utf-8')
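# One possible use of the helper above (a sketch, not part of this app's flow):
# embed the encoded image in a data URL for an HTML <img> tag or a JSON API that
# expects base64 input. `some_pil_image` is a hypothetical PIL.Image instance.
#
#   data_url = "data:image/png;base64," + image_to_base64_str(some_pil_image)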
def captioner(image):
# The BLIP model expects a PIL image directly
result = get_completion(image)
return result[0]['generated_text']
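# Quick sanity check of the captioner outside Gradio (assumes the example
# images bundled with this Space exist locally):
#
#   print(captioner(Image.open("images/christmas_dog.jpg")))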
demo = gr.Interface(
    fn=captioner,
    inputs=[gr.Image(label="Upload image", type="pil")],
    outputs=[gr.Textbox(label="Caption")],
    title="Image Captioning with BLIP",
    description="Caption any image using the BLIP model",
    flagging_mode="never",  # replaces the deprecated allow_flagging argument
    examples=["images/christmas_dog.jpg", "images/bird_flight.jpg", "images/cow.jpg"],
)
demo.launch(
    share=True,  # creates a temporary public link to the app
    # server_port=int(os.environ.get('PORT3', 7860))  # Uncomment to pin a port
)