import torch
import gradio as gr
from PIL import Image
import scipy.io.wavfile as wavfile
# Use a pipeline as a high-level helper
from transformers import pipeline
# model_path = "../Models/models--Salesforce--blip-image-captioning-base/snapshots/82a37760796d32b1411fe092ab5d4e227313294b"
# model_path2 = "../Models/models--kakao-enterprise--vits-ljs/snapshots/3bcb8321394f671bd948ebf0d086d694dda95464"
device = "cuda" if torch.cuda.is_available() else "cpu"
# caption_image = pipeline("image-to-text", model=model_path, device=device)
caption_image = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base", device=device)
# narrator = pipeline("text-to-speech", model=model_path2)
narrator = pipeline("text-to-speech", model="kakao-enterprise/vits-ljs")
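# Note: the TTS pipeline above runs on the CPU by default; passing
# device=device (as done for the captioning pipeline) would move it
# to the GPU as well.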
def generate_audio(text):
    # Generate speech audio for the given text
    narrated_text = narrator(text)
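    # The TTS pipeline returns a dict with "audio" (a (1, num_samples)
    # float32 NumPy array) and "sampling_rate" (an int, in Hz).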
    # Save the audio to a WAV file
    wavfile.write("output.wav", rate=narrated_text["sampling_rate"],
                  data=narrated_text["audio"][0])
    # Return the path to the saved audio file
    return "output.wav"
def caption_my_image(pil_image):
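    # The image-to-text pipeline returns a list with one dict per image;
    # "generated_text" holds the caption string.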
    semantics = caption_image(images=pil_image)[0]['generated_text']
    return generate_audio(semantics)
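# A minimal sketch of calling the function directly, outside the Gradio UI
# (assumes a local image at "sample.jpg" -- a hypothetical path):
#
#   from PIL import Image
#   wav_path = caption_my_image(Image.open("sample.jpg"))
#   print(wav_path)  # -> "output.wav"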
gr.close_all()
demo = gr.Interface(fn=caption_my_image,
                    inputs=[gr.Image(label="Select Image", type="pil")],
                    outputs=[gr.Audio(label="Generated Caption")],
                    title="@GenAILearniverse Project 8: Image Captioning",
                    description="This application generates a spoken audio caption for an uploaded image.")
demo.launch()
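# Note: demo.launch(share=True) would additionally expose a temporary public
# URL when running locally; the default above serves the app on localhost
# (or within the Space).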