from transformers import pipeline, BlipForConditionalGeneration, BlipProcessor
import torch
import torchaudio
import gradio as gr
# Initialize TTS model from Hugging Face
tts_model_name = "suno/bark"
tts = pipeline(task="text-to-speech", model=tts_model_name)
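# Note: Transformers' text-to-speech pipeline returns a dict holding an
# "audio" array and a "sampling_rate" int; generate_caption() below relies on this.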
# Initialize BLIP model for image captioning
model_id = "dblasko/blip-dalle3-img2prompt"
blip_model = BlipForConditionalGeneration.from_pretrained(model_id)
blip_processor = BlipProcessor.from_pretrained(model_id)
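# dblasko/blip-dalle3-img2prompt is a BLIP captioning checkpoint; judging by its
# model id, it is tuned to produce DALL-E 3 style prompts from images.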
def generate_caption(image):
    # Generate a caption for the input image with the BLIP model
    inputs = blip_processor(images=image, return_tensors="pt")
    generated_ids = blip_model.generate(
        pixel_values=inputs.pixel_values,
        max_length=50,
        do_sample=True,  # sampling args (temperature/top_k/top_p) belong to generate(), not batch_decode()
        temperature=0.8,
        top_k=40,
        top_p=0.9,
    )
    generated_caption = blip_processor.batch_decode(generated_ids, skip_special_tokens=True)[0]

    # Use the TTS pipeline to convert the generated caption to audio
    audio_output = tts(generated_caption)
    waveform = torch.as_tensor(audio_output["audio"], dtype=torch.float32)
    if waveform.dim() == 1:
        waveform = waveform.unsqueeze(0)  # torchaudio.save expects (channels, frames)
    audio_path = "generated_audio.wav"
    torchaudio.save(audio_path, waveform, audio_output["sampling_rate"])
    return generated_caption, audio_path
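# Optional local sanity check without the UI (assumes a local test image "example.jpg"):
# from PIL import Image
# print(generate_caption(Image.open("example.jpg")))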
# Create a Gradio interface with an image input, a caption textbox, and an audio player.
# Note: the outputs must match the function's two return values, so the extra
# gr.Button cannot be listed here; gr.Interface renders its own submit button.
demo = gr.Interface(
    fn=generate_caption,
    inputs=gr.Image(type="pil", label="Input image"),
    outputs=[
        gr.Textbox(label="Generated caption"),
        gr.Audio(type="filepath", label="Generated audio"),
    ],
)

demo.launch()