from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan, pipeline
from datasets import load_dataset
import torch
import soundfile as sf
import gradio as gr
import tempfile

# Summarization pipeline (BART fine-tuned on CNN/DailyMail)
summarizer = pipeline("summarization", model="facebook/bart-large-cnn")

# SpeechT5 text-to-speech components, loaded as shown on the model card
processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

# Speaker embedding (x-vector) that conditions the voice of the generated speech
speaker_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(speaker_dataset[0]["xvector"]).unsqueeze(0)


def summarize_text_and_speak(prompt):
    # Summarize the input text with BART
    summary = summarizer(prompt, max_length=150, min_length=30, do_sample=False)
    summary_text = summary[0]["summary_text"]

    # Tokenize the summary and synthesize a 16 kHz waveform with SpeechT5
    inputs = processor(text=summary_text, return_tensors="pt")
    speech_audio = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)

    # Write the waveform to a temporary WAV file that Gradio can serve
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
        sf.write(tmp_file.name, speech_audio.numpy(), samplerate=16000)
        audio_path = tmp_file.name

    return summary_text, audio_path


interface = gr.Interface(
    fn=summarize_text_and_speak,
    inputs=gr.Textbox(lines=10, label="Input text"),
    outputs=[gr.Textbox(label="Summary"), gr.Audio(label="Audio")],
)

interface.launch(share=True)
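
# Two optional tweaks, sketched here as comments (assumptions, not part of
# the original script):
#
# 1. facebook/bart-large-cnn accepts at most 1024 input tokens, so very long
#    prompts can raise an error. Passing truncation=True to the pipeline call
#    clips the input to the model's maximum length instead:
#
#        summary = summarizer(prompt, max_length=150, min_length=30,
#                             do_sample=False, truncation=True)
#
# 2. The cmu-arctic-xvectors validation split holds x-vectors for several
#    CMU ARCTIC speakers; picking a different row changes the synthesized
#    voice, e.g. index 7306, which Hugging Face examples often use:
#
#        speaker_embeddings = torch.tensor(
#            speaker_dataset[7306]["xvector"]).unsqueeze(0)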