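# Speech-to-image demo: records audio from the microphone, transcribes it with
# Whisper, and renders the transcript with Stable Diffusion via the
# "speech_to_image_diffusion" community pipeline.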
import os

import gradio as gr
import librosa  # assumed extra dependency: used below to resample mic audio to 16 kHz
import torch
from diffusers import DiffusionPipeline
from transformers import (
    WhisperForConditionalGeneration,
    WhisperProcessor,
)
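
# Token for the gated CompVis/stable-diffusion-v1-4 weights, read from the
# Space's secrets (an HF_TOKEN_SD secret is assumed to be configured).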
MY_SECRET_TOKEN = os.environ.get("HF_TOKEN_SD")

device = "cuda" if torch.cuda.is_available() else "cpu"
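
# Whisper (small) handles the speech-to-text half of the demo.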
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small").to(device)
processor = WhisperProcessor.from_pretrained("openai/whisper-small")
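
# The community pipeline transcribes the incoming audio with the Whisper model
# above and uses the resulting text as the Stable Diffusion prompt.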
diffuser_pipeline = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="speech_to_image_diffusion",
    speech_model=model,
    speech_processor=processor,
    use_auth_token=MY_SECRET_TOKEN,
    revision="fp16",
    torch_dtype=torch.float16,
)
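
# Attention slicing lowers peak VRAM usage at a small cost in inference speed.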
diffuser_pipeline.enable_attention_slicing()
diffuser_pipeline = diffuser_pipeline.to(device)

# --------------------------------------------
# GRADIO SETUP
audio_input = gr.Audio(source="microphone", type="numpy")
image_output = gr.Image()


def speech_to_image(audio):
    # Gradio's numpy Audio component yields a (sample_rate, data) tuple.
    sample_rate, speech_data = audio
    # Whisper expects mono float audio at 16 kHz: normalize the int16 PCM,
    # downmix stereo if present, then resample.
    speech_data = speech_data.astype("float32") / 32768.0
    if speech_data.ndim > 1:
        speech_data = speech_data.mean(axis=1)
    speech_data = librosa.resample(speech_data, orig_sr=sample_rate, target_sr=16000)
    output = diffuser_pipeline(speech_data)
    return output.images[0]

demo = gr.Interface(fn=speech_to_image, inputs=audio_input, outputs=image_output)
demo.launch()