# NOTE(review): the three lines below were "Spaces: / Sleeping / Sleeping" —
# apparently Hugging Face Space status text captured by the page scrape,
# not part of the program. Kept here as a comment for provenance.
# Spaces: Sleeping Sleeping
import spaces
import gradio as gr
from transformers import pipeline
import tempfile
import os

# English-only Whisper checkpoint used for all transcriptions.
MODEL_NAME = "openai/whisper-small.en"
# How many 30-second audio chunks are run through the model per batch.
BATCH_SIZE = 8

# Shared ASR pipeline. chunk_length_s=30 enables chunked long-form
# transcription, so inputs longer than 30 s are handled transparently.
pipe = pipeline(
    model=MODEL_NAME,
    chunk_length_s=30,
)
def transcribe(inputs, task=None):
    """Transcribe an audio file to English text with the Whisper pipeline.

    Args:
        inputs: Filesystem path to the uploaded audio (Gradio passes a path
            because the Audio component uses ``type="filepath"``), or ``None``
            when the user submitted without providing audio.
        task: Unused; the model is English-only so there is no translate/
            transcribe choice. Defaults to ``None`` so the function also works
            when Gradio supplies only the single audio input — the Interface
            below registers just one input component, and the original
            required-two-argument signature would have raised a TypeError
            on every submission.

    Returns:
        The transcribed text as a string.

    Raises:
        gr.Error: If ``inputs`` is ``None`` (no audio submitted).
    """
    if inputs is None:
        raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
    # return_timestamps=True is required for long-form (>30 s) chunked
    # decoding; only the joined plain text is returned to the UI.
    result = pipe(inputs, batch_size=BATCH_SIZE, return_timestamps=True)
    return result["text"]
# Top-level Blocks container that hosts the tabbed UI.
demo = gr.Blocks()

# Single-tab interface: upload an audio file, receive the transcript back
# as plain text.
file_transcribe = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(sources="upload", type="filepath", label="Audio file"),
    ],
    outputs="text",
    title="Whisper small.en: Transcribe Audio",
    allow_flagging="never",
)

with demo:
    gr.TabbedInterface([file_transcribe], ["Audio file"])

# Enable request queuing (needed for long-running transcriptions) and start
# the server.
demo.queue().launch()