import gradio as gr
from faster_whisper import WhisperModel

# Load the "small" Whisper model once at module import so every request
# reuses it. compute_type='auto' lets faster-whisper pick the best
# precision for the host; num_workers/cpu_threads tune CPU parallelism.
model = WhisperModel("small", compute_type="auto", num_workers=2, cpu_threads=8)
def test(audio):
    """Transcribe an audio file and return the text, one segment per line.

    Args:
        audio: Filesystem path to the audio file (Gradio passes a filepath
            because the input component uses type='filepath').

    Returns:
        The concatenated transcript; each segment's text is followed by a
        newline (so a non-empty result ends with '\n', matching the
        original behavior).
    """
    # transcribe() returns a lazy generator of segments plus metadata;
    # info (detected language, duration, ...) is unused here.
    segments, info = model.transcribe(audio)
    # ''.join over a generator avoids the quadratic cost of repeated
    # string concatenation in a loop.
    return "".join(segment.text + "\n" for segment in segments)
# Wire the transcription function into a simple Gradio UI:
# one audio input (delivered as a file path) -> one text output.
demo = gr.Interface(
    fn=test,
    inputs=gr.Audio(type="filepath"),
    outputs="text",
)

# NOTE(review): share=True publishes a temporary public URL via Gradio's
# tunnel — anyone with the link can use the model. Drop it (or gate it)
# if this should stay local-only.
demo.launch(share=True)