Spaces: Running on Zero
| """Minimum code to run OpenAI Whisper on Hugging Face (incl. ZeroGPU)""" | |
| from functools import lru_cache | |
| import whisper | |
| import gradio as gr | |
| import spaces | |
@lru_cache(maxsize=1)
def load_model():
    """Return the Whisper "base" model, loading it once and caching it.

    The `lru_cache` decorator ensures the model weights are loaded from
    disk only on the first call; every subsequent transcription request
    reuses the same in-memory model instead of reloading it.

    Returns:
        The loaded `whisper` base model.
    """
    return whisper.load_model("base")
@spaces.GPU
def transcribe(file_path: str) -> str:
    """Transcribe an audio file with Whisper.

    The `@spaces.GPU` decorator is required on ZeroGPU Spaces: a GPU is
    only attached for the duration of calls to decorated functions.
    Without it the `spaces` import (L12) is dead and inference runs on
    CPU (or fails) despite the Space targeting ZeroGPU.

    Args:
        file_path: Path to the audio file, passed from Gradio.

    Returns:
        The transcribed text.
    """
    model = load_model()
    result = model.transcribe(file_path)
    return result["text"]
# HTML blurb shown under the app title; kept byte-identical to the
# original description string.
_DESCRIPTION_HTML = """
<p>
This repository is the most minimal example of a working OpenAI Whisper pipeline using ZeroGPU, and only ZeroGPU <sup><a href="#fn1" style="text-decoration: none;">[1]</a></sup>.
</p>
<p>
Per the Unix philosophy: <q>do one thing and do it well</q>.
</p>
<p id="fn1">
<small>[1] Because the build is broken on paid hardware, something about a Gradio version not existing, PRs welcome</small>
</p>
"""

# Wire the transcription function into a single-input, single-output
# Gradio UI: audio file in, plain-text transcription out.
interface = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(type="filepath", label="Upload audio"),
    outputs=gr.Textbox(label="Transcription Output"),
    title="Minimum OpenAI Whisper",
    description=_DESCRIPTION_HTML,
)

# Enable the request queue (needed for ZeroGPU scheduling) and serve.
interface.queue().launch()