Spaces:
Runtime error
Runtime error
File size: 563 Bytes
0435fec |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 |
import gradio as gr
import whisper
# Load the Whisper ASR model once at startup (shared by all requests).
# Model size trades speed for accuracy: "tiny" is fastest, "base" is a
# middle ground, and "small" (used here) is slower but more accurate.
model = whisper.load_model("small")
def transcribe(audio):
    """Transcribe a recorded or uploaded audio clip to text.

    Args:
        audio: Filesystem path to the audio file (Gradio ``type="filepath"``),
            or ``None`` when no audio was provided.

    Returns:
        The transcribed text, or a prompt asking the user for a file.
    """
    # Guard clause: nothing to transcribe.
    if audio is None:
        return "Please upload an audio file."
    # Whisper returns a dict; the full transcript lives under "text".
    return model.transcribe(audio)["text"]
# Wire the transcription function into a simple Gradio UI.
# Audio can come from the microphone or an uploaded file; it is passed to
# `transcribe` as a filesystem path (type="filepath").
app = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(sources=["microphone", "upload"], type="filepath"),
    outputs="textbox",
    title="🎙️ Whisper Speech-to-Text",
    # Fix: the description previously claimed the "Tiny" model, but the
    # script actually loads the "small" model above.
    description="Transcribe audio to text using OpenAI Whisper (Small model).",
)
app.launch()
|