Spaces:
Runtime error
Runtime error
Use whisper-small and remove source param
Browse files
app.py
CHANGED
|
@@ -2,27 +2,26 @@ import gradio as gr
|
|
| 2 |
import torch
|
| 3 |
from transformers import pipeline
|
| 4 |
|
| 5 |
-
# 1) Pipeline de Whisper para ES → texto ES
|
| 6 |
device = 0 if torch.cuda.is_available() else -1
|
| 7 |
asr = pipeline(
|
| 8 |
"automatic-speech-recognition",
|
| 9 |
-
model="openai/whisper-
|
| 10 |
device=device,
|
| 11 |
-
generate_kwargs={"task":"transcribe","language":"es"}
|
| 12 |
)
|
| 13 |
|
| 14 |
# 2) Función de transcripción
|
| 15 |
def transcribe(audio_path):
|
| 16 |
-
|
| 17 |
-
return result["text"]
|
| 18 |
|
| 19 |
# 3) Interfaz Gradio
|
| 20 |
demo = gr.Interface(
|
| 21 |
fn=transcribe,
|
| 22 |
-
inputs=gr.Audio(
|
| 23 |
outputs=gr.Textbox(label="Transcripción"),
|
| 24 |
title="Audio→Texto en Español",
|
| 25 |
-
description="Transcribe audio en español con Whisper"
|
| 26 |
)
|
| 27 |
|
| 28 |
if __name__ == "__main__":
|
|
|
|
| 2 |
import torch
|
| 3 |
from transformers import pipeline
|
| 4 |
|
| 5 |
+
# 1) Whisper-small ASR pipeline: Spanish speech -> Spanish text.
# Use GPU 0 when CUDA is available, otherwise fall back to CPU (-1).
device = 0 if torch.cuda.is_available() else -1

asr = pipeline(
    "automatic-speech-recognition",
    model="openai/whisper-small",  # small checkpoint keeps CPU inference feasible
    device=device,
    generate_kwargs=dict(task="transcribe", language="es"),
)
|
| 13 |
|
| 14 |
# 2) Función de transcripción
|
| 15 |
def transcribe(audio_path):
    """Run the Whisper ASR pipeline on one audio file.

    Parameters
    ----------
    audio_path : str
        Filesystem path to the uploaded audio clip (Gradio passes a
        temp-file path because the input uses type="filepath").

    Returns
    -------
    str
        The transcribed Spanish text.
    """
    result = asr(audio_path)
    return result["text"]
|
|
|
|
| 17 |
|
| 18 |
# 3) Gradio UI: one audio input (delivered as a file path), one text output.
#    Note: newer Gradio versions dropped gr.Audio's `source=` parameter,
#    so only `type` and `label` are passed here.
audio_in = gr.Audio(type="filepath", label="Sube audio (ES)")
text_out = gr.Textbox(label="Transcripción")

demo = gr.Interface(
    fn=transcribe,
    inputs=audio_in,
    outputs=text_out,
    title="Audio→Texto en Español",
    description="Transcribe audio en español con Whisper-small",
)
|
| 26 |
|
| 27 |
if __name__ == "__main__":
|