import os

import gradio as gr
import torch
from huggingface_hub import login
from transformers import pipeline

# Optional: authenticate with the Hugging Face Hub. Only needed when the
# model is gated/private; the token is read from the HF_TOKEN env var.
# login(token=os.environ.get("HF_TOKEN"))

# Load the Uzbek Whisper ASR pipeline once at module import so every
# request reuses the same model instead of reloading it per call.
pipe = pipeline("automatic-speech-recognition", model="aisha-org/Whisper-Uzbek")
def transcribe_audio(audio_file):
    """Transcribe an audio file to text with the Uzbek Whisper pipeline.

    Args:
        audio_file: Filesystem path to the uploaded/recorded audio clip,
            as supplied by the ``gr.Audio(type="filepath")`` input.
            Gradio passes ``None`` when the user submits without audio.

    Returns:
        The transcribed text, or an empty string when no audio was given.
    """
    # Guard against an empty submission: Gradio hands the callback None
    # when no audio is recorded/uploaded, which would crash the pipeline.
    if audio_file is None:
        return ""
    result = pipe(audio_file)
    return result["text"]
# Wire the transcription handler into a minimal web UI: one audio input
# (passed to the handler as a file path), one text output.
audio_input = gr.Audio(type="filepath")
iface = gr.Interface(
    fn=transcribe_audio,
    inputs=audio_input,
    outputs="text",
    title="Audio Transcription",
)
iface.launch()