import gradio as gr
from transformers import pipeline

# Pull the fine-tuned Pashto ASR checkpoint from the Hugging Face Hub once at startup,
# so every request reuses the same loaded model.
pipe = pipeline("automatic-speech-recognition", model="uzair0/Katib-ASR")


def transcribe_audio(audio_filepath):
    """Transcribe a recorded audio file with the Katib ASR pipeline.

    Args:
        audio_filepath: Path to the recorded audio clip (Gradio passes a
            filepath because the Audio component uses type="filepath"), or
            None when the user submitted without recording.

    Returns:
        The transcribed text, or a warning string when no audio was provided.
    """
    # Guard clause: nothing recorded yet.
    if audio_filepath is None:
        return "⚠️ Please record some audio first!"
    # Force Pashto transcription (as opposed to translation or auto-detect).
    decode_options = {"language": "pashto", "task": "transcribe"}
    output = pipe(audio_filepath, generate_kwargs=decode_options)
    return output["text"]


# Minimal microphone-in / text-out UI wrapped around the transcription function.
demo = gr.Interface(
    fn=transcribe_audio,
    inputs=gr.Audio(sources=["microphone"], type="filepath", label="Record Pashto"),
    outputs=gr.Textbox(label="Katib ASR Transcription", lines=3),
    title="🎙️ Katib ASR: Pashto Speech Recognition",
    description="Click the Record button below, speak Pashto into your microphone, and see the result!",
)

demo.launch()