# app.py
"""Gradio app: Hindi speech-to-text via a fine-tuned Whisper JAX model.

The original file was collapsed onto a single comment line (so the whole
script was inert); this restores it as a runnable module.
"""

import gradio as gr
import jax.numpy as jnp
# NOTE: "Pipline" (sic) is the class name as spelled by the whisper_jax library.
from whisper_jax import FlaxWhisperPipline

# Load Whisper JAX model once (on startup).
# bfloat16 halves memory use versus float32 with minimal accuracy loss.
pipeline = FlaxWhisperPipline(
    "parthiv11/indic_whisper_nodcil",
    dtype=jnp.bfloat16,
)


# Function connected to Gradio
def transcribe(audio_file):
    """Transcribe an uploaded or recorded audio file.

    Parameters
    ----------
    audio_file : str | None
        Filesystem path provided by ``gr.Audio(type="filepath")``; ``None``
        when the user submits without supplying audio.

    Returns
    -------
    str
        The transcription text, or a prompt message when no file was given.
    """
    if audio_file is None:
        return "Please upload an audio file."
    result = pipeline(audio_file)
    # whisper_jax normally returns a dict containing a "text" key; fall back
    # to the raw result in case a plain string is returned instead.
    return result["text"] if isinstance(result, dict) else result


# Build UI (this part comes from Playground export)
demo = gr.Interface(
    fn=transcribe,
    inputs=gr.Audio(type="filepath"),
    outputs="text",
    title="Hindi Whisper ",
    description="Upload or record Hindi speech and get transcription",
)

if __name__ == "__main__":
    # Bind to all interfaces on port 7860 (the Gradio/Spaces default);
    # show_error surfaces tracebacks in the UI for easier debugging.
    demo.launch(server_name="0.0.0.0", server_port=7860, show_error=True)