"""Gradio front-end for the ALM (Audio-Language Model) prototype.

Upload an audio clip, ask a free-form question about it, and receive
both a structured audio-understanding summary and an AI-reasoned answer.
"""

import gradio as gr

from alm_pipeline import analyze_audio
from reasoning import generate_reasoning


def full_pipeline(audio, question):
    """Run audio analysis followed by question answering.

    Args:
        audio: Filepath of the uploaded audio clip (``gr.Audio`` with
            ``type="filepath"``), or ``None`` if nothing was uploaded.
        question: Free-form user question about the audio.

    Returns:
        A ``(summary, answer)`` tuple — the audio-understanding summary
        produced by ``analyze_audio`` and the reasoning answer from
        ``generate_reasoning``. When no audio is provided, returns a
        placeholder message and an empty answer instead.
    """
    # Guard clause: nothing to analyze without an uploaded file.
    if audio is None:
        return "No audio uploaded", ""
    summary = analyze_audio(audio)
    answer = generate_reasoning(summary, question)
    return summary, answer


# Module-level UI definition so other code can import `ui` (e.g. for
# mounting inside a larger FastAPI app) without side effects.
ui = gr.Interface(
    fn=full_pipeline,
    inputs=[
        gr.Audio(sources=["upload"], type="filepath"),
        gr.Textbox(label="Ask a Question About the Audio"),
    ],
    outputs=[
        gr.JSON(label="Audio Understanding Summary"),
        gr.Textbox(label="Reasoning Answer"),
    ],
    title="ALM Prototype – Hugging Face Edition",
    description="Upload audio → detect speech, sounds → ask any question → get AI reasoning.",
)

# Only start the server when run as a script — importing this module
# should not launch a web server as a side effect.
if __name__ == "__main__":
    ui.launch()