from transformers import pipeline

# Text2text model used as a reasoner over structured audio summaries.
# NOTE(review): flan-t5-large is ~780M parameters and is loaded eagerly at
# import time, so importing this module is slow/heavy -- confirm whether
# lazy loading is acceptable for callers before changing it.
reasoner = pipeline("text2text-generation", model="google/flan-t5-large")


def generate_reasoning(summary, question):
    """Answer ``question`` about an audio clip using its structured summary.

    Builds a prompt from the audio cues in ``summary`` and runs it through
    the module-level ``reasoner`` pipeline.

    Parameters
    ----------
    summary : dict
        Must contain the keys ``'transcription'``, ``'sound_event'``,
        ``'emotion'`` and ``'speakers'``; a missing key raises ``KeyError``
        (this is the original contract and is deliberately preserved).
    question : str
        The question to answer about the audio.

    Returns
    -------
    str
        The model's generated reasoning-based answer.

    Raises
    ------
    KeyError
        If any expected key is absent from ``summary``.
    """
    prompt = f"""
    Audio Summary:
    Speech: {summary['transcription']}
    Main Sound Event: {summary['sound_event']}
    Emotion: {summary['emotion']}
    Speakers: {summary['speakers']}
    Question: {question}
    Provide a detailed reasoning-based answer using the audio cues.
    """
    # NOTE(review): for this pipeline, max_length bounds prompt + generated
    # tokens combined, so a long summary can truncate the answer. Switching
    # to max_new_tokens=200 would bound only the output -- confirm intent
    # before changing, as it alters generation behavior.
    result = reasoner(prompt, max_length=200)[0]["generated_text"]
    return result