Spaces:
Sleeping
Sleeping
| from __future__ import annotations | |
| import json | |
| from typing import Any | |
| from app.prompts_rag import EVIDENCE_SUMMARY_SYSTEM, EVIDENCE_SUMMARY_USER_TEMPLATE | |
| from app.llm_client import chat_completion_json, safe_json_loads | |
def build_evidence_summary_with_llm(
    client,
    model: str,
    raw_agent_results: list[dict[str, Any]],
    merged_events: list[dict[str, Any]],
    audio_agent_results: list[dict[str, Any]] | None = None,
    document_agent_results: list[dict[str, Any]] | None = None,
) -> dict[str, Any]:
    """Produce an evidence summary by delegating to the configured LLM.

    The agent results and merged events are serialized to pretty-printed
    JSON, substituted into the evidence-summary user prompt, and sent to
    the model alongside the fixed system prompt. The model's JSON reply
    is parsed leniently via ``safe_json_loads``.

    Args:
        client: LLM client handle passed through to ``chat_completion_json``.
        model: Model identifier to invoke.
        raw_agent_results: Per-agent raw analysis results.
        merged_events: Timeline events merged across agents.
        audio_agent_results: Optional audio-agent results; ``None`` becomes ``[]``.
        document_agent_results: Optional document-agent results; ``None`` becomes ``[]``.

    Returns:
        The parsed JSON object returned by the model.
    """
    # Assemble the model input; optional sections default to empty lists so
    # the serialized payload always carries all four keys.
    input_payload: dict[str, Any] = {
        "raw_agent_results": raw_agent_results,
        "merged_events": merged_events,
        "audio_agent_results": [] if audio_agent_results is None else audio_agent_results,
        "document_agent_results": [] if document_agent_results is None else document_agent_results,
    }
    serialized = json.dumps(input_payload, ensure_ascii=False, indent=2)
    prompt = EVIDENCE_SUMMARY_USER_TEMPLATE.replace("<<INPUT_JSON>>", serialized)

    # Low temperature keeps the summary deterministic; generous timeout
    # because the payload can be large.
    response_text = chat_completion_json(
        client=client,
        model=model,
        messages=[
            {"role": "system", "content": EVIDENCE_SUMMARY_SYSTEM},
            {"role": "user", "content": prompt},
        ],
        temperature=0.1,
        timeout=180,
    )
    return safe_json_loads(response_text)