| import json |
| from schemas.session_summary import SessionSummary, SessionSummaryContent, MessageRange |
| from llm.gemini_client import GeminiClient |
|
|
|
|
class ConversationSummarizer:
    """Maintain a rolling, structured summary of a chat session.

    Condenses a window of messages into a ``SessionSummary`` via an LLM
    (``GeminiClient`` by default), optionally merging in a previously
    generated summary so short-term memory accumulates across calls.
    """

    def __init__(self, llm=None):
        # Accept an injected client so tests can pass a fake LLM.
        self.llm = llm if llm else GeminiClient()

    @staticmethod
    def _build_prompt(previous_summary_json: str, conversation_text: str) -> str:
        """Assemble the summarization prompt (text kept verbatim from spec)."""
        return f"""
You are summarizing a chat session for short-term memory.
If there is a previous summary, incorporate its information to update the new summary.

The output MUST be valid JSON.
The output MUST strictly conform to the schema below.
Do NOT add comments.
Do NOT add markdown.
Do NOT add trailing text.

SCHEMA (copy exactly, keep all keys):

{{
"session_summary": {{
"user_profile": {{
"prefs": [],
"constraints": []
}},
"key_facts": [],
"decisions": [],
"open_questions": [],
"todos": []
}}
}}

Guidelines:
- prefs: user preferences about style, tools, or behavior
- constraints: hard limitations or requirements
- key_facts: confirmed facts established in the conversation
- decisions: choices that have been explicitly made
- open_questions: unresolved questions
- todos: concrete next actions

Rules:
- Preserve important existing information
- Add new facts, decisions, questions, and todos
- Remove items that are clearly resolved or obsolete
- Return the FULL updated summary

Existing/Previous session summary:
{previous_summary_json}

Conversation:
{conversation_text}
"""

    def summarize(
        self,
        messages,
        msg_from,
        msg_to,
        previous_summary: SessionSummary | None = None,
    ) -> SessionSummary | None:
        """Summarize ``messages`` into a new ``SessionSummary``.

        Args:
            messages: Iterable of objects exposing ``.role`` and ``.content``.
            msg_from: Index of the first message covered by this summary.
            msg_to: Index of the last message covered by this summary.
            previous_summary: Prior summary to merge into the update, if any.

        Returns:
            The updated ``SessionSummary``, or ``None`` when the LLM response
            fails schema validation.
        """
        conversation_text = "\n".join(
            f"{m.role.upper()}: {m.content}" for m in messages
        )

        # BUG FIX: the original interpolated the raw dict from model_dump(),
        # which renders as a Python repr (single quotes, True/None) — not the
        # valid JSON the prompt demands. Serialize properly with pydantic v2's
        # model_dump_json.
        if previous_summary is not None:
            previous_summary_json = previous_summary.model_dump_json(by_alias=True)
        else:
            previous_summary_json = "None"

        prompt = self._build_prompt(previous_summary_json, conversation_text)

        # Deterministic decoding + enforced JSON schema on the provider side.
        response = self.llm.generate(
            prompt,
            temperature=0,
            top_p=1.0,
            top_k=1,
            response_schema=SessionSummaryContent,
            response_mime_type="application/json",
        )

        try:
            summary_content = SessionSummaryContent.model_validate_json(response)
        except Exception as e:
            # Best-effort boundary: a malformed response yields None rather
            # than crashing the caller's conversation loop.
            print("[summarize] Failed to parse summary:", e)
            return None

        return SessionSummary(
            session_summary=summary_content,
            message_range_summarized=MessageRange(from_=msg_from, to=msg_to),
        )
|
|