# [file name]: core/nodes/helper_nodes.py
# Add this as the FIRST lines of code (after docstrings)
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent))
import logging
from typing import Dict, Any, List
from langchain_core.runnables import RunnableConfig
from langchain_core.messages import HumanMessage
from models.state_models import MultiCountryLegalState
from core.nodes.base_node import BaseNode
from core.prompts.prompt_templates import PromptTemplates
logger = logging.getLogger(__name__)
class HelperNodes(BaseNode):
    """Helper nodes for out-of-scope queries, unclear routing, and conversation summarization.

    Each public ``*_node`` coroutine is a graph node: it receives the shared
    ``MultiCountryLegalState`` plus a ``RunnableConfig`` and returns a partial
    state update (dict) for the graph to merge.
    """

    def __init__(self, llm):
        """Store the LLM client and load the shared prompt templates.

        Args:
            llm: LangChain-compatible chat model exposing ``ainvoke`` (used
                only by the summarization path).
        """
        # NOTE(review): super().__init__() is not called here — confirm BaseNode
        # needs no initialization. The helpers self._get_timestamp() and
        # self._create_error_state() used below presumably come from BaseNode.
        self.llm = llm
        self.prompts = PromptTemplates()

    async def out_of_scope_node(self, state: MultiCountryLegalState, config: RunnableConfig) -> Dict[str, Any]:
        """Handle out-of-scope questions - redirect the user back to the legal domain.

        Returns a state update with a canned French redirect message; no legal
        search is performed.
        """
        try:
            logger.info("🚫 Out of scope question detected")
            redirect_message = {
                "role": "assistant",
                "content": (
                    "Je suis un assistant juridique spécialisé dans le droit du Bénin et de Madagascar. "
                    "Je ne peux répondre qu'aux questions relatives au droit et aux procédures juridiques.\n\n"
                    "Comment puis-je vous aider avec vos questions juridiques ?"
                ),
                "meta": {
                    "is_out_of_scope": True,
                    "timestamp": self._get_timestamp()
                }
            }
            return {
                "messages": [redirect_message],
                "current_country": "out_of_scope",
                "search_results": "Out of scope query - no legal search performed"
            }
        except Exception as e:
            logger.error(f"Error in out_of_scope handler: {str(e)}")
            return self._create_error_state(f"Error in out_of_scope: {str(e)}")

    async def unclear_route_node(self, state: MultiCountryLegalState, config: RunnableConfig) -> Dict[str, Any]:
        """Handle unclear routing cases - ask the user to clarify an ambiguous legal query.

        Returns a state update containing the standard clarification message
        (typically asking which country's law applies).
        """
        try:
            # FIX: the original dumped the state and read "route_explanation"
            # into a local that was never used — dead code removed.
            clarification_msg = {
                "role": "assistant",
                "content": self.prompts.get_clarification_message(),
                "meta": {
                    "requires_clarification": True,
                    "timestamp": self._get_timestamp()
                }
            }
            return {
                "messages": [clarification_msg],
                "search_results": "Country clarification needed"
            }
        except Exception as e:
            logger.error(f"Error in unclear route handling: {str(e)}")
            return self._create_error_state(f"Error in unclear route: {str(e)}")

    async def conversation_summarization_node(self, state: MultiCountryLegalState, config: RunnableConfig) -> Dict[str, Any]:
        """Generate a summary of the conversation history and return it as a message."""
        try:
            s = state.model_dump()
            messages = s.get("messages", [])
            logger.info(f"📋 Generating conversation summary for {len(messages)} messages")
            summary = await self._generate_conversation_summary(messages)
            return {
                "messages": [{
                    "role": "assistant",
                    "content": summary,
                    "meta": {
                        "is_summary": True,
                        "conversation_length": len(messages),
                        "timestamp": self._get_timestamp()
                    }
                }],
                "search_results": "Conversation summary generated - no legal search performed"
            }
        except Exception as e:
            logger.error(f"Error in conversation summarization: {str(e)}")
            return self._create_error_state(f"Error in summarization: {str(e)}")

    async def _generate_conversation_summary(self, messages: List[Dict]) -> str:
        """Use the LLM to summarize the conversation; fall back to plain stats on failure.

        Args:
            messages: Full message history (dicts with ``role``/``content`` keys);
                only ``user`` and ``assistant`` messages are summarized.

        Returns:
            French-language summary text.
        """
        conversation_messages = [
            msg for msg in messages
            if msg.get("role") in ["user", "assistant"]
        ]
        if len(conversation_messages) <= 2:
            # Too little history to summarize meaningfully.
            return "Notre conversation vient juste de commencer. Nous n'avons pas encore beaucoup échangé."
        # FIX: build the transcript with a single "".join instead of repeated
        # string += in a loop (the original also used enumerate() and discarded
        # the index). The resulting string is byte-identical.
        conversation_text = "".join(
            f"{'Utilisateur' if msg.get('role') == 'user' else 'Assistant'}: {msg.get('content', '')}\n\n"
            for msg in conversation_messages
        )
        summary_prompt = f"""
Vous êtes un assistant juridique. Résumez la conversation suivante entre l'utilisateur et vous-même.
**CONVERSATION:**
{conversation_text}
**INSTRUCTIONS:**
- Faites un résumé concis et clair
- Mettez en évidence les points juridiques principaux discutés
- Mentionnez les pays concernés (Bénin/Madagascar) si pertinents
- Gardez un ton professionnel mais accessible
- Maximum 5-7 phrases
**RÉSUMÉ:**
"""
        try:
            response = await self.llm.ainvoke([HumanMessage(content=summary_prompt)])
            # Some LLM wrappers return a message object with .content, others a bare string.
            return response.content if hasattr(response, 'content') else str(response)
        except Exception as e:
            logger.error(f"LLM summarization failed: {e}")
            # Fallback: basic message statistics instead of an LLM-generated summary.
            user_messages = [m for m in conversation_messages if m.get("role") == "user"]
            assistant_messages = [m for m in conversation_messages if m.get("role") == "assistant"]
            return f"""**Résumé de notre conversation:**
- **Échanges totaux**: {len(conversation_messages)} messages
- **Questions de l'utilisateur**: {len(user_messages)}
- **Réponses fournies**: {len(assistant_messages)}
- **Dernier échange**: {conversation_messages[-1].get('content', '')[:100]}...
*Pour un résumé détaillé, veuillez reposer votre question.*"""