Cyril Dupland committed
Commit · 043f287
Parent(s): a4ca4b2
Add SynthesisAgent functionality to the A3 workflow: new synthesis prompts, a synthesis_model field on A3Payload, a synthesis_output entry in AgentState, and a synthesis_node that generates executive summaries. The agent service now supports a dedicated LLM configuration for synthesis.
Files changed:
- domain/payloads/a3.py +7 -0
- graphs/nodes/synthesis_node.py +125 -0
- graphs/prompts/a3_prompts.py +42 -0
- graphs/state.py +1 -0
- graphs/workflows/agent_a3.py +18 -6
- services/agent_service.py +57 -4
domain/payloads/a3.py
CHANGED
@@ -63,4 +63,11 @@ class A3Payload(BaseAgentPayload):
         description="Modèle LLM spécifique pour le nœud AnalysisAgent. "
                     "Si non spécifié, utilise le modèle par défaut de la requête."
     )
+
+    # Modèle optionnel pour SynthesisAgent
+    synthesis_model: Optional[str] = Field(
+        default=None,
+        description="Modèle LLM spécifique pour le nœud SynthesisAgent. "
+                    "Si non spécifié, utilise le modèle par défaut de la requête."
+    )
 
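For reference, the new field lets a request pin a dedicated model for the synthesis step. A minimal construction sketch, not part of the commit (concern_name as a payload field, the model string, and any other required BaseAgentPayload fields are assumptions):

payload = A3Payload(
    concern_name="Rendement",        # assumed field carried in the payload
    analysis_model=None,             # analysis keeps the request's default LLM
    synthesis_model="<model-name>",  # placeholder; must match a ModelName value
)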
graphs/nodes/synthesis_node.py
ADDED
@@ -0,0 +1,125 @@
+"""SynthesisAgent node for A3 workflow - generates executive summary."""
+from typing import Callable, Dict, Any
+from langchain_core.language_models.chat_models import BaseChatModel
+from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
+
+from graphs.state import AgentState
+from graphs.prompts.a3_prompts import SYNTHESIS_SYSTEM_PROMPT, SYNTHESIS_HUMAN_PROMPT
+from graphs.utils.usage import extract_usage_metadata, normalize_usage
+
+
+def _extract_model_name(llm: BaseChatModel) -> str:
+    """Extract model name from LLM instance."""
+    if hasattr(llm, "model_name"):
+        return getattr(llm, "model_name")
+    if hasattr(llm, "model"):
+        model = getattr(llm, "model")
+        if isinstance(model, str):
+            return model
+    return llm.__class__.__name__
+
+
+def create_synthesis_node(llm: BaseChatModel) -> Callable[[AgentState], dict]:
+    """
+    Factory returning a node that produces an executive synthesis of the analysis.
+
+    This node:
+    1. Takes the analysis_output from the state
+    2. Generates a concise executive summary (300-500 chars)
+    3. Returns the synthesis in the state
+
+    Args:
+        llm: Language model to use for synthesis
+
+    Returns:
+        Node function that takes AgentState and returns updated state
+    """
+
+    def _run(state: AgentState) -> dict:
+        print("\n" + "="*60, flush=True)
+        print("[SYNTHESIS_AGENT] Node started", flush=True)
+        print("="*60, flush=True)
+
+        payload = state.get("payload", {})
+        analysis_output = state.get("analysis_output", "")
+
+        concern_name = payload.get("concern_name", "")
+
+        print(f"[SYNTHESIS_AGENT] Concern: {concern_name}", flush=True)
+        print(f"[SYNTHESIS_AGENT] Analysis output length: {len(analysis_output)} chars", flush=True)
+
+        # Check if we have an analysis to synthesize
+        if not analysis_output:
+            print("[SYNTHESIS_AGENT] WARNING: No analysis_output found in state", flush=True)
+            return {
+                "synthesis_output": "",
+                "agent_output": state.get("agent_output", {})
+            }
+
+        # Build messages for synthesis
+        system_content = SYNTHESIS_SYSTEM_PROMPT.format(
+            concern_name=concern_name
+        )
+
+        human_content = SYNTHESIS_HUMAN_PROMPT.format(
+            concern_name=concern_name,
+            analysis_output=analysis_output
+        )
+
+        messages = [
+            SystemMessage(content=system_content),
+            HumanMessage(content=human_content)
+        ]
+
+        # Invoke LLM (no tools needed for synthesis)
+        print(f"[SYNTHESIS_AGENT] Invoking LLM...", flush=True)
+        response = llm.invoke(messages)
+        print(f"[SYNTHESIS_AGENT] LLM response received", flush=True)
+
+        # Extract usage metadata
+        usage_raw = extract_usage_metadata(response)
+        usage_normalized = normalize_usage(usage_raw)
+
+        print(f"[SYNTHESIS_AGENT] Usage: {usage_normalized}", flush=True)
+
+        synthesis_content = response.content
+        print(f"[SYNTHESIS_AGENT] Synthesis length: {len(synthesis_content)} chars", flush=True)
+
+        # Create response message
+        response_message = AIMessage(
+            content=synthesis_content,
+            usage_metadata=usage_raw if usage_raw else None
+        )
+
+        # Extract model name
+        model_name = _extract_model_name(llm)
+
+        # Build updated agent_output with concatenated analysis + synthesis
+        current_agent_output = state.get("agent_output", {})
+        current_analysis = current_agent_output.get("analysis", analysis_output)
+
+        # Concatenate analysis with synthesis
+        combined_analysis = f"{current_analysis}\n\n---\n\n## Synthèse\n\n{synthesis_content}"
+
+        updated_agent_output: Dict[str, Any] = {
+            **current_agent_output,
+            "analysis": combined_analysis
+        }
+
+        result = {
+            "messages": [response_message],
+            "synthesis_output": synthesis_content,
+            "analysis_output": combined_analysis,  # Update analysis_output with combined content
+            "agent_output": updated_agent_output,
+            "total_usage": usage_normalized,
+            "usage_by_model": {model_name: usage_normalized}
+        }
+
+        print(f"[SYNTHESIS_AGENT] agent_output updated with synthesis", flush=True)
+        print(f"[SYNTHESIS_AGENT] Node completed", flush=True)
+        print("="*60 + "\n", flush=True)
+
+        return result
+
+    return _run
+
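To exercise the node in isolation, a smoke-test sketch along these lines should work, not part of the commit (uses LangChain's FakeListChatModel and assumes the repo's usage helpers tolerate a response without usage metadata):

from langchain_core.language_models import FakeListChatModel
from graphs.nodes.synthesis_node import create_synthesis_node

node = create_synthesis_node(FakeListChatModel(responses=["Tendance ↑ : rendement en hausse."]))
result = node({
    "payload": {"concern_name": "Rendement"},
    "analysis_output": "## Analyse\n\nLe rendement N progresse de 8 % vs N-1.",
    "agent_output": {},
})
print(result["synthesis_output"])  # raw executive summary
print(result["analysis_output"])   # analysis with a "## Synthèse" section appended

Because the node returns early when analysis_output is empty, passing an empty state yields an empty synthesis_output rather than an LLM call.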
graphs/prompts/a3_prompts.py
CHANGED
@@ -245,3 +245,45 @@ ANALYSIS_HUMAN_PROMPT = """Identifiant du site : {id_site}
 {indicators_text}
 """
 
+
+# =============================================================================
+# SYNTHESIS PROMPTS
+# =============================================================================
+
+SYNTHESIS_SYSTEM_PROMPT = """# ROLE
+Tu es expert MERITHALLE dans la synthèse d'analyses d'indicateurs clients, spécialisé dans le domaine VITI en France.
+
+# CONTEXTE
+Tu viens de recevoir une analyse détaillée de l'enjeu "{concern_name}" pour un site viticole. Cette analyse contient des observations, des comparaisons N vs N-1, et potentiellement des recommandations.
+
+# OBJECTIF
+Ton objectif est de produire une **synthèse exécutive** concise de l'analyse, permettant une lecture rapide des points clés.
+
+# RÈGLES DE SYNTHÈSE
+
+1. **Concision** : La synthèse doit faire entre 300 et 500 caractères maximum.
+
+2. **Structure** : Organise la synthèse en 3 points clés maximum :
+   - **Tendance principale** : L'évolution globale (amélioration, stabilité, dégradation)
+   - **Points d'attention** : 1-2 éléments critiques à surveiller (si pertinent)
+   - **Données manquantes** : Signaler uniquement si des N/F importants ont été identifiés
+
+3. **Neutralité** : Ne pas répéter l'analyse, mais en extraire l'essentiel.
+
+4. **Format** :
+   - Pas de titre
+   - Style télégraphique accepté
+   - Utiliser des indicateurs visuels si pertinent (↑ ↓ → pour les tendances)
+
+# SORTIE
+La synthèse doit être en texte brut ou Markdown léger, directement utilisable comme résumé exécutif.
+Ne pas mentionner l'ID du site."""
+
+
+SYNTHESIS_HUMAN_PROMPT = """## Analyse complète de l'enjeu "{concern_name}" :
+
+{analysis_output}
+
+---
+
+Produis maintenant la synthèse exécutive de cette analyse."""
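The two templates are plain str.format strings; the synthesis node renders them exactly as in this sketch (values are placeholders):

from graphs.prompts.a3_prompts import SYNTHESIS_SYSTEM_PROMPT, SYNTHESIS_HUMAN_PROMPT

system = SYNTHESIS_SYSTEM_PROMPT.format(concern_name="Rendement")
human = SYNTHESIS_HUMAN_PROMPT.format(
    concern_name="Rendement",
    analysis_output="## Analyse\n\n...",  # placeholder analysis text
)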
graphs/state.py
CHANGED
@@ -74,3 +74,4 @@ class AgentState(TypedDict, total=False):
     # Intermediate data for multi-node workflows (agent-specific)
     coherence_output: str
     analysis_output: str
+    synthesis_output: str
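Since AgentState is declared with total=False, the new key may be absent; downstream readers should use .get, as in this short sketch (not part of the commit):

from graphs.state import AgentState

def read_summary(state: AgentState) -> str:
    # total=False: the key may be missing, so default to an empty string.
    return state.get("synthesis_output", "")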
graphs/workflows/agent_a3.py
CHANGED
@@ -10,35 +10,41 @@ from graphs.tools.batch_calculator import batch_calculator
 from graphs.tools.indicators import get_site_indicators
 from graphs.tools.itineraires import get_itineraires
 from graphs.nodes.analysis_node import create_analysis_node, should_continue_analysis
+from graphs.nodes.synthesis_node import create_synthesis_node
 
 
 def get_agent_a3(
     llm: BaseChatModel,
-    analysis_llm: Optional[BaseChatModel] = None
+    analysis_llm: Optional[BaseChatModel] = None,
+    synthesis_llm: Optional[BaseChatModel] = None
 ):
     """
     Create the A3 workflow - Analyse.
 
-    This workflow consists of one main node:
+    This workflow consists of two main nodes:
     1. AnalysisAgent: Analyzes indicator data for year N against N-1 context and analysis
+    2. SynthesisAgent: Generates an executive summary of the analysis
 
     The workflow also includes a tool executor for the batch_calculator tool.
 
     Flow:
-        analysis_agent -> (tool_executor)* -> END
+        analysis_agent -> (tool_executor)* -> synthesis_agent -> END
 
     Args:
-        llm: Default LLM (used for AnalysisAgent if analysis_llm not provided)
+        llm: Default LLM (used for all nodes if specific LLMs not provided)
         analysis_llm: Optional specific LLM for AnalysisAgent.
                       If not provided, uses the default llm.
+        synthesis_llm: Optional specific LLM for SynthesisAgent.
+                       If not provided, uses the default llm.
 
     Returns:
         Compiled LangGraph workflow
     """
     workflow = StateGraph(AgentState)
 
-    # Use specific LLM if provided, otherwise use default
+    # Use specific LLMs if provided, otherwise use default
     analysis_model = analysis_llm or llm
+    synthesis_model = synthesis_llm or llm
 
     # Define tools
     tools = [batch_calculator, get_site_indicators, get_itineraires]
@@ -46,26 +52,32 @@ def get_agent_a3(
     # Create nodes
     analysis_node = create_analysis_node(analysis_model, tools)
     tool_node = ToolNode(tools)
+    synthesis_node = create_synthesis_node(synthesis_model)
 
     # Add nodes to workflow
     workflow.add_node("analysis_agent", analysis_node)
     workflow.add_node("tool_executor", tool_node)
+    workflow.add_node("synthesis_agent", synthesis_node)
 
     # Set entry point
     workflow.set_entry_point("analysis_agent")
 
     # Add conditional edges for tool execution loop
+    # When analysis is done (no more tool calls), go to synthesis
     workflow.add_conditional_edges(
         "analysis_agent",
         should_continue_analysis,
         {
             "tool_executor": "tool_executor",
-            "end": END
+            "end": "synthesis_agent"  # Go to synthesis instead of END
         }
     )
 
     # Tool executor goes back to analysis agent to process results
     workflow.add_edge("tool_executor", "analysis_agent")
 
+    # Synthesis agent goes to END
+    workflow.add_edge("synthesis_agent", END)
+
     return workflow.compile()
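A construction sketch for the extended graph, not part of the commit (the model arguments stand for chat models obtained elsewhere, e.g. via llm_service; the analysis model must support tool binding):

from typing import Optional
from langchain_core.language_models.chat_models import BaseChatModel
from graphs.workflows.agent_a3 import get_agent_a3

def build_a3_graph(default_llm: BaseChatModel, summary_llm: Optional[BaseChatModel] = None):
    return get_agent_a3(
        llm=default_llm,            # used by any node without a dedicated model
        synthesis_llm=summary_llm,  # the tool-free summary step can run on a lighter model
    )

Splitting the models this way is the point of the new synthesis_llm parameter: synthesis never calls tools, so a smaller, cheaper model can produce the summary while the default model handles the analysis loop.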
services/agent_service.py
CHANGED
@@ -68,10 +68,13 @@ class AgentService:
         # Handle agent-specific LLM configurations
         coherence_llm = self._get_coherence_llm(payload_dict, temperature, max_tokens)
         analysis_llm = self._get_analysis_llm(payload_dict, temperature, max_tokens)
+        synthesis_llm = self._get_synthesis_llm(payload_dict, temperature, max_tokens)
         if coherence_llm:
             print(f"[AGENT_SERVICE] Using specific coherence_model: {payload_dict.get('coherence_model')}", flush=True)
         if analysis_llm:
             print(f"[AGENT_SERVICE] Using specific analysis_model: {payload_dict.get('analysis_model')}", flush=True)
+        if synthesis_llm:
+            print(f"[AGENT_SERVICE] Using specific synthesis_model: {payload_dict.get('synthesis_model')}", flush=True)
 
         # Get agent builder and create graph with appropriate LLMs
         builder = agent_registry.get_builder(agent_type)
@@ -79,8 +82,14 @@
         # Pass specific LLMs if the agent supports them
         if agent_type == AgentType.A2 and coherence_llm:
             graph = builder(llm, coherence_llm=coherence_llm)
-        elif agent_type == AgentType.A3 and analysis_llm:
-            graph = builder(llm, analysis_llm=analysis_llm)
+        elif agent_type == AgentType.A3:
+            # A3 supports both analysis_llm and synthesis_llm
+            kwargs = {}
+            if analysis_llm:
+                kwargs["analysis_llm"] = analysis_llm
+            if synthesis_llm:
+                kwargs["synthesis_llm"] = synthesis_llm
+            graph = builder(llm, **kwargs) if kwargs else builder(llm)
         else:
             graph = builder(llm)
 
@@ -211,10 +220,13 @@ class AgentService:
         # Handle agent-specific LLM configurations
         coherence_llm = self._get_coherence_llm(payload_dict, temperature, max_tokens, streaming=True)
         analysis_llm = self._get_analysis_llm(payload_dict, temperature, max_tokens, streaming=True)
+        synthesis_llm = self._get_synthesis_llm(payload_dict, temperature, max_tokens, streaming=True)
         if coherence_llm:
             print(f"[AGENT_SERVICE] Using specific coherence_model: {payload_dict.get('coherence_model')}")
         if analysis_llm:
             print(f"[AGENT_SERVICE] Using specific analysis_model: {payload_dict.get('analysis_model')}")
+        if synthesis_llm:
+            print(f"[AGENT_SERVICE] Using specific synthesis_model: {payload_dict.get('synthesis_model')}")
 
         # Get agent builder and create graph with appropriate LLMs
         builder = agent_registry.get_builder(agent_type)
@@ -222,8 +234,14 @@
         # Pass specific LLMs if the agent supports them
         if agent_type == AgentType.A2 and coherence_llm:
             graph = builder(llm, coherence_llm=coherence_llm)
-        elif agent_type == AgentType.A3 and analysis_llm:
-            graph = builder(llm, analysis_llm=analysis_llm)
+        elif agent_type == AgentType.A3:
+            # A3 supports both analysis_llm and synthesis_llm
+            kwargs = {}
+            if analysis_llm:
+                kwargs["analysis_llm"] = analysis_llm
+            if synthesis_llm:
+                kwargs["synthesis_llm"] = synthesis_llm
+            graph = builder(llm, **kwargs) if kwargs else builder(llm)
         else:
             graph = builder(llm)
 
@@ -459,6 +477,41 @@ class AgentService:
         # Invalid model name, return None to use default
         return None
 
+    def _get_synthesis_llm(
+        self,
+        payload_dict: Dict[str, Any],
+        temperature: float,
+        max_tokens: Optional[int],
+        streaming: bool = False
+    ) -> Optional[BaseChatModel]:
+        """
+        Get a specific LLM for the SynthesisAgent if specified in payload.
+
+        Args:
+            payload_dict: Payload dictionary
+            temperature: Sampling temperature
+            max_tokens: Max tokens to generate
+            streaming: Whether to enable streaming
+
+        Returns:
+            LLM instance if synthesis_model is specified, None otherwise
+        """
+        synthesis_model_str = payload_dict.get("synthesis_model")
+        if not synthesis_model_str:
+            return None
+
+        try:
+            synthesis_model_name = ModelName(synthesis_model_str)
+            return llm_service.get_llm(
+                model_name=synthesis_model_name,
+                temperature=temperature,
+                streaming=streaming,
+                max_tokens=max_tokens
+            )
+        except ValueError:
+            # Invalid model name, return None to use default
+            return None
+
     def _prepare_messages_from_payload(
         self,
         payload: Dict[str, Any]
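The new getter follows the same fallback contract as the coherence and analysis getters: a missing or unknown synthesis_model resolves to None, so the builder falls back to the default LLM instead of failing. A behavioral sketch, not part of the commit (the no-arg AgentService construction is an assumption):

service = AgentService()  # assumed constructor

# Missing or invalid model names resolve to None -> default LLM is used.
assert service._get_synthesis_llm({}, temperature=0.2, max_tokens=None) is None
assert service._get_synthesis_llm(
    {"synthesis_model": "not-a-model"}, temperature=0.2, max_tokens=None
) is None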