Hugging Face Spaces (Space status: Sleeping).
Commit: "Update services/graph_service.py" — Browse files.
Changed file: services/graph_service.py (+19 lines, −6 lines).
|
@@ -13,6 +13,7 @@ from langgraph.prebuilt import ToolNode
|
|
| 13 |
|
| 14 |
from tools.analysis_tools import trigger_interview_analysis
|
| 15 |
|
|
|
|
| 16 |
class AgentState(TypedDict):
    """Shared state threaded through the LangGraph interview graph.

    ``messages`` is annotated with a concatenating reducer (``x + y``), so
    the partial message lists returned by graph nodes are APPENDED to the
    running conversation history rather than replacing it.
    """

    messages: Annotated[Sequence[BaseMessage], lambda x, y: x + y]
    # ID of the user being interviewed; presumably used downstream to route
    # analysis results — confirm against callers (not visible in this diff).
    user_id: str
|
|
@@ -98,6 +99,7 @@ class GraphInterviewProcessor:
|
|
| 98 |
"conversation_history": state["messages"]
|
| 99 |
})
|
| 100 |
return {"messages": [response['output']]}
|
|
|
|
| 101 |
def _router(self, state: AgentState):
|
| 102 |
"""Décide du chemin à suivre après la réponse de l'agent."""
|
| 103 |
last_message = state["messages"][-1]
|
|
@@ -105,6 +107,7 @@ class GraphInterviewProcessor:
|
|
| 105 |
return "call_tool"
|
| 106 |
else:
|
| 107 |
return "end_turn"
|
|
|
|
| 108 |
def _build_graph(self) -> any:
|
| 109 |
"""Construit et compile le graphe d'états."""
|
| 110 |
tool_node = ToolNode([trigger_interview_analysis])
|
|
@@ -114,15 +117,16 @@ class GraphInterviewProcessor:
|
|
| 114 |
graph.add_node("tools", tool_node)
|
| 115 |
|
| 116 |
graph.set_entry_point("agent")
|
|
|
|
| 117 |
graph.add_conditional_edges(
|
| 118 |
"agent",
|
| 119 |
self._router,
|
| 120 |
{
|
| 121 |
-
"call_tool": "tools",
|
| 122 |
-
"end_turn": END
|
| 123 |
}
|
| 124 |
)
|
| 125 |
-
|
| 126 |
graph.add_edge("tools", "agent")
|
| 127 |
|
| 128 |
return graph.compile()
|
|
@@ -137,15 +141,24 @@ class GraphInterviewProcessor:
|
|
| 137 |
"messages": langchain_messages,
|
| 138 |
}
|
| 139 |
|
|
|
|
| 140 |
final_state = self.graph.invoke(initial_state)
|
|
|
|
|
|
|
| 141 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 142 |
last_message = final_state['messages'][-1]
|
|
|
|
| 143 |
|
| 144 |
-
# Le statut est 'finished' si le dernier message contient un appel à un outil.
|
| 145 |
-
# Cela signifie que le graphe a pris le chemin de l'analyse.
|
| 146 |
status = "finished" if hasattr(last_message, 'tool_calls') and last_message.tool_calls else "interviewing"
|
| 147 |
|
|
|
|
|
|
|
|
|
|
| 148 |
return {
|
| 149 |
-
"response":
|
| 150 |
"status": status
|
| 151 |
}
|
|
|
|
| 13 |
|
| 14 |
from tools.analysis_tools import trigger_interview_analysis
|
| 15 |
|
| 16 |
+
|
| 17 |
class AgentState(TypedDict):
    """Shared state threaded through the LangGraph interview graph.

    ``messages`` is annotated with a concatenating reducer (``x + y``), so
    the partial message lists returned by graph nodes are APPENDED to the
    running conversation history rather than replacing it.
    """

    messages: Annotated[Sequence[BaseMessage], lambda x, y: x + y]
    # ID of the user being interviewed; presumably used downstream to route
    # analysis results — confirm against callers (not visible in this diff).
    user_id: str
|
|
|
|
| 99 |
"conversation_history": state["messages"]
|
| 100 |
})
|
| 101 |
return {"messages": [response['output']]}
|
| 102 |
+
|
| 103 |
def _router(self, state: AgentState):
|
| 104 |
"""Décide du chemin à suivre après la réponse de l'agent."""
|
| 105 |
last_message = state["messages"][-1]
|
|
|
|
| 107 |
return "call_tool"
|
| 108 |
else:
|
| 109 |
return "end_turn"
|
| 110 |
+
|
| 111 |
def _build_graph(self) -> any:
|
| 112 |
"""Construit et compile le graphe d'états."""
|
| 113 |
tool_node = ToolNode([trigger_interview_analysis])
|
|
|
|
| 117 |
graph.add_node("tools", tool_node)
|
| 118 |
|
| 119 |
graph.set_entry_point("agent")
|
| 120 |
+
|
| 121 |
graph.add_conditional_edges(
|
| 122 |
"agent",
|
| 123 |
self._router,
|
| 124 |
{
|
| 125 |
+
"call_tool": "tools",
|
| 126 |
+
"end_turn": END
|
| 127 |
}
|
| 128 |
)
|
| 129 |
+
|
| 130 |
graph.add_edge("tools", "agent")
|
| 131 |
|
| 132 |
return graph.compile()
|
|
|
|
| 141 |
"messages": langchain_messages,
|
| 142 |
}
|
| 143 |
|
| 144 |
+
logging.info(f"Invoking graph with initial state: {initial_state}")
|
| 145 |
final_state = self.graph.invoke(initial_state)
|
| 146 |
+
|
| 147 |
+
logging.info(f"Graph finished. Final state: {final_state}")
|
| 148 |
|
| 149 |
+
if not final_state or not final_state.get('messages'):
|
| 150 |
+
logging.error("Final state is empty or does not contain messages.")
|
| 151 |
+
return {"response": "Erreur: Impossible de générer une réponse.", "status": "finished"}
|
| 152 |
+
|
| 153 |
last_message = final_state['messages'][-1]
|
| 154 |
+
logging.info(f"Last message from graph: {last_message}")
|
| 155 |
|
|
|
|
|
|
|
| 156 |
status = "finished" if hasattr(last_message, 'tool_calls') and last_message.tool_calls else "interviewing"
|
| 157 |
|
| 158 |
+
response_content = last_message.content
|
| 159 |
+
logging.info(f"Final response content: '{response_content}', status: {status}")
|
| 160 |
+
|
| 161 |
return {
|
| 162 |
+
"response": response_content,
|
| 163 |
"status": status
|
| 164 |
}
|