File size: 5,538 Bytes
ae61a39
 
 
 
 
bf17131
 
ae61a39
 
bf17131
71505ac
d23845f
9716705
1063f06
eecf451
 
ae61a39
 
 
 
bf17131
ae61a39
 
ed46021
bf17131
ae61a39
 
 
bf17131
71505ac
bf17131
ae61a39
71505ac
ae61a39
 
 
 
 
71505ac
ae61a39
 
71505ac
bf17131
 
ae61a39
71505ac
 
 
 
 
ae61a39
ed46021
ae61a39
 
9716705
 
 
 
 
 
 
 
 
 
 
 
 
1063f06
 
 
 
 
 
 
 
 
 
 
 
 
eecf451
 
 
 
 
 
 
 
 
 
 
 
 
 
ae61a39
 
eecf451
ae61a39
 
ed46021
 
 
ae61a39
 
ed46021
bf17131
ae61a39
eecf451
ae61a39
bf17131
 
ae61a39
 
9716705
1063f06
eecf451
ae61a39
 
 
 
 
 
71505ac
 
 
9716705
1063f06
eecf451
bf17131
b6a7e53
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
import os
from typing import Annotated, Literal, TypedDict
from pydantic import BaseModel

from langchain_core.messages import BaseMessage, SystemMessage
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages
from langgraph.prebuilt import create_react_agent

# Imports from sibling modules
from api.ads import ads_tools
from api.analytics import analytics_tools
from api.finance import finance_tools
from api.logistics import logistics_tools
from prompts import SUPERVISOR_PROMPT, ADS_ANALYST_PROMPT, ANALYTICS_ANALYST_PROMPT, FINANCE_ANALYST_PROMPT, LOGISTICS_ANALYST_PROMPT

# ==========================================
# 1. СТРУКТУРА СОСТОЯНИЯ (STATE)
# ==========================================
class AgentState(TypedDict):
    """Shared graph state passed between the supervisor and sub-agent nodes."""
    # Conversation history; add_messages merges new messages in instead of overwriting.
    messages: Annotated[list[BaseMessage], add_messages]
    # Routing decision written by the supervisor ("FINISH" ends the run).
    next_node: str
    # Set True by a sub-agent node so the supervisor can finish without another LLM call.
    agent_responded: bool

# ==========================================
# 2. LLM INITIALIZATION (OpenRouter)
# ==========================================
# Single shared chat model for the supervisor and all sub-agents, routed
# through OpenRouter's OpenAI-compatible endpoint. Requires the
# OPENROUTER_API_KEY environment variable; if unset, getenv returns None
# and API calls will fail at request time — TODO confirm startup validation elsewhere.
llm = ChatOpenAI(
    model="google/gemini-2.0-flash-001", 
    openai_api_key=os.getenv("OPENROUTER_API_KEY"),
    openai_api_base="https://openrouter.ai/api/v1",
    temperature=0.2 
)

# ==========================================
# 3. NODE: ADS ANALYST (sub-agent)
# ==========================================
# Keep ONLY the model and tools here — no prompt modifiers; the system
# prompt is prepended manually inside the node function.
# Bare ReAct agent for advertising: model + tools only, no baked-in prompt.
ads_agent_runnable = create_react_agent(model=llm, tools=ads_tools)

async def ads_node(state: AgentState):
    """Run the ads ReAct agent and append only its final reply to the state."""
    # The runnable carries no prompt, so the role prompt is prepended per call.
    convo = [SystemMessage(content=ADS_ANALYST_PROMPT), *state["messages"]]
    outcome = await ads_agent_runnable.ainvoke({"messages": convo})
    # agent_responded lets the supervisor finish deterministically afterwards.
    return {"messages": [outcome["messages"][-1]], "agent_responded": True}

# ==========================================
# 4. NODE: SALES ANALYTICS ANALYST (sub-agent)
# ==========================================
# Bare ReAct agent for sales analytics; prompt is injected in the node.
analytics_agent_runnable = create_react_agent(model=llm, tools=analytics_tools)

async def analytics_node(state: AgentState):
    """Run the analytics ReAct agent; keep only its final answer."""
    history = [SystemMessage(content=ANALYTICS_ANALYST_PROMPT), *state["messages"]]
    reply = await analytics_agent_runnable.ainvoke({"messages": history})
    return {"messages": [reply["messages"][-1]], "agent_responded": True}

# ==========================================
# 5. NODE: FINANCE ANALYST (sub-agent)
# ==========================================
# Bare ReAct agent for finance; prompt is injected in the node.
finance_agent_runnable = create_react_agent(model=llm, tools=finance_tools)

async def finance_node(state: AgentState):
    """Run the finance ReAct agent; keep only its final answer."""
    conversation = [SystemMessage(content=FINANCE_ANALYST_PROMPT)]
    conversation.extend(state["messages"])
    answer = await finance_agent_runnable.ainvoke({"messages": conversation})
    return {"messages": [answer["messages"][-1]], "agent_responded": True}

# ==========================================
# 6. NODE: LOGISTICS ANALYST (sub-agent)
# ==========================================
# Bare ReAct agent for logistics; prompt is injected in the node.
logistics_agent_runnable = create_react_agent(model=llm, tools=logistics_tools)

async def logistics_node(state: AgentState):
    """Run the logistics ReAct agent; keep only its final answer."""
    primed = [SystemMessage(content=LOGISTICS_ANALYST_PROMPT), *state["messages"]]
    produced = await logistics_agent_runnable.ainvoke({"messages": primed})
    final_message = produced["messages"][-1]
    return {"messages": [final_message], "agent_responded": True}

# ==========================================
# 7. NODE: SUPERVISOR (orchestrator)
# ==========================================
class RouterOutput(BaseModel):
    """Structured routing decision produced by the supervisor's LLM call."""
    # Name of the next graph node to run, or "FINISH" to end the run.
    next_node: Literal["Ads_Analyst", "Analytics_Analyst", "Finance_Analyst", "Logistics_Analyst", "FINISH"]

async def supervisor_node(state: AgentState):
    """Decide which specialist runs next, or FINISH once one has replied."""
    # A sub-agent already answered — finish deterministically, skipping the LLM.
    if state.get("agent_responded"):
        return {"next_node": "FINISH", "agent_responded": False}
    routing_llm = llm.with_structured_output(RouterOutput)
    convo = [SystemMessage(content=SUPERVISOR_PROMPT), *state["messages"]]
    decision = await routing_llm.ainvoke(convo)
    return {"next_node": decision.next_node, "agent_responded": False}

# ==========================================
# 8. GRAPH ASSEMBLY
# ==========================================
# State machine: one supervisor plus four specialist nodes.
workflow = StateGraph(AgentState)

_NODES = {
    "Supervisor": supervisor_node,
    "Ads_Analyst": ads_node,
    "Analytics_Analyst": analytics_node,
    "Finance_Analyst": finance_node,
    "Logistics_Analyst": logistics_node,
}
for _label, _fn in _NODES.items():
    workflow.add_node(_label, _fn)

def router(state: AgentState) -> str:
    """Translate the supervisor's decision into a graph edge (END on FINISH)."""
    destination = state["next_node"]
    return END if destination == "FINISH" else destination

# Wiring: every run starts at the supervisor, whose decision fans out via
# router(); each specialist always hands control back to the supervisor.
workflow.add_edge(START, "Supervisor")
workflow.add_conditional_edges("Supervisor", router)
for _specialist in ("Ads_Analyst", "Analytics_Analyst", "Finance_Analyst", "Logistics_Analyst"):
    workflow.add_edge(_specialist, "Supervisor")

# The graph is compiled in app.py after the checkpointer is initialized