mriusero commited on
Commit
42500ca
·
1 Parent(s): c4d08fc

core: refactor

Browse files
src/agent/{inference.py → mistral_agent.py} RENAMED
File without changes
src/agent/stream.py ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from gradio import ChatMessage

from src.agent.mistral_agent import MistralAgent

# One shared agent instance for the whole application.
agent = MistralAgent()

# System prompt is loaded once at import time from the project root.
with open("./prompt.md", encoding="utf-8") as f:
    SYSTEM_PROMPT = f.read()
def split_phases(full):
    """Split the accumulated stream text into its protocol phases.

    The model is prompted to structure its output as
    ``THINKING: ... [TOOLING: ...] [FINAL ANSWER: ...]``.  Given the text
    accumulated so far, return ``(phase, thinking, tooling, final)`` where
    *phase* is the furthest phase reached ("thinking", "tooling" or
    "final") and the remaining elements are the stripped text of each
    section ("" when that section has not started yet).
    """
    thinking = tooling = final = ""
    if "FINAL ANSWER:" in full:
        before, final = full.split("FINAL ANSWER:", 1)
        final = final.strip()
        if "TOOLING:" in before:
            before, tooling = before.split("TOOLING:", 1)
            tooling = tooling.strip()
        # The assistant prefix may swallow the literal "THINKING:" marker,
        # so fall back to treating all leading text as thinking.
        thinking = before.split("THINKING:", 1)[1].strip() if "THINKING:" in before else before.strip()
        return "final", thinking, tooling, final
    if "TOOLING:" in full:
        before, tooling = full.split("TOOLING:", 1)
        tooling = tooling.strip()
        thinking = before.split("THINKING:", 1)[1].strip() if "THINKING:" in before else before.strip()
        return "tooling", thinking, tooling, final
    thinking = full.split("THINKING:", 1)[1].strip() if "THINKING:" in full else full.strip()
    return "thinking", thinking, tooling, final


async def respond(message, history=None):
    """Respond to a user message using the Mistral agent, streaming phases.

    Yields the growing ``history`` list after every streamed chunk so a
    Gradio Chatbot can render the "Thinking" / "Tooling" bubbles live,
    followed by the plain final-answer message.

    Args:
        message: The user's message text.
        history: Existing list of ``ChatMessage`` (mutated in place);
            a fresh list is created when ``None``.
    """
    if history is None:
        history = []

    history.append(ChatMessage(role="user", content=message))
    # Placeholder bubble that the stream loop keeps overwriting.
    history.append(ChatMessage(role="assistant", content="",
                               metadata={"title": "Thinking", "status": "pending"}))
    yield history

    messages = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "user", "content": message},
        {
            "role": "assistant",
            # Prefix forces the model to start in the THINKING phase.
            "content": "THINKING: Let's tackle this problem, ",
            "prefix": True,
        },
    ]
    payload = {
        "agent_id": agent.agent_id,
        "messages": messages,
        "stream": True,
        "max_tokens": None,
        "tools": agent.tools,
        "tool_choice": "auto",
        "presence_penalty": 0,
        "frequency_penalty": 0,
        "n": 1,
    }
    response = await agent.client.agents.stream_async(**payload)

    full = ""
    thinking = tooling = final = ""
    current_phase = None  # None | "thinking" | "tooling" | "final"

    async for chunk in response:
        delta = chunk.data.choices[0].delta
        full += delta.content or ""

        phase, thinking, tooling, final = split_phases(full)

        if phase == "final":
            if current_phase != "final":
                # Close the previous bubble before opening the answer message.
                if current_phase == "tooling":
                    history[-1] = ChatMessage(role="assistant", content=tooling,
                                              metadata={"title": "Tooling", "status": "done"})
                elif current_phase == "thinking":
                    history[-1] = ChatMessage(role="assistant", content=thinking,
                                              metadata={"title": "Thinking", "status": "done"})
                history.append(ChatMessage(role="assistant", content=final))
                current_phase = "final"
            else:
                # Fix: keep streaming the final answer; the original never
                # refreshed history[-1] after entering the final phase, so
                # the answer froze at its first chunk.
                history[-1] = ChatMessage(role="assistant", content=final)
        elif phase == "tooling":
            if current_phase != "tooling":
                if current_phase == "thinking":
                    history[-1] = ChatMessage(role="assistant", content=thinking,
                                              metadata={"title": "Thinking", "status": "done"})
                history.append(ChatMessage(role="assistant", content=tooling,
                                           metadata={"title": "Tooling", "status": "pending"}))
                current_phase = "tooling"
            else:
                history[-1] = ChatMessage(role="assistant", content=tooling,
                                          metadata={"title": "Tooling", "status": "pending"})
        else:
            # Thinking phase: overwrite the pending bubble with the latest text.
            history[-1] = ChatMessage(role="assistant", content=thinking,
                                      metadata={"title": "Thinking", "status": "pending"})
            current_phase = "thinking"

        yield history

    # Mark whichever bubble was still pending as done.
    if current_phase == "thinking":
        history[-1] = ChatMessage(role="assistant", content=thinking,
                                  metadata={"title": "Thinking", "status": "done"})
    elif current_phase == "tooling":
        history[-1] = ChatMessage(role="assistant", content=tooling,
                                  metadata={"title": "Tooling", "status": "done"})

    yield history
src/ui/sidebar.py CHANGED
@@ -1,130 +1,6 @@
1
  import gradio as gr
2
- import json
3
- from gradio import ChatMessage
4
 
5
- from src.agent.inference import MistralAgent
6
-
7
- agent = MistralAgent()
8
-
9
- with open("./prompt.md", encoding="utf-8") as f:
10
- SYSTEM_PROMPT = f.read()
11
-
12
- async def respond(message, history=None):
13
- """
14
- Respond to a user message using the Mistral agent.
15
- """
16
- if history is None:
17
- history = []
18
-
19
- history.append(ChatMessage(role="user", content=message))
20
- history.append(ChatMessage(role="assistant", content="", metadata={"title": "Thinking", "status": "pending"}))
21
- yield history
22
-
23
- messages = [
24
- {"role": "system", "content": SYSTEM_PROMPT},
25
- {"role": "user", "content": message},
26
- {
27
- "role": "assistant",
28
- "content": "THINKING: Let's tackle this problem, ",
29
- "prefix": True,
30
- },
31
- ]
32
- payload = {
33
- "agent_id": agent.agent_id,
34
- "messages": messages,
35
- "stream": True,
36
- "max_tokens": None,
37
- "tools": agent.tools,
38
- "tool_choice": "auto",
39
- "presence_penalty": 0,
40
- "frequency_penalty": 0,
41
- "n": 1
42
- }
43
- response = await agent.client.agents.stream_async(**payload)
44
-
45
- full = ""
46
- thinking = ""
47
- tooling = ""
48
- final = ""
49
-
50
- current_phase = None # None | "thinking" | "tooling" | "final"
51
-
52
- history[-1] = ChatMessage(role="assistant", content="", metadata={"title": "Thinking", "status": "pending"})
53
-
54
- async for chunk in response:
55
- delta = chunk.data.choices[0].delta
56
- content = delta.content or ""
57
- full += content
58
-
59
- # Phase finale
60
- if "FINAL ANSWER:" in full:
61
-
62
- parts = full.split("FINAL ANSWER:", 1)
63
- before_final = parts[0]
64
- final = parts[1].strip()
65
-
66
- if "TOOLING:" in before_final:
67
- tooling = before_final.split("TOOLING:", 1)[1].strip()
68
- else:
69
- tooling = ""
70
-
71
- if current_phase != "final":
72
- if current_phase == "tooling":
73
- history[-1] = ChatMessage(role="assistant", content=tooling, metadata={"title": "Tooling", "status": "done"})
74
- elif current_phase == "thinking":
75
- history[-1] = ChatMessage(role="assistant", content=thinking, metadata={"title": "Thinking", "status": "done"})
76
-
77
- history.append(ChatMessage(role="assistant", content=final))
78
- current_phase = "final"
79
- yield history
80
-
81
- # Phase outil
82
- elif "TOOLING:" in full:
83
-
84
- parts = full.split("TOOLING:", 1)
85
- before_tooling = parts[0]
86
- tooling = ""
87
-
88
- if "THINKING:" in before_tooling:
89
- thinking = before_tooling.split("THINKING:", 1)[1].strip()
90
- else:
91
- thinking = before_tooling.strip()
92
-
93
- tooling = parts[1].strip()
94
-
95
- if current_phase != "tooling":
96
- if current_phase == "thinking":
97
- history[-1] = ChatMessage(role="assistant", content=thinking,
98
- metadata={"title": "Thinking", "status": "done"})
99
- history.append(
100
- ChatMessage(role="assistant", content=tooling, metadata={"title": "Tooling", "status": "pending"}))
101
- current_phase = "tooling"
102
- else:
103
- history[-1] = ChatMessage(role="assistant", content=tooling,
104
- metadata={"title": "Tooling", "status": "pending"})
105
- yield history
106
-
107
- # Phase réflexion
108
- elif "THINKING:" in full or current_phase is None:
109
-
110
- if "THINKING:" in full:
111
- thinking = full.split("THINKING:", 1)[1].strip()
112
- else:
113
- thinking = full.strip()
114
-
115
- if current_phase != "thinking":
116
- history[-1] = ChatMessage(role="assistant", content=thinking, metadata={"title": "Thinking", "status": "pending"})
117
- current_phase = "thinking"
118
- else:
119
- history[-1] = ChatMessage(role="assistant", content=thinking, metadata={"title": "Thinking", "status": "pending"})
120
- yield history
121
-
122
- if current_phase == "thinking":
123
- history[-1] = ChatMessage(role="assistant", content=thinking, metadata={"title": "Thinking", "status": "done"})
124
- elif current_phase == "tooling":
125
- history[-1] = ChatMessage(role="assistant", content=tooling, metadata={"title": "Tooling", "status": "done"})
126
-
127
- yield history
128
 
129
 
130
  def sidebar_ui(state, width=700, visible=True):
 
1
  import gradio as gr
 
 
2
 
3
+ from src.agent.stream import respond
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
 
5
 
6
  def sidebar_ui(state, width=700, visible=True):