Sanyam400 committed on
Commit
1ed2170
·
verified ·
1 Parent(s): 65b5b97

Update app/agent_system.py

Browse files
Files changed (1) hide show
  1. app/agent_system.py +283 -186
app/agent_system.py CHANGED
@@ -1,14 +1,20 @@
1
  """
2
- PraisonChat β€” Code Interpreter Agent
3
- ======================================
4
- Like ClawBot: the AI writes real Python, we execute it, AI sees real output,
5
- AI iterates. No fake tools. No simulation. Everything is real.
 
 
 
6
  """
7
- import os, json, asyncio, datetime, traceback, base64, re
8
  from openai import AsyncOpenAI
9
- from typing import AsyncGenerator
10
  from sandbox import run as sandbox_run, pip_install, PKG_DIR
11
  from docs_context import PRAISONAI_DOCS
 
 
 
 
12
 
13
  LONGCAT_BASE = "https://api.longcat.chat/openai/v1"
14
  MODEL_MAP = {
@@ -18,79 +24,104 @@ MODEL_MAP = {
18
  }
19
  DEFAULT_MODEL = "LongCat-Flash-Lite"
20
 
21
- def now_iso():
22
- return datetime.datetime.now().strftime("%A %B %d %Y %I:%M:%S %p")
23
 
24
- # ─── Master system prompt ────────────────────────────────────────────────────
25
- SYSTEM = f"""You are PraisonChat β€” a powerful AI assistant with a real Python code interpreter.
26
- You can write and execute Python code to accomplish ANY task.
27
 
28
- Current datetime: {{DATETIME}}
29
- Python package dir: {PKG_DIR}
30
 
31
- ## HOW TO USE THE CODE INTERPRETER
32
 
33
- When you need to do ANYTHING real (search, calculate, fetch data, generate audio, create files,
34
- draw charts, etc.) β€” write Python code inside <execute> tags:
35
 
36
- <execute>
37
- # Example: search the web
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
  import sys
39
  sys.path.insert(0, '{PKG_DIR}')
40
  from duckduckgo_search import DDGS
41
-
42
  with DDGS() as ddgs:
43
- results = list(ddgs.text("latest AI news 2025", max_results=5))
44
- for r in results:
45
- print(r['title'])
46
- print(r['body'])
47
- print()
48
- </execute>
49
-
50
- The code runs IMMEDIATELY in a real Python environment.
51
- You will see the ACTUAL output and can write more code based on it.
52
-
53
- ## RULES
54
- 1. ALWAYS use <execute> for: web search, date/time, calculations, file creation, audio, images
55
- 2. Never say "I cannot" β€” instead write code that does it
56
- 3. If a library is missing, just import it β€” it gets auto-installed
57
- 4. To generate VOICE: use gtts and save as .mp3, then it gets returned as audio
58
- 5. To create FILES (images, PDFs, etc.): save to current dir, they get returned automatically
59
- 6. You can pip install ANYTHING: just `import subprocess; subprocess.run(["pip","install","X"])`
60
- OR just import it directly β€” auto-install handles it
61
- 7. Chain multiple <execute> blocks if needed
62
- 8. After seeing results, explain them clearly to the user
63
-
64
- ## AVAILABLE PRE-INSTALLED PACKAGES
65
- requests, httpx, beautifulsoup4, duckduckgo-search, gtts, pillow, matplotlib,
66
- numpy, pandas, qrcode, python-dateutil, pytz
67
-
68
- ## RESPONSE FORMATS
69
- - For voice/audio requests: save MP3 to file, it will be sent as audio
70
- - For image requests: save PNG/JPG, it will be displayed
71
- - For data: print clearly formatted output
72
- - For files: save to current directory, they get returned
73
 
74
- {PRAISONAI_DOCS}
75
- """
 
 
 
 
 
 
 
 
76
 
77
- def get_system():
78
- return SYSTEM.replace("{DATETIME}", now_iso())
 
79
 
80
- # ─── Parse <execute> blocks from LLM response ────────────────────────────────
81
- def extract_code_blocks(text: str) -> list[str]:
82
- return re.findall(r'<execute>(.*?)</execute>', text, re.DOTALL)
83
 
84
- def strip_code_blocks(text: str) -> str:
85
- return re.sub(r'<execute>.*?</execute>', '', text, flags=re.DOTALL).strip()
86
 
87
- # ─── Detect output type from files ───────────────────────────────────────────
88
- def classify_file(name: str, ext: str) -> str:
89
- audio_exts = {"mp3","wav","ogg","m4a","flac","aac"}
90
- image_exts = {"png","jpg","jpeg","gif","webp","bmp","svg"}
91
- if ext in audio_exts: return "audio"
92
- if ext in image_exts: return "image"
93
- return "file"
 
 
 
 
 
 
 
 
 
 
 
 
 
94
 
95
 
96
  class AgentOrchestrator:
@@ -104,162 +135,228 @@ class AgentOrchestrator:
104
  )
105
  return self._clients[api_key]
106
 
107
- async def _llm_stream(self, client, messages: list, model: str) -> tuple[str, AsyncGenerator]:
108
- """Stream from LLM, return full text."""
109
- stream = await client.chat.completions.create(
110
  model=model, messages=messages,
111
- max_tokens=16000, temperature=0.7, stream=True
112
  )
113
- full = ""
114
- async def gen():
115
- nonlocal full
116
- async for chunk in stream:
117
- c = chunk.choices[0].delta.content
118
- if c:
119
- full += c
120
- yield c
121
- return full, gen()
122
-
123
- async def stream_response(
124
- self, user_msg: str, history: list,
125
- api_key: str, model: str = DEFAULT_MODEL
126
- ) -> AsyncGenerator:
127
  def emit(d: dict) -> str:
128
  return json.dumps(d)
129
 
130
- model = MODEL_MAP.get(model, DEFAULT_MODEL)
131
- cl = self.client(api_key)
132
 
133
- # Build message history
134
- messages = [{"role": "system", "content": get_system()}]
135
- for m in history[-12:]:
136
- messages.append({"role": m["role"], "content": str(m.get("content",""))[:3000]})
 
 
 
137
  messages.append({"role": "user", "content": user_msg})
138
 
139
  try:
140
- # ── Agentic loop: LLM β†’ execute β†’ LLM β†’ execute β†’ … ─────────────
141
- MAX_ITERATIONS = 6
142
- iteration = 0
143
- final_text = ""
144
 
145
- while iteration < MAX_ITERATIONS:
146
- iteration += 1
147
  yield emit({"type": "thinking",
148
- "text": f"Iteration {iteration}: generating response…"})
149
  await asyncio.sleep(0)
150
 
151
- # Stream LLM response
152
- collected = ""
153
- yield emit({"type": "response_start"})
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
154
 
155
- stream = await cl.chat.completions.create(
156
- model=model, messages=messages,
157
- max_tokens=16000, temperature=0.7, stream=True
158
- )
159
- async for chunk in stream:
160
- c = chunk.choices[0].delta.content
161
- if c:
162
- collected += c
163
- yield emit({"type": "token", "content": c})
164
 
165
- await asyncio.sleep(0)
 
 
 
 
166
 
167
- # ── Check for code blocks ────────────────────────────────────
168
- code_blocks = extract_code_blocks(collected)
 
 
 
169
 
170
- if not code_blocks:
171
- # No code β€” pure text response, we're done
172
- final_text = collected
173
- break
 
 
 
 
 
 
 
 
 
 
 
174
 
175
- # ── Execute each code block ──────────────────────────────────
176
- all_exec_results = []
177
 
178
- for idx, code in enumerate(code_blocks):
179
- code = code.strip()
180
- yield emit({"type": "code_executing",
181
- "code": code[:500],
182
- "index": idx,
183
- "total": len(code_blocks)})
 
 
 
 
 
 
 
184
  await asyncio.sleep(0)
185
 
186
- # Run in thread pool (blocking)
187
- loop = asyncio.get_event_loop()
 
188
  result = await loop.run_in_executor(
189
- None, lambda c=code: sandbox_run(c, max_retries=3, timeout=60)
190
  )
191
 
192
- # Emit install events
193
  for inst in result.get("installs", []):
194
- yield emit({"type": "pkg_install",
195
- "package": inst["package"],
196
- "ok": inst["ok"],
197
- "msg": inst["msg"]})
198
 
199
- stdout = result.get("stdout", "").strip()
200
- stderr = result.get("stderr", "").strip()
201
  ok = result.get("ok", False)
202
  files = result.get("files", [])
203
 
204
- yield emit({"type": "code_result",
205
- "ok": ok,
206
- "stdout": stdout[:1000],
207
- "stderr": stderr[:500] if not ok else "",
208
- "files": [{"name": f["name"], "ext": f["ext"], "size": f["size"]}
209
- for f in files]})
210
-
211
- # ── Handle output files ──────────────────────────────────
212
  for f in files:
213
- ftype = classify_file(f["name"], f["ext"])
214
- if ftype == "audio":
215
- yield emit({"type": "audio_response",
216
- "audio_b64": f["b64"],
217
- "filename": f["name"]})
218
- elif ftype == "image":
219
- yield emit({"type": "image_response",
220
- "image_b64": f["b64"],
221
- "filename": f["name"],
222
- "ext": f["ext"]})
223
  else:
224
- yield emit({"type": "file_response",
225
- "file_b64": f["b64"],
226
- "filename": f["name"],
227
- "size": f["size"]})
228
-
229
- # Build exec summary for next LLM call
230
- exec_summary = f"=== Code Block {idx+1} Output ===\n"
231
- if stdout:
232
- exec_summary += f"STDOUT:\n{stdout[:3000]}\n"
233
- if not ok and stderr:
234
- exec_summary += f"STDERR:\n{stderr[:1000]}\n"
235
- if files:
236
- exec_summary += f"FILES: {[f['name'] for f in files]}\n"
237
- if not stdout and not stderr and not files:
238
- exec_summary += "(code ran with no output)\n"
239
- all_exec_results.append(exec_summary)
240
-
241
- # ── Feed results back to LLM ─────────────────────────────────
242
- # Add assistant turn (text before/around code blocks)
243
- visible_text = strip_code_blocks(collected)
244
- full_turn = collected # keep original with execute tags
245
- messages.append({"role": "assistant", "content": full_turn})
246
-
247
- # Add execution results as user turn
248
- exec_feedback = "EXECUTION RESULTS:\n\n" + "\n\n".join(all_exec_results)
249
- exec_feedback += "\n\nBased on these REAL results, provide your final answer to the user. Do NOT write more code unless absolutely necessary."
250
- messages.append({"role": "user", "content": exec_feedback})
251
-
252
- # Next iteration will produce the final answer
253
- # But if all code succeeded and we have good output, one more pass is enough
254
- if all(r.get("ok", False) for r in [result]):
255
- pass # continue to next iteration for final answer
256
-
257
- yield emit({"type": "done"})
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
258
 
259
  except Exception as e:
260
  tb = traceback.format_exc()
261
  print(f"[AGENT] Error: {e}\n{tb}")
262
- yield emit({"type": "error", "message": str(e), "detail": tb[-800:]})
263
 
264
 
265
  orchestrator = AgentOrchestrator()
 
1
  """
2
+ PraisonChat Agent System v5 — OpenClaw-style
3
+ =============================================
4
+ - Code interpreter loop (write → execute → reflect → repeat)
5
+ - Persistent memory + skills
6
+ - Real sub-agents with real tool execution
7
+ - Clean output: only results shown, internals hidden
8
+ - Robust JSON error handling
9
  """
10
+ import os, json, asyncio, datetime, traceback, re
11
  from openai import AsyncOpenAI
 
12
  from sandbox import run as sandbox_run, pip_install, PKG_DIR
13
  from docs_context import PRAISONAI_DOCS
14
+ from memory import (
15
+ get_memory_context, get_skills_context, save_memory,
16
+ save_skill, list_skills, search_memories
17
+ )
18
 
19
  LONGCAT_BASE = "https://api.longcat.chat/openai/v1"
20
  MODEL_MAP = {
 
24
  }
25
  DEFAULT_MODEL = "LongCat-Flash-Lite"
26
 
27
def now_str():
    """Return the current local time as a human-readable string.

    Format: "Monday, January 01 2025 at 09:30:00 AM" — embedded verbatim
    in the system prompt so the model knows "now".
    """
    current = datetime.datetime.now()
    return current.strftime("%A, %B %d %Y at %I:%M:%S %p")
29
 
30
def build_system(memory_ctx: str, skills_ctx: str) -> str:
    """Build the master system prompt for the agent.

    Interpolates the current datetime, the sandbox package directory,
    persistent memory/skills context, and the PraisonAI docs into one
    prompt that teaches the model the <execute>/<save_memory>/
    <search_memory>/<save_skill>/<spawn_agent> tag protocol parsed by
    extract_blocks() in stream_response().

    Args:
        memory_ctx: Rendered persistent-memory context (from get_memory_context()).
        skills_ctx: Rendered saved-skills context (from get_skills_context()).

    Returns:
        The full system-prompt string.
    """
    # NOTE: this is a single runtime f-string — every literal below is sent
    # to the model verbatim. Do not reflow or "fix" its wording casually.
    return f"""You are PraisonChat — a powerful autonomous AI agent.
You have a real Python code interpreter, persistent memory, and can create/save reusable skills.

Current datetime: {now_str()}
Python packages dir: {PKG_DIR}

{memory_ctx}

{skills_ctx}

## CODE INTERPRETER
Write Python inside <execute> tags. It runs IMMEDIATELY with real results.

Rules:
- ALWAYS import sys and add PKG_DIR to path: sys.path.insert(0, '{PKG_DIR}')
- Use duckduckgo_search for web searches (NOT google.com)
- Save files (images, audio, PDFs) to current dir — they are returned to user
- Use gtts for voice/audio generation
- For voice: save as voice_response.mp3, it becomes a playable audio
- For images: save as image.png, it becomes visible in chat
- NEVER simulate or estimate data — always execute real code

## MEMORY SYSTEM
Save important info:
<save_memory key="user_preferences">User prefers dark mode, speaks English</save_memory>

Search memory:
<search_memory>user preferences</search_memory>

## SKILLS SYSTEM
Save reusable code as a skill:
<save_skill name="search_news" description="Search for latest news using DuckDuckGo">
import sys
sys.path.insert(0, '{PKG_DIR}')
def search_news(query, max_results=5):
    from duckduckgo_search import DDGS
    with DDGS() as ddgs:
        return list(ddgs.news(query, max_results=max_results))
</save_skill>

## SUB-AGENTS
For complex tasks, spawn specialized sub-agents:
<spawn_agent name="ResearchAgent" task="Find the top 5 AI papers from 2025">
import sys
sys.path.insert(0, '{PKG_DIR}')
from duckduckgo_search import DDGS
results = []
with DDGS() as ddgs:
    for r in ddgs.text("top AI research papers 2025", max_results=8):
        results.append(r['title'] + ': ' + r['body'])
print('\n'.join(results))
</spawn_agent>

Each <spawn_agent> executes its code independently and returns real results.

## RESPONSE FORMAT RULES
- NEVER show raw code to the user in your final response
- NEVER say "I executed the following code..."
- JUST present the RESULTS clearly
- Use markdown for formatting
- For voice requests: put the text to speak in [SPEAK: text here]
- For math: just show the answer
- For search: show results in a clean list

## AVAILABLE PACKAGES (always installed)
requests, httpx, duckduckgo-search, beautifulsoup4, gtts, pillow,
matplotlib, numpy, pandas, qrcode, python-dateutil, pytz

## ADDITIONAL PACKAGES
Just import any package — it gets auto-installed automatically.

{PRAISONAI_DOCS}
"""
104
 
105
def extract_blocks(text: str, tag: str) -> list[dict]:
    """Extract every <tag attr="...">content</tag> block from *text*.

    Args:
        text: Raw LLM output that may contain agent-control tags.
        tag:  Tag name to look for (e.g. "execute", "spawn_agent").

    Returns:
        One dict per match: {"attrs": {name: value}, "content": inner text
        (stripped), "full": the entire matched tag span}.
    """
    results = []
    pattern = rf'<{tag}([^>]*)>(.*?)</{tag}>'
    for m in re.finditer(pattern, text, re.DOTALL):
        attrs_str = m.group(1).strip()
        content = m.group(2).strip()
        # Parse key=value attributes. The backreference \2 requires the
        # closing quote to match the opening one, so a value may contain
        # the *other* quote character (e.g. task="user's data") — the old
        # pattern ["\']([^"\']*)["\'] rejected apostrophes and accepted
        # mismatched quote pairs.
        attrs = {}
        for a in re.finditer(r'(\w+)=(["\'])(.*?)\2', attrs_str):
            attrs[a.group(1)] = a.group(3)
        results.append({"attrs": attrs, "content": content, "full": m.group(0)})
    return results
118
+
119
def strip_all_tags(text: str) -> str:
    """Strip every internal agent-control tag from a model response.

    Removes <execute>, <spawn_agent>, <save_memory>, <search_memory> and
    <save_skill> spans (including their contents) so only user-facing
    prose remains, then trims surrounding whitespace.
    """
    internal_tags = ("execute", "spawn_agent", "save_memory",
                     "search_memory", "save_skill")
    cleaned = text
    for name in internal_tags:
        cleaned = re.sub(rf'<{name}[^>]*>.*?</{name}>', '', cleaned,
                         flags=re.DOTALL)
    return cleaned.strip()
125
 
126
 
127
  class AgentOrchestrator:
 
135
  )
136
  return self._clients[api_key]
137
 
138
+ async def _llm(self, client, messages: list, model: str, stream: bool = False):
139
+ return await client.chat.completions.create(
 
140
  model=model, messages=messages,
141
+ max_tokens=16000, temperature=0.7, stream=stream
142
  )
143
+
144
    async def stream_response(self, user_msg: str, history: list,
                              api_key: str, model: str = DEFAULT_MODEL):
        """Run the agentic loop for one user message, yielding JSON events.

        Each yielded value is a json.dumps'd dict with a "type" field
        ("thinking", "token", "exec_done", "audio_response", "done",
        "error", ...) consumed by the front-end stream.

        Loop shape: call the LLM, execute any control tags it emitted
        (<save_memory>, <search_memory>, <save_skill>, <execute>,
        <spawn_agent>), feed the real results back as a user turn, and
        repeat up to MAX_ITER times; a tag-free response is streamed to
        the user and ends the loop.
        """
        def emit(d: dict) -> str:
            # All events go over the wire as JSON strings.
            return json.dumps(d)

        # Unknown model names silently fall back to the default.
        model = MODEL_MAP.get(model, DEFAULT_MODEL)
        cl = self.client(api_key)

        mem_ctx = get_memory_context()
        skills_ctx = get_skills_context()

        # System prompt + last 14 history turns (each truncated to 3000
        # chars to bound prompt size) + the new user message.
        messages = [{"role": "system", "content": build_system(mem_ctx, skills_ctx)}]
        for m in history[-14:]:
            messages.append({"role": m["role"],
                             "content": str(m.get("content",""))[:3000]})
        messages.append({"role": "user", "content": user_msg})

        try:
            MAX_ITER = 7
            # Accumulated across iterations; keyed by code/agent id so the
            # feedback turn can show the most recent results.
            all_exec_results = {}
            all_agent_results = {}

            for iteration in range(MAX_ITER):
                yield emit({"type": "thinking",
                            "text": f"Working… (step {iteration+1})"})
                await asyncio.sleep(0)  # let the event loop flush the event

                # ── LLM call (non-streaming; full text needed for tag parsing) ──
                resp = await self._llm(cl, messages, model)
                raw = resp.choices[0].message.content or ""

                # ── Process special tags emitted by the model ──

                # 1. Memory saves
                for blk in extract_blocks(raw, "save_memory"):
                    key = blk["attrs"].get("key", f"note_{iteration}")
                    content = blk["content"]
                    save_memory(key, content)
                    yield emit({"type": "memory_saved", "key": key})

                # 2. Memory searches — results are fed back via all_exec_results
                for blk in extract_blocks(raw, "search_memory"):
                    query = blk["content"]
                    result = search_memories(query)
                    all_exec_results[f"memory:{query}"] = result
                    yield emit({"type": "memory_search", "query": query,
                                "result": result[:200]})

                # 3. Skill saves
                for blk in extract_blocks(raw, "save_skill"):
                    name = blk["attrs"].get("name", f"skill_{iteration}")
                    desc = blk["attrs"].get("description", "")
                    save_skill(name, blk["content"], desc)
                    yield emit({"type": "skill_saved", "name": name,
                                "description": desc})

                # 4. Execute code blocks in the sandbox
                exec_blocks = extract_blocks(raw, "execute")
                for idx, blk in enumerate(exec_blocks):
                    code = blk["content"]
                    yield emit({"type": "executing", "index": idx})
                    await asyncio.sleep(0)

                    # sandbox_run is blocking — run it in the default
                    # thread-pool executor; default c=code pins the value
                    # at lambda-creation time.
                    loop = asyncio.get_event_loop()
                    result = await loop.run_in_executor(
                        None, lambda c=code: sandbox_run(c, max_retries=3, timeout=60)
                    )

                    # Surface auto-install events (activity panel only)
                    for inst in result.get("installs", []):
                        yield emit({"type": "pkg_install",
                                    "package": inst["package"],
                                    "ok": inst["ok"]})

                    key = f"code_{iteration}_{idx}"
                    stdout = result.get("stdout","").strip()
                    stderr = result.get("stderr","").strip()
                    ok = result.get("ok", False)
                    files = result.get("files", [])

                    # Emit output files produced by the code (audio, images,
                    # generic downloads) as base64 payloads.
                    for f in files:
                        ext = f.get("ext","").lower()
                        fname = f["name"]
                        b64 = f["b64"]
                        if ext in {"mp3","wav","ogg","m4a"}:
                            yield emit({"type":"audio_response",
                                        "audio_b64":b64, "filename":fname})
                        elif ext in {"png","jpg","jpeg","gif","webp","svg"}:
                            yield emit({"type":"image_response",
                                        "image_b64":b64, "filename":fname, "ext":ext})
                        else:
                            yield emit({"type":"file_response",
                                        "file_b64":b64, "filename":fname,
                                        "size":f.get("size",0)})

                    # On failure only stderr is recorded for the model.
                    exec_result = stdout if ok else f"Error: {stderr}"
                    all_exec_results[key] = exec_result

                    yield emit({"type": "exec_done",
                                "ok": ok,
                                "output": exec_result[:300],
                                "files": [f["name"] for f in files]})

                # 5. Spawn sub-agents — same sandbox mechanism as <execute>
                # but with a longer 90s timeout and named results.
                agent_blocks = extract_blocks(raw, "spawn_agent")
                for blk in agent_blocks:
                    name = blk["attrs"].get("name", f"Agent_{iteration}")
                    task = blk["attrs"].get("task", "Execute task")
                    code = blk["content"]

                    yield emit({"type":"agent_created","name":name,"task":task[:100]})
                    await asyncio.sleep(0)

                    yield emit({"type":"agent_working","name":name})

                    loop = asyncio.get_event_loop()
                    result = await loop.run_in_executor(
                        None, lambda c=code: sandbox_run(c, max_retries=3, timeout=90)
                    )

                    for inst in result.get("installs", []):
                        yield emit({"type":"pkg_install",
                                    "package":inst["package"],"ok":inst["ok"]})

                    stdout = result.get("stdout","").strip()
                    stderr = result.get("stderr","").strip()
                    ok = result.get("ok", False)
                    files = result.get("files", [])

                    # NOTE(review): narrower ext sets than the <execute> path
                    # (no m4a/svg) — presumably intentional, confirm.
                    for f in files:
                        ext = f.get("ext","").lower()
                        b64 = f["b64"]
                        if ext in {"mp3","wav","ogg"}:
                            yield emit({"type":"audio_response","audio_b64":b64,"filename":f["name"]})
                        elif ext in {"png","jpg","jpeg","gif","webp"}:
                            yield emit({"type":"image_response","image_b64":b64,"filename":f["name"],"ext":ext})
                        else:
                            yield emit({"type":"file_response","file_b64":b64,"filename":f["name"],"size":f.get("size",0)})

                    agent_out = stdout if ok else f"Error: {stderr}"
                    all_agent_results[name] = agent_out

                    yield emit({"type":"agent_done","name":name,
                                "preview":agent_out[:250],"ok":ok})

                # ── Decide: done or iterate? ──────────────────────────────
                has_actions = (exec_blocks or agent_blocks or
                               extract_blocks(raw, "save_memory") or
                               extract_blocks(raw, "search_memory") or
                               extract_blocks(raw, "save_skill"))

                if not has_actions:
                    # Pure text response — strip internal tags and stream it
                    # to the client in small chunks for a typing effect.
                    clean = strip_all_tags(raw)
                    yield emit({"type":"response_start"})

                    chunk_size = 8
                    for i in range(0, len(clean), chunk_size):
                        yield emit({"type":"token","content":clean[i:i+chunk_size]})
                        if i % 80 == 0:
                            await asyncio.sleep(0)

                    # Voice: a [SPEAK: ...] marker triggers TTS via gtts in
                    # the sandbox; failures are best-effort and ignored.
                    if "[SPEAK:" in clean:
                        try:
                            speak_text = clean.split("[SPEAK:")[1].rsplit("]",1)[0].strip()
                            # Generated script; speak_text is embedded via
                            # repr() to keep it a safe Python literal,
                            # truncated to 2000 chars.
                            voice_code = f"""
import sys, io, base64
sys.path.insert(0, '{PKG_DIR}')
from gtts import gTTS
tts = gTTS(text={repr(speak_text[:2000])}, lang='en', slow=False)
tts.save('voice_response.mp3')
print('Voice generated')
"""
                            loop = asyncio.get_event_loop()
                            vr = await loop.run_in_executor(
                                None, lambda: sandbox_run(voice_code, timeout=30)
                            )
                            for f in vr.get("files", []):
                                if f.get("ext") == "mp3":
                                    yield emit({"type":"audio_response",
                                                "audio_b64":f["b64"],
                                                "filename":"voice_response.mp3"})
                        except Exception:
                            # Best-effort voice; text answer already streamed.
                            pass

                    break

                else:
                    # Feed real results back to the LLM for the next iteration.
                    results_summary = ""
                    if all_exec_results:
                        results_summary += "EXECUTION RESULTS:\n"
                        # Only the 5 most recent results, each capped at 800 chars.
                        for k, v in list(all_exec_results.items())[-5:]:
                            results_summary += f"[{k}]: {v[:800]}\n\n"
                    if all_agent_results:
                        results_summary += "AGENT RESULTS:\n"
                        for name, out in all_agent_results.items():
                            results_summary += f"[{name}]: {out[:800]}\n\n"

                    messages.append({"role":"assistant","content":raw})
                    messages.append({"role":"user","content":(
                        f"{results_summary}\n"
                        f"Now give the user a clean, clear response based on these REAL results. "
                        f"Do NOT show code. Do NOT show <execute> tags. "
                        f"Just present the results/answer naturally in markdown."
                    )})

            # Always emitted once the loop breaks or MAX_ITER is exhausted.
            yield emit({"type":"done"})

        except Exception as e:
            # Top-level boundary: log full traceback server-side, send a
            # truncated detail to the client.
            tb = traceback.format_exc()
            print(f"[AGENT] Error: {e}\n{tb}")
            yield emit({"type":"error","message":str(e),"detail":tb[-600:]})
360
 
361
 
362
# Module-level singleton instance of the orchestrator.
orchestrator = AgentOrchestrator()