Sanyam400 commited on
Commit
8dc579c
·
verified ·
1 Parent(s): 31ccede

Update app/agent_system.py

Browse files
Files changed (1) hide show
  1. app/agent_system.py +201 -222
app/agent_system.py CHANGED
@@ -1,19 +1,17 @@
1
  """
2
- PraisonChat Agent System v5 β€” OpenClaw-style
3
- =============================================
4
- - Code interpreter loop (write β†’ execute β†’ reflect β†’ repeat)
5
- - Persistent memory + skills
6
- - Real sub-agents with real tool execution
7
- - Clean output: only results shown, internals hidden
8
- - Robust JSON error handling
9
  """
10
  import os, json, asyncio, datetime, traceback, re
11
  from openai import AsyncOpenAI
12
  from sandbox import run as sandbox_run, pip_install, PKG_DIR
13
  from docs_context import PRAISONAI_DOCS
14
  from memory import (
15
- get_memory_context, get_skills_context, save_memory,
16
- save_skill, list_skills, search_memories
17
  )
18
 
19
  LONGCAT_BASE = "https://api.longcat.chat/openai/v1"
@@ -25,338 +23,319 @@ MODEL_MAP = {
25
  DEFAULT_MODEL = "LongCat-Flash-Lite"
26
 
27
  def now_str():
28
- return datetime.datetime.now().strftime("%A, %B %d %Y at %I:%M:%S %p")
29
-
30
- def build_system(memory_ctx: str, skills_ctx: str) -> str:
31
- return f"""You are PraisonChat β€” a powerful autonomous AI agent.
32
- You have a real Python code interpreter, persistent memory, and can create/save reusable skills.
33
-
34
- Current datetime: {now_str()}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
35
  Python packages dir: {PKG_DIR}
36
 
37
- {memory_ctx}
 
38
 
39
- {skills_ctx}
 
40
 
41
  ## CODE INTERPRETER
42
- Write Python inside <execute> tags. It runs IMMEDIATELY with real results.
 
43
 
44
- Rules:
45
- - ALWAYS import sys and add PKG_DIR to path: sys.path.insert(0, '{PKG_DIR}')
46
- - Use duckduckgo_search for web searches (NOT google.com)
47
- - Save files (images, audio, PDFs) to current dir β€” they are returned to user
48
- - Use gtts for voice/audio generation
49
- - For voice: save as voice_response.mp3, it becomes a playable audio
50
- - For images: save as image.png, it becomes visible in chat
51
- - NEVER simulate or estimate data β€” always execute real code
52
 
53
- ## MEMORY SYSTEM
54
- Save important info:
55
- <save_memory key="user_preferences">User prefers dark mode, speaks English</save_memory>
 
 
 
 
 
56
 
57
- Search memory:
 
58
  <search_memory>user preferences</search_memory>
59
 
60
- ## SKILLS SYSTEM
61
- Save reusable code as a skill:
62
- <save_skill name="search_news" description="Search for latest news using DuckDuckGo">
63
  import sys
64
  sys.path.insert(0, '{PKG_DIR}')
65
- def search_news(query, max_results=5):
66
  from duckduckgo_search import DDGS
67
- with DDGS() as ddgs:
68
- return list(ddgs.news(query, max_results=max_results))
69
  </save_skill>
70
 
71
  ## SUB-AGENTS
72
- For complex tasks, spawn specialized sub-agents:
73
- <spawn_agent name="ResearchAgent" task="Find the top 5 AI papers from 2025">
74
  import sys
75
  sys.path.insert(0, '{PKG_DIR}')
76
  from duckduckgo_search import DDGS
77
- results = []
78
- with DDGS() as ddgs:
79
- for r in ddgs.text("top AI research papers 2025", max_results=8):
80
- results.append(r['title'] + ': ' + r['body'])
81
- print('\n'.join(results))
82
  </spawn_agent>
83
 
84
- Each <spawn_agent> executes its code independently and returns real results.
85
-
86
- ## RESPONSE FORMAT RULES
87
- - NEVER show raw code to the user in your final response
88
- - NEVER say "I executed the following code..."
89
- - JUST present the RESULTS clearly
90
- - Use markdown for formatting
91
- - For voice requests: put the text to speak in [SPEAK: text here]
92
- - For math: just show the answer
93
- - For search: show results in a clean list
94
 
95
- ## AVAILABLE PACKAGES (always installed)
96
  requests, httpx, duckduckgo-search, beautifulsoup4, gtts, pillow,
97
  matplotlib, numpy, pandas, qrcode, python-dateutil, pytz
98
 
99
- ## ADDITIONAL PACKAGES
100
- Just import any package β€” it gets auto-installed automatically.
101
-
102
  {PRAISONAI_DOCS}
103
  """
104
 
105
- def extract_blocks(text: str, tag: str) -> list[dict]:
106
- """Extract all <tag ...>content</tag> blocks."""
 
 
 
 
107
  results = []
108
  pattern = rf'<{tag}([^>]*)>(.*?)</{tag}>'
109
  for m in re.finditer(pattern, text, re.DOTALL):
110
- attrs_str = m.group(1).strip()
111
- content = m.group(2).strip()
112
- # Parse key=value attrs
113
  attrs = {}
114
- for a in re.finditer(r'(\w+)=["\']([^"\']*)["\']', attrs_str):
115
  attrs[a.group(1)] = a.group(2)
116
- results.append({"attrs": attrs, "content": content, "full": m.group(0)})
117
  return results
118
 
119
- def strip_all_tags(text: str) -> str:
120
- """Remove all agent internal tags from final response."""
121
- tags = ["execute", "spawn_agent", "save_memory", "search_memory", "save_skill"]
122
- for tag in tags:
123
  text = re.sub(rf'<{tag}[^>]*>.*?</{tag}>', '', text, flags=re.DOTALL)
124
- return text.strip()
125
 
126
 
127
  class AgentOrchestrator:
128
  def __init__(self):
129
- self._clients: dict = {}
130
 
131
- def client(self, api_key: str) -> AsyncOpenAI:
132
  if api_key not in self._clients:
133
  self._clients[api_key] = AsyncOpenAI(
134
  api_key=api_key, base_url=LONGCAT_BASE
135
  )
136
  return self._clients[api_key]
137
 
138
- async def _llm(self, client, messages: list, model: str, stream: bool = False):
139
- return await client.chat.completions.create(
140
- model=model, messages=messages,
141
- max_tokens=16000, temperature=0.7, stream=stream
142
- )
143
-
144
- async def stream_response(self, user_msg: str, history: list,
145
- api_key: str, model: str = DEFAULT_MODEL):
146
- def emit(d: dict) -> str:
147
- return json.dumps(d)
148
-
149
  model = MODEL_MAP.get(model, DEFAULT_MODEL)
150
  cl = self.client(api_key)
151
 
152
- mem_ctx = get_memory_context()
153
- skills_ctx = get_skills_context()
 
 
 
154
 
155
- messages = [{"role": "system", "content": build_system(mem_ctx, skills_ctx)}]
156
- for m in history[-14:]:
157
- messages.append({"role": m["role"],
158
- "content": str(m.get("content",""))[:3000]})
159
- messages.append({"role": "user", "content": user_msg})
160
 
161
- try:
162
- MAX_ITER = 7
163
- all_exec_results = {}
164
- all_agent_results = {}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
165
 
166
  for iteration in range(MAX_ITER):
167
- yield emit({"type": "thinking",
168
- "text": f"Working… (step {iteration+1})"})
169
  await asyncio.sleep(0)
170
 
171
- # ── LLM call ──────────────────────────────────────────────
172
- resp = await self._llm(cl, messages, model)
173
- raw = resp.choices[0].message.content or ""
 
 
174
 
175
- # ── Process special tags ──────────────────────────────────
176
-
177
- # 1. Memory saves
178
  for blk in extract_blocks(raw, "save_memory"):
179
- key = blk["attrs"].get("key", f"note_{iteration}")
180
- content = blk["content"]
181
- save_memory(key, content)
182
  yield emit({"type": "memory_saved", "key": key})
183
 
184
- # 2. Memory searches
185
  for blk in extract_blocks(raw, "search_memory"):
186
- query = blk["content"]
187
- result = search_memories(query)
188
- all_exec_results[f"memory:{query}"] = result
189
- yield emit({"type": "memory_search", "query": query,
190
- "result": result[:200]})
191
 
192
- # 3. Skill saves
193
  for blk in extract_blocks(raw, "save_skill"):
194
  name = blk["attrs"].get("name", f"skill_{iteration}")
195
- desc = blk["attrs"].get("description", "")
196
  save_skill(name, blk["content"], desc)
197
- yield emit({"type": "skill_saved", "name": name,
198
- "description": desc})
 
 
 
 
 
 
 
199
 
200
- # 4. Execute code blocks
201
- exec_blocks = extract_blocks(raw, "execute")
202
  for idx, blk in enumerate(exec_blocks):
203
- code = blk["content"]
204
  yield emit({"type": "executing", "index": idx})
205
  await asyncio.sleep(0)
206
-
207
  loop = asyncio.get_event_loop()
208
  result = await loop.run_in_executor(
209
- None, lambda c=code: sandbox_run(c, max_retries=3, timeout=60)
210
  )
211
-
212
- # Emit installs silently (activity panel only)
213
- for inst in result.get("installs", []):
214
- yield emit({"type": "pkg_install",
215
- "package": inst["package"],
216
- "ok": inst["ok"]})
217
-
218
- key = f"code_{iteration}_{idx}"
219
- stdout = result.get("stdout","").strip()
220
- stderr = result.get("stderr","").strip()
221
- ok = result.get("ok", False)
222
- files = result.get("files", [])
223
-
224
- # Emit output files (audio, images, downloads)
225
  for f in files:
226
- ext = f.get("ext","").lower()
227
- fname = f["name"]
228
- b64 = f["b64"]
229
  if ext in {"mp3","wav","ogg","m4a"}:
230
- yield emit({"type":"audio_response",
231
- "audio_b64":b64, "filename":fname})
232
  elif ext in {"png","jpg","jpeg","gif","webp","svg"}:
233
- yield emit({"type":"image_response",
234
- "image_b64":b64, "filename":fname, "ext":ext})
235
  else:
236
- yield emit({"type":"file_response",
237
- "file_b64":b64, "filename":fname,
238
- "size":f.get("size",0)})
239
-
240
- exec_result = stdout if ok else f"Error: {stderr}"
241
- all_exec_results[key] = exec_result
242
-
243
- yield emit({"type": "exec_done",
244
- "ok": ok,
245
- "output": exec_result[:300],
246
- "files": [f["name"] for f in files]})
247
 
248
- # 5. Spawn sub-agents
249
- agent_blocks = extract_blocks(raw, "spawn_agent")
250
  for blk in agent_blocks:
251
  name = blk["attrs"].get("name", f"Agent_{iteration}")
252
- task = blk["attrs"].get("task", "Execute task")
253
- code = blk["content"]
254
-
255
  yield emit({"type":"agent_created","name":name,"task":task[:100]})
256
- await asyncio.sleep(0)
257
-
258
  yield emit({"type":"agent_working","name":name})
259
-
260
  loop = asyncio.get_event_loop()
261
  result = await loop.run_in_executor(
262
- None, lambda c=code: sandbox_run(c, max_retries=3, timeout=90)
263
  )
264
-
265
- for inst in result.get("installs", []):
266
- yield emit({"type":"pkg_install",
267
- "package":inst["package"],"ok":inst["ok"]})
268
-
269
  stdout = result.get("stdout","").strip()
270
- stderr = result.get("stderr","").strip()
271
  ok = result.get("ok", False)
272
- files = result.get("files", [])
273
-
274
  for f in files:
275
- ext = f.get("ext","").lower()
276
- b64 = f["b64"]
277
  if ext in {"mp3","wav","ogg"}:
278
  yield emit({"type":"audio_response","audio_b64":b64,"filename":f["name"]})
279
  elif ext in {"png","jpg","jpeg","gif","webp"}:
280
  yield emit({"type":"image_response","image_b64":b64,"filename":f["name"],"ext":ext})
281
  else:
282
  yield emit({"type":"file_response","file_b64":b64,"filename":f["name"],"size":f.get("size",0)})
 
 
 
283
 
284
- agent_out = stdout if ok else f"Error: {stderr}"
285
- all_agent_results[name] = agent_out
286
-
287
- yield emit({"type":"agent_done","name":name,
288
- "preview":agent_out[:250],"ok":ok})
289
-
290
- # ── Decide: done or iterate? ──────────────────────────────
291
- has_actions = (exec_blocks or agent_blocks or
292
- extract_blocks(raw, "save_memory") or
293
- extract_blocks(raw, "search_memory") or
294
- extract_blocks(raw, "save_skill"))
295
-
296
  if not has_actions:
297
- # Pure text response β€” strip tags and stream it
298
- clean = strip_all_tags(raw)
299
- yield emit({"type":"response_start"})
 
300
 
301
- # Stream character by character for smooth effect
302
- chunk_size = 8
303
  for i in range(0, len(clean), chunk_size):
304
- yield emit({"type":"token","content":clean[i:i+chunk_size]})
305
- if i % 80 == 0:
306
- await asyncio.sleep(0)
307
 
308
  # Handle voice
309
- if "[SPEAK:" in clean:
310
  try:
311
- speak_text = clean.split("[SPEAK:")[1].rsplit("]",1)[0].strip()
312
  voice_code = f"""
313
  import sys, io, base64
314
  sys.path.insert(0, '{PKG_DIR}')
315
  from gtts import gTTS
316
  tts = gTTS(text={repr(speak_text[:2000])}, lang='en', slow=False)
317
  tts.save('voice_response.mp3')
318
- print('Voice generated')
319
  """
320
- loop = asyncio.get_event_loop()
321
- vr = await loop.run_in_executor(
322
  None, lambda: sandbox_run(voice_code, timeout=30)
323
  )
324
- for f in vr.get("files", []):
325
- if f.get("ext") == "mp3":
326
- yield emit({"type":"audio_response",
327
- "audio_b64":f["b64"],
328
- "filename":"voice_response.mp3"})
329
  except Exception:
330
  pass
331
-
332
  break
333
 
334
  else:
335
- # Feed results back to LLM for next iteration
336
- results_summary = ""
337
- if all_exec_results:
338
- results_summary += "EXECUTION RESULTS:\n"
339
- for k, v in list(all_exec_results.items())[-5:]:
340
- results_summary += f"[{k}]: {v[:800]}\n\n"
341
- if all_agent_results:
342
- results_summary += "AGENT RESULTS:\n"
343
- for name, out in all_agent_results.items():
344
- results_summary += f"[{name}]: {out[:800]}\n\n"
345
-
346
- messages.append({"role":"assistant","content":raw})
347
- messages.append({"role":"user","content":(
348
- f"{results_summary}\n"
349
- f"Now give the user a clean, clear response based on these REAL results. "
350
- f"Do NOT show code. Do NOT show <execute> tags. "
351
- f"Just present the results/answer naturally in markdown."
352
  )})
353
 
354
- yield emit({"type":"done"})
355
 
356
  except Exception as e:
357
  tb = traceback.format_exc()
358
- print(f"[AGENT] Error: {e}\n{tb}")
359
- yield emit({"type":"error","message":str(e),"detail":tb[-600:]})
 
 
 
360
 
361
 
362
  orchestrator = AgentOrchestrator()
 
1
  """
2
+ PraisonChat Agent System v6 — Reliable
3
+ =======================================
4
+ - Fast path for simple messages (no code-execution overhead)
5
+ - Agentic loop only for tasks that need it
6
+ - No silent failures — always emits a response
 
 
7
  """
8
  import os, json, asyncio, datetime, traceback, re
9
  from openai import AsyncOpenAI
10
  from sandbox import run as sandbox_run, pip_install, PKG_DIR
11
  from docs_context import PRAISONAI_DOCS
12
  from memory import (
13
+ get_memory_context, get_skills_context,
14
+ save_memory, save_skill, search_memories
15
  )
16
 
17
  LONGCAT_BASE = "https://api.longcat.chat/openai/v1"
 
23
  DEFAULT_MODEL = "LongCat-Flash-Lite"
24
 
25
def now_str():
    """Return the current local date and time as a human-readable string."""
    current = datetime.datetime.now()
    return current.strftime("%A, %B %d %Y %I:%M:%S %p")
27
+
28
# ── Detect if a task needs code execution ──────────────────────────────────
NEEDS_CODE_KEYWORDS = [
    "search", "find", "look up", "lookup", "browse", "web",
    "time", "date", "today", "weather", "news", "latest",
    "calculate", "math", "compute", "solve",
    "create", "make", "generate", "build", "draw", "plot", "chart", "graph",
    "image", "picture", "photo", "qr code", "qr",
    "voice", "audio", "speak", "say", "read aloud",
    "code", "script", "python", "run", "execute",
    "file", "pdf", "download", "save",
    "fetch", "scrape", "extract", "parse",
    "install", "pip",
    "agent", "sub-agent", "spawn",
    "memory", "remember", "recall",
    "skill", "tool",
]

# Compiled once at import time. Word-boundary anchors prevent false positives
# from substrings of ordinary words ("sometimes" contains "time", "essay"
# contains "say"), which would otherwise route trivial chat messages onto the
# slow agentic code-execution path.
_NEEDS_CODE_RE = re.compile(
    r"\b(?:" + "|".join(re.escape(kw) for kw in NEEDS_CODE_KEYWORDS) + r")\b"
)


def needs_code(msg: str) -> bool:
    """Return True if *msg* looks like a task that needs the agentic
    (code-execution) path rather than the fast plain-chat path.

    Matching is case-insensitive and word-boundary aware, so keywords only
    fire when they appear as whole words or phrases in the message.
    """
    return _NEEDS_CODE_RE.search(msg.lower()) is not None
48
+
49
+
50
# System prompt for the fast (no-code) conversational path.
# {DATETIME} is substituted at request time. The em dash was previously
# mojibake ("β€”", a UTF-8 em dash mis-decoded as Latin-1) in this
# user-visible prompt; restored to the intended "—".
SIMPLE_SYSTEM = """You are PraisonChat — a helpful, intelligent AI assistant.
Current datetime: {DATETIME}
Be concise, friendly, and helpful. Use markdown for formatting when useful."""
53
+
54
+ AGENT_SYSTEM = f"""You are PraisonChat β€” a powerful autonomous AI agent with a real Python interpreter.
55
+ Current datetime: {{DATETIME}}
56
  Python packages dir: {PKG_DIR}
57
 
58
+ ## MEMORY
59
+ {{MEMORY}}
60
 
61
+ ## AVAILABLE SKILLS
62
+ {{SKILLS}}
63
 
64
  ## CODE INTERPRETER
65
+ Write Python in <execute> tags β€” it runs with REAL results immediately.
66
+ Always add: import sys; sys.path.insert(0, '{PKG_DIR}')
67
 
68
+ Examples:
69
+ <execute>
70
+ import sys, datetime
71
+ sys.path.insert(0, '{PKG_DIR}')
72
+ print(datetime.datetime.now().strftime("%A, %B %d %Y %I:%M:%S %p"))
73
+ </execute>
 
 
74
 
75
+ <execute>
76
+ import sys
77
+ sys.path.insert(0, '{PKG_DIR}')
78
+ from duckduckgo_search import DDGS
79
+ with DDGS() as d:
80
+ for r in d.text("latest AI news 2025", max_results=4):
81
+ print(r['title'], '-', r['body'][:100])
82
+ </execute>
83
 
84
+ ## MEMORY SYSTEM
85
+ <save_memory key="user_name">John</save_memory>
86
  <search_memory>user preferences</search_memory>
87
 
88
+ ## SKILLS SYSTEM
89
+ <save_skill name="search_web" description="Search DuckDuckGo">
 
90
  import sys
91
  sys.path.insert(0, '{PKG_DIR}')
92
+ def search_web(query, n=5):
93
  from duckduckgo_search import DDGS
94
+ with DDGS() as d:
95
+ return list(d.text(query, max_results=n))
96
  </save_skill>
97
 
98
  ## SUB-AGENTS
99
+ <spawn_agent name="Researcher" task="Find AI news">
 
100
  import sys
101
  sys.path.insert(0, '{PKG_DIR}')
102
  from duckduckgo_search import DDGS
103
+ with DDGS() as d:
104
+ for r in d.text("AI news 2025", max_results=5):
105
+ print(r['title'])
 
 
106
  </spawn_agent>
107
 
108
+ ## OUTPUT RULES
109
+ - NEVER show <execute> tags or code in the final response
110
+ - Present only clean results to the user
111
+ - Use markdown formatting
112
+ - For voice: put text in [SPEAK: your text here]
113
+ - For images/audio/files: they are automatically sent
 
 
 
 
114
 
115
+ ## PRE-INSTALLED PACKAGES
116
  requests, httpx, duckduckgo-search, beautifulsoup4, gtts, pillow,
117
  matplotlib, numpy, pandas, qrcode, python-dateutil, pytz
118
 
 
 
 
119
  {PRAISONAI_DOCS}
120
  """
121
 
122
def get_agent_system(memory_ctx, skills_ctx):
    """Fill the agent system-prompt template with the live datetime plus the
    memory and skills context, falling back to friendly placeholder text when
    either context string is empty or None."""
    substitutions = {
        "{DATETIME}": now_str(),
        "{MEMORY}": memory_ctx or "No memories yet.",
        "{SKILLS}": skills_ctx or "No skills yet.",
    }
    prompt = AGENT_SYSTEM
    for placeholder, value in substitutions.items():
        prompt = prompt.replace(placeholder, value)
    return prompt
126
+
127
def extract_blocks(text, tag):
    """Find every ``<tag attr="...">body</tag>`` span in *text*.

    Returns a list of dicts, one per match, each with:
      "attrs"   - dict of key="value" / key='value' attributes from the
                  opening tag
      "content" - the inner body with surrounding whitespace stripped
    """
    block_re = re.compile(rf'<{tag}([^>]*)>(.*?)</{tag}>', re.DOTALL)
    attr_re = re.compile(r'(\w+)=["\']([^"\']*)["\']')
    return [
        {
            "attrs": {a.group(1): a.group(2)
                      for a in attr_re.finditer(match.group(1))},
            "content": match.group(2).strip(),
        }
        for match in block_re.finditer(text)
    ]
136
 
137
def strip_tags(text):
    """Remove every internal agent tag block and any ``[SPEAK: ...]``
    directive from *text*, returning the cleaned user-facing response
    stripped of surrounding whitespace."""
    internal_tags = ("execute", "spawn_agent", "save_memory",
                     "search_memory", "save_skill")
    cleaned = text
    for tag in internal_tags:
        cleaned = re.sub(rf'<{tag}[^>]*>.*?</{tag}>', '', cleaned,
                         flags=re.DOTALL)
    cleaned = re.sub(r'\[SPEAK:.*?\]', '', cleaned, flags=re.DOTALL)
    return cleaned.strip()
141
 
142
 
143
  class AgentOrchestrator:
144
  def __init__(self):
145
+ self._clients = {}
146
 
147
+ def client(self, api_key):
148
  if api_key not in self._clients:
149
  self._clients[api_key] = AsyncOpenAI(
150
  api_key=api_key, base_url=LONGCAT_BASE
151
  )
152
  return self._clients[api_key]
153
 
154
+ async def stream_response(self, user_msg, history, api_key, model=DEFAULT_MODEL):
155
+ def emit(d): return json.dumps(d)
 
 
 
 
 
 
 
 
 
156
  model = MODEL_MAP.get(model, DEFAULT_MODEL)
157
  cl = self.client(api_key)
158
 
159
+ try:
160
+ # ── FAST PATH: simple conversation (no code needed) ───────────
161
+ if not needs_code(user_msg):
162
+ yield emit({"type": "thinking", "text": "Responding…"})
163
+ await asyncio.sleep(0)
164
 
165
+ messages = [{"role": "system", "content": SIMPLE_SYSTEM.replace("{DATETIME}", now_str())}]
166
+ for m in history[-12:]:
167
+ messages.append({"role": m["role"], "content": str(m.get("content",""))[:2000]})
168
+ messages.append({"role": "user", "content": user_msg})
 
169
 
170
+ yield emit({"type": "response_start"})
171
+ stream = await cl.chat.completions.create(
172
+ model=model, messages=messages,
173
+ max_tokens=4000, temperature=0.7, stream=True
174
+ )
175
+ async for chunk in stream:
176
+ c = chunk.choices[0].delta.content
177
+ if c:
178
+ yield emit({"type": "token", "content": c})
179
+
180
+ yield emit({"type": "done"})
181
+ return
182
+
183
+ # ── AGENT PATH: task needs code execution ─────────────────────
184
+ mem_ctx = get_memory_context()
185
+ skills_ctx = get_skills_context()
186
+
187
+ messages = [{"role": "system", "content": get_agent_system(mem_ctx, skills_ctx)}]
188
+ for m in history[-12:]:
189
+ messages.append({"role": m["role"], "content": str(m.get("content",""))[:2000]})
190
+ messages.append({"role": "user", "content": user_msg})
191
+
192
+ MAX_ITER = 5
193
+ all_results = {}
194
 
195
  for iteration in range(MAX_ITER):
196
+ yield emit({"type": "thinking", "text": f"Working… (step {iteration+1})"})
 
197
  await asyncio.sleep(0)
198
 
199
+ resp = await cl.chat.completions.create(
200
+ model=model, messages=messages,
201
+ max_tokens=8000, temperature=0.7
202
+ )
203
+ raw = resp.choices[0].message.content or ""
204
 
205
+ # Process memory ops
 
 
206
  for blk in extract_blocks(raw, "save_memory"):
207
+ key = blk["attrs"].get("key", f"note_{iteration}")
208
+ save_memory(key, blk["content"])
 
209
  yield emit({"type": "memory_saved", "key": key})
210
 
 
211
  for blk in extract_blocks(raw, "search_memory"):
212
+ result = search_memories(blk["content"])
213
+ all_results[f"mem:{blk['content']}"] = result
 
 
 
214
 
215
+ # Process skill saves
216
  for blk in extract_blocks(raw, "save_skill"):
217
  name = blk["attrs"].get("name", f"skill_{iteration}")
218
+ desc = blk["attrs"].get("description","")
219
  save_skill(name, blk["content"], desc)
220
+ yield emit({"type": "skill_saved", "name": name, "description": desc})
221
+
222
+ # Process code blocks
223
+ exec_blocks = extract_blocks(raw, "execute")
224
+ agent_blocks = extract_blocks(raw, "spawn_agent")
225
+ has_actions = bool(exec_blocks or agent_blocks or
226
+ extract_blocks(raw,"save_memory") or
227
+ extract_blocks(raw,"search_memory") or
228
+ extract_blocks(raw,"save_skill"))
229
 
 
 
230
  for idx, blk in enumerate(exec_blocks):
 
231
  yield emit({"type": "executing", "index": idx})
232
  await asyncio.sleep(0)
 
233
  loop = asyncio.get_event_loop()
234
  result = await loop.run_in_executor(
235
+ None, lambda c=blk["content"]: sandbox_run(c, max_retries=3, timeout=60)
236
  )
237
+ for inst in result.get("installs",[]):
238
+ yield emit({"type":"pkg_install","package":inst["package"],"ok":inst["ok"]})
239
+ stdout = result.get("stdout","").strip()
240
+ stderr = result.get("stderr","").strip()
241
+ ok = result.get("ok", False)
242
+ files = result.get("files", [])
 
 
 
 
 
 
 
 
243
  for f in files:
244
+ ext = f.get("ext","").lower()
245
+ b64 = f["b64"]
 
246
  if ext in {"mp3","wav","ogg","m4a"}:
247
+ yield emit({"type":"audio_response","audio_b64":b64,"filename":f["name"]})
 
248
  elif ext in {"png","jpg","jpeg","gif","webp","svg"}:
249
+ yield emit({"type":"image_response","image_b64":b64,"filename":f["name"],"ext":ext})
 
250
  else:
251
+ yield emit({"type":"file_response","file_b64":b64,"filename":f["name"],"size":f.get("size",0)})
252
+ out_txt = stdout if ok else f"Error: {stderr}"
253
+ all_results[f"code_{iteration}_{idx}"] = out_txt
254
+ yield emit({"type":"exec_done","ok":ok,"output":out_txt[:300],"files":[f["name"] for f in files]})
 
 
 
 
 
 
 
255
 
 
 
256
  for blk in agent_blocks:
257
  name = blk["attrs"].get("name", f"Agent_{iteration}")
258
+ task = blk["attrs"].get("task","")
 
 
259
  yield emit({"type":"agent_created","name":name,"task":task[:100]})
 
 
260
  yield emit({"type":"agent_working","name":name})
 
261
  loop = asyncio.get_event_loop()
262
  result = await loop.run_in_executor(
263
+ None, lambda c=blk["content"]: sandbox_run(c, max_retries=3, timeout=90)
264
  )
265
+ for inst in result.get("installs",[]):
266
+ yield emit({"type":"pkg_install","package":inst["package"],"ok":inst["ok"]})
 
 
 
267
  stdout = result.get("stdout","").strip()
 
268
  ok = result.get("ok", False)
269
+ files = result.get("files",[])
 
270
  for f in files:
271
+ ext=f.get("ext","").lower();b64=f["b64"]
 
272
  if ext in {"mp3","wav","ogg"}:
273
  yield emit({"type":"audio_response","audio_b64":b64,"filename":f["name"]})
274
  elif ext in {"png","jpg","jpeg","gif","webp"}:
275
  yield emit({"type":"image_response","image_b64":b64,"filename":f["name"],"ext":ext})
276
  else:
277
  yield emit({"type":"file_response","file_b64":b64,"filename":f["name"],"size":f.get("size",0)})
278
+ out_txt = stdout if ok else f"Error: {result.get('stderr','')}"
279
+ all_results[name] = out_txt
280
+ yield emit({"type":"agent_done","name":name,"preview":out_txt[:250],"ok":ok})
281
 
282
+ # ── Decide: done or iterate ──────────────────────────────
 
 
 
 
 
 
 
 
 
 
 
283
  if not has_actions:
284
+ # Final clean response
285
+ clean = strip_tags(raw)
286
+ if not clean.strip():
287
+ clean = "Done! Let me know if you need anything else."
288
 
289
+ yield emit({"type": "response_start"})
290
+ chunk_size = 6
291
  for i in range(0, len(clean), chunk_size):
292
+ yield emit({"type": "token", "content": clean[i:i+chunk_size]})
293
+ if i % 60 == 0: await asyncio.sleep(0)
 
294
 
295
  # Handle voice
296
+ if "[SPEAK:" in raw:
297
  try:
298
+ speak_text = raw.split("[SPEAK:")[1].rsplit("]",1)[0].strip()
299
  voice_code = f"""
300
  import sys, io, base64
301
  sys.path.insert(0, '{PKG_DIR}')
302
  from gtts import gTTS
303
  tts = gTTS(text={repr(speak_text[:2000])}, lang='en', slow=False)
304
  tts.save('voice_response.mp3')
305
+ print('ok')
306
  """
307
+ loop = asyncio.get_event_loop()
308
+ vr = await loop.run_in_executor(
309
  None, lambda: sandbox_run(voice_code, timeout=30)
310
  )
311
+ for f in vr.get("files",[]):
312
+ if f.get("ext")=="mp3":
313
+ yield emit({"type":"audio_response","audio_b64":f["b64"],"filename":"voice_response.mp3"})
 
 
314
  except Exception:
315
  pass
 
316
  break
317
 
318
  else:
319
+ # Feed results back
320
+ results_text = "REAL EXECUTION RESULTS:\n\n"
321
+ for k, v in list(all_results.items())[-4:]:
322
+ results_text += f"[{k}]:\n{v[:1000]}\n\n"
323
+ messages.append({"role": "assistant", "content": raw})
324
+ messages.append({"role": "user", "content": (
325
+ f"{results_text}"
326
+ f"Now give the user a clean final answer based on these REAL results. "
327
+ f"Do NOT use <execute> tags. Do NOT show code. Just present the results clearly in markdown."
 
 
 
 
 
 
 
 
328
  )})
329
 
330
+ yield emit({"type": "done"})
331
 
332
  except Exception as e:
333
  tb = traceback.format_exc()
334
+ print(f"[AGENT] {e}\n{tb}")
335
+ # Always emit a response so the user sees something
336
+ yield emit({"type": "response_start"})
337
+ yield emit({"type": "token", "content": f"❌ Error: {str(e)}\n\nPlease check your LongCat API key in Settings."})
338
+ yield emit({"type": "done"})
339
 
340
 
341
  orchestrator = AgentOrchestrator()