Update app/agent_system.py
Browse files- app/agent_system.py +283 -186
app/agent_system.py
CHANGED
|
@@ -1,14 +1,20 @@
|
|
| 1 |
"""
|
| 2 |
-
PraisonChat
|
| 3 |
-
======================================
|
| 4 |
-
|
| 5 |
-
|
|
|
|
|
|
|
|
|
|
| 6 |
"""
|
| 7 |
-
import os, json, asyncio, datetime, traceback,
|
| 8 |
from openai import AsyncOpenAI
|
| 9 |
-
from typing import AsyncGenerator
|
| 10 |
from sandbox import run as sandbox_run, pip_install, PKG_DIR
|
| 11 |
from docs_context import PRAISONAI_DOCS
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
|
| 13 |
LONGCAT_BASE = "https://api.longcat.chat/openai/v1"
|
| 14 |
MODEL_MAP = {
|
|
@@ -18,79 +24,104 @@ MODEL_MAP = {
|
|
| 18 |
}
|
| 19 |
DEFAULT_MODEL = "LongCat-Flash-Lite"
|
| 20 |
|
| 21 |
-
def
|
| 22 |
-
return datetime.datetime.now().strftime("%A %B %d %Y %I:%M:%S %p")
|
| 23 |
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
You
|
| 27 |
|
| 28 |
-
Current datetime: {
|
| 29 |
-
Python
|
| 30 |
|
| 31 |
-
|
| 32 |
|
| 33 |
-
|
| 34 |
-
draw charts, etc.) β write Python code inside <execute> tags:
|
| 35 |
|
| 36 |
-
|
| 37 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 38 |
import sys
|
| 39 |
sys.path.insert(0, '{PKG_DIR}')
|
| 40 |
from duckduckgo_search import DDGS
|
| 41 |
-
|
| 42 |
with DDGS() as ddgs:
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
print()
|
| 48 |
-
</execute>
|
| 49 |
-
|
| 50 |
-
The code runs IMMEDIATELY in a real Python environment.
|
| 51 |
-
You will see the ACTUAL output and can write more code based on it.
|
| 52 |
-
|
| 53 |
-
## RULES
|
| 54 |
-
1. ALWAYS use <execute> for: web search, date/time, calculations, file creation, audio, images
|
| 55 |
-
2. Never say "I cannot" β instead write code that does it
|
| 56 |
-
3. If a library is missing, just import it β it gets auto-installed
|
| 57 |
-
4. To generate VOICE: use gtts and save as .mp3, then it gets returned as audio
|
| 58 |
-
5. To create FILES (images, PDFs, etc.): save to current dir, they get returned automatically
|
| 59 |
-
6. You can pip install ANYTHING: just `import subprocess; subprocess.run(["pip","install","X"])`
|
| 60 |
-
OR just import it directly β auto-install handles it
|
| 61 |
-
7. Chain multiple <execute> blocks if needed
|
| 62 |
-
8. After seeing results, explain them clearly to the user
|
| 63 |
-
|
| 64 |
-
## AVAILABLE PRE-INSTALLED PACKAGES
|
| 65 |
-
requests, httpx, beautifulsoup4, duckduckgo-search, gtts, pillow, matplotlib,
|
| 66 |
-
numpy, pandas, qrcode, python-dateutil, pytz
|
| 67 |
-
|
| 68 |
-
## RESPONSE FORMATS
|
| 69 |
-
- For voice/audio requests: save MP3 to file, it will be sent as audio
|
| 70 |
-
- For image requests: save PNG/JPG, it will be displayed
|
| 71 |
-
- For data: print clearly formatted output
|
| 72 |
-
- For files: save to current directory, they get returned
|
| 73 |
|
| 74 |
-
|
| 75 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 76 |
|
| 77 |
-
|
| 78 |
-
|
|
|
|
| 79 |
|
| 80 |
-
#
|
| 81 |
-
|
| 82 |
-
return re.findall(r'<execute>(.*?)</execute>', text, re.DOTALL)
|
| 83 |
|
| 84 |
-
|
| 85 |
-
|
| 86 |
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 94 |
|
| 95 |
|
| 96 |
class AgentOrchestrator:
|
|
@@ -104,162 +135,228 @@ class AgentOrchestrator:
|
|
| 104 |
)
|
| 105 |
return self._clients[api_key]
|
| 106 |
|
| 107 |
-
async def
|
| 108 |
-
|
| 109 |
-
stream = await client.chat.completions.create(
|
| 110 |
model=model, messages=messages,
|
| 111 |
-
max_tokens=16000, temperature=0.7, stream=
|
| 112 |
)
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
async for chunk in stream:
|
| 117 |
-
c = chunk.choices[0].delta.content
|
| 118 |
-
if c:
|
| 119 |
-
full += c
|
| 120 |
-
yield c
|
| 121 |
-
return full, gen()
|
| 122 |
-
|
| 123 |
-
async def stream_response(
|
| 124 |
-
self, user_msg: str, history: list,
|
| 125 |
-
api_key: str, model: str = DEFAULT_MODEL
|
| 126 |
-
) -> AsyncGenerator:
|
| 127 |
def emit(d: dict) -> str:
|
| 128 |
return json.dumps(d)
|
| 129 |
|
| 130 |
-
model
|
| 131 |
-
cl
|
| 132 |
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
|
|
|
|
|
|
|
|
|
| 137 |
messages.append({"role": "user", "content": user_msg})
|
| 138 |
|
| 139 |
try:
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
final_text = ""
|
| 144 |
|
| 145 |
-
|
| 146 |
-
iteration += 1
|
| 147 |
yield emit({"type": "thinking",
|
| 148 |
-
"text": f"
|
| 149 |
await asyncio.sleep(0)
|
| 150 |
|
| 151 |
-
#
|
| 152 |
-
|
| 153 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 154 |
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
async for chunk in stream:
|
| 160 |
-
c = chunk.choices[0].delta.content
|
| 161 |
-
if c:
|
| 162 |
-
collected += c
|
| 163 |
-
yield emit({"type": "token", "content": c})
|
| 164 |
|
| 165 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 166 |
|
| 167 |
-
|
| 168 |
-
|
|
|
|
|
|
|
|
|
|
| 169 |
|
| 170 |
-
|
| 171 |
-
|
| 172 |
-
|
| 173 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 174 |
|
| 175 |
-
|
| 176 |
-
|
| 177 |
|
| 178 |
-
|
| 179 |
-
|
| 180 |
-
|
| 181 |
-
"
|
| 182 |
-
|
| 183 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 184 |
await asyncio.sleep(0)
|
| 185 |
|
| 186 |
-
|
| 187 |
-
|
|
|
|
| 188 |
result = await loop.run_in_executor(
|
| 189 |
-
None, lambda c=code: sandbox_run(c, max_retries=3, timeout=
|
| 190 |
)
|
| 191 |
|
| 192 |
-
# Emit install events
|
| 193 |
for inst in result.get("installs", []):
|
| 194 |
-
yield emit({"type":
|
| 195 |
-
"package":
|
| 196 |
-
"ok": inst["ok"],
|
| 197 |
-
"msg": inst["msg"]})
|
| 198 |
|
| 199 |
-
stdout = result.get("stdout",
|
| 200 |
-
stderr = result.get("stderr",
|
| 201 |
ok = result.get("ok", False)
|
| 202 |
files = result.get("files", [])
|
| 203 |
|
| 204 |
-
yield emit({"type": "code_result",
|
| 205 |
-
"ok": ok,
|
| 206 |
-
"stdout": stdout[:1000],
|
| 207 |
-
"stderr": stderr[:500] if not ok else "",
|
| 208 |
-
"files": [{"name": f["name"], "ext": f["ext"], "size": f["size"]}
|
| 209 |
-
for f in files]})
|
| 210 |
-
|
| 211 |
-
# ββ Handle output files ββββββββββββββββββββββββββββββββββ
|
| 212 |
for f in files:
|
| 213 |
-
|
| 214 |
-
|
| 215 |
-
|
| 216 |
-
|
| 217 |
-
|
| 218 |
-
|
| 219 |
-
yield emit({"type": "image_response",
|
| 220 |
-
"image_b64": f["b64"],
|
| 221 |
-
"filename": f["name"],
|
| 222 |
-
"ext": f["ext"]})
|
| 223 |
else:
|
| 224 |
-
yield emit({"type":
|
| 225 |
-
|
| 226 |
-
|
| 227 |
-
|
| 228 |
-
|
| 229 |
-
|
| 230 |
-
|
| 231 |
-
|
| 232 |
-
|
| 233 |
-
|
| 234 |
-
|
| 235 |
-
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
|
| 239 |
-
|
| 240 |
-
|
| 241 |
-
|
| 242 |
-
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
|
| 247 |
-
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
|
| 253 |
-
|
| 254 |
-
|
| 255 |
-
|
| 256 |
-
|
| 257 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 258 |
|
| 259 |
except Exception as e:
|
| 260 |
tb = traceback.format_exc()
|
| 261 |
print(f"[AGENT] Error: {e}\n{tb}")
|
| 262 |
-
yield emit({"type":
|
| 263 |
|
| 264 |
|
| 265 |
orchestrator = AgentOrchestrator()
|
|
|
|
| 1 |
"""
|
| 2 |
+
PraisonChat Agent System v5 β OpenClaw-style
|
| 3 |
+
=============================================
|
| 4 |
+
- Code interpreter loop (write β execute β reflect β repeat)
|
| 5 |
+
- Persistent memory + skills
|
| 6 |
+
- Real sub-agents with real tool execution
|
| 7 |
+
- Clean output: only results shown, internals hidden
|
| 8 |
+
- Robust JSON error handling
|
| 9 |
"""
|
| 10 |
+
import os, json, asyncio, datetime, traceback, re
|
| 11 |
from openai import AsyncOpenAI
|
|
|
|
| 12 |
from sandbox import run as sandbox_run, pip_install, PKG_DIR
|
| 13 |
from docs_context import PRAISONAI_DOCS
|
| 14 |
+
from memory import (
|
| 15 |
+
get_memory_context, get_skills_context, save_memory,
|
| 16 |
+
save_skill, list_skills, search_memories
|
| 17 |
+
)
|
| 18 |
|
| 19 |
LONGCAT_BASE = "https://api.longcat.chat/openai/v1"
|
| 20 |
MODEL_MAP = {
|
|
|
|
| 24 |
}
|
| 25 |
DEFAULT_MODEL = "LongCat-Flash-Lite"
|
| 26 |
|
| 27 |
+
def now_str():
    """Return the current local date and time as a human-readable string."""
    current = datetime.datetime.now()
    return current.strftime("%A, %B %d %Y at %I:%M:%S %p")
|
| 29 |
|
| 30 |
+
def build_system(memory_ctx: str, skills_ctx: str) -> str:
    """Build the agent's system prompt.

    Interpolates the current datetime, the sandbox package directory,
    the persistent-memory context, the saved-skills context, and the
    PraisonAI docs into one prompt that defines the tag protocol
    (<execute>, <save_memory>, <search_memory>, <save_skill>,
    <spawn_agent>) that the orchestrator later parses out of model output.

    NOTE(review): the prompt text below was reconstructed from a garbled
    paste — em dashes and indentation inside the examples should be
    confirmed against the original file.
    """
    return f"""You are PraisonChat — a powerful autonomous AI agent.
You have a real Python code interpreter, persistent memory, and can create/save reusable skills.

Current datetime: {now_str()}
Python packages dir: {PKG_DIR}

{memory_ctx}

{skills_ctx}

## CODE INTERPRETER
Write Python inside <execute> tags. It runs IMMEDIATELY with real results.

Rules:
- ALWAYS import sys and add PKG_DIR to path: sys.path.insert(0, '{PKG_DIR}')
- Use duckduckgo_search for web searches (NOT google.com)
- Save files (images, audio, PDFs) to current dir — they are returned to user
- Use gtts for voice/audio generation
- For voice: save as voice_response.mp3, it becomes a playable audio
- For images: save as image.png, it becomes visible in chat
- NEVER simulate or estimate data — always execute real code

## MEMORY SYSTEM
Save important info:
<save_memory key="user_preferences">User prefers dark mode, speaks English</save_memory>

Search memory:
<search_memory>user preferences</search_memory>

## SKILLS SYSTEM
Save reusable code as a skill:
<save_skill name="search_news" description="Search for latest news using DuckDuckGo">
import sys
sys.path.insert(0, '{PKG_DIR}')
def search_news(query, max_results=5):
    from duckduckgo_search import DDGS
    with DDGS() as ddgs:
        return list(ddgs.news(query, max_results=max_results))
</save_skill>

## SUB-AGENTS
For complex tasks, spawn specialized sub-agents:
<spawn_agent name="ResearchAgent" task="Find the top 5 AI papers from 2025">
import sys
sys.path.insert(0, '{PKG_DIR}')
from duckduckgo_search import DDGS
results = []
with DDGS() as ddgs:
    for r in ddgs.text("top AI research papers 2025", max_results=8):
        results.append(r['title'] + ': ' + r['body'])
print('\n'.join(results))
</spawn_agent>

Each <spawn_agent> executes its code independently and returns real results.

## RESPONSE FORMAT RULES
- NEVER show raw code to the user in your final response
- NEVER say "I executed the following code..."
- JUST present the RESULTS clearly
- Use markdown for formatting
- For voice requests: put the text to speak in [SPEAK: text here]
- For math: just show the answer
- For search: show results in a clean list

## AVAILABLE PACKAGES (always installed)
requests, httpx, duckduckgo-search, beautifulsoup4, gtts, pillow,
matplotlib, numpy, pandas, qrcode, python-dateutil, pytz

## ADDITIONAL PACKAGES
Just import any package — it gets auto-installed automatically.

{PRAISONAI_DOCS}
"""
|
| 104 |
|
| 105 |
+
def extract_blocks(text: str, tag: str) -> list[dict]:
    """Find every <tag ...>content</tag> span in *text*.

    Returns one dict per occurrence:
      - "attrs":   key="value" attributes parsed from the opening tag
      - "content": the inner text, stripped
      - "full":    the entire matched span, tags included
    """
    block_re = rf'<{tag}([^>]*)>(.*?)</{tag}>'
    attr_re = r'(\w+)=["\']([^"\']*)["\']'
    found = []
    for match in re.finditer(block_re, text, re.DOTALL):
        raw_attrs = match.group(1).strip()
        body = match.group(2).strip()
        # Pull key="value" / key='value' pairs out of the opening tag.
        parsed = {a.group(1): a.group(2) for a in re.finditer(attr_re, raw_attrs)}
        found.append({"attrs": parsed, "content": body, "full": match.group(0)})
    return found
|
| 118 |
+
|
| 119 |
+
def strip_all_tags(text: str) -> str:
    """Drop every internal agent tag block from *text* before it reaches the user."""
    cleaned = text
    for internal in ("execute", "spawn_agent", "save_memory", "search_memory", "save_skill"):
        cleaned = re.sub(rf'<{internal}[^>]*>.*?</{internal}>', '', cleaned, flags=re.DOTALL)
    return cleaned.strip()
|
| 125 |
|
| 126 |
|
| 127 |
class AgentOrchestrator:
|
|
|
|
| 135 |
)
|
| 136 |
return self._clients[api_key]
|
| 137 |
|
| 138 |
+
async def _llm(self, client, messages: list, model: str, stream: bool = False):
|
| 139 |
+
return await client.chat.completions.create(
|
|
|
|
| 140 |
model=model, messages=messages,
|
| 141 |
+
max_tokens=16000, temperature=0.7, stream=stream
|
| 142 |
)
|
| 143 |
+
|
| 144 |
+
    async def stream_response(self, user_msg: str, history: list,
                              api_key: str, model: str = DEFAULT_MODEL):
        """Run the agent loop for one user turn, yielding JSON event strings.

        Every yielded item is a ``json.dumps``'d dict with a ``"type"`` field
        ("thinking", "token", "exec_done", "audio_response", "error", ...),
        presumably consumed by the chat frontend as a streamed event feed —
        TODO confirm against the consumer.

        The loop: call the LLM, act on any special tags in its reply
        (memory saves/searches, skill saves, <execute> code, <spawn_agent>
        sub-agents), feed real results back, and repeat until the model
        answers with plain text or MAX_ITER is reached.
        """
        # Helper: serialize one event dict to the wire format.
        def emit(d: dict) -> str:
            return json.dumps(d)

        # Unknown model names silently fall back to the default.
        model = MODEL_MAP.get(model, DEFAULT_MODEL)
        cl = self.client(api_key)

        mem_ctx = get_memory_context()
        skills_ctx = get_skills_context()

        # System prompt + a bounded window of prior turns (last 14,
        # each truncated to 3000 chars) to cap token usage.
        messages = [{"role": "system", "content": build_system(mem_ctx, skills_ctx)}]
        for m in history[-14:]:
            messages.append({"role": m["role"],
                             "content": str(m.get("content", ""))[:3000]})
        messages.append({"role": "user", "content": user_msg})

        try:
            MAX_ITER = 7
            all_exec_results = {}    # code-block outputs keyed by "code_{iter}_{idx}"
            all_agent_results = {}   # sub-agent outputs keyed by agent name

            for iteration in range(MAX_ITER):
                yield emit({"type": "thinking",
                            "text": f"Working… (step {iteration+1})"})
                # Let the event loop flush the event before blocking work.
                await asyncio.sleep(0)

                # ── LLM call ──────────────────────────────────────────────
                resp = await self._llm(cl, messages, model)
                raw = resp.choices[0].message.content or ""

                # ── Process special tags ──────────────────────────────────

                # 1. Memory saves
                for blk in extract_blocks(raw, "save_memory"):
                    key = blk["attrs"].get("key", f"note_{iteration}")
                    content = blk["content"]
                    save_memory(key, content)
                    yield emit({"type": "memory_saved", "key": key})

                # 2. Memory searches — results go into the exec-result pool
                # so they are fed back to the LLM next iteration.
                for blk in extract_blocks(raw, "search_memory"):
                    query = blk["content"]
                    result = search_memories(query)
                    all_exec_results[f"memory:{query}"] = result
                    yield emit({"type": "memory_search", "query": query,
                                "result": result[:200]})

                # 3. Skill saves
                for blk in extract_blocks(raw, "save_skill"):
                    name = blk["attrs"].get("name", f"skill_{iteration}")
                    desc = blk["attrs"].get("description", "")
                    save_skill(name, blk["content"], desc)
                    yield emit({"type": "skill_saved", "name": name,
                                "description": desc})

                # 4. Execute code blocks in the sandbox (blocking call runs
                # in the default executor so the event loop stays live).
                exec_blocks = extract_blocks(raw, "execute")
                for idx, blk in enumerate(exec_blocks):
                    code = blk["content"]
                    yield emit({"type": "executing", "index": idx})
                    await asyncio.sleep(0)

                    loop = asyncio.get_event_loop()
                    result = await loop.run_in_executor(
                        None, lambda c=code: sandbox_run(c, max_retries=3, timeout=60)
                    )

                    # Emit installs silently (activity panel only)
                    for inst in result.get("installs", []):
                        yield emit({"type": "pkg_install",
                                    "package": inst["package"],
                                    "ok": inst["ok"]})

                    key = f"code_{iteration}_{idx}"
                    stdout = result.get("stdout", "").strip()
                    stderr = result.get("stderr", "").strip()
                    ok = result.get("ok", False)
                    files = result.get("files", [])

                    # Emit output files (audio, images, downloads) —
                    # routed by extension to the matching frontend event.
                    for f in files:
                        ext = f.get("ext", "").lower()
                        fname = f["name"]
                        b64 = f["b64"]
                        if ext in {"mp3", "wav", "ogg", "m4a"}:
                            yield emit({"type": "audio_response",
                                        "audio_b64": b64, "filename": fname})
                        elif ext in {"png", "jpg", "jpeg", "gif", "webp", "svg"}:
                            yield emit({"type": "image_response",
                                        "image_b64": b64, "filename": fname, "ext": ext})
                        else:
                            yield emit({"type": "file_response",
                                        "file_b64": b64, "filename": fname,
                                        "size": f.get("size", 0)})

                    # On failure the stderr (not stdout) is what the LLM sees.
                    exec_result = stdout if ok else f"Error: {stderr}"
                    all_exec_results[key] = exec_result

                    yield emit({"type": "exec_done",
                                "ok": ok,
                                "output": exec_result[:300],
                                "files": [f["name"] for f in files]})

                # 5. Spawn sub-agents — same sandbox path as <execute>,
                # but with a longer timeout and per-agent result bookkeeping.
                agent_blocks = extract_blocks(raw, "spawn_agent")
                for blk in agent_blocks:
                    name = blk["attrs"].get("name", f"Agent_{iteration}")
                    task = blk["attrs"].get("task", "Execute task")
                    code = blk["content"]

                    yield emit({"type": "agent_created", "name": name, "task": task[:100]})
                    await asyncio.sleep(0)

                    yield emit({"type": "agent_working", "name": name})

                    loop = asyncio.get_event_loop()
                    result = await loop.run_in_executor(
                        None, lambda c=code: sandbox_run(c, max_retries=3, timeout=90)
                    )

                    for inst in result.get("installs", []):
                        yield emit({"type": "pkg_install",
                                    "package": inst["package"], "ok": inst["ok"]})

                    stdout = result.get("stdout", "").strip()
                    stderr = result.get("stderr", "").strip()
                    ok = result.get("ok", False)
                    files = result.get("files", [])

                    for f in files:
                        ext = f.get("ext", "").lower()
                        b64 = f["b64"]
                        if ext in {"mp3", "wav", "ogg"}:
                            yield emit({"type": "audio_response", "audio_b64": b64, "filename": f["name"]})
                        elif ext in {"png", "jpg", "jpeg", "gif", "webp"}:
                            yield emit({"type": "image_response", "image_b64": b64, "filename": f["name"], "ext": ext})
                        else:
                            yield emit({"type": "file_response", "file_b64": b64, "filename": f["name"], "size": f.get("size", 0)})

                    agent_out = stdout if ok else f"Error: {stderr}"
                    all_agent_results[name] = agent_out

                    yield emit({"type": "agent_done", "name": name,
                                "preview": agent_out[:250], "ok": ok})

                # ── Decide: done or iterate? ──────────────────────────────
                # NOTE(review): save_memory/search_memory/save_skill blocks
                # are re-extracted here rather than reusing the lists above.
                has_actions = (exec_blocks or agent_blocks or
                               extract_blocks(raw, "save_memory") or
                               extract_blocks(raw, "search_memory") or
                               extract_blocks(raw, "save_skill"))

                if not has_actions:
                    # Pure text response → strip tags and stream it
                    clean = strip_all_tags(raw)
                    yield emit({"type": "response_start"})

                    # Stream character by character for smooth effect
                    chunk_size = 8
                    for i in range(0, len(clean), chunk_size):
                        yield emit({"type": "token", "content": clean[i:i+chunk_size]})
                        if i % 80 == 0:
                            await asyncio.sleep(0)

                    # Handle voice: synthesize any [SPEAK: ...] marker to MP3
                    # via gTTS in the sandbox; best-effort, errors ignored.
                    if "[SPEAK:" in clean:
                        try:
                            speak_text = clean.split("[SPEAK:")[1].rsplit("]", 1)[0].strip()
                            voice_code = f"""
import sys, io, base64
sys.path.insert(0, '{PKG_DIR}')
from gtts import gTTS
tts = gTTS(text={repr(speak_text[:2000])}, lang='en', slow=False)
tts.save('voice_response.mp3')
print('Voice generated')
"""
                            loop = asyncio.get_event_loop()
                            vr = await loop.run_in_executor(
                                None, lambda: sandbox_run(voice_code, timeout=30)
                            )
                            for f in vr.get("files", []):
                                if f.get("ext") == "mp3":
                                    yield emit({"type": "audio_response",
                                                "audio_b64": f["b64"],
                                                "filename": "voice_response.mp3"})
                        except Exception:
                            pass

                    break

                else:
                    # Feed results back to LLM for next iteration
                    # (only the last 5 exec results, each capped at 800 chars).
                    results_summary = ""
                    if all_exec_results:
                        results_summary += "EXECUTION RESULTS:\n"
                        for k, v in list(all_exec_results.items())[-5:]:
                            results_summary += f"[{k}]: {v[:800]}\n\n"
                    if all_agent_results:
                        results_summary += "AGENT RESULTS:\n"
                        for name, out in all_agent_results.items():
                            results_summary += f"[{name}]: {out[:800]}\n\n"

                    messages.append({"role": "assistant", "content": raw})
                    messages.append({"role": "user", "content": (
                        f"{results_summary}\n"
                        f"Now give the user a clean, clear response based on these REAL results. "
                        f"Do NOT show code. Do NOT show <execute> tags. "
                        f"Just present the results/answer naturally in markdown."
                    )})

            yield emit({"type": "done"})

        except Exception as e:
            tb = traceback.format_exc()
            print(f"[AGENT] Error: {e}\n{tb}")
            yield emit({"type": "error", "message": str(e), "detail": tb[-600:]})
|
| 360 |
|
| 361 |
|
| 362 |
# Module-level singleton; importers share this one orchestrator instance.
orchestrator = AgentOrchestrator()
|