Upload 3 files
- app/agent_system.py +360 -0
- app/docs_context.py +188 -0
- app/main.py +67 -0
app/agent_system.py
ADDED
@@ -0,0 +1,360 @@
import os
import json
import asyncio
import traceback
from openai import AsyncOpenAI
from typing import AsyncGenerator
from docs_context import PRAISONAI_DOCS

LONGCAT_BASE_URL = "https://api.longcat.chat/openai/v1"
MODEL = "LongCat-Flash-Lite"


def build_orchestrator_system():
    return f"""You are the Main Orchestrator Agent for PraisonChat — a powerful AI system that solves complex tasks by dynamically creating specialized sub-agents, each with custom-built tools.

{PRAISONAI_DOCS}

## Your Job
When a user sends a task:
1. Analyze what kind of work is needed
2. Design specialized sub-agents, each focused on one responsibility
3. For each sub-agent, design the exact Python tools they need
4. Return a structured execution plan as JSON

## Response Format
Always respond with valid JSON in this exact structure:
{{
  "task_analysis": "Clear explanation of what needs to be done and why",
  "needs_sub_agents": true,
  "sub_agents": [
    {{
      "name": "AgentName",
      "role": "Specific professional role",
      "goal": "What this agent achieves",
      "backstory": "Brief agent background/expertise",
      "tools": [
        {{
          "name": "tool_function_name",
          "description": "What this tool does",
          "parameters": "param1: str, param2: int = 10",
          "return_type": "str",
          "docstring": "Detailed docstring explaining the tool",
          "implementation": "Python code body (indented with 4 spaces, no def line)"
        }}
      ],
      "task_description": "Exact task for this agent to perform",
      "expected_output": "What format/content to expect back"
    }}
  ],
  "execution_order": ["AgentName1", "AgentName2"],
  "synthesis_instruction": "How to combine all agent results into the final answer"
}}

## Rules
- Create sub-agents ONLY when needed. Simple questions = no sub-agents (set needs_sub_agents: false)
- Tools must be real, executable Python code
- Each tool implementation must be complete and working
- Maximum 4 sub-agents per task
- Tool implementations must not import anything not in Python stdlib
- Keep tool implementations under 30 lines each
"""
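
# Illustrative only: a plan of the shape the prompt above asks the model to
# return, for a hypothetical "compare two CSV snippets" request. The agent and
# tool names are invented for this example and appear nowhere else in the app.
#
# {
#   "task_analysis": "The user wants two CSV snippets compared; one agent suffices.",
#   "needs_sub_agents": true,
#   "sub_agents": [
#     {
#       "name": "CsvDiffAgent",
#       "role": "Data Comparison Specialist",
#       "goal": "Report row-level differences between two CSV snippets",
#       "backstory": "Experienced data analyst",
#       "tools": [
#         {
#           "name": "diff_csv_rows",
#           "description": "Compare two CSV strings row by row",
#           "parameters": "a: str, b: str",
#           "return_type": "str",
#           "docstring": "Return rows present in one CSV but not the other.",
#           "implementation": "rows_a = set(a.splitlines())\nrows_b = set(b.splitlines())\nreturn '\\n'.join(sorted(rows_a ^ rows_b))"
#         }
#       ],
#       "task_description": "Compare the two CSV snippets supplied by the user",
#       "expected_output": "A list of differing rows with a short summary"
#     }
#   ],
#   "execution_order": ["CsvDiffAgent"],
#   "synthesis_instruction": "Summarize the differences in a short markdown table"
# }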


def build_tool_function(tool_spec: dict) -> callable:
    """Dynamically create a Python function from a tool spec."""
    name = tool_spec["name"]
    params = tool_spec.get("parameters", "input: str")
    return_type = tool_spec.get("return_type", "str")
    docstring = tool_spec.get("docstring", "Auto-generated tool")
    implementation = tool_spec.get("implementation", "return str(input)")

    # Build the function source: indent every non-blank body line by four
    # spaces so it sits uniformly under the generated def line.
    body = "\n".join(
        "    " + line if line.strip() else ""
        for line in implementation.strip("\n").splitlines()
    )
    func_source = f"""
def {name}({params}) -> {return_type}:
    \"\"\"{docstring}\"\"\"
{body}
"""
    namespace = {}
    try:
        exec(func_source, namespace)
        return namespace[name]
    except Exception as e:
        # Return a safe fallback function. Capture the error text now: `e` is
        # cleared when the except block exits, so the closure cannot use it later.
        error_msg = str(e)

        def fallback_tool(**kwargs) -> str:
            return f"Tool '{name}' could not be created: {error_msg}. Using fallback."

        fallback_tool.__name__ = name
        fallback_tool.__doc__ = docstring
        return fallback_tool
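
# Illustrative only: how a planned tool spec becomes a callable. The spec below
# is hypothetical; in the app, build_tool_function is only invoked from
# run_sub_agent with specs produced by the orchestrator plan.
#
#   spec = {
#       "name": "word_count",
#       "parameters": "text: str",
#       "return_type": "str",
#       "docstring": "Count the words in a piece of text.",
#       "implementation": "return str(len(text.split()))",
#   }
#   word_count = build_tool_function(spec)
#   word_count("one two three")   # -> "3"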


class AgentOrchestrator:
    def __init__(self):
        self._client_cache = {}

    def get_client(self, api_key: str) -> AsyncOpenAI:
        if api_key not in self._client_cache:
            self._client_cache[api_key] = AsyncOpenAI(
                api_key=api_key,
                base_url=LONGCAT_BASE_URL
            )
        return self._client_cache[api_key]

    async def plan_task(self, client: AsyncOpenAI, user_message: str, history: list) -> dict:
        """Ask the orchestrator to plan the task."""
        messages = [{"role": "system", "content": build_orchestrator_system()}]

        # Add recent history for context
        for msg in history[-6:]:
            messages.append({"role": msg["role"], "content": str(msg["content"])[:2000]})

        messages.append({
            "role": "user",
            "content": f"Plan the execution for this task: {user_message}"
        })

        response = await client.chat.completions.create(
            model=MODEL,
            messages=messages,
            max_tokens=6000,
            temperature=0.2,
        )

        raw = response.choices[0].message.content.strip()
        # Strip markdown code fences if present
        if raw.startswith("```"):
            raw = raw.split("```")[1]
            if raw.startswith("json"):
                raw = raw[4:]
            raw = raw.strip()

        try:
            return json.loads(raw)
        except Exception:
            return {
                "task_analysis": "Direct response",
                "needs_sub_agents": False,
                "sub_agents": [],
                "execution_order": [],
                "synthesis_instruction": "Respond directly"
            }

    async def run_sub_agent(
        self,
        client: AsyncOpenAI,
        agent_spec: dict,
        context_so_far: str
    ) -> str:
        """Run a single sub-agent with its tools."""
        tool_descriptions = ""
        tools_created = []
        tool_errors = []

        for tool_spec in agent_spec.get("tools", []):
            fn = build_tool_function(tool_spec)
            tools_created.append(fn.__name__)
            tool_descriptions += f"\n- {fn.__name__}: {tool_spec.get('description', '')}"

        system_prompt = f"""You are {agent_spec['name']}.
Role: {agent_spec['role']}
Goal: {agent_spec['goal']}
Backstory: {agent_spec.get('backstory', '')}

You have access to these custom tools (simulate their usage in your reasoning):
{tool_descriptions if tool_descriptions else "No specialized tools - use your knowledge directly."}

Context from previous agents:
{context_so_far if context_so_far else "You are the first agent running."}

Execute your task thoroughly. Show your reasoning and tool usage step by step.
Expected output: {agent_spec.get('expected_output', 'Detailed results')}"""

        response = await client.chat.completions.create(
            model=MODEL,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": agent_spec["task_description"]}
            ],
            max_tokens=12000,
            temperature=0.7,
        )

        return response.choices[0].message.content

    async def synthesize(
        self,
        client: AsyncOpenAI,
        user_message: str,
        agent_results: dict,
        synthesis_instruction: str
    ) -> AsyncGenerator[str, None]:
        """Stream the final synthesized response."""
        results_text = "\n\n".join([
            f"=== {name} ===\n{result}"
            for name, result in agent_results.items()
        ])

        system_prompt = f"""You are the Main Orchestrator synthesizing results from specialized sub-agents.

Synthesis instruction: {synthesis_instruction}

Sub-agent results:
{results_text}

Provide a comprehensive, well-structured final response to the user.
Use markdown formatting. Be thorough but concise."""

        stream = await client.chat.completions.create(
            model=MODEL,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_message}
            ],
            max_tokens=16000,
            temperature=0.7,
            stream=True
        )

        async for chunk in stream:
            delta = chunk.choices[0].delta
            if delta.content:
                yield delta.content

    async def direct_response(
        self,
        client: AsyncOpenAI,
        user_message: str,
        history: list
    ) -> AsyncGenerator[str, None]:
        """Stream a direct response without sub-agents."""
        messages = [{
            "role": "system",
            "content": "You are PraisonChat, a powerful AI assistant. Respond helpfully using markdown formatting."
        }]
        for msg in history[-10:]:
            messages.append({"role": msg["role"], "content": str(msg["content"])[:3000]})
        messages.append({"role": "user", "content": user_message})

        stream = await client.chat.completions.create(
            model=MODEL,
            messages=messages,
            max_tokens=16000,
            temperature=0.7,
            stream=True
        )

        async for chunk in stream:
            delta = chunk.choices[0].delta
            if delta.content:
                yield delta.content
|
| 251 |
+
async def stream_response(
|
| 252 |
+
self,
|
| 253 |
+
user_message: str,
|
| 254 |
+
history: list,
|
| 255 |
+
api_key: str
|
| 256 |
+
) -> AsyncGenerator[str, None]:
|
| 257 |
+
"""Main entry point — streams SSE-formatted events."""
|
| 258 |
+
|
| 259 |
+
def emit(data: dict) -> str:
|
| 260 |
+
return json.dumps(data)
|
| 261 |
+
|
| 262 |
+
client = self.get_client(api_key)
|
| 263 |
+
|
| 264 |
+
try:
|
| 265 |
+
# ── Step 1: Plan ─────────────────────────────────────────────────
|
| 266 |
+
yield emit({"type": "step", "text": "🧠 Main Agent analyzing your task..."})
|
| 267 |
+
await asyncio.sleep(0)
|
| 268 |
+
|
| 269 |
+
plan = await self.plan_task(client, user_message, history)
|
| 270 |
+
|
| 271 |
+
yield emit({
|
| 272 |
+
"type": "step",
|
| 273 |
+
"text": f"📋 {plan.get('task_analysis', 'Planning execution...')}"
|
| 274 |
+
})
|
| 275 |
+
await asyncio.sleep(0)
|
| 276 |
+
|
| 277 |
+
sub_agents = plan.get("sub_agents", [])
|
| 278 |
+
needs_sub_agents = plan.get("needs_sub_agents", bool(sub_agents))
|
| 279 |
+
|
| 280 |
+
# ── Step 2: Sub-agents or direct ─────────────────────────────────
|
| 281 |
+
if needs_sub_agents and sub_agents:
|
| 282 |
+
yield emit({
|
| 283 |
+
"type": "step",
|
| 284 |
+
"text": f"🤖 Spawning {len(sub_agents)} specialized sub-agent(s)..."
|
| 285 |
+
})
|
| 286 |
+
|
| 287 |
+
for agent_spec in sub_agents:
|
| 288 |
+
tool_names = [t["name"] for t in agent_spec.get("tools", [])]
|
| 289 |
+
yield emit({
|
| 290 |
+
"type": "agent_created",
|
| 291 |
+
"name": agent_spec["name"],
|
| 292 |
+
"role": agent_spec["role"],
|
| 293 |
+
"tools": tool_names
|
| 294 |
+
})
|
| 295 |
+
await asyncio.sleep(0.05)
|
| 296 |
+
|
| 297 |
+
# Execute each sub-agent
|
| 298 |
+
context_so_far = ""
|
| 299 |
+
agent_results = {}
|
| 300 |
+
execution_order = plan.get("execution_order", [a["name"] for a in sub_agents])
|
| 301 |
+
|
| 302 |
+
for agent_name in execution_order:
|
| 303 |
+
agent_spec = next(
|
| 304 |
+
(a for a in sub_agents if a["name"] == agent_name), None
|
| 305 |
+
)
|
| 306 |
+
if not agent_spec:
|
| 307 |
+
continue
|
| 308 |
+
|
| 309 |
+
yield emit({
|
| 310 |
+
"type": "step",
|
| 311 |
+
"text": f"⚡ {agent_name} working on: {agent_spec['task_description'][:100]}..."
|
| 312 |
+
})
|
| 313 |
+
await asyncio.sleep(0)
|
| 314 |
+
|
| 315 |
+
try:
|
| 316 |
+
result = await self.run_sub_agent(client, agent_spec, context_so_far)
|
| 317 |
+
agent_results[agent_name] = result
|
| 318 |
+
context_so_far += f"\n\n{agent_name} completed: {result[:600]}"
|
| 319 |
+
|
| 320 |
+
yield emit({
|
| 321 |
+
"type": "agent_result",
|
| 322 |
+
"name": agent_name,
|
| 323 |
+
"preview": result[:300] + ("..." if len(result) > 300 else "")
|
| 324 |
+
})
|
| 325 |
+
except Exception as e:
|
| 326 |
+
yield emit({
|
| 327 |
+
"type": "step",
|
| 328 |
+
"text": f"⚠️ {agent_name} encountered an issue: {str(e)[:100]}"
|
| 329 |
+
})
|
| 330 |
+
agent_results[agent_name] = f"Error: {e}"
|
| 331 |
+
|
| 332 |
+
# Synthesize
|
| 333 |
+
yield emit({"type": "step", "text": "✨ Synthesizing final response..."})
|
| 334 |
+
yield emit({"type": "response_start"})
|
| 335 |
+
await asyncio.sleep(0)
|
| 336 |
+
|
| 337 |
+
async for token in self.synthesize(
|
| 338 |
+
client, user_message, agent_results,
|
| 339 |
+
plan.get("synthesis_instruction", "Combine all results into a clear response")
|
| 340 |
+
):
|
| 341 |
+
yield emit({"type": "token", "content": token})
|
| 342 |
+
|
| 343 |
+
else:
|
| 344 |
+
# Direct response
|
| 345 |
+
yield emit({"type": "step", "text": "💬 Generating response..."})
|
| 346 |
+
yield emit({"type": "response_start"})
|
| 347 |
+
await asyncio.sleep(0)
|
| 348 |
+
|
| 349 |
+
async for token in self.direct_response(client, user_message, history):
|
| 350 |
+
yield emit({"type": "token", "content": token})
|
| 351 |
+
|
| 352 |
+
yield emit({"type": "done"})
|
| 353 |
+
|
| 354 |
+
except Exception as e:
|
| 355 |
+
tb = traceback.format_exc()
|
| 356 |
+
yield emit({"type": "error", "message": str(e), "detail": tb[:500]})
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
# Singleton
|
| 360 |
+
orchestrator = AgentOrchestrator()
|
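
# Illustrative only: the rough sequence of JSON events stream_response yields for
# a task that spawns one sub-agent. Exact text depends on the plan and the model;
# main.py wraps each event as an SSE "data:" frame.
#
#   {"type": "step", "text": "🧠 Main Agent analyzing your task..."}
#   {"type": "step", "text": "📋 <task analysis>"}
#   {"type": "step", "text": "🤖 Spawning 1 specialized sub-agent(s)..."}
#   {"type": "agent_created", "name": "...", "role": "...", "tools": ["..."]}
#   {"type": "step", "text": "⚡ <agent name> working on: ..."}
#   {"type": "agent_result", "name": "...", "preview": "..."}
#   {"type": "step", "text": "✨ Synthesizing final response..."}
#   {"type": "response_start"}
#   {"type": "token", "content": "..."}   (repeated until the answer is complete)
#   {"type": "done"}
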
app/docs_context.py
ADDED
@@ -0,0 +1,188 @@
PRAISONAI_DOCS = """
# PraisonAI Agent Framework — Full Documentation

## What is PraisonAI?
PraisonAI is a multi-agent orchestration framework. You (the orchestrator) can create specialized
sub-agents, each with custom tools written in Python, to complete complex tasks collaboratively.

---

## Creating an Agent
```python
from praisonaiagents import Agent, Task, PraisonAIAgents

agent = Agent(
    name="ResearchAgent",            # Unique agent name
    role="Research Specialist",      # Role description
    goal="Find and summarize info",  # What the agent aims to achieve
    backstory="Expert researcher",   # Context/personality
    llm="LongCat-Flash-Lite",        # LLM to use
    tools=[my_tool_fn],              # List of tool functions
    verbose=True,
    self_reflect=False,              # Enable self-reflection
    min_reflect=1,
    max_reflect=3
)
```

---

## Creating a Task
```python
task = Task(
    name="research_task",
    description="Research the latest developments in quantum computing",
    expected_output="A detailed summary with key findings",
    agent=agent,
    context=[previous_task],  # Task dependencies
    tools=[tool_fn]           # Override agent tools
)
```

---

## Creating Custom Tools
Tools are just Python functions with type hints and docstrings:

```python
def search_web(query: str) -> str:
    \"\"\"Search the internet for information.
    Args:
        query: The search query string
    Returns:
        Search results as a formatted string
    \"\"\"
    import requests
    # implementation
    return results

def read_file(filepath: str) -> str:
    \"\"\"Read content from a file.
    Args:
        filepath: Path to the file
    Returns:
        File content as string
    \"\"\"
    with open(filepath) as f:
        return f.read()

def execute_python(code: str) -> str:
    \"\"\"Execute Python code and return output.
    Args:
        code: Python code to execute
    Returns:
        Execution output
    \"\"\"
    import subprocess
    result = subprocess.run(['python3', '-c', code], capture_output=True, text=True)
    return result.stdout + result.stderr

def calculate(expression: str) -> str:
    \"\"\"Evaluate a mathematical expression.
    Args:
        expression: Math expression as string
    Returns:
        Result as string
    \"\"\"
    return str(eval(expression))
```

---

## Running Multiple Agents (Sequential)
```python
system = PraisonAIAgents(
    agents=[researcher, analyst, writer],
    tasks=[research_task, analysis_task, write_task],
    process="sequential",  # agents run one after another
    verbose=True
)
result = system.start()
```

---

## Running Multiple Agents (Hierarchical)
```python
# Manager coordinates workers automatically
system = PraisonAIAgents(
    agents=[manager_agent, worker1, worker2],
    tasks=[main_task, sub_task1, sub_task2],
    process="hierarchical",
    manager_llm="LongCat-Flash-Lite",
    verbose=True
)
result = system.start()
```

---

## Task Context (Chaining Tasks)
```python
# task2 receives output of task1 as context
task1 = Task(name="t1", description="...", agent=agent1, expected_output="...")
task2 = Task(name="t2", description="...", agent=agent2, expected_output="...", context=[task1])
```

---

## Best Practices for Dynamic Tool Creation
When you need a tool for a specific purpose, write it as a Python function:

1. **Naming**: Use snake_case, descriptive names (e.g., `fetch_github_repo`, `parse_csv_data`)
2. **Type hints**: Always include type hints for parameters and return value
3. **Docstrings**: Write clear docstrings — this is how the agent understands the tool
4. **Error handling**: Wrap risky operations in try/except
5. **Return strings**: Tools should return strings for easy consumption

Example of a dynamically created specialized tool:
```python
def analyze_sentiment(text: str) -> str:
    \"\"\"Analyze the sentiment of given text.
    Args:
        text: The text to analyze
    Returns:
        Sentiment analysis result: positive, negative, or neutral with confidence
    \"\"\"
    # Simple keyword-based analysis
    positive_words = ['good', 'great', 'excellent', 'happy', 'love', 'best']
    negative_words = ['bad', 'terrible', 'hate', 'worst', 'awful', 'horrible']
    text_lower = text.lower()
    pos = sum(1 for w in positive_words if w in text_lower)
    neg = sum(1 for w in negative_words if w in text_lower)
    if pos > neg:
        return f"Positive sentiment (score: {pos}/{pos+neg})"
    elif neg > pos:
        return f"Negative sentiment (score: {neg}/{pos+neg})"
    else:
        return "Neutral sentiment"
```

---

## Sub-Agent Design Patterns

### Pattern 1: Research + Synthesis
- Agent 1 (Researcher): Gathers raw information using search/fetch tools
- Agent 2 (Analyst): Processes and analyzes the gathered data
- Agent 3 (Writer): Produces the final output

### Pattern 2: Divide and Conquer
- Split large tasks into parallel sub-tasks
- Each agent handles one domain
- Orchestrator merges results

### Pattern 3: Validation Pipeline
- Agent 1: Performs the main task
- Agent 2: Reviews and validates Agent 1's output
- Agent 3: Applies corrections

---

## LongCat-Flash-Lite Configuration
Model: LongCat-Flash-Lite
Max tokens: 327,680 (320K context)
Speed: Very fast (~500-700 tokens/sec)
Free quota: 50M tokens/day
API Base: https://api.longcat.chat/openai/v1
"""
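
# Illustrative only (nothing in the app executes this): the "Research + Synthesis"
# pattern described in the docs above, wired with the PraisonAI API as documented.
# `search_web` refers to the example tool shown in the docs; the other names are
# hypothetical.
#
#   researcher = Agent(name="Researcher", role="Research Specialist",
#                      goal="Gather raw information", tools=[search_web],
#                      llm="LongCat-Flash-Lite")
#   writer = Agent(name="Writer", role="Technical Writer",
#                  goal="Produce the final report", llm="LongCat-Flash-Lite")
#   t1 = Task(name="gather", description="Collect sources on the topic",
#             agent=researcher, expected_output="Bullet-point findings")
#   t2 = Task(name="write", description="Write the report from the findings",
#             agent=writer, expected_output="Markdown report", context=[t1])
#   result = PraisonAIAgents(agents=[researcher, writer], tasks=[t1, t2],
#                            process="sequential").start()
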
app/main.py
ADDED
@@ -0,0 +1,67 @@
import os
import json
from pathlib import Path
from fastapi import FastAPI, Request, HTTPException
from fastapi.responses import StreamingResponse, HTMLResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from fastapi.middleware.cors import CORSMiddleware
from agent_system import orchestrator

app = FastAPI(title="PraisonChat", version="1.0.0")

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)

STATIC_DIR = Path(__file__).parent / "static"


@app.get("/", response_class=HTMLResponse)
async def root():
    return HTMLResponse(content=(STATIC_DIR / "index.html").read_text(encoding="utf-8"))


@app.get("/api/health")
def health():
    return {"status": "ok", "model": "LongCat-Flash-Lite", "version": "1.0.0"}


@app.get("/api/models")
def models():
    return {
        "models": [
            {"id": "LongCat-Flash-Lite", "name": "LongCat Flash Lite", "context": "320K", "speed": "fastest"},
            {"id": "LongCat-Flash-Chat", "name": "LongCat Flash Chat", "context": "256K", "speed": "fast"},
            {"id": "LongCat-Flash-Thinking-2601", "name": "LongCat Flash Thinking", "context": "256K", "speed": "medium"},
        ]
    }


@app.post("/api/chat")
async def chat(request: Request):
    try:
        body = await request.json()
    except Exception:
        raise HTTPException(400, "Invalid JSON body")

    messages = body.get("messages", [])
    api_key = body.get("api_key") or os.getenv("LONGCAT_API_KEY", "")

    if not api_key:
        raise HTTPException(400, "LongCat API key is required. Set it in Settings or as LONGCAT_API_KEY env var.")

    if not messages:
        raise HTTPException(400, "No messages provided")

    user_message = messages[-1].get("content", "")
    history = messages[:-1]

    async def event_stream():
        async for chunk in orchestrator.stream_response(user_message, history, api_key):
            yield f"data: {chunk}\n\n"

    return StreamingResponse(event_stream(), media_type="text/event-stream",
                             headers={"X-Accel-Buffering": "no", "Cache-Control": "no-cache"})