# deep-agent/agents/engine.py
"""
Agent Engine β€” the core loop that makes agents AGENTS (not chatbots).
Given a goal + tools, the engine:
1. Plans what steps are needed
2. Calls tools to gather information / take actions
3. Loops until the goal is achieved
4. Returns a structured result
Uses Groq's tool-calling API for native function calling.
"""
import os
import json
import asyncio
from typing import Any, Callable, Optional
from datetime import datetime
# Type alias for a tool implementation: any callable (typically async) that
# the engine can invoke with keyword arguments parsed from the model's
# tool-call JSON. A name->function registry is built from these in run_agent.
ToolFunc = Callable[..., Any]
def _build_tool_schema(func: ToolFunc) -> dict:
"""Build OpenAI-compatible tool schema from a function's docstring."""
import inspect
doc = inspect.getdoc(func) or ""
# Parse Args section from docstring
params = {}
required = []
sig = inspect.signature(func)
for name, param in sig.parameters.items():
p_type = "string" # default
annotation = param.annotation
if annotation == int:
p_type = "integer"
elif annotation == float:
p_type = "number"
elif annotation == bool:
p_type = "boolean"
desc = ""
# Try to find description in docstring
for line in doc.split("\n"):
if name + ":" in line:
desc = line.split(":", 1)[1].strip()
break
params[name] = {"type": p_type, "description": desc}
if param.default is inspect.Parameter.empty:
required.append(name)
# First line of docstring = description
description = doc.split("\n")[0] if doc else func.__name__
return {
"type": "function",
"function": {
"name": func.__name__,
"description": description,
"parameters": {
"type": "object",
"properties": params,
"required": required,
},
},
}
async def run_agent(
    goal: str,
    system_prompt: str,
    tools: list[ToolFunc],
    model: str = "llama-3.3-70b-versatile",
    max_iterations: int = 10,
    conversation_id: str = "default",
    history: Optional[list[dict]] = None,
) -> dict:
    """Run an agent loop: plan -> tool calls -> synthesize -> return.

    Each iteration sends the conversation to the model; if the model requests
    tool calls they are executed and their results appended, and the loop
    continues so the model can process them. The loop ends when the model
    replies with plain text or ``max_iterations`` is reached.

    Args:
        goal: The user's goal, appended as the final user message.
        system_prompt: System message that frames the agent's behavior.
        tools: Tool functions (async or sync) the model may call by name.
        model: Groq model identifier.
        max_iterations: Hard cap on model-call iterations.
        conversation_id: Opaque id echoed back in the result.
        history: Optional prior messages inserted before the goal.

    Returns:
        {
            "result": str,            # Final answer/output
            "tool_calls_made": int,   # How many tool calls were executed
            "tools_used": list[str],  # Which tools were called
            "iterations": int,        # How many loop iterations
            "conversation_id": str,
        }
    """
    import inspect
    from groq import AsyncGroq
    client = AsyncGroq(api_key=os.environ.get("GROQ_API_KEY"))
    # Build tool schemas and a name -> callable dispatch map.
    tool_schemas = [_build_tool_schema(t) for t in tools]
    tool_map = {t.__name__: t for t in tools}
    # Initialize messages: system prompt, optional prior history, then goal.
    messages = [{"role": "system", "content": system_prompt}]
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": goal})
    total_tool_calls = 0
    tools_used = set()
    iterations = 0
    for iteration in range(max_iterations):
        iterations = iteration + 1
        # Call the model. Any API error ends the run with a structured result
        # rather than raising, so callers always get the same return shape.
        try:
            response = await client.chat.completions.create(
                model=model,
                messages=messages,
                tools=tool_schemas if tool_schemas else None,
                tool_choice="auto" if tool_schemas else None,
                max_tokens=4096,
                temperature=0.3,
            )
        except Exception as e:
            return {
                "result": f"Model error: {e}",
                "tool_calls_made": total_tool_calls,
                "tools_used": list(tools_used),
                "iterations": iterations,
                "conversation_id": conversation_id,
            }
        choice = response.choices[0]
        msg = choice.message
        # If the model wants to call tools, record the assistant turn
        # (the API requires the tool_calls echoed back) then execute them.
        if msg.tool_calls:
            messages.append({
                "role": "assistant",
                "content": msg.content or "",
                "tool_calls": [
                    {
                        "id": tc.id,
                        "type": "function",
                        "function": {
                            "name": tc.function.name,
                            "arguments": tc.function.arguments,
                        },
                    }
                    for tc in msg.tool_calls
                ],
            })
            # Execute each requested tool call; errors become tool output so
            # the model can react instead of the whole run crashing.
            for tc in msg.tool_calls:
                func_name = tc.function.name
                tools_used.add(func_name)
                total_tool_calls += 1
                try:
                    args = json.loads(tc.function.arguments)
                    func = tool_map.get(func_name)
                    if func is None:
                        result = f"Unknown tool: {func_name}"
                    else:
                        # Support both async and plain sync tool functions:
                        # only await when the call actually returned an
                        # awaitable (awaiting a plain value raises TypeError).
                        result = func(**args)
                        if inspect.isawaitable(result):
                            result = await result
                        if not isinstance(result, str):
                            result = json.dumps(result, indent=2, default=str)
                except Exception as e:
                    result = f"Tool error: {e}"
                # Truncate massive tool results to keep the context bounded.
                if len(result) > 8000:
                    result = result[:8000] + "\n\n[truncated]"
                messages.append({
                    "role": "tool",
                    "tool_call_id": tc.id,
                    "content": result,
                })
            continue  # Loop back for the model to process results
        # Model returned text (no tool calls) -- we're done.
        final_content = msg.content or ""
        return {
            "result": final_content,
            "tool_calls_made": total_tool_calls,
            "tools_used": list(tools_used),
            "iterations": iterations,
            "conversation_id": conversation_id,
        }
    # Hit max iterations without a plain-text answer.
    return {
        "result": "Agent reached maximum iterations without completing. Partial results may be in the conversation.",
        "tool_calls_made": total_tool_calls,
        "tools_used": list(tools_used),
        "iterations": iterations,
        "conversation_id": conversation_id,
    }