"""Custom ReACT engine using LiteLLM's native function calling with Z.ai GLM-5.
CrewAI doesn't reliably route tools through to Z.ai via LiteLLM, so this module
bypasses CrewAI entirely for the tool-calling loop.
"""
import json
import logging
import time
import litellm
from code_tribunal.config import TribunalConfig
log = logging.getLogger("code_tribunal.react")
_MAX_RETRIES = 5
_BASE_DELAY = 4.0


def _completion_with_retry(**kwargs):
    """Call litellm.completion with exponential backoff on rate-limit errors."""
    for attempt in range(_MAX_RETRIES):
        try:
            return litellm.completion(**kwargs)
        except litellm.RateLimitError:
            if attempt == _MAX_RETRIES - 1:
                raise
            delay = _BASE_DELAY * (2 ** attempt)
            log.warning(
                "[RETRY] Rate limited (attempt %d/%d), waiting %.0fs...",
                attempt + 1, _MAX_RETRIES, delay,
            )
            time.sleep(delay)


def _build_tool_schemas(tools: list) -> list[dict]:
    """Convert CrewAI BaseTool instances to the OpenAI function-calling schema."""
    schemas = []
    for tool in tools:
        schema = tool.args_schema.model_json_schema()
        properties = schema.get("properties", {})
        # Pydantic adds a "title" key to every property; drop it to keep the
        # schema sent to the provider minimal.
        for prop in properties.values():
            prop.pop("title", None)
        schemas.append({
            "type": "function",
            "function": {
                "name": tool.name,
                # Only the first line of the tool description is forwarded.
                "description": tool.description.split("\n")[0],
                "parameters": {
                    "type": "object",
                    "properties": properties,
                    "required": schema.get("required", []),
                },
            },
        })
    return schemas
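
# Illustrative sketch of one schema produced above, assuming a hypothetical
# "file_reader" tool whose args_schema declares a single "path" field:
#
#     {
#         "type": "function",
#         "function": {
#             "name": "file_reader",
#             "description": "Read a file from the repository under review.",
#             "parameters": {
#                 "type": "object",
#                 "properties": {"path": {"type": "string"}},
#                 "required": ["path"],
#             },
#         },
#     }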


def _execute_tool(tools: list, tool_name: str, arguments: dict) -> str:
    """Find and execute a tool by name."""
    for tool in tools:
        if tool.name == tool_name:
            # CrewAI tools implement their logic in the underscored _run method.
            return tool._run(**arguments)
    return f"Error: Unknown tool '{tool_name}'"


def react_loop(
    config: TribunalConfig,
    task_description: str,
    agent_role: str,
    agent_goal: str,
    tools: list,
    max_iterations: int = 10,
) -> str:
    """Run a full ReACT loop using function calling.

    The agent receives tools, decides which to call, observes the results,
    and iterates until it has enough information to answer.

    Returns the final text output.
    """
    tool_schemas = _build_tool_schemas(tools)
    system_prompt = (
        f"You are {agent_role}. {agent_goal}\n\n"
        "You have access to tools for investigating code. Use them actively:\n"
        "- Call file_reader to read specific files\n"
        "- Call pattern_search to run GritQL patterns\n"
        "- Call code_graph_query to trace call chains and dependencies\n"
        "- Call finding_context to see surrounding code for a finding\n\n"
        "Always call at least one tool before giving your final answer. "
        "After gathering information, provide a detailed analysis."
    )
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": task_description},
    ]

    for iteration in range(max_iterations):
        response = _completion_with_retry(
            model=config.model_name,
            messages=messages,
            tools=tool_schemas,
            tool_choice="auto",
            api_key=config.api_key,
            api_base=config.api_base,
            temperature=config.temperature,
        )
        message = response.choices[0].message
        messages.append(message.model_dump())
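        # No tool calls means the model is done investigating; its content is
        # the final answer.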
        if not message.tool_calls:
            return message.content or ""
        for tool_call in message.tool_calls:
            func_name = tool_call.function.name
            try:
                func_args = json.loads(tool_call.function.arguments)
            except json.JSONDecodeError:
                func_args = {}
            log.debug(" [ReACT %d] %s(%s)", iteration + 1, func_name, func_args)
            result = _execute_tool(tools, func_name, func_args)
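            # Feed the observation back as a "tool" message tied to this call's id.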
            messages.append({
                "role": "tool",
                "content": str(result),
                "tool_call_id": tool_call.id,
            })
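
    # Iteration budget exhausted: make one last call without tools so the
    # model has to answer in plain text.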
    response = _completion_with_retry(
        model=config.model_name,
        messages=messages,
        api_key=config.api_key,
        api_base=config.api_base,
        temperature=config.temperature,
    )
    return response.choices[0].message.content or ""
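
# Minimal usage sketch (illustrative only; the config object and the tool
# instances named below are hypothetical placeholders):
#
#     verdict = react_loop(
#         config=config,
#         task_description="Investigate finding F-1 and report whether it is real.",
#         agent_role="a senior code reviewer",
#         agent_goal="Decide whether the finding is a true positive.",
#         tools=[file_reader_tool, pattern_search_tool],
#     )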


def react_loop_stream(
    config: TribunalConfig,
    task_description: str,
    agent_role: str,
    agent_goal: str,
    tools: list,
    max_iterations: int = 10,
):
    """Streaming ReACT loop that yields (role, delta_text, is_tool_call) tuples.

    Each tool call is reported as a small delta. The final answer is yielded
    as a large delta.
    """
    tool_schemas = _build_tool_schemas(tools)
    system_prompt = (
        f"You are {agent_role}. {agent_goal}\n\n"
        "You have access to tools for investigating code. Use them actively:\n"
        "- Call file_reader to read specific files\n"
        "- Call pattern_search to run GritQL patterns\n"
        "- Call code_graph_query to trace call chains and dependencies\n"
        "- Call finding_context to see surrounding code for a finding\n\n"
        "Always call at least one tool before giving your final answer. "
        "After gathering information, provide a detailed analysis."
    )
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": task_description},
    ]

    for iteration in range(max_iterations):
        response = _completion_with_retry(
            model=config.model_name,
            messages=messages,
            tools=tool_schemas,
            tool_choice="auto",
            api_key=config.api_key,
            api_base=config.api_base,
            temperature=config.temperature,
        )
        message = response.choices[0].message
        messages.append(message.model_dump())
        if message.tool_calls:
            for tool_call in message.tool_calls:
                func_name = tool_call.function.name
                try:
                    func_args = json.loads(tool_call.function.arguments)
                except json.JSONDecodeError:
                    func_args = {}
                yield (agent_role, f"\n[Using tool: {func_name}({json.dumps(func_args)})]\n", True)
                result = _execute_tool(tools, func_name, func_args)
                messages.append({
                    "role": "tool",
                    "content": str(result),
                    "tool_call_id": tool_call.id,
                })
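        # No tool calls: the content is the final answer, so emit it and stop.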
        if not message.tool_calls:
            if message.content:
                yield (agent_role, message.content, False)
            break
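    # This `else` belongs to the `for` loop: it runs only when every iteration
    # ended in tool calls and the model never produced a tool-free answer.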
    else:
        response = _completion_with_retry(
            model=config.model_name,
            messages=messages,
            api_key=config.api_key,
            api_base=config.api_base,
            temperature=config.temperature,
        )
        yield (agent_role, response.choices[0].message.content or "", False)
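
# Streaming usage sketch (illustrative only; argument values are placeholders):
#
#     for role, delta, is_tool_call in react_loop_stream(
#         config, task_description, agent_role, agent_goal, tools
#     ):
#         print(delta, end="", flush=True)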