|
|
""" |
|
|
Code Implementation Agent for File-by-File Development |
|
|
|
|
|
Handles systematic code implementation with progress tracking and |
|
|
memory optimization for long-running development sessions. |
|
|
""" |
|
|
|
|
|
import json |
|
|
import time |
|
|
import logging |
|
|
from typing import Dict, Any, List, Optional |
|
|
|
|
|
|
|
|
try: |
|
|
import tiktoken |
|
|
|
|
|
TIKTOKEN_AVAILABLE = True |
|
|
except ImportError: |
|
|
TIKTOKEN_AVAILABLE = False |
|
|
|
|
|
|
|
|
import sys |
|
|
import os |
|
|
|
|
|
sys.path.insert( |
|
|
0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) |
|
|
) |
|
|
from prompts.code_prompts import ( |
|
|
GENERAL_CODE_IMPLEMENTATION_SYSTEM_PROMPT, |
|
|
) |
|
|
|
|
|
|
|
|
class CodeImplementationAgent: |
|
|
""" |
|
|
Code Implementation Agent for systematic file-by-file development |
|
|
|
|
|
Responsibilities: |
|
|
- Track file implementation progress |
|
|
- Execute MCP tool calls for code generation |
|
|
- Monitor implementation status |
|
|
- Coordinate with Summary Agent for memory optimization |
|
|
- Calculate token usage for context management |
|
|
""" |
|
|
|
|
|
    def __init__(
        self,
        mcp_agent,
        logger: Optional[logging.Logger] = None,
        enable_read_tools: bool = True,
    ):
        """
        Initialize Code Implementation Agent

        Args:
            mcp_agent: MCP agent instance for tool calls
            logger: Logger instance for tracking operations
            enable_read_tools: Whether to enable read_file and read_code_mem tools (default: True)
        """
        self.mcp_agent = mcp_agent
        self.logger = logger or self._create_default_logger()
        self.enable_read_tools = enable_read_tools

        # Running record of everything implemented/decided this session,
        # exposed via get_implementation_summary()/get_implementation_statistics().
        self.implementation_summary: Dict[str, List[Dict[str, Any]]] = {
            "completed_files": [],
            "technical_decisions": [],
            "important_constraints": [],
            "architecture_notes": [],
            "dependency_analysis": [],
        }
        # Count of unique files written; implemented_files_set de-duplicates
        # repeated write_file calls for the same path.
        self.files_implemented_count = 0
        self.implemented_files_set = set()
        # Paths read for dependency analysis (de-duplicated).
        self.files_read_for_dependencies = set()
        # File count at the time the last summary was generated (fallback trigger).
        self.last_summary_file_count = 0

        # Token budget for summary triggering: trigger once the conversation
        # exceeds max_context_tokens minus a safety buffer.
        self.max_context_tokens = 200000
        self.token_buffer = 10000
        self.summary_trigger_tokens = self.max_context_tokens - self.token_buffer
        # Token count at the time of the last summary (prevents re-triggering
        # until the conversation has grown meaningfully).
        self.last_summary_token_count = 0

        # Tokenizer is optional: without tiktoken we fall back to a character
        # heuristic (see calculate_messages_token_count) and file-count triggers.
        if TIKTOKEN_AVAILABLE:
            try:
                self.tokenizer = tiktoken.get_encoding("o200k_base")
                self.logger.info("Token calculation enabled with o200k_base encoding")
            except Exception as e:
                self.tokenizer = None
                self.logger.warning(f"Failed to initialize tokenizer: {e}")
        else:
            self.tokenizer = None
            self.logger.warning(
                "tiktoken not available, token-based summary triggering disabled"
            )

        # Sliding window of recent tool names used to detect "analysis loops"
        # (reading repeatedly without ever writing).
        self.recent_tool_calls: List[str] = []
        self.max_read_without_write = 5

        # Memory/LLM integration is wired in later via set_memory_agent().
        self.memory_agent = None
        self.llm_client = None
        # Expected values: "anthropic" or "openai" (see set_memory_agent).
        self.llm_client_type = None

        read_tools_status = "ENABLED" if self.enable_read_tools else "DISABLED"
        self.logger.info(
            f"🔧 Code Implementation Agent initialized - Read tools: {read_tools_status}"
        )
        if not self.enable_read_tools:
            self.logger.info(
                "🚫 Testing mode: read_file and read_code_mem will be skipped when called"
            )
|
|
|
|
|
def _create_default_logger(self) -> logging.Logger: |
|
|
"""Create default logger if none provided""" |
|
|
logger = logging.getLogger(f"{__name__}.CodeImplementationAgent") |
|
|
|
|
|
logger.setLevel(logging.INFO) |
|
|
return logger |
|
|
|
|
|
    def get_system_prompt(self) -> str:
        """
        Get the system prompt for code implementation

        The prompt text lives in ``prompts.code_prompts`` so it can be
        shared across agents; this accessor just returns that constant.
        """
        return GENERAL_CODE_IMPLEMENTATION_SYSTEM_PROMPT
|
|
|
|
|
def set_memory_agent(self, memory_agent, llm_client=None, llm_client_type=None): |
|
|
""" |
|
|
Set memory agent for code summary generation |
|
|
|
|
|
Args: |
|
|
memory_agent: Memory agent instance |
|
|
llm_client: LLM client for summary generation |
|
|
llm_client_type: Type of LLM client ("anthropic" or "openai") |
|
|
""" |
|
|
self.memory_agent = memory_agent |
|
|
self.llm_client = llm_client |
|
|
self.llm_client_type = llm_client_type |
|
|
self.logger.info("Memory agent integration configured") |
|
|
|
|
|
    async def execute_tool_calls(self, tool_calls: List[Dict]) -> List[Dict]:
        """
        Execute MCP tool calls and track implementation progress

        Args:
            tool_calls: List of tool calls to execute; each is a dict with
                "id", "name" and "input" keys

        Returns:
            List of tool execution results; each is a dict with "tool_id",
            "tool_name" and "result" (a JSON string or the raw tool payload)
        """
        results = []

        for tool_call in tool_calls:
            tool_name = tool_call["name"]
            tool_input = tool_call["input"]

            self.logger.info(f"Executing MCP tool: {tool_name}")

            try:
                # Testing mode: short-circuit the read tools with a mock
                # "skipped" payload instead of touching the filesystem.
                if not self.enable_read_tools and tool_name in [
                    "read_file",
                    "read_code_mem",
                ]:
                    mock_result = json.dumps(
                        {
                            "status": "skipped",
                            "message": f"{tool_name} tool disabled for testing",
                            "tool_disabled": True,
                            "original_input": tool_input,
                        },
                        ensure_ascii=False,
                    )

                    results.append(
                        {
                            "tool_id": tool_call["id"],
                            "tool_name": tool_name,
                            "result": mock_result,
                        }
                    )
                    continue

                # read_file calls may be redirected to cached summaries
                # (read_code_mem) when a memory agent is configured.
                if tool_name == "read_file":
                    file_path = tool_call["input"].get("file_path", "unknown")
                    self.logger.info(f"🔍 READ_FILE CALL DETECTED: {file_path}")
                    self.logger.info(
                        f"📊 Files implemented count: {self.files_implemented_count}"
                    )
                    self.logger.info(
                        f"🧠 Memory agent available: {self.memory_agent is not None}"
                    )

                    if self.memory_agent is not None:
                        self.logger.info(
                            f"🔄 INTERCEPTING read_file call for {file_path} (memory agent available)"
                        )
                        # The helper performs its own tracking, so skip the
                        # generic tracking below for this call.
                        result = await self._handle_read_file_with_memory_optimization(
                            tool_call
                        )
                        results.append(result)
                        continue
                    else:
                        self.logger.info(
                            "📁 NO INTERCEPTION: no memory agent available"
                        )

                if self.mcp_agent:
                    result = await self.mcp_agent.call_tool(tool_name, tool_input)

                    # Progress bookkeeping: writes are tracked (and may spawn
                    # a code summary); reads feed dependency analysis.
                    if tool_name == "write_file":
                        await self._track_file_implementation_with_summary(
                            tool_call, result
                        )
                    elif tool_name == "read_file":
                        self._track_dependency_analysis(tool_call, result)

                    # Feed the sliding window used for analysis-loop detection.
                    self._track_tool_call_for_loop_detection(tool_name)

                    results.append(
                        {
                            "tool_id": tool_call["id"],
                            "tool_name": tool_name,
                            "result": result,
                        }
                    )
                else:
                    # No MCP agent configured — report the error as this
                    # call's result rather than raising.
                    results.append(
                        {
                            "tool_id": tool_call["id"],
                            "tool_name": tool_name,
                            "result": json.dumps(
                                {
                                    "status": "error",
                                    "message": "MCP agent not initialized",
                                },
                                ensure_ascii=False,
                            ),
                        }
                    )

            except Exception as e:
                # A failing tool must not abort the batch; record the error
                # as this tool's result and continue with the next call.
                self.logger.error(f"MCP tool execution failed: {e}")
                results.append(
                    {
                        "tool_id": tool_call["id"],
                        "tool_name": tool_name,
                        "result": json.dumps(
                            {"status": "error", "message": str(e)}, ensure_ascii=False
                        ),
                    }
                )

        return results
|
|
|
|
|
|
|
|
|
|
|
    async def _handle_read_file_with_memory_optimization(self, tool_call: Dict) -> Dict:
        """
        Intercept read_file calls and redirect to read_code_mem if a summary exists.
        This prevents unnecessary file reads if the summary is already available.

        Args:
            tool_call: The original read_file tool call (dict with "id",
                "name" and "input" keys)

        Returns:
            Tool-result dict with "tool_id", "tool_name" and "result".  The
            tool name is always reported as "read_file" even when the result
            actually came from read_code_mem.
        """
        file_path = tool_call["input"].get("file_path")
        if not file_path:
            return {
                "tool_id": tool_call["id"],
                "tool_name": "read_file",
                "result": json.dumps(
                    {"status": "error", "message": "file_path parameter is required"},
                    ensure_ascii=False,
                ),
            }

        # Probe read_code_mem first to learn whether a summary already exists.
        should_use_summary = False
        if self.memory_agent and self.mcp_agent:
            try:
                read_code_mem_result = await self.mcp_agent.call_tool(
                    "read_code_mem", {"file_paths": [file_path]}
                )

                import json  # shadows the module-level import; harmless

                if isinstance(read_code_mem_result, str):
                    try:
                        result_data = json.loads(read_code_mem_result)

                        should_use_summary = (
                            result_data.get("status")
                            in ["all_summaries_found", "partial_summaries_found"]
                            and result_data.get("summaries_found", 0) > 0
                        )
                    except json.JSONDecodeError:
                        should_use_summary = False
            except Exception as e:
                # Probe failures are non-fatal: fall back to a real file read.
                self.logger.debug(f"read_code_mem check failed for {file_path}: {e}")
                should_use_summary = False

        if should_use_summary:
            self.logger.info(f"🔄 READ_FILE INTERCEPTED: Using summary for {file_path}")

            if self.mcp_agent:
                # NOTE(review): second read_code_mem round-trip for the same
                # path — the probe result above could in principle be reused.
                result = await self.mcp_agent.call_tool(
                    "read_code_mem", {"file_paths": [file_path]}
                )

                import json

                try:
                    result_data = (
                        json.loads(result) if isinstance(result, str) else result
                    )
                    if isinstance(result_data, dict):
                        # read_code_mem is batch-shaped; unwrap the entry for
                        # our single file so the payload resembles read_file.
                        file_results = result_data.get("results", [])
                        if file_results and len(file_results) > 0:
                            specific_result = file_results[
                                0
                            ]

                            transformed_result = {
                                "status": specific_result.get("status", "no_summary"),
                                "file_path": specific_result.get(
                                    "file_path", file_path
                                ),
                                "summary_content": specific_result.get(
                                    "summary_content"
                                ),
                                "message": specific_result.get("message", ""),
                                "original_tool": "read_file",
                                "optimization": "redirected_to_read_code_mem",
                            }
                            final_result = json.dumps(
                                transformed_result, ensure_ascii=False
                            )
                        else:
                            # No per-file results list: annotate and pass through.
                            result_data["original_tool"] = "read_file"
                            result_data["optimization"] = "redirected_to_read_code_mem"
                            final_result = json.dumps(result_data, ensure_ascii=False)
                    else:
                        final_result = result
                except (json.JSONDecodeError, TypeError):
                    final_result = result

                return {
                    "tool_id": tool_call["id"],
                    "tool_name": "read_file",
                    "result": final_result,
                }
            else:
                self.logger.warning(
                    "MCP agent not available for read_code_mem optimization"
                )
        else:
            self.logger.info(
                f"📁 READ_FILE: No summary for {file_path}, using actual file"
            )

        # Fallback: no usable summary — perform the real file read and do the
        # tracking that execute_tool_calls skipped for intercepted calls.
        if self.mcp_agent:
            result = await self.mcp_agent.call_tool("read_file", tool_call["input"])

            self._track_dependency_analysis(tool_call, result)

            self._track_tool_call_for_loop_detection("read_file")

            return {
                "tool_id": tool_call["id"],
                "tool_name": "read_file",
                "result": result,
            }
        else:
            return {
                "tool_id": tool_call["id"],
                "tool_name": "read_file",
                "result": json.dumps(
                    {"status": "error", "message": "MCP agent not initialized"},
                    ensure_ascii=False,
                ),
            }
|
|
|
|
|
async def _track_file_implementation_with_summary( |
|
|
self, tool_call: Dict, result: Any |
|
|
): |
|
|
""" |
|
|
Track file implementation and create code summary |
|
|
|
|
|
Args: |
|
|
tool_call: The write_file tool call |
|
|
result: Result of the tool execution |
|
|
""" |
|
|
|
|
|
self._track_file_implementation(tool_call, result) |
|
|
|
|
|
|
|
|
if self.memory_agent and self.llm_client and self.llm_client_type: |
|
|
try: |
|
|
file_path = tool_call["input"].get("file_path") |
|
|
file_content = tool_call["input"].get("content", "") |
|
|
|
|
|
if file_path and file_content: |
|
|
|
|
|
summary = await self.memory_agent.create_code_implementation_summary( |
|
|
self.llm_client, |
|
|
self.llm_client_type, |
|
|
file_path, |
|
|
file_content, |
|
|
self.get_files_implemented_count(), |
|
|
) |
|
|
|
|
|
self.logger.info( |
|
|
f"Created code summary for implemented file: {file_path}, summary: {summary[:100]}..." |
|
|
) |
|
|
else: |
|
|
self.logger.warning( |
|
|
"Missing file path or content for summary generation" |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
self.logger.error(f"Failed to create code summary: {e}") |
|
|
|
|
|
    def _track_file_implementation(self, tool_call: Dict, result: Any):
        """
        Track file implementation progress

        Parses the write_file result — which may arrive as an MCP content
        object, a JSON string, or a plain dict — extracts the file path, and
        records the file once in the dedup set, counter and summary list.

        Args:
            tool_call: The write_file tool call
            result: Result of the tool execution (format varies by transport)
        """
        try:
            result_data = None

            # MCP content objects expose the payload via .content(.text).
            if hasattr(result, "content"):
                if hasattr(result.content, "text"):
                    result_content = result.content.text
                else:
                    result_content = str(result.content)

                try:
                    result_data = json.loads(result_content)
                except json.JSONDecodeError:
                    # Non-JSON payload: assume success and fall back to the
                    # path supplied in the tool input.
                    result_data = {
                        "status": "success",
                        "file_path": tool_call["input"].get("file_path", "unknown"),
                    }
            elif isinstance(result, str):
                try:
                    result_data = json.loads(result)
                except json.JSONDecodeError:
                    result_data = {
                        "status": "success",
                        "file_path": tool_call["input"].get("file_path", "unknown"),
                    }
            elif isinstance(result, dict):
                result_data = result
            else:
                # Unknown result type: optimistically assume the write worked.
                result_data = {
                    "status": "success",
                    "file_path": tool_call["input"].get("file_path", "unknown"),
                }

            # Prefer the path reported by the tool on success; otherwise fall
            # back to the path from the tool input.
            file_path = None
            if result_data and result_data.get("status") == "success":
                file_path = result_data.get(
                    "file_path", tool_call["input"].get("file_path", "unknown")
                )
            else:
                file_path = tool_call["input"].get("file_path")

            if file_path and file_path not in self.implemented_files_set:
                # First sighting of this file: count it and record metadata.
                self.implemented_files_set.add(file_path)
                self.files_implemented_count += 1

                self.implementation_summary["completed_files"].append(
                    {
                        "file": file_path,
                        "iteration": self.files_implemented_count,
                        "timestamp": time.time(),
                        "size": result_data.get("size", 0) if result_data else 0,
                    }
                )

            elif file_path and file_path in self.implemented_files_set:
                # Re-write of an already-tracked file: do not double count.
                self.logger.debug(
                    f"File already tracked, skipping duplicate count: {file_path}"
                )
            else:
                self.logger.warning("No valid file path found for tracking")

        except Exception as e:
            self.logger.warning(f"Failed to track file implementation: {e}")

            # Emergency fallback: still count the file from the tool input so
            # progress tracking survives malformed results.  NOTE(review):
            # this path does not append to completed_files, only the counters.
            file_path = tool_call["input"].get("file_path")
            if file_path and file_path not in self.implemented_files_set:
                self.implemented_files_set.add(file_path)
                self.files_implemented_count += 1
                self.logger.info(
                    f"File implementation counted (emergency fallback): count={self.files_implemented_count}, file={file_path}"
                )
|
|
|
|
|
def _track_dependency_analysis(self, tool_call: Dict, result: Any): |
|
|
""" |
|
|
Track dependency analysis through read_file calls |
|
|
""" |
|
|
try: |
|
|
file_path = tool_call["input"].get("file_path") |
|
|
if file_path: |
|
|
|
|
|
if file_path not in self.files_read_for_dependencies: |
|
|
self.files_read_for_dependencies.add(file_path) |
|
|
|
|
|
|
|
|
self.implementation_summary["dependency_analysis"].append( |
|
|
{ |
|
|
"file_read": file_path, |
|
|
"timestamp": time.time(), |
|
|
"purpose": "dependency_analysis", |
|
|
} |
|
|
) |
|
|
|
|
|
self.logger.info( |
|
|
f"Dependency analysis tracked: file_read={file_path}" |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
self.logger.warning(f"Failed to track dependency analysis: {e}") |
|
|
|
|
|
def calculate_messages_token_count(self, messages: List[Dict]) -> int: |
|
|
""" |
|
|
Calculate total token count for a list of messages |
|
|
|
|
|
Args: |
|
|
messages: List of chat messages with 'role' and 'content' keys |
|
|
|
|
|
Returns: |
|
|
Total token count |
|
|
""" |
|
|
if not self.tokenizer: |
|
|
|
|
|
total_chars = sum(len(str(msg.get("content", ""))) for msg in messages) |
|
|
|
|
|
return total_chars // 4 |
|
|
|
|
|
try: |
|
|
total_tokens = 0 |
|
|
for message in messages: |
|
|
content = str(message.get("content", "")) |
|
|
role = message.get("role", "") |
|
|
|
|
|
|
|
|
if content: |
|
|
content_tokens = len( |
|
|
self.tokenizer.encode(content, disallowed_special=()) |
|
|
) |
|
|
total_tokens += content_tokens |
|
|
|
|
|
|
|
|
role_tokens = len(self.tokenizer.encode(role, disallowed_special=())) |
|
|
total_tokens += role_tokens + 4 |
|
|
|
|
|
return total_tokens |
|
|
|
|
|
except Exception as e: |
|
|
self.logger.warning(f"Token calculation failed: {e}") |
|
|
|
|
|
total_chars = sum(len(str(msg.get("content", ""))) for msg in messages) |
|
|
return total_chars // 4 |
|
|
|
|
|
def should_trigger_summary_by_tokens(self, messages: List[Dict]) -> bool: |
|
|
""" |
|
|
Check if summary should be triggered based on token count |
|
|
|
|
|
Args: |
|
|
messages: Current conversation messages |
|
|
|
|
|
Returns: |
|
|
True if summary should be triggered based on token count |
|
|
""" |
|
|
if not messages: |
|
|
return False |
|
|
|
|
|
|
|
|
current_token_count = self.calculate_messages_token_count(messages) |
|
|
|
|
|
|
|
|
should_trigger = ( |
|
|
current_token_count > self.summary_trigger_tokens |
|
|
and current_token_count |
|
|
> self.last_summary_token_count |
|
|
+ 10000 |
|
|
) |
|
|
|
|
|
if should_trigger: |
|
|
self.logger.info( |
|
|
f"Token-based summary trigger: current={current_token_count:,}, " |
|
|
f"threshold={self.summary_trigger_tokens:,}, " |
|
|
f"last_summary={self.last_summary_token_count:,}" |
|
|
) |
|
|
|
|
|
return should_trigger |
|
|
|
|
|
def should_trigger_summary( |
|
|
self, summary_trigger: int = 5, messages: List[Dict] = None |
|
|
) -> bool: |
|
|
""" |
|
|
Check if summary should be triggered based on token count (preferred) or file count (fallback) |
|
|
根据token数(首选)或文件数(回退)检查是否应触发总结 |
|
|
|
|
|
Args: |
|
|
summary_trigger: Number of files after which to trigger summary (fallback) |
|
|
messages: Current conversation messages for token calculation |
|
|
|
|
|
Returns: |
|
|
True if summary should be triggered |
|
|
""" |
|
|
|
|
|
if messages and self.tokenizer: |
|
|
return self.should_trigger_summary_by_tokens(messages) |
|
|
|
|
|
|
|
|
self.logger.info("Using fallback file-based summary triggering") |
|
|
should_trigger = ( |
|
|
self.files_implemented_count > 0 |
|
|
and self.files_implemented_count % summary_trigger == 0 |
|
|
and self.files_implemented_count > self.last_summary_file_count |
|
|
) |
|
|
|
|
|
return should_trigger |
|
|
|
|
|
def mark_summary_triggered(self, messages: List[Dict] = None): |
|
|
""" |
|
|
Mark that summary has been triggered for current state |
|
|
标记当前状态的总结已被触发 |
|
|
|
|
|
Args: |
|
|
messages: Current conversation messages for token tracking |
|
|
""" |
|
|
|
|
|
self.last_summary_file_count = self.files_implemented_count |
|
|
|
|
|
|
|
|
if messages and self.tokenizer: |
|
|
self.last_summary_token_count = self.calculate_messages_token_count( |
|
|
messages |
|
|
) |
|
|
self.logger.info( |
|
|
f"Summary marked as triggered - file_count: {self.files_implemented_count}, " |
|
|
f"token_count: {self.last_summary_token_count:,}" |
|
|
) |
|
|
else: |
|
|
self.logger.info( |
|
|
f"Summary marked as triggered for file count: {self.files_implemented_count}" |
|
|
) |
|
|
|
|
|
def get_implementation_summary(self) -> Dict[str, Any]: |
|
|
""" |
|
|
Get current implementation summary |
|
|
获取当前实现总结 |
|
|
""" |
|
|
return self.implementation_summary.copy() |
|
|
|
|
|
    def get_files_implemented_count(self) -> int:
        """
        Get the number of unique files implemented so far
        """
        return self.files_implemented_count
|
|
|
|
|
def get_read_tools_status(self) -> Dict[str, Any]: |
|
|
""" |
|
|
Get read tools configuration status |
|
|
获取读取工具配置状态 |
|
|
|
|
|
Returns: |
|
|
Dictionary with read tools status information |
|
|
""" |
|
|
return { |
|
|
"read_tools_enabled": self.enable_read_tools, |
|
|
"status": "ENABLED" if self.enable_read_tools else "DISABLED", |
|
|
"tools_affected": ["read_file", "read_code_mem"], |
|
|
"description": "Read tools configuration for testing purposes", |
|
|
} |
|
|
|
|
|
def add_technical_decision(self, decision: str, context: str = ""): |
|
|
""" |
|
|
Add a technical decision to the implementation summary |
|
|
向实现总结添加技术决策 |
|
|
|
|
|
Args: |
|
|
decision: Description of the technical decision |
|
|
context: Additional context for the decision |
|
|
""" |
|
|
self.implementation_summary["technical_decisions"].append( |
|
|
{"decision": decision, "context": context, "timestamp": time.time()} |
|
|
) |
|
|
self.logger.info(f"Technical decision recorded: {decision}") |
|
|
|
|
|
def add_constraint(self, constraint: str, impact: str = ""): |
|
|
""" |
|
|
Add an important constraint to the implementation summary |
|
|
向实现总结添加重要约束 |
|
|
|
|
|
Args: |
|
|
constraint: Description of the constraint |
|
|
impact: Impact of the constraint on implementation |
|
|
""" |
|
|
self.implementation_summary["important_constraints"].append( |
|
|
{"constraint": constraint, "impact": impact, "timestamp": time.time()} |
|
|
) |
|
|
self.logger.info(f"Constraint recorded: {constraint}") |
|
|
|
|
|
def add_architecture_note(self, note: str, component: str = ""): |
|
|
""" |
|
|
Add an architecture note to the implementation summary |
|
|
向实现总结添加架构注释 |
|
|
|
|
|
Args: |
|
|
note: Architecture note description |
|
|
component: Related component or module |
|
|
""" |
|
|
self.implementation_summary["architecture_notes"].append( |
|
|
{"note": note, "component": component, "timestamp": time.time()} |
|
|
) |
|
|
self.logger.info(f"Architecture note recorded: {note}") |
|
|
|
|
|
def get_implementation_statistics(self) -> Dict[str, Any]: |
|
|
""" |
|
|
Get comprehensive implementation statistics |
|
|
获取全面的实现统计信息 |
|
|
""" |
|
|
return { |
|
|
"total_files_implemented": self.files_implemented_count, |
|
|
"files_implemented_count": self.files_implemented_count, |
|
|
"technical_decisions_count": len( |
|
|
self.implementation_summary["technical_decisions"] |
|
|
), |
|
|
"constraints_count": len( |
|
|
self.implementation_summary["important_constraints"] |
|
|
), |
|
|
"architecture_notes_count": len( |
|
|
self.implementation_summary["architecture_notes"] |
|
|
), |
|
|
"dependency_analysis_count": len( |
|
|
self.implementation_summary["dependency_analysis"] |
|
|
), |
|
|
"files_read_for_dependencies": len(self.files_read_for_dependencies), |
|
|
"unique_files_implemented": len(self.implemented_files_set), |
|
|
"completed_files_list": [ |
|
|
f["file"] for f in self.implementation_summary["completed_files"] |
|
|
], |
|
|
"dependency_files_read": list(self.files_read_for_dependencies), |
|
|
"last_summary_file_count": self.last_summary_file_count, |
|
|
"read_tools_status": self.get_read_tools_status(), |
|
|
} |
|
|
|
|
|
def force_enable_optimization(self): |
|
|
""" |
|
|
Force enable optimization for testing purposes |
|
|
强制启用优化用于测试目的 |
|
|
""" |
|
|
self.files_implemented_count = 1 |
|
|
self.logger.info( |
|
|
f"🔧 OPTIMIZATION FORCE ENABLED: files_implemented_count set to {self.files_implemented_count}" |
|
|
) |
|
|
print( |
|
|
f"🔧 OPTIMIZATION FORCE ENABLED: files_implemented_count set to {self.files_implemented_count}" |
|
|
) |
|
|
|
|
|
def reset_implementation_tracking(self): |
|
|
""" |
|
|
Reset implementation tracking (useful for new sessions) |
|
|
重置实现跟踪(对新会话有用) |
|
|
""" |
|
|
self.implementation_summary = { |
|
|
"completed_files": [], |
|
|
"technical_decisions": [], |
|
|
"important_constraints": [], |
|
|
"architecture_notes": [], |
|
|
"dependency_analysis": [], |
|
|
} |
|
|
self.files_implemented_count = 0 |
|
|
self.implemented_files_set = ( |
|
|
set() |
|
|
) |
|
|
self.files_read_for_dependencies = ( |
|
|
set() |
|
|
) |
|
|
self.last_summary_file_count = 0 |
|
|
self.last_summary_token_count = 0 |
|
|
self.logger.info("Implementation tracking reset") |
|
|
|
|
|
|
|
|
self.recent_tool_calls = [] |
|
|
self.logger.info("Analysis loop detection reset") |
|
|
|
|
|
def _track_tool_call_for_loop_detection(self, tool_name: str): |
|
|
""" |
|
|
Track tool calls for analysis loop detection |
|
|
跟踪工具调用以检测分析循环 |
|
|
|
|
|
Args: |
|
|
tool_name: Name of the tool called |
|
|
""" |
|
|
self.recent_tool_calls.append(tool_name) |
|
|
if len(self.recent_tool_calls) > self.max_read_without_write: |
|
|
self.recent_tool_calls.pop(0) |
|
|
|
|
|
if len(set(self.recent_tool_calls)) == 1: |
|
|
self.logger.warning("Analysis loop detected") |
|
|
|
|
|
def is_in_analysis_loop(self) -> bool: |
|
|
""" |
|
|
Check if the agent is in an analysis loop (only reading files, not writing) |
|
|
检查代理是否在分析循环中(只读文件,不写文件) |
|
|
|
|
|
Returns: |
|
|
True if in analysis loop |
|
|
""" |
|
|
if len(self.recent_tool_calls) < self.max_read_without_write: |
|
|
return False |
|
|
|
|
|
|
|
|
analysis_tools = { |
|
|
"read_file", |
|
|
"search_reference_code", |
|
|
"get_all_available_references", |
|
|
} |
|
|
recent_calls_set = set(self.recent_tool_calls) |
|
|
|
|
|
|
|
|
in_loop = ( |
|
|
recent_calls_set.issubset(analysis_tools) and len(recent_calls_set) >= 1 |
|
|
) |
|
|
|
|
|
if in_loop: |
|
|
self.logger.warning( |
|
|
f"Analysis loop detected! Recent calls: {self.recent_tool_calls}" |
|
|
) |
|
|
|
|
|
return in_loop |
|
|
|
|
|
    def get_analysis_loop_guidance(self) -> str:
        """
        Get guidance to break out of analysis loop

        Builds a corrective prompt that is injected into the conversation
        when is_in_analysis_loop() fires, pushing the model to write code.

        Returns:
            Guidance message to encourage implementation
        """
        # NOTE: this is a runtime f-string shown to the model verbatim.
        return f"""🚨 **ANALYSIS LOOP DETECTED - IMMEDIATE ACTION REQUIRED**

**Problem**: You've been reading/analyzing files for {len(self.recent_tool_calls)} consecutive calls without writing code.
**Recent tool calls**: {' → '.join(self.recent_tool_calls)}

**SOLUTION - IMPLEMENT CODE NOW**:
1. **STOP ANALYZING** - You have enough information
2. **Use write_file** to create the next code file according to the implementation plan
3. **Choose ANY file** from the plan that hasn't been implemented yet
4. **Write complete, working code** - don't ask for permission or clarification

**Files implemented so far**: {self.files_implemented_count}
**Your goal**: Implement MORE files, not analyze existing ones!

**CRITICAL**: Your next response MUST use write_file to create a new code file!"""
|
|
|
|
|
    async def test_summary_functionality(self, test_file_path: str = None):
        """
        Test if the code summary functionality is working correctly

        Probes read_code_mem for either a specific file or up to three of
        the implemented files, and logs how many already have summaries.

        Args:
            test_file_path: Specific file to test, if None will test all implemented files
        """
        if not self.memory_agent:
            self.logger.warning("No memory agent available for testing")
            return

        if test_file_path:
            files_to_test = [test_file_path]
        else:
            # Limit the probe to the first three implemented files.
            files_to_test = list(self.implemented_files_set)[:3]

        if not files_to_test:
            self.logger.warning("No implemented files to test")
            return

        summary_files_found = 0

        for file_path in files_to_test:
            if self.mcp_agent:
                try:
                    result = await self.mcp_agent.call_tool(
                        "read_code_mem", {"file_paths": [file_path]}
                    )

                    import json  # shadows the module-level import; harmless

                    result_data = (
                        json.loads(result) if isinstance(result, str) else result
                    )

                    # A file "has a summary" when the index reports at least
                    # one summary under a *_summaries_found status.
                    if (
                        result_data.get("status")
                        in ["all_summaries_found", "partial_summaries_found"]
                        and result_data.get("summaries_found", 0) > 0
                    ):
                        summary_files_found += 1
                except Exception as e:
                    self.logger.warning(
                        f"Failed to test read_code_mem for {file_path}: {e}"
                    )
            else:
                self.logger.warning("MCP agent not available for testing")

        self.logger.info(
            f"📋 Summary testing: {summary_files_found}/{len(files_to_test)} files have summaries"
        )
|
|
|
|
|
    async def test_automatic_read_file_optimization(self):
        """
        Test the automatic read_file optimization that redirects to read_code_mem

        Simulates a read_file tool call end-to-end through
        execute_tool_calls and prints whether the summary redirect kicked in.
        """
        print("=" * 80)
        print("🔄 TESTING AUTOMATIC READ_FILE OPTIMIZATION")
        print("=" * 80)

        # Pretend one file is already implemented so the optimization path
        # is eligible.
        self.files_implemented_count = 1

        test_file = "config.py"

        print(f"📁 Testing automatic optimization for: {test_file}")
        print(f"📊 Files implemented count: {self.files_implemented_count}")
        print(
            f"🔧 Optimization should be: {'ENABLED' if self.files_implemented_count > 0 else 'DISABLED'}"
        )

        simulated_read_file_call = {
            "id": "test_read_file_optimization",
            "name": "read_file",
            "input": {"file_path": test_file},
        }

        print("\n🔄 Simulating read_file call:")
        print(f"   Tool: {simulated_read_file_call['name']}")
        print(f"   File: {simulated_read_file_call['input']['file_path']}")

        results = await self.execute_tool_calls([simulated_read_file_call])

        if results:
            result = results[0]
            print("\n✅ Tool execution completed:")
            print(f"   Tool name: {result.get('tool_name', 'N/A')}")
            print(f"   Tool ID: {result.get('tool_id', 'N/A')}")

            import json  # shadows the module-level import; harmless

            try:
                # The optimization marker is set by
                # _handle_read_file_with_memory_optimization on redirect.
                result_data = json.loads(result.get("result", "{}"))
                if result_data.get("optimization") == "redirected_to_read_code_mem":
                    print("🎉 SUCCESS: read_file was automatically optimized!")
                    print(
                        f"   Original tool: {result_data.get('original_tool', 'N/A')}"
                    )
                    print(f"   Status: {result_data.get('status', 'N/A')}")
                elif result_data.get("status") == "summary_found":
                    print("🎉 SUCCESS: Summary was found and returned!")
                else:
                    print("ℹ️ INFO: No optimization occurred (no summary available)")
            except json.JSONDecodeError:
                print("⚠️ WARNING: Could not parse result as JSON")
        else:
            print("❌ ERROR: No results returned from tool execution")

        print("\n" + "=" * 80)
        print("🔄 AUTOMATIC READ_FILE OPTIMIZATION TEST COMPLETE")
        print("=" * 80)
|
|
|
|
|
async def test_summary_optimization(self, test_file_path: str = "config.py"): |
|
|
""" |
|
|
Test the summary optimization functionality with a specific file |
|
|
测试特定文件的总结优化功能 |
|
|
|
|
|
Args: |
|
|
test_file_path: File path to test (default: config.py which should be in summary) |
|
|
""" |
|
|
if not self.mcp_agent: |
|
|
return False |
|
|
|
|
|
try: |
|
|
|
|
|
result = await self.mcp_agent.call_tool( |
|
|
"read_code_mem", {"file_paths": [test_file_path]} |
|
|
) |
|
|
|
|
|
|
|
|
import json |
|
|
|
|
|
result_data = json.loads(result) if isinstance(result, str) else result |
|
|
|
|
|
return ( |
|
|
result_data.get("status") |
|
|
in ["all_summaries_found", "partial_summaries_found"] |
|
|
and result_data.get("summaries_found", 0) > 0 |
|
|
) |
|
|
except Exception as e: |
|
|
self.logger.warning(f"Failed to test read_code_mem optimization: {e}") |
|
|
return False |
|
|
|
|
|
    async def test_read_tools_configuration(self):
        """
        Test the read tools configuration to verify enabling/disabling works correctly

        Prints the current configuration and, for a few representative tool
        calls, whether each would be executed or skipped.

        Returns:
            The status dict from get_read_tools_status().
        """
        print("=" * 60)
        print("🧪 TESTING READ TOOLS CONFIGURATION")
        print("=" * 60)

        status = self.get_read_tools_status()
        print(f"Read tools enabled: {status['read_tools_enabled']}")
        print(f"Status: {status['status']}")
        print(f"Tools affected: {status['tools_affected']}")

        # Representative calls: two read tools plus one write tool; only the
        # read tools are affected by enable_read_tools.
        test_tools = [
            {
                "id": "test_read_file",
                "name": "read_file",
                "input": {"file_path": "test.py"},
            },
            {
                "id": "test_read_code_mem",
                "name": "read_code_mem",
                "input": {"file_path": "test.py"},
            },
            {
                "id": "test_write_file",
                "name": "write_file",
                "input": {"file_path": "test.py", "content": "# test"},
            },
        ]

        print(
            f"\n🔄 Testing tool execution with read_tools_enabled={self.enable_read_tools}"
        )

        # Mirrors the skip condition in execute_tool_calls without actually
        # executing anything.
        for tool_call in test_tools:
            tool_name = tool_call["name"]
            if not self.enable_read_tools and tool_name in [
                "read_file",
                "read_code_mem",
            ]:
                print(f"🚫 {tool_name}: Would be SKIPPED (disabled)")
            else:
                print(f"✅ {tool_name}: Would be EXECUTED")

        print("=" * 60)
        print("🧪 READ TOOLS CONFIGURATION TEST COMPLETE")
        print("=" * 60)

        return status
|
|
|