Spaces:
Sleeping
Sleeping
"""
Code execution utilities with debugging capabilities.
"""
import os
import subprocess
import sys
import tempfile
from typing import Tuple
def execute_code_safely(code: str, timeout: int = 30) -> Tuple[bool, str, str]:
    """
    Execute Python code safely in a subprocess and capture output.

    The code is written to a uniquely named temporary file rather than a
    fixed ``temp_script.py`` in the working directory: a fixed name races
    with concurrent executions (they would overwrite each other's scripts)
    and clobbers any pre-existing file of that name.

    Args:
        code: Python code to execute.
        timeout: Maximum execution time in seconds.

    Returns:
        Tuple of (success: bool, stdout: str, stderr: str).
    """
    temp_file = None
    try:
        # delete=False so the subprocess can reopen the file by name
        # (Windows cannot open a NamedTemporaryFile still held open).
        with tempfile.NamedTemporaryFile(
            mode="w", suffix=".py", encoding="utf-8", delete=False
        ) as f:
            f.write(code)
            temp_file = f.name

        # Run with the same interpreter that is running us.
        result = subprocess.run(
            [sys.executable, temp_file],
            capture_output=True,
            text=True,
            timeout=timeout,
        )
        return result.returncode == 0, result.stdout, result.stderr
    except subprocess.TimeoutExpired:
        return False, "", f"Execution timed out after {timeout} seconds"
    except Exception as e:
        return False, "", f"Execution error: {str(e)}"
    finally:
        # Best-effort cleanup of the temp file; never mask the real result.
        if temp_file and os.path.exists(temp_file):
            try:
                os.remove(temp_file)
            except Exception as e:
                print(f"Warning: Failed to remove temp file: {str(e)}")
def execute_with_debug(
    code: str, llm, is_analysis: bool, data_context: str = "", max_attempts: int = 3
) -> str:
    """
    Execute code with automatic debugging via LLM.

    If execution fails, the LLM is asked to fix the error and the fixed
    code is re-executed. This repeats for up to max_attempts executions.

    Args:
        code: Python code to execute.
        llm: LLM instance for debugging (must expose ``invoke``).
        is_analysis: Whether this is the data analysis stage (simpler prompts).
        data_context: Context about available data files.
        max_attempts: Maximum execution attempts.

    Returns:
        Execution output or error message.
    """
    from .formatters import extract_code

    stderr = ""  # keep the failure message well-defined even if max_attempts <= 0
    for attempt in range(max_attempts):
        success, stdout, stderr = execute_code_safely(code)
        if success:
            return stdout if stdout else "Code executed successfully (no output)"

        # On the final attempt there is no further execution, so asking the
        # LLM for a fix would only waste a call whose output is never run.
        if attempt == max_attempts - 1:
            break

        # Debug the error
        print(f" Debug attempt {attempt + 1}/{max_attempts}")
        if is_analysis:
            debug_prompt = f"""Fix this Python code error:
{code}
Error:
{stderr}
Requirements:
- Fix the error
- Keep the same functionality
- No try-except blocks
- All files are in 'data/' directory
- Provide ONLY the corrected code in a markdown code block"""
        else:
            debug_prompt = f"""Fix this Python code error:
Available Data:
{data_context}
Code with error:
{code}
Error:
{stderr}
Requirements:
- Fix the error using data context
- Keep the same functionality
- No try-except blocks
- Provide ONLY the corrected code in a markdown code block"""
        try:
            response = llm.invoke(debug_prompt)
            # Handle Gemini response format
            if hasattr(response, "content") and isinstance(response.content, list):
                # Gemini returns list of dicts
                from .formatters import gemini_text

                response_text = gemini_text(response)
            elif hasattr(response, "content"):
                response_text = response.content
            else:
                response_text = str(response)
            code = extract_code(response_text)
        except Exception as e:
            return f"Debugging failed: {str(e)}"
    return f"Failed after {max_attempts} attempts. Last error:\n{stderr}"