Spaces:
Runtime error
Runtime error
| """ | |
| Highly Powerful Autonomous AI Chatbot - FIXED VERSION | |
| 10x More Autonomous - Automate. Delegate. Accelerate. | |
| Powerful for All. Autonomy for All. | |
| """ | |
| import gradio as gr | |
| import requests | |
| import json | |
| import re | |
| import os | |
| import traceback | |
| import time | |
| import io | |
| import sys | |
| from typing import List, Dict, Optional | |
| from datetime import datetime | |
# Configuration: OpenAI-compatible Hugging Face router endpoint + default model.
API_BASE_URL = "https://router.huggingface.co/v1"
DEFAULT_MODEL = "meta-llama/Llama-3.2-1B-Instruct"
# More powerful model options shown in the UI dropdown.
MODEL_OPTIONS = [
    "meta-llama/Llama-3.2-1B-Instruct",
    "meta-llama/Llama-3.2-3B-Instruct",
    "mistralai/Mistral-7B-Instruct-v0.1",
    "microsoft/DialoGPT-medium",
    "google/flan-t5-base",
    "HuggingFaceH4/zephyr-7b-beta",
    "microsoft/Phi-3-mini-4k-instruct",
    "databricks/dolly-v2-3b"
]
# Global state, mutated by the Gradio event handlers below.
conversation_history = []  # full chat log: {"role", "content", "timestamp"} dicts
current_model = DEFAULT_MODEL  # model id currently selected in the UI
max_tokens = 2048  # generation length cap sent with every request
temperature = 0.7  # sampling temperature sent with every request
autonomy_level = 10  # 1 = plain Q&A ... 10 = auto-executes generated code
class PowerfulAutonomousChatbot:
    """Highly Powerful AI Chatbot with 10x More Autonomy.

    Wraps an OpenAI-compatible chat-completions endpoint and, at high
    autonomy levels, extracts and executes Python code blocks found in
    the model's replies, appending an execution report to the answer.
    """

    def __init__(self):
        # Model/endpoint come from the module-level configuration globals.
        self.model = current_model
        self.api_url = API_BASE_URL
        self.headers = {
            "Content-Type": "application/json",
            # NOTE(review): placeholder token — a real HF token must be
            # injected (e.g. from an HF_TOKEN env var) for live API calls.
            "Authorization": "Bearer hf_placeholder"
        }
        self.code_execution_count = 0   # code blocks executed this session
        self.total_conversations = 0    # user turns handled this session

    def call_llm(self, messages: List[Dict], model: str = None) -> str:
        """Call the LLM endpoint and return the reply text (or an error string).

        Args:
            messages: Chat messages in OpenAI format ({"role", "content"}).
            model: Optional model id override; defaults to ``self.model``.

        Returns:
            The assistant's reply, or a human-readable error message —
            this method never raises.
        """
        try:
            model_to_use = model or self.model
            payload = {
                "model": model_to_use,
                "messages": messages,
                "max_tokens": max_tokens,
                "temperature": temperature,
                "stream": False
            }
            response = requests.post(
                f"{self.api_url}/chat/completions",
                headers=self.headers,
                json=payload,
                timeout=120
            )
            if response.status_code == 200:
                result = response.json()
                return result["choices"][0]["message"]["content"]
            # Truncate error bodies so huge HTML error pages stay readable.
            return f"โ API Error {response.status_code}: {response.text[:500]}"
        except requests.exceptions.Timeout:
            return "โ Request timed out. Please try again."
        except Exception as e:
            return f"โ Error: {str(e)}"

    def extract_code_blocks(self, text: str) -> List[str]:
        """Extract the contents of all fenced code blocks in *text*.

        FIX: the original ran three overlapping patterns (yielding the
        same block several times) and stripped backticks in a way that
        left the language tag ("python") glued onto the code. A single
        pattern with a capture group returns each block's code exactly
        once, fence and language tag removed.
        """
        pattern = r'```(?:\w+)?\n?([\s\S]*?)```'
        return [m.strip() for m in re.findall(pattern, text) if m.strip()]

    def execute_code_advanced(self, code: str) -> Dict:
        """Execute *code*, capturing output, timing, and errors.

        FIX: the original restored ``sys.stdout``/``sys.stderr`` only on
        the success path, so any raising snippet left the whole process
        with redirected streams; restoration now happens in ``finally``.

        SECURITY NOTE(review): ``exec`` of model-generated code is
        inherently unsafe; intentional for this "autonomous" demo, but
        never expose it to untrusted users.

        Returns:
            On success: {"success": True, "stdout", "stderr",
            "execution_time", "output_size"} plus a "message" key only
            when the snippet produced no output. On failure:
            {"success": False, "error", "details", "traceback"}.
        """
        old_stdout = sys.stdout
        old_stderr = sys.stderr
        sys.stdout = io.StringIO()
        sys.stderr = io.StringIO()
        try:
            start_time = time.time()
            # globals() so snippets can build on names defined by earlier ones
            exec(code, globals())
            execution_time = time.time() - start_time
            # Read captured output before the streams are restored.
            stdout_val = sys.stdout.getvalue()
            stderr_val = sys.stderr.getvalue()
            result = {
                "success": True,
                "stdout": stdout_val,
                "stderr": stderr_val,
                "execution_time": execution_time,
                "output_size": len(stdout_val) + len(stderr_val)
            }
            if not stdout_val and not stderr_val:
                result["message"] = "โ Code executed successfully with no output"
            return result
        except SyntaxError as e:
            return {
                "success": False,
                "error": "Syntax Error",
                "details": f"Line {e.lineno}: {e.msg}",
                "traceback": traceback.format_exc()
            }
        except Exception as e:
            return {
                "success": False,
                "error": "Runtime Error",
                "details": str(e),
                "traceback": traceback.format_exc()
            }
        finally:
            # Always restore the real streams, even when exec() raised.
            sys.stdout = old_stdout
            sys.stderr = old_stderr

    def autonomous_problem_solving(self, message: str, history: List[List[str]], model: str, autonomy: int) -> str:
        """Answer *message*; at autonomy >= 5 auto-execute code in the reply.

        Args:
            message: The user's request.
            history: Gradio-style [[user, bot], ...] pairs (unused here;
                kept for interface compatibility with the UI callback).
            model: Model id to query.
            autonomy: 1-10; >= 5 enables autonomous code execution.

        Returns:
            The model reply, optionally followed by an execution report.
        """
        self.total_conversations += 1
        conversation_history.append({"role": "user", "content": message, "timestamp": datetime.now().isoformat()})
        # System prompts keyed by autonomy level (missing levels fall back to 7).
        system_prompts = {
            1: "You are a basic Q&A assistant. Answer questions directly and simply.",
            3: "You are a helpful assistant. Provide detailed explanations and examples.",
            5: "You are an expert programmer. Write and explain code thoroughly.",
            7: "You are a senior engineer. Build complete solutions with code, tests, and documentation.",
            10: """You are a FULLY AUTONOMOUS AI PROGRAMMER.
You MUST:
1. Analyze requests deeply
2. Write complete, working code
3. Execute code automatically
4. Test and debug independently
5. Provide comprehensive solutions
6. Add documentation and comments
7. Suggest improvements
8. Be proactive and think 3 steps ahead
ALWAYS execute code when possible."""
        }
        system_prompt = system_prompts.get(autonomy, system_prompts[7])
        # FIX: strip the local "timestamp" field before sending — OpenAI-style
        # endpoints reject messages with unknown keys. Keep last 30 messages.
        messages = [
            {"role": "system", "content": system_prompt},
            *[{"role": m["role"], "content": m["content"]} for m in conversation_history[-30:]]
        ]
        response = self.call_llm(messages, model)
        conversation_history.append({"role": "assistant", "content": response, "timestamp": datetime.now().isoformat()})
        # High autonomy: extract and run every code block found in the reply.
        if autonomy >= 5:
            code_blocks = self.extract_code_blocks(response)
            if code_blocks:
                result = f"{response}\n\n" + "="*70 + "\n"
                result += f"๐ค AUTONOMOUS CODE EXECUTION (Level {autonomy})\n"
                result += "="*70 + "\n\n"
                for i, code in enumerate(code_blocks, 1):
                    self.code_execution_count += 1
                    result += f"## ๐ง Code Block {i}\n"
                    result += f"**Length:** {len(code)} characters\n\n"
                    result += f"```python\n{code}\n```\n\n"
                    execution = self.execute_code_advanced(code)
                    if execution["success"]:
                        result += f"โ **Execution Successful**\n"
                        result += f"โฑ๏ธ Time: {execution['execution_time']:.3f}s\n"
                        result += f"๐ Output Size: {execution['output_size']} chars\n\n"
                        if execution["stdout"]:
                            result += f"**Output:**\n```\n{execution['stdout']}\n```\n\n"
                        # FIX: "message" exists only when there was no output;
                        # the original's execution["message"] raised KeyError
                        # for any snippet that printed something.
                        if execution.get("message"):
                            result += f"{execution['message']}\n\n"
                    else:
                        result += f"โ **Execution Failed**\n"
                        result += f"**Error:** {execution['error']}\n"
                        result += f"**Details:** {execution['details']}\n\n"
                        result += f"**Traceback:**\n```\n{execution.get('traceback', '')}\n```\n\n"
                    result += "-"*70 + "\n\n"
                # Per-session statistics appended after all blocks ran.
                result += f"๐ **Session Statistics:**\n"
                result += f"- Total Executions: {self.code_execution_count}\n"
                result += f"- Autonomy Level: {autonomy}/10\n"
                result += f"- Model: {model}\n\n"
                return result
        return response

    def get_session_stats(self) -> str:
        """Return a display-ready summary of the session's counters."""
        return f"""
๐ **Session Statistics**
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
๐ค Total Conversations: {self.total_conversations}
โก Code Executions: {self.code_execution_count}
๐ง Current Autonomy: {autonomy_level}/10
๐ค Current Model: {current_model}
โฐ Session Start: {conversation_history[0]['timestamp'][:19] if conversation_history else 'N/A'}
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
"""

    def clear_history(self):
        """Clear the shared conversation log and reset the session counters."""
        conversation_history.clear()
        self.code_execution_count = 0
        self.total_conversations = 0
        return "โ Conversation and statistics cleared!"
# Module-level singleton used by every Gradio event handler below.
chatbot = PowerfulAutonomousChatbot()
# Enhanced CSS injected into the Gradio Blocks app: chat bubbles, code
# blocks, stat boxes, and the gradient "quick action" pseudo-buttons.
CUSTOM_CSS = """
.gradio-container {
    max-width: 1500px !important;
    margin: auto !important;
}
.chat-message {
    padding: 20px;
    margin: 15px 0;
    border-radius: 12px;
    box-shadow: 0 4px 6px rgba(0,0,0,0.1);
}
.user-message {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    color: white;
}
.bot-message {
    background: linear-gradient(135deg, #2d3748 0%, #1a202c 100%);
    color: #e2e8f0;
}
.code-block {
    background: #1e1e1e;
    color: #d4d4d4;
    padding: 20px;
    border-radius: 8px;
    font-family: 'Courier New', monospace;
    margin: 15px 0;
    border-left: 4px solid #667eea;
}
.stats-box {
    background: linear-gradient(135deg, #f093fb 0%, #f5576c 100%);
    color: white;
    padding: 15px;
    border-radius: 8px;
    margin: 10px 0;
}
.autonomy-indicator {
    background: linear-gradient(135deg, #4facfe 0%, #00f2fe 100%);
    color: white;
    padding: 15px;
    border-radius: 8px;
    margin: 10px 0;
    text-align: center;
    font-weight: bold;
}
.quick-action-btn {
    background: linear-gradient(135deg, #fa709a 0%, #fee140 100%);
    color: #1a202c;
    border: none;
    padding: 12px 20px;
    border-radius: 8px;
    font-weight: bold;
    cursor: pointer;
    transition: transform 0.2s;
}
.quick-action-btn:hover {
    transform: translateY(-2px);
}
"""
def chat_interface(message, history, model, autonomy_level_val, clear_flag=False):
    """Gradio event handler: process one user turn (non-async).

    FIX: the original returned ``""`` into the model-dropdown and
    autonomy-slider output slots, wiping the user's selections on every
    message, and on clear it fed a status *string* into the Chatbot
    component. We now echo the current settings back unchanged.

    Args:
        message: Text from the input box (may be empty/None).
        history: Gradio [[user, bot], ...] chat pairs.
        model: Selected model id.
        autonomy_level_val: Selected autonomy level (1-10).
        clear_flag: When True, reset the conversation instead of chatting.

    Returns:
        (history, message_box, model, autonomy, clear_flag) matching the
        event's output components.
    """
    if clear_flag:
        chatbot.clear_history()
        return [], "", model, autonomy_level_val, False
    if not message:
        # Nothing to do: keep the chat and settings exactly as they are.
        return history, "", model, autonomy_level_val, False
    # Persist the UI selections into the module-level settings.
    global current_model, autonomy_level
    current_model = model
    autonomy_level = autonomy_level_val
    # Process the message synchronously and append the exchange.
    response = chatbot.autonomous_problem_solving(message, history, model, autonomy_level_val)
    history.append([message, response])
    return history, "", model, autonomy_level_val, False
def get_stats():
    """Gradio handler: return the session statistics as display-ready text."""
    return chatbot.get_session_stats()
def create_enhanced_interface():
    """Build and return the Gradio Blocks app.

    FIXES over the original wiring:
    - A single hidden clear-flag checkbox is created and reused; the
      original instantiated a fresh ``gr.Checkbox`` inline inside every
      inputs/outputs list, creating several stray components.
    - ``clear_btn`` previously listed ``chatbot_ui`` twice in its outputs
      and routed the status string into the chat history; it now resets
      the chat and the message box directly.
    - The example buttons captured the loop variable ``i`` late (every
      button reported the last index) and wrote "Example N" into the
      model dropdown (an invalid choice); each button now only fills the
      message box with its own example prompt.
    """
    with gr.Blocks(css=CUSTOM_CSS, title="๐ค Powerful Autonomous AI", theme=gr.themes.Soft()) as app:
        # Header banner
        gr.HTML("""
<div style="text-align: center; padding: 20px; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; border-radius: 10px; margin-bottom: 20px;">
<h1 style="margin: 0; font-size: 2.5em;">๐ค Powerful Autonomous AI Chatbot</h1>
<p style="margin: 10px 0; font-size: 1.2em;">10x More Autonomous โข Automate. Delegate. Accelerate.</p>
<p style="margin: 0; font-size: 1em;">Powerful for All โข Autonomy for All</p>
</div>
""")
        with gr.Row():
            with gr.Column(scale=4):
                chatbot_ui = gr.Chatbot(
                    label="๐ฌ Autonomous AI",
                    height=700,
                    avatar_images=("๐ค", "๐ค"),
                    elem_classes=["chat-message"],
                    show_copy_button=True
                )
                with gr.Row():
                    msg_input = gr.Textbox(
                        label="๐ญ Your Request",
                        placeholder="Ask me to create, build, analyze, or automate anything!",
                        lines=3,
                        scale=4,
                        info="The more specific, the better the AI will help!"
                    )
                    send_btn = gr.Button("๐ Execute", variant="primary", scale=1)
            with gr.Column(scale=1):
                gr.Markdown("### โ๏ธ **Configuration**")
                model_dropdown = gr.Dropdown(
                    label="๐ง **Model Selection**",
                    choices=MODEL_OPTIONS,
                    value=DEFAULT_MODEL,
                    info="8 powerful free models",
                    elem_classes=["model-selector"]
                )
                autonomy_slider = gr.Slider(
                    label="๐๏ธ **Autonomy Level**",
                    minimum=1,
                    maximum=10,
                    value=10,
                    step=1,
                    info="10 = Fully Autonomous AI Programmer"
                )
                gr.HTML("""
<div class="autonomy-indicator">
<h3>Level 10 = FULLY AUTONOMOUS</h3>
<p>AI will write, execute, and test code automatically!</p>
</div>
""")
                with gr.Accordion("๐ง **Advanced Settings**", open=False):
                    temp_slider = gr.Slider(
                        label="๐ก๏ธ Temperature",
                        minimum=0.0,
                        maximum=1.0,
                        value=0.7,
                        step=0.1,
                        info="Higher = more creative"
                    )
                    token_slider = gr.Slider(
                        label="๐ Max Tokens",
                        minimum=256,
                        maximum=4096,
                        value=2048,
                        step=256,
                        info="Response length"
                    )
                with gr.Row():
                    clear_btn = gr.Button("๐๏ธ Clear", variant="stop")
                    stats_btn = gr.Button("๐ Stats", variant="secondary")
                stats_output = gr.HTML(label="Statistics")
                gr.Markdown("### ๐ฅ **Quick Actions**")
                # Decorative quick-action tiles (styled by CUSTOM_CSS).
                with gr.Column():
                    gr.HTML('<div class="quick-action-btn">๐ Full Data Analysis Pipeline</div>')
                    gr.HTML('<div class="quick-action-btn">๐ค Complete ML Model</div>')
                    gr.HTML('<div class="quick-action-btn">๐ Interactive Dashboard</div>')
                    gr.HTML('<div class="quick-action-btn">๐ Web Scraper + API</div>')
                    gr.HTML('<div class="quick-action-btn">๐ฎ Build a Game</div>')
                    gr.HTML('<div class="quick-action-btn">๐ NLP Analysis Tool</div>')
                    gr.HTML('<div class="quick-action-btn">๐ ๏ธ API Generator</div>')
                    gr.HTML('<div class="quick-action-btn">๐ก Automation Script</div>')

        # FIX: one hidden flag component shared by both chat events.
        clear_flag = gr.Checkbox(value=False, visible=False)

        # Event handlers
        def update_settings(temp, tokens):
            # Persist slider values into the module-level generation settings.
            global temperature, max_tokens
            temperature = temp
            max_tokens = tokens
            return gr.update()

        temp_slider.change(fn=update_settings, inputs=[temp_slider, token_slider], outputs=[])
        token_slider.change(fn=update_settings, inputs=[temp_slider, token_slider], outputs=[])

        # Chat events (button click and textbox submit share one wiring).
        chat_io = dict(
            fn=chat_interface,
            inputs=[msg_input, chatbot_ui, model_dropdown, autonomy_slider, clear_flag],
            outputs=[chatbot_ui, msg_input, model_dropdown, autonomy_slider, clear_flag]
        )
        send_btn.click(**chat_io)
        msg_input.submit(**chat_io)

        # Stats
        stats_btn.click(fn=get_stats, outputs=stats_output)

        # Clear: reset backend state, then blank the chat and message box.
        def do_clear():
            chatbot.clear_history()
            return [], ""

        clear_btn.click(fn=do_clear, outputs=[chatbot_ui, msg_input])

        # Quick actions with example prompts
        examples = [
            "Create a complete data analysis pipeline that loads sample data, performs EDA, creates multiple visualizations (histograms, scatter plots, heatmaps, time series), generates insights, and exports a comprehensive report. Execute everything automatically.",
            "Build a complete machine learning project from scratch: generate synthetic dataset, build and train 3 different models (Random Forest, SVM, Neural Network), compare performance, visualize results, and create a comprehensive evaluation report.",
            "Create an interactive data visualization dashboard using plotly with multiple chart types (line charts, bar charts, pie charts, box plots, scatter plots, heatmaps), allow user controls, and export to HTML. Execute and test everything.",
            "Build a web scraper that fetches data from multiple sources, processes and cleans the data, stores it in a structured format, creates visualizations, and provides an API endpoint. Test the complete pipeline.",
            "Create a complete Python game (e.g., Snake, Tetris, or 2048) with score tracking, levels, sound effects, and save/load functionality. Include comprehensive documentation and execute it.",
            "Build an NLP analysis tool that processes text data, performs sentiment analysis, named entity recognition, topic modeling, and creates beautiful visualizations. Include example data and execute.",
            "Create an automatic API generator that takes a CSV/JSON file, analyzes the structure, generates a FastAPI application with CRUD operations, documentation, and testing. Deploy and test the API.",
            "Create an automation script that monitors files, processes data, generates reports, sends notifications, and includes error handling and logging. Test the complete workflow."
        ]
        for idx, example in enumerate(examples, start=1):
            # FIX: bind both the index and the prompt as default arguments so
            # each button keeps its own values (late-binding closure bug).
            gr.Button(value=f"Example {idx}", variant="secondary").click(
                fn=lambda ex=example: ex,
                outputs=[msg_input]
            )
    return app
# Script entry point: build the UI and start the web server.
if __name__ == "__main__":
    app = create_enhanced_interface()
    app.launch(
        server_name="0.0.0.0",  # listen on all interfaces (needed in containers/Spaces)
        server_port=7861,
        share=False,       # no public Gradio share link
        show_error=True,   # surface handler exceptions in the UI
        quiet=False
    )