# (Hugging Face Spaces page chrome captured with this source — not part of the app)
# Spaces: Sleeping
import asyncio
import json
import os
import re
import subprocess
import sys
import tempfile
import time
import uuid

import streamlit as st
from langchain_core.messages import SystemMessage, AIMessage, HumanMessage, ToolMessage
from streamlit_ace import st_ace

from code_assistant_runnable import get_runnable
# Page setup — must be the first st.* call Streamlit sees on each rerun.
st.set_page_config(
    page_title="AI Code Editor",
    page_icon=":computer:",
    layout="wide",
)
def create_code_assistant_instance():
    """Build the code-assistant runnable, or return None on failure.

    Torch is imported lazily so that any initialization problem (missing
    CUDA, bad install, model load error) is caught here and surfaced in
    the UI instead of crashing the whole app.
    """
    try:
        import torch

        # Inference-only app: autograd bookkeeping would just waste memory.
        torch.set_grad_enabled(False)
        return get_runnable()
    except Exception as e:
        st.error(f"Error initializing chatbot: {str(e)}")
        return None
# Build the assistant once per script run (Streamlit re-executes top to bottom).
chatbot = create_code_assistant_instance()

# Seed session state on first load only; later reruns keep the history.
if 'messages' not in st.session_state:
    st.session_state.messages = [
        AIMessage(content="Hello, I am your coding assistant. How can I help you?"),
    ]
if 'editor_code' not in st.session_state:
    st.session_state.editor_code = ''

# Fixed pane heights in pixels.
EDITOR_HEIGHT = 400
OUTPUT_HEIGHT = 150
# Minimal CSS overrides: dark output/chat panes, tighter page padding,
# consistent section headers, and a styled loading indicator.
st.markdown(
    """
<style type="text/css">
.output-container {
    background-color: rgba(17, 19, 23, 0.8);
    border-radius: 4px;
    padding: 1rem;
    margin-top: 0.5rem;
    min-height: 150px;
    color: white;
}
.placeholder-text {
    color: gray;
    font-style: italic;
}
/* Remove extra padding */
.block-container {
    padding-top: 1rem !important;
}
/* Ensure chat messages are visible */
.stChatMessage {
    background-color: rgba(17, 19, 23, 0.8) !important;
}
/* Style section headers consistently */
.section-header {
    font-size: 1rem;
    margin-bottom: 1rem;
    color: rgb(250, 250, 250);
    font-weight: 500;
}
/* Ensure columns align at the top */
.column-container {
    display: flex;
    align-items: flex-start;
}
/* Loading indicator styles */
.loading-spinner {
    display: flex;
    align-items: center;
    gap: 0.5rem;
    padding: 0.5rem;
    border-radius: 0.25rem;
    background-color: rgba(17, 19, 23, 0.8);
}
.loading-text {
    color: #ffffff;
    font-size: 0.875rem;
}
</style>
""",
    unsafe_allow_html=True,
)
def analyze_code(code, language):
    """Run a handful of heuristic lint checks over *code*.

    Args:
        code: Source text to scan.
        language: "python" or "rust"; any other value gets no
            language-specific checks.

    Returns:
        A newline-joined string of warnings/hints, or a single
        all-clear line when nothing was flagged.
    """
    analysis = []
    if language == "python":
        # Check for basic Python issues
        if "while" in code and "break" not in code:
            analysis.append("⚠️ While loop detected without break condition - check for infinite loops")
        if "except:" in code and "except Exception:" not in code:
            analysis.append("⚠️ Bare except clause detected - consider catching specific exceptions")
        if "print" in code and "if __name__ == '__main__':" not in code:
            analysis.append("💡 Consider adding main guard for scripts with print statements")
        # Bug fix: the old check (r'^\s+') fired on ANY indented line, so
        # essentially every real Python snippet was flagged. "Mixed"
        # indentation means both tab-indented AND space-indented lines exist.
        if re.search(r"^\t", code, re.MULTILINE) and re.search(r"^ ", code, re.MULTILINE):
            analysis.append("🔍 Mixed indentation detected - check spacing")
    elif language == "rust":
        if "unwrap()" in code:
            analysis.append("⚠️ Usage of unwrap() detected - consider proper error handling")
        if "mut" not in code and len(code) > 50:
            analysis.append("💡 No mutable variables detected - verify if intentional")
    if not analysis:
        analysis.append("✅ No immediate issues detected in the code")
    return "\n".join(analysis)
def dummy_ai_response(question, code_context, language):
    """Return a canned assistant reply keyed off words in *question*.

    Placeholder for a real LLM call: it sleeps briefly to mimic latency,
    then picks a template reply based on simple keyword matching.
    """
    time.sleep(1)  # simulate processing time
    q = question.lower()
    if "debug" in q:
        return (f"Here's my analysis of your {language} code:\n"
                + analyze_code(code_context, language))
    if "how" in q and "implement" in q:
        return (f"To implement this in {language}, you might want to consider:\n"
                "1. Breaking down the problem\n"
                "2. Using appropriate data structures\n"
                f"3. Following {language} best practices")
    if "error" in q or "not working" in q:
        return ("Let me help you debug that. Could you:\n"
                "1. Share the specific error message\n"
                "2. Describe what you expected to happen\n"
                "3. Describe what actually happened")
    return (f"I see you're working with {language}. "
            "Could you clarify what specific help you need with your code?")
def run_python_code(code):
    """Execute *code* with the current interpreter and return its output.

    The snippet is written to a private temp file (instead of clobbering
    a fixed "temp_code.py" in the CWD) and run in a subprocess.

    Returns:
        The run's stderr if any was produced, otherwise its stdout;
        on failure, a human-readable "Error: ..." string.
    """
    tmp_path = None
    try:
        with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as tmp:
            tmp.write(code)
            tmp_path = tmp.name
        # sys.executable targets the interpreter running this app — more
        # reliable than a bare "python" on PATH (venvs, Windows launchers).
        result = subprocess.run(
            [sys.executable, tmp_path],
            capture_output=True,
            text=True,
            timeout=30,  # guard against infinite loops in user code
        )
        return result.stderr if result.stderr else result.stdout
    except subprocess.TimeoutExpired:
        return "Error: execution timed out after 30 seconds"
    except Exception as e:
        return f"Error: {e}"
    finally:
        # Always remove the scratch file, even when the run failed.
        if tmp_path and os.path.exists(tmp_path):
            os.remove(tmp_path)
def run_rust_code(code):
    """Compile *code* with rustc and run the resulting binary.

    Returns:
        The compiler's error text on a failed build; otherwise the
        program's stdout, or its stderr if the run emitted any.
    """
    with open('code.rs', 'w') as src:
        src.write(code)
    # subprocess.run wraps the Popen/communicate pair used previously.
    build = subprocess.run(['rustc', 'code.rs'],
                           capture_output=True,
                           text=True)
    if build.returncode != 0:
        return f"Compilation Error: {build.stderr}"
    run = subprocess.run(['./code'],
                         capture_output=True,
                         text=True)
    return run.stdout if not run.stderr else run.stderr
def run_js_code():
    """Placeholder: JavaScript execution is not implemented yet."""
    return None
def dummy_auto_complete(code: str, language: str = None) -> str:
    """Simulate an LLM code-completion call.

    Args:
        code: The incomplete editor contents (currently unused by the stub).
        language: Selected programming language.

    Returns:
        A canned completion snippet for the language; unknown or missing
        languages fall back to the Python sample.
    """
    time.sleep(2)  # simulate processing time

    python_snippet = """# Function to calculate sum
def calculate_sum(a: int, b: int) -> int:
    '''Calculate sum of two integers'''
    return a + b"""
    js_snippet = """// Function to calculate sum
function calculateSum(a, b) {
    return a + b;
}"""
    rust_snippet = """// Function to calculate sum
fn calculate_sum(a: i32, b: i32) -> i32 {
    a + b
}"""

    samples = {
        "python": python_snippet,
        "javascript": js_snippet,
        "rust": rust_snippet,
    }
    return samples.get(language, samples["python"])
# Sidebar — editor appearance and language settings.
# NOTE: theme / font_size / show_gutter / language are module-level names
# read by the editor widget and the run/debug handlers further down.
with st.sidebar:
    st.title("SolCoder")
    st.header("Solana AI Code Editor")
    theme = st.selectbox(
        "Editor Theme",
        ["monokai", "github", "solarized_dark", "solarized_light", "dracula"],
    )
    font_size = st.slider("Font Size", 12, 24, 14)
    show_gutter = st.checkbox("Show Line Numbers", value=True)
    language = st.selectbox("Language", ["python", "javascript", "rust"], index=0)
# Two-column layout: editor on the left, chat assistant on the right.
col1, col2 = st.columns([3, 2])

# Left column — code editor, action buttons, and output area.
with col1:
    st.subheader("")  # spacer so the two columns' headers line up
    st.subheader("Code Editor")
    st.markdown("Write your code below and use the buttons to run or debug")

    # Ace editor seeded from session state; auto_update pushes edits back
    # into `editor` on every keystroke.
    editor = st_ace(
        value=st.session_state.editor_code,
        language=language,
        theme=theme,
        font_size=font_size,
        show_gutter=show_gutter,
        auto_update=True,
        height=EDITOR_HEIGHT,
        key="editor",
    )

    auto_col, run_col, debug_col = st.columns(3)
    with auto_col:
        auto_complete_btn = st.button("Auto-Complete", use_container_width=True)
    with run_col:
        run_btn = st.button("Run Code", use_container_width=True)
    with debug_col:
        debug_btn = st.button("Debug Code", use_container_width=True)

    if auto_complete_btn:
        with st.spinner("Generating code completion..."):
            try:
                # NOTE(review): this completes the saved session text rather
                # than the live `editor` value — confirm that is intended.
                completed_code = dummy_auto_complete(st.session_state.editor_code, language)
                st.markdown(f'<div class="output-area">```{completed_code}```</div>', unsafe_allow_html=True)
                st.session_state.editor_code = completed_code
                st.success("Code successfully completed!")
            except Exception as e:
                st.error(f"Error during code completion: {str(e)}")

    # Output area: run the editor contents for supported languages.
    if run_btn:
        if language == "python":
            output = run_python_code(editor)
        elif language == "rust":
            output = run_rust_code(editor)
        else:
            output = "Currently, only Python and Rust execution is supported."
        st.markdown(f'<div class="output-area">{output}</div>', unsafe_allow_html=True)
    else:
        st.markdown('<div class="output-area placeholder-text">Code output will appear here...</div>',
                    unsafe_allow_html=True)
def format_ai_response(response):
    """Flatten an assistant response into markdown-ready text.

    A structured response ({'generation': obj}) is rendered as its prefix
    followed by fenced import and code blocks; everything else is simply
    stringified.
    """
    if isinstance(response, dict) and 'generation' in response:
        generation = response['generation']
        # NOTE(review): assumes the generation object exposes prefix /
        # imports / code attributes (a code-solution schema) — confirm
        # against the runnable's output type.
        parts = []
        if hasattr(generation, 'prefix'):
            parts.append(generation.prefix)
        if hasattr(generation, 'imports'):
            parts.append(f"```\n{generation.imports}\n```")
        if hasattr(generation, 'code'):
            parts.append(f"```\n{generation.code}\n```")
        return "\n".join(parts)
    return str(response)  # fallback for simple responses
# Right column — chat interface with the code assistant.
with col2:
    st.subheader("")  # spacer so the two columns' headers line up
    st.subheader("Code Assistant Agent")

    def validate_message(message):
        """Accept only AI/Human messages carrying non-empty string content."""
        return (isinstance(message, (AIMessage, HumanMessage))
                and bool(message.content)
                and isinstance(message.content, str))

    def add_message_to_history(message):
        """Append *message* to the session history; True if it validated."""
        if validate_message(message):
            st.session_state.messages.append(message)
            return True
        return False

    # Render the running conversation, splitting AI replies on ``` fences
    # so code segments render with st.code instead of raw markdown.
    for message in st.session_state.messages:
        if isinstance(message, AIMessage):
            with st.chat_message("AI"):
                content = message.content
                if "```" in content:
                    for idx, segment in enumerate(content.split("```")):
                        if idx % 2 == 0:  # plain text between fences
                            if segment.strip():
                                st.markdown(segment)
                        else:  # fenced code
                            st.code(segment)
                else:
                    st.markdown(content)
        elif isinstance(message, HumanMessage):
            with st.chat_message("Human"):
                st.markdown(message.content)

    # Wipe the conversation and redraw immediately.
    if st.button("Clear Chat", use_container_width=True):
        st.session_state.messages = []
        st.rerun()

    if prompt := st.chat_input("Ask about writing solana code..."):
        user_message = HumanMessage(content=prompt)
        if add_message_to_history(user_message):
            with st.chat_message("AI"):
                response_placeholder = st.empty()
                # Spinner lives inside the placeholder so it can be cleared
                # the moment a response arrives.
                with response_placeholder, st.spinner("AI is thinking..."):
                    try:
                        ai_response = chatbot.invoke({
                            "messages": [("user", prompt)],
                            "iterations": 0,
                            "error": "",
                        })
                        formatted_response = format_ai_response(ai_response)
                        ai_message = AIMessage(content=formatted_response)
                        response_placeholder.empty()
                        st.markdown(formatted_response)
                        add_message_to_history(ai_message)
                        # Rerun only after the exchange fully succeeded.
                        st.rerun()
                    except Exception as e:
                        response_placeholder.error(f"Error generating response: {str(e)}")