Spaces:
Running
Running
| import gradio as gr | |
| import torch | |
| import time | |
| import re | |
| from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline | |
| from typing import Dict, List, Tuple, Optional | |
| import os | |
| import json | |
| from functools import lru_cache | |
| import os | |
| os.environ["TOKENIZERS_PARALLELISM"] = "false" # Prevents tokenizer warnings | |
# --- CPU-optimized model configuration ---
# TinyLlama 1.1B chat model: small enough to serve from CPU-only hardware.
MODEL_NAME = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
DEVICE = "cpu"  # no GPU assumed in this deployment
DTYPE = torch.float32  # Using float32 for CPU stability (float16 not well supported on CPU)
| # File system emulation | |
class FileSystem:
    """In-memory pseudo file system backing the editor UI.

    Keeps a name -> content mapping, tracks the currently open file, and
    records a bounded history of save/create operations.
    """

    def __init__(self) -> None:
        # Seed with starter files so the editor is never empty on first load.
        self.files: Dict[str, str] = {
            "main.js": "// Start coding here\nconsole.log('Hello Axon Pro');",
            "utils.js": "// Utility functions\nfunction add(a, b) {\n return a + b;\n}",
            "style.css": "/* Add your styles here */\nbody {\n font-family: monospace;\n}"
        }
        self.current_file = "main.js"
        self.history: List[Tuple[str, str, str]] = []
        self.max_history = 50

    def _record(self, action: str, filename: str, preview: str) -> None:
        """Append a history entry and trim the log to max_history entries."""
        self.history.append((action, filename, preview))
        if len(self.history) > self.max_history:
            self.history.pop(0)

    def save_file(self, content: str) -> None:
        """Write content to the currently selected file."""
        if self.current_file:
            self.files[self.current_file] = content
            self._record("save", self.current_file, content[:100] + "...")

    def get_file(self, filename: str) -> str:
        """Return a file's content, or '' if it does not exist."""
        return self.files.get(filename, "")

    def get_current_file_content(self) -> str:
        """Return the content of the file currently open in the editor."""
        return self.files.get(self.current_file, "")

    def set_current_file(self, filename: str) -> None:
        # Silently ignore unknown names so the UI can never select a ghost file.
        if filename in self.files:
            self.current_file = filename

    def create_file(self, filename: str, content: str = "") -> None:
        """Create a new file (no-op if the name exists) and make it current."""
        if filename not in self.files:
            self.files[filename] = content
            self.current_file = filename
            # BUGFIX: history is now trimmed here too — previously only
            # save_file enforced max_history, so creates could grow unbounded.
            self._record("create", filename, content[:50] + "...")

    def get_all_files(self) -> List[str]:
        """Return the list of known filenames."""
        return list(self.files.keys())

    def get_context(self) -> str:
        """Concatenate every file, each prefixed with its name, as AI context.

        BUGFIX: the header previously emitted a hard-coded placeholder string
        instead of the actual filename, so the model could never tell which
        file a chunk of code came from.
        """
        context = ""
        for filename, content in self.files.items():
            context += f"// File: {filename}\n{content}\n\n"
        return context
# Initialize file system
# Module-level singleton shared by all Gradio event handlers below.
fs = FileSystem()
# Cache the model so repeated AI actions don't re-load the weights.
_PIPELINE_CACHE: Optional[object] = None

def load_model():
    """Load (once) and return the TinyLlama text-generation pipeline.

    Returns the transformers pipeline, or None if loading fails. A failed
    load is NOT cached, so the next call retries.

    BUGFIX: the old code's comment promised caching but the pipeline was
    rebuilt on every single call (lru_cache was imported and never used);
    the module-level cache makes this a true singleton.
    """
    global _PIPELINE_CACHE
    if _PIPELINE_CACHE is not None:
        return _PIPELINE_CACHE
    print("Loading TinyLlama model for CPU... This may take a moment.")
    start_time = time.time()
    try:
        # CPU deployment: bitsandbytes quantization is unavailable here,
        # so load full float32 weights.
        tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
        model = AutoModelForCausalLM.from_pretrained(
            MODEL_NAME,
            torch_dtype=DTYPE,
            device_map=DEVICE,
            low_cpu_mem_usage=True
        )
        pipe = pipeline(
            "text-generation",
            model=model,
            tokenizer=tokenizer,
            device=-1,  # -1 forces CPU in transformers pipelines
            max_new_tokens=256,
            temperature=0.2,
            top_p=0.95,
            do_sample=True
        )
        load_time = time.time() - start_time
        print(f"Model loaded successfully in {load_time:.2f} seconds!")
        _PIPELINE_CACHE = pipe
        return pipe
    except Exception as e:
        # Report and return None; callers surface a user-facing error string.
        print(f"Error loading model: {str(e)}")
        return None
def run_code(code: str) -> Tuple[str, str]:
    """Pretend to execute JavaScript code in a sandbox.

    Placeholder implementation: no real JS sandbox is wired in yet, so this
    always reports success. Returns a (stdout_text, error_text) pair; exactly
    one of the two is non-empty.
    """
    started = time.time()
    stdout_text, error_text = "", ""
    try:
        # Placeholder — a real implementation would hand `code` to a proper
        # JavaScript sandbox and capture its output.
        elapsed = time.time() - started
        stdout_text = f"✓ Code executed successfully\nExecution time: {elapsed:.4f}s"
    except Exception as exc:
        error_text = f"✗ Error: {str(exc)}"
    return stdout_text, error_text
def generate_completion(code: str, cursor_pos: int = 0) -> str:
    """Ask the model to complete `code`, using every open file as context."""
    pipe = load_model()
    if not pipe:
        return "Error: Failed to load AI model. Please try again later."
    # Give the model visibility into the whole workspace, not just this file.
    workspace = fs.get_context()
    prompt = f"""<|system|>
You are an AI programming assistant, an expert at completing code in JavaScript.
Complete the code below. Only return the completion, no explanations.
Keep the same indentation style as the code above.
</s>
<|user|>
Current code context:
{workspace}
Complete this code:
{code}
</s>
<|assistant|>
"""
    try:
        generated = pipe(
            prompt,
            pad_token_id=pipe.tokenizer.eos_token_id,
            num_return_sequences=1,
            return_full_text=False
        )
        completion = generated[0]['generated_text'].strip()
        # Drop any echo of the user's own code from the model output.
        stripped = code.strip()
        if stripped in completion:
            completion = completion.replace(stripped, "")
        return completion
    except Exception as exc:
        return f"Error generating completion: {str(exc)}"
def explain_code(code: str) -> str:
    """Ask the model for a line-by-line explanation of `code`."""
    pipe = load_model()
    if not pipe:
        return "Error: Failed to load AI model. Please try again later."
    prompt = f"""<|system|>
You are an AI programming assistant that explains code clearly and concisely.
Provide a line-by-line explanation of what the code does.
</s>
<|user|>
Explain this JavaScript code:
{code}
</s>
<|assistant|>
"""
    try:
        generated = pipe(
            prompt,
            pad_token_id=pipe.tokenizer.eos_token_id,
            max_new_tokens=512,  # explanations need more room than completions
            num_return_sequences=1,
            return_full_text=False
        )
        return generated[0]['generated_text'].strip()
    except Exception as exc:
        return f"Error explaining code: {str(exc)}"
def refactor_code(code: str) -> str:
    """Ask the model for a refactored version of `code` with a change summary."""
    pipe = load_model()
    if not pipe:
        return "Error: Failed to load AI model. Please try again later."
    prompt = f"""<|system|>
You are an AI programming assistant that refactors code for better readability, performance, and maintainability.
Provide a refactored version of the code with improvements.
Explain the key changes in a comment at the top.
</s>
<|user|>
Refactor this JavaScript code:
{code}
</s>
<|assistant|>
"""
    try:
        generated = pipe(
            prompt,
            pad_token_id=pipe.tokenizer.eos_token_id,
            max_new_tokens=512,  # refactors can be longer than the input
            num_return_sequences=1,
            return_full_text=False
        )
        return generated[0]['generated_text'].strip()
    except Exception as exc:
        return f"Error refactoring code: {str(exc)}"
def generate_code(prompt: str) -> str:
    """Generate JavaScript code from a natural-language description.

    Returns the generated code, or an error string if the model is
    unavailable or generation fails.
    """
    model = load_model()
    if not model:
        return "Error: Failed to load AI model. Please try again later."
    # FIX: build the chat prompt in its own variable instead of rebinding the
    # `prompt` parameter — the old code shadowed its own argument, which is
    # fragile and confusing to maintain.
    chat_prompt = f"""<|system|>
You are an AI programming assistant that writes clean, efficient JavaScript code.
Generate code based on the user's description. Include comments for complex parts.
</s>
<|user|>
Write JavaScript code that: {prompt}
</s>
<|assistant|>
"""
    try:
        result = model(
            chat_prompt,
            pad_token_id=model.tokenizer.eos_token_id,
            max_new_tokens=512,
            num_return_sequences=1,
            return_full_text=False
        )
        return result[0]['generated_text'].strip()
    except Exception as e:
        return f"Error generating code: {str(e)}"
def process_voice_input(transcript: str) -> str:
    """Return the speech transcript unchanged (placeholder).

    Actual speech capture happens browser-side via the Web Speech API; this
    hook exists so a server-side transform can be slotted in later.
    """
    return transcript
def create_diff_view(original: str, modified: str) -> str:
    """Render a naive line-by-line diff of two code versions.

    Not a real LCS diff: lines are compared positionally, so a single
    insertion shows up as a remove/add pair for every following line.
    Equal lines are prefixed with a space, removals with '- ', additions
    with '+ '.
    """
    old_lines = original.split('\n')
    new_lines = modified.split('\n')
    rendered: List[str] = []
    i = j = 0
    while i < len(old_lines) and j < len(new_lines):
        old, new = old_lines[i], new_lines[j]
        if old == new:
            rendered.append(f" {old}")
            i, j = i + 1, j + 1
            continue
        rendered.append(f"- {old}")
        i += 1
        rendered.append(f"+ {new}")
        j += 1
    # One side may still have unconsumed lines; emit them as pure
    # removals / additions.
    rendered.extend(f"- {line}" for line in old_lines[i:])
    rendered.extend(f"+ {line}" for line in new_lines[j:])
    return "\n".join(rendered)
# Gradio UI Components (updated for Gradio 6.5.1)
# Dark, monospace IDE-style theme; the CSS below fixes panel heights and wrapping.
with gr.Blocks(
    title="Axon Pro - Free AI IDE",
    theme=gr.themes.Default(
        font=[gr.themes.GoogleFont('JetBrains Mono'), 'monospace'],
        font_mono=[gr.themes.GoogleFont('JetBrains Mono'), 'monospace'],
        primary_hue="blue",
        neutral_hue="gray",
        radius=0  # NOTE(review): recent Gradio themes take `radius_size`, not `radius` — confirm against the pinned version
    ).set(
        button_primary_background_fill="*primary_500",
        button_primary_background_fill_hover="*primary_600",
        body_background_fill="*neutral_900",
        body_text_color="*neutral_50"
    ),
    css="""
    .editor-container { height: 50vh; }
    .terminal { height: 15vh; background-color: #1e1e1e; color: #d4d4d4; overflow: auto; }
    .diff-view { height: 40vh; overflow: auto; }
    .file-explorer { height: 50vh; background-color: #252526; overflow: auto; }
    .ai-chat { height: 40vh; overflow: auto; }
    .status-bar { background-color: #007acc; color: white; padding: 5px; text-align: center; }
    .btn { min-width: 100px; }
    .monaco-editor { height: 100% !important; }
    .theme-dark { --body-bg: #1e1e1e; --editor-bg: #1e1e1e; }
    .button-large { min-height: 40px !important; }
    .tab-nav { background-color: #252526 !important; }
    .tabitem { background-color: #1e1e1e !important; }
    .code-wrap { white-space: pre-wrap !important; word-break: break-word !important; }
    """
) as demo:
    gr.Markdown("# ⚡ Axon Pro — Free AI-Powered Code IDE")
    with gr.Tabs():
        with gr.Tab("Editor", id="editor-tab"):
            with gr.Row(equal_height=True):
                # File Explorer (left column)
                with gr.Column(scale=1, min_width=200):
                    gr.Markdown("### 📁 File Explorer")
                    # The dropdown doubles as the file list; its choices are
                    # refreshed by the create_new_file handler below.
                    file_list = gr.Dropdown(
                        choices=fs.get_all_files(),
                        value=fs.current_file,
                        label="Files",
                        interactive=True,
                        container=False
                    )
                    with gr.Row():
                        new_file_btn = gr.Button("➕ New File", variant="secondary", elem_classes="button-large")
                        save_btn = gr.Button("💾 Save", variant="secondary", elem_classes="button-large")
                    new_file_name = gr.Textbox(
                        placeholder="filename.js",
                        label="New File Name",
                        container=False
                    )
                # Main Editor Area (right column)
                with gr.Column(scale=4):
                    editor = gr.Code(
                        value=fs.get_current_file_content(),
                        label="Code Editor",
                        language="javascript",
                        lines=20,
                        interactive=True,
                        elem_classes="code-wrap"
                    )
                    # Action toolbar; keyboard shortcuts are wired in demo.load.
                    with gr.Row():
                        run_btn = gr.Button("▶ Run (Ctrl+R)", variant="primary", elem_classes="button-large")
                        complete_btn = gr.Button("✨ Complete (Ctrl+Enter)", variant="secondary", elem_classes="button-large")
                        explain_btn = gr.Button("📝 Explain (Ctrl+Shift+E)", variant="secondary", elem_classes="button-large")
                        refactor_btn = gr.Button("🔧 Refactor", variant="secondary", elem_classes="button-large")
                        generate_btn = gr.Button("⚡ Generate", variant="secondary", elem_classes="button-large")
                        voice_btn = gr.Button("🎤 Voice", variant="secondary", elem_classes="button-large")
            # Bottom panel: terminal / AI chat / diff review tabs.
            with gr.Tabs():
                with gr.Tab("Terminal", id="terminal-tab"):
                    terminal = gr.Textbox(
                        value="$ _ (Ctrl+R to run)",
                        lines=5,
                        interactive=False,
                        elem_classes="terminal"
                    )
                    clear_btn = gr.Button("CLEAR", variant="secondary", elem_classes="button-large")
                with gr.Tab("AI Chat", id="chat-tab"):
                    chat_history = gr.Chatbot(
                        label="Axon AI",
                        elem_classes="ai-chat",
                        height="100%",
                        bubble_full_width=False,  # NOTE(review): deprecated in Gradio 4.x and removed later — confirm against the pinned version
                        avatar_images=(None, "https://api.iconify.design/teenyicons:ai-solid.svg")
                    )
                    with gr.Row():
                        chat_input = gr.Textbox(
                            placeholder="Ask about your code...",
                            label="Message Axon AI",
                            container=False,
                            scale=7
                        )
                        send_btn = gr.Button("Send", variant="primary", elem_classes="button-large", scale=1)
                with gr.Tab("Diff View", id="diff-tab"):
                    # Read-only staging area for AI-proposed changes.
                    diff_view = gr.Code(
                        value="",
                        label="AI Changes",
                        language="diff",
                        elem_classes="diff-view code-wrap",
                        interactive=False
                    )
                    with gr.Row():
                        apply_btn = gr.Button("Apply Changes", variant="primary", elem_classes="button-large")
                        discard_btn = gr.Button("Discard Changes", variant="secondary", elem_classes="button-large")
| status_bar = gr.Markdown( | |
| f"**AXON PRO v1.0** | {len(fs.get_all_files())} files | {sum(len(content.split('\\n')) for content in fs.files.values())} lines | JavaScript | Axon Pro v1.0 | AI Ready", | |
| elem_classes="status-bar" | |
| ) | |
    # Hidden state for tracking (per-session values, not rendered)
    current_file_state = gr.State(fs.current_file)  # name of the file open in the editor
    diff_original_state = gr.State("")              # editor content before an AI change
    diff_modified_state = gr.State("")              # AI-proposed replacement content
    diff_mode_state = gr.State(False)               # True while a diff awaits apply/discard
| # Event handlers | |
| def update_file_content(content, current_file): | |
| fs.save_file(content) | |
| return fs.get_current_file_content() | |
| def load_file(filename): | |
| fs.set_current_file(filename) | |
| return fs.get_current_file_content(), filename | |
| def create_new_file(name): | |
| if name and "." in name: | |
| fs.create_file(name) | |
| return gr.update(choices=fs.get_all_files(), value=name), fs.get_current_file_content() | |
| return gr.update(choices=fs.get_all_files()), fs.get_current_file_content() | |
| def save_file(content, current_file): | |
| fs.save_file(content) | |
| return content | |
| def run_code_wrapper(content): | |
| output, error = run_code(content) | |
| return output if not error else error | |
| def complete_code_wrapper(content): | |
| completion = generate_completion(content) | |
| return content + completion | |
| def explain_code_wrapper(content): | |
| explanation = explain_code(content) | |
| fs.save_file(content) | |
| diff_original = content | |
| diff_modified = f"// EXPLANATION:\n// {explanation.replace('\\n', '\\n// ')}\n\n{content}" | |
| # Switch to diff view | |
| diff = create_diff_view(diff_original, diff_modified) | |
| return diff, diff_original, diff_modified, True | |
| def refactor_code_wrapper(content): | |
| refactored = refactor_code(content) | |
| fs.save_file(content) | |
| diff_original = content | |
| diff_modified = refactored | |
| # Switch to diff view | |
| diff = create_diff_view(diff_original, diff_modified) | |
| return diff, diff_original, diff_modified, True | |
| def generate_code_wrapper(prompt, history): | |
| if not prompt.strip(): | |
| return diff_view.value, diff_original_state.value, diff_modified_state.value, diff_mode_state.value, history | |
| generated = generate_code(prompt) | |
| # Switch to diff view with the generated code | |
| diff_original = "" | |
| diff_modified = generated | |
| # Switch to diff view | |
| diff = create_diff_view(diff_original, diff_modified) | |
| return diff, diff_original, diff_modified, True, history + [[prompt, "Generated code is ready for review in Diff View"]] | |
| def apply_diff(diff_original, diff_modified, in_diff_mode): | |
| if in_diff_mode: | |
| fs.save_file(diff_modified) | |
| return fs.get_current_file_content(), False | |
| return gr.update(), in_diff_mode | |
| def discard_diff(): | |
| return fs.get_current_file_content(), False | |
| def clear_terminal(): | |
| return "$ _ (Ctrl+R to run)" | |
| def handle_chat(message, history): | |
| if not message.strip(): | |
| return "", history | |
| # For demo, just echo the message with a placeholder response | |
| response = f"AI: This is a demo response. In a full implementation, I'd analyze your code context and provide helpful suggestions." | |
| new_history = history + [[message, response]] | |
| return "", new_history | |
| def switch_to_diff_tab(): | |
| return gr.Tabs(selected="diff-tab") | |
| def switch_to_editor_tab(): | |
| return gr.Tabs(selected="editor-tab") | |
    # Bind events
    # Editor edits are persisted on every change (autosave).
    editor.change(update_file_content, [editor, current_file_state], editor)
    file_list.change(load_file, file_list, [editor, current_file_state])
    new_file_btn.click(create_new_file, new_file_name, [file_list, editor])
    save_btn.click(save_file, [editor, current_file_state], editor)
    run_btn.click(run_code_wrapper, editor, terminal)
    complete_btn.click(complete_code_wrapper, editor, editor)
    # Explain/Refactor stage their result in the diff view, then jump to it.
    # NOTE(review): `demo.tabs` is used as an output target below — confirm
    # that Blocks exposes a `tabs` attribute in the pinned Gradio version.
    explain_btn.click(
        explain_code_wrapper,
        editor,
        [diff_view, diff_original_state, diff_modified_state, diff_mode_state]
    ).then(
        switch_to_diff_tab, None, demo.tabs
    )
    refactor_btn.click(
        refactor_code_wrapper,
        editor,
        [diff_view, diff_original_state, diff_modified_state, diff_mode_state]
    ).then(
        switch_to_diff_tab, None, demo.tabs
    )
    # The Generate button only reveals the chat input box.
    generate_btn.click(
        lambda: gr.update(visible=True),
        outputs=chat_input
    )
    # NOTE(review): handle_chat clears chat_input in its outputs, and the
    # following .then() reads chat_input again — generate_code_wrapper will
    # therefore receive an empty prompt and take its no-op branch. Likely the
    # chain should generate first (or carry the message through a gr.State).
    chat_input.submit(
        handle_chat,
        [chat_input, chat_history],
        [chat_input, chat_history]
    ).then(
        generate_code_wrapper,
        [chat_input, chat_history],
        [diff_view, diff_original_state, diff_modified_state, diff_mode_state, chat_history]
    ).then(
        switch_to_diff_tab, None, demo.tabs
    )
    send_btn.click(
        handle_chat,
        [chat_input, chat_history],
        [chat_input, chat_history]
    ).then(
        generate_code_wrapper,
        [chat_input, chat_history],
        [diff_view, diff_original_state, diff_modified_state, diff_mode_state, chat_history]
    ).then(
        switch_to_diff_tab, None, demo.tabs
    )
    # Diff review: apply writes the staged content; discard reloads the file.
    apply_btn.click(
        apply_diff,
        [diff_original_state, diff_modified_state, diff_mode_state],
        [editor, diff_mode_state]
    ).then(
        switch_to_editor_tab, None, demo.tabs
    )
    discard_btn.click(
        discard_diff,
        outputs=[editor, diff_mode_state]
    ).then(
        switch_to_editor_tab, None, demo.tabs
    )
    clear_btn.click(
        clear_terminal,
        outputs=terminal
    )
| # Add keyboard shortcuts using Gradio's new event system | |
| demo.load( | |
| None, | |
| None, | |
| None, | |
| _js=""" | |
| () => { | |
| document.addEventListener('keydown', function(e) { | |
| // Ctrl+R: Run code | |
| if (e.ctrlKey && e.key === 'r') { | |
| e.preventDefault(); | |
| const runBtn = document.querySelector('button:contains("Run")'); | |
| if (runBtn) runBtn.click(); | |
| } | |
| // Ctrl+Enter: Complete code | |
| if (e.ctrlKey && e.key === 'Enter') { | |
| e.preventDefault(); | |
| const completeBtn = document.querySelector('button:contains("Complete")'); | |
| if (completeBtn) completeBtn.click(); | |
| } | |
| // Ctrl+Shift+E: Explain code | |
| if (e.ctrlKey && e.shiftKey && e.key === 'E') { | |
| e.preventDefault(); | |
| const explainBtn = document.querySelector('button:contains("Explain")'); | |
| if (explainBtn) explainBtn.click(); | |
| } | |
| // Ctrl+B: Toggle file explorer | |
| if (e.ctrlKey && e.key === 'b') { | |
| e.preventDefault(); | |
| // Implementation would toggle sidebar | |
| } | |
| // Ctrl+J: Toggle terminal | |
| if (e.ctrlKey && e.key === 'j') { | |
| e.preventDefault(); | |
| // Implementation would toggle terminal | |
| } | |
| // Ctrl+L: Toggle AI chat | |
| if (e.ctrlKey && e.key === 'l') { | |
| e.preventDefault(); | |
| // Implementation would toggle chat panel | |
| } | |
| // Ctrl+N: New file | |
| if (e.ctrlKey && e.key === 'n') { | |
| e.preventDefault(); | |
| document.querySelector('input[placeholder="filename.js"]').focus(); | |
| } | |
| }); | |
| } | |
| """ | |
| ) | |
if __name__ == "__main__":
    # Entry point: bind to all interfaces so a hosting proxy can reach it.
    print("Starting Axon Pro IDE...")
    print(f"Running on CPU with {torch.__version__}")
    print(f"Model: {MODEL_NAME}")
    demo.launch(
        server_name="0.0.0.0",
        server_port=int(os.getenv("PORT", 7860)),  # hosting env may inject PORT
        debug=True,
        show_api=False,
        favicon_path="https://api.iconify.design/teenyicons:ai-solid.svg",  # NOTE(review): favicon_path expects a local file path, not a URL — confirm
        root_path=os.getenv("ROOT_PATH", "")
    )