# Hugging Face Space page banner captured with the source (status: Running).
| import os | |
| import gradio as gr | |
| import json | |
| import time | |
| from datetime import datetime | |
| from llama_cpp import Llama | |
| from faster_whisper import WhisperModel | |
| from huggingface_hub import hf_hub_download | |
# ===== CONFIG =====
MODELS_DIR = "/data/models"  # persistent disk path where GGUF files are cached
MAX_TOKENS = 2048            # default per-request generation budget
CONTEXT_SIZE = 4096          # llama.cpp context window (n_ctx)
# Model repos for auto-download (5 models to stay under 50GB)
# Maps local GGUF filename -> Hugging Face repo id it is fetched from.
MODEL_REPOS = {
    "DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M.gguf": "bartowski/DeepSeek-Coder-V2-Lite-Instruct-GGUF",
    "qwen2.5-coder-7b-instruct-q4_k_m.gguf": "Qwen/Qwen2.5-Coder-7B-Instruct-GGUF",
    "qwen2.5-coder-3b-instruct-q4_k_m.gguf": "Qwen/Qwen2.5-Coder-3B-Instruct-GGUF",
    "qwen2.5-coder-1.5b-instruct-q4_k_m.gguf": "Qwen/Qwen2.5-Coder-1.5B-Instruct-GGUF",
    "qwen2.5-coder-0.5b-instruct-q4_k_m.gguf": "Qwen/Qwen2.5-Coder-0.5B-Instruct-GGUF",
}
# UI dropdown label -> GGUF filename (keys are what load_model receives).
MODELS = {
    "🧠 DeepSeek V2 Lite (Best)": "DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M.gguf",
    "⚖️ Qwen2.5 Coder 7B (Balanced)": "qwen2.5-coder-7b-instruct-q4_k_m.gguf",
    "🚀 Qwen2.5 Coder 3B (Fast)": "qwen2.5-coder-3b-instruct-q4_k_m.gguf",
    "💨 Qwen2.5 Coder 1.5B (Quick)": "qwen2.5-coder-1.5b-instruct-q4_k_m.gguf",
    "🔬 Qwen2.5 Coder 0.5B (Instant)": "qwen2.5-coder-0.5b-instruct-q4_k_m.gguf",
}
# Languages offered in the various tool dropdowns.
LANGUAGES = [
    "Python", "JavaScript", "TypeScript", "Go", "Rust", "Java", "C++", "C#", "C",
    "PHP", "Ruby", "Swift", "Kotlin", "Scala", "R", "Julia", "Perl", "HTML/CSS",
    "SQL", "Bash", "PowerShell", "Lua"
]
# ===== GLOBAL STATE =====
loaded_models = {}         # model label -> live Llama instance (at most one kept, to save RAM)
current_model_name = None  # label of the model currently held in loaded_models
whisper_model = None       # faster-whisper model; populated by init_whisper()
| # ===== INITIALIZATION ===== | |
def init_directories():
    """Ensure the on-disk model cache directory exists (idempotent)."""
    os.makedirs(MODELS_DIR, exist_ok=True)
    print(f"📁 Models directory: {MODELS_DIR}")
def get_cached_models():
    """List the GGUF model files already downloaded into MODELS_DIR."""
    try:
        entries = os.listdir(MODELS_DIR)
    except FileNotFoundError:
        # Cache directory not created yet -> nothing cached.
        return []
    return [name for name in entries if name.endswith('.gguf')]
def download_model(filename):
    """Download a GGUF model into MODELS_DIR if it is not already cached.

    Returns the local path on success, or None when no repo is known for
    `filename` or the download fails.
    """
    model_path = os.path.join(MODELS_DIR, filename)
    if os.path.exists(model_path):
        return model_path
    repo = MODEL_REPOS.get(filename)
    if not repo:
        # BUG FIX: these three messages printed the literal text "(unknown)"
        # instead of interpolating the file being handled.
        print(f"❌ No repo found for {filename}")
        return None
    print(f"⬇️ Downloading {filename}...")
    try:
        path = hf_hub_download(
            repo_id=repo,
            filename=filename,
            local_dir=MODELS_DIR
        )
        print(f"✅ Downloaded {filename}")
        return path
    except Exception as e:
        print(f"❌ Download failed: {e}")
        return None
def load_model(model_name):
    """Return a ready Llama instance for `model_name`, loading it on demand.

    Keeps at most one model resident: any previously loaded model is dropped
    before a different one is loaded. Returns None on any failure.
    """
    global loaded_models, current_model_name
    # Fast path: requested model is already resident.
    if model_name == current_model_name and model_name in loaded_models:
        return loaded_models[model_name]
    # Unload previous model to save RAM.
    if current_model_name and current_model_name != model_name and current_model_name in loaded_models:
        del loaded_models[current_model_name]
        print(f"🗑️ Unloaded {current_model_name}")
    gguf_name = MODELS.get(model_name)
    if not gguf_name:
        return None
    # Fetch the weights first if they are not cached yet.
    model_path = download_model(gguf_name)
    if not model_path or not os.path.exists(model_path):
        return None
    print(f"📥 Loading {model_name}...")
    try:
        engine = Llama(
            model_path=model_path,
            n_ctx=CONTEXT_SIZE,
            n_threads=4,
            n_batch=512,
            verbose=False
        )
    except Exception as e:
        print(f"❌ Failed to load: {e}")
        return None
    loaded_models[model_name] = engine
    current_model_name = model_name
    print(f"✅ {model_name} loaded!")
    return engine
def init_whisper():
    """Load the tiny faster-whisper model (CPU, int8) into the module global."""
    global whisper_model
    print("🎤 Loading Whisper...")
    try:
        whisper_model = WhisperModel("tiny", device="cpu", compute_type="int8")
        print("✅ Whisper ready!")
    except Exception as e:
        # Voice input is optional; the app keeps running without it.
        print(f"❌ Whisper failed: {e}")
| # ===== LLM HELPERS ===== | |
def generate_response(prompt, model_name, max_tokens=MAX_TOKENS, temperature=0.7):
    """Run one blocking completion against `model_name` and return the text."""
    llm = load_model(model_name)
    if not llm:
        return "❌ Failed to load model. Please try again."
    try:
        completion = llm(
            prompt,
            max_tokens=max_tokens,
            temperature=temperature,
            stop=["<|endoftext|>", "<|im_end|>", "</s>"],
            echo=False
        )
        text = completion["choices"][0]["text"]
        return text.strip()
    except Exception as e:
        return f"❌ Error: {str(e)}"
def generate_stream(prompt, model_name, max_tokens=MAX_TOKENS, temperature=0.7):
    """Yield completion text chunks as the model produces them."""
    llm = load_model(model_name)
    if not llm:
        yield "❌ Failed to load model. Please try again."
        return
    try:
        chunks = llm(
            prompt,
            max_tokens=max_tokens,
            temperature=temperature,
            stop=["<|endoftext|>", "<|im_end|>", "</s>"],
            stream=True,
            echo=False
        )
        for chunk in chunks:
            choices = chunk.get("choices")
            if choices:
                piece = choices[0].get("text", "")
                if piece:
                    yield piece
    except Exception as e:
        yield f"❌ Error: {str(e)}"
def format_prompt(system, user):
    """Wrap a system message and a user message in ChatML delimiters."""
    parts = (
        f"<|im_start|>system\n{system}<|im_end|>",
        f"<|im_start|>user\n{user}<|im_end|>",
        "<|im_start|>assistant\n",
    )
    return "\n".join(parts)
| # ===== WHISPER ===== | |
def transcribe_audio(audio_path):
    """Transcribe an audio file with Whisper; returns "" when unavailable."""
    if not whisper_model or not audio_path:
        return ""
    try:
        segments, _ = whisper_model.transcribe(audio_path, beam_size=1)
        pieces = [segment.text for segment in segments]
        return " ".join(pieces).strip()
    except Exception as e:
        return f"❌ Transcription error: {str(e)}"
| # ===== TOOL FUNCTIONS ===== | |
| # --- CORE TOOLS --- | |
def chat_stream(message, history, model, temperature, max_tokens):
    """Stream a chat reply, yielding the growing messages list for the UI."""
    if not message.strip():
        yield history
        return
    history = history or []
    system = "You are Axon, an expert AI coding assistant. Be helpful, concise, and provide working code examples when appropriate."
    # Rebuild recent conversation context (last 10 messages) in ChatML form.
    turns = []
    for entry in history[-10:]:
        role = entry["role"]
        if role == "user":
            turns.append(f"<|im_start|>user\n{entry['content']}<|im_end|>\n")
        elif role == "assistant":
            turns.append(f"<|im_start|>assistant\n{entry['content']}<|im_end|>\n")
    conversation = "".join(turns)
    prompt = (
        f"<|im_start|>system\n{system}<|im_end|>\n"
        f"{conversation}"
        f"<|im_start|>user\n{message}<|im_end|>\n"
        "<|im_start|>assistant\n"
    )
    # Record the new user message, then stream the assistant reply.
    history = history + [{"role": "user", "content": message}]
    partial = ""
    for token in generate_stream(prompt, model, max_tokens, temperature):
        partial += token
        yield history + [{"role": "assistant", "content": partial}]
    # Final yield with the complete response.
    yield history + [{"role": "assistant", "content": partial}]
def generate_code(description, language, model, max_tokens):
    """Produce code in `language` from a plain-English description."""
    if not description.strip():
        return "Please provide a description."
    persona = f"You are an expert {language} programmer. Generate clean, well-commented, production-ready code."
    request = f"Generate {language} code for: {description}"
    return generate_response(format_prompt(persona, request), model, max_tokens)
def explain_code(code, model, detail_level, max_tokens):
    """Explain a code snippet at the requested level of detail."""
    if not code.strip():
        return "Please provide code to explain."
    detail_map = {
        "Brief": "Give a brief 2-3 sentence explanation.",
        "Normal": "Explain the code clearly with key points.",
        "Detailed": "Provide a comprehensive explanation covering logic, patterns, and potential improvements."
    }
    # Unknown levels fall back to the "Normal" instructions.
    instructions = detail_map.get(detail_level, detail_map["Normal"])
    persona = "You are an expert code explainer. " + instructions
    request = f"Explain this code:\n\n```\n{code}\n```"
    return generate_response(format_prompt(persona, request), model, max_tokens)
def debug_code(code, error, model, max_tokens):
    """Locate bugs in `code`, optionally guided by an error message."""
    if not code.strip():
        return "Please provide code to debug."
    request = f"Debug this code:\n\n```\n{code}\n```"
    if error.strip():
        request += f"\n\nError message:\n{error}"
    persona = "You are an expert debugger. Identify bugs, explain the issues, and provide corrected code."
    return generate_response(format_prompt(persona, request), model, max_tokens)
def review_code(code, model, max_tokens):
    """Run a multi-point code review over the snippet."""
    if not code.strip():
        return "Please provide code to review."
    persona = """You are a senior code reviewer. Review the code for:
1. Code quality and readability
2. Potential bugs or issues
3. Security vulnerabilities
4. Performance concerns
5. Best practices
Provide specific, actionable feedback."""
    request = f"Review this code:\n\n```\n{code}\n```"
    return generate_response(format_prompt(persona, request), model, max_tokens)
| # --- ADVANCED TOOLS --- | |
def security_scan(code, model, max_tokens):
    """Scan a snippet for common vulnerability classes with severity ratings."""
    if not code.strip():
        return "Please provide code to scan."
    persona = """You are a security expert. Scan the code for vulnerabilities including:
- SQL injection
- XSS (Cross-site scripting)
- CSRF vulnerabilities
- Insecure data handling
- Authentication issues
- Input validation problems
- Secrets/credentials exposure
Rate severity (Critical/High/Medium/Low) and provide fixes."""
    request = f"Security scan this code:\n\n```\n{code}\n```"
    return generate_response(format_prompt(persona, request), model, max_tokens)
def analyze_complexity(code, model, max_tokens):
    """Ask for a Big-O time/space analysis of the snippet."""
    if not code.strip():
        return "Please provide code to analyze."
    persona = """You are an algorithms expert. Analyze the code complexity:
1. Time complexity (Big O notation)
2. Space complexity (Big O notation)
3. Explain the analysis step by step
4. Suggest optimizations if possible"""
    request = f"Analyze complexity:\n\n```\n{code}\n```"
    return generate_response(format_prompt(persona, request), model, max_tokens)
def convert_code(code, from_lang, to_lang, model, max_tokens):
    """Translate a snippet from one programming language to another."""
    if not code.strip():
        return "Please provide code to convert."
    persona = f"You are an expert polyglot programmer. Convert code from {from_lang} to {to_lang}. Preserve functionality and use idiomatic patterns for the target language."
    request = f"Convert this {from_lang} code to {to_lang}:\n\n```\n{code}\n```"
    return generate_response(format_prompt(persona, request), model, max_tokens)
def generate_tests(code, language, framework, model, max_tokens):
    """Generate unit tests for the snippet with the chosen framework."""
    if not code.strip():
        return "Please provide code to test."
    persona = f"You are a testing expert. Generate comprehensive {framework} unit tests for {language} code. Include edge cases, error cases, and happy path tests."
    request = f"Generate {framework} tests for:\n\n```\n{code}\n```"
    return generate_response(format_prompt(persona, request), model, max_tokens)
def document_code(code, language, style, model, max_tokens):
    """Add docstrings and/or inline comments to the snippet."""
    if not code.strip():
        return "Please provide code to document."
    style_map = {
        "Docstrings": "Add comprehensive docstrings to all functions/classes.",
        "Inline Comments": "Add helpful inline comments explaining the logic.",
        "Both": "Add both docstrings and inline comments."
    }
    # Unknown styles fall back to "Both".
    instructions = style_map.get(style, style_map['Both'])
    persona = f"You are a documentation expert. {instructions} Use {language} documentation conventions."
    request = f"Document this code:\n\n```\n{code}\n```"
    return generate_response(format_prompt(persona, request), model, max_tokens)
def optimize_code(code, language, focus, model, max_tokens):
    """Optimize the snippet for a chosen focus (speed, memory, ...)."""
    if not code.strip():
        return "Please provide code to optimize."
    persona = f"You are a performance optimization expert. Optimize the {language} code focusing on {focus}. Show the optimized code and explain the improvements."
    request = f"Optimize this code for {focus}:\n\n```\n{code}\n```"
    return generate_response(format_prompt(persona, request), model, max_tokens)
def code_diff(code1, code2, model, max_tokens):
    """Compare two snippets and explain the differences."""
    if not code1.strip() or not code2.strip():
        return "Please provide both code snippets to compare."
    persona = "You are a code analysis expert. Compare the two code snippets and explain the differences, which is better, and why."
    request = f"Compare these code snippets:\n\n**Code 1:**\n```\n{code1}\n```\n\n**Code 2:**\n```\n{code2}\n```"
    return generate_response(format_prompt(persona, request), model, max_tokens)
def to_pseudocode(code, output_type, model, max_tokens):
    """Convert a snippet into pseudocode or a text flowchart."""
    if not code.strip():
        return "Please provide code to convert."
    if output_type == "Pseudocode":
        persona = "Convert the code to clear, readable pseudocode that anyone can understand."
    else:
        persona = "Convert the code to a text-based flowchart using ASCII art or a structured description."
    request = f"Convert to {output_type}:\n\n```\n{code}\n```"
    return generate_response(format_prompt(persona, request), model, max_tokens)
def interview_challenge(topic, difficulty, language, model, max_tokens):
    """Create a coding-interview challenge for a topic and difficulty."""
    persona = f"You are a technical interviewer. Create a {difficulty} {language} coding challenge about {topic}. Include: problem statement, examples, constraints, hints, and a solution with explanation."
    request = f"Create a {difficulty} {topic} challenge in {language}"
    return generate_response(format_prompt(persona, request), model, max_tokens)
| # --- BUILDERS --- | |
def build_sql(description, sql_type, model, max_tokens):
    """Generate a SQL query for a specific dialect from a description."""
    if not description.strip():
        return "Please describe what SQL you need."
    persona = f"You are a SQL expert. Generate {sql_type} SQL queries. Include comments explaining the query."
    request = f"Generate SQL for: {description}"
    return generate_response(format_prompt(persona, request), model, max_tokens)
def build_shell(description, shell_type, model, max_tokens):
    """Generate a shell command for the chosen shell from a description."""
    if not description.strip():
        return "Please describe what command you need."
    persona = f"You are a {shell_type} expert. Generate safe, well-commented shell commands. Warn about any dangerous operations."
    request = f"Generate {shell_type} command for: {description}"
    return generate_response(format_prompt(persona, request), model, max_tokens)
def build_cron(description, model, max_tokens):
    """Generate a cron expression from a natural-language schedule."""
    if not description.strip():
        return "Please describe the schedule you need."
    persona = "You are a cron expert. Generate cron expressions with clear explanations. Include the cron format: minute hour day month weekday"
    request = f"Generate cron expression for: {description}"
    return generate_response(format_prompt(persona, request), model, max_tokens)
def build_regex(description, model, max_tokens):
    """Generate a regular expression, with explanation, from a description."""
    if not description.strip():
        return "Please describe the pattern you need."
    persona = "You are a regex expert. Generate regular expressions with explanations and test examples. Support multiple regex flavors if relevant."
    request = f"Generate regex for: {description}"
    return generate_response(format_prompt(persona, request), model, max_tokens)
def build_api(description, framework, model, max_tokens):
    """Generate a REST endpoint for the chosen framework from a description."""
    if not description.strip():
        return "Please describe the API endpoint you need."
    persona = f"You are an API expert. Generate a complete {framework} REST API endpoint with error handling, validation, and documentation."
    request = f"Generate {framework} API endpoint for: {description}"
    return generate_response(format_prompt(persona, request), model, max_tokens)
| # --- DATA TOOLS --- | |
def generate_mock_data(schema, count, format_type, model, max_tokens):
    """Generate `count` mock records matching a schema description."""
    if not schema.strip():
        return "Please provide a schema or description."
    persona = f"You are a data generation expert. Generate {count} realistic mock data records in {format_type} format based on the schema."
    request = f"Generate {count} mock records in {format_type}:\n\nSchema: {schema}"
    return generate_response(format_prompt(persona, request), model, max_tokens)
def convert_data_format(data, from_format, to_format, model, max_tokens):
    """Convert structured data between serialization formats."""
    if not data.strip():
        return "Please provide data to convert."
    persona = f"Convert the data from {from_format} to {to_format}. Preserve all information and use proper formatting."
    request = f"Convert from {from_format} to {to_format}:\n\n```\n{data}\n```"
    return generate_response(format_prompt(persona, request), model, max_tokens)
| # --- NEW v26 TOOLS --- | |
def refactor_code(code, language, pattern, model, max_tokens):
    """Refactor a snippet toward a named design pattern or principle."""
    if not code.strip():
        return "Please provide code to refactor."
    persona = f"You are a software architect. Refactor the {language} code using {pattern} patterns. Explain the improvements and show before/after."
    request = f"Refactor using {pattern}:\n\n```\n{code}\n```"
    return generate_response(format_prompt(persona, request), model, max_tokens)
def generate_benchmark(code, language, model, max_tokens):
    """Generate benchmark harness code for the snippet."""
    if not code.strip():
        return "Please provide code to benchmark."
    persona = f"You are a performance testing expert. Generate comprehensive {language} benchmark code to measure performance. Include setup, warmup, multiple iterations, and statistics."
    request = f"Generate benchmark for:\n\n```\n{code}\n```"
    return generate_response(format_prompt(persona, request), model, max_tokens)
def analyze_dependencies(code, language, model, max_tokens):
    """Analyze the snippet's imports/dependencies for common problems."""
    if not code.strip():
        return "Please provide code to analyze."
    persona = f"""You are a dependency analysis expert. Analyze the {language} code for:
1. Direct imports/dependencies
2. Potential circular dependencies
3. Unused imports
4. Missing dependencies
5. Version compatibility concerns
6. Security advisories for known packages"""
    request = f"Analyze dependencies:\n\n```\n{code}\n```"
    return generate_response(format_prompt(persona, request), model, max_tokens)
def generate_changelog(diff_or_commits, version, model, max_tokens):
    """Write a changelog entry for `version` from a diff or commit list."""
    if not diff_or_commits.strip():
        return "Please provide diff or commit messages."
    persona = """You are a technical writer. Generate a professional changelog entry with:
- Version number and date
- Categories: Added, Changed, Fixed, Removed, Security
- Clear, user-friendly descriptions
- Breaking changes highlighted"""
    request = f"Generate changelog for version {version}:\n\n{diff_or_commits}"
    return generate_response(format_prompt(persona, request), model, max_tokens)
def suggest_improvements(code, language, model, max_tokens):
    """Produce prioritized improvement suggestions for the snippet."""
    if not code.strip():
        return "Please provide code to analyze."
    persona = f"""You are a senior {language} developer. Provide actionable improvement suggestions:
1. Code quality improvements
2. Performance optimizations
3. Better error handling
4. Modern language features to use
5. Design pattern recommendations
6. Testing suggestions
Rate each suggestion by impact (High/Medium/Low) and effort."""
    request = f"Suggest improvements:\n\n```\n{code}\n```"
    return generate_response(format_prompt(persona, request), model, max_tokens)
| # ===== UI ===== | |
| def create_ui(): | |
| # Check cached models | |
| cached = get_cached_models() | |
| total_models = len(MODELS) | |
| with gr.Blocks(title="Axon v26") as demo: | |
| # Header | |
| gr.HTML(""" | |
| <div class="main-header"> | |
| <h1>⚡ Axon v26</h1> | |
| <p>The Ultimate Free AI Coding Assistant • 5 Models • 25 Tools • 100% Local</p> | |
| </div> | |
| """) | |
| # Status bar | |
| status_emoji = "🟢" if len(cached) >= total_models else "🟡" | |
| gr.HTML(f""" | |
| <div class="status-bar" style="background: linear-gradient(135deg, #667eea22, #764ba222);"> | |
| {status_emoji} <strong>{len(cached)}/{total_models}</strong> models cached • | |
| {"All models ready!" if len(cached) >= total_models else "Models download on first use (~1-9GB each)"} | |
| </div> | |
| """) | |
| # Global controls | |
| with gr.Row(): | |
| model_dropdown = gr.Dropdown( | |
| choices=list(MODELS.keys()), | |
| value=list(MODELS.keys())[2], # Default to 3B | |
| label="🤖 Model", | |
| scale=2 | |
| ) | |
| max_tokens = gr.Slider(256, 4096, value=2048, step=256, label="Max Tokens", scale=1) | |
| # Tabs | |
| with gr.Tabs(): | |
| # === CHAT TAB === | |
| with gr.Tab("💬 Chat"): | |
| gr.HTML("<p class='tool-description'>Have a conversation about code, get help, ask questions.</p>") | |
| chatbot = gr.Chatbot(height=400) | |
| with gr.Row(): | |
| msg = gr.Textbox(placeholder="Ask me anything about code...", scale=4, show_label=False) | |
| send_btn = gr.Button("Send", variant="primary") | |
| with gr.Row(): | |
| temperature = gr.Slider(0, 1, value=0.7, step=0.1, label="Temperature") | |
| audio_input = gr.Audio(type="filepath", label="🎤 Voice Input") | |
| transcribe_btn = gr.Button("Transcribe") | |
| clear_btn = gr.Button("Clear Chat") | |
| # === GENERATE TAB === | |
| with gr.Tab("⚡ Generate"): | |
| gr.HTML("<p class='tool-description'>Describe what you want, get working code.</p>") | |
| gen_prompt = gr.Textbox(lines=3, placeholder="Describe the code you want...", label="Description") | |
| gen_lang = gr.Dropdown(choices=LANGUAGES, value="Python", label="Language") | |
| gen_btn = gr.Button("Generate", variant="primary") | |
| gen_output = gr.Code(label="Generated Code", language="python") | |
| # === EXPLAIN TAB === | |
| with gr.Tab("🔍 Explain"): | |
| gr.HTML("<p class='tool-description'>Understand any code with detailed explanations.</p>") | |
| explain_input = gr.Code(lines=10, label="Code to Explain", language="python") | |
| explain_detail = gr.Radio(["Brief", "Normal", "Detailed"], value="Normal", label="Detail Level") | |
| explain_btn = gr.Button("Explain", variant="primary") | |
| explain_output = gr.Markdown(label="Explanation") | |
| # === DEBUG TAB === | |
| with gr.Tab("🔧 Debug"): | |
| gr.HTML("<p class='tool-description'>Find and fix bugs with AI assistance.</p>") | |
| debug_input = gr.Code(lines=10, label="Buggy Code", language="python") | |
| debug_error = gr.Textbox(lines=3, placeholder="Paste error message (optional)", label="Error Message") | |
| debug_btn = gr.Button("Debug", variant="primary") | |
| debug_output = gr.Markdown(label="Debug Results") | |
| # === REVIEW TAB === | |
| with gr.Tab("📋 Review"): | |
| gr.HTML("<p class='tool-description'>Get comprehensive code review feedback.</p>") | |
| review_input = gr.Code(lines=10, label="Code to Review", language="python") | |
| review_btn = gr.Button("Review", variant="primary") | |
| review_output = gr.Markdown(label="Review Results") | |
| # === SECURITY TAB === | |
| with gr.Tab("🔐 Security"): | |
| gr.HTML("<p class='tool-description'>Scan code for security vulnerabilities.</p>") | |
| security_input = gr.Code(lines=10, label="Code to Scan", language="python") | |
| security_btn = gr.Button("Scan", variant="primary") | |
| security_output = gr.Markdown(label="Security Report") | |
| # === COMPLEXITY TAB === | |
| with gr.Tab("📊 Complexity"): | |
| gr.HTML("<p class='tool-description'>Analyze time and space complexity (Big O).</p>") | |
| complexity_input = gr.Code(lines=10, label="Code to Analyze", language="python") | |
| complexity_btn = gr.Button("Analyze", variant="primary") | |
| complexity_output = gr.Markdown(label="Complexity Analysis") | |
| # === CONVERT TAB === | |
| with gr.Tab("🔄 Convert"): | |
| gr.HTML("<p class='tool-description'>Translate code between programming languages.</p>") | |
| convert_input = gr.Code(lines=10, label="Code to Convert", language="python") | |
| with gr.Row(): | |
| convert_from = gr.Dropdown(choices=LANGUAGES, value="Python", label="From") | |
| convert_to = gr.Dropdown(choices=LANGUAGES, value="JavaScript", label="To") | |
| convert_btn = gr.Button("Convert", variant="primary") | |
| convert_output = gr.Code(label="Converted Code", language="javascript") | |
| # === TEST TAB === | |
| with gr.Tab("🧪 Test"): | |
| gr.HTML("<p class='tool-description'>Generate comprehensive unit tests.</p>") | |
| test_input = gr.Code(lines=10, label="Code to Test", language="python") | |
| with gr.Row(): | |
| test_lang = gr.Dropdown(choices=LANGUAGES, value="Python", label="Language") | |
| test_fw = gr.Dropdown(choices=["pytest", "unittest", "Jest", "Mocha", "JUnit", "RSpec"], value="pytest", label="Framework") | |
| test_btn = gr.Button("Generate Tests", variant="primary") | |
| test_output = gr.Code(label="Generated Tests", language="python") | |
| # === DOCUMENT TAB === | |
| with gr.Tab("📝 Document"): | |
| gr.HTML("<p class='tool-description'>Add documentation to your code.</p>") | |
| doc_input = gr.Code(lines=10, label="Code to Document", language="python") | |
| with gr.Row(): | |
| doc_lang = gr.Dropdown(choices=LANGUAGES, value="Python", label="Language") | |
| doc_style = gr.Dropdown(choices=["Docstrings", "Inline Comments", "Both"], value="Both", label="Style") | |
| doc_btn = gr.Button("Document", variant="primary") | |
| doc_output = gr.Code(label="Documented Code", language="python") | |
| # === OPTIMIZE TAB === | |
| with gr.Tab("🚀 Optimize"): | |
| gr.HTML("<p class='tool-description'>Improve code performance.</p>") | |
| opt_input = gr.Code(lines=10, label="Code to Optimize", language="python") | |
| with gr.Row(): | |
| opt_lang = gr.Dropdown(choices=LANGUAGES, value="Python", label="Language") | |
| opt_focus = gr.Dropdown(choices=["Speed", "Memory", "Readability", "All"], value="All", label="Focus") | |
| opt_btn = gr.Button("Optimize", variant="primary") | |
| opt_output = gr.Markdown(label="Optimized Code") | |
| # === DIFF TAB === | |
| with gr.Tab("🔀 Diff"): | |
| gr.HTML("<p class='tool-description'>Compare two code snippets.</p>") | |
| with gr.Row(): | |
| diff_code1 = gr.Code(lines=8, label="Code 1", language="python") | |
| diff_code2 = gr.Code(lines=8, label="Code 2", language="python") | |
| diff_btn = gr.Button("Compare", variant="primary") | |
| diff_output = gr.Markdown(label="Comparison") | |
| # === PSEUDOCODE TAB === | |
| with gr.Tab("📐 Pseudo"): | |
| gr.HTML("<p class='tool-description'>Convert code to pseudocode or flowcharts.</p>") | |
| pseudo_input = gr.Code(lines=10, label="Code", language="python") | |
| pseudo_type = gr.Radio(["Pseudocode", "Flowchart"], value="Pseudocode", label="Output Type") | |
| pseudo_btn = gr.Button("Convert", variant="primary") | |
| pseudo_output = gr.Markdown(label="Output") | |
| # === INTERVIEW TAB === | |
| with gr.Tab("🎓 Interview"): | |
| gr.HTML("<p class='tool-description'>Generate coding interview challenges.</p>") | |
| with gr.Row(): | |
| interview_topic = gr.Textbox(placeholder="e.g., binary trees, dynamic programming", label="Topic") | |
| interview_diff = gr.Dropdown(choices=["Easy", "Medium", "Hard"], value="Medium", label="Difficulty") | |
| interview_lang = gr.Dropdown(choices=LANGUAGES, value="Python", label="Language") | |
| interview_btn = gr.Button("Generate Challenge", variant="primary") | |
| interview_output = gr.Markdown(label="Challenge") | |
| # === SQL TAB === | |
| with gr.Tab("🗄️ SQL"): | |
| gr.HTML("<p class='tool-description'>Generate SQL queries from natural language.</p>") | |
| sql_desc = gr.Textbox(lines=3, placeholder="Describe your query...", label="Description") | |
| sql_type = gr.Dropdown(choices=["MySQL", "PostgreSQL", "SQLite", "SQL Server", "Oracle"], value="PostgreSQL", label="Database") | |
| sql_btn = gr.Button("Generate SQL", variant="primary") | |
| sql_output = gr.Code(label="Generated SQL", language="sql") | |
| # === SHELL TAB === | |
| with gr.Tab("🐚 Shell"): | |
| gr.HTML("<p class='tool-description'>Generate shell commands from descriptions.</p>") | |
| shell_desc = gr.Textbox(lines=3, placeholder="Describe what you want to do...", label="Description") | |
| shell_type = gr.Dropdown(choices=["Bash", "PowerShell", "Zsh", "Fish"], value="Bash", label="Shell") | |
| shell_btn = gr.Button("Generate", variant="primary") | |
| shell_output = gr.Code(label="Generated Command", language="shell") | |
| # === CRON TAB === | |
| with gr.Tab("⏰ Cron"): | |
| gr.HTML("<p class='tool-description'>Generate cron schedule expressions.</p>") | |
| cron_desc = gr.Textbox(lines=2, placeholder="e.g., Every Monday at 9am", label="Schedule Description") | |
| cron_btn = gr.Button("Generate", variant="primary") | |
| cron_output = gr.Markdown(label="Cron Expression") | |
| # === REGEX TAB === | |
| with gr.Tab("🎯 Regex"): | |
| gr.HTML("<p class='tool-description'>Generate regular expressions with explanations.</p>") | |
| regex_desc = gr.Textbox(lines=2, placeholder="e.g., Match email addresses", label="Pattern Description") | |
| regex_btn = gr.Button("Generate", variant="primary") | |
| regex_output = gr.Markdown(label="Regex Pattern") | |
| # === API TAB === | |
| with gr.Tab("🔗 API"): | |
| gr.HTML("<p class='tool-description'>Generate REST API endpoint boilerplate.</p>") | |
| api_desc = gr.Textbox(lines=3, placeholder="Describe your API endpoint...", label="Description") | |
| api_fw = gr.Dropdown(choices=["FastAPI", "Flask", "Express", "Django", "Spring Boot", "Go Gin"], value="FastAPI", label="Framework") | |
| api_btn = gr.Button("Generate", variant="primary") | |
| api_output = gr.Code(label="Generated API", language="python") | |
| # === MOCK DATA TAB === | |
| with gr.Tab("📦 Mock"): | |
| gr.HTML("<p class='tool-description'>Generate realistic test data.</p>") | |
| mock_schema = gr.Textbox(lines=3, placeholder="e.g., users with name, email, age", label="Schema Description") | |
| with gr.Row(): | |
| mock_count = gr.Number(value=5, label="Count", precision=0) | |
| mock_format = gr.Dropdown(choices=["JSON", "CSV", "SQL INSERT", "YAML"], value="JSON", label="Format") | |
| mock_btn = gr.Button("Generate", variant="primary") | |
| mock_output = gr.Code(label="Generated Data", language="json") | |
| # === FORMAT TAB === | |
| with gr.Tab("🔄 Format"): | |
| gr.HTML("<p class='tool-description'>Convert between data formats.</p>") | |
| format_input = gr.Code(lines=10, label="Input Data", language="json") | |
| with gr.Row(): | |
| format_from = gr.Dropdown(choices=["JSON", "YAML", "XML", "CSV", "TOML"], value="JSON", label="From") | |
| format_to = gr.Dropdown(choices=["JSON", "YAML", "XML", "CSV", "TOML"], value="YAML", label="To") | |
| format_btn = gr.Button("Convert", variant="primary") | |
| format_output = gr.Code(label="Converted Data", language="python") | |
| # === REFACTOR TAB (NEW) === | |
| with gr.Tab("🎨 Refactor"): | |
| gr.HTML("<p class='tool-description'>Restructure code using design patterns.</p>") | |
| refactor_input = gr.Code(lines=10, label="Code to Refactor", language="python") | |
| with gr.Row(): | |
| refactor_lang = gr.Dropdown(choices=LANGUAGES, value="Python", label="Language") | |
| refactor_pattern = gr.Dropdown(choices=["Clean Code", "SOLID", "DRY", "Factory", "Singleton", "Observer", "Strategy"], value="Clean Code", label="Pattern") | |
| refactor_btn = gr.Button("Refactor", variant="primary") | |
| refactor_output = gr.Markdown(label="Refactored Code") | |
| # === BENCHMARK TAB (NEW) === | |
| with gr.Tab("📊 Bench"): | |
| gr.HTML("<p class='tool-description'>Generate performance benchmark code.</p>") | |
| bench_input = gr.Code(lines=10, label="Code to Benchmark", language="python") | |
| bench_lang = gr.Dropdown(choices=LANGUAGES, value="Python", label="Language") | |
| bench_btn = gr.Button("Generate Benchmark", variant="primary") | |
| bench_output = gr.Code(label="Benchmark Code", language="python") | |
        # === DEPENDENCIES TAB (NEW) ===
        # Dependency analyzer: summarize imports/third-party usage of the pasted code.
        with gr.Tab("🔗 Deps"):
            gr.HTML("<p class='tool-description'>Analyze imports and dependencies.</p>")
            deps_input = gr.Code(lines=10, label="Code to Analyze", language="python")
            deps_lang = gr.Dropdown(choices=LANGUAGES, value="Python", label="Language")
            deps_btn = gr.Button("Analyze", variant="primary")
            deps_output = gr.Markdown(label="Dependency Analysis")
        # === CHANGELOG TAB (NEW) ===
        # Changelog generator: turn a pasted diff / commit list into release notes for a version.
        with gr.Tab("📋 Change"):
            gr.HTML("<p class='tool-description'>Generate changelogs from diffs or commits.</p>")
            changelog_input = gr.Textbox(lines=10, placeholder="Paste git diff, commit messages, or describe changes...", label="Changes")
            changelog_version = gr.Textbox(value="1.0.0", label="Version")
            changelog_btn = gr.Button("Generate Changelog", variant="primary")
            changelog_output = gr.Markdown(label="Changelog")
        # === SUGGEST TAB (NEW) ===
        # Suggestion tool: free-form improvement advice for the pasted code.
        with gr.Tab("💡 Suggest"):
            gr.HTML("<p class='tool-description'>Get AI-powered improvement suggestions.</p>")
            suggest_input = gr.Code(lines=10, label="Code to Analyze", language="python")
            suggest_lang = gr.Dropdown(choices=LANGUAGES, value="Python", label="Language")
            suggest_btn = gr.Button("Get Suggestions", variant="primary")
            suggest_output = gr.Markdown(label="Suggestions")
        # Footer: static HTML credit/link banner rendered below all tabs.
        gr.HTML("""
        <div style="text-align: center; margin-top: 20px; padding: 15px; background: linear-gradient(135deg, #667eea11, #764ba211); border-radius: 10px;">
            <p><strong>⚡ Axon v26</strong> • Built with ❤️ by <a href="https://huggingface.co/AIencoder" target="_blank">AIencoder</a></p>
            <p style="font-size: 0.85em; color: #666;">
                <a href="https://huggingface.co/datasets/AIencoder/llama-cpp-wheels" target="_blank">🛞 Pre-built Wheels</a> •
                100% Local • No API Keys • MIT License
            </p>
        </div>
        """)
| # === EVENT HANDLERS === | |
| # Chat | |
| def respond(message, history, model, temp, tokens): | |
| history = history or [] | |
| for updated_history in chat_stream(message, history, model, temp, tokens): | |
| yield updated_history, "" | |
        # Wire UI events. Both Enter-in-textbox and the Send button stream through
        # respond(); its second output slot clears the message box on each update.
        msg.submit(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
        send_btn.click(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
        # Reset both the chat history and the input field.
        clear_btn.click(lambda: ([], ""), None, [chatbot, msg])
        # Voice input: transcription result lands in the chat textbox for review before sending.
        transcribe_btn.click(transcribe_audio, audio_input, msg)
        # Core tools
        gen_btn.click(generate_code, [gen_prompt, gen_lang, model_dropdown, max_tokens], gen_output)
        explain_btn.click(explain_code, [explain_input, model_dropdown, explain_detail, max_tokens], explain_output)
        debug_btn.click(debug_code, [debug_input, debug_error, model_dropdown, max_tokens], debug_output)
        review_btn.click(review_code, [review_input, model_dropdown, max_tokens], review_output)
        # Advanced tools
        security_btn.click(security_scan, [security_input, model_dropdown, max_tokens], security_output)
        complexity_btn.click(analyze_complexity, [complexity_input, model_dropdown, max_tokens], complexity_output)
        convert_btn.click(convert_code, [convert_input, convert_from, convert_to, model_dropdown, max_tokens], convert_output)
        test_btn.click(generate_tests, [test_input, test_lang, test_fw, model_dropdown, max_tokens], test_output)
        doc_btn.click(document_code, [doc_input, doc_lang, doc_style, model_dropdown, max_tokens], doc_output)
        opt_btn.click(optimize_code, [opt_input, opt_lang, opt_focus, model_dropdown, max_tokens], opt_output)
        diff_btn.click(code_diff, [diff_code1, diff_code2, model_dropdown, max_tokens], diff_output)
        pseudo_btn.click(to_pseudocode, [pseudo_input, pseudo_type, model_dropdown, max_tokens], pseudo_output)
        interview_btn.click(interview_challenge, [interview_topic, interview_diff, interview_lang, model_dropdown, max_tokens], interview_output)
        # Builders
        sql_btn.click(build_sql, [sql_desc, sql_type, model_dropdown, max_tokens], sql_output)
        shell_btn.click(build_shell, [shell_desc, shell_type, model_dropdown, max_tokens], shell_output)
        cron_btn.click(build_cron, [cron_desc, model_dropdown, max_tokens], cron_output)
        regex_btn.click(build_regex, [regex_desc, model_dropdown, max_tokens], regex_output)
        api_btn.click(build_api, [api_desc, api_fw, model_dropdown, max_tokens], api_output)
        # Data tools
        mock_btn.click(generate_mock_data, [mock_schema, mock_count, mock_format, model_dropdown, max_tokens], mock_output)
        format_btn.click(convert_data_format, [format_input, format_from, format_to, model_dropdown, max_tokens], format_output)
        # NEW v26 tools
        refactor_btn.click(refactor_code, [refactor_input, refactor_lang, refactor_pattern, model_dropdown, max_tokens], refactor_output)
        bench_btn.click(generate_benchmark, [bench_input, bench_lang, model_dropdown, max_tokens], bench_output)
        deps_btn.click(analyze_dependencies, [deps_input, deps_lang, model_dropdown, max_tokens], deps_output)
        changelog_btn.click(generate_changelog, [changelog_input, changelog_version, model_dropdown, max_tokens], changelog_output)
        suggest_btn.click(suggest_improvements, [suggest_input, suggest_lang, model_dropdown, max_tokens], suggest_output)
    # Hand the fully wired Blocks app back to the caller (launched in __main__).
    return demo
# ===== MAIN =====
# Script entry point: prepare the model cache directory and Whisper, report which
# GGUF models are already downloaded, then build and serve the Gradio app.
if __name__ == "__main__":
    print("⚡ Axon v26 - The FINAL Version")
    print("=" * 40)
    init_directories()
    init_whisper()
    cached = get_cached_models()
    print(f"📦 Cached models: {len(cached)}/{len(MODELS)}")
    for m in cached:
        print(f" ✅ {m}")
    print("\n🚀 Starting Axon...")
    demo = create_ui()
    # Bugfix: Blocks.launch() has no `css` keyword (css is a gr.Blocks constructor
    # argument), so passing it to launch() raises TypeError. Attach the stylesheet
    # to the Blocks object instead, then launch without it.
    demo.css = """
    .main-header { text-align: center; margin-bottom: 20px; }
    .tool-description { color: #666; font-size: 0.9em; margin-bottom: 10px; }
    .status-bar { padding: 10px; border-radius: 8px; margin-bottom: 15px; }
    """
    # Bind to all interfaces on the standard HF Spaces port.
    demo.launch(server_name="0.0.0.0", server_port=7860)