# Axon v6 — AI coding assistant (Hugging Face Space app).
import json
import os
import time
from datetime import datetime
from pathlib import Path

import gradio as gr
from faster_whisper import WhisperModel
from huggingface_hub import hf_hub_download  # auto-download of missing GGUFs
from llama_cpp import Llama
# ===== CONFIG =====
MODELS_DIR = "/data/models"   # persistent storage for downloaded GGUF files
MAX_TOKENS = 2048             # default generation budget
CONTEXT_SIZE = 4096           # llama.cpp context window

# GGUF filename -> Hugging Face repo it can be auto-downloaded from.
MODEL_REPOS = {
    # 30B: Unsloth is the most reliable source for Qwen3 GGUFs currently
    "Qwen3-Coder-30B-A3B-Instruct-Q4_K_M.gguf": "unsloth/Qwen3-Coder-30B-A3B-Instruct-GGUF",
    # 3B: Qwen actually has an official one, but bartowski is safer fallback
    "qwen2.5-coder-3b-instruct-q4_k_m.gguf": "Qwen/Qwen2.5-Coder-3B-Instruct-GGUF",
    # 7B: Official Qwen GGUF is often missing/broken. Bartowski is the go-to here.
    "qwen2.5-coder-7b-instruct-q4_k_m.gguf": "bartowski/Qwen2.5-Coder-7B-Instruct-GGUF",
    # 14B: Bartowski is recommended for consistency
    "qwen2.5-coder-14b-instruct-q4_k_m.gguf": "bartowski/Qwen2.5-Coder-14B-Instruct-GGUF",
    # DeepSeek: Definitely needs community repo
    "DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M.gguf": "bartowski/DeepSeek-Coder-V2-Lite-Instruct-GGUF",
    # FIX: this file is offered in MODELS below but had no repo entry, so the
    # auto-download fell back to the (wrong) Qwen 3B repo and always failed.
    "deepseek-coder-6.7b-instruct.Q4_K_M.gguf": "TheBloke/deepseek-coder-6.7B-instruct-GGUF",
    # Tiny models
    "qwen2.5-coder-1.5b-instruct-q4_k_m.gguf": "Qwen/Qwen2.5-Coder-1.5B-Instruct-GGUF",
    "qwen2.5-coder-0.5b-instruct-q4_k_m.gguf": "Qwen/Qwen2.5-Coder-0.5B-Instruct-GGUF",
}

# Dropdown display name -> GGUF filename.
MODELS = {
    "⭐ Qwen3 Coder 30B-A3B (Best)": "Qwen3-Coder-30B-A3B-Instruct-Q4_K_M.gguf",
    "🏆 Qwen2.5 Coder 14B (Premium)": "qwen2.5-coder-14b-instruct-q4_k_m.gguf",
    "🧠 DeepSeek V2 Lite (Logic)": "DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M.gguf",
    "⚖️ Qwen2.5 Coder 7B (Balanced)": "qwen2.5-coder-7b-instruct-q4_k_m.gguf",
    "🚀 Qwen2.5 Coder 3B (Fast)": "qwen2.5-coder-3b-instruct-q4_k_m.gguf",
    "⚡ DeepSeek Coder 6.7B": "deepseek-coder-6.7b-instruct.Q4_K_M.gguf",
    "💨 Qwen2.5 Coder 1.5B (Quick)": "qwen2.5-coder-1.5b-instruct-q4_k_m.gguf",
    "🔬 Qwen2.5 Coder 0.5B (Instant)": "qwen2.5-coder-0.5b-instruct-q4_k_m.gguf",
}

# Dropdown display name -> short blurb shown under the model selector.
MODEL_INFO = {
    "⭐ Qwen3 Coder 30B-A3B (Best)": "🏆 Best quality • MoE 30B/3B • ~10GB",
    "🏆 Qwen2.5 Coder 14B (Premium)": "💎 Premium • ~8GB • Complex tasks",
    "🧠 DeepSeek V2 Lite (Logic)": "🧠 MoE 16B • ~9GB • Algorithms",
    "⚖️ Qwen2.5 Coder 7B (Balanced)": "⚖️ Balanced • ~4.5GB • Recommended",
    "🚀 Qwen2.5 Coder 3B (Fast)": "🚀 Fast • ~2GB • Great all-rounder",
    "⚡ DeepSeek Coder 6.7B": "⚡ Logic focused • ~4GB",
    "💨 Qwen2.5 Coder 1.5B (Quick)": "💨 Quick • ~1GB • Simple tasks",
    "🔬 Qwen2.5 Coder 0.5B (Instant)": "🔬 Instant • ~0.3GB • Lightning fast",
}

# Languages offered in the various tool dropdowns.
LANGUAGES = [
    "Python", "JavaScript", "TypeScript", "Go", "Rust",
    "Java", "C++", "C#", "C", "PHP", "Ruby", "Swift", "Kotlin",
    "Scala", "R", "Julia", "Perl", "HTML/CSS", "SQL", "Bash", "PowerShell", "Lua",
]

# ===== MODEL CACHE =====
# Only one model is kept resident at a time (see load_model's eviction).
loaded_models = {}
current_model_name = None
def load_model(model_name):
    """Return a llama_cpp.Llama for *model_name*, loading it on demand.

    Keeps at most one model in memory: any other loaded model is evicted
    first. If the GGUF file is missing it is downloaded from the repo in
    MODEL_REPOS. Returns None for unknown names or download/load failures.
    """
    global loaded_models, current_model_name
    # Fast path: requested model is already active.
    if model_name == current_model_name and model_name in loaded_models:
        return loaded_models[model_name]
    # Evict the previously active model to bound memory usage.
    if current_model_name and current_model_name != model_name:
        if current_model_name in loaded_models:
            del loaded_models[current_model_name]
            print(f"🗑️ Unloaded {current_model_name}")
        # FIX: clear the stale name so get_status() does not keep reporting
        # an evicted model as active if the load below fails.
        current_model_name = None
    filename = MODELS.get(model_name)
    if not filename:
        return None
    model_path = os.path.join(MODELS_DIR, filename)
    # --- AUTO DOWNLOAD LOGIC ---
    if not os.path.exists(model_path):
        # FIX: previously printed a literal "(unknown)" instead of the file.
        print(f"⬇️ Model not found. Attempting download for {filename}...")
        repo_id = MODEL_REPOS.get(filename, "Qwen/Qwen2.5-Coder-3B-Instruct-GGUF")  # Default fallback
        try:
            hf_hub_download(
                repo_id=repo_id,
                filename=filename,
                local_dir=MODELS_DIR,
                local_dir_use_symlinks=False
            )
            print("✅ Download complete!")
        except Exception as e:
            print(f"❌ Download failed: {e}")
            return None
    print(f"📥 Loading {model_name}...")
    try:
        llm = Llama(
            model_path=model_path,
            n_ctx=CONTEXT_SIZE,
            n_threads=4,
            n_batch=512,
            verbose=False
        )
        loaded_models[model_name] = llm
        current_model_name = model_name
        print(f"✅ {model_name} loaded!")
        return llm
    except Exception as e:
        print(f"❌ Failed to load: {e}")
        return None
# ===== WHISPER =====
# Global Whisper handle; stays None if faster-whisper fails to initialize,
# and transcribe_audio() checks for that before use.
whisper_model = None

def init_whisper():
    """Load the tiny CPU Whisper model for voice input (best effort).

    Failures are logged and swallowed so the app still starts without
    speech-to-text support.
    """
    global whisper_model
    try:
        print("Loading Whisper...")
        whisper_model = WhisperModel("tiny", device="cpu", compute_type="int8")
        print("✅ Whisper ready!")
    except Exception as e:
        print(f"❌ Whisper failed: {e}")

# Initialize once at import time.
init_whisper()
| # ===== HELPERS ===== | |
def get_status():
    """Build the one-line status string for the header status bar."""
    downloaded = [
        display for display, fname in MODELS.items()
        if os.path.exists(os.path.join(MODELS_DIR, fname))
    ]
    if not current_model_name:
        return f"🟡 {len(downloaded)} models available"
    # Shorten e.g. "🚀 Qwen2.5 Coder 3B (Fast)" to its last word before "(".
    short = current_model_name.split('(')[0].strip().split()[-1]
    return f"🟢 Ready • {len(downloaded)} models • Active: {short}"
def get_model_info(model_name):
    """Return the short info blurb for *model_name* ("" if unknown)."""
    try:
        return MODEL_INFO[model_name]
    except KeyError:
        return ""
def validate_input(text, name="Input"):
    """Check that *text* is non-blank and at most 50,000 characters.

    Returns (True, None) on success, otherwise (False, warning_message).
    """
    stripped = text.strip() if text else ""
    if not stripped:
        return False, f"⚠️ {name} cannot be empty."
    if len(text) > 50000:
        return False, f"⚠️ {name} too long."
    return True, None
def transcribe_audio(audio):
    """Transcribe an audio file path with Whisper.

    Returns "" for missing input, a warning when no speech is found, and a
    truncated error string on failure.
    """
    if not audio:
        return ""
    if not whisper_model:
        return "❌ Whisper unavailable."
    try:
        segments, _ = whisper_model.transcribe(audio)
        text = " ".join(seg.text for seg in segments).strip()
        return text if text else "⚠️ No speech detected."
    except Exception as e:
        return f"❌ {str(e)[:50]}"
def generate_response(model_name, prompt, temperature=0.7, max_tokens=2048):
    """Run one non-streaming completion against the selected model.

    Returns the generated text, or an error/warning string prefixed with
    ❌/⚠️ (callers test for the ❌ prefix).
    """
    llm = load_model(model_name)
    if not llm:
        return "❌ **Model not available.**"
    try:
        # DeepSeek checkpoints use Alpaca-style tags; Qwen models use ChatML.
        if "deepseek" in model_name.lower():
            formatted = f"### Instruction:\n{prompt}\n\n### Response:\n"
            stop_tokens = ["### Instruction:", "### Response:"]
        else:
            formatted = (
                "<|im_start|>system\nYou are an expert coding assistant.<|im_end|>\n"
                f"<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant\n"
            )
            stop_tokens = ["<|im_end|>", "<|im_start|>"]
        output = llm(
            formatted,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=0.9,
            top_k=40,
            repeat_penalty=1.1,
            stop=stop_tokens,
            echo=False,
        )
        text = output["choices"][0]["text"].strip()
        return text if text else "⚠️ Empty response."
    except Exception as e:
        return f"❌ **Error:** {str(e)[:100]}"
def extract_code(text):
    """Extract the contents of the first fenced ``` block in *text*.

    Returns *text* unchanged when it is falsy or contains no fence. The
    language tag on the opening fence line (e.g. ```python) is dropped.
    """
    if not text or "```" not in text:
        return text
    # FIX: removed the bare `except:` wrapper — str.split/strip cannot raise,
    # so it only hid bugs (and would even swallow KeyboardInterrupt).
    parts = text.split("```")
    if len(parts) >= 2:
        code = parts[1]
        # Strip the language hint that sits on the opening fence line.
        if "\n" in code:
            code = code.split("\n", 1)[-1]
        return code.strip()
    return text
# ===== HISTORY FUNCTIONS =====
def export_chat_history(history):
    """Write the chat history to a timestamped JSON file under /tmp.

    Returns (filepath, status_message); filepath is None when history is
    empty.
    """
    if not history:
        return None, "⚠️ No chat history to export."
    payload = {
        "exported_at": datetime.now().isoformat(),
        "tool": "Axon v6 Chat",
        "messages": history  # Direct dump for Gradio 5 format
    }
    stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    out_path = f"/tmp/axon_chat_{stamp}.json"
    with open(out_path, "w") as f:
        json.dump(payload, f, indent=2)
    return out_path, f"✅ Exported {len(history)} messages!"
def export_code(code, language):
    """Save *code* to /tmp with a file extension matching *language*.

    Returns (filepath, status_message); filepath is None for blank input.
    Unknown languages fall back to a .txt extension.
    """
    if not code or not code.strip():
        return None, "⚠️ No code to export."
    extensions = {
        "Python": "py", "JavaScript": "js", "TypeScript": "ts", "Go": "go",
        "Rust": "rs", "Java": "java", "C++": "cpp", "C#": "cs", "C": "c",
        "PHP": "php", "Ruby": "rb", "Swift": "swift", "Kotlin": "kt",
        "HTML/CSS": "html", "SQL": "sql", "Bash": "sh", "PowerShell": "ps1", "Lua": "lua"
    }
    ext = extensions.get(language, "txt")
    stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    out_path = f"/tmp/axon_code_{stamp}.{ext}"
    with open(out_path, "w") as f:
        f.write(code)
    return out_path, f"✅ Exported as .{ext}!"
# ===== STREAMING (UPDATED FOR GRADIO 5) =====
def chat_stream(message, history, model_name, temperature, max_tokens):
    """Streaming chat handler for the Gradio 5 Chatbot.

    *history* is a list of {"role", "content"} dicts. Yields the whole
    updated history after each token so the UI re-renders incrementally.
    Validation and model-load failures are surfaced inline as assistant
    messages rather than raised.
    """
    history = history or []
    valid, error = validate_input(message, "Message")
    if not valid:
        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": error})
        yield history
        return
    llm = load_model(model_name)
    if not llm:
        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": "❌ Model not available."})
        yield history
        return
    # Build the prompt from the PRIOR turns, before the new message is
    # appended to history below. DeepSeek uses Alpaca-style tags; every
    # other model uses ChatML.
    if "deepseek" in model_name.lower():
        conv = "### Instruction:\nYou are an expert coding assistant. Use markdown code blocks.\n\n"
        for msg in history:
            if msg['role'] == 'user':
                conv += f"User: {msg['content']}\n"
            else:
                conv += f"Assistant: {msg['content']}\n\n"
        conv += f"User: {message}\n\n### Response:\n"
        stop_tokens = ["### Instruction:", "User:"]
    else:
        conv = "<|im_start|>system\nYou are an expert coding assistant. Use markdown code blocks.<|im_end|>\n"
        for msg in history:
            role = msg['role']
            content = msg['content']
            conv += f"<|im_start|>{role}\n{content}<|im_end|>\n"
        conv += f"<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"
        stop_tokens = ["<|im_end|>", "<|im_start|>"]
    # Append the user turn plus an empty assistant placeholder that is
    # filled in token-by-token as the stream arrives.
    history.append({"role": "user", "content": message})
    history.append({"role": "assistant", "content": ""})
    try:
        full = ""
        for chunk in llm(conv, max_tokens=max_tokens, temperature=temperature, top_p=0.9, stop=stop_tokens, stream=True):
            text_chunk = chunk["choices"][0]["text"]
            full += text_chunk
            history[-1]['content'] = full
            yield history
    except Exception as e:
        # Replace the partial answer with a truncated error message.
        history[-1]['content'] = f"❌ Error: {str(e)[:100]}"
        yield history
def generate_stream(prompt, language, model_name, temperature, max_tokens):
    """Streaming code generation for the Generate tab.

    Yields the accumulated output after every token, passed through
    extract_code() so the Code component shows fence-stripped code live.
    Validation/load failures are yielded as single error strings.
    """
    valid, error = validate_input(prompt, "Description")
    if not valid:
        yield error
        return
    llm = load_model(model_name)
    if not llm:
        yield "❌ Model not available."
        return
    # DeepSeek uses Alpaca-style tags; everything else uses ChatML.
    if "deepseek" in model_name.lower():
        formatted = f"### Instruction:\nWrite clean {language} code with comments:\n{prompt}\n\nOutput only code:\n\n### Response:\n"
        stop_tokens = ["### Instruction:"]
    else:
        formatted = f"<|im_start|>system\nYou are an expert coder.<|im_end|>\n<|im_start|>user\nWrite clean {language} code with comments:\n{prompt}\n\nOutput only code:<|im_end|>\n<|im_start|>assistant\n"
        stop_tokens = ["<|im_end|>"]
    try:
        full = ""
        for chunk in llm(formatted, max_tokens=max_tokens, temperature=temperature, stop=stop_tokens, stream=True):
            full += chunk["choices"][0]["text"]
            # Re-extract on every chunk: the code fence may close mid-stream.
            yield extract_code(full)
    except Exception as e:
        yield f"❌ {str(e)[:50]}"
# ===== CORE FEATURES =====
def explain_code(code, model_name, detail, max_tokens):
    """Explain a code snippet at the requested level of detail."""
    ok, err = validate_input(code, "Code")
    if not ok:
        return err
    prompt_by_detail = {
        "Brief": f"Explain briefly (2-3 sentences):\n{code}",
        "Normal": f"Explain this code:\n{code}",
        "Detailed": f"Detailed explanation (purpose, logic, complexity, improvements):\n{code}",
    }
    # Unknown detail levels fall back to the "Normal" prompt.
    prompt = prompt_by_detail.get(detail, prompt_by_detail["Normal"])
    return generate_response(model_name, prompt, 0.5, max_tokens)
def fix_code(code, error_msg, model_name, max_tokens):
    """Ask the model to repair *code*, given an optional error message."""
    ok, err = validate_input(code, "Code")
    if not ok:
        return err
    # A blank error box becomes a generic "Not working" hint.
    detail = error_msg.strip() if error_msg else "Not working"
    prompt = f"Fix this code. Error: {detail}\n\n{code}\n\nFixed code and explanation:"
    return generate_response(model_name, prompt, 0.3, max_tokens)
def review_code(code, model_name, max_tokens):
    """Request a bug/performance/security review of *code*."""
    ok, err = validate_input(code, "Code")
    if not ok:
        return err
    prompt = f"Review for bugs, performance, security:\n{code}"
    return generate_response(model_name, prompt, 0.4, max_tokens)
def convert_code(code, from_lang, to_lang, model_name, max_tokens):
    """Translate *code* between languages, returning only the converted code."""
    ok, err = validate_input(code, "Code")
    if not ok:
        return err
    if from_lang == to_lang:
        return "⚠️ Same language."
    result = generate_response(model_name, f"Convert {from_lang} to {to_lang}. Code only:\n{code}", 0.3, max_tokens)
    # Pass error strings through untouched; otherwise strip the code fence.
    if result.startswith("❌"):
        return result
    return extract_code(result)
def generate_tests(code, language, framework, model_name, max_tokens):
    """Generate unit tests for *code* using the chosen framework."""
    ok, err = validate_input(code, "Code")
    if not ok:
        return err
    # Default to pytest when the framework box is left blank.
    fw = framework.strip() if framework else "pytest"
    result = generate_response(model_name, f"Generate {fw} tests for {language}. Code only:\n{code}", 0.3, max_tokens)
    if result.startswith("❌"):
        return result
    return extract_code(result)
def document_code(code, language, style, model_name, max_tokens):
    """Add documentation (docstrings/comments/README) to *code*."""
    ok, err = validate_input(code, "Code")
    if not ok:
        return err
    result = generate_response(model_name, f"Add {style.lower()} to this {language} code:\n{code}", 0.4, max_tokens)
    # README output is prose (keep markdown); errors pass through untouched.
    if style == "README" or result.startswith("❌"):
        return result
    return extract_code(result)
def optimize_code(code, language, focus, model_name, max_tokens):
    """Ask for an optimized version of *code*, targeting *focus*."""
    ok, err = validate_input(code, "Code")
    if not ok:
        return err
    prompt = f"Optimize {language} for {focus.lower()}. Explain:\n{code}"
    return generate_response(model_name, prompt, 0.3, max_tokens)
def security_scan(code, model_name, max_tokens):
    """Run an LLM-based security audit over *code*.

    Returns the model's markdown report, or a validation/error string.
    """
    valid, err = validate_input(code, "Code")
    if not valid:
        return err
    # Prompt body deliberately unindented so the model sees clean lines.
    prompt = """Security audit this code. Check for:
1. Injection vulnerabilities (SQL, XSS, Command)
2. Authentication issues
3. Data exposure
4. Input validation
5. Cryptography issues
For each issue: Severity (🔴🟠🟡🟢), Location, Description, Fix.
Code:
""" + code
    return generate_response(model_name, prompt, 0.3, max_tokens)
def analyze_complexity(code, model_name, max_tokens):
    """Ask the model for a Big-O time/space complexity analysis of *code*."""
    valid, err = validate_input(code, "Code")
    if not valid:
        return err
    # Prompt body deliberately unindented so the model sees clean lines.
    prompt = """Analyze time and space complexity:
1. Time Complexity (Big O)
2. Space Complexity (Big O)
3. Best/Average/Worst cases
4. Bottlenecks
5. Optimization suggestions
Code:
""" + code
    return generate_response(model_name, prompt, 0.4, max_tokens)
def build_sql(description, db_type, model_name, max_tokens):
    """Build a SQL query for *db_type* from a natural-language description."""
    ok, err = validate_input(description, "Description")
    if not ok:
        return err
    result = generate_response(model_name, f"Write optimized {db_type} SQL for:\n{description}\n\nSQL only:", 0.2, max_tokens)
    if result.startswith("❌"):
        return result
    return extract_code(result)
def build_shell(description, shell_type, model_name, max_tokens):
    """Build a shell command for *shell_type* from a description."""
    ok, err = validate_input(description, "Description")
    if not ok:
        return err
    result = generate_response(model_name, f"Write {shell_type} command for:\n{description}\n\nCommand only:", 0.2, max_tokens)
    if result.startswith("❌"):
        return result
    return extract_code(result)
def code_diff(code1, code2, model_name, max_tokens):
    """Compare two snippets and report differences plus a quality verdict."""
    v1, e1 = validate_input(code1, "Code 1")
    v2, e2 = validate_input(code2, "Code 2")
    if not v1:
        return e1
    if not v2:
        return e2
    # Prompt body deliberately unindented so the model sees clean lines.
    prompt = f"""Compare these code snippets:
1. Key differences
2. Functionality changes
3. Performance impact
4. Which is better and why
=== CODE 1 ===
{code1}
=== CODE 2 ===
{code2}"""
    return generate_response(model_name, prompt, 0.4, max_tokens)
def generate_mock_data(schema, count, format_type, model_name, max_tokens):
    """Produce *count* mock records matching *schema* in the given format."""
    ok, err = validate_input(schema, "Schema")
    if not ok:
        return err
    result = generate_response(model_name, f"Generate {count} realistic mock entries as {format_type}:\n{schema}", 0.7, max_tokens)
    if result.startswith("❌"):
        return result
    return extract_code(result)
def interview_challenge(topic, difficulty, language, model_name, max_tokens):
    """Generate a coding-interview problem (with solution) on *topic*."""
    valid, err = validate_input(topic, "Topic")
    if not valid:
        return err
    # Prompt body deliberately unindented so the model sees clean lines.
    prompt = f"""Create {difficulty} {language} interview challenge about {topic}.
Include:
1. Problem statement
2. Examples (2-3)
3. Constraints
4. Hints
5. Solution with explanation"""
    return generate_response(model_name, prompt, 0.6, max_tokens)
def to_pseudocode(code, output_type, model_name, max_tokens):
    """Convert *code* to pseudocode or a Mermaid.js flowchart."""
    ok, err = validate_input(code, "Code")
    if not ok:
        return err
    prompt = (
        f"Convert to pseudocode:\n{code}"
        if output_type == "Pseudocode"
        else f"Create Mermaid.js flowchart for:\n{code}"
    )
    return generate_response(model_name, prompt, 0.3, max_tokens)
def build_cron(description, model_name, max_tokens):
    """Build a cron expression (with breakdown) from a description."""
    ok, err = validate_input(description, "Description")
    if not ok:
        return err
    prompt = f"Create cron expression for: {description}\n\nInclude: expression, breakdown, next 5 runs"
    return generate_response(model_name, prompt, 0.2, max_tokens)
def build_regex(description, model_name, max_tokens):
    """Build a regex (pattern + explanation + examples) from a description."""
    ok, err = validate_input(description, "Description")
    if not ok:
        return err
    prompt = f"Create regex for: {description}\n\nPattern, explanation, examples, Python code:"
    return generate_response(model_name, prompt, 0.3, max_tokens)
def build_api(description, framework, model_name, max_tokens):
    """Scaffold a REST endpoint in the chosen framework."""
    ok, err = validate_input(description, "Description")
    if not ok:
        return err
    result = generate_response(model_name, f"Create {framework} REST endpoint:\n{description}\n\nCode:", 0.3, max_tokens)
    if result.startswith("❌"):
        return result
    return extract_code(result)
def convert_data_format(data, from_fmt, to_fmt, model_name, max_tokens):
    """Convert structured data between JSON/YAML/XML/CSV via the model."""
    ok, err = validate_input(data, "Data")
    if not ok:
        return err
    if from_fmt == to_fmt:
        return "⚠️ Same format."
    result = generate_response(model_name, f"Convert {from_fmt} to {to_fmt}:\n{data}\n\nOutput only:", 0.1, max_tokens)
    if result.startswith("❌"):
        return result
    return extract_code(result)
# ===== THEME =====
# Two Soft-based themes sharing indigo/blue hues. Only dark_theme is passed
# to gr.Blocks below; light_theme is kept for a light-mode option.
light_theme = gr.themes.Soft(
    primary_hue="indigo",
    secondary_hue="blue",
)
# Dark variant: slate backgrounds/borders forced for both light & dark modes.
dark_theme = gr.themes.Soft(
    primary_hue="indigo",
    secondary_hue="blue",
).set(
    body_background_fill="#0f172a",
    body_background_fill_dark="#0f172a",
    block_background_fill="#1e293b",
    block_background_fill_dark="#1e293b",
    border_color_primary="#334155",
    border_color_primary_dark="#334155",
)
| # ===== UI ===== | |
| # FIX: Title and theme moved here | |
| with gr.Blocks(title="Axon v6", theme=dark_theme) as demo: | |
| # State for theme | |
| is_dark = gr.State(True) | |
| # Header | |
| gr.HTML(""" | |
| <div style="background: linear-gradient(135deg, #6366f1, #8b5cf6, #06b6d4); border-radius: 16px; padding: 24px; margin-bottom: 16px;"> | |
| <div style="display: flex; justify-content: space-between; align-items: center; flex-wrap: wrap; gap: 16px;"> | |
| <div> | |
| <h1 style="color: white; margin: 0; font-size: 2rem;">🔥 Axon v6</h1> | |
| <p style="color: rgba(255,255,255,0.9); margin: 4px 0 0 0;">AI Coding Assistant • 8 Models • 19 Tools • 100% Local</p> | |
| </div> | |
| <div style="display: flex; gap: 8px; flex-wrap: wrap;"> | |
| <span style="background: rgba(255,255,255,0.2); padding: 4px 12px; border-radius: 20px; font-size: 0.8rem; color: white;">🤖 8 Models</span> | |
| <span style="background: rgba(255,255,255,0.2); padding: 4px 12px; border-radius: 20px; font-size: 0.8rem; color: white;">🛠️ 19 Tools</span> | |
| <span style="background: rgba(255,255,255,0.2); padding: 4px 12px; border-radius: 20px; font-size: 0.8rem; color: white;">⚡ llama.cpp</span> | |
| </div> | |
| </div> | |
| </div> | |
| """) | |
| # Status row | |
| with gr.Row(): | |
| status = gr.Markdown(value=get_status, every=5) | |
| # Settings | |
| with gr.Row(): | |
| model_dropdown = gr.Dropdown(choices=list(MODELS.keys()), value="🚀 Qwen2.5 Coder 3B (Fast)", label="🤖 Model", scale=3) | |
| temperature = gr.Slider(0, 1, value=0.7, step=0.1, label="🌡️ Creativity", scale=2) | |
| max_tokens = gr.Slider(256, 4096, value=2048, step=256, label="📏 Max Tokens", scale=2) | |
| model_info = gr.Markdown(value="🚀 Fast • ~2GB • Great all-rounder") | |
| model_dropdown.change(get_model_info, model_dropdown, model_info) | |
| with gr.Tabs(): | |
| # ===== HOME ===== | |
| with gr.TabItem("🏠 Home"): | |
| gr.HTML(""" | |
| <div style="padding: 20px;"> | |
| <h2 style="margin-top: 0;">Welcome to Axon v6! 🔥</h2> | |
| <p>The ultimate free AI coding assistant - running 100% locally on your browser.</p> | |
| <h3>🚀 Quick Start</h3> | |
| <ol> | |
| <li><strong>Select a model</strong> from the dropdown above</li> | |
| <li><strong>Choose a tool</strong> from the tabs</li> | |
| <li><strong>Start coding!</strong></li> | |
| </ol> | |
| <h3>🤖 Models</h3> | |
| <table style="width: 100%; border-collapse: collapse;"> | |
| <tr style="background: rgba(99, 102, 241, 0.2);"> | |
| <th style="padding: 8px; text-align: left;">Model</th> | |
| <th style="padding: 8px; text-align: left;">Size</th> | |
| <th style="padding: 8px; text-align: left;">Best For</th> | |
| </tr> | |
| <tr><td style="padding: 8px;">⭐ Qwen3 30B-A3B</td><td>~10GB</td><td>Best quality (MoE)</td></tr> | |
| <tr><td style="padding: 8px;">🏆 Qwen2.5 14B</td><td>~8GB</td><td>Premium tasks</td></tr> | |
| <tr><td style="padding: 8px;">🧠 DeepSeek V2 Lite</td><td>~9GB</td><td>Complex logic</td></tr> | |
| <tr><td style="padding: 8px;">⚖️ Qwen2.5 7B</td><td>~4.5GB</td><td>Balanced</td></tr> | |
| <tr><td style="padding: 8px;">🚀 Qwen2.5 3B</td><td>~2GB</td><td>Fast & capable</td></tr> | |
| <tr><td style="padding: 8px;">⚡ DeepSeek 6.7B</td><td>~4GB</td><td>Algorithms</td></tr> | |
| <tr><td style="padding: 8px;">💨 Qwen2.5 1.5B</td><td>~1GB</td><td>Quick tasks</td></tr> | |
| <tr><td style="padding: 8px;">🔬 Qwen2.5 0.5B</td><td>~0.3GB</td><td>Instant</td></tr> | |
| </table> | |
| <h3>🛠️ 19 Tools Available</h3> | |
| <p> | |
| <strong>Core:</strong> Chat, Generate, Explain, Debug, Review<br> | |
| <strong>Advanced:</strong> Security, Complexity, Convert, Test, Document, Optimize, Diff, Pseudo, Interview<br> | |
| <strong>Builders:</strong> SQL, Shell, Cron, Regex, API<br> | |
| <strong>Data:</strong> Mock Data, Format Converter | |
| </p> | |
| <h3>🔗 Links</h3> | |
| <p> | |
| <a href="https://huggingface.co/datasets/AIencoder/llama-cpp-wheels" target="_blank">🛞 Pre-built Wheels</a> • | |
| <a href="https://github.com/ggerganov/llama.cpp" target="_blank">📦 llama.cpp</a> • | |
| <a href="https://huggingface.co/Qwen" target="_blank">🤖 Qwen Models</a> | |
| </p> | |
| </div> | |
| """) | |
| # Share buttons | |
| gr.HTML(""" | |
| <div style="padding: 20px; border-top: 1px solid #334155;"> | |
| <h3>📤 Share Axon</h3> | |
| <div style="display: flex; gap: 10px; flex-wrap: wrap;"> | |
| <a href="https://twitter.com/intent/tweet?text=Check%20out%20Axon%20v6%20-%20Free%20AI%20Coding%20Assistant!%20🔥%208%20Models,%2019%20Tools,%20100%25%20Local&url=https://huggingface.co/spaces/AIencoder/Axon" target="_blank" style="background: #1DA1F2; color: white; padding: 8px 16px; border-radius: 8px; text-decoration: none;">🐦 Twitter</a> | |
| <a href="https://www.reddit.com/submit?url=https://huggingface.co/spaces/AIencoder/Axon&title=Axon%20v6%20-%20Free%20AI%20Coding%20Assistant%20with%208%20Models%20and%2019%20Tools" target="_blank" style="background: #FF4500; color: white; padding: 8px 16px; border-radius: 8px; text-decoration: none;">🤖 Reddit</a> | |
| <a href="https://www.linkedin.com/sharing/share-offsite/?url=https://huggingface.co/spaces/AIencoder/Axon" target="_blank" style="background: #0A66C2; color: white; padding: 8px 16px; border-radius: 8px; text-decoration: none;">💼 LinkedIn</a> | |
| <a href="https://huggingface.co/spaces/AIencoder/Axon" target="_blank" style="background: #FFD21E; color: black; padding: 8px 16px; border-radius: 8px; text-decoration: none;">🤗 HuggingFace</a> | |
| </div> | |
| </div> | |
| """) | |
| # ===== CHAT ===== | |
| with gr.TabItem("💬 Chat"): | |
| chatbot = gr.Chatbot(height=400) # Removed type="messages" | |
| with gr.Row(): | |
| msg = gr.Textbox(placeholder="Ask anything...", show_label=False, scale=8) | |
| send = gr.Button("Send", variant="primary", scale=1) | |
| with gr.Row(): | |
| audio = gr.Audio(sources=["microphone"], type="filepath", label="🎤", scale=2) | |
| transcribe = gr.Button("🎤 Transcribe", scale=1) | |
| clear = gr.Button("🗑️ Clear", scale=1) | |
| export_chat_btn = gr.Button("💾 Export", scale=1) | |
| chat_export_file = gr.File(label="Download", visible=False) | |
| chat_export_status = gr.Markdown("") | |
| # ===== GENERATE ===== | |
| with gr.TabItem("⚡ Generate"): | |
| with gr.Row(): | |
| with gr.Column(): | |
| gen_prompt = gr.Textbox(label="📝 Describe", lines=3) | |
| with gr.Row(): | |
| gen_lang = gr.Dropdown(LANGUAGES, value="Python", label="Language") | |
| gen_temp = gr.Slider(0, 1, value=0.3, step=0.1, label="🌡️") | |
| gen_btn = gr.Button("⚡ Generate", variant="primary") | |
| with gr.Column(): | |
| gen_output = gr.Code(label="Code", language="python", lines=14) | |
| with gr.Row(): | |
| gen_export_btn = gr.Button("💾 Export Code") | |
| gen_export_status = gr.Markdown("") | |
| gen_export_file = gr.File(label="Download", visible=False) | |
| # ===== EXPLAIN ===== | |
| with gr.TabItem("🔍 Explain"): | |
| with gr.Row(): | |
| with gr.Column(): # FIXED: used to be Column() | |
| explain_input = gr.Code(label="Code", lines=10) | |
| explain_detail = gr.Radio(["Brief", "Normal", "Detailed"], value="Normal") | |
| explain_btn = gr.Button("🔍 Explain", variant="primary") | |
| with gr.Column(): | |
| explain_output = gr.Markdown() | |
| # ===== DEBUG ===== | |
| with gr.TabItem("🔧 Debug"): | |
| with gr.Row(): | |
| with gr.Column(): | |
| fix_input = gr.Code(label="Code", lines=8) | |
| fix_error = gr.Textbox(label="Error", lines=2) | |
| fix_btn = gr.Button("🔧 Fix", variant="primary") | |
| with gr.Column(): | |
| fix_output = gr.Markdown() | |
| # ===== REVIEW ===== | |
| with gr.TabItem("📋 Review"): | |
| with gr.Row(): | |
| with gr.Column(): | |
| review_input = gr.Code(label="Code", lines=10) | |
| review_btn = gr.Button("📋 Review", variant="primary") | |
| with gr.Column(): | |
| review_output = gr.Markdown() | |
| # ===== SECURITY ===== | |
| with gr.TabItem("🔐 Security"): | |
| with gr.Row(): | |
| with gr.Column(): | |
| security_input = gr.Code(label="Code", lines=10) | |
| security_btn = gr.Button("🔐 Scan", variant="primary") | |
| with gr.Column(): | |
| security_output = gr.Markdown() | |
| # ===== COMPLEXITY ===== | |
| with gr.TabItem("📊 Complexity"): | |
| with gr.Row(): | |
| with gr.Column(): | |
| complexity_input = gr.Code(label="Code", lines=10) | |
| complexity_btn = gr.Button("📊 Analyze", variant="primary") | |
| with gr.Column(): | |
| complexity_output = gr.Markdown() | |
| # ===== CONVERT ===== | |
| with gr.TabItem("🔄 Convert"): | |
| with gr.Row(): | |
| with gr.Column(): | |
| convert_input = gr.Code(label="Source", lines=10) | |
| with gr.Row(): | |
| convert_from = gr.Dropdown(LANGUAGES, value="Python", label="From") | |
| convert_to = gr.Dropdown(LANGUAGES, value="JavaScript", label="To") | |
| convert_btn = gr.Button("🔄 Convert", variant="primary") | |
| with gr.Column(): | |
| convert_output = gr.Code(label="Result", lines=10) | |
| # ===== TEST ===== | |
| with gr.TabItem("🧪 Test"): | |
| with gr.Row(): | |
| with gr.Column(): | |
| test_input = gr.Code(label="Code", lines=10) | |
| with gr.Row(): | |
| test_lang = gr.Dropdown(LANGUAGES[:10], value="Python", label="Language") | |
| test_fw = gr.Textbox(label="Framework", placeholder="pytest") | |
| test_btn = gr.Button("🧪 Generate", variant="primary") | |
| with gr.Column(): | |
| test_output = gr.Code(label="Tests", lines=10) | |
| # ===== DOCUMENT ===== | |
| with gr.TabItem("📝 Document"): | |
| with gr.Row(): | |
| with gr.Column(): | |
| doc_input = gr.Code(label="Code", lines=10) | |
| with gr.Row(): | |
| doc_lang = gr.Dropdown(LANGUAGES, value="Python", label="Language") | |
| doc_style = gr.Dropdown(["Docstrings", "Comments", "Both", "README"], value="Both") | |
| doc_btn = gr.Button("📝 Document", variant="primary") | |
| with gr.Column(): | |
| doc_output = gr.Code(label="Documented", lines=10) | |
| # ===== OPTIMIZE ===== | |
| with gr.TabItem("🚀 Optimize"): | |
| with gr.Row(): | |
| with gr.Column(): | |
| opt_input = gr.Code(label="Code", lines=10) | |
| with gr.Row(): | |
| opt_lang = gr.Dropdown(LANGUAGES, value="Python", label="Language") | |
| opt_focus = gr.Dropdown(["All", "Performance", "Readability", "Memory"], value="All") | |
| opt_btn = gr.Button("🚀 Optimize", variant="primary") | |
| with gr.Column(): | |
| opt_output = gr.Markdown() | |
| # ===== DIFF ===== | |
| with gr.TabItem("🔀 Diff"): | |
| with gr.Row(): | |
| with gr.Column(): | |
| diff_code1 = gr.Code(label="Code 1", lines=8) | |
| diff_code2 = gr.Code(label="Code 2", lines=8) | |
| diff_btn = gr.Button("🔀 Compare", variant="primary") | |
| with gr.Column(): | |
| diff_output = gr.Markdown() | |
| # ===== PSEUDOCODE ===== | |
| with gr.TabItem("📐 Pseudo"): | |
| with gr.Row(): | |
| with gr.Column(): | |
| pseudo_input = gr.Code(label="Code", lines=10) | |
| pseudo_type = gr.Radio(["Pseudocode", "Flowchart"], value="Pseudocode") | |
| pseudo_btn = gr.Button("📐 Convert", variant="primary") | |
| with gr.Column(): | |
| pseudo_output = gr.Markdown() | |
| # ===== INTERVIEW ===== | |
| with gr.TabItem("🎓 Interview"): | |
| with gr.Row(): | |
| with gr.Column(): | |
| interview_topic = gr.Textbox(label="Topic", placeholder="Binary trees...") | |
| with gr.Row(): | |
| interview_diff = gr.Dropdown(["Easy", "Medium", "Hard"], value="Medium") | |
| interview_lang = gr.Dropdown(LANGUAGES[:8], value="Python") | |
| interview_btn = gr.Button("🎓 Generate", variant="primary") | |
| with gr.Column(): | |
| interview_output = gr.Markdown() | |
# ===== BUILDERS =====
# One tab hosting five small "describe it → generate it" utilities
# (SQL, shell, cron, regex, API scaffold), each a Row of input/output columns.
with gr.TabItem("🛠️ Builders"):
    gr.Markdown("### 🗄️ SQL")
    with gr.Row():
        with gr.Column():
            sql_desc = gr.Textbox(label="Describe", lines=2)
            sql_type = gr.Dropdown(["PostgreSQL", "MySQL", "SQLite"], value="PostgreSQL")
            sql_btn = gr.Button("🗄️ Build", variant="primary")
        with gr.Column():
            sql_output = gr.Code(lines=6)
    gr.Markdown("---\n### 🐚 Shell")
    with gr.Row():
        with gr.Column():
            shell_desc = gr.Textbox(label="Describe", lines=2)
            shell_type = gr.Dropdown(["Bash", "PowerShell", "Zsh"], value="Bash")
            shell_btn = gr.Button("🐚 Build", variant="primary")
        with gr.Column():
            shell_output = gr.Code(lines=6)
    gr.Markdown("---\n### ⏰ Cron")
    with gr.Row():
        with gr.Column():
            cron_desc = gr.Textbox(label="Describe", lines=2)
            cron_btn = gr.Button("⏰ Build", variant="primary")
        with gr.Column():
            # Markdown (not Code) because cron output includes an explanation.
            cron_output = gr.Markdown()
    gr.Markdown("---\n### 🎯 Regex")
    with gr.Row():
        with gr.Column():
            regex_desc = gr.Textbox(label="Describe", lines=2)
            regex_btn = gr.Button("🎯 Build", variant="primary")
        with gr.Column():
            regex_output = gr.Markdown()
    gr.Markdown("---\n### 🔗 API")
    with gr.Row():
        with gr.Column():
            api_desc = gr.Textbox(label="Describe", lines=2)
            api_fw = gr.Dropdown(["FastAPI", "Express", "Flask"], value="FastAPI")
            api_btn = gr.Button("🔗 Build", variant="primary")
        with gr.Column():
            api_output = gr.Code(lines=8)
# ===== DATA =====
# Data utilities tab: mock-data generation from a schema description, and a
# JSON/YAML/XML/CSV format converter.
with gr.TabItem("📦 Data"):
    gr.Markdown("### 📦 Mock Data")
    with gr.Row():
        with gr.Column():
            mock_schema = gr.Textbox(label="Schema", lines=2, placeholder="User: name, email, age...")
            with gr.Row():
                mock_count = gr.Slider(1, 20, value=5, step=1, label="Count")
                mock_format = gr.Dropdown(["JSON", "CSV", "SQL"], value="JSON")
            mock_btn = gr.Button("📦 Generate", variant="primary")
        with gr.Column():
            mock_output = gr.Code(lines=10)
    gr.Markdown("---\n### 🔄 Format Converter")
    with gr.Row():
        with gr.Column():
            format_input = gr.Code(label="Input", lines=6)
            with gr.Row():
                format_from = gr.Dropdown(["JSON", "YAML", "XML", "CSV"], value="JSON")
                format_to = gr.Dropdown(["JSON", "YAML", "XML", "CSV"], value="YAML")
            format_btn = gr.Button("🔄 Convert", variant="primary")
        with gr.Column():
            format_output = gr.Code(label="Output", lines=6)
# Footer
# Static HTML credits/links rendered below all tabs (string passed verbatim).
gr.HTML("""
<div style="text-align:center;padding:16px;border-top:1px solid #334155;margin-top:20px;">
<p style="opacity:0.7;">🔥 Axon v6 • Built with ❤️ by <a href="https://huggingface.co/AIencoder" target="_blank" style="color:#8b5cf6;">AIencoder</a></p>
<p style="opacity:0.5;font-size:0.8rem;">
Wheels: <a href="https://huggingface.co/datasets/AIencoder/llama-cpp-wheels" target="_blank" style="color:#8b5cf6;">AIencoder/llama-cpp-wheels</a> •
Powered by <a href="https://github.com/ggerganov/llama.cpp" target="_blank" style="color:#8b5cf6;">llama.cpp</a>
</p>
</div>
""")
| # ===== EVENTS ===== | |
def respond(message, history, model, temp, tokens):
    """Stream assistant replies into the chat and clear the input textbox.

    Yields (updated_history, "") pairs; the empty string resets the message
    box on every streamed update. Gradio 5 manages the history state itself.
    """
    stream = chat_stream(message, history, model, temp, tokens)
    for partial_history in stream:
        yield partial_history, ""
# Chat events: pressing Enter and clicking Send both stream a reply and
# clear the input box (respond yields (history, "")).
msg.submit(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
send.click(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
# Reset the conversation to an empty list.
clear.click(lambda: [], None, chatbot)
# Voice input: Whisper transcription lands in the chat textbox for editing.
transcribe.click(transcribe_audio, audio, msg)
| # Export handlers | |
def handle_chat_export(history):
    """Write the chat history to a file and surface the download link.

    The file component is shown only when export produced a file; the status
    string is displayed either way.
    """
    exported_path, status_message = export_chat_history(history)
    is_ready = exported_path is not None
    return gr.update(value=exported_path, visible=is_ready), status_message
def handle_code_export(code, lang):
    """Save generated code to a language-appropriate file for download.

    Mirrors handle-chat-export behavior: the file component becomes visible
    only when a file was actually produced.
    """
    exported_path, status_message = export_code(code, lang)
    is_ready = exported_path is not None
    return gr.update(value=exported_path, visible=is_ready), status_message
# Export buttons: produce a downloadable file plus a status message.
export_chat_btn.click(handle_chat_export, chatbot, [chat_export_file, chat_export_status])
gen_export_btn.click(handle_code_export, [gen_output, gen_lang], [gen_export_file, gen_export_status])
# Code tools: each button feeds its tab's inputs (plus the shared model
# selector and max-token slider) into the matching generation function.
gen_btn.click(generate_stream, [gen_prompt, gen_lang, model_dropdown, gen_temp, max_tokens], gen_output)
explain_btn.click(explain_code, [explain_input, model_dropdown, explain_detail, max_tokens], explain_output)
fix_btn.click(fix_code, [fix_input, fix_error, model_dropdown, max_tokens], fix_output)
review_btn.click(review_code, [review_input, model_dropdown, max_tokens], review_output)
convert_btn.click(convert_code, [convert_input, convert_from, convert_to, model_dropdown, max_tokens], convert_output)
test_btn.click(generate_tests, [test_input, test_lang, test_fw, model_dropdown, max_tokens], test_output)
doc_btn.click(document_code, [doc_input, doc_lang, doc_style, model_dropdown, max_tokens], doc_output)
opt_btn.click(optimize_code, [opt_input, opt_lang, opt_focus, model_dropdown, max_tokens], opt_output)
security_btn.click(security_scan, [security_input, model_dropdown, max_tokens], security_output)
complexity_btn.click(analyze_complexity, [complexity_input, model_dropdown, max_tokens], complexity_output)
diff_btn.click(code_diff, [diff_code1, diff_code2, model_dropdown, max_tokens], diff_output)
pseudo_btn.click(to_pseudocode, [pseudo_input, pseudo_type, model_dropdown, max_tokens], pseudo_output)
interview_btn.click(interview_challenge, [interview_topic, interview_diff, interview_lang, model_dropdown, max_tokens], interview_output)
# Builder utilities (SQL / shell / cron / regex / API scaffolding).
sql_btn.click(build_sql, [sql_desc, sql_type, model_dropdown, max_tokens], sql_output)
shell_btn.click(build_shell, [shell_desc, shell_type, model_dropdown, max_tokens], shell_output)
cron_btn.click(build_cron, [cron_desc, model_dropdown, max_tokens], cron_output)
regex_btn.click(build_regex, [regex_desc, model_dropdown, max_tokens], regex_output)
api_btn.click(build_api, [api_desc, api_fw, model_dropdown, max_tokens], api_output)
# Data utilities (mock-data generation and format conversion).
mock_btn.click(generate_mock_data, [mock_schema, mock_count, mock_format, model_dropdown, max_tokens], mock_output)
format_btn.click(convert_data_format, [format_input, format_from, format_to, model_dropdown, max_tokens], format_output)
# Preload: warm the default model so the first request doesn't pay the load
# cost. This is best-effort — if the download or load fails (e.g. network
# hiccup fetching the GGUF), the app should still launch and load the model
# lazily on the first request instead of crashing at startup.
print("🔥 Preloading model...")
try:
    load_model("🚀 Qwen2.5 Coder 3B (Fast)")
except Exception as exc:
    print(f"⚠️ Preload failed ({exc}); model will be loaded on first request.")

# Launch the Gradio server; bind all interfaces on the standard Spaces port.
# ('title'/'theme' belong to Blocks, not launch, hence not passed here.)
demo.launch(server_name="0.0.0.0", server_port=7860)