"""Axon v6 — local AI coding assistant built on Gradio + llama.cpp.

Runs GGUF chat models locally via llama-cpp-python, with Whisper voice
input and 19 code-centric tools. Missing models are auto-downloaded
from Hugging Face on first use.
"""

import os  # FIX: was "mport os" — SyntaxError in the original paste
import json
import time
from datetime import datetime
from pathlib import Path

import gradio as gr
from llama_cpp import Llama
from faster_whisper import WhisperModel
from huggingface_hub import hf_hub_download  # Added for auto-download

# ===== CONFIG =====
MODELS_DIR = "/data/models"
MAX_TOKENS = 2048
CONTEXT_SIZE = 4096

# GGUF filename -> Hugging Face repo that actually hosts that quant.
MODEL_REPOS = {
    # 30B: Unsloth is the most reliable source for Qwen3 GGUFs currently
    "Qwen3-Coder-30B-A3B-Instruct-Q4_K_M.gguf": "unsloth/Qwen3-Coder-30B-A3B-Instruct-GGUF",
    # 3B: Qwen actually has an official one, but bartowski is safer fallback
    "qwen2.5-coder-3b-instruct-q4_k_m.gguf": "Qwen/Qwen2.5-Coder-3B-Instruct-GGUF",
    # 7B: Official Qwen GGUF is often missing/broken. Bartowski is the go-to here.
    "qwen2.5-coder-7b-instruct-q4_k_m.gguf": "bartowski/Qwen2.5-Coder-7B-Instruct-GGUF",
    # 14B: Bartowski is recommended for consistency
    "qwen2.5-coder-14b-instruct-q4_k_m.gguf": "bartowski/Qwen2.5-Coder-14B-Instruct-GGUF",
    # DeepSeek: Definitely needs community repo
    "DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M.gguf": "bartowski/DeepSeek-Coder-V2-Lite-Instruct-GGUF",
    # FIX: entry was missing, so auto-download fell back to a Qwen repo that
    # does not host this file (guaranteed download failure).
    "deepseek-coder-6.7b-instruct.Q4_K_M.gguf": "TheBloke/deepseek-coder-6.7B-instruct-GGUF",
    # Tiny models
    "qwen2.5-coder-1.5b-instruct-q4_k_m.gguf": "Qwen/Qwen2.5-Coder-1.5B-Instruct-GGUF",
    "qwen2.5-coder-0.5b-instruct-q4_k_m.gguf": "Qwen/Qwen2.5-Coder-0.5B-Instruct-GGUF",
}

# Dropdown label -> GGUF filename.
MODELS = {
    "⭐ Qwen3 Coder 30B-A3B (Best)": "Qwen3-Coder-30B-A3B-Instruct-Q4_K_M.gguf",
    "🏆 Qwen2.5 Coder 14B (Premium)": "qwen2.5-coder-14b-instruct-q4_k_m.gguf",
    "🧠 DeepSeek V2 Lite (Logic)": "DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M.gguf",
    "⚖️ Qwen2.5 Coder 7B (Balanced)": "qwen2.5-coder-7b-instruct-q4_k_m.gguf",
    "🚀 Qwen2.5 Coder 3B (Fast)": "qwen2.5-coder-3b-instruct-q4_k_m.gguf",
    "⚡ DeepSeek Coder 6.7B": "deepseek-coder-6.7b-instruct.Q4_K_M.gguf",
    "💨 Qwen2.5 Coder 1.5B (Quick)": "qwen2.5-coder-1.5b-instruct-q4_k_m.gguf",
    "🔬 Qwen2.5 Coder 0.5B (Instant)": "qwen2.5-coder-0.5b-instruct-q4_k_m.gguf",
}

# Short blurb shown under the model dropdown.
MODEL_INFO = {
    "⭐ Qwen3 Coder 30B-A3B (Best)": "🏆 Best quality • MoE 30B/3B • ~10GB",
    "🏆 Qwen2.5 Coder 14B (Premium)": "💎 Premium • ~8GB • Complex tasks",
    "🧠 DeepSeek V2 Lite (Logic)": "🧠 MoE 16B • ~9GB • Algorithms",
    "⚖️ Qwen2.5 Coder 7B (Balanced)": "⚖️ Balanced • ~4.5GB • Recommended",
    "🚀 Qwen2.5 Coder 3B (Fast)": "🚀 Fast • ~2GB • Great all-rounder",
    "⚡ DeepSeek Coder 6.7B": "⚡ Logic focused • ~4GB",
    "💨 Qwen2.5 Coder 1.5B (Quick)": "💨 Quick • ~1GB • Simple tasks",
    "🔬 Qwen2.5 Coder 0.5B (Instant)": "🔬 Instant • ~0.3GB • Lightning fast",
}

LANGUAGES = [
    "Python", "JavaScript", "TypeScript", "Go", "Rust", "Java", "C++", "C#",
    "C", "PHP", "Ruby", "Swift", "Kotlin", "Scala", "R", "Julia", "Perl",
    "HTML/CSS", "SQL", "Bash", "PowerShell", "Lua",
]

# ===== MODEL CACHE =====
# Only one model is kept resident at a time (RAM-constrained host).
loaded_models = {}
current_model_name = None


def load_model(model_name):
    """Return a cached Llama for *model_name*, loading (and if needed
    downloading) it first. Unloads the previously active model. Returns
    None on unknown name, failed download, or failed load."""
    global loaded_models, current_model_name

    if model_name == current_model_name and model_name in loaded_models:
        return loaded_models[model_name]

    # Evict the previously loaded model to keep memory bounded.
    if current_model_name and current_model_name != model_name:
        if current_model_name in loaded_models:
            del loaded_models[current_model_name]
            print(f"🗑️ Unloaded {current_model_name}")

    filename = MODELS.get(model_name)
    if not filename:
        return None

    model_path = os.path.join(MODELS_DIR, filename)

    # --- AUTO DOWNLOAD LOGIC ---
    if not os.path.exists(model_path):
        # FIX: original printed a literal "(unknown)" instead of the filename.
        print(f"⬇️ Model not found. Attempting download for {filename}...")
        repo_id = MODEL_REPOS.get(filename, "Qwen/Qwen2.5-Coder-3B-Instruct-GGUF")  # Default fallback
        try:
            hf_hub_download(
                repo_id=repo_id,
                filename=filename,
                local_dir=MODELS_DIR,
                local_dir_use_symlinks=False,  # deprecated upstream but still accepted
            )
            print("✅ Download complete!")
        except Exception as e:
            print(f"❌ Download failed: {e}")
            return None

    print(f"📥 Loading {model_name}...")
    try:
        llm = Llama(
            model_path=model_path,
            n_ctx=CONTEXT_SIZE,
            n_threads=4,
            n_batch=512,
            verbose=False,
        )
        loaded_models[model_name] = llm
        current_model_name = model_name
        print(f"✅ {model_name} loaded!")
        return llm
    except Exception as e:
        print(f"❌ Failed to load: {e}")
        return None


# ===== WHISPER =====
whisper_model = None


def init_whisper():
    """Load the tiny Whisper model (CPU, int8) for voice transcription.
    Failure is non-fatal: transcription reports unavailability instead."""
    global whisper_model
    try:
        print("Loading Whisper...")
        whisper_model = WhisperModel("tiny", device="cpu", compute_type="int8")
        print("✅ Whisper ready!")
    except Exception as e:
        print(f"❌ Whisper failed: {e}")


init_whisper()


# ===== HELPERS =====
def get_status():
    """Status line: count of locally present models + the active one."""
    available = [name for name, file in MODELS.items()
                 if os.path.exists(os.path.join(MODELS_DIR, file))]
    if current_model_name:
        # e.g. "🚀 Qwen2.5 Coder 3B (Fast)" -> "3B"
        short = current_model_name.split('(')[0].strip().split()[-1]
        return f"🟢 Ready • {len(available)} models • Active: {short}"
    return f"🟡 {len(available)} models available"


def get_model_info(model_name):
    """Blurb for the dropdown's info line ('' for unknown names)."""
    return MODEL_INFO.get(model_name, "")


def validate_input(text, name="Input"):
    """Return (ok, error_message). Rejects empty and >50k-char inputs."""
    if not text or not text.strip():
        return False, f"⚠️ {name} cannot be empty."
    if len(text) > 50000:
        return False, f"⚠️ {name} too long."
    return True, None


def transcribe_audio(audio):
    """Transcribe a recorded audio file path to text via Whisper."""
    if not audio:
        return ""
    if not whisper_model:
        return "❌ Whisper unavailable."
    try:
        segments, _ = whisper_model.transcribe(audio)
        return " ".join([s.text for s in segments]).strip() or "⚠️ No speech detected."
    except Exception as e:
        return f"❌ {str(e)[:50]}"


def generate_response(model_name, prompt, temperature=0.7, max_tokens=2048):
    """Single-shot (non-streaming) completion with per-family prompt
    formatting: Alpaca-style for DeepSeek, ChatML for Qwen."""
    llm = load_model(model_name)
    if not llm:
        return "❌ **Model not available.**"
    try:
        if "deepseek" in model_name.lower():
            formatted = f"### Instruction:\n{prompt}\n\n### Response:\n"
            stop_tokens = ["### Instruction:", "### Response:"]
        else:
            formatted = (
                "<|im_start|>system\nYou are an expert coding assistant.<|im_end|>\n"
                f"<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant\n"
            )
            stop_tokens = ["<|im_end|>", "<|im_start|>"]
        output = llm(
            formatted,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=0.9,
            top_k=40,
            repeat_penalty=1.1,
            stop=stop_tokens,
            echo=False,
        )
        response = output["choices"][0]["text"].strip()
        return response if response else "⚠️ Empty response."
    except Exception as e:
        return f"❌ **Error:** {str(e)[:100]}"


def extract_code(text):
    """Pull the first fenced code block out of *text* (drops the language
    tag line); returns *text* unchanged when no fence is present."""
    if not text or "```" not in text:
        return text
    try:
        parts = text.split("```")
        if len(parts) >= 2:
            code = parts[1]
            if "\n" in code:
                code = code.split("\n", 1)[-1]  # drop the ```lang line
            return code.strip()
    except Exception:
        pass
    return text


# ===== HISTORY FUNCTIONS =====
def export_chat_history(history):
    """Dump the Gradio-5 messages history to a timestamped JSON in /tmp.
    Returns (filepath_or_None, status_message)."""
    if not history:
        return None, "⚠️ No chat history to export."
    export = {
        "exported_at": datetime.now().isoformat(),
        "tool": "Axon v6 Chat",
        "messages": history,  # Direct dump for Gradio 5 format
    }
    filename = f"/tmp/axon_chat_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
    with open(filename, "w") as f:
        json.dump(export, f, indent=2)
    return filename, f"✅ Exported {len(history)} messages!"


def export_code(code, language):
    """Write *code* to /tmp with an extension matching *language*.
    Returns (filepath_or_None, status_message)."""
    if not code or not code.strip():
        return None, "⚠️ No code to export."
    ext_map = {
        "Python": "py", "JavaScript": "js", "TypeScript": "ts", "Go": "go",
        "Rust": "rs", "Java": "java", "C++": "cpp", "C#": "cs", "C": "c",
        "PHP": "php", "Ruby": "rb", "Swift": "swift", "Kotlin": "kt",
        "HTML/CSS": "html", "SQL": "sql", "Bash": "sh", "PowerShell": "ps1",
        "Lua": "lua",
    }
    ext = ext_map.get(language, "txt")
    filename = f"/tmp/axon_code_{datetime.now().strftime('%Y%m%d_%H%M%S')}.{ext}"
    with open(filename, "w") as f:
        f.write(code)
    return filename, f"✅ Exported as .{ext}!"


# ===== STREAMING (UPDATED FOR GRADIO 5) =====
def chat_stream(message, history, model_name, temperature, max_tokens):
    """Stream a chat turn; yields the whole messages-format history list
    after each token chunk. Errors are appended as assistant messages."""
    history = history or []
    valid, error = validate_input(message, "Message")
    if not valid:
        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": error})
        yield history
        return

    llm = load_model(model_name)
    if not llm:
        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": "❌ Model not available."})
        yield history
        return

    # Flatten prior turns into the model's native prompt format.
    if "deepseek" in model_name.lower():
        conv = "### Instruction:\nYou are an expert coding assistant. Use markdown code blocks.\n\n"
        for msg in history:
            if msg['role'] == 'user':
                conv += f"User: {msg['content']}\n"
            else:
                conv += f"Assistant: {msg['content']}\n\n"
        conv += f"User: {message}\n\n### Response:\n"
        stop_tokens = ["### Instruction:", "User:"]
    else:
        conv = "<|im_start|>system\nYou are an expert coding assistant. Use markdown code blocks.<|im_end|>\n"
        for msg in history:
            role = msg['role']
            content = msg['content']
            conv += f"<|im_start|>{role}\n{content}<|im_end|>\n"
        conv += f"<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"
        stop_tokens = ["<|im_end|>", "<|im_start|>"]

    history.append({"role": "user", "content": message})
    history.append({"role": "assistant", "content": ""})
    try:
        full = ""
        for chunk in llm(conv, max_tokens=max_tokens, temperature=temperature,
                         top_p=0.9, stop=stop_tokens, stream=True):
            text_chunk = chunk["choices"][0]["text"]
            full += text_chunk
            history[-1]['content'] = full
            yield history
    except Exception as e:
        history[-1]['content'] = f"❌ Error: {str(e)[:100]}"
        yield history


def generate_stream(prompt, language, model_name, temperature, max_tokens):
    """Stream code generation; yields the extracted code so far."""
    valid, error = validate_input(prompt, "Description")
    if not valid:
        yield error
        return
    llm = load_model(model_name)
    if not llm:
        yield "❌ Model not available."
        return
    if "deepseek" in model_name.lower():
        formatted = (
            f"### Instruction:\nWrite clean {language} code with comments:\n{prompt}\n\n"
            "Output only code:\n\n### Response:\n"
        )
        stop_tokens = ["### Instruction:"]
    else:
        formatted = (
            "<|im_start|>system\nYou are an expert coder.<|im_end|>\n"
            f"<|im_start|>user\nWrite clean {language} code with comments:\n{prompt}\n\n"
            "Output only code:<|im_end|>\n<|im_start|>assistant\n"
        )
        stop_tokens = ["<|im_end|>"]
    try:
        full = ""
        for chunk in llm(formatted, max_tokens=max_tokens, temperature=temperature,
                         stop=stop_tokens, stream=True):
            full += chunk["choices"][0]["text"]
            yield extract_code(full)
    except Exception as e:
        yield f"❌ {str(e)[:50]}"


# ===== CORE FEATURES =====
def explain_code(code, model_name, detail, max_tokens):
    """Explain *code* at the requested detail level."""
    valid, err = validate_input(code, "Code")
    if not valid:
        return err
    prompts = {
        "Brief": f"Explain briefly (2-3 sentences):\n{code}",
        "Normal": f"Explain this code:\n{code}",
        "Detailed": f"Detailed explanation (purpose, logic, complexity, improvements):\n{code}",
    }
    return generate_response(model_name, prompts.get(detail, prompts["Normal"]), 0.5, max_tokens)


def fix_code(code, error_msg, model_name, max_tokens):
    """Ask the model to repair *code* given an (optional) error message."""
    valid, err = validate_input(code, "Code")
    if not valid:
        return err
    e = error_msg.strip() if error_msg else "Not working"
    return generate_response(
        model_name,
        f"Fix this code. Error: {e}\n\n{code}\n\nFixed code and explanation:",
        0.3, max_tokens)


def review_code(code, model_name, max_tokens):
    """Review *code* for bugs, performance and security issues."""
    valid, err = validate_input(code, "Code")
    if not valid:
        return err
    return generate_response(
        model_name, f"Review for bugs, performance, security:\n{code}", 0.4, max_tokens)


def convert_code(code, from_lang, to_lang, model_name, max_tokens):
    """Translate *code* between languages; returns bare code."""
    valid, err = validate_input(code, "Code")
    if not valid:
        return err
    if from_lang == to_lang:
        return "⚠️ Same language."
    result = generate_response(
        model_name, f"Convert {from_lang} to {to_lang}. Code only:\n{code}", 0.3, max_tokens)
    return result if result.startswith("❌") else extract_code(result)


def generate_tests(code, language, framework, model_name, max_tokens):
    """Generate unit tests for *code* with the given framework (default pytest)."""
    valid, err = validate_input(code, "Code")
    if not valid:
        return err
    fw = framework.strip() if framework else "pytest"
    result = generate_response(
        model_name, f"Generate {fw} tests for {language}. Code only:\n{code}", 0.3, max_tokens)
    return result if result.startswith("❌") else extract_code(result)


def document_code(code, language, style, model_name, max_tokens):
    """Add docs to *code*; README output is kept as prose, not extracted."""
    valid, err = validate_input(code, "Code")
    if not valid:
        return err
    result = generate_response(
        model_name, f"Add {style.lower()} to this {language} code:\n{code}", 0.4, max_tokens)
    return result if style == "README" or result.startswith("❌") else extract_code(result)


def optimize_code(code, language, focus, model_name, max_tokens):
    """Optimize *code* for the chosen focus, with an explanation."""
    valid, err = validate_input(code, "Code")
    if not valid:
        return err
    return generate_response(
        model_name, f"Optimize {language} for {focus.lower()}. Explain:\n{code}", 0.3, max_tokens)


def security_scan(code, model_name, max_tokens):
    """Run a prompted security audit over *code*."""
    valid, err = validate_input(code, "Code")
    if not valid:
        return err
    prompt = """Security audit this code. Check for:
1. Injection vulnerabilities (SQL, XSS, Command)
2. Authentication issues
3. Data exposure
4. Input validation
5. Cryptography issues

For each issue: Severity (🔴🟠🟡🟢), Location, Description, Fix.

Code:
""" + code
    return generate_response(model_name, prompt, 0.3, max_tokens)


def analyze_complexity(code, model_name, max_tokens):
    """Ask for a Big-O time/space analysis of *code*."""
    valid, err = validate_input(code, "Code")
    if not valid:
        return err
    prompt = """Analyze time and space complexity:
1. Time Complexity (Big O)
2. Space Complexity (Big O)
3. Best/Average/Worst cases
4. Bottlenecks
5. Optimization suggestions

Code:
""" + code
    return generate_response(model_name, prompt, 0.4, max_tokens)


def build_sql(description, db_type, model_name, max_tokens):
    """Generate SQL for *description* against the given dialect."""
    valid, err = validate_input(description, "Description")
    if not valid:
        return err
    result = generate_response(
        model_name, f"Write optimized {db_type} SQL for:\n{description}\n\nSQL only:", 0.2, max_tokens)
    return result if result.startswith("❌") else extract_code(result)


def build_shell(description, shell_type, model_name, max_tokens):
    """Generate a shell command for *description*."""
    valid, err = validate_input(description, "Description")
    if not valid:
        return err
    result = generate_response(
        model_name, f"Write {shell_type} command for:\n{description}\n\nCommand only:", 0.2, max_tokens)
    return result if result.startswith("❌") else extract_code(result)


def code_diff(code1, code2, model_name, max_tokens):
    """Compare two snippets and summarize differences/impact."""
    v1, e1 = validate_input(code1, "Code 1")
    v2, e2 = validate_input(code2, "Code 2")
    if not v1:
        return e1
    if not v2:
        return e2
    prompt = f"""Compare these code snippets:
1. Key differences
2. Functionality changes
3. Performance impact
4. Which is better and why

=== CODE 1 ===
{code1}

=== CODE 2 ===
{code2}"""
    return generate_response(model_name, prompt, 0.4, max_tokens)


def generate_mock_data(schema, count, format_type, model_name, max_tokens):
    """Generate *count* mock records matching *schema* as JSON/CSV/SQL."""
    valid, err = validate_input(schema, "Schema")
    if not valid:
        return err
    result = generate_response(
        model_name,
        f"Generate {count} realistic mock entries as {format_type}:\n{schema}",
        0.7, max_tokens)
    return result if result.startswith("❌") else extract_code(result)


def interview_challenge(topic, difficulty, language, model_name, max_tokens):
    """Produce a full interview problem (statement, examples, solution)."""
    valid, err = validate_input(topic, "Topic")
    if not valid:
        return err
    prompt = f"""Create {difficulty} {language} interview challenge about {topic}.

Include:
1. Problem statement
2. Examples (2-3)
3. Constraints
4. Hints
5. Solution with explanation"""
    return generate_response(model_name, prompt, 0.6, max_tokens)


def to_pseudocode(code, output_type, model_name, max_tokens):
    """Convert *code* to pseudocode or a Mermaid.js flowchart."""
    valid, err = validate_input(code, "Code")
    if not valid:
        return err
    if output_type == "Pseudocode":
        prompt = f"Convert to pseudocode:\n{code}"
    else:
        prompt = f"Create Mermaid.js flowchart for:\n{code}"
    return generate_response(model_name, prompt, 0.3, max_tokens)


def build_cron(description, model_name, max_tokens):
    """Build a cron expression (with breakdown) for *description*."""
    valid, err = validate_input(description, "Description")
    if not valid:
        return err
    return generate_response(
        model_name,
        f"Create cron expression for: {description}\n\nInclude: expression, breakdown, next 5 runs",
        0.2, max_tokens)


def build_regex(description, model_name, max_tokens):
    """Build a regex (pattern + explanation + examples) for *description*."""
    valid, err = validate_input(description, "Description")
    if not valid:
        return err
    return generate_response(
        model_name,
        f"Create regex for: {description}\n\nPattern, explanation, examples, Python code:",
        0.3, max_tokens)


def build_api(description, framework, model_name, max_tokens):
    """Generate a REST endpoint for *description* in the chosen framework."""
    valid, err = validate_input(description, "Description")
    if not valid:
        return err
    result = generate_response(
        model_name, f"Create {framework} REST endpoint:\n{description}\n\nCode:", 0.3, max_tokens)
    return result if result.startswith("❌") else extract_code(result)


def convert_data_format(data, from_fmt, to_fmt, model_name, max_tokens):
    """Convert *data* between JSON/YAML/XML/CSV."""
    valid, err = validate_input(data, "Data")
    if not valid:
        return err
    if from_fmt == to_fmt:
        return "⚠️ Same format."
    result = generate_response(
        model_name, f"Convert {from_fmt} to {to_fmt}:\n{data}\n\nOutput only:", 0.1, max_tokens)
    return result if result.startswith("❌") else extract_code(result)


# ===== THEME =====
light_theme = gr.themes.Soft(
    primary_hue="indigo",
    secondary_hue="blue",
)

dark_theme = gr.themes.Soft(
    primary_hue="indigo",
    secondary_hue="blue",
).set(
    body_background_fill="#0f172a",
    body_background_fill_dark="#0f172a",
    block_background_fill="#1e293b",
    block_background_fill_dark="#1e293b",
    border_color_primary="#334155",
    border_color_primary_dark="#334155",
)

# ===== UI =====
# FIX: Title and theme moved here (Blocks), not launch().
# NOTE(review): the HTML blocks below lost their markup in the paste; the
# tags are reconstructed around the surviving text — confirm against the
# original layout.
with gr.Blocks(title="Axon v6", theme=dark_theme) as demo:
    # State for theme
    is_dark = gr.State(True)

    # Header
    gr.HTML("""
    <div style="text-align:center">
        <h1>🔥 Axon v6</h1>
        <p>AI Coding Assistant • 8 Models • 19 Tools • 100% Local</p>
        <p>🤖 8 Models &nbsp; 🛠️ 19 Tools &nbsp; ⚡ llama.cpp</p>
    </div>
    """)

    # Status row (auto-refreshes every 5 s)
    with gr.Row():
        status = gr.Markdown(value=get_status, every=5)

    # Settings
    with gr.Row():
        model_dropdown = gr.Dropdown(choices=list(MODELS.keys()),
                                     value="🚀 Qwen2.5 Coder 3B (Fast)",
                                     label="🤖 Model", scale=3)
        temperature = gr.Slider(0, 1, value=0.7, step=0.1, label="🌡️ Creativity", scale=2)
        max_tokens = gr.Slider(256, 4096, value=2048, step=256, label="📏 Max Tokens", scale=2)
    model_info = gr.Markdown(value="🚀 Fast • ~2GB • Great all-rounder")
    model_dropdown.change(get_model_info, model_dropdown, model_info)

    with gr.Tabs():
        # ===== HOME =====
        with gr.TabItem("🏠 Home"):
            gr.HTML("""
            <div>
                <h2>Welcome to Axon v6! 🔥</h2>
                <p>The ultimate free AI coding assistant - running 100% locally on your browser.</p>
                <h3>🚀 Quick Start</h3>
                <ol>
                    <li>Select a model from the dropdown above</li>
                    <li>Choose a tool from the tabs</li>
                    <li>Start coding!</li>
                </ol>
                <h3>🤖 Models</h3>
                <table>
                    <tr><th>Model</th><th>Size</th><th>Best For</th></tr>
                    <tr><td>⭐ Qwen3 30B-A3B</td><td>~10GB</td><td>Best quality (MoE)</td></tr>
                    <tr><td>🏆 Qwen2.5 14B</td><td>~8GB</td><td>Premium tasks</td></tr>
                    <tr><td>🧠 DeepSeek V2 Lite</td><td>~9GB</td><td>Complex logic</td></tr>
                    <tr><td>⚖️ Qwen2.5 7B</td><td>~4.5GB</td><td>Balanced</td></tr>
                    <tr><td>🚀 Qwen2.5 3B</td><td>~2GB</td><td>Fast &amp; capable</td></tr>
                    <tr><td>⚡ DeepSeek 6.7B</td><td>~4GB</td><td>Algorithms</td></tr>
                    <tr><td>💨 Qwen2.5 1.5B</td><td>~1GB</td><td>Quick tasks</td></tr>
                    <tr><td>🔬 Qwen2.5 0.5B</td><td>~0.3GB</td><td>Instant</td></tr>
                </table>
                <h3>🛠️ 19 Tools Available</h3>
                <p><b>Core:</b> Chat, Generate, Explain, Debug, Review<br>
                   <b>Advanced:</b> Security, Complexity, Convert, Test, Document, Optimize, Diff, Pseudo, Interview<br>
                   <b>Builders:</b> SQL, Shell, Cron, Regex, API<br>
                   <b>Data:</b> Mock Data, Format Converter</p>
                <h3>🔗 Links</h3>
                <p>🛞 Pre-built Wheels • 📦 llama.cpp • 🤖 Qwen Models</p>
            </div>
            """)

            # Share buttons
            gr.HTML("""
            <div style="text-align:center">
                <h3>📤 Share Axon</h3>
                <p>🐦 Twitter &nbsp; 🤖 Reddit &nbsp; 💼 LinkedIn &nbsp; 🤗 HuggingFace</p>
            </div>
            """)

        # ===== CHAT =====
        with gr.TabItem("💬 Chat"):
            # FIX: chat_stream yields {"role","content"} dicts, so the
            # messages format must be declared explicitly.
            chatbot = gr.Chatbot(height=400, type="messages")
            with gr.Row():
                msg = gr.Textbox(placeholder="Ask anything...", show_label=False, scale=8)
                send = gr.Button("Send", variant="primary", scale=1)
            with gr.Row():
                audio = gr.Audio(sources=["microphone"], type="filepath", label="🎤", scale=2)
                transcribe = gr.Button("🎤 Transcribe", scale=1)
                clear = gr.Button("🗑️ Clear", scale=1)
                export_chat_btn = gr.Button("💾 Export", scale=1)
            chat_export_file = gr.File(label="Download", visible=False)
            chat_export_status = gr.Markdown("")

        # ===== GENERATE =====
        with gr.TabItem("⚡ Generate"):
            with gr.Row():
                with gr.Column():
                    gen_prompt = gr.Textbox(label="📝 Describe", lines=3)
                    with gr.Row():
                        gen_lang = gr.Dropdown(LANGUAGES, value="Python", label="Language")
                        gen_temp = gr.Slider(0, 1, value=0.3, step=0.1, label="🌡️")
                    gen_btn = gr.Button("⚡ Generate", variant="primary")
                with gr.Column():
                    gen_output = gr.Code(label="Code", language="python", lines=14)
                    with gr.Row():
                        gen_export_btn = gr.Button("💾 Export Code")
                    gen_export_status = gr.Markdown("")
                    gen_export_file = gr.File(label="Download", visible=False)

        # ===== EXPLAIN =====
        with gr.TabItem("🔍 Explain"):
            with gr.Row():
                with gr.Column():  # FIXED: used to be Column()
                    explain_input = gr.Code(label="Code", lines=10)
                    explain_detail = gr.Radio(["Brief", "Normal", "Detailed"], value="Normal")
                    explain_btn = gr.Button("🔍 Explain", variant="primary")
                with gr.Column():
                    explain_output = gr.Markdown()

        # ===== DEBUG =====
        with gr.TabItem("🔧 Debug"):
            with gr.Row():
                with gr.Column():
                    fix_input = gr.Code(label="Code", lines=8)
                    fix_error = gr.Textbox(label="Error", lines=2)
                    fix_btn = gr.Button("🔧 Fix", variant="primary")
                with gr.Column():
                    fix_output = gr.Markdown()

        # ===== REVIEW =====
        with gr.TabItem("📋 Review"):
            with gr.Row():
                with gr.Column():
                    review_input = gr.Code(label="Code", lines=10)
                    review_btn = gr.Button("📋 Review", variant="primary")
                with gr.Column():
                    review_output = gr.Markdown()

        # ===== SECURITY =====
        with gr.TabItem("🔐 Security"):
            with gr.Row():
                with gr.Column():
                    security_input = gr.Code(label="Code", lines=10)
                    security_btn = gr.Button("🔐 Scan", variant="primary")
                with gr.Column():
                    security_output = gr.Markdown()

        # ===== COMPLEXITY =====
        with gr.TabItem("📊 Complexity"):
            with gr.Row():
                with gr.Column():
                    complexity_input = gr.Code(label="Code", lines=10)
                    complexity_btn = gr.Button("📊 Analyze", variant="primary")
                with gr.Column():
                    complexity_output = gr.Markdown()

        # ===== CONVERT =====
        with gr.TabItem("🔄 Convert"):
            with gr.Row():
                with gr.Column():
                    convert_input = gr.Code(label="Source", lines=10)
                    with gr.Row():
                        convert_from = gr.Dropdown(LANGUAGES, value="Python", label="From")
                        convert_to = gr.Dropdown(LANGUAGES, value="JavaScript", label="To")
                    convert_btn = gr.Button("🔄 Convert", variant="primary")
                with gr.Column():
                    convert_output = gr.Code(label="Result", lines=10)

        # ===== TEST =====
        with gr.TabItem("🧪 Test"):
            with gr.Row():
                with gr.Column():
                    test_input = gr.Code(label="Code", lines=10)
                    with gr.Row():
                        test_lang = gr.Dropdown(LANGUAGES[:10], value="Python", label="Language")
                        test_fw = gr.Textbox(label="Framework", placeholder="pytest")
                    test_btn = gr.Button("🧪 Generate", variant="primary")
                with gr.Column():
                    test_output = gr.Code(label="Tests", lines=10)

        # ===== DOCUMENT =====
        with gr.TabItem("📝 Document"):
            with gr.Row():
                with gr.Column():
                    doc_input = gr.Code(label="Code", lines=10)
                    with gr.Row():
                        doc_lang = gr.Dropdown(LANGUAGES, value="Python", label="Language")
                        doc_style = gr.Dropdown(["Docstrings", "Comments", "Both", "README"], value="Both")
                    doc_btn = gr.Button("📝 Document", variant="primary")
                with gr.Column():
                    doc_output = gr.Code(label="Documented", lines=10)

        # ===== OPTIMIZE =====
        with gr.TabItem("🚀 Optimize"):
            with gr.Row():
                with gr.Column():
                    opt_input = gr.Code(label="Code", lines=10)
                    with gr.Row():
                        opt_lang = gr.Dropdown(LANGUAGES, value="Python", label="Language")
                        opt_focus = gr.Dropdown(["All", "Performance", "Readability", "Memory"], value="All")
                    opt_btn = gr.Button("🚀 Optimize", variant="primary")
                with gr.Column():
                    opt_output = gr.Markdown()

        # ===== DIFF =====
        with gr.TabItem("🔀 Diff"):
            with gr.Row():
                with gr.Column():
                    diff_code1 = gr.Code(label="Code 1", lines=8)
                    diff_code2 = gr.Code(label="Code 2", lines=8)
                    diff_btn = gr.Button("🔀 Compare", variant="primary")
                with gr.Column():
                    diff_output = gr.Markdown()

        # ===== PSEUDOCODE =====
        with gr.TabItem("📐 Pseudo"):
            with gr.Row():
                with gr.Column():
                    pseudo_input = gr.Code(label="Code", lines=10)
                    pseudo_type = gr.Radio(["Pseudocode", "Flowchart"], value="Pseudocode")
                    pseudo_btn = gr.Button("📐 Convert", variant="primary")
                with gr.Column():
                    pseudo_output = gr.Markdown()

        # ===== INTERVIEW =====
        with gr.TabItem("🎓 Interview"):
            with gr.Row():
                with gr.Column():
                    interview_topic = gr.Textbox(label="Topic", placeholder="Binary trees...")
                    with gr.Row():
                        interview_diff = gr.Dropdown(["Easy", "Medium", "Hard"], value="Medium")
                        interview_lang = gr.Dropdown(LANGUAGES[:8], value="Python")
                    interview_btn = gr.Button("🎓 Generate", variant="primary")
                with gr.Column():
                    interview_output = gr.Markdown()

        # ===== BUILDERS =====
        with gr.TabItem("🛠️ Builders"):
            gr.Markdown("### 🗄️ SQL")
            with gr.Row():
                with gr.Column():
                    sql_desc = gr.Textbox(label="Describe", lines=2)
                    sql_type = gr.Dropdown(["PostgreSQL", "MySQL", "SQLite"], value="PostgreSQL")
                    sql_btn = gr.Button("🗄️ Build", variant="primary")
                with gr.Column():
                    sql_output = gr.Code(lines=6)
            gr.Markdown("---\n### 🐚 Shell")
            with gr.Row():
                with gr.Column():
                    shell_desc = gr.Textbox(label="Describe", lines=2)
                    shell_type = gr.Dropdown(["Bash", "PowerShell", "Zsh"], value="Bash")
                    shell_btn = gr.Button("🐚 Build", variant="primary")
                with gr.Column():
                    shell_output = gr.Code(lines=6)
            gr.Markdown("---\n### ⏰ Cron")
            with gr.Row():
                with gr.Column():
                    cron_desc = gr.Textbox(label="Describe", lines=2)
                    cron_btn = gr.Button("⏰ Build", variant="primary")
                with gr.Column():
                    cron_output = gr.Markdown()
            gr.Markdown("---\n### 🎯 Regex")
            with gr.Row():
                with gr.Column():
                    regex_desc = gr.Textbox(label="Describe", lines=2)
                    regex_btn = gr.Button("🎯 Build", variant="primary")
                with gr.Column():
                    regex_output = gr.Markdown()
            gr.Markdown("---\n### 🔗 API")
            with gr.Row():
                with gr.Column():
                    api_desc = gr.Textbox(label="Describe", lines=2)
                    api_fw = gr.Dropdown(["FastAPI", "Express", "Flask"], value="FastAPI")
                    api_btn = gr.Button("🔗 Build", variant="primary")
                with gr.Column():
                    api_output = gr.Code(lines=8)

        # ===== DATA =====
        with gr.TabItem("📦 Data"):
            gr.Markdown("### 📦 Mock Data")
            with gr.Row():
                with gr.Column():
                    mock_schema = gr.Textbox(label="Schema", lines=2,
                                             placeholder="User: name, email, age...")
                    with gr.Row():
                        mock_count = gr.Slider(1, 20, value=5, step=1, label="Count")
                        mock_format = gr.Dropdown(["JSON", "CSV", "SQL"], value="JSON")
                    mock_btn = gr.Button("📦 Generate", variant="primary")
                with gr.Column():
                    mock_output = gr.Code(lines=10)
            gr.Markdown("---\n### 🔄 Format Converter")
            with gr.Row():
                with gr.Column():
                    format_input = gr.Code(label="Input", lines=6)
                    with gr.Row():
                        format_from = gr.Dropdown(["JSON", "YAML", "XML", "CSV"], value="JSON")
                        format_to = gr.Dropdown(["JSON", "YAML", "XML", "CSV"], value="YAML")
                    format_btn = gr.Button("🔄 Convert", variant="primary")
                with gr.Column():
                    format_output = gr.Code(label="Output", lines=6)

    # Footer
    gr.HTML("""
    <div style="text-align:center">
        <p>🔥 Axon v6 • Built with ❤️ by AIencoder</p>
        <p>Wheels: AIencoder/llama-cpp-wheels • Powered by llama.cpp</p>
    </div>
    """)

    # ===== EVENTS =====
    def respond(message, history, model, temp, tokens):
        """Bridge chat_stream to (chatbot, cleared textbox) outputs."""
        # Gradio 5 automatically handles history state
        for updated in chat_stream(message, history, model, temp, tokens):
            yield updated, ""

    msg.submit(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
    send.click(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
    clear.click(lambda: [], None, chatbot)
    transcribe.click(transcribe_audio, audio, msg)

    # Export handlers
    def handle_chat_export(history):
        """Export chat history and reveal the download widget on success."""
        file, status = export_chat_history(history)
        return gr.update(value=file, visible=file is not None), status

    def handle_code_export(code, lang):
        """Export generated code and reveal the download widget on success."""
        file, status = export_code(code, lang)
        return gr.update(value=file, visible=file is not None), status

    export_chat_btn.click(handle_chat_export, chatbot, [chat_export_file, chat_export_status])
    gen_export_btn.click(handle_code_export, [gen_output, gen_lang],
                         [gen_export_file, gen_export_status])

    gen_btn.click(generate_stream, [gen_prompt, gen_lang, model_dropdown, gen_temp, max_tokens], gen_output)
    explain_btn.click(explain_code, [explain_input, model_dropdown, explain_detail, max_tokens], explain_output)
    fix_btn.click(fix_code, [fix_input, fix_error, model_dropdown, max_tokens], fix_output)
    review_btn.click(review_code, [review_input, model_dropdown, max_tokens], review_output)
    convert_btn.click(convert_code, [convert_input, convert_from, convert_to, model_dropdown, max_tokens], convert_output)
    test_btn.click(generate_tests, [test_input, test_lang, test_fw, model_dropdown, max_tokens], test_output)
    doc_btn.click(document_code, [doc_input, doc_lang, doc_style, model_dropdown, max_tokens], doc_output)
    opt_btn.click(optimize_code, [opt_input, opt_lang, opt_focus, model_dropdown, max_tokens], opt_output)
    security_btn.click(security_scan, [security_input, model_dropdown, max_tokens], security_output)
    complexity_btn.click(analyze_complexity, [complexity_input, model_dropdown, max_tokens], complexity_output)
    diff_btn.click(code_diff, [diff_code1, diff_code2, model_dropdown, max_tokens], diff_output)
    pseudo_btn.click(to_pseudocode, [pseudo_input, pseudo_type, model_dropdown, max_tokens], pseudo_output)
    interview_btn.click(interview_challenge, [interview_topic, interview_diff, interview_lang, model_dropdown, max_tokens], interview_output)
    sql_btn.click(build_sql, [sql_desc, sql_type, model_dropdown, max_tokens], sql_output)
    shell_btn.click(build_shell, [shell_desc, shell_type, model_dropdown, max_tokens], shell_output)
    cron_btn.click(build_cron, [cron_desc, model_dropdown, max_tokens], cron_output)
    regex_btn.click(build_regex, [regex_desc, model_dropdown, max_tokens], regex_output)
    api_btn.click(build_api, [api_desc, api_fw, model_dropdown, max_tokens], api_output)
    mock_btn.click(generate_mock_data, [mock_schema, mock_count, mock_format, model_dropdown, max_tokens], mock_output)
    format_btn.click(convert_data_format, [format_input, format_from, format_to, model_dropdown, max_tokens], format_output)

# Preload the default (small) model so the first request is fast.
print("🔥 Preloading model...")
load_model("🚀 Qwen2.5 Coder 3B (Fast)")

# Launch (Removed 'title' and 'theme', they are in Blocks)
demo.launch(server_name="0.0.0.0", server_port=7860)