# Axon v6 — AI coding assistant (Hugging Face Space app.py)
import gc
import json
import os
import tempfile
import time
from datetime import datetime

import gradio as gr
from faster_whisper import WhisperModel
from huggingface_hub import hf_hub_download
from llama_cpp import Llama
# ===== CONFIG =====
# Directory where GGUF model files are cached / downloaded to.
MODELS_DIR = "/models"
# Prompt context window (tokens) passed to llama.cpp for every model.
CONTEXT_SIZE = 4096

# GGUF filename -> Hugging Face repo it is downloaded from.
MODEL_REPOS = {
    "qwen2.5-coder-7b-instruct-q4_k_m.gguf": "bartowski/Qwen2.5-Coder-7B-Instruct-GGUF",
    "qwen2.5-coder-3b-instruct-q4_k_m.gguf": "Qwen/Qwen2.5-Coder-3B-Instruct-GGUF",
    "qwen2.5-coder-1.5b-instruct-q4_k_m.gguf": "Qwen/Qwen2.5-Coder-1.5B-Instruct-GGUF",
    "qwen2.5-coder-0.5b-instruct-q4_k_m.gguf": "Qwen/Qwen2.5-Coder-0.5B-Instruct-GGUF",
    "DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M.gguf": "bartowski/DeepSeek-Coder-V2-Lite-Instruct-GGUF",
}

# UI display name -> GGUF filename. The display names are what the dropdown shows.
MODELS = {
    "🧠 DeepSeek V2 Lite (Best)": "DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M.gguf",
    "⚖️ Qwen2.5 Coder 7B (Balanced)": "qwen2.5-coder-7b-instruct-q4_k_m.gguf",
    "🚀 Qwen2.5 Coder 3B (Fast)": "qwen2.5-coder-3b-instruct-q4_k_m.gguf",
    "💨 Qwen2.5 Coder 1.5B (Quick)": "qwen2.5-coder-1.5b-instruct-q4_k_m.gguf",
    "🔬 Qwen2.5 Coder 0.5B (Instant)": "qwen2.5-coder-0.5b-instruct-q4_k_m.gguf",
}

# UI display name -> short blurb shown under the model dropdown.
MODEL_INFO = {
    "🧠 DeepSeek V2 Lite (Best)": "🏆 MoE 16B • ~9GB • Best quality",
    "⚖️ Qwen2.5 Coder 7B (Balanced)": "⚖️ Balanced • ~4.5GB • Recommended",
    "🚀 Qwen2.5 Coder 3B (Fast)": "🚀 Fast • ~2GB • Great all-rounder",
    "💨 Qwen2.5 Coder 1.5B (Quick)": "💨 Quick • ~1GB • Simple tasks",
    "🔬 Qwen2.5 Coder 0.5B (Instant)": "🔬 Instant • ~0.3GB • Lightning fast",
}

# Languages offered in the various dropdowns (several tabs slice this list).
LANGUAGES = [
    "Python", "JavaScript", "TypeScript", "Go", "Rust",
    "Java", "C++", "C#", "C", "PHP", "Ruby", "Swift", "Kotlin",
    "Scala", "R", "Julia", "Perl", "HTML/CSS", "SQL", "Bash", "PowerShell", "Lua"
]

# ===== MODEL CACHE =====
loaded_models = {}         # display name -> loaded Llama instance (at most one entry kept)
current_model_name = None  # display name of the currently active model, or None
def load_model(model_name):
    """Load (or return the already-loaded) llama.cpp model for *model_name*.

    Only one model is kept in memory at a time: selecting a different model
    evicts the previous one first. The GGUF file is downloaded from the
    Hugging Face Hub on first use.

    Returns:
        A ``Llama`` instance, or ``None`` if the name is unknown, the
        download fails, or loading fails.
    """
    global loaded_models, current_model_name
    # Fast path: the requested model is already active.
    if model_name == current_model_name and model_name in loaded_models:
        return loaded_models[model_name]
    # Evict the previously active model so we never hold two sets of weights.
    if current_model_name and current_model_name != model_name:
        if current_model_name in loaded_models:
            del loaded_models[current_model_name]
            gc.collect()  # encourage llama.cpp to release the weights now
            print(f"🗑️ Unloaded {current_model_name}")
        # The old model is gone either way; don't let get_status() keep
        # reporting it as active if the load below fails.
        current_model_name = None
    filename = MODELS.get(model_name)
    if not filename:
        return None
    model_path = os.path.join(MODELS_DIR, filename)
    # Auto-download if the GGUF file is not cached yet.
    if not os.path.exists(model_path):
        repo_id = MODEL_REPOS.get(filename)
        if repo_id:
            print(f"⬇️ Downloading {filename}...")
            try:
                hf_hub_download(repo_id=repo_id, filename=filename, local_dir=MODELS_DIR)
                print(f"✅ Downloaded {filename}")
            except Exception as e:
                print(f"❌ Download failed: {e}")
                return None
        else:
            return None
    print(f"📥 Loading {model_name}...")
    try:
        llm = Llama(
            model_path=model_path,
            n_ctx=CONTEXT_SIZE,
            n_threads=4,
            n_batch=512,
            verbose=False,
        )
        loaded_models[model_name] = llm
        current_model_name = model_name
        print(f"✅ {model_name} loaded!")
        return llm
    except Exception as e:
        print(f"❌ Failed to load: {e}")
        return None
# ===== WHISPER =====
# Speech-to-text model handle; stays None if loading fails (voice input disabled).
whisper_model = None
def init_whisper():
    """Load the faster-whisper "tiny" model for microphone transcription.

    Uses CPU with int8 quantization to keep memory use low. Failure is
    non-fatal: the app keeps running with voice input unavailable.
    """
    global whisper_model
    try:
        print("Loading Whisper...")
        whisper_model = WhisperModel("tiny", device="cpu", compute_type="int8")
        print("✅ Whisper ready!")
    except Exception as e:
        print(f"❌ Whisper failed: {e}")
# Initialize eagerly at import time so the first transcription has no load delay.
init_whisper()
| # ===== HELPERS ===== | |
def get_status():
    """Build the one-line header status: cached-model count plus active model."""
    cached = [
        display
        for display, fname in MODELS.items()
        if os.path.exists(os.path.join(MODELS_DIR, fname))
    ]
    if not current_model_name:
        return f"🟡 {len(cached)}/{len(MODELS)} models cached"
    # Derive a short label, e.g. "🚀 Qwen2.5 Coder 3B (Fast)" -> "3B".
    before_paren = current_model_name.split('(')[0]
    short = before_paren.strip().split()[-1]
    return f"🟢 Ready • {len(cached)}/{len(MODELS)} cached • Active: {short}"
def get_model_info(model_name):
    """Look up the short descriptive blurb for *model_name* ('' when unknown)."""
    description = MODEL_INFO.get(model_name)
    return description if description is not None else ""
def validate_input(text, name="Input"):
    """Check that *text* is non-blank and within the 50k-character limit.

    Returns:
        ``(True, None)`` when the input is acceptable, otherwise
        ``(False, warning_message)``.
    """
    stripped = text.strip() if text else ""
    if not stripped:
        return False, f"⚠️ {name} cannot be empty."
    if len(text) > 50000:
        return False, f"⚠️ {name} too long."
    return True, None
def transcribe_audio(audio):
    """Transcribe a recorded audio file to text with faster-whisper.

    Returns '' for no audio, a warning when no speech is found, and an
    error string (❌-prefixed) when whisper is unavailable or fails.
    """
    if not audio:
        return ""
    if whisper_model is None:
        return "❌ Whisper unavailable."
    try:
        segments, _ = whisper_model.transcribe(audio)
        text = " ".join(seg.text for seg in segments).strip()
        return text if text else "⚠️ No speech detected."
    except Exception as e:
        return f"❌ {str(e)[:50]}"
def generate_response(model_name, prompt, temperature=0.7, max_tokens=2048):
    """Run one non-streaming completion against the selected model.

    Formats *prompt* with the model family's chat template and returns the
    stripped completion text, or a ❌-prefixed error string on failure.
    """
    llm = load_model(model_name)
    if llm is None:
        return "❌ **Model not available.** Try selecting a different model."
    try:
        # Each model family has its own prompt template and stop sequences.
        if "deepseek" in model_name.lower():
            formatted = f"### Instruction:\n{prompt}\n\n### Response:\n"
            stop_tokens = ["### Instruction:", "### Response:"]
        else:
            formatted = (
                "<|im_start|>system\nYou are an expert coding assistant.<|im_end|>\n"
                f"<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant\n"
            )
            stop_tokens = ["<|im_end|>", "<|im_start|>"]
        completion = llm(
            formatted,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=0.9,
            top_k=40,
            repeat_penalty=1.1,
            stop=stop_tokens,
            echo=False,
        )
        text = completion["choices"][0]["text"].strip()
        return text if text else "⚠️ Empty response."
    except Exception as e:
        return f"❌ **Error:** {str(e)[:100]}"
def extract_code(text):
    """Extract the contents of the first ``` fenced code block in *text*.

    The language tag on the fence's first line (e.g. ``python``) is dropped.
    Text without a fence — including falsy input — is returned unchanged.
    """
    # FIX: removed the bare `except:` that wrapped this logic; nothing in the
    # body can raise, and a bare except would also swallow KeyboardInterrupt.
    if not text or "```" not in text:
        return text
    parts = text.split("```")
    if len(parts) >= 2:
        code = parts[1]  # everything between the first pair of fences
        if "\n" in code:
            # Drop the leading language-tag line when present.
            code = code.split("\n", 1)[-1]
        return code.strip()
    return text
| # ===== HISTORY ===== | |
def export_chat_history(history):
    """Write the chat history to a timestamped JSON file for download.

    Returns:
        ``(path, status_message)``; ``path`` is ``None`` when there is
        nothing to export or the write fails.
    """
    if not history:
        return None, "⚠️ No chat history to export."
    # FIX: tempfile.gettempdir() instead of a hard-coded /tmp (portable,
    # respects TMPDIR); ensure_ascii=False keeps emoji readable in the file.
    stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    filename = os.path.join(tempfile.gettempdir(), f"axon_chat_{stamp}.json")
    payload = {"exported_at": datetime.now().isoformat(), "messages": history}
    try:
        with open(filename, "w", encoding="utf-8") as f:
            json.dump(payload, f, indent=2, ensure_ascii=False)
    except OSError as e:
        return None, f"❌ Export failed: {e}"
    return filename, f"✅ Exported {len(history)} messages!"
def export_code(code, language):
    """Write generated code to a temp file with a language-appropriate extension.

    Returns:
        ``(path, status_message)``; ``path`` is ``None`` when there is no
        code to export or the write fails.
    """
    if not code or not code.strip():
        return None, "⚠️ No code to export."
    ext_map = {
        "Python": "py", "JavaScript": "js", "TypeScript": "ts", "Go": "go",
        "Rust": "rs", "Java": "java", "C++": "cpp", "C#": "cs", "C": "c",
        "PHP": "php", "Ruby": "rb", "Swift": "swift", "Kotlin": "kt",
        "HTML/CSS": "html", "SQL": "sql", "Bash": "sh", "PowerShell": "ps1",
        "Lua": "lua",
    }
    ext = ext_map.get(language, "txt")  # unknown languages fall back to .txt
    # FIX: tempfile.gettempdir() instead of hard-coded /tmp; write errors are
    # reported instead of crashing the UI callback.
    stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    filename = os.path.join(tempfile.gettempdir(), f"axon_code_{stamp}.{ext}")
    try:
        with open(filename, "w", encoding="utf-8") as f:
            f.write(code)
    except OSError as e:
        return None, f"❌ Export failed: {e}"
    return filename, f"✅ Exported as .{ext}!"
# ===== STREAMING =====
def chat_stream(message, history, model_name, temperature, max_tokens):
    """Stream an assistant reply for the chat tab.

    Generator: yields the complete (messages-format) history after each
    streamed token so the Chatbot component updates incrementally.
    Validation failures and load failures are surfaced as assistant
    messages rather than exceptions.
    """
    history = history or []
    valid, error = validate_input(message, "Message")
    if not valid:
        # Show the validation warning in the chat itself, then stop.
        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": error})
        yield history
        return
    llm = load_model(model_name)
    if not llm:
        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": "❌ Model not available."})
        yield history
        return
    # Re-serialize the whole conversation in the model family's template.
    if "deepseek" in model_name.lower():
        conv = "### Instruction:\nYou are an expert coding assistant. Use markdown code blocks.\n\n"
        for msg in history:
            conv += f"{'User' if msg['role']=='user' else 'Assistant'}: {msg['content']}\n\n"
        conv += f"User: {message}\n\n### Response:\n"
        stop_tokens = ["### Instruction:", "User:"]
    else:
        # ChatML template (Qwen family).
        conv = "<|im_start|>system\nYou are an expert coding assistant. Use markdown code blocks.<|im_end|>\n"
        for msg in history:
            conv += f"<|im_start|>{msg['role']}\n{msg['content']}<|im_end|>\n"
        conv += f"<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"
        stop_tokens = ["<|im_end|>", "<|im_start|>"]
    # Append the user turn plus an empty assistant turn that is filled in
    # as tokens stream back.
    history.append({"role": "user", "content": message})
    history.append({"role": "assistant", "content": ""})
    try:
        full = ""
        for chunk in llm(conv, max_tokens=max_tokens, temperature=temperature, top_p=0.9, stop=stop_tokens, stream=True):
            full += chunk["choices"][0]["text"]
            history[-1]['content'] = full
            yield history
    except Exception as e:
        # Replace the partial assistant turn with a truncated error message.
        history[-1]['content'] = f"❌ Error: {str(e)[:100]}"
        yield history
def generate_stream(prompt, language, model_name, temperature, max_tokens):
    """Stream generated code for the Generate tab.

    Generator: yields progressively larger snippets with the markdown fence
    already stripped (via extract_code) so the Code component shows raw code.
    """
    valid, error = validate_input(prompt, "Description")
    if not valid:
        yield error
        return
    llm = load_model(model_name)
    if not llm:
        yield "❌ Model not available."
        return
    # Choose the prompt template for the model family.
    if "deepseek" in model_name.lower():
        formatted = f"### Instruction:\nWrite clean {language} code with comments:\n{prompt}\n\n### Response:\n"
        stop_tokens = ["### Instruction:"]
    else:
        formatted = f"<|im_start|>system\nYou are an expert coder.<|im_end|>\n<|im_start|>user\nWrite clean {language} code with comments:\n{prompt}<|im_end|>\n<|im_start|>assistant\n"
        stop_tokens = ["<|im_end|>"]
    try:
        full = ""
        for chunk in llm(formatted, max_tokens=max_tokens, temperature=temperature, stop=stop_tokens, stream=True):
            full += chunk["choices"][0]["text"]
            yield extract_code(full)
    except Exception as e:
        yield f"❌ {str(e)[:50]}"
| # ===== FEATURES ===== | |
def explain_code(code, model_name, detail, max_tokens):
    """Explain a code snippet at the requested level of detail."""
    ok, problem = validate_input(code, "Code")
    if not ok:
        return problem
    by_detail = {
        "Brief": f"Explain briefly:\n{code}",
        "Normal": f"Explain this code:\n{code}",
        "Detailed": f"Detailed explanation:\n{code}",
    }
    prompt = by_detail.get(detail, by_detail["Normal"])
    return generate_response(model_name, prompt, 0.5, max_tokens)
def fix_code(code, error_msg, model_name, max_tokens):
    """Ask the model to repair broken code, optionally guided by an error message."""
    ok, problem = validate_input(code, "Code")
    if not ok:
        return problem
    context = error_msg or 'Not working'
    return generate_response(model_name, f"Fix this code. Error: {context}\n\n{code}", 0.3, max_tokens)
def review_code(code, model_name, max_tokens):
    """Run a general code review covering bugs, performance and security."""
    ok, problem = validate_input(code, "Code")
    if not ok:
        return problem
    return generate_response(model_name, f"Review for bugs, performance, security:\n{code}", 0.4, max_tokens)
def convert_code(code, from_lang, to_lang, model_name, max_tokens):
    """Translate code between languages, returning only the converted code."""
    ok, problem = validate_input(code, "Code")
    if not ok:
        return problem
    if from_lang == to_lang:
        return "⚠️ Same language."
    result = generate_response(model_name, f"Convert {from_lang} to {to_lang}. Code only:\n{code}", 0.3, max_tokens)
    if result.startswith("❌"):
        return result  # propagate model errors untouched
    return extract_code(result)
def generate_tests(code, language, framework, model_name, max_tokens):
    """Generate unit tests (defaults to pytest when no framework is given)."""
    ok, problem = validate_input(code, "Code")
    if not ok:
        return problem
    fw = framework or 'pytest'
    result = generate_response(model_name, f"Generate {fw} tests for {language}:\n{code}", 0.3, max_tokens)
    if result.startswith("❌"):
        return result
    return extract_code(result)
def document_code(code, language, style, model_name, max_tokens):
    """Add documentation (docstrings / comments / README) to the given code."""
    ok, problem = validate_input(code, "Code")
    if not ok:
        return problem
    result = generate_response(model_name, f"Add {style.lower()} to this {language} code:\n{code}", 0.4, max_tokens)
    # README output is prose, so keep it whole; errors also pass through.
    if style == "README" or result.startswith("❌"):
        return result
    return extract_code(result)
def optimize_code(code, language, focus, model_name, max_tokens):
    """Ask for an optimized version of the code with an explanation."""
    ok, problem = validate_input(code, "Code")
    if not ok:
        return problem
    return generate_response(model_name, f"Optimize {language} for {focus.lower()}. Explain:\n{code}", 0.3, max_tokens)
def security_scan(code, model_name, max_tokens):
    """Run a security audit prompt over the code."""
    ok, problem = validate_input(code, "Code")
    if not ok:
        return problem
    prompt = f"Security audit. Check for injection, auth issues, data exposure, input validation. For each: Severity, Location, Fix.\n\nCode:\n{code}"
    return generate_response(model_name, prompt, 0.3, max_tokens)
def analyze_complexity(code, model_name, max_tokens):
    """Estimate Big-O time/space complexity and point out bottlenecks."""
    ok, problem = validate_input(code, "Code")
    if not ok:
        return problem
    prompt = f"Analyze time/space complexity (Big O), bottlenecks, optimizations:\n{code}"
    return generate_response(model_name, prompt, 0.4, max_tokens)
def build_sql(description, db_type, model_name, max_tokens):
    """Generate a SQL statement for the chosen dialect from a description."""
    ok, problem = validate_input(description, "Description")
    if not ok:
        return problem
    result = generate_response(model_name, f"Write {db_type} SQL for:\n{description}", 0.2, max_tokens)
    if result.startswith("❌"):
        return result
    return extract_code(result)
def build_shell(description, shell_type, model_name, max_tokens):
    """Generate a shell command for the chosen shell from a description."""
    ok, problem = validate_input(description, "Description")
    if not ok:
        return problem
    result = generate_response(model_name, f"Write {shell_type} command for:\n{description}", 0.2, max_tokens)
    if result.startswith("❌"):
        return result
    return extract_code(result)
def code_diff(code1, code2, model_name, max_tokens):
    """Compare two code snippets and describe their differences."""
    ok1, problem1 = validate_input(code1, "Code 1")
    if not ok1:
        return problem1
    ok2, problem2 = validate_input(code2, "Code 2")
    if not ok2:
        return problem2
    prompt = f"Compare:\n=== CODE 1 ===\n{code1}\n\n=== CODE 2 ===\n{code2}"
    return generate_response(model_name, prompt, 0.4, max_tokens)
def generate_mock_data(schema, count, format_type, model_name, max_tokens):
    """Produce fake records matching a schema in the requested output format."""
    ok, problem = validate_input(schema, "Schema")
    if not ok:
        return problem
    result = generate_response(model_name, f"Generate {count} mock entries as {format_type}:\n{schema}", 0.7, max_tokens)
    if result.startswith("❌"):
        return result
    return extract_code(result)
def interview_challenge(topic, difficulty, language, model_name, max_tokens):
    """Create a coding-interview problem with examples, hints and a solution."""
    ok, problem = validate_input(topic, "Topic")
    if not ok:
        return problem
    prompt = f"Create {difficulty} {language} interview challenge about {topic}. Include problem, examples, constraints, hints, solution."
    return generate_response(model_name, prompt, 0.6, max_tokens)
def to_pseudocode(code, output_type, model_name, max_tokens):
    """Convert code into either pseudocode or a Mermaid flowchart."""
    ok, problem = validate_input(code, "Code")
    if not ok:
        return problem
    if output_type == "Pseudocode":
        prompt = f"Convert to pseudocode:\n{code}"
    else:
        prompt = f"Create Mermaid flowchart:\n{code}"
    return generate_response(model_name, prompt, 0.3, max_tokens)
def build_cron(description, model_name, max_tokens):
    """Build a cron expression (with breakdown and next runs) from plain English."""
    ok, problem = validate_input(description, "Description")
    if not ok:
        return problem
    prompt = f"Create cron expression for: {description}\nInclude: expression, breakdown, next 5 runs"
    return generate_response(model_name, prompt, 0.2, max_tokens)
def build_regex(description, model_name, max_tokens):
    """Build a regular expression with explanation and usage examples."""
    ok, problem = validate_input(description, "Description")
    if not ok:
        return problem
    prompt = f"Create regex for: {description}\nPattern, explanation, examples, Python code:"
    return generate_response(model_name, prompt, 0.3, max_tokens)
def build_api(description, framework, model_name, max_tokens):
    """Generate a REST endpoint for the chosen web framework."""
    ok, problem = validate_input(description, "Description")
    if not ok:
        return problem
    result = generate_response(model_name, f"Create {framework} REST endpoint:\n{description}", 0.3, max_tokens)
    if result.startswith("❌"):
        return result
    return extract_code(result)
def convert_data_format(data, from_fmt, to_fmt, model_name, max_tokens):
    """Convert structured data between JSON / YAML / XML / CSV."""
    ok, problem = validate_input(data, "Data")
    if not ok:
        return problem
    if from_fmt == to_fmt:
        return "⚠️ Same format."
    result = generate_response(model_name, f"Convert {from_fmt} to {to_fmt}:\n{data}", 0.1, max_tokens)
    if result.startswith("❌"):
        return result
    return extract_code(result)
| # ===== UI ===== | |
# Build the Gradio UI. FIX: the theme is set here on gr.Blocks() — it was
# previously passed to demo.launch(), which has no `theme` parameter.
with gr.Blocks(title="Axon v6", theme=gr.themes.Soft(primary_hue="indigo")) as demo:
    gr.HTML("""
    <div style="background: linear-gradient(135deg, #6366f1, #8b5cf6, #06b6d4); border-radius: 16px; padding: 24px; margin-bottom: 16px;">
        <h1 style="color: white; margin: 0; font-size: 2rem;">🔥 Axon v6</h1>
        <p style="color: rgba(255,255,255,0.9); margin: 4px 0 0 0;">AI Coding Assistant • 5 Models • 19 Tools • 100% Local</p>
        <div style="display: flex; gap: 8px; margin-top: 12px; flex-wrap: wrap;">
            <span style="background: rgba(255,255,255,0.2); padding: 4px 12px; border-radius: 20px; font-size: 0.8rem; color: white;">🤖 5 Models</span>
            <span style="background: rgba(255,255,255,0.2); padding: 4px 12px; border-radius: 20px; font-size: 0.8rem; color: white;">🛠️ 19 Tools</span>
            <span style="background: rgba(255,255,255,0.2); padding: 4px 12px; border-radius: 20px; font-size: 0.8rem; color: white;">⚡ llama.cpp</span>
        </div>
    </div>
    """)
    # Live status line, re-evaluated every 5 seconds.
    status = gr.Markdown(value=get_status, every=5)
    # Global generation controls shared by every tab.
    with gr.Row():
        model_dropdown = gr.Dropdown(choices=list(MODELS.keys()), value="🚀 Qwen2.5 Coder 3B (Fast)", label="🤖 Model", scale=3)
        temperature = gr.Slider(0, 1, value=0.7, step=0.1, label="🌡️ Creativity", scale=2)
        max_tokens = gr.Slider(256, 4096, value=2048, step=256, label="📏 Max Tokens", scale=2)
    model_info = gr.Markdown(value="🚀 Fast • ~2GB • Great all-rounder")
    model_dropdown.change(get_model_info, model_dropdown, model_info)
    with gr.Tabs():
        with gr.TabItem("🏠 Home"):
            gr.HTML("""
            <div style="padding: 20px;">
                <h2>Welcome to Axon v6! 🔥</h2>
                <p>Free AI coding assistant - 100% local, no API keys.</p>
                <h3>🚀 Quick Start</h3>
                <ol><li>Select a model</li><li>Choose a tool</li><li>Start coding!</li></ol>
                <p><em>Models download automatically on first use</em></p>
                <h3>🤖 Models</h3>
                <table style="width:100%;border-collapse:collapse;">
                    <tr style="background:rgba(99,102,241,0.2);"><th style="padding:8px;text-align:left;">Model</th><th>Size</th><th>Best For</th></tr>
                    <tr><td style="padding:8px;">🧠 DeepSeek V2 Lite</td><td>~9GB</td><td>Best quality</td></tr>
                    <tr><td style="padding:8px;">⚖️ Qwen2.5 7B</td><td>~4.5GB</td><td>Balanced</td></tr>
                    <tr><td style="padding:8px;">🚀 Qwen2.5 3B</td><td>~2GB</td><td>Fast</td></tr>
                    <tr><td style="padding:8px;">💨 Qwen2.5 1.5B</td><td>~1GB</td><td>Quick</td></tr>
                    <tr><td style="padding:8px;">🔬 Qwen2.5 0.5B</td><td>~0.3GB</td><td>Instant</td></tr>
                </table>
                <h3>📤 Share</h3>
                <div style="display:flex;gap:10px;flex-wrap:wrap;">
                    <a href="https://twitter.com/intent/tweet?text=Check%20out%20Axon%20v6!&url=https://huggingface.co/spaces/AIencoder/Axon" target="_blank" style="background:#1DA1F2;color:white;padding:8px 16px;border-radius:8px;text-decoration:none;">🐦 Twitter</a>
                    <a href="https://www.reddit.com/submit?url=https://huggingface.co/spaces/AIencoder/Axon&title=Axon%20v6" target="_blank" style="background:#FF4500;color:white;padding:8px 16px;border-radius:8px;text-decoration:none;">🤖 Reddit</a>
                    <a href="https://www.linkedin.com/sharing/share-offsite/?url=https://huggingface.co/spaces/AIencoder/Axon" target="_blank" style="background:#0A66C2;color:white;padding:8px 16px;border-radius:8px;text-decoration:none;">💼 LinkedIn</a>
                </div>
            </div>
            """)
        with gr.TabItem("💬 Chat"):
            # FIX: type="messages" — chat_stream yields {"role", "content"} dicts,
            # which the default (tuples) Chatbot format cannot render.
            chatbot = gr.Chatbot(height=400, type="messages")
            with gr.Row():
                msg = gr.Textbox(placeholder="Ask anything...", show_label=False, scale=8)
                send = gr.Button("Send", variant="primary", scale=1)
            with gr.Row():
                audio = gr.Audio(sources=["microphone"], type="filepath", label="🎤", scale=2)
                transcribe_btn = gr.Button("🎤", scale=1)
                clear = gr.Button("🗑️", scale=1)
                export_chat_btn = gr.Button("💾", scale=1)
            chat_export_file = gr.File(visible=False)
            chat_export_status = gr.Markdown("")
        with gr.TabItem("⚡ Generate"):
            with gr.Row():
                with gr.Column():
                    gen_prompt = gr.Textbox(label="📝 Describe", lines=3)
                    gen_lang = gr.Dropdown(LANGUAGES, value="Python", label="Language")
                    gen_btn = gr.Button("⚡ Generate", variant="primary")
                with gr.Column():
                    gen_output = gr.Code(label="Code", language="python", lines=14)
                    gen_export_btn = gr.Button("💾 Export")
                    gen_export_file = gr.File(visible=False)
                    gen_export_status = gr.Markdown("")
        with gr.TabItem("🔍 Explain"):
            with gr.Row():
                with gr.Column():
                    explain_input = gr.Code(label="Code", lines=10)
                    explain_detail = gr.Radio(["Brief", "Normal", "Detailed"], value="Normal")
                    explain_btn = gr.Button("🔍 Explain", variant="primary")
                with gr.Column():
                    explain_output = gr.Markdown()
        with gr.TabItem("🔧 Debug"):
            with gr.Row():
                with gr.Column():
                    fix_input = gr.Code(label="Code", lines=8)
                    fix_error = gr.Textbox(label="Error", lines=2)
                    fix_btn = gr.Button("🔧 Fix", variant="primary")
                with gr.Column():
                    fix_output = gr.Markdown()
        with gr.TabItem("📋 Review"):
            with gr.Row():
                with gr.Column():
                    review_input = gr.Code(label="Code", lines=10)
                    review_btn = gr.Button("📋 Review", variant="primary")
                with gr.Column():
                    review_output = gr.Markdown()
        with gr.TabItem("🔐 Security"):
            with gr.Row():
                with gr.Column():
                    security_input = gr.Code(label="Code", lines=10)
                    security_btn = gr.Button("🔐 Scan", variant="primary")
                with gr.Column():
                    security_output = gr.Markdown()
        with gr.TabItem("📊 Complexity"):
            with gr.Row():
                with gr.Column():
                    complexity_input = gr.Code(label="Code", lines=10)
                    complexity_btn = gr.Button("📊 Analyze", variant="primary")
                with gr.Column():
                    complexity_output = gr.Markdown()
        with gr.TabItem("🔄 Convert"):
            with gr.Row():
                with gr.Column():
                    convert_input = gr.Code(label="Source", lines=10)
                    with gr.Row():
                        convert_from = gr.Dropdown(LANGUAGES, value="Python", label="From")
                        convert_to = gr.Dropdown(LANGUAGES, value="JavaScript", label="To")
                    convert_btn = gr.Button("🔄 Convert", variant="primary")
                with gr.Column():
                    convert_output = gr.Code(label="Result", lines=10)
        with gr.TabItem("🧪 Test"):
            with gr.Row():
                with gr.Column():
                    test_input = gr.Code(label="Code", lines=10)
                    with gr.Row():
                        test_lang = gr.Dropdown(LANGUAGES[:10], value="Python", label="Language")
                        test_fw = gr.Textbox(label="Framework", placeholder="pytest")
                    test_btn = gr.Button("🧪 Generate", variant="primary")
                with gr.Column():
                    test_output = gr.Code(label="Tests", lines=10)
        with gr.TabItem("📝 Document"):
            with gr.Row():
                with gr.Column():
                    doc_input = gr.Code(label="Code", lines=10)
                    with gr.Row():
                        doc_lang = gr.Dropdown(LANGUAGES, value="Python", label="Language")
                        doc_style = gr.Dropdown(["Docstrings", "Comments", "Both", "README"], value="Both")
                    doc_btn = gr.Button("📝 Document", variant="primary")
                with gr.Column():
                    doc_output = gr.Code(label="Documented", lines=10)
        with gr.TabItem("🚀 Optimize"):
            with gr.Row():
                with gr.Column():
                    opt_input = gr.Code(label="Code", lines=10)
                    with gr.Row():
                        opt_lang = gr.Dropdown(LANGUAGES, value="Python", label="Language")
                        opt_focus = gr.Dropdown(["All", "Performance", "Readability", "Memory"], value="All")
                    opt_btn = gr.Button("🚀 Optimize", variant="primary")
                with gr.Column():
                    opt_output = gr.Markdown()
        with gr.TabItem("🔀 Diff"):
            with gr.Row():
                with gr.Column():
                    diff_code1 = gr.Code(label="Code 1", lines=8)
                    diff_code2 = gr.Code(label="Code 2", lines=8)
                    diff_btn = gr.Button("🔀 Compare", variant="primary")
                with gr.Column():
                    diff_output = gr.Markdown()
        with gr.TabItem("📐 Pseudo"):
            with gr.Row():
                with gr.Column():
                    pseudo_input = gr.Code(label="Code", lines=10)
                    pseudo_type = gr.Radio(["Pseudocode", "Flowchart"], value="Pseudocode")
                    pseudo_btn = gr.Button("📐 Convert", variant="primary")
                with gr.Column():
                    pseudo_output = gr.Markdown()
        with gr.TabItem("🎓 Interview"):
            with gr.Row():
                with gr.Column():
                    interview_topic = gr.Textbox(label="Topic", placeholder="Binary trees...")
                    with gr.Row():
                        interview_diff = gr.Dropdown(["Easy", "Medium", "Hard"], value="Medium")
                        interview_lang = gr.Dropdown(LANGUAGES[:8], value="Python")
                    interview_btn = gr.Button("🎓 Generate", variant="primary")
                with gr.Column():
                    interview_output = gr.Markdown()
        with gr.TabItem("🛠️ Builders"):
            gr.Markdown("### 🗄️ SQL")
            with gr.Row():
                with gr.Column():
                    sql_desc = gr.Textbox(label="Describe", lines=2)
                    sql_type = gr.Dropdown(["PostgreSQL", "MySQL", "SQLite"], value="PostgreSQL")
                    sql_btn = gr.Button("🗄️ Build", variant="primary")
                with gr.Column():
                    sql_output = gr.Code(lines=6)
            gr.Markdown("### 🐚 Shell")
            with gr.Row():
                with gr.Column():
                    shell_desc = gr.Textbox(label="Describe", lines=2)
                    shell_type = gr.Dropdown(["Bash", "PowerShell", "Zsh"], value="Bash")
                    shell_btn = gr.Button("🐚 Build", variant="primary")
                with gr.Column():
                    shell_output = gr.Code(lines=6)
            gr.Markdown("### ⏰ Cron")
            with gr.Row():
                with gr.Column():
                    cron_desc = gr.Textbox(label="Describe", lines=2)
                    cron_btn = gr.Button("⏰ Build", variant="primary")
                with gr.Column():
                    cron_output = gr.Markdown()
            gr.Markdown("### 🎯 Regex")
            with gr.Row():
                with gr.Column():
                    regex_desc = gr.Textbox(label="Describe", lines=2)
                    regex_btn = gr.Button("🎯 Build", variant="primary")
                with gr.Column():
                    regex_output = gr.Markdown()
            gr.Markdown("### 🔗 API")
            with gr.Row():
                with gr.Column():
                    api_desc = gr.Textbox(label="Describe", lines=2)
                    api_fw = gr.Dropdown(["FastAPI", "Express", "Flask"], value="FastAPI")
                    api_btn = gr.Button("🔗 Build", variant="primary")
                with gr.Column():
                    api_output = gr.Code(lines=8)
        with gr.TabItem("📦 Data"):
            gr.Markdown("### 📦 Mock Data")
            with gr.Row():
                with gr.Column():
                    mock_schema = gr.Textbox(label="Schema", lines=2, placeholder="User: name, email, age...")
                    with gr.Row():
                        mock_count = gr.Slider(1, 20, value=5, step=1, label="Count")
                        mock_format = gr.Dropdown(["JSON", "CSV", "SQL"], value="JSON")
                    mock_btn = gr.Button("📦 Generate", variant="primary")
                with gr.Column():
                    mock_output = gr.Code(lines=10)
            gr.Markdown("### 🔄 Format Converter")
            with gr.Row():
                with gr.Column():
                    format_input = gr.Code(label="Input", lines=6)
                    with gr.Row():
                        format_from = gr.Dropdown(["JSON", "YAML", "XML", "CSV"], value="JSON")
                        format_to = gr.Dropdown(["JSON", "YAML", "XML", "CSV"], value="YAML")
                    format_btn = gr.Button("🔄 Convert", variant="primary")
                with gr.Column():
                    format_output = gr.Code(label="Output", lines=6)
    gr.HTML('<div style="text-align:center;padding:16px;opacity:0.6;">🔥 Axon v6 • <a href="https://huggingface.co/AIencoder">AIencoder</a> • <a href="https://huggingface.co/datasets/AIencoder/llama-cpp-wheels">Wheels</a></div>')

    # ---- Event wiring ----
    def respond(message, history, model, temp, tokens):
        """Stream chat updates into the chatbot while clearing the textbox."""
        for updated in chat_stream(message, history, model, temp, tokens):
            yield updated, ""
    msg.submit(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
    send.click(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
    clear.click(lambda: [], None, chatbot)
    transcribe_btn.click(transcribe_audio, audio, msg)

    def handle_chat_export(history):
        """Export the chat and reveal the download widget on success."""
        # Local renamed to avoid shadowing the `status` Markdown component.
        file, status_msg = export_chat_history(history)
        return gr.update(value=file, visible=file is not None), status_msg

    def handle_code_export(code, lang):
        """Export generated code and reveal the download widget on success."""
        file, status_msg = export_code(code, lang)
        return gr.update(value=file, visible=file is not None), status_msg
    export_chat_btn.click(handle_chat_export, chatbot, [chat_export_file, chat_export_status])
    gen_export_btn.click(handle_code_export, [gen_output, gen_lang], [gen_export_file, gen_export_status])
    gen_btn.click(generate_stream, [gen_prompt, gen_lang, model_dropdown, temperature, max_tokens], gen_output)
    explain_btn.click(explain_code, [explain_input, model_dropdown, explain_detail, max_tokens], explain_output)
    fix_btn.click(fix_code, [fix_input, fix_error, model_dropdown, max_tokens], fix_output)
    review_btn.click(review_code, [review_input, model_dropdown, max_tokens], review_output)
    convert_btn.click(convert_code, [convert_input, convert_from, convert_to, model_dropdown, max_tokens], convert_output)
    test_btn.click(generate_tests, [test_input, test_lang, test_fw, model_dropdown, max_tokens], test_output)
    doc_btn.click(document_code, [doc_input, doc_lang, doc_style, model_dropdown, max_tokens], doc_output)
    opt_btn.click(optimize_code, [opt_input, opt_lang, opt_focus, model_dropdown, max_tokens], opt_output)
    security_btn.click(security_scan, [security_input, model_dropdown, max_tokens], security_output)
    complexity_btn.click(analyze_complexity, [complexity_input, model_dropdown, max_tokens], complexity_output)
    diff_btn.click(code_diff, [diff_code1, diff_code2, model_dropdown, max_tokens], diff_output)
    pseudo_btn.click(to_pseudocode, [pseudo_input, pseudo_type, model_dropdown, max_tokens], pseudo_output)
    interview_btn.click(interview_challenge, [interview_topic, interview_diff, interview_lang, model_dropdown, max_tokens], interview_output)
    sql_btn.click(build_sql, [sql_desc, sql_type, model_dropdown, max_tokens], sql_output)
    shell_btn.click(build_shell, [shell_desc, shell_type, model_dropdown, max_tokens], shell_output)
    cron_btn.click(build_cron, [cron_desc, model_dropdown, max_tokens], cron_output)
    regex_btn.click(build_regex, [regex_desc, model_dropdown, max_tokens], regex_output)
    api_btn.click(build_api, [api_desc, api_fw, model_dropdown, max_tokens], api_output)
    mock_btn.click(generate_mock_data, [mock_schema, mock_count, mock_format, model_dropdown, max_tokens], mock_output)
    format_btn.click(convert_data_format, [format_input, format_from, format_to, model_dropdown, max_tokens], format_output)
# Pre-download ALL models on startup so the first request doesn't block on a
# multi-gigabyte fetch.
print("🔥 Axon v6 starting...")
print("⬇️ Pre-downloading all models...")
download_results = {}
for model_name, filename in MODELS.items():
    print(f"\n📥 Checking: {model_name}")
    print(f"   Filename: {filename}")  # FIX: print the actual file being checked
    model_path = os.path.join(MODELS_DIR, filename)
    if os.path.exists(model_path):
        print("   ✅ Already cached")
        download_results[model_name] = "cached"
    else:
        repo_id = MODEL_REPOS.get(filename)
        print(f"   Repo: {repo_id}")
        if repo_id:
            try:
                print("   ⬇️ Downloading...")
                hf_hub_download(repo_id=repo_id, filename=filename, local_dir=MODELS_DIR)
                print("   ✅ Downloaded!")
                download_results[model_name] = "downloaded"
            except Exception as e:
                # Non-fatal: the model can still be fetched lazily by load_model().
                print(f"   ❌ FAILED: {e}")
                download_results[model_name] = f"failed: {e}"
        else:
            print("   ❌ No repo configured!")
            download_results[model_name] = "no repo"
print("\n" + "=" * 50)
print("📊 DOWNLOAD SUMMARY:")
# FIX: loop variable renamed from `status` (it shadowed the `status` Markdown
# component created in the UI block above).
for model, result in download_results.items():
    emoji = "✅" if result in ("cached", "downloaded") else "❌"
    print(f"  {emoji} {model}: {result}")
print("=" * 50 + "\n")
print("🚀 Launching Axon...")
# FIX: Blocks.launch() has no `theme` parameter (passing one raises TypeError);
# the theme must be supplied to gr.Blocks() instead.
demo.launch(server_name="0.0.0.0", server_port=7860)