# NOTE(review): the lines "Spaces: / Sleeping / Sleeping" were HuggingFace
# Spaces page residue from a copy/paste, not program text — commented out so
# the module parses.
| """ | |
| 🔥 GOD Coding Machine - Docker Edition | |
| Runs Ollama locally on HuggingFace Spaces | |
| No rate limits! Full power! | |
| """ | |
| import gradio as gr | |
| import requests | |
| import json | |
# Base URL of the local Ollama server (started alongside this app in the
# container; see the entrypoint script referenced below).
OLLAMA_URL = "http://localhost:11434"
# Models available (pulled in entrypoint.sh)
# Maps UI display label -> Ollama model tag.
MODELS = {
    "Qwen2.5-Coder 7B (Best)": "qwen2.5-coder:7b",
    "Qwen2.5-Coder 3B (Fast)": "qwen2.5-coder:3b",
}
def check_ollama():
    """Return True if the local Ollama server answers /api/tags with HTTP 200.

    Uses a short timeout so the UI stays responsive while Ollama boots.
    """
    try:
        r = requests.get(f"{OLLAMA_URL}/api/tags", timeout=5)
        return r.status_code == 200
    except requests.RequestException:
        # Connection refused / timed out — Ollama is not up (yet).
        # Narrowed from a bare `except:` so Ctrl-C and real bugs propagate.
        return False
def get_models():
    """Return the models Ollama actually has pulled, as a name mapping.

    Falls back to the static MODELS dict when Ollama is unreachable or the
    payload is malformed.

    NOTE(review): the live result maps name -> name, while MODELS maps
    display label -> tag; callers relying on pretty labels should use MODELS.
    Confirm intent — this function appears unused by the UI below.
    """
    try:
        r = requests.get(f"{OLLAMA_URL}/api/tags", timeout=5)
        if r.status_code == 200:
            models = r.json().get("models", [])
            return {m["name"]: m["name"] for m in models}
    except (requests.RequestException, ValueError, KeyError):
        # Unreachable server, invalid JSON body, or an entry without "name".
        # Narrowed from a bare `except:` so unrelated errors are not hidden.
        pass
    return MODELS
def chat_stream(message: str, history: list, model_name: str, temperature: float, max_tokens: int):
    """Yield the assistant's reply incrementally from Ollama's /api/chat.

    Args:
        message: The new user message.
        history: Prior (user, assistant) pairs from the Gradio chatbot.
        model_name: UI display label; resolved through MODELS to a model tag.
        temperature: Sampling temperature forwarded to Ollama.
        max_tokens: Generation cap (Ollama option "num_predict").

    Yields:
        The accumulated response text after each streamed chunk, or a single
        status/error message string.
    """
    if not check_ollama():
        yield "⏳ Ollama is starting up... please wait 30 seconds and try again."
        return
    model = MODELS.get(model_name, "qwen2.5-coder:7b")
    # Build messages: system prompt first, then replay history, then the new turn.
    messages = [
        {
            "role": "system",
            "content": """You are an expert coding assistant. You help with:
- Writing clean, efficient, well-documented code
- Debugging and fixing issues
- Explaining code and programming concepts
- Code reviews and best practices
Always provide code in markdown code blocks with the language specified."""
        }
    ]
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})
    try:
        response = requests.post(
            f"{OLLAMA_URL}/api/chat",
            json={
                "model": model,
                "messages": messages,
                "stream": True,
                "options": {
                    "temperature": temperature,
                    "num_predict": max_tokens
                }
            },
            stream=True,
            timeout=300
        )
        if response.status_code != 200:
            # e.g. the model isn't pulled yet — previously this failed
            # silently (empty reply); surface the server's message instead.
            yield f"❌ Error: {response.text}"
            return
        full_response = ""
        for line in response.iter_lines():
            if not line:
                continue
            try:
                data = json.loads(line)
            except json.JSONDecodeError:
                # Skip only malformed/partial stream lines; narrowed from a
                # bare `except:` that also hid programming errors.
                continue
            if "error" in data:
                # Ollama reports mid-stream failures as {"error": ...}.
                yield f"❌ Error: {data['error']}"
                return
            if "message" in data and "content" in data["message"]:
                chunk = data["message"]["content"]
                full_response += chunk
                yield full_response
    except requests.RequestException as e:
        yield f"❌ Error: {str(e)}"
def generate_code(prompt: str, language: str, model_name: str):
    """Generate *language* code for a natural-language description.

    Returns the generated code with any surrounding markdown fence stripped,
    or a status/error message string.
    """
    if not prompt.strip():
        return "Please describe what you want to build."
    if not check_ollama():
        return "⏳ Ollama is starting... please wait and try again."
    model = MODELS.get(model_name, "qwen2.5-coder:7b")
    full_prompt = f"""Write {language} code for:
{prompt}
Requirements:
- Clean, well-commented code
- Follow {language} best practices
- Include error handling
Output ONLY the code in a markdown code block, no explanations."""
    payload = {
        "model": model,
        "prompt": full_prompt,
        "stream": False,
        "options": {"temperature": 0.3, "num_predict": 2048},
    }
    try:
        resp = requests.post(f"{OLLAMA_URL}/api/generate", json=payload, timeout=300)
        if resp.status_code != 200:
            return f"Error: {resp.text}"
        result = resp.json().get("response", "")
        # Pull the body out of the first markdown fence, if the model used one.
        if "```" in result:
            parts = result.split("```")
            if len(parts) >= 2:
                fenced = parts[1]
                if "\n" in fenced:
                    # Discard the opening fence's language-tag line.
                    fenced = fenced.split("\n", 1)[-1]
                return fenced.strip()
        return result
    except Exception as e:
        return f"❌ Error: {str(e)}"
def explain_code(code: str, model_name: str):
    """Ask the selected model for a detailed explanation of *code*.

    Returns the explanation text, or a status/error message string.
    """
    if not code.strip():
        return "Please paste code to explain."
    if not check_ollama():
        return "⏳ Ollama starting... please wait."
    model = MODELS.get(model_name, "qwen2.5-coder:7b")
    prompt = f"""Explain this code in detail:
```
{code}
```
Cover:
1. **Purpose**: What does it do?
2. **How it works**: Step by step
3. **Key concepts**: Important programming concepts
4. **Improvements**: Suggestions for better code"""
    payload = {
        "model": model,
        "prompt": prompt,
        "stream": False,
        "options": {"temperature": 0.5, "num_predict": 2048},
    }
    try:
        resp = requests.post(f"{OLLAMA_URL}/api/generate", json=payload, timeout=300)
        if resp.status_code == 200:
            return resp.json().get("response", "")
        return f"Error: {resp.text}"
    except Exception as e:
        return f"❌ Error: {str(e)}"
def fix_code(code: str, error_msg: str, model_name: str):
    """Ask the selected model to debug *code*, optionally guided by *error_msg*.

    Returns the model's diagnosis and fixed code, or a status/error string.
    """
    if not code.strip():
        return "Please paste code to fix."
    if not check_ollama():
        return "⏳ Ollama starting... please wait."
    model = MODELS.get(model_name, "qwen2.5-coder:7b")
    # Fall back to a generic symptom when the user left the error box blank.
    reported = error_msg if error_msg.strip() else "Code doesn't work correctly"
    prompt = f"""Fix this buggy code:
**Code:**
```
{code}
```
**Error:**
{reported}
Please:
1. Identify the bug
2. Explain what's wrong
3. Provide fixed code
4. Explain the fix"""
    payload = {
        "model": model,
        "prompt": prompt,
        "stream": False,
        "options": {"temperature": 0.3, "num_predict": 2048},
    }
    try:
        resp = requests.post(f"{OLLAMA_URL}/api/generate", json=payload, timeout=300)
        if resp.status_code == 200:
            return resp.json().get("response", "")
        return f"Error: {resp.text}"
    except Exception as e:
        return f"❌ Error: {str(e)}"
def review_code(code: str, model_name: str):
    """Request a structured code review of *code* from the selected model.

    Returns the review text, or a status/error message string.
    """
    if not code.strip():
        return "Please paste code to review."
    if not check_ollama():
        return "⏳ Ollama starting... please wait."
    model = MODELS.get(model_name, "qwen2.5-coder:7b")
    prompt = f"""Review this code:
```
{code}
```
Evaluate:
1. **Code Quality**: Clean, readable?
2. **Best Practices**: Follows conventions?
3. **Bugs**: Any issues?
4. **Performance**: Any concerns?
5. **Security**: Any vulnerabilities?
6. **Improvements**: Specific suggestions with examples"""
    payload = {
        "model": model,
        "prompt": prompt,
        "stream": False,
        "options": {"temperature": 0.5, "num_predict": 2048},
    }
    try:
        resp = requests.post(f"{OLLAMA_URL}/api/generate", json=payload, timeout=300)
        if resp.status_code == 200:
            return resp.json().get("response", "")
        return f"Error: {resp.text}"
    except Exception as e:
        return f"❌ Error: {str(e)}"
# ============== BUILD UI ==============
# Builder-style Gradio layout: components must be created inside the Blocks
# context, and event wiring references them afterwards — order matters, so
# this block is documented rather than restructured.
with gr.Blocks(
    title="🔥 GOD Coding Machine",
    theme=gr.themes.Soft(primary_hue="purple", secondary_hue="blue"),
) as demo:
    # Header banner.
    gr.Markdown("""
# 🔥 FREE GOD Coding Machine
### AI Coding Assistant - Running Locally on HuggingFace Spaces
**🚀 Docker Edition** • Ollama running locally • **No rate limits!** • 18GB RAM
""")
    # Generation controls shared by every tab below.
    with gr.Row():
        model_dropdown = gr.Dropdown(
            choices=list(MODELS.keys()),
            value="Qwen2.5-Coder 7B (Best)",
            label="🤖 Model",
            scale=2
        )
        temperature = gr.Slider(
            0.0, 1.0, value=0.7, step=0.1,
            label="🌡️ Temperature", scale=1
        )
        max_tokens = gr.Slider(
            256, 4096, value=2048, step=256,
            label="📏 Max Tokens", scale=1
        )
    with gr.Tabs():
        # Chat Tab
        with gr.TabItem("💬 Chat"):
            chatbot = gr.Chatbot(height=450, show_label=False)
            with gr.Row():
                msg = gr.Textbox(
                    placeholder="Ask anything about coding...",
                    show_label=False, scale=9, container=False
                )
                send_btn = gr.Button("Send", variant="primary", scale=1)
            clear_btn = gr.Button("🗑️ Clear")
            # Clickable starter prompts that fill the textbox.
            gr.Examples([
                "Write a Python function to find all prime numbers up to n",
                "Explain async/await in JavaScript",
                "How do I implement a REST API in FastAPI?",
                "What's the difference between a list and tuple in Python?",
            ], inputs=msg)
        # Generate Tab
        with gr.TabItem("⚡ Generate Code"):
            with gr.Row():
                with gr.Column():
                    gen_prompt = gr.Textbox(
                        label="📝 Describe what you want",
                        placeholder="A function that...", lines=4
                    )
                    gen_lang = gr.Dropdown(
                        ["Python", "JavaScript", "TypeScript", "Rust", "Go", "Java", "C++", "C#", "Ruby", "PHP"],
                        value="Python", label="💻 Language"
                    )
                    gen_btn = gr.Button("🚀 Generate", variant="primary", size="lg")
                with gr.Column():
                    gen_output = gr.Code(label="Generated Code", language="python", lines=20)
            gr.Examples([
                ["A function to merge two sorted linked lists", "Python"],
                ["A debounce hook for React", "TypeScript"],
                ["Binary search tree with insert and search", "Java"],
            ], inputs=[gen_prompt, gen_lang])
        # Explain Tab
        with gr.TabItem("🔍 Explain Code"):
            with gr.Row():
                with gr.Column():
                    explain_input = gr.Code(label="📋 Paste code", language="python", lines=15)
                    explain_btn = gr.Button("🔍 Explain", variant="primary", size="lg")
                with gr.Column():
                    explain_output = gr.Markdown(label="Explanation")
        # Fix Tab
        with gr.TabItem("🔧 Fix Code"):
            with gr.Row():
                with gr.Column():
                    fix_input = gr.Code(label="🐛 Buggy code", language="python", lines=12)
                    fix_error = gr.Textbox(label="❌ Error (optional)", lines=3)
                    fix_btn = gr.Button("🔧 Fix", variant="primary", size="lg")
                with gr.Column():
                    fix_output = gr.Markdown(label="Solution")
        # Review Tab
        with gr.TabItem("📝 Code Review"):
            with gr.Row():
                with gr.Column():
                    review_input = gr.Code(label="📋 Code to review", language="python", lines=15)
                    review_btn = gr.Button("📝 Review", variant="primary", size="lg")
                with gr.Column():
                    review_output = gr.Markdown(label="Review")
    # Footer.
    gr.Markdown("""
---
<center>
🔥 <b>Docker Edition</b> - Ollama running locally | <b>Models</b>: Qwen2.5-Coder 7B & 3B | <b>No rate limits!</b>
</center>
""")
    # Event handlers
    def respond(message, history, model, temp, max_tok):
        # Bridge chat_stream's growing text into Gradio streaming updates.
        # Each yield sends (updated chat history, "") — the empty string
        # clears the input textbox as soon as streaming begins.
        history = history or []
        response = ""
        for chunk in chat_stream(message, history, model, temp, max_tok):
            response = chunk
            yield history + [[message, response]], ""
    # Enter key and the Send button share the same streaming handler.
    msg.submit(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
    send_btn.click(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
    # Reset the chat pane to an empty history.
    clear_btn.click(lambda: [], None, chatbot)
    gen_btn.click(generate_code, [gen_prompt, gen_lang, model_dropdown], gen_output)
    explain_btn.click(explain_code, [explain_input, model_dropdown], explain_output)
    fix_btn.click(fix_code, [fix_input, fix_error, model_dropdown], fix_output)
    review_btn.click(review_code, [review_input, model_dropdown], review_output)
if __name__ == "__main__":
    # 0.0.0.0:7860 — the address/port HuggingFace Spaces expects the app on.
    demo.launch(server_name="0.0.0.0", server_port=7860)