# Hugging Face Spaces header (paste artifact) — Space status: Running
import json

import gradio as gr
import requests
# Base URL of the local Ollama server (default install port).
OLLAMA_URL = "http://localhost:11434"

# UI display name -> Ollama model tag (fallback elsewhere is the 3B tag).
MODELS = {
    "Qwen2.5-Coder 1.5B (Fastest)": "qwen2.5-coder:1.5b",
    "Qwen2.5-Coder 3B (Fast)": "qwen2.5-coder:3b",
    "Qwen2.5-Coder 7B (Quality)": "qwen2.5-coder:7b",
}
def check_ollama():
    """Return True if the Ollama server is reachable and responding.

    Issues a quick GET to the /api/tags endpoint with a short timeout so
    the UI never hangs while the server is still starting up.

    Returns:
        bool: True when the server answered with HTTP 200, False otherwise.
    """
    try:
        r = requests.get(f"{OLLAMA_URL}/api/tags", timeout=5)
        return r.status_code == 200
    except requests.RequestException:
        # Connection refused / timed out — server not up yet.  Narrowed from
        # a bare `except:` which also swallowed KeyboardInterrupt/SystemExit.
        return False
def chat_stream(message, history, model_name, temperature, max_tokens):
    """Stream a chat completion from Ollama, yielding the growing reply.

    Args:
        message: Latest user message.
        history: Prior turns as (user, assistant) pairs; assistant may be falsy.
        model_name: Display name, mapped to an Ollama tag via MODELS
            (falls back to the 3B model for unknown names).
        temperature: Sampling temperature forwarded to Ollama.
        max_tokens: Generation cap (Ollama's ``num_predict`` option).

    Yields:
        str: The accumulated assistant reply after each streamed chunk,
        or a single status/error message.
    """
    if not check_ollama():
        yield "⏳ Ollama starting... wait 30 seconds and try again."
        return
    model = MODELS.get(model_name, "qwen2.5-coder:3b")
    messages = [{"role": "system", "content": "You are an expert coding assistant. Always use markdown code blocks."}]
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})
    try:
        # Context manager ensures the streamed connection is released even if
        # the consumer stops iterating early (the original leaked it).
        with requests.post(
            f"{OLLAMA_URL}/api/chat",
            json={
                "model": model,
                "messages": messages,
                "stream": True,
                "options": {"temperature": temperature, "num_predict": max_tokens},
            },
            stream=True,
            timeout=300,
        ) as response:
            full = ""
            for line in response.iter_lines():
                if not line:
                    continue
                try:
                    data = json.loads(line)
                except json.JSONDecodeError:
                    # Skip malformed/partial stream lines instead of aborting
                    # (narrowed from a bare `except:`).
                    continue
                if "message" in data:
                    full += data["message"].get("content", "")
                    yield full
    except Exception as e:
        # Boundary handler: surface any transport failure to the UI.
        yield f"Error: {e}"
def _extract_code(result):
    """Pull the first fenced code block out of *result*, if any.

    Drops the opening-fence language tag line (e.g. ```python) and returns the
    stripped code body; returns *result* unchanged when no fence is present.
    """
    if "```" not in result:
        return result
    parts = result.split("```")
    if len(parts) < 2:
        return result
    code = parts[1]
    if "\n" in code:
        # First line of the fenced block is the language tag — discard it.
        code = code.split("\n", 1)[-1]
    return code.strip()


def generate_code(prompt, language, model_name, max_tokens):
    """Generate *language* code for *prompt* via Ollama's /api/generate.

    Args:
        prompt: Natural-language description of the desired code.
        language: Target programming language, interpolated into the prompt.
        model_name: Display name, mapped to an Ollama tag via MODELS.
        max_tokens: Generation cap (Ollama's ``num_predict`` option).

    Returns:
        str: The extracted code (or raw response when no fence is found),
        or a status/error message.
    """
    if not prompt.strip():
        return "Please describe what you want."
    if not check_ollama():
        return "⏳ Ollama starting..."
    model = MODELS.get(model_name, "qwen2.5-coder:3b")
    full_prompt = f"Write {language} code for: {prompt}\n\nOutput ONLY code in a markdown block."
    try:
        r = requests.post(
            f"{OLLAMA_URL}/api/generate",
            # Low temperature: code generation favours determinism.
            json={"model": model, "prompt": full_prompt, "stream": False,
                  "options": {"temperature": 0.3, "num_predict": max_tokens}},
            timeout=300,
        )
        if r.status_code == 200:
            return _extract_code(r.json().get("response", ""))
        return f"Error: {r.text}"
    except Exception as e:
        return f"Error: {e}"
def explain_code(code, model_name, max_tokens):
    """Ask the selected model for an explanation of the pasted code.

    Args:
        code: Source code to explain; a blank/whitespace value short-circuits.
        model_name: Display name, mapped to an Ollama tag via MODELS.
        max_tokens: Generation cap (Ollama's ``num_predict`` option).

    Returns:
        str: The model's explanation, or a status/error message.
    """
    if not code.strip():
        return "Paste code to explain."
    if not check_ollama():
        return "⏳ Ollama starting..."
    chosen = MODELS.get(model_name, "qwen2.5-coder:3b")
    payload = {
        "model": chosen,
        "prompt": f"Explain this code:\n```\n{code}\n```",
        "stream": False,
        "options": {"num_predict": max_tokens},
    }
    try:
        resp = requests.post(f"{OLLAMA_URL}/api/generate", json=payload, timeout=300)
        if resp.status_code != 200:
            return f"Error: {resp.text}"
        return resp.json().get("response", "")
    except Exception as e:
        return f"Error: {e}"
def fix_code(code, error, model_name, max_tokens):
    """Ask the selected model to repair broken code.

    Args:
        code: The buggy source; a blank/whitespace value short-circuits.
        error: Optional error message; a generic placeholder is used if falsy.
        model_name: Display name, mapped to an Ollama tag via MODELS.
        max_tokens: Generation cap (Ollama's ``num_predict`` option).

    Returns:
        str: The model's fix suggestion, or a status/error message.
    """
    if not code.strip():
        return "Paste code to fix."
    if not check_ollama():
        return "⏳ Ollama starting..."
    chosen = MODELS.get(model_name, "qwen2.5-coder:3b")
    request_body = {
        "model": chosen,
        "prompt": f"Fix this code:\n```\n{code}\n```\nError: {error or 'Not working'}",
        "stream": False,
        # Low temperature keeps repairs focused and deterministic.
        "options": {"temperature": 0.3, "num_predict": max_tokens},
    }
    try:
        resp = requests.post(f"{OLLAMA_URL}/api/generate", json=request_body, timeout=300)
        if resp.status_code != 200:
            return f"Error: {resp.text}"
        return resp.json().get("response", "")
    except Exception as e:
        return f"Error: {e}"
# ---------------------------------------------------------------------------
# Gradio UI: one row of shared model controls, then four tabs
# (Chat / Generate / Explain / Fix) wired to the handlers above.
# ---------------------------------------------------------------------------
with gr.Blocks(title="GOD Coding Machine", theme=gr.themes.Soft(primary_hue="purple")) as demo:
    gr.Markdown("# 🔥 GOD Coding Machine\n**Docker Edition** • Qwen2.5-Coder running locally • No rate limits!")
    # Controls shared by every tab.
    with gr.Row():
        model_dropdown = gr.Dropdown(choices=list(MODELS.keys()), value="Qwen2.5-Coder 3B (Fast)", label="🤖 Model")
        temperature = gr.Slider(0, 1, value=0.7, step=0.1, label="🌡️ Temperature")
        max_tokens = gr.Slider(256, 4096, value=2048, step=256, label="📏 Max Tokens")
    with gr.Tabs():
        with gr.TabItem("💬 Chat"):
            # NOTE(review): history is kept as [user, assistant] pairs — the
            # "tuples" Chatbot format, deprecated in newer Gradio releases;
            # confirm the pinned Gradio version still supports it.
            chatbot = gr.Chatbot(height=400)
            with gr.Row():
                msg = gr.Textbox(placeholder="Ask about coding...", show_label=False, scale=9)
                send = gr.Button("Send", variant="primary", scale=1)
            clear = gr.Button("Clear")
            gr.Examples(["Write a Python quicksort function", "Explain async/await in JavaScript"], inputs=msg)
        with gr.TabItem("⚡ Generate"):
            with gr.Row():
                with gr.Column():
                    gen_prompt = gr.Textbox(label="Describe what you want", lines=3)
                    gen_lang = gr.Dropdown(["Python", "JavaScript", "TypeScript", "Go", "Rust", "Java", "C++"], value="Python", label="Language")
                    gen_btn = gr.Button("Generate", variant="primary")
                gen_output = gr.Code(label="Code", language="python", lines=15)
        with gr.TabItem("🔍 Explain"):
            with gr.Row():
                explain_input = gr.Code(label="Paste code", lines=10)
                explain_output = gr.Markdown()
            explain_btn = gr.Button("Explain", variant="primary")
        with gr.TabItem("🔧 Fix"):
            with gr.Row():
                with gr.Column():
                    fix_input = gr.Code(label="Buggy code", lines=10)
                    fix_error = gr.Textbox(label="Error message", lines=2)
                    fix_btn = gr.Button("Fix", variant="primary")
                fix_output = gr.Markdown()

    def respond(message, history, model, temp, tokens):
        # Bridge chat_stream's plain-text stream into Chatbot updates:
        # re-yield the full history with the in-progress reply appended,
        # and clear the textbox (second output) on every update.
        history = history or []
        for chunk in chat_stream(message, history, model, temp, tokens):
            yield history + [[message, chunk]], ""

    # Submit via Enter key or Send button; both update the chat and clear msg.
    msg.submit(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
    send.click(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
    clear.click(lambda: [], None, chatbot)
    gen_btn.click(generate_code, [gen_prompt, gen_lang, model_dropdown, max_tokens], gen_output)
    explain_btn.click(explain_code, [explain_input, model_dropdown, max_tokens], explain_output)
    fix_btn.click(fix_code, [fix_input, fix_error, model_dropdown, max_tokens], fix_output)

# 0.0.0.0 binding: required so the app is reachable from outside the container.
demo.launch(server_name="0.0.0.0", server_port=7860)