AIencoder committed on
Commit
525bab4
·
verified ·
1 Parent(s): 2d07114

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +68 -318
app.py CHANGED
@@ -1,62 +1,23 @@
1
- """
2
- 🔥 GOD Coding Machine - Docker Edition
3
- Runs Ollama locally on HuggingFace Spaces
4
- No rate limits! Full power!
5
- """
6
-
7
  import gradio as gr
8
  import requests
9
  import json
10
 
11
  OLLAMA_URL = "http://localhost:11434"
12
-
13
- # Models available (pulled in entrypoint.sh)
14
- MODELS = {
15
- "Qwen2.5-Coder 7B (Best)": "qwen2.5-coder:7b",
16
- "Qwen2.5-Coder 3B (Fast)": "qwen2.5-coder:3b",
17
- }
18
 
19
  def check_ollama():
20
- """Check if Ollama is running"""
21
  try:
22
  r = requests.get(f"{OLLAMA_URL}/api/tags", timeout=5)
23
  return r.status_code == 200
24
  except:
25
  return False
26
 
27
- def get_models():
28
- """Get available models from Ollama"""
29
- try:
30
- r = requests.get(f"{OLLAMA_URL}/api/tags", timeout=5)
31
- if r.status_code == 200:
32
- models = r.json().get("models", [])
33
- return {m["name"]: m["name"] for m in models}
34
- except:
35
- pass
36
- return MODELS
37
-
38
- def chat_stream(message: str, history: list, model_name: str, temperature: float, max_tokens: int):
39
- """Stream chat responses"""
40
-
41
  if not check_ollama():
42
- yield "⏳ Ollama is starting up... please wait 30 seconds and try again."
43
  return
44
 
45
- model = MODELS.get(model_name, "qwen2.5-coder:7b")
46
-
47
- # Build messages
48
- messages = [
49
- {
50
- "role": "system",
51
- "content": """You are an expert coding assistant. You help with:
52
- - Writing clean, efficient, well-documented code
53
- - Debugging and fixing issues
54
- - Explaining code and programming concepts
55
- - Code reviews and best practices
56
-
57
- Always provide code in markdown code blocks with the language specified."""
58
- }
59
- ]
60
 
61
  for user_msg, assistant_msg in history:
62
  messages.append({"role": "user", "content": user_msg})
@@ -68,71 +29,39 @@ Always provide code in markdown code blocks with the language specified."""
68
  try:
69
  response = requests.post(
70
  f"{OLLAMA_URL}/api/chat",
71
- json={
72
- "model": model,
73
- "messages": messages,
74
- "stream": True,
75
- "options": {
76
- "temperature": temperature,
77
- "num_predict": max_tokens
78
- }
79
- },
80
- stream=True,
81
- timeout=300
82
  )
83
 
84
- full_response = ""
85
  for line in response.iter_lines():
86
  if line:
87
  try:
88
  data = json.loads(line)
89
- if "message" in data and "content" in data["message"]:
90
- chunk = data["message"]["content"]
91
- full_response += chunk
92
- yield full_response
93
  except:
94
  continue
95
-
96
  except Exception as e:
97
- yield f"Error: {str(e)}"
98
 
99
- def generate_code(prompt: str, language: str, model_name: str):
100
- """Generate code from description"""
101
-
102
  if not prompt.strip():
103
- return "Please describe what you want to build."
104
-
105
  if not check_ollama():
106
- return "⏳ Ollama is starting... please wait and try again."
107
 
108
- model = MODELS.get(model_name, "qwen2.5-coder:7b")
109
 
110
- full_prompt = f"""Write {language} code for:
111
-
112
- {prompt}
113
-
114
- Requirements:
115
- - Clean, well-commented code
116
- - Follow {language} best practices
117
- - Include error handling
118
-
119
- Output ONLY the code in a markdown code block, no explanations."""
120
-
121
  try:
122
- response = requests.post(
123
  f"{OLLAMA_URL}/api/generate",
124
- json={
125
- "model": model,
126
- "prompt": full_prompt,
127
- "stream": False,
128
- "options": {"temperature": 0.3, "num_predict": 2048}
129
- },
130
  timeout=300
131
  )
132
-
133
- if response.status_code == 200:
134
- result = response.json().get("response", "")
135
- # Clean up markdown
136
  if "```" in result:
137
  parts = result.split("```")
138
  if len(parts) >= 2:
@@ -141,270 +70,91 @@ Output ONLY the code in a markdown code block, no explanations."""
141
  code = code.split("\n", 1)[-1]
142
  return code.strip()
143
  return result
144
- return f"Error: {response.text}"
145
-
146
- except Exception as e:
147
- return f"❌ Error: {str(e)}"
148
-
149
- def explain_code(code: str, model_name: str):
150
- """Explain code"""
151
-
152
- if not code.strip():
153
- return "Please paste code to explain."
154
-
155
- if not check_ollama():
156
- return "⏳ Ollama starting... please wait."
157
-
158
- model = MODELS.get(model_name, "qwen2.5-coder:7b")
159
-
160
- prompt = f"""Explain this code in detail:
161
-
162
- ```
163
- {code}
164
- ```
165
-
166
- Cover:
167
- 1. **Purpose**: What does it do?
168
- 2. **How it works**: Step by step
169
- 3. **Key concepts**: Important programming concepts
170
- 4. **Improvements**: Suggestions for better code"""
171
-
172
- try:
173
- response = requests.post(
174
- f"{OLLAMA_URL}/api/generate",
175
- json={
176
- "model": model,
177
- "prompt": prompt,
178
- "stream": False,
179
- "options": {"temperature": 0.5, "num_predict": 2048}
180
- },
181
- timeout=300
182
- )
183
-
184
- if response.status_code == 200:
185
- return response.json().get("response", "")
186
- return f"Error: {response.text}"
187
-
188
  except Exception as e:
189
- return f"Error: {str(e)}"
190
 
191
- def fix_code(code: str, error_msg: str, model_name: str):
192
- """Fix buggy code"""
193
-
194
  if not code.strip():
195
- return "Please paste code to fix."
196
-
197
  if not check_ollama():
198
- return "⏳ Ollama starting... please wait."
199
-
200
- model = MODELS.get(model_name, "qwen2.5-coder:7b")
201
 
202
- prompt = f"""Fix this buggy code:
203
-
204
- **Code:**
205
- ```
206
- {code}
207
- ```
208
-
209
- **Error:**
210
- {error_msg if error_msg.strip() else "Code doesn't work correctly"}
211
-
212
- Please:
213
- 1. Identify the bug
214
- 2. Explain what's wrong
215
- 3. Provide fixed code
216
- 4. Explain the fix"""
217
-
218
  try:
219
- response = requests.post(
220
  f"{OLLAMA_URL}/api/generate",
221
- json={
222
- "model": model,
223
- "prompt": prompt,
224
- "stream": False,
225
- "options": {"temperature": 0.3, "num_predict": 2048}
226
- },
227
  timeout=300
228
  )
229
-
230
- if response.status_code == 200:
231
- return response.json().get("response", "")
232
- return f"Error: {response.text}"
233
-
234
  except Exception as e:
235
- return f"Error: {str(e)}"
236
 
237
- def review_code(code: str, model_name: str):
238
- """Review code"""
239
-
240
  if not code.strip():
241
- return "Please paste code to review."
242
-
243
  if not check_ollama():
244
- return "⏳ Ollama starting... please wait."
245
 
246
- model = MODELS.get(model_name, "qwen2.5-coder:7b")
247
 
248
- prompt = f"""Review this code:
249
-
250
- ```
251
- {code}
252
- ```
253
-
254
- Evaluate:
255
- 1. **Code Quality**: Clean, readable?
256
- 2. **Best Practices**: Follows conventions?
257
- 3. **Bugs**: Any issues?
258
- 4. **Performance**: Any concerns?
259
- 5. **Security**: Any vulnerabilities?
260
- 6. **Improvements**: Specific suggestions with examples"""
261
-
262
  try:
263
- response = requests.post(
264
  f"{OLLAMA_URL}/api/generate",
265
- json={
266
- "model": model,
267
- "prompt": prompt,
268
- "stream": False,
269
- "options": {"temperature": 0.5, "num_predict": 2048}
270
- },
271
  timeout=300
272
  )
273
-
274
- if response.status_code == 200:
275
- return response.json().get("response", "")
276
- return f"Error: {response.text}"
277
-
278
  except Exception as e:
279
- return f"Error: {str(e)}"
280
-
281
-
282
- # ============== BUILD UI ==============
283
 
284
- with gr.Blocks(
285
- title="🔥 GOD Coding Machine",
286
- theme=gr.themes.Soft(primary_hue="purple", secondary_hue="blue"),
287
- ) as demo:
288
 
289
- gr.Markdown("""
290
- # 🔥 FREE GOD Coding Machine
291
- ### AI Coding Assistant - Running Locally on HuggingFace Spaces
292
 
293
- **🚀 Docker Edition** Ollama running locally • **No rate limits!** • 18GB RAM
294
- """)
295
-
296
- with gr.Row():
297
- model_dropdown = gr.Dropdown(
298
- choices=list(MODELS.keys()),
299
- value="Qwen2.5-Coder 7B (Best)",
300
- label="🤖 Model",
301
- scale=2
302
- )
303
- temperature = gr.Slider(
304
- 0.0, 1.0, value=0.7, step=0.1,
305
- label="🌡️ Temperature", scale=1
306
- )
307
- max_tokens = gr.Slider(
308
- 256, 4096, value=2048, step=256,
309
- label="📏 Max Tokens", scale=1
310
- )
311
 
312
  with gr.Tabs():
313
-
314
- # Chat Tab
315
  with gr.TabItem("💬 Chat"):
316
- chatbot = gr.Chatbot(height=450, show_label=False)
317
-
318
- with gr.Row():
319
- msg = gr.Textbox(
320
- placeholder="Ask anything about coding...",
321
- show_label=False, scale=9, container=False
322
- )
323
- send_btn = gr.Button("Send", variant="primary", scale=1)
324
-
325
- clear_btn = gr.Button("🗑️ Clear")
326
-
327
- gr.Examples([
328
- "Write a Python function to find all prime numbers up to n",
329
- "Explain async/await in JavaScript",
330
- "How do I implement a REST API in FastAPI?",
331
- "What's the difference between a list and tuple in Python?",
332
- ], inputs=msg)
333
-
334
- # Generate Tab
335
- with gr.TabItem("⚡ Generate Code"):
336
  with gr.Row():
337
- with gr.Column():
338
- gen_prompt = gr.Textbox(
339
- label="📝 Describe what you want",
340
- placeholder="A function that...", lines=4
341
- )
342
- gen_lang = gr.Dropdown(
343
- ["Python", "JavaScript", "TypeScript", "Rust", "Go", "Java", "C++", "C#", "Ruby", "PHP"],
344
- value="Python", label="💻 Language"
345
- )
346
- gen_btn = gr.Button("🚀 Generate", variant="primary", size="lg")
347
-
348
- with gr.Column():
349
- gen_output = gr.Code(label="Generated Code", language="python", lines=20)
350
-
351
- gr.Examples([
352
- ["A function to merge two sorted linked lists", "Python"],
353
- ["A debounce hook for React", "TypeScript"],
354
- ["Binary search tree with insert and search", "Java"],
355
- ], inputs=[gen_prompt, gen_lang])
356
 
357
- # Explain Tab
358
- with gr.TabItem("🔍 Explain Code"):
359
  with gr.Row():
360
  with gr.Column():
361
- explain_input = gr.Code(label="📋 Paste code", language="python", lines=15)
362
- explain_btn = gr.Button("🔍 Explain", variant="primary", size="lg")
363
- with gr.Column():
364
- explain_output = gr.Markdown(label="Explanation")
365
 
366
- # Fix Tab
367
- with gr.TabItem("🔧 Fix Code"):
368
  with gr.Row():
369
- with gr.Column():
370
- fix_input = gr.Code(label="🐛 Buggy code", language="python", lines=12)
371
- fix_error = gr.Textbox(label="❌ Error (optional)", lines=3)
372
- fix_btn = gr.Button("🔧 Fix", variant="primary", size="lg")
373
- with gr.Column():
374
- fix_output = gr.Markdown(label="Solution")
375
 
376
- # Review Tab
377
- with gr.TabItem("📝 Code Review"):
378
  with gr.Row():
379
  with gr.Column():
380
- review_input = gr.Code(label="📋 Code to review", language="python", lines=15)
381
- review_btn = gr.Button("📝 Review", variant="primary", size="lg")
382
- with gr.Column():
383
- review_output = gr.Markdown(label="Review")
384
 
385
- gr.Markdown("""
386
- ---
387
- <center>
388
- 🔥 <b>Docker Edition</b> - Ollama running locally | <b>Models</b>: Qwen2.5-Coder 7B & 3B | <b>No rate limits!</b>
389
- </center>
390
- """)
391
-
392
- # Event handlers
393
- def respond(message, history, model, temp, max_tok):
394
  history = history or []
395
- response = ""
396
- for chunk in chat_stream(message, history, model, temp, max_tok):
397
- response = chunk
398
- yield history + [[message, response]], ""
399
-
400
- msg.submit(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
401
- send_btn.click(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
402
- clear_btn.click(lambda: [], None, chatbot)
403
 
404
- gen_btn.click(generate_code, [gen_prompt, gen_lang, model_dropdown], gen_output)
405
- explain_btn.click(explain_code, [explain_input, model_dropdown], explain_output)
406
- fix_btn.click(fix_code, [fix_input, fix_error, model_dropdown], fix_output)
407
- review_btn.click(review_code, [review_input, model_dropdown], review_output)
 
 
408
 
409
- if __name__ == "__main__":
410
- demo.launch(server_name="0.0.0.0", server_port=7860)
 
 
 
 
 
 
 
1
  import gradio as gr
2
  import requests
3
  import json
4
 
5
  OLLAMA_URL = "http://localhost:11434"
6
+ MODEL = "qwen2.5-coder:3b"
 
 
 
 
 
7
 
8
def check_ollama():
    """Return True if the local Ollama server answers ``/api/tags``.

    Used as a cheap liveness probe before every model call; a False result
    means the container's Ollama process has not finished starting.
    """
    try:
        r = requests.get(f"{OLLAMA_URL}/api/tags", timeout=5)
        return r.status_code == 200
    except requests.RequestException:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit. Only network-level failures
        # (connection refused, timeout) mean "not running yet".
        return False
14
 
15
+ def chat_stream(message, history, temperature):
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  if not check_ollama():
17
+ yield "⏳ Ollama starting... wait 30 seconds and try again."
18
  return
19
 
20
+ messages = [{"role": "system", "content": "You are an expert coding assistant. Always use markdown code blocks."}]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
  for user_msg, assistant_msg in history:
23
  messages.append({"role": "user", "content": user_msg})
 
29
  try:
30
  response = requests.post(
31
  f"{OLLAMA_URL}/api/chat",
32
+ json={"model": MODEL, "messages": messages, "stream": True, "options": {"temperature": temperature}},
33
+ stream=True, timeout=300
 
 
 
 
 
 
 
 
 
34
  )
35
 
36
+ full = ""
37
  for line in response.iter_lines():
38
  if line:
39
  try:
40
  data = json.loads(line)
41
+ if "message" in data:
42
+ full += data["message"].get("content", "")
43
+ yield full
 
44
  except:
45
  continue
 
46
  except Exception as e:
47
+ yield f"Error: {e}"
48
 
49
+ def generate_code(prompt, language):
 
 
50
  if not prompt.strip():
51
+ return "Please describe what you want."
 
52
  if not check_ollama():
53
+ return "⏳ Ollama starting..."
54
 
55
+ full_prompt = f"Write {language} code for: {prompt}\n\nOutput ONLY code in a markdown block."
56
 
 
 
 
 
 
 
 
 
 
 
 
57
  try:
58
+ r = requests.post(
59
  f"{OLLAMA_URL}/api/generate",
60
+ json={"model": MODEL, "prompt": full_prompt, "stream": False, "options": {"temperature": 0.3}},
 
 
 
 
 
61
  timeout=300
62
  )
63
+ if r.status_code == 200:
64
+ result = r.json().get("response", "")
 
 
65
  if "```" in result:
66
  parts = result.split("```")
67
  if len(parts) >= 2:
 
70
  code = code.split("\n", 1)[-1]
71
  return code.strip()
72
  return result
73
+ return f"Error: {r.text}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74
  except Exception as e:
75
+ return f"Error: {e}"
76
 
77
def explain_code(code):
    """Ask the local Ollama model for a prose explanation of *code*.

    Returns the model's markdown explanation on success, or a short
    status/error string when the input is empty, Ollama is not up yet,
    or the HTTP request fails.
    """
    if not code.strip():
        return "Paste code to explain."
    if not check_ollama():
        return "⏳ Ollama starting..."

    try:
        r = requests.post(
            f"{OLLAMA_URL}/api/generate",
            json={
                "model": MODEL,
                "prompt": f"Explain this code:\n```\n{code}\n```",
                "stream": False,
                # Consistency fix: generate_code/fix_code pin an explicit
                # temperature, but this call silently relied on the server
                # default. 0.5 matches the value the app previously used
                # for explanations.
                "options": {"temperature": 0.5},
            },
            timeout=300
        )
        return r.json().get("response", "") if r.status_code == 200 else f"Error: {r.text}"
    except Exception as e:
        return f"Error: {e}"
92
 
93
def fix_code(code, error):
    """Send buggy *code* (plus an optional *error* message) to the model.

    Returns the model's markdown repair suggestion, or a short
    status/error string when input is empty, Ollama is down, or the
    request fails.
    """
    if not code.strip():
        return "Paste code to fix."
    if not check_ollama():
        return "⏳ Ollama starting..."

    # Fall back to a generic symptom when the caller supplied no error text.
    prompt = f"Fix this code:\n```\n{code}\n```\nError: {error or 'Not working'}"

    try:
        resp = requests.post(
            f"{OLLAMA_URL}/api/generate",
            json={
                "model": MODEL,
                "prompt": prompt,
                "stream": False,
                # Low temperature: bug fixing should be deterministic-ish.
                "options": {"temperature": 0.3},
            },
            timeout=300,
        )
        if resp.status_code == 200:
            return resp.json().get("response", "")
        return f"Error: {resp.text}"
    except Exception as e:
        return f"Error: {e}"
 
 
 
110
 
111
# ---- Gradio UI ----------------------------------------------------------
# NOTE(review): layout reconstructed from a diff view — the nesting of
# gen_output / explain_btn / fix_output relative to their Row/Column was
# inferred; confirm against the rendered app.
with gr.Blocks(title="GOD Coding Machine", theme=gr.themes.Soft(primary_hue="purple")) as demo:

    gr.Markdown("# 🔥 GOD Coding Machine\n**Docker Edition** • Qwen2.5-Coder running locally • No rate limits!")

    # Sampling temperature; wired into the Chat tab only (other tabs use
    # fixed temperatures inside their handler functions).
    temperature = gr.Slider(0, 1, value=0.7, step=0.1, label="Temperature")

    with gr.Tabs():
        # Streaming chat backed by chat_stream().
        with gr.TabItem("💬 Chat"):
            chatbot = gr.Chatbot(height=400)
            with gr.Row():
                msg = gr.Textbox(placeholder="Ask about coding...", show_label=False, scale=9)
                send = gr.Button("Send", variant="primary", scale=1)
            clear = gr.Button("Clear")
            gr.Examples(["Write a Python quicksort function", "Explain async/await in JavaScript"], inputs=msg)

        # One-shot code generation via generate_code().
        with gr.TabItem("⚡ Generate"):
            with gr.Row():
                with gr.Column():
                    gen_prompt = gr.Textbox(label="Describe what you want", lines=3)
                    gen_lang = gr.Dropdown(["Python", "JavaScript", "TypeScript", "Go", "Rust", "Java", "C++"], value="Python", label="Language")
                    gen_btn = gr.Button("Generate", variant="primary")
                gen_output = gr.Code(label="Code", language="python", lines=15)

        # Code explanation via explain_code().
        with gr.TabItem("🔍 Explain"):
            with gr.Row():
                explain_input = gr.Code(label="Paste code", lines=10)
                explain_output = gr.Markdown()
            explain_btn = gr.Button("Explain", variant="primary")

        # Bug repair via fix_code().
        with gr.TabItem("🔧 Fix"):
            with gr.Row():
                with gr.Column():
                    fix_input = gr.Code(label="Buggy code", lines=10)
                    fix_error = gr.Textbox(label="Error message", lines=2)
                    fix_btn = gr.Button("Fix", variant="primary")
                fix_output = gr.Markdown()

    def respond(message, history, temp):
        # Stream partial completions into the chat pane; the trailing ""
        # output clears the input textbox on each yield.
        history = history or []
        for chunk in chat_stream(message, history, temp):
            yield history + [[message, chunk]], ""

    # Event wiring: Enter key and Send button share the same handler.
    msg.submit(respond, [msg, chatbot, temperature], [chatbot, msg])
    send.click(respond, [msg, chatbot, temperature], [chatbot, msg])
    clear.click(lambda: [], None, chatbot)
    gen_btn.click(generate_code, [gen_prompt, gen_lang], gen_output)
    explain_btn.click(explain_code, explain_input, explain_output)
    fix_btn.click(fix_code, [fix_input, fix_error], fix_output)
159
 
# Guard the launch so importing this module (e.g. from tests or tooling)
# does not start the web server; `python app.py` behaves exactly as before.
# The previous revision of this file had this guard — restoring it.
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)