AIencoder committed on
Commit
869ccfb
·
verified ·
1 Parent(s): 83830d0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -16
app.py CHANGED
@@ -3,7 +3,12 @@ import requests
3
  import json
4
 
5
  OLLAMA_URL = "http://localhost:11434"
6
- MODEL = "qwen2.5-coder:3b"
 
 
 
 
 
7
 
8
  def check_ollama():
9
  try:
@@ -12,11 +17,12 @@ def check_ollama():
12
  except:
13
  return False
14
 
15
- def chat_stream(message, history, temperature, max_tokens):
16
  if not check_ollama():
17
  yield "⏳ Ollama starting... wait 30 seconds and try again."
18
  return
19
 
 
20
  messages = [{"role": "system", "content": "You are an expert coding assistant. Always use markdown code blocks."}]
21
 
22
  for user_msg, assistant_msg in history:
@@ -29,7 +35,7 @@ def chat_stream(message, history, temperature, max_tokens):
29
  try:
30
  response = requests.post(
31
  f"{OLLAMA_URL}/api/chat",
32
- json={"model": MODEL, "messages": messages, "stream": True, "options": {"temperature": temperature, "num_predict": max_tokens}},
33
  stream=True, timeout=300
34
  )
35
 
@@ -46,18 +52,19 @@ def chat_stream(message, history, temperature, max_tokens):
46
  except Exception as e:
47
  yield f"Error: {e}"
48
 
49
- def generate_code(prompt, language, max_tokens):
50
  if not prompt.strip():
51
  return "Please describe what you want."
52
  if not check_ollama():
53
  return "⏳ Ollama starting..."
54
 
 
55
  full_prompt = f"Write {language} code for: {prompt}\n\nOutput ONLY code in a markdown block."
56
 
57
  try:
58
  r = requests.post(
59
  f"{OLLAMA_URL}/api/generate",
60
- json={"model": MODEL, "prompt": full_prompt, "stream": False, "options": {"temperature": 0.3, "num_predict": max_tokens}},
61
  timeout=300
62
  )
63
  if r.status_code == 200:
@@ -74,34 +81,37 @@ def generate_code(prompt, language, max_tokens):
74
  except Exception as e:
75
  return f"Error: {e}"
76
 
77
- def explain_code(code, max_tokens):
78
  if not code.strip():
79
  return "Paste code to explain."
80
  if not check_ollama():
81
  return "⏳ Ollama starting..."
82
 
 
 
83
  try:
84
  r = requests.post(
85
  f"{OLLAMA_URL}/api/generate",
86
- json={"model": MODEL, "prompt": f"Explain this code:\n```\n{code}\n```", "stream": False, "options": {"num_predict": max_tokens}},
87
  timeout=300
88
  )
89
  return r.json().get("response", "") if r.status_code == 200 else f"Error: {r.text}"
90
  except Exception as e:
91
  return f"Error: {e}"
92
 
93
- def fix_code(code, error, max_tokens):
94
  if not code.strip():
95
  return "Paste code to fix."
96
  if not check_ollama():
97
  return "⏳ Ollama starting..."
98
 
 
99
  prompt = f"Fix this code:\n```\n{code}\n```\nError: {error or 'Not working'}"
100
 
101
  try:
102
  r = requests.post(
103
  f"{OLLAMA_URL}/api/generate",
104
- json={"model": MODEL, "prompt": prompt, "stream": False, "options": {"temperature": 0.3, "num_predict": max_tokens}},
105
  timeout=300
106
  )
107
  return r.json().get("response", "") if r.status_code == 200 else f"Error: {r.text}"
@@ -113,6 +123,7 @@ with gr.Blocks(title="GOD Coding Machine", theme=gr.themes.Soft(primary_hue="pur
113
  gr.Markdown("# 🔥 GOD Coding Machine\n**Docker Edition** • Qwen2.5-Coder running locally • No rate limits!")
114
 
115
  with gr.Row():
 
116
  temperature = gr.Slider(0, 1, value=0.7, step=0.1, label="🌡️ Temperature")
117
  max_tokens = gr.Slider(256, 4096, value=2048, step=256, label="📏 Max Tokens")
118
 
@@ -147,16 +158,16 @@ with gr.Blocks(title="GOD Coding Machine", theme=gr.themes.Soft(primary_hue="pur
147
  fix_btn = gr.Button("Fix", variant="primary")
148
  fix_output = gr.Markdown()
149
 
150
- def respond(message, history, temp, tokens):
151
  history = history or []
152
- for chunk in chat_stream(message, history, temp, tokens):
153
  yield history + [[message, chunk]], ""
154
 
155
- msg.submit(respond, [msg, chatbot, temperature, max_tokens], [chatbot, msg])
156
- send.click(respond, [msg, chatbot, temperature, max_tokens], [chatbot, msg])
157
  clear.click(lambda: [], None, chatbot)
158
- gen_btn.click(generate_code, [gen_prompt, gen_lang, max_tokens], gen_output)
159
- explain_btn.click(explain_code, [explain_input, max_tokens], explain_output)
160
- fix_btn.click(fix_code, [fix_input, fix_error, max_tokens], fix_output)
161
 
162
  demo.launch(server_name="0.0.0.0", server_port=7860)
 
3
  import json
4
 
5
  OLLAMA_URL = "http://localhost:11434"
6
+
7
# Human-readable dropdown label -> Ollama model tag.
# Helpers resolve labels with MODELS.get(name, "qwen2.5-coder:3b"), so an
# unknown label falls back to the 3B model.
MODELS = {
    "Qwen2.5-Coder 1.5B (Fastest)": "qwen2.5-coder:1.5b",
    "Qwen2.5-Coder 3B (Fast)": "qwen2.5-coder:3b",
    "Qwen2.5-Coder 7B (Quality)": "qwen2.5-coder:7b",
}
12
 
13
  def check_ollama():
14
  try:
 
17
  except:
18
  return False
19
 
20
+ def chat_stream(message, history, model_name, temperature, max_tokens):
21
  if not check_ollama():
22
  yield "⏳ Ollama starting... wait 30 seconds and try again."
23
  return
24
 
25
+ model = MODELS.get(model_name, "qwen2.5-coder:3b")
26
  messages = [{"role": "system", "content": "You are an expert coding assistant. Always use markdown code blocks."}]
27
 
28
  for user_msg, assistant_msg in history:
 
35
  try:
36
  response = requests.post(
37
  f"{OLLAMA_URL}/api/chat",
38
+ json={"model": model, "messages": messages, "stream": True, "options": {"temperature": temperature, "num_predict": max_tokens}},
39
  stream=True, timeout=300
40
  )
41
 
 
52
  except Exception as e:
53
  yield f"Error: {e}"
54
 
55
+ def generate_code(prompt, language, model_name, max_tokens):
56
  if not prompt.strip():
57
  return "Please describe what you want."
58
  if not check_ollama():
59
  return "⏳ Ollama starting..."
60
 
61
+ model = MODELS.get(model_name, "qwen2.5-coder:3b")
62
  full_prompt = f"Write {language} code for: {prompt}\n\nOutput ONLY code in a markdown block."
63
 
64
  try:
65
  r = requests.post(
66
  f"{OLLAMA_URL}/api/generate",
67
+ json={"model": model, "prompt": full_prompt, "stream": False, "options": {"temperature": 0.3, "num_predict": max_tokens}},
68
  timeout=300
69
  )
70
  if r.status_code == 200:
 
81
  except Exception as e:
82
  return f"Error: {e}"
83
 
84
def explain_code(code, model_name, max_tokens):
    """Ask the selected Ollama model to explain *code* in plain language.

    Returns the model's response text on success, or a short status/error
    string when the input is empty, Ollama is not reachable yet, the server
    answers with a non-200 status, or the request itself fails.
    """
    if not code.strip():
        return "Paste code to explain."
    if not check_ollama():
        return "⏳ Ollama starting..."

    # Unknown dropdown labels fall back to the 3B model.
    tag = MODELS.get(model_name, "qwen2.5-coder:3b")
    payload = {
        "model": tag,
        "prompt": f"Explain this code:\n```\n{code}\n```",
        "stream": False,
        "options": {"num_predict": max_tokens},
    }
    try:
        resp = requests.post(f"{OLLAMA_URL}/api/generate", json=payload, timeout=300)
        if resp.status_code == 200:
            # JSON decoding stays inside the try so a malformed body is
            # reported as an error string rather than raising to Gradio.
            return resp.json().get("response", "")
        return f"Error: {resp.text}"
    except Exception as exc:
        return f"Error: {exc}"
101
 
102
+ def fix_code(code, error, model_name, max_tokens):
103
  if not code.strip():
104
  return "Paste code to fix."
105
  if not check_ollama():
106
  return "⏳ Ollama starting..."
107
 
108
+ model = MODELS.get(model_name, "qwen2.5-coder:3b")
109
  prompt = f"Fix this code:\n```\n{code}\n```\nError: {error or 'Not working'}"
110
 
111
  try:
112
  r = requests.post(
113
  f"{OLLAMA_URL}/api/generate",
114
+ json={"model": model, "prompt": prompt, "stream": False, "options": {"temperature": 0.3, "num_predict": max_tokens}},
115
  timeout=300
116
  )
117
  return r.json().get("response", "") if r.status_code == 200 else f"Error: {r.text}"
 
123
  gr.Markdown("# 🔥 GOD Coding Machine\n**Docker Edition** • Qwen2.5-Coder running locally • No rate limits!")
124
 
125
  with gr.Row():
126
+ model_dropdown = gr.Dropdown(choices=list(MODELS.keys()), value="Qwen2.5-Coder 3B (Fast)", label="🤖 Model")
127
  temperature = gr.Slider(0, 1, value=0.7, step=0.1, label="🌡️ Temperature")
128
  max_tokens = gr.Slider(256, 4096, value=2048, step=256, label="📏 Max Tokens")
129
 
 
158
  fix_btn = gr.Button("Fix", variant="primary")
159
  fix_output = gr.Markdown()
160
 
161
def respond(message, history, model, temp, tokens):
    """Gradio event handler: stream the assistant reply into the chatbot.

    Yields (updated_history, "") pairs — the growing partial reply is
    appended as a new [user, assistant] turn, and the empty string clears
    the input textbox. The incoming history list is never mutated.
    """
    prior = history if history else []
    for partial in chat_stream(message, prior, model, temp, tokens):
        yield prior + [[message, partial]], ""
165
 
166
+ msg.submit(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
167
+ send.click(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
168
  clear.click(lambda: [], None, chatbot)
169
+ gen_btn.click(generate_code, [gen_prompt, gen_lang, model_dropdown, max_tokens], gen_output)
170
+ explain_btn.click(explain_code, [explain_input, model_dropdown, max_tokens], explain_output)
171
+ fix_btn.click(fix_code, [fix_input, fix_error, model_dropdown, max_tokens], fix_output)
172
 
173
  demo.launch(server_name="0.0.0.0", server_port=7860)