Update app.py
app.py CHANGED
@@ -12,7 +12,7 @@ def check_ollama():
     except:
         return False
 
-def chat_stream(message, history, temperature):
+def chat_stream(message, history, temperature, max_tokens):
     if not check_ollama():
         yield "⏳ Ollama starting... wait 30 seconds and try again."
         return
@@ -29,7 +29,7 @@ def chat_stream(message, history, temperature):
     try:
         response = requests.post(
             f"{OLLAMA_URL}/api/chat",
-            json={"model": MODEL, "messages": messages, "stream": True, "options": {"temperature": temperature}},
+            json={"model": MODEL, "messages": messages, "stream": True, "options": {"temperature": temperature, "num_predict": max_tokens}},
             stream=True, timeout=300
         )
 
@@ -46,7 +46,7 @@ def chat_stream(message, history, temperature):
     except Exception as e:
         yield f"Error: {e}"
 
-def generate_code(prompt, language):
+def generate_code(prompt, language, max_tokens):
     if not prompt.strip():
         return "Please describe what you want."
     if not check_ollama():
@@ -57,7 +57,7 @@ def generate_code(prompt, language):
     try:
         r = requests.post(
             f"{OLLAMA_URL}/api/generate",
-            json={"model": MODEL, "prompt": full_prompt, "stream": False, "options": {"temperature": 0.3}},
+            json={"model": MODEL, "prompt": full_prompt, "stream": False, "options": {"temperature": 0.3, "num_predict": max_tokens}},
             timeout=300
         )
         if r.status_code == 200:
@@ -74,7 +74,7 @@ def generate_code(prompt, language):
     except Exception as e:
         return f"Error: {e}"
 
-def explain_code(code):
+def explain_code(code, max_tokens):
     if not code.strip():
         return "Paste code to explain."
     if not check_ollama():
@@ -83,14 +83,14 @@ def explain_code(code):
     try:
         r = requests.post(
             f"{OLLAMA_URL}/api/generate",
-            json={"model": MODEL, "prompt": f"Explain this code:\n```\n{code}\n```", "stream": False},
+            json={"model": MODEL, "prompt": f"Explain this code:\n```\n{code}\n```", "stream": False, "options": {"num_predict": max_tokens}},
             timeout=300
         )
         return r.json().get("response", "") if r.status_code == 200 else f"Error: {r.text}"
     except Exception as e:
         return f"Error: {e}"
 
-def fix_code(code, error):
+def fix_code(code, error, max_tokens):
     if not code.strip():
         return "Paste code to fix."
     if not check_ollama():
@@ -101,7 +101,7 @@ def fix_code(code, error):
     try:
         r = requests.post(
             f"{OLLAMA_URL}/api/generate",
-            json={"model": MODEL, "prompt": prompt, "stream": False, "options": {"temperature": 0.3}},
+            json={"model": MODEL, "prompt": prompt, "stream": False, "options": {"temperature": 0.3, "num_predict": max_tokens}},
             timeout=300
         )
         return r.json().get("response", "") if r.status_code == 200 else f"Error: {r.text}"
@@ -112,7 +112,9 @@ with gr.Blocks(title="GOD Coding Machine", theme=gr.themes.Soft(primary_hue="pur
 
     gr.Markdown("# 🔥 GOD Coding Machine\n**Docker Edition** • Qwen2.5-Coder running locally • No rate limits!")
 
-
+    with gr.Row():
+        temperature = gr.Slider(0, 1, value=0.7, step=0.1, label="🌡️ Temperature")
+        max_tokens = gr.Slider(256, 4096, value=2048, step=256, label="📏 Max Tokens")
 
     with gr.Tabs():
         with gr.TabItem("💬 Chat"):
@@ -145,16 +147,16 @@ with gr.Blocks(title="GOD Coding Machine", theme=gr.themes.Soft(primary_hue="pur
             fix_btn = gr.Button("Fix", variant="primary")
             fix_output = gr.Markdown()
 
-    def respond(message, history, temp):
+    def respond(message, history, temp, tokens):
         history = history or []
-        for chunk in chat_stream(message, history, temp):
+        for chunk in chat_stream(message, history, temp, tokens):
             yield history + [[message, chunk]], ""
 
-    msg.submit(respond, [msg, chatbot, temperature], [chatbot, msg])
-    send.click(respond, [msg, chatbot, temperature], [chatbot, msg])
+    msg.submit(respond, [msg, chatbot, temperature, max_tokens], [chatbot, msg])
+    send.click(respond, [msg, chatbot, temperature, max_tokens], [chatbot, msg])
     clear.click(lambda: [], None, chatbot)
-    gen_btn.click(generate_code, [gen_prompt, gen_lang], gen_output)
-    explain_btn.click(explain_code, explain_input, explain_output)
-    fix_btn.click(fix_code, [fix_input, fix_error], fix_output)
+    gen_btn.click(generate_code, [gen_prompt, gen_lang, max_tokens], gen_output)
+    explain_btn.click(explain_code, [explain_input, max_tokens], explain_output)
+    fix_btn.click(fix_code, [fix_input, fix_error, max_tokens], fix_output)
 
 demo.launch(server_name="0.0.0.0", server_port=7860)
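For context on the change above: the commit threads a new max_tokens value from the UI into every Ollama request as the num_predict option, which caps how many tokens the model may generate per response. A minimal standalone sketch of the same request shape, assuming a local Ollama server; the URL, model tag, and prompt below are illustrative assumptions, not values taken from this Space:

    import requests

    OLLAMA_URL = "http://localhost:11434"  # assumption: default local Ollama address
    MODEL = "qwen2.5-coder"                # assumption: illustrative model tag

    # Same shape the app now sends: "num_predict" inside "options" caps the
    # number of tokens generated for this single response.
    r = requests.post(
        f"{OLLAMA_URL}/api/generate",
        json={
            "model": MODEL,
            "prompt": "Write a one-line docstring for a binary search function.",
            "stream": False,
            "options": {"temperature": 0.3, "num_predict": 64},
        },
        timeout=300,
    )
    print(r.json().get("response", ""))

Generation simply stops once the cap is reached, even mid-sentence, so the 256-4096 slider range is a trade-off between reply length and latency.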
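The chat tab keeps stream=True, so the same options dict (temperature plus the new num_predict) also governs each streamed reply. A sketch of consuming that stream, assuming Ollama's newline-delimited JSON chunks, where each line carries a partial assistant message and a final "done" flag; URL and model are again assumptions:

    import json
    import requests

    OLLAMA_URL = "http://localhost:11434"  # assumption: default local Ollama address
    MODEL = "qwen2.5-coder"                # assumption: illustrative model tag

    response = requests.post(
        f"{OLLAMA_URL}/api/chat",
        json={
            "model": MODEL,
            "messages": [{"role": "user", "content": "Explain list comprehensions in one paragraph."}],
            "stream": True,
            "options": {"temperature": 0.7, "num_predict": 256},
        },
        stream=True,
        timeout=300,
    )

    text = ""
    for line in response.iter_lines():
        if not line:
            continue
        chunk = json.loads(line)
        # Each line is one JSON chunk holding a slice of the assistant message.
        text += chunk.get("message", {}).get("content", "")
        if chunk.get("done"):
            break
    print(text)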