AIencoder commited on
Commit
2bc2fa5
·
verified ·
1 Parent(s): 8ee6c34

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +698 -136
app.py CHANGED
@@ -22,13 +22,13 @@ MODELS = {
22
  }
23
 
24
  MODEL_INFO = {
25
- "⭐ Qwen3 30B-A3B (Best)": "🏆 Best quality • MoE 30B/3B active",
26
  "Qwen2.5 Coder 7B": "⚖️ Balanced • Great for most tasks",
27
  "Qwen2.5 Coder 3B": "🚀 Fast & capable • Recommended",
28
  "Qwen2.5 Coder 1.5B (Fast)": "⚡ Fastest • Simple tasks",
29
  "DeepSeek Coder 6.7B": "🧠 Complex logic • Algorithms",
30
  "DeepSeek Coder 1.3B (Fast)": "⚡ Quick completions",
31
- "StarCoder2 7B": "🐙 GitHub trained",
32
  "StarCoder2 3B": "🐙 Fast GitHub style",
33
  "CodeGemma 7B": "🔷 Google • Strong docs",
34
  "CodeGemma 2B (Fast)": "🔷 Quick & efficient",
@@ -41,18 +41,25 @@ LANGUAGES = [
41
  "HTML/CSS", "SQL", "Bash", "PowerShell", "Lua"
42
  ]
43
 
44
- # Whisper init
45
  whisper_model = None
46
- try:
47
- print("Loading Whisper...")
48
- whisper_model = WhisperModel("tiny", device="cpu", compute_type="int8")
49
- print("✅ Whisper ready!")
50
- except Exception as e:
51
- print(f"❌ Whisper failed: {e}")
52
 
53
- # ===== HELPERS =====
 
 
 
 
 
 
 
 
 
 
 
 
 
54
 
55
- def check_ollama():
56
  try:
57
  r = requests.get(f"{OLLAMA_URL}/api/tags", timeout=5)
58
  return r.status_code == 200
@@ -65,35 +72,45 @@ def get_status():
65
  if r.status_code == 200:
66
  models = r.json().get("models", [])
67
  return f"🟢 Online • {len(models)} models"
68
- except:
69
- pass
70
- return "🔴 Offline"
 
 
 
 
71
 
72
  def get_model_info(model_name):
73
  return MODEL_INFO.get(model_name, "")
74
 
75
- def validate_input(text, name="Input"):
76
  if not text or not text.strip():
77
- return False, f"⚠️ {name} cannot be empty."
78
  if len(text) > 100000:
79
- return False, f"⚠️ {name} too long (max 100KB)."
80
  return True, None
81
 
82
  def transcribe_audio(audio):
83
  if audio is None:
84
  return ""
 
85
  if whisper_model is None:
86
- return "❌ Whisper not available."
 
87
  try:
88
  segments, _ = whisper_model.transcribe(audio)
89
  text = " ".join([seg.text for seg in segments]).strip()
90
- return text if text else "⚠️ No speech detected."
 
 
 
 
91
  except Exception as e:
92
  return f"❌ Transcription failed: {str(e)[:50]}"
93
 
94
- def call_ollama(model_name, prompt, temperature=0.7, max_tokens=2048):
95
- if not check_ollama():
96
- return "❌ **Ollama not running.** Please wait for it to start."
97
 
98
  model = MODELS.get(model_name, "qwen2.5-coder:3b")
99
 
@@ -101,34 +118,60 @@ def call_ollama(model_name, prompt, temperature=0.7, max_tokens=2048):
101
  try:
102
  r = requests.post(
103
  f"{OLLAMA_URL}/api/generate",
104
- json={"model": model, "prompt": prompt, "stream": False,
105
- "options": {"temperature": temperature, "num_predict": max_tokens}},
 
 
 
 
 
 
 
106
  timeout=TIMEOUT
107
  )
108
 
109
  if r.status_code == 200:
110
  response = r.json().get("response", "")
111
- return response if response.strip() else "⚠️ Empty response. Try rephrasing."
 
 
 
112
  elif r.status_code == 404:
113
- return f"❌ **Model not found:** `{model}`"
 
 
 
 
 
 
 
114
  else:
115
- return f"❌ **Error {r.status_code}**"
116
-
117
  except requests.exceptions.Timeout:
118
  if attempt < MAX_RETRIES - 1:
119
  time.sleep(2)
120
  continue
121
- return "❌ **Timeout.** Try smaller model."
 
122
  except requests.exceptions.ConnectionError:
123
- return "❌ **Connection failed.**"
 
 
 
 
 
 
 
124
  except Exception as e:
125
- return f"❌ **Error:** {str(e)[:50]}"
126
 
127
- return "❌ **Max retries reached.**"
128
 
129
  def extract_code(text):
130
  if not text or "```" not in text:
131
  return text
 
132
  try:
133
  parts = text.split("```")
134
  if len(parts) >= 2:
@@ -136,7 +179,7 @@ def extract_code(text):
136
  if "\n" in code:
137
  code = code.split("\n", 1)[-1]
138
  return code.strip()
139
- except:
140
  pass
141
  return text
142
 
@@ -148,12 +191,12 @@ def chat_stream(message, history, model_name, temperature, max_tokens):
148
  yield history + [[message, error]]
149
  return
150
 
151
- if not check_ollama():
152
- yield history + [[message, "❌ **Ollama not running.**"]]
153
  return
154
 
155
  model = MODELS.get(model_name, "qwen2.5-coder:3b")
156
- messages = [{"role": "system", "content": "You are an expert coding assistant. Use markdown code blocks."}]
157
 
158
  for user_msg, assistant_msg in history:
159
  messages.append({"role": "user", "content": user_msg})
@@ -165,13 +208,22 @@ def chat_stream(message, history, model_name, temperature, max_tokens):
165
  try:
166
  response = requests.post(
167
  f"{OLLAMA_URL}/api/chat",
168
- json={"model": model, "messages": messages, "stream": True,
169
- "options": {"temperature": temperature, "num_predict": max_tokens}},
170
- stream=True, timeout=TIMEOUT
 
 
 
 
 
171
  )
172
 
 
 
 
 
173
  if response.status_code != 200:
174
- yield history + [[message, f"❌ **Error {response.status_code}**"]]
175
  return
176
 
177
  full = ""
@@ -179,16 +231,24 @@ def chat_stream(message, history, model_name, temperature, max_tokens):
179
  if line:
180
  try:
181
  data = json.loads(line)
 
 
 
182
  if "message" in data:
183
  full += data["message"].get("content", "")
184
  yield history + [[message, full]]
185
- except:
186
  continue
187
-
 
 
 
188
  except requests.exceptions.Timeout:
189
- yield history + [[message, "❌ **Timeout.**"]]
 
 
190
  except Exception as e:
191
- yield history + [[message, f"❌ **Error:** {str(e)[:50]}"]]
192
 
193
  def generate_code(prompt, language, model_name, temperature, max_tokens):
194
  valid, error = validate_input(prompt, "Description")
@@ -196,33 +256,50 @@ def generate_code(prompt, language, model_name, temperature, max_tokens):
196
  return error
197
 
198
  full_prompt = (
199
- f"Write {language} code for:\n\n{prompt}\n\n"
200
- "Requirements: Clean code, comments, handle edge cases. Output ONLY code in markdown block."
 
 
 
 
 
201
  )
202
- result = call_ollama(model_name, full_prompt, temperature, max_tokens)
203
- return result if result.startswith("❌") or result.startswith("⚠️") else extract_code(result)
 
 
 
204
 
205
  def explain_code(code, model_name, detail_level, max_tokens):
206
  valid, error = validate_input(code, "Code")
207
  if not valid:
208
  return error
209
 
210
- details = {
211
- "Brief": "Give 2-3 sentence explanation.",
212
- "Normal": "Explain the code with main logic.",
213
- "Detailed": "Detailed explanation with complexity and improvements."
214
  }
215
- prompt = details.get(detail_level, details["Normal"]) + "\n\nCode:\n" + code
216
- return call_ollama(model_name, prompt, 0.5, max_tokens)
 
217
 
218
  def fix_code(code, error_msg, model_name, max_tokens):
219
  valid, error = validate_input(code, "Code")
220
  if not valid:
221
  return error
222
 
223
- err = error_msg if error_msg and error_msg.strip() else "Not working as expected"
224
- prompt = f"Fix this code and explain what was wrong.\n\nCode:\n{code}\n\nError: {err}"
225
- return call_ollama(model_name, prompt, 0.3, max_tokens)
 
 
 
 
 
 
 
 
226
 
227
  def review_code(code, model_name, max_tokens):
228
  valid, error = validate_input(code, "Code")
@@ -230,216 +307,701 @@ def review_code(code, model_name, max_tokens):
230
  return error
231
 
232
  prompt = (
233
- "Review this code for:\n"
234
- "1. Code Quality\n2. Bugs\n3. Performance\n4. Security\n5. Suggestions\n\n"
 
 
 
 
235
  "Code:\n" + code
236
  )
237
- return call_ollama(model_name, prompt, 0.4, max_tokens)
238
 
239
- def convert_code(code, from_lang, to_lang, model_name, max_tokens):
240
  valid, error = validate_input(code, "Code")
241
  if not valid:
242
  return error
243
- if from_lang == to_lang:
244
- return "⚠️ Same language selected."
245
 
246
- prompt = f"Convert this {from_lang} to {to_lang}. Output ONLY code.\n\n{from_lang}:\n{code}"
247
- result = call_ollama(model_name, prompt, 0.3, max_tokens)
248
- return result if result.startswith("❌") or result.startswith("⚠️") else extract_code(result)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
249
 
250
  def generate_tests(code, language, framework, model_name, max_tokens):
251
  valid, error = validate_input(code, "Code")
252
  if not valid:
253
  return error
254
 
255
- fw = framework if framework else "pytest" if language == "Python" else "Jest"
256
- prompt = f"Generate unit tests for this {language} code using {fw}. Output ONLY test code.\n\n{code}"
257
- result = call_ollama(model_name, prompt, 0.3, max_tokens)
258
- return result if result.startswith("") or result.startswith("⚠️") else extract_code(result)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
259
 
260
  def document_code(code, language, style, model_name, max_tokens):
261
  valid, error = validate_input(code, "Code")
262
  if not valid:
263
  return error
264
 
265
- prompt = f"Add {style.lower()} to this {language} code.\n\n{code}"
266
- result = call_ollama(model_name, prompt, 0.4, max_tokens)
267
- return result if style == "README" or result.startswith("❌") else extract_code(result)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
268
 
269
  def optimize_code(code, language, focus, model_name, max_tokens):
270
  valid, error = validate_input(code, "Code")
271
  if not valid:
272
  return error
273
 
274
- prompt = f"Optimize this {language} code for {focus.lower()}. Explain changes.\n\n{code}"
275
- return call_ollama(model_name, prompt, 0.3, max_tokens)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
276
 
277
  def build_regex(description, model_name, max_tokens):
278
  valid, error = validate_input(description, "Description")
279
  if not valid:
280
  return error
281
 
282
- prompt = f"Create regex for: {description}\n\nProvide pattern, explanation, examples, and Python code."
283
- return call_ollama(model_name, prompt, 0.3, max_tokens)
 
 
 
 
 
 
 
 
 
284
 
285
  def build_api(description, framework, model_name, max_tokens):
286
  valid, error = validate_input(description, "Description")
287
  if not valid:
288
  return error
289
 
290
- prompt = f"Create REST API using {framework}:\n\n{description}\n\nInclude validation and error handling."
291
- result = call_ollama(model_name, prompt, 0.3, max_tokens)
292
- return result if result.startswith("") else extract_code(result)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
293
 
294
- # ===== UI (Gradio 6.0 compatible) =====
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
295
 
296
  with gr.Blocks(title="Axon v6") as demo:
297
 
298
- gr.Markdown("# 🔥 Axon v6\n### AI Coding Assistant • 10 Models • 9 Tools • 100% Local")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
299
 
300
- status = gr.Markdown(value=get_status, every=5)
 
 
301
 
 
302
  with gr.Row():
303
- model_dropdown = gr.Dropdown(choices=list(MODELS.keys()), value="Qwen2.5 Coder 3B", label="🤖 Model", scale=3)
 
 
 
 
 
304
  temperature = gr.Slider(0, 1, value=0.7, step=0.1, label="🌡️ Creativity", scale=2)
305
  max_tokens = gr.Slider(256, 8192, value=2048, step=256, label="📏 Max Tokens", scale=2)
306
 
307
- model_info = gr.Markdown(value="🚀 Fast & capable • Recommended")
308
- model_dropdown.change(get_model_info, model_dropdown, model_info)
309
 
310
  with gr.Tabs():
311
 
 
312
  with gr.TabItem("💬 Chat"):
313
- chatbot = gr.Chatbot(height=450)
314
  with gr.Row():
315
- msg = gr.Textbox(placeholder="Ask anything...", show_label=False, scale=8)
 
 
 
316
  send = gr.Button("Send ➤", variant="primary", scale=1)
317
  with gr.Row():
318
- audio = gr.Audio(sources=["microphone"], type="filepath", label="🎤", scale=2)
319
- transcribe = gr.Button("🎤 Transcribe", scale=1)
320
  clear = gr.Button("🗑️ Clear", scale=1)
 
 
 
 
 
 
 
321
 
 
322
  with gr.TabItem("⚡ Generate"):
323
  with gr.Row():
324
  with gr.Column(scale=1):
325
- gen_prompt = gr.Textbox(label="📝 Describe what to build", lines=4)
 
 
 
 
326
  with gr.Row():
327
- gen_lang = gr.Dropdown(LANGUAGES, value="Python", label="Language", scale=2)
328
  gen_temp = gr.Slider(0, 1, value=0.3, step=0.1, label="🌡️", scale=1)
329
- gen_btn = gr.Button("⚡ Generate", variant="primary")
330
  with gr.Column(scale=2):
331
- gen_output = gr.Code(label="Code", language="python", lines=18)
332
 
 
333
  with gr.TabItem("🔍 Explain"):
334
  with gr.Row():
335
  with gr.Column(scale=1):
336
- explain_input = gr.Code(label="📋 Code", lines=12)
337
- explain_detail = gr.Radio(["Brief", "Normal", "Detailed"], value="Normal", label="Detail")
338
- explain_btn = gr.Button("🔍 Explain", variant="primary")
 
 
 
339
  with gr.Column(scale=1):
340
  explain_output = gr.Markdown(label="Explanation")
341
 
 
342
  with gr.TabItem("🔧 Debug"):
343
  with gr.Row():
344
  with gr.Column(scale=1):
345
- fix_input = gr.Code(label="🐛 Buggy Code", lines=10)
346
- fix_error = gr.Textbox(label="❌ Error", lines=2)
347
- fix_btn = gr.Button("🔧 Fix", variant="primary")
 
 
 
 
348
  with gr.Column(scale=1):
349
  fix_output = gr.Markdown(label="Solution")
350
 
 
351
  with gr.TabItem("📋 Review"):
352
  with gr.Row():
353
  with gr.Column(scale=1):
354
- review_input = gr.Code(label="📋 Code", lines=14)
355
- review_btn = gr.Button("📋 Review", variant="primary")
356
  with gr.Column(scale=1):
357
- review_output = gr.Markdown(label="Review")
358
 
 
359
  with gr.TabItem("🔄 Convert"):
360
  with gr.Row():
361
  with gr.Column(scale=1):
362
- convert_input = gr.Code(label="📥 Source", lines=12)
363
  with gr.Row():
364
- convert_from = gr.Dropdown(LANGUAGES, value="Python", label="From")
365
- convert_to = gr.Dropdown(LANGUAGES, value="JavaScript", label="To")
366
- convert_btn = gr.Button("🔄 Convert", variant="primary")
367
  with gr.Column(scale=1):
368
- convert_output = gr.Code(label="📤 Result", lines=12)
369
 
 
370
  with gr.TabItem("🧪 Test"):
371
  with gr.Row():
372
  with gr.Column(scale=1):
373
- test_input = gr.Code(label="📋 Code", lines=12)
374
  with gr.Row():
375
- test_lang = gr.Dropdown(LANGUAGES[:12], value="Python", label="Language")
376
- test_fw = gr.Textbox(label="Framework", placeholder="pytest")
377
- test_btn = gr.Button("🧪 Generate Tests", variant="primary")
378
  with gr.Column(scale=1):
379
- test_output = gr.Code(label="Tests", lines=12)
380
 
 
381
  with gr.TabItem("📝 Document"):
382
  with gr.Row():
383
  with gr.Column(scale=1):
384
- doc_input = gr.Code(label="📋 Code", lines=12)
385
  with gr.Row():
386
- doc_lang = gr.Dropdown(LANGUAGES, value="Python", label="Language")
387
- doc_style = gr.Dropdown(["Docstrings", "Comments", "Both", "README"], value="Both", label="Style")
388
- doc_btn = gr.Button("📝 Document", variant="primary")
 
 
 
389
  with gr.Column(scale=1):
390
- doc_output = gr.Code(label="Documented", lines=12)
391
 
 
392
  with gr.TabItem("🚀 Optimize"):
393
  with gr.Row():
394
  with gr.Column(scale=1):
395
- opt_input = gr.Code(label="📋 Code", lines=12)
396
  with gr.Row():
397
- opt_lang = gr.Dropdown(LANGUAGES, value="Python", label="Language")
398
- opt_focus = gr.Dropdown(["All", "Performance", "Readability", "Memory"], value="All", label="Focus")
399
- opt_btn = gr.Button("🚀 Optimize", variant="primary")
 
 
 
400
  with gr.Column(scale=1):
401
- opt_output = gr.Markdown(label="Optimized")
402
 
 
403
  with gr.TabItem("🛠️ Tools"):
 
 
404
  gr.Markdown("### 🎯 Regex Builder")
405
  with gr.Row():
406
  with gr.Column(scale=1):
407
- regex_desc = gr.Textbox(label="Describe pattern", lines=2)
 
 
 
 
408
  regex_btn = gr.Button("🎯 Build Regex", variant="primary")
409
  with gr.Column(scale=1):
410
- regex_output = gr.Markdown(label="Pattern")
411
 
412
- gr.Markdown("---\n### 🔗 API Builder")
 
 
 
413
  with gr.Row():
414
  with gr.Column(scale=1):
415
- api_desc = gr.Textbox(label="Describe endpoint", lines=2)
416
- api_fw = gr.Dropdown(["FastAPI", "Express", "Flask", "Gin"], value="FastAPI", label="Framework")
 
 
 
 
 
 
 
417
  api_btn = gr.Button("🔗 Build API", variant="primary")
418
  with gr.Column(scale=1):
419
- api_output = gr.Code(label="API Code", lines=12)
420
 
421
- gr.Markdown("<center>🔒 100% Local • Robust Error Handling</center>")
 
 
 
 
 
 
422
 
423
- # Events
 
424
  def respond(message, history, model, temp, tokens):
425
  history = history or []
426
- for updated in chat_stream(message, history, model, temp, tokens):
427
- yield updated, ""
428
 
429
  msg.submit(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
430
  send.click(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
431
  clear.click(lambda: [], None, chatbot)
432
- transcribe.click(transcribe_audio, audio, msg)
433
 
434
  gen_btn.click(generate_code, [gen_prompt, gen_lang, model_dropdown, gen_temp, max_tokens], gen_output)
435
  explain_btn.click(explain_code, [explain_input, model_dropdown, explain_detail, max_tokens], explain_output)
436
  fix_btn.click(fix_code, [fix_input, fix_error, model_dropdown, max_tokens], fix_output)
437
  review_btn.click(review_code, [review_input, model_dropdown, max_tokens], review_output)
438
  convert_btn.click(convert_code, [convert_input, convert_from, convert_to, model_dropdown, max_tokens], convert_output)
439
- test_btn.click(generate_tests, [test_input, test_lang, test_fw, model_dropdown, max_tokens], test_output)
440
  doc_btn.click(document_code, [doc_input, doc_lang, doc_style, model_dropdown, max_tokens], doc_output)
441
  opt_btn.click(optimize_code, [opt_input, opt_lang, opt_focus, model_dropdown, max_tokens], opt_output)
442
  regex_btn.click(build_regex, [regex_desc, model_dropdown, max_tokens], regex_output)
443
- api_btn.click(build_api, [api_desc, api_fw, model_dropdown, max_tokens], api_output)
444
 
 
445
  demo.launch(server_name="0.0.0.0", server_port=7860)
 
22
  }
23
 
24
  MODEL_INFO = {
25
+ "⭐ Qwen3 30B-A3B (Best)": "🏆 Best quality • MoE 30B/3B active • Complex tasks",
26
  "Qwen2.5 Coder 7B": "⚖️ Balanced • Great for most tasks",
27
  "Qwen2.5 Coder 3B": "🚀 Fast & capable • Recommended",
28
  "Qwen2.5 Coder 1.5B (Fast)": "⚡ Fastest • Simple tasks",
29
  "DeepSeek Coder 6.7B": "🧠 Complex logic • Algorithms",
30
  "DeepSeek Coder 1.3B (Fast)": "⚡ Quick completions",
31
+ "StarCoder2 7B": "🐙 GitHub trained • Real-world patterns",
32
  "StarCoder2 3B": "🐙 Fast GitHub style",
33
  "CodeGemma 7B": "🔷 Google • Strong docs",
34
  "CodeGemma 2B (Fast)": "🔷 Quick & efficient",
 
41
  "HTML/CSS", "SQL", "Bash", "PowerShell", "Lua"
42
  ]
43
 
44
+ # ===== WHISPER INIT =====
45
  whisper_model = None
 
 
 
 
 
 
46
 
47
+ def init_whisper():
48
+ global whisper_model
49
+ try:
50
+ print("Loading Whisper...")
51
+ whisper_model = WhisperModel("tiny", device="cpu", compute_type="int8")
52
+ print("✅ Whisper ready!")
53
+ return True
54
+ except Exception as e:
55
+ print(f"❌ Whisper failed to load: {e}")
56
+ return False
57
+
58
+ init_whisper()
59
+
60
+ # ===== HELPER FUNCTIONS =====
61
 
62
+ def check_ollama_health():
63
  try:
64
  r = requests.get(f"{OLLAMA_URL}/api/tags", timeout=5)
65
  return r.status_code == 200
 
72
  if r.status_code == 200:
73
  models = r.json().get("models", [])
74
  return f"🟢 Online • {len(models)} models"
75
+ except requests.exceptions.ConnectionError:
76
+ return "🔴 Offline • Ollama not running"
77
+ except requests.exceptions.Timeout:
78
+ return "🟡 Slow • Connection timeout"
79
+ except Exception as e:
80
+ return f"🔴 Error • {str(e)[:30]}"
81
+ return "🟡 Starting..."
82
 
83
  def get_model_info(model_name):
84
  return MODEL_INFO.get(model_name, "")
85
 
86
+ def validate_input(text, field_name="Input"):
87
  if not text or not text.strip():
88
+ return False, f"⚠️ {field_name} cannot be empty."
89
  if len(text) > 100000:
90
+ return False, f"⚠️ {field_name} is too long (max 100KB)."
91
  return True, None
92
 
93
  def transcribe_audio(audio):
94
  if audio is None:
95
  return ""
96
+
97
  if whisper_model is None:
98
+ return "❌ Whisper not loaded. Voice input unavailable."
99
+
100
  try:
101
  segments, _ = whisper_model.transcribe(audio)
102
  text = " ".join([seg.text for seg in segments]).strip()
103
+ if not text:
104
+ return "⚠️ No speech detected. Try again."
105
+ return text
106
+ except FileNotFoundError:
107
+ return "❌ Audio file not found."
108
  except Exception as e:
109
  return f"❌ Transcription failed: {str(e)[:50]}"
110
 
111
+ def call_ollama_with_retry(model_name, prompt, temperature=0.7, max_tokens=2048):
112
+ if not check_ollama_health():
113
+ return "❌ **Ollama is not running.**\n\nPlease wait for Ollama to start, or check the logs."
114
 
115
  model = MODELS.get(model_name, "qwen2.5-coder:3b")
116
 
 
118
  try:
119
  r = requests.post(
120
  f"{OLLAMA_URL}/api/generate",
121
+ json={
122
+ "model": model,
123
+ "prompt": prompt,
124
+ "stream": False,
125
+ "options": {
126
+ "temperature": temperature,
127
+ "num_predict": max_tokens
128
+ }
129
+ },
130
  timeout=TIMEOUT
131
  )
132
 
133
  if r.status_code == 200:
134
  response = r.json().get("response", "")
135
+ if not response.strip():
136
+ return "⚠️ Model returned empty response. Try rephrasing your request."
137
+ return response
138
+
139
  elif r.status_code == 404:
140
+ return f"❌ **Model not found:** `{model}`\n\nThe model may still be downloading. Check logs or try a different model."
141
+
142
+ elif r.status_code == 500:
143
+ error_msg = r.text[:200] if r.text else "Unknown server error"
144
+ if "out of memory" in error_msg.lower():
145
+ return "❌ **Out of memory.**\n\nTry a smaller model like `Qwen2.5 Coder 1.5B (Fast)`."
146
+ return f"❌ **Server error:** {error_msg}"
147
+
148
  else:
149
+ return f"❌ **HTTP {r.status_code}:** {r.text[:100]}"
150
+
151
  except requests.exceptions.Timeout:
152
  if attempt < MAX_RETRIES - 1:
153
  time.sleep(2)
154
  continue
155
+ return "❌ **Request timed out.**\n\nThe model is taking too long. Try:\n- A smaller model\n- Shorter input\n- Lower max tokens"
156
+
157
  except requests.exceptions.ConnectionError:
158
+ if attempt < MAX_RETRIES - 1:
159
+ time.sleep(2)
160
+ continue
161
+ return "❌ **Connection failed.**\n\nOllama may have crashed. Check the logs."
162
+
163
+ except json.JSONDecodeError:
164
+ return "❌ **Invalid response from Ollama.**\n\nThe model returned malformed data."
165
+
166
  except Exception as e:
167
+ return f"❌ **Unexpected error:** {str(e)[:100]}"
168
 
169
+ return "❌ **Max retries reached.** Please try again."
170
 
171
  def extract_code(text):
172
  if not text or "```" not in text:
173
  return text
174
+
175
  try:
176
  parts = text.split("```")
177
  if len(parts) >= 2:
 
179
  if "\n" in code:
180
  code = code.split("\n", 1)[-1]
181
  return code.strip()
182
+ except Exception:
183
  pass
184
  return text
185
 
 
191
  yield history + [[message, error]]
192
  return
193
 
194
+ if not check_ollama_health():
195
+ yield history + [[message, "❌ **Ollama is not running.** Please wait for it to start."]]
196
  return
197
 
198
  model = MODELS.get(model_name, "qwen2.5-coder:3b")
199
+ messages = [{"role": "system", "content": "You are an expert coding assistant. Provide clear, well-commented code. Always use markdown code blocks with language tags."}]
200
 
201
  for user_msg, assistant_msg in history:
202
  messages.append({"role": "user", "content": user_msg})
 
208
  try:
209
  response = requests.post(
210
  f"{OLLAMA_URL}/api/chat",
211
+ json={
212
+ "model": model,
213
+ "messages": messages,
214
+ "stream": True,
215
+ "options": {"temperature": temperature, "num_predict": max_tokens}
216
+ },
217
+ stream=True,
218
+ timeout=TIMEOUT
219
  )
220
 
221
+ if response.status_code == 404:
222
+ yield history + [[message, f"❌ **Model not found:** `{model}`\n\nTry a different model."]]
223
+ return
224
+
225
  if response.status_code != 200:
226
+ yield history + [[message, f"❌ **Error {response.status_code}:** {response.text[:100]}"]]
227
  return
228
 
229
  full = ""
 
231
  if line:
232
  try:
233
  data = json.loads(line)
234
+ if "error" in data:
235
+ yield history + [[message, f"❌ **Model error:** {data['error']}"]]
236
+ return
237
  if "message" in data:
238
  full += data["message"].get("content", "")
239
  yield history + [[message, full]]
240
+ except json.JSONDecodeError:
241
  continue
242
+
243
+ if not full.strip():
244
+ yield history + [[message, "⚠️ Model returned empty response. Try rephrasing."]]
245
+
246
  except requests.exceptions.Timeout:
247
+ yield history + [[message, "❌ **Request timed out.** Try a smaller model or shorter input."]]
248
+ except requests.exceptions.ConnectionError:
249
+ yield history + [[message, "❌ **Connection lost.** Ollama may have crashed."]]
250
  except Exception as e:
251
+ yield history + [[message, f"❌ **Error:** {str(e)[:100]}"]]
252
 
253
  def generate_code(prompt, language, model_name, temperature, max_tokens):
254
  valid, error = validate_input(prompt, "Description")
 
256
  return error
257
 
258
  full_prompt = (
259
+ f"Write {language} code for the following task:\n\n"
260
+ f"{prompt}\n\n"
261
+ "Requirements:\n"
262
+ "- Clean, production-ready code\n"
263
+ "- Add helpful comments\n"
264
+ "- Handle edge cases\n"
265
+ "- Output ONLY the code in a markdown code block"
266
  )
267
+
268
+ result = call_ollama_with_retry(model_name, full_prompt, temperature, max_tokens)
269
+ if result.startswith("❌") or result.startswith("⚠️"):
270
+ return result
271
+ return extract_code(result)
272
 
273
  def explain_code(code, model_name, detail_level, max_tokens):
274
  valid, error = validate_input(code, "Code")
275
  if not valid:
276
  return error
277
 
278
+ detail_prompts = {
279
+ "Brief": "Give a brief 2-3 sentence explanation of what this code does.",
280
+ "Normal": "Explain what this code does, including the main logic and any important details.",
281
+ "Detailed": "Give a detailed explanation including: purpose, how it works step-by-step, time/space complexity, and potential improvements."
282
  }
283
+
284
+ prompt = detail_prompts.get(detail_level, detail_prompts["Normal"]) + "\n\nCode:\n" + code
285
+ return call_ollama_with_retry(model_name, prompt, 0.5, max_tokens)
286
 
287
  def fix_code(code, error_msg, model_name, max_tokens):
288
  valid, error = validate_input(code, "Code")
289
  if not valid:
290
  return error
291
 
292
+ error_text = error_msg if error_msg and error_msg.strip() else "Code is not working as expected"
293
+ prompt = (
294
+ "Fix the following code and explain what was wrong.\n\n"
295
+ "Code:\n" + code + "\n\n"
296
+ "Error/Problem: " + error_text + "\n\n"
297
+ "Provide:\n"
298
+ "1. The fixed code in a markdown code block\n"
299
+ "2. Brief explanation of what was wrong\n"
300
+ "3. Any suggestions to prevent similar issues"
301
+ )
302
+ return call_ollama_with_retry(model_name, prompt, 0.3, max_tokens)
303
 
304
  def review_code(code, model_name, max_tokens):
305
  valid, error = validate_input(code, "Code")
 
307
  return error
308
 
309
  prompt = (
310
+ "Review this code and provide feedback on:\n\n"
311
+ "1. **Code Quality** - Is it clean, readable, well-structured?\n"
312
+ "2. **Bugs/Issues** - Any potential bugs or problems?\n"
313
+ "3. **Performance** - Any performance concerns?\n"
314
+ "4. **Security** - Any security issues?\n"
315
+ "5. **Suggestions** - How could it be improved?\n\n"
316
  "Code:\n" + code
317
  )
318
+ return call_ollama_with_retry(model_name, prompt, 0.4, max_tokens)
319
 
320
+ def convert_code(code, source_lang, target_lang, model_name, max_tokens):
321
  valid, error = validate_input(code, "Code")
322
  if not valid:
323
  return error
 
 
324
 
325
+ if source_lang == target_lang:
326
+ return "⚠️ Source and target languages are the same."
327
+
328
+ prompt = (
329
+ f"Convert this {source_lang} code to {target_lang}.\n\n"
330
+ "Requirements:\n"
331
+ f"- Write idiomatic {target_lang} code\n"
332
+ "- Preserve the functionality exactly\n"
333
+ "- Add comments explaining any language-specific differences\n"
334
+ "- Output ONLY the converted code in a markdown code block\n\n"
335
+ f"{source_lang} Code:\n" + code
336
+ )
337
+
338
+ result = call_ollama_with_retry(model_name, prompt, 0.3, max_tokens)
339
+ if result.startswith("❌") or result.startswith("⚠️"):
340
+ return result
341
+ return extract_code(result)
342
 
343
  def generate_tests(code, language, framework, model_name, max_tokens):
344
  valid, error = validate_input(code, "Code")
345
  if not valid:
346
  return error
347
 
348
+ frameworks = {
349
+ "Python": "pytest",
350
+ "JavaScript": "Jest",
351
+ "TypeScript": "Jest",
352
+ "Java": "JUnit",
353
+ "C#": "NUnit",
354
+ "Go": "testing package",
355
+ "Rust": "built-in test framework",
356
+ "Ruby": "RSpec",
357
+ "PHP": "PHPUnit",
358
+ }
359
+
360
+ fw = framework if framework and framework.strip() else frameworks.get(language, "appropriate testing framework")
361
+
362
+ prompt = (
363
+ f"Generate comprehensive unit tests for this {language} code using {fw}.\n\n"
364
+ "Requirements:\n"
365
+ "- Test all functions/methods\n"
366
+ "- Include edge cases\n"
367
+ "- Include both positive and negative tests\n"
368
+ "- Add descriptive test names\n"
369
+ "- Output ONLY the test code in a markdown code block\n\n"
370
+ "Code to test:\n" + code
371
+ )
372
+
373
+ result = call_ollama_with_retry(model_name, prompt, 0.3, max_tokens)
374
+ if result.startswith("❌") or result.startswith("⚠️"):
375
+ return result
376
+ return extract_code(result)
377
 
378
  def document_code(code, language, style, model_name, max_tokens):
379
  valid, error = validate_input(code, "Code")
380
  if not valid:
381
  return error
382
 
383
+ styles = {
384
+ "Docstrings": "Add comprehensive docstrings to all functions, classes, and methods",
385
+ "Comments": "Add inline comments explaining the logic",
386
+ "Both": "Add both docstrings and inline comments",
387
+ "README": "Generate a README.md documenting this code"
388
+ }
389
+
390
+ prompt = (
391
+ f"Document this {language} code.\n\n"
392
+ f"Task: {styles.get(style, styles['Both'])}\n\n"
393
+ "Requirements:\n"
394
+ "- Be clear and concise\n"
395
+ "- Explain parameters, return values, and exceptions\n"
396
+ "- Include usage examples where helpful\n"
397
+ "- Output the fully documented code in a markdown code block\n\n"
398
+ "Code:\n" + code
399
+ )
400
+
401
+ result = call_ollama_with_retry(model_name, prompt, 0.4, max_tokens)
402
+ if style == "README" or result.startswith("❌") or result.startswith("⚠️"):
403
+ return result
404
+ return extract_code(result)
405
 
406
def optimize_code(code, language, focus, model_name, max_tokens):
    """Ask the selected model to optimize `code` for the chosen focus area.

    Returns the model's markdown answer (explanation + optimized code), or a
    validation error string when the input is empty/too long.
    """
    ok, problem = validate_input(code, "Code")
    if not ok:
        return problem

    # One instruction per focus choice; anything unrecognized falls back to "All".
    goals = {
        "Performance": "Optimize for speed and efficiency. Reduce time complexity where possible.",
        "Readability": "Refactor for better readability and maintainability. Follow best practices.",
        "Memory": "Optimize memory usage. Reduce allocations and improve data structures.",
        "All": "Optimize for performance, readability, and memory usage.",
    }
    goal = goals.get(focus, goals["All"])

    pieces = [
        f"Optimize this {language} code.\n\n",
        f"Focus: {goal}\n\n",
        "Requirements:\n",
        "- Explain what you changed and why\n",
        "- Preserve the original functionality\n",
        "- Show before/after complexity if relevant\n",
        "- Output the optimized code in a markdown code block\n\n",
        "Code:\n",
        code,
    ]
    # Low temperature (0.3): keep the rewrite close to the source, not creative.
    return call_ollama_with_retry(model_name, "".join(pieces), 0.3, max_tokens)
 
431
def build_regex(description, model_name, max_tokens):
    """Turn a plain-English requirement into a regex pattern.

    The model is asked for the pattern itself plus an explanation, example
    matches/non-matches, and a Python usage snippet. Returns the raw markdown
    answer, or a validation error string for empty/oversized input.
    """
    ok, problem = validate_input(description, "Description")
    if not ok:
        return problem

    request = (
        "Create a regex pattern for the following requirement:\n\n"
        + description
        + "\n\n"
          "Provide:\n"
          "1. The regex pattern\n"
          "2. Explanation of each part\n"
          "3. Example matches and non-matches\n"
          "4. Code example in Python showing usage"
    )
    # Regex construction should be precise, so use a low temperature (0.3).
    return call_ollama_with_retry(model_name, request, 0.3, max_tokens)
 
448
def build_api(description, framework, model_name, max_tokens):
    """Generate a REST API endpoint for `framework` from a plain-English spec.

    On success, returns only the extracted code block from the model answer.
    Backend error strings (prefixed "❌" or "⚠️" by call_ollama_with_retry /
    validate_input) are passed through unchanged so the UI can display them.
    """
    valid, error = validate_input(description, "Description")
    if not valid:
        return error

    prompt = (
        f"Create a REST API endpoint using {framework}.\n\n"
        f"Requirements:\n{description}\n\n"
        "Include:\n"
        "- Route definition with proper HTTP methods\n"
        "- Request validation\n"
        "- Error handling\n"
        "- Response formatting\n"
        "- Brief documentation comments\n"
        "- Output the code in a markdown code block"
    )

    result = call_ollama_with_retry(model_name, prompt, 0.3, max_tokens)
    # str.startswith accepts a tuple of prefixes — one check replaces the
    # duplicated `startswith(...) or startswith(...)` chain.
    if result.startswith(("❌", "⚠️")):
        return result
    return extract_code(result)
+
470
# ===== PREMIUM CSS =====
# Custom dark theme for the app. NOTE(review): this string only takes effect
# if it is passed to gr.Blocks(css=css) — defining it by itself does nothing.
# The .header-section / .footer classes below are referenced by the gr.HTML
# blocks in the UI.

css = """
/* ===== GLOBAL ===== */
:root {
    --primary: #6366f1;
    --primary-dark: #4f46e5;
    --secondary: #8b5cf6;
    --accent: #06b6d4;
    --bg-dark: #0f172a;
    --bg-card: #1e293b;
    --bg-hover: #334155;
    --text-primary: #f1f5f9;
    --text-secondary: #94a3b8;
    --border: #334155;
    --success: #10b981;
    --warning: #f59e0b;
    --error: #ef4444;
    --gradient: linear-gradient(135deg, #6366f1 0%, #8b5cf6 50%, #06b6d4 100%);
}

.gradio-container {
    max-width: 1500px !important;
    margin: auto !important;
    background: var(--bg-dark) !important;
    font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif !important;
}

/* ===== HEADER ===== */
.header-section {
    background: var(--gradient);
    border-radius: 20px;
    padding: 32px 40px;
    margin-bottom: 24px;
    position: relative;
    overflow: hidden;
    box-shadow: 0 20px 40px rgba(99, 102, 241, 0.3);
}

.header-section::before {
    content: '';
    position: absolute;
    top: -50%;
    right: -50%;
    width: 100%;
    height: 200%;
    background: radial-gradient(circle, rgba(255,255,255,0.1) 0%, transparent 60%);
    animation: pulse 4s ease-in-out infinite;
}

@keyframes pulse {
    0%, 100% { transform: scale(1); opacity: 0.5; }
    50% { transform: scale(1.1); opacity: 0.8; }
}

.header-content {
    position: relative;
    z-index: 1;
    display: flex;
    justify-content: space-between;
    align-items: center;
    flex-wrap: wrap;
    gap: 20px;
}

.header-title {
    color: white;
    margin: 0;
    font-size: 2.8rem;
    font-weight: 800;
    letter-spacing: -0.02em;
    text-shadow: 0 2px 10px rgba(0,0,0,0.2);
}

.header-subtitle {
    color: rgba(255,255,255,0.9);
    margin: 8px 0 0 0;
    font-size: 1.1rem;
    font-weight: 400;
}

.header-badges {
    display: flex;
    gap: 10px;
    flex-wrap: wrap;
}

.badge {
    background: rgba(255,255,255,0.2);
    backdrop-filter: blur(10px);
    padding: 8px 16px;
    border-radius: 50px;
    font-size: 0.85rem;
    font-weight: 500;
    color: white;
    border: 1px solid rgba(255,255,255,0.2);
}

/* ===== STATUS BAR ===== */
.status-bar {
    background: var(--bg-card);
    border: 1px solid var(--border);
    border-radius: 16px;
    padding: 16px 24px;
    margin-bottom: 20px;
}

/* ===== SETTINGS PANEL ===== */
.settings-panel {
    background: var(--bg-card);
    border: 1px solid var(--border);
    border-radius: 16px;
    padding: 20px 24px;
    margin-bottom: 20px;
}

/* ===== MODEL INFO ===== */
.model-info-box {
    background: linear-gradient(135deg, rgba(99, 102, 241, 0.1) 0%, rgba(139, 92, 246, 0.1) 100%);
    border: 1px solid rgba(99, 102, 241, 0.3);
    border-radius: 12px;
    padding: 12px 18px;
    font-size: 0.9rem;
    color: var(--text-secondary);
    margin-top: 12px;
    margin-bottom: 20px;
}

/* ===== TABS ===== */
.tab-nav {
    background: var(--bg-card) !important;
    border: 1px solid var(--border) !important;
    border-radius: 16px !important;
    padding: 8px !important;
    gap: 6px !important;
    margin-bottom: 20px !important;
    flex-wrap: wrap !important;
}

.tab-nav button {
    background: transparent !important;
    border: none !important;
    border-radius: 12px !important;
    padding: 12px 20px !important;
    font-weight: 600 !important;
    font-size: 0.9rem !important;
    color: var(--text-secondary) !important;
    transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1) !important;
}

.tab-nav button:hover {
    background: var(--bg-hover) !important;
    color: var(--text-primary) !important;
}

.tab-nav button.selected {
    background: var(--gradient) !important;
    color: white !important;
    box-shadow: 0 4px 15px rgba(99, 102, 241, 0.4) !important;
}

/* ===== CHATBOT ===== */
.chatbot-container {
    background: var(--bg-card) !important;
    border: 1px solid var(--border) !important;
    border-radius: 16px !important;
}

/* ===== INPUTS ===== */
textarea, input[type="text"] {
    background: var(--bg-card) !important;
    border: 1px solid var(--border) !important;
    border-radius: 12px !important;
    color: var(--text-primary) !important;
    padding: 14px 18px !important;
    font-size: 0.95rem !important;
    transition: all 0.2s ease !important;
}

textarea:focus, input[type="text"]:focus {
    border-color: var(--primary) !important;
    box-shadow: 0 0 0 3px rgba(99, 102, 241, 0.2) !important;
    outline: none !important;
}

/* ===== CODE BLOCKS ===== */
.code-wrap {
    border-radius: 16px !important;
    overflow: hidden !important;
    border: 1px solid var(--border) !important;
}

/* ===== BUTTONS ===== */
.primary-btn, button.primary {
    background: var(--gradient) !important;
    border: none !important;
    border-radius: 12px !important;
    padding: 14px 28px !important;
    font-weight: 600 !important;
    font-size: 0.95rem !important;
    color: white !important;
    cursor: pointer !important;
    transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1) !important;
    box-shadow: 0 4px 15px rgba(99, 102, 241, 0.3) !important;
}

.primary-btn:hover, button.primary:hover {
    transform: translateY(-2px) !important;
    box-shadow: 0 8px 25px rgba(99, 102, 241, 0.4) !important;
}

.secondary-btn {
    background: var(--bg-hover) !important;
    border: 1px solid var(--border) !important;
    border-radius: 12px !important;
    padding: 12px 20px !important;
    font-weight: 500 !important;
    color: var(--text-secondary) !important;
    transition: all 0.2s ease !important;
}

.secondary-btn:hover {
    background: var(--bg-card) !important;
    border-color: var(--primary) !important;
    color: var(--text-primary) !important;
}

/* ===== MARKDOWN OUTPUT ===== */
.markdown-output {
    background: var(--bg-card);
    border: 1px solid var(--border);
    border-radius: 16px;
    padding: 24px;
    color: var(--text-primary);
    line-height: 1.7;
}

/* ===== DIVIDER ===== */
.divider {
    height: 1px;
    background: var(--border);
    margin: 24px 0;
}

/* ===== TOOL SECTION ===== */
.tool-section {
    background: var(--bg-card);
    border: 1px solid var(--border);
    border-radius: 16px;
    padding: 24px;
    margin-bottom: 20px;
}

.tool-title {
    color: var(--text-primary);
    font-size: 1.2rem;
    font-weight: 600;
    margin-bottom: 16px;
}

/* ===== FOOTER ===== */
.footer {
    text-align: center;
    padding: 24px;
    color: var(--text-secondary);
    font-size: 0.85rem;
    border-top: 1px solid var(--border);
    margin-top: 32px;
}

/* ===== SCROLLBAR ===== */
::-webkit-scrollbar {
    width: 8px;
    height: 8px;
}

::-webkit-scrollbar-track {
    background: var(--bg-dark);
}

::-webkit-scrollbar-thumb {
    background: var(--border);
    border-radius: 4px;
}

::-webkit-scrollbar-thumb:hover {
    background: var(--text-secondary);
}

/* ===== HIDE DEFAULT FOOTER ===== */
footer { display: none !important; }

/* ===== RESPONSIVE ===== */
@media (max-width: 768px) {
    .header-title { font-size: 2rem; }
    .header-content { flex-direction: column; text-align: center; }
    .header-badges { justify-content: center; }
    .tab-nav button { padding: 10px 14px !important; font-size: 0.8rem !important; }
}
"""
+
771
# ===== UI (Gradio 6.0 Compatible) =====

# BUG FIX: the `css` string above was defined but never handed to Gradio, so
# the whole custom theme (and the classes used by the gr.HTML blocks below)
# was dead. Custom CSS is applied by passing it to the Blocks constructor.
with gr.Blocks(title="Axon v6", css=css) as demo:

    # Header banner — markup styled by .header-section / .badge in `css`.
    gr.HTML("""
    <div class="header-section">
        <div class="header-content">
            <div>
                <h1 class="header-title">🔥 Axon v6</h1>
                <p class="header-subtitle">AI-Powered Coding Assistant</p>
            </div>
            <div class="header-badges">
                <span class="badge">🤖 10 Models</span>
                <span class="badge">🛠️ 9 Tools</span>
                <span class="badge">🔒 100% Local</span>
                <span class="badge">⚡ No Rate Limits</span>
            </div>
        </div>
    </div>
    """)

    # Live Ollama status, re-polled every 5 seconds via the callable value.
    with gr.Row():
        status = gr.Markdown(value=get_status, every=5)

    # Global settings shared by every tab: model choice + generation knobs.
    with gr.Row():
        model_dropdown = gr.Dropdown(
            choices=list(MODELS.keys()),
            value="Qwen2.5 Coder 3B",
            label="🤖 Model",
            scale=3
        )
        temperature = gr.Slider(0, 1, value=0.7, step=0.1, label="🌡️ Creativity", scale=2)
        max_tokens = gr.Slider(256, 8192, value=2048, step=256, label="📏 Max Tokens", scale=2)

    # Blurb describing the selected model; initial text matches the default
    # dropdown value ("Qwen2.5 Coder 3B").
    model_info_display = gr.Markdown(value="🚀 Fast & capable • Recommended")
    model_dropdown.change(get_model_info, model_dropdown, model_info_display)

    with gr.Tabs():

        # ===== CHAT =====
        with gr.TabItem("💬 Chat"):
            chatbot = gr.Chatbot(height=500)
            with gr.Row():
                msg = gr.Textbox(
                    placeholder="Ask anything about coding... Press Enter to send",
                    show_label=False, scale=8, lines=1
                )
                send = gr.Button("Send ➤", variant="primary", scale=1)
            with gr.Row():
                audio_input = gr.Audio(sources=["microphone"], type="filepath", label="🎤 Voice", scale=2)
                transcribe_btn = gr.Button("🎤 Transcribe", scale=1)
                clear = gr.Button("🗑️ Clear", scale=1)
            with gr.Accordion("💡 Quick Prompts", open=False):
                gr.Examples([
                    "Write a Python function to find all prime numbers up to n",
                    "Explain async/await vs promises in JavaScript",
                    "How do I implement a binary search tree?",
                    "Write a REST API with authentication in FastAPI"
                ], inputs=msg)

        # ===== GENERATE =====
        with gr.TabItem("⚡ Generate"):
            with gr.Row():
                with gr.Column(scale=1):
                    gen_prompt = gr.Textbox(
                        label="📝 Describe what you want to build",
                        placeholder="e.g., A function that validates email addresses with regex",
                        lines=5
                    )
                    with gr.Row():
                        gen_lang = gr.Dropdown(LANGUAGES, value="Python", label="🔤 Language", scale=2)
                        gen_temp = gr.Slider(0, 1, value=0.3, step=0.1, label="🌡️", scale=1)
                    gen_btn = gr.Button("⚡ Generate Code", variant="primary", size="lg")
                with gr.Column(scale=2):
                    gen_output = gr.Code(label="Generated Code", language="python", lines=22)

        # ===== EXPLAIN =====
        with gr.TabItem("🔍 Explain"):
            with gr.Row():
                with gr.Column(scale=1):
                    explain_input = gr.Code(label="📋 Paste your code", lines=14)
                    explain_detail = gr.Radio(
                        ["Brief", "Normal", "Detailed"],
                        value="Normal", label="📊 Detail Level"
                    )
                    explain_btn = gr.Button("🔍 Explain Code", variant="primary", size="lg")
                with gr.Column(scale=1):
                    explain_output = gr.Markdown(label="Explanation")

        # ===== DEBUG =====
        with gr.TabItem("🔧 Debug"):
            with gr.Row():
                with gr.Column(scale=1):
                    fix_input = gr.Code(label="🐛 Paste buggy code", lines=12)
                    fix_error = gr.Textbox(
                        label="❌ Error message (optional)",
                        placeholder="Paste error or describe the issue",
                        lines=3
                    )
                    fix_btn = gr.Button("🔧 Fix Code", variant="primary", size="lg")
                with gr.Column(scale=1):
                    fix_output = gr.Markdown(label="Solution")

        # ===== REVIEW =====
        with gr.TabItem("📋 Review"):
            with gr.Row():
                with gr.Column(scale=1):
                    review_input = gr.Code(label="📋 Code to review", lines=16)
                    review_btn = gr.Button("📋 Review Code", variant="primary", size="lg")
                with gr.Column(scale=1):
                    review_output = gr.Markdown(label="Code Review")

        # ===== CONVERT =====
        with gr.TabItem("🔄 Convert"):
            with gr.Row():
                with gr.Column(scale=1):
                    convert_input = gr.Code(label="📥 Source Code", lines=14)
                    with gr.Row():
                        convert_from = gr.Dropdown(LANGUAGES, value="Python", label="From", scale=1)
                        convert_to = gr.Dropdown(LANGUAGES, value="JavaScript", label="To", scale=1)
                    convert_btn = gr.Button("🔄 Convert Code", variant="primary", size="lg")
                with gr.Column(scale=1):
                    convert_output = gr.Code(label="📤 Converted Code", lines=14)

        # ===== TEST =====
        with gr.TabItem("🧪 Test"):
            with gr.Row():
                with gr.Column(scale=1):
                    test_input = gr.Code(label="📋 Code to test", lines=14)
                    with gr.Row():
                        test_lang = gr.Dropdown(LANGUAGES[:12], value="Python", label="Language", scale=2)
                        test_framework = gr.Textbox(label="Framework", placeholder="e.g., pytest", scale=2)
                    test_btn = gr.Button("🧪 Generate Tests", variant="primary", size="lg")
                with gr.Column(scale=1):
                    test_output = gr.Code(label="Generated Tests", lines=14)

        # ===== DOCUMENT =====
        with gr.TabItem("📝 Document"):
            with gr.Row():
                with gr.Column(scale=1):
                    doc_input = gr.Code(label="📋 Code to document", lines=14)
                    with gr.Row():
                        doc_lang = gr.Dropdown(LANGUAGES, value="Python", label="Language", scale=2)
                        doc_style = gr.Dropdown(
                            ["Docstrings", "Comments", "Both", "README"],
                            value="Both", label="Style", scale=2
                        )
                    doc_btn = gr.Button("📝 Document", variant="primary", size="lg")
                with gr.Column(scale=1):
                    doc_output = gr.Code(label="Documented Code", lines=14)

        # ===== OPTIMIZE =====
        with gr.TabItem("🚀 Optimize"):
            with gr.Row():
                with gr.Column(scale=1):
                    opt_input = gr.Code(label="📋 Code to optimize", lines=14)
                    with gr.Row():
                        opt_lang = gr.Dropdown(LANGUAGES, value="Python", label="Language", scale=2)
                        opt_focus = gr.Dropdown(
                            ["All", "Performance", "Readability", "Memory"],
                            value="All", label="Focus", scale=2
                        )
                    opt_btn = gr.Button("🚀 Optimize", variant="primary", size="lg")
                with gr.Column(scale=1):
                    opt_output = gr.Markdown(label="Optimized Code")

        # ===== TOOLS =====
        with gr.TabItem("🛠️ Tools"):

            # Regex Builder
            gr.Markdown("### 🎯 Regex Builder")
            with gr.Row():
                with gr.Column(scale=1):
                    regex_desc = gr.Textbox(
                        label="Describe the pattern",
                        placeholder="e.g., Match email addresses, validate phone numbers...",
                        lines=3
                    )
                    regex_btn = gr.Button("🎯 Build Regex", variant="primary")
                with gr.Column(scale=1):
                    regex_output = gr.Markdown(label="Regex Pattern")

            gr.Markdown("---")

            # API Builder
            gr.Markdown("### 🔗 API Builder")
            with gr.Row():
                with gr.Column(scale=1):
                    api_desc = gr.Textbox(
                        label="Describe the endpoint",
                        placeholder="e.g., POST endpoint for user registration...",
                        lines=3
                    )
                    api_framework = gr.Dropdown(
                        ["FastAPI (Python)", "Express (Node.js)", "Gin (Go)", "Spring Boot (Java)", "Flask (Python)", "Django REST (Python)"],
                        value="FastAPI (Python)", label="Framework"
                    )
                    api_btn = gr.Button("🔗 Build API", variant="primary")
                with gr.Column(scale=1):
                    api_output = gr.Code(label="API Code", lines=14)

    # Footer — styled by .footer in `css`.
    gr.HTML("""
    <div class="footer">
        <p>🔒 Running 100% locally via Ollama • Your code never leaves your machine</p>
        <p style="margin-top: 8px; opacity: 0.7;">Axon v6 • Built with ❤️</p>
    </div>
    """)

    # ===== EVENT HANDLERS =====

    def respond(message, history, model, temp, tokens):
        """Stream chat updates into the chatbot and clear the input box."""
        history = history or []
        # chat_stream yields progressively-updated histories; pairing each
        # with "" clears the textbox on the first yield.
        for updated_history in chat_stream(message, history, model, temp, tokens):
            yield updated_history, ""

    msg.submit(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
    send.click(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
    clear.click(lambda: [], None, chatbot)
    transcribe_btn.click(transcribe_audio, audio_input, msg)

    gen_btn.click(generate_code, [gen_prompt, gen_lang, model_dropdown, gen_temp, max_tokens], gen_output)
    explain_btn.click(explain_code, [explain_input, model_dropdown, explain_detail, max_tokens], explain_output)
    fix_btn.click(fix_code, [fix_input, fix_error, model_dropdown, max_tokens], fix_output)
    review_btn.click(review_code, [review_input, model_dropdown, max_tokens], review_output)
    convert_btn.click(convert_code, [convert_input, convert_from, convert_to, model_dropdown, max_tokens], convert_output)
    test_btn.click(generate_tests, [test_input, test_lang, test_framework, model_dropdown, max_tokens], test_output)
    doc_btn.click(document_code, [doc_input, doc_lang, doc_style, model_dropdown, max_tokens], doc_output)
    opt_btn.click(optimize_code, [opt_input, opt_lang, opt_focus, model_dropdown, max_tokens], opt_output)
    regex_btn.click(build_regex, [regex_desc, model_dropdown, max_tokens], regex_output)
    api_btn.click(build_api, [api_desc, api_framework, model_dropdown, max_tokens], api_output)

# Bind to all interfaces so the app is reachable from outside the container.
demo.launch(server_name="0.0.0.0", server_port=7860)