AIencoder committed on
Commit
8ee6c34
·
verified ·
1 Parent(s): 1904e03

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +224 -838
app.py CHANGED
@@ -1,9 +1,12 @@
1
  import gradio as gr
2
  import requests
3
  import json
 
4
  from faster_whisper import WhisperModel
5
 
6
  OLLAMA_URL = "http://localhost:11434"
 
 
7
 
8
  MODELS = {
9
  "⭐ Qwen3 30B-A3B (Best)": "hf.co/bartowski/Qwen_Qwen3-30B-A3B-GGUF:Q4_K_M",
@@ -19,13 +22,13 @@ MODELS = {
19
  }
20
 
21
  MODEL_INFO = {
22
- "⭐ Qwen3 30B-A3B (Best)": "🏆 Best quality • MoE 30B/3B active • Complex tasks",
23
  "Qwen2.5 Coder 7B": "⚖️ Balanced • Great for most tasks",
24
  "Qwen2.5 Coder 3B": "🚀 Fast & capable • Recommended",
25
  "Qwen2.5 Coder 1.5B (Fast)": "⚡ Fastest • Simple tasks",
26
  "DeepSeek Coder 6.7B": "🧠 Complex logic • Algorithms",
27
  "DeepSeek Coder 1.3B (Fast)": "⚡ Quick completions",
28
- "StarCoder2 7B": "🐙 GitHub trained • Real-world patterns",
29
  "StarCoder2 3B": "🐙 Fast GitHub style",
30
  "CodeGemma 7B": "🔷 Google • Strong docs",
31
  "CodeGemma 2B (Fast)": "🔷 Quick & efficient",
@@ -38,67 +41,119 @@ LANGUAGES = [
38
  "HTML/CSS", "SQL", "Bash", "PowerShell", "Lua"
39
  ]
40
 
41
- print("Loading Whisper...")
42
- whisper_model = WhisperModel("tiny", device="cpu", compute_type="int8")
43
- print("Whisper ready!")
 
 
 
 
 
44
 
45
- # ===== HELPER FUNCTIONS =====
 
 
 
 
 
 
 
46
 
47
  def get_status():
48
  try:
49
- r = requests.get(f"{OLLAMA_URL}/api/tags", timeout=2)
50
  if r.status_code == 200:
51
  models = r.json().get("models", [])
52
  return f"🟢 Online • {len(models)} models"
53
  except:
54
  pass
55
- return "🟡 Starting..."
56
 
57
  def get_model_info(model_name):
58
  return MODEL_INFO.get(model_name, "")
59
 
 
 
 
 
 
 
 
60
  def transcribe_audio(audio):
61
  if audio is None:
62
  return ""
 
 
63
  try:
64
  segments, _ = whisper_model.transcribe(audio)
65
- return " ".join([seg.text for seg in segments]).strip()
 
66
  except Exception as e:
67
- return f"[STT Error: {e}]"
68
 
69
  def call_ollama(model_name, prompt, temperature=0.7, max_tokens=2048):
 
 
 
70
  model = MODELS.get(model_name, "qwen2.5-coder:3b")
71
- try:
72
- r = requests.post(
73
- f"{OLLAMA_URL}/api/generate",
74
- json={"model": model, "prompt": prompt, "stream": False, "options": {"temperature": temperature, "num_predict": max_tokens}},
75
- timeout=300
76
- )
77
- if r.status_code == 200:
78
- return r.json().get("response", "")
79
- return f"❌ Error: {r.text}"
80
- except Exception as e:
81
- return f"❌ Error: {e}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
82
 
83
  def extract_code(text):
84
- if "```" in text:
 
 
85
  parts = text.split("```")
86
  if len(parts) >= 2:
87
  code = parts[1]
88
  if "\n" in code:
89
  code = code.split("\n", 1)[-1]
90
  return code.strip()
 
 
91
  return text
92
 
93
  # ===== CORE FUNCTIONS =====
94
 
95
  def chat_stream(message, history, model_name, temperature, max_tokens):
96
- if not message.strip():
97
- yield history
 
 
 
 
 
98
  return
99
 
100
  model = MODELS.get(model_name, "qwen2.5-coder:3b")
101
- messages = [{"role": "system", "content": "You are an expert coding assistant. Provide clear, well-commented code. Always use markdown code blocks with language tags."}]
102
 
103
  for user_msg, assistant_msg in history:
104
  messages.append({"role": "user", "content": user_msg})
@@ -110,9 +165,14 @@ def chat_stream(message, history, model_name, temperature, max_tokens):
110
  try:
111
  response = requests.post(
112
  f"{OLLAMA_URL}/api/chat",
113
- json={"model": model, "messages": messages, "stream": True, "options": {"temperature": temperature, "num_predict": max_tokens}},
114
- stream=True, timeout=300
 
115
  )
 
 
 
 
116
 
117
  full = ""
118
  for line in response.iter_lines():
@@ -124,936 +184,262 @@ def chat_stream(message, history, model_name, temperature, max_tokens):
124
  yield history + [[message, full]]
125
  except:
126
  continue
 
 
 
127
  except Exception as e:
128
- yield history + [[message, f"❌ Error: {e}"]]
129
 
130
  def generate_code(prompt, language, model_name, temperature, max_tokens):
131
- if not prompt.strip():
132
- return "⚠️ Please describe what you want to build."
 
133
 
134
  full_prompt = (
135
- f"Write {language} code for the following task:\n\n"
136
- f"{prompt}\n\n"
137
- "Requirements:\n"
138
- "- Clean, production-ready code\n"
139
- "- Add helpful comments\n"
140
- "- Handle edge cases\n"
141
- "- Output ONLY the code in a markdown code block"
142
  )
143
-
144
  result = call_ollama(model_name, full_prompt, temperature, max_tokens)
145
- return extract_code(result)
146
 
147
  def explain_code(code, model_name, detail_level, max_tokens):
148
- if not code.strip():
149
- return "⚠️ Paste code to explain."
 
150
 
151
- detail_prompts = {
152
- "Brief": "Give a brief 2-3 sentence explanation of what this code does.",
153
- "Normal": "Explain what this code does, including the main logic and any important details.",
154
- "Detailed": "Give a detailed explanation including: purpose, how it works step-by-step, time/space complexity, and potential improvements."
155
  }
156
-
157
- prompt = detail_prompts[detail_level] + "\n\nCode:\n" + code
158
  return call_ollama(model_name, prompt, 0.5, max_tokens)
159
 
160
- def fix_code(code, error, model_name, max_tokens):
161
- if not code.strip():
162
- return "⚠️ Paste code to fix."
 
163
 
164
- error_msg = error if error else "Code is not working as expected"
165
- prompt = (
166
- "Fix the following code and explain what was wrong.\n\n"
167
- "Code:\n" + code + "\n\n"
168
- "Error/Problem: " + error_msg + "\n\n"
169
- "Provide:\n"
170
- "1. The fixed code in a markdown code block\n"
171
- "2. Brief explanation of what was wrong\n"
172
- "3. Any suggestions to prevent similar issues"
173
- )
174
  return call_ollama(model_name, prompt, 0.3, max_tokens)
175
 
176
  def review_code(code, model_name, max_tokens):
177
- if not code.strip():
178
- return "⚠️ Paste code to review."
 
179
 
180
  prompt = (
181
- "Review this code and provide feedback on:\n\n"
182
- "1. **Code Quality** - Is it clean, readable, well-structured?\n"
183
- "2. **Bugs/Issues** - Any potential bugs or problems?\n"
184
- "3. **Performance** - Any performance concerns?\n"
185
- "4. **Security** - Any security issues?\n"
186
- "5. **Suggestions** - How could it be improved?\n\n"
187
  "Code:\n" + code
188
  )
189
  return call_ollama(model_name, prompt, 0.4, max_tokens)
190
 
191
- def convert_code(code, source_lang, target_lang, model_name, max_tokens):
192
- if not code.strip():
193
- return "⚠️ Paste code to convert."
194
-
195
- if source_lang == target_lang:
196
- return "⚠️ Source and target languages are the same."
197
-
198
- prompt = (
199
- f"Convert this {source_lang} code to {target_lang}.\n\n"
200
- "Requirements:\n"
201
- f"- Write idiomatic {target_lang} code\n"
202
- "- Preserve the functionality exactly\n"
203
- "- Add comments explaining any language-specific differences\n"
204
- "- Output ONLY the converted code in a markdown code block\n\n"
205
- f"{source_lang} Code:\n" + code
206
- )
207
 
 
208
  result = call_ollama(model_name, prompt, 0.3, max_tokens)
209
- return extract_code(result)
210
 
211
  def generate_tests(code, language, framework, model_name, max_tokens):
212
- if not code.strip():
213
- return "⚠️ Paste code to generate tests for."
214
-
215
- frameworks = {
216
- "Python": "pytest",
217
- "JavaScript": "Jest",
218
- "TypeScript": "Jest",
219
- "Java": "JUnit",
220
- "C#": "NUnit",
221
- "Go": "testing package",
222
- "Rust": "built-in test framework",
223
- "Ruby": "RSpec",
224
- "PHP": "PHPUnit",
225
- }
226
-
227
- fw = framework if framework else frameworks.get(language, "appropriate testing framework")
228
-
229
- prompt = (
230
- f"Generate comprehensive unit tests for this {language} code using {fw}.\n\n"
231
- "Requirements:\n"
232
- "- Test all functions/methods\n"
233
- "- Include edge cases\n"
234
- "- Include both positive and negative tests\n"
235
- "- Add descriptive test names\n"
236
- "- Output ONLY the test code in a markdown code block\n\n"
237
- "Code to test:\n" + code
238
- )
239
 
 
 
240
  result = call_ollama(model_name, prompt, 0.3, max_tokens)
241
- return extract_code(result)
242
 
243
  def document_code(code, language, style, model_name, max_tokens):
244
- if not code.strip():
245
- return "⚠️ Paste code to document."
246
-
247
- styles = {
248
- "Docstrings": "Add comprehensive docstrings to all functions, classes, and methods",
249
- "Comments": "Add inline comments explaining the logic",
250
- "Both": "Add both docstrings and inline comments",
251
- "README": "Generate a README.md documenting this code"
252
- }
253
-
254
- prompt = (
255
- f"Document this {language} code.\n\n"
256
- f"Task: {styles[style]}\n\n"
257
- "Requirements:\n"
258
- "- Be clear and concise\n"
259
- "- Explain parameters, return values, and exceptions\n"
260
- "- Include usage examples where helpful\n"
261
- "- Output the fully documented code in a markdown code block\n\n"
262
- "Code:\n" + code
263
- )
264
 
 
265
  result = call_ollama(model_name, prompt, 0.4, max_tokens)
266
- if style == "README":
267
- return result
268
- return extract_code(result)
269
 
270
  def optimize_code(code, language, focus, model_name, max_tokens):
271
- if not code.strip():
272
- return "⚠️ Paste code to optimize."
273
-
274
- focus_prompts = {
275
- "Performance": "Optimize for speed and efficiency. Reduce time complexity where possible.",
276
- "Readability": "Refactor for better readability and maintainability. Follow best practices.",
277
- "Memory": "Optimize memory usage. Reduce allocations and improve data structures.",
278
- "All": "Optimize for performance, readability, and memory usage."
279
- }
280
-
281
- prompt = (
282
- f"Optimize this {language} code.\n\n"
283
- f"Focus: {focus_prompts[focus]}\n\n"
284
- "Requirements:\n"
285
- "- Explain what you changed and why\n"
286
- "- Preserve the original functionality\n"
287
- "- Show before/after complexity if relevant\n"
288
- "- Output the optimized code in a markdown code block\n\n"
289
- "Code:\n" + code
290
- )
291
 
 
292
  return call_ollama(model_name, prompt, 0.3, max_tokens)
293
 
294
  def build_regex(description, model_name, max_tokens):
295
- if not description.strip():
296
- return "⚠️ Describe the pattern you want to match."
297
-
298
- prompt = (
299
- "Create a regex pattern for the following requirement:\n\n"
300
- f"{description}\n\n"
301
- "Provide:\n"
302
- "1. The regex pattern\n"
303
- "2. Explanation of each part\n"
304
- "3. Example matches and non-matches\n"
305
- "4. Code example in Python showing usage"
306
- )
307
 
 
308
  return call_ollama(model_name, prompt, 0.3, max_tokens)
309
 
310
  def build_api(description, framework, model_name, max_tokens):
311
- if not description.strip():
312
- return "⚠️ Describe the API endpoint you want to build."
313
-
314
- prompt = (
315
- f"Create a REST API endpoint using {framework}.\n\n"
316
- f"Requirements:\n{description}\n\n"
317
- "Include:\n"
318
- "- Route definition with proper HTTP methods\n"
319
- "- Request validation\n"
320
- "- Error handling\n"
321
- "- Response formatting\n"
322
- "- Brief documentation comments\n"
323
- "- Output the code in a markdown code block"
324
- )
325
 
 
326
  result = call_ollama(model_name, prompt, 0.3, max_tokens)
327
- return extract_code(result)
328
-
329
- # ===== PREMIUM CSS =====
330
-
331
- css = """
332
- /* ===== GLOBAL ===== */
333
- :root {
334
- --primary: #6366f1;
335
- --primary-dark: #4f46e5;
336
- --secondary: #8b5cf6;
337
- --accent: #06b6d4;
338
- --bg-dark: #0f172a;
339
- --bg-card: #1e293b;
340
- --bg-hover: #334155;
341
- --text-primary: #f1f5f9;
342
- --text-secondary: #94a3b8;
343
- --border: #334155;
344
- --success: #10b981;
345
- --warning: #f59e0b;
346
- --error: #ef4444;
347
- --gradient: linear-gradient(135deg, #6366f1 0%, #8b5cf6 50%, #06b6d4 100%);
348
- }
349
-
350
- .gradio-container {
351
- max-width: 1500px !important;
352
- margin: auto !important;
353
- background: var(--bg-dark) !important;
354
- font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif !important;
355
- }
356
-
357
- .dark {
358
- --bg-dark: #0f172a;
359
- }
360
-
361
- /* ===== HEADER ===== */
362
- .header-section {
363
- background: var(--gradient);
364
- border-radius: 20px;
365
- padding: 32px 40px;
366
- margin-bottom: 24px;
367
- position: relative;
368
- overflow: hidden;
369
- box-shadow: 0 20px 40px rgba(99, 102, 241, 0.3);
370
- }
371
-
372
- .header-section::before {
373
- content: '';
374
- position: absolute;
375
- top: -50%;
376
- right: -50%;
377
- width: 100%;
378
- height: 200%;
379
- background: radial-gradient(circle, rgba(255,255,255,0.1) 0%, transparent 60%);
380
- animation: pulse 4s ease-in-out infinite;
381
- }
382
-
383
- @keyframes pulse {
384
- 0%, 100% { transform: scale(1); opacity: 0.5; }
385
- 50% { transform: scale(1.1); opacity: 0.8; }
386
- }
387
-
388
- .header-content {
389
- position: relative;
390
- z-index: 1;
391
- display: flex;
392
- justify-content: space-between;
393
- align-items: center;
394
- flex-wrap: wrap;
395
- gap: 20px;
396
- }
397
-
398
- .header-title {
399
- color: white;
400
- margin: 0;
401
- font-size: 2.8rem;
402
- font-weight: 800;
403
- letter-spacing: -0.02em;
404
- text-shadow: 0 2px 10px rgba(0,0,0,0.2);
405
- }
406
-
407
- .header-subtitle {
408
- color: rgba(255,255,255,0.9);
409
- margin: 8px 0 0 0;
410
- font-size: 1.1rem;
411
- font-weight: 400;
412
- }
413
-
414
- .header-badges {
415
- display: flex;
416
- gap: 10px;
417
- flex-wrap: wrap;
418
- }
419
-
420
- .badge {
421
- background: rgba(255,255,255,0.2);
422
- backdrop-filter: blur(10px);
423
- padding: 8px 16px;
424
- border-radius: 50px;
425
- font-size: 0.85rem;
426
- font-weight: 500;
427
- color: white;
428
- border: 1px solid rgba(255,255,255,0.2);
429
- }
430
-
431
- /* ===== STATUS BAR ===== */
432
- .status-bar {
433
- background: var(--bg-card);
434
- border: 1px solid var(--border);
435
- border-radius: 16px;
436
- padding: 16px 24px;
437
- margin-bottom: 20px;
438
- display: flex;
439
- justify-content: space-between;
440
- align-items: center;
441
- flex-wrap: wrap;
442
- gap: 16px;
443
- }
444
-
445
- .status-indicator {
446
- display: flex;
447
- align-items: center;
448
- gap: 8px;
449
- font-weight: 500;
450
- color: var(--text-primary);
451
- }
452
-
453
- .status-dot {
454
- width: 10px;
455
- height: 10px;
456
- border-radius: 50%;
457
- background: var(--success);
458
- animation: blink 2s ease-in-out infinite;
459
- }
460
-
461
- @keyframes blink {
462
- 0%, 100% { opacity: 1; }
463
- 50% { opacity: 0.5; }
464
- }
465
-
466
- /* ===== SETTINGS PANEL ===== */
467
- .settings-panel {
468
- background: var(--bg-card);
469
- border: 1px solid var(--border);
470
- border-radius: 16px;
471
- padding: 20px 24px;
472
- margin-bottom: 20px;
473
- }
474
-
475
- .settings-panel label {
476
- color: var(--text-secondary) !important;
477
- font-weight: 500 !important;
478
- font-size: 0.9rem !important;
479
- }
480
-
481
- .settings-panel input, .settings-panel select {
482
- background: var(--bg-dark) !important;
483
- border: 1px solid var(--border) !important;
484
- border-radius: 10px !important;
485
- color: var(--text-primary) !important;
486
- transition: all 0.2s ease !important;
487
- }
488
-
489
- .settings-panel input:focus, .settings-panel select:focus {
490
- border-color: var(--primary) !important;
491
- box-shadow: 0 0 0 3px rgba(99, 102, 241, 0.2) !important;
492
- }
493
-
494
- /* ===== MODEL INFO ===== */
495
- .model-info-box {
496
- background: linear-gradient(135deg, rgba(99, 102, 241, 0.1) 0%, rgba(139, 92, 246, 0.1) 100%);
497
- border: 1px solid rgba(99, 102, 241, 0.3);
498
- border-radius: 12px;
499
- padding: 12px 18px;
500
- font-size: 0.9rem;
501
- color: var(--text-secondary);
502
- margin-top: 12px;
503
- }
504
-
505
- /* ===== TABS ===== */
506
- .tabs {
507
- background: transparent !important;
508
- }
509
-
510
- .tab-nav {
511
- background: var(--bg-card) !important;
512
- border: 1px solid var(--border) !important;
513
- border-radius: 16px !important;
514
- padding: 8px !important;
515
- gap: 6px !important;
516
- margin-bottom: 20px !important;
517
- flex-wrap: wrap !important;
518
- }
519
-
520
- .tab-nav button {
521
- background: transparent !important;
522
- border: none !important;
523
- border-radius: 12px !important;
524
- padding: 12px 20px !important;
525
- font-weight: 600 !important;
526
- font-size: 0.9rem !important;
527
- color: var(--text-secondary) !important;
528
- transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1) !important;
529
- }
530
-
531
- .tab-nav button:hover {
532
- background: var(--bg-hover) !important;
533
- color: var(--text-primary) !important;
534
- }
535
-
536
- .tab-nav button.selected {
537
- background: var(--gradient) !important;
538
- color: white !important;
539
- box-shadow: 0 4px 15px rgba(99, 102, 241, 0.4) !important;
540
- }
541
-
542
- .tabitem {
543
- background: transparent !important;
544
- border: none !important;
545
- padding: 0 !important;
546
- }
547
-
548
- /* ===== CARDS ===== */
549
- .card {
550
- background: var(--bg-card);
551
- border: 1px solid var(--border);
552
- border-radius: 16px;
553
- padding: 24px;
554
- transition: all 0.3s ease;
555
- }
556
-
557
- .card:hover {
558
- border-color: var(--primary);
559
- box-shadow: 0 8px 30px rgba(99, 102, 241, 0.15);
560
- }
561
-
562
- /* ===== CHATBOT ===== */
563
- .chatbot {
564
- background: var(--bg-card) !important;
565
- border: 1px solid var(--border) !important;
566
- border-radius: 16px !important;
567
- height: 500px !important;
568
- }
569
-
570
- .chatbot .message {
571
- border-radius: 16px !important;
572
- padding: 16px 20px !important;
573
- margin: 8px !important;
574
- font-size: 0.95rem !important;
575
- line-height: 1.6 !important;
576
- }
577
-
578
- .chatbot .user {
579
- background: var(--gradient) !important;
580
- color: white !important;
581
- margin-left: 20% !important;
582
- }
583
-
584
- .chatbot .bot {
585
- background: var(--bg-hover) !important;
586
- color: var(--text-primary) !important;
587
- margin-right: 20% !important;
588
- border: 1px solid var(--border) !important;
589
- }
590
-
591
- /* ===== INPUTS ===== */
592
- .input-container textarea,
593
- .input-container input[type="text"] {
594
- background: var(--bg-card) !important;
595
- border: 1px solid var(--border) !important;
596
- border-radius: 12px !important;
597
- color: var(--text-primary) !important;
598
- padding: 14px 18px !important;
599
- font-size: 0.95rem !important;
600
- transition: all 0.2s ease !important;
601
- }
602
-
603
- .input-container textarea:focus,
604
- .input-container input[type="text"]:focus {
605
- border-color: var(--primary) !important;
606
- box-shadow: 0 0 0 3px rgba(99, 102, 241, 0.2) !important;
607
- outline: none !important;
608
- }
609
-
610
- .input-container textarea::placeholder {
611
- color: var(--text-secondary) !important;
612
- }
613
-
614
- /* ===== CODE BLOCKS ===== */
615
- .code-container {
616
- border-radius: 16px !important;
617
- overflow: hidden !important;
618
- border: 1px solid var(--border) !important;
619
- }
620
-
621
- .code-container pre {
622
- background: #0d1117 !important;
623
- padding: 20px !important;
624
- margin: 0 !important;
625
- font-family: 'JetBrains Mono', 'Fira Code', monospace !important;
626
- font-size: 0.9rem !important;
627
- line-height: 1.6 !important;
628
- }
629
-
630
- /* ===== BUTTONS ===== */
631
- .primary-btn {
632
- background: var(--gradient) !important;
633
- border: none !important;
634
- border-radius: 12px !important;
635
- padding: 14px 28px !important;
636
- font-weight: 600 !important;
637
- font-size: 0.95rem !important;
638
- color: white !important;
639
- cursor: pointer !important;
640
- transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1) !important;
641
- box-shadow: 0 4px 15px rgba(99, 102, 241, 0.3) !important;
642
- }
643
-
644
- .primary-btn:hover {
645
- transform: translateY(-2px) !important;
646
- box-shadow: 0 8px 25px rgba(99, 102, 241, 0.4) !important;
647
- }
648
-
649
- .primary-btn:active {
650
- transform: translateY(0) !important;
651
- }
652
-
653
- .secondary-btn {
654
- background: var(--bg-hover) !important;
655
- border: 1px solid var(--border) !important;
656
- border-radius: 12px !important;
657
- padding: 12px 20px !important;
658
- font-weight: 500 !important;
659
- color: var(--text-secondary) !important;
660
- transition: all 0.2s ease !important;
661
- }
662
-
663
- .secondary-btn:hover {
664
- background: var(--bg-card) !important;
665
- border-color: var(--primary) !important;
666
- color: var(--text-primary) !important;
667
- }
668
-
669
- /* ===== DROPDOWNS ===== */
670
- .dropdown-container select {
671
- background: var(--bg-card) !important;
672
- border: 1px solid var(--border) !important;
673
- border-radius: 10px !important;
674
- color: var(--text-primary) !important;
675
- padding: 10px 14px !important;
676
- }
677
-
678
- /* ===== SLIDERS ===== */
679
- input[type="range"] {
680
- accent-color: var(--primary) !important;
681
- }
682
-
683
- /* ===== RADIO & CHECKBOX ===== */
684
- .radio-group label {
685
- background: var(--bg-hover) !important;
686
- border: 1px solid var(--border) !important;
687
- border-radius: 10px !important;
688
- padding: 10px 16px !important;
689
- color: var(--text-secondary) !important;
690
- transition: all 0.2s ease !important;
691
- }
692
-
693
- .radio-group label.selected {
694
- background: var(--primary) !important;
695
- border-color: var(--primary) !important;
696
- color: white !important;
697
- }
698
-
699
- /* ===== ACCORDION ===== */
700
- .accordion {
701
- background: var(--bg-card) !important;
702
- border: 1px solid var(--border) !important;
703
- border-radius: 12px !important;
704
- margin-top: 16px !important;
705
- }
706
-
707
- .accordion-header {
708
- padding: 14px 18px !important;
709
- color: var(--text-secondary) !important;
710
- font-weight: 500 !important;
711
- }
712
-
713
- /* ===== MARKDOWN OUTPUT ===== */
714
- .markdown-output {
715
- background: var(--bg-card);
716
- border: 1px solid var(--border);
717
- border-radius: 16px;
718
- padding: 24px;
719
- color: var(--text-primary);
720
- line-height: 1.7;
721
- }
722
-
723
- .markdown-output h1, .markdown-output h2, .markdown-output h3 {
724
- color: var(--text-primary);
725
- margin-top: 1.5em;
726
- margin-bottom: 0.5em;
727
- }
728
-
729
- .markdown-output code {
730
- background: var(--bg-hover);
731
- padding: 2px 6px;
732
- border-radius: 4px;
733
- font-family: 'JetBrains Mono', monospace;
734
- font-size: 0.9em;
735
- }
736
-
737
- .markdown-output pre code {
738
- display: block;
739
- padding: 16px;
740
- overflow-x: auto;
741
- }
742
-
743
- /* ===== AUDIO INPUT ===== */
744
- .audio-input {
745
- background: var(--bg-card) !important;
746
- border: 1px solid var(--border) !important;
747
- border-radius: 12px !important;
748
- }
749
-
750
- /* ===== DIVIDER ===== */
751
- .divider {
752
- height: 1px;
753
- background: var(--border);
754
- margin: 24px 0;
755
- }
756
-
757
- /* ===== TOOL SECTION ===== */
758
- .tool-section {
759
- background: var(--bg-card);
760
- border: 1px solid var(--border);
761
- border-radius: 16px;
762
- padding: 24px;
763
- margin-bottom: 20px;
764
- }
765
 
766
- .tool-title {
767
- color: var(--text-primary);
768
- font-size: 1.1rem;
769
- font-weight: 600;
770
- margin-bottom: 16px;
771
- display: flex;
772
- align-items: center;
773
- gap: 8px;
774
- }
775
-
776
- /* ===== FOOTER ===== */
777
- .footer {
778
- text-align: center;
779
- padding: 24px;
780
- color: var(--text-secondary);
781
- font-size: 0.85rem;
782
- border-top: 1px solid var(--border);
783
- margin-top: 32px;
784
- }
785
-
786
- .footer a {
787
- color: var(--primary);
788
- text-decoration: none;
789
- }
790
-
791
- /* ===== SCROLLBAR ===== */
792
- ::-webkit-scrollbar {
793
- width: 8px;
794
- height: 8px;
795
- }
796
-
797
- ::-webkit-scrollbar-track {
798
- background: var(--bg-dark);
799
- }
800
 
801
- ::-webkit-scrollbar-thumb {
802
- background: var(--border);
803
- border-radius: 4px;
804
- }
805
-
806
- ::-webkit-scrollbar-thumb:hover {
807
- background: var(--text-secondary);
808
- }
809
-
810
- /* ===== HIDE FOOTER ===== */
811
- footer { display: none !important; }
812
-
813
- /* ===== RESPONSIVE ===== */
814
- @media (max-width: 768px) {
815
- .header-title { font-size: 2rem; }
816
- .header-content { flex-direction: column; text-align: center; }
817
- .header-badges { justify-content: center; }
818
- .tab-nav button { padding: 10px 14px !important; font-size: 0.8rem !important; }
819
- }
820
- """
821
-
822
- # ===== UI =====
823
-
824
- with gr.Blocks(title="Axon v6", css=css, theme=gr.themes.Base()) as demo:
825
 
826
- # Header
827
- gr.HTML("""
828
- <div class="header-section">
829
- <div class="header-content">
830
- <div>
831
- <h1 class="header-title">🔥 Axon v6</h1>
832
- <p class="header-subtitle">AI-Powered Coding Assistant</p>
833
- </div>
834
- <div class="header-badges">
835
- <span class="badge">🤖 10 Models</span>
836
- <span class="badge">🛠️ 9 Tools</span>
837
- <span class="badge">🔒 100% Local</span>
838
- <span class="badge">⚡ No Rate Limits</span>
839
- </div>
840
- </div>
841
- </div>
842
- """)
843
 
844
- # Status & Model Info
845
- with gr.Row():
846
- status = gr.Markdown(value=get_status, every=5)
847
 
848
- # Settings Panel
849
- with gr.Row(elem_classes="settings-panel"):
850
- model_dropdown = gr.Dropdown(
851
- choices=list(MODELS.keys()),
852
- value="Qwen2.5 Coder 3B",
853
- label="🤖 Model",
854
- scale=3
855
- )
856
  temperature = gr.Slider(0, 1, value=0.7, step=0.1, label="🌡️ Creativity", scale=2)
857
  max_tokens = gr.Slider(256, 8192, value=2048, step=256, label="📏 Max Tokens", scale=2)
858
 
859
- model_info_display = gr.Markdown(value="🚀 Fast & capable • Recommended", elem_classes="model-info-box")
860
- model_dropdown.change(get_model_info, model_dropdown, model_info_display)
861
 
862
- with gr.Tabs(elem_classes="tabs"):
863
 
864
- # ===== CHAT =====
865
- with gr.TabItem("💬 Chat", elem_classes="tabitem"):
866
- chatbot = gr.Chatbot(height=500, show_copy_button=True, bubble_full_width=False, elem_classes="chatbot")
867
  with gr.Row():
868
- msg = gr.Textbox(
869
- placeholder="Ask anything about coding... Press Enter to send",
870
- show_label=False, scale=8, lines=1,
871
- elem_classes="input-container"
872
- )
873
- send = gr.Button("Send ➤", variant="primary", scale=1, elem_classes="primary-btn")
874
  with gr.Row():
875
- audio_input = gr.Audio(sources=["microphone"], type="filepath", label="🎤 Voice", scale=2)
876
- transcribe_btn = gr.Button("🎤 Transcribe", scale=1, elem_classes="secondary-btn")
877
- clear = gr.Button("🗑️ Clear", scale=1, elem_classes="secondary-btn")
878
- with gr.Accordion("💡 Quick Prompts", open=False):
879
- gr.Examples([
880
- "Write a Python function to find all prime numbers up to n",
881
- "Explain async/await vs promises in JavaScript",
882
- "How do I implement a binary search tree?",
883
- "Write a REST API with authentication in FastAPI"
884
- ], inputs=msg)
885
 
886
- # ===== GENERATE =====
887
- with gr.TabItem("⚡ Generate", elem_classes="tabitem"):
888
  with gr.Row():
889
  with gr.Column(scale=1):
890
- gen_prompt = gr.Textbox(
891
- label="📝 Describe what you want to build",
892
- placeholder="e.g., A function that validates email addresses with regex",
893
- lines=5, elem_classes="input-container"
894
- )
895
  with gr.Row():
896
- gen_lang = gr.Dropdown(LANGUAGES, value="Python", label="🔤 Language", scale=2)
897
  gen_temp = gr.Slider(0, 1, value=0.3, step=0.1, label="🌡️", scale=1)
898
- gen_btn = gr.Button("⚡ Generate Code", variant="primary", size="lg", elem_classes="primary-btn")
899
  with gr.Column(scale=2):
900
- gen_output = gr.Code(label="Generated Code", language="python", lines=22, elem_classes="code-container")
901
 
902
- # ===== EXPLAIN =====
903
- with gr.TabItem("🔍 Explain", elem_classes="tabitem"):
904
  with gr.Row():
905
  with gr.Column(scale=1):
906
- explain_input = gr.Code(label="📋 Paste your code", lines=14, elem_classes="code-container")
907
- explain_detail = gr.Radio(
908
- ["Brief", "Normal", "Detailed"],
909
- value="Normal", label="📊 Detail Level"
910
- )
911
- explain_btn = gr.Button("🔍 Explain Code", variant="primary", size="lg", elem_classes="primary-btn")
912
  with gr.Column(scale=1):
913
- explain_output = gr.Markdown(label="Explanation", elem_classes="markdown-output")
914
 
915
- # ===== DEBUG =====
916
- with gr.TabItem("🔧 Debug", elem_classes="tabitem"):
917
  with gr.Row():
918
  with gr.Column(scale=1):
919
- fix_input = gr.Code(label="🐛 Paste buggy code", lines=12, elem_classes="code-container")
920
- fix_error = gr.Textbox(
921
- label=" Error message (optional)",
922
- placeholder="Paste error or describe the issue",
923
- lines=3, elem_classes="input-container"
924
- )
925
- fix_btn = gr.Button("🔧 Fix Code", variant="primary", size="lg", elem_classes="primary-btn")
926
  with gr.Column(scale=1):
927
- fix_output = gr.Markdown(label="Solution", elem_classes="markdown-output")
928
 
929
- # ===== REVIEW =====
930
- with gr.TabItem("📋 Review", elem_classes="tabitem"):
931
  with gr.Row():
932
  with gr.Column(scale=1):
933
- review_input = gr.Code(label="📋 Code to review", lines=16, elem_classes="code-container")
934
- review_btn = gr.Button("📋 Review Code", variant="primary", size="lg", elem_classes="primary-btn")
935
  with gr.Column(scale=1):
936
- review_output = gr.Markdown(label="Code Review", elem_classes="markdown-output")
937
 
938
- # ===== CONVERT =====
939
- with gr.TabItem("🔄 Convert", elem_classes="tabitem"):
940
  with gr.Row():
941
  with gr.Column(scale=1):
942
- convert_input = gr.Code(label="📥 Source Code", lines=14, elem_classes="code-container")
943
  with gr.Row():
944
- convert_from = gr.Dropdown(LANGUAGES, value="Python", label="From", scale=1)
945
- gr.HTML("<div style='display:flex;align-items:center;justify-content:center;font-size:1.5rem;'>➜</div>")
946
- convert_to = gr.Dropdown(LANGUAGES, value="JavaScript", label="To", scale=1)
947
- convert_btn = gr.Button("🔄 Convert Code", variant="primary", size="lg", elem_classes="primary-btn")
948
  with gr.Column(scale=1):
949
- convert_output = gr.Code(label="📤 Converted Code", lines=14, elem_classes="code-container")
950
 
951
- # ===== TEST =====
952
- with gr.TabItem("🧪 Test", elem_classes="tabitem"):
953
  with gr.Row():
954
  with gr.Column(scale=1):
955
- test_input = gr.Code(label="📋 Code to test", lines=14, elem_classes="code-container")
956
  with gr.Row():
957
- test_lang = gr.Dropdown(LANGUAGES[:12], value="Python", label="Language", scale=2)
958
- test_framework = gr.Textbox(label="Framework", placeholder="e.g., pytest", scale=2)
959
- test_btn = gr.Button("🧪 Generate Tests", variant="primary", size="lg", elem_classes="primary-btn")
960
  with gr.Column(scale=1):
961
- test_output = gr.Code(label="Generated Tests", lines=14, elem_classes="code-container")
962
 
963
- # ===== DOCUMENT =====
964
- with gr.TabItem("📝 Document", elem_classes="tabitem"):
965
  with gr.Row():
966
  with gr.Column(scale=1):
967
- doc_input = gr.Code(label="📋 Code to document", lines=14, elem_classes="code-container")
968
  with gr.Row():
969
- doc_lang = gr.Dropdown(LANGUAGES, value="Python", label="Language", scale=2)
970
- doc_style = gr.Dropdown(
971
- ["Docstrings", "Comments", "Both", "README"],
972
- value="Both", label="Style", scale=2
973
- )
974
- doc_btn = gr.Button("📝 Document", variant="primary", size="lg", elem_classes="primary-btn")
975
  with gr.Column(scale=1):
976
- doc_output = gr.Code(label="Documented Code", lines=14, elem_classes="code-container")
977
 
978
- # ===== OPTIMIZE =====
979
- with gr.TabItem("🚀 Optimize", elem_classes="tabitem"):
980
  with gr.Row():
981
  with gr.Column(scale=1):
982
- opt_input = gr.Code(label="📋 Code to optimize", lines=14, elem_classes="code-container")
983
  with gr.Row():
984
- opt_lang = gr.Dropdown(LANGUAGES, value="Python", label="Language", scale=2)
985
- opt_focus = gr.Dropdown(
986
- ["All", "Performance", "Readability", "Memory"],
987
- value="All", label="Focus", scale=2
988
- )
989
- opt_btn = gr.Button("🚀 Optimize", variant="primary", size="lg", elem_classes="primary-btn")
990
  with gr.Column(scale=1):
991
- opt_output = gr.Markdown(label="Optimized Code", elem_classes="markdown-output")
992
 
993
- # ===== TOOLS =====
994
- with gr.TabItem("🛠️ Tools", elem_classes="tabitem"):
995
-
996
- # Regex Builder
997
- gr.HTML("<div class='tool-section'><div class='tool-title'>🎯 Regex Builder</div></div>")
998
  with gr.Row():
999
  with gr.Column(scale=1):
1000
- regex_desc = gr.Textbox(
1001
- label="Describe the pattern",
1002
- placeholder="e.g., Match email addresses, validate phone numbers with country code...",
1003
- lines=3, elem_classes="input-container"
1004
- )
1005
- regex_btn = gr.Button("🎯 Build Regex", variant="primary", elem_classes="primary-btn")
1006
  with gr.Column(scale=1):
1007
- regex_output = gr.Markdown(label="Regex Pattern", elem_classes="markdown-output")
1008
 
1009
- gr.HTML("<div class='divider'></div>")
1010
-
1011
- # API Builder
1012
- gr.HTML("<div class='tool-section'><div class='tool-title'>🔗 API Builder</div></div>")
1013
  with gr.Row():
1014
  with gr.Column(scale=1):
1015
- api_desc = gr.Textbox(
1016
- label="Describe the endpoint",
1017
- placeholder="e.g., POST endpoint for user registration with email/password validation...",
1018
- lines=3, elem_classes="input-container"
1019
- )
1020
- api_framework = gr.Dropdown(
1021
- ["FastAPI (Python)", "Express (Node.js)", "Gin (Go)", "Spring Boot (Java)", "Flask (Python)", "Django REST (Python)"],
1022
- value="FastAPI (Python)", label="Framework"
1023
- )
1024
- api_btn = gr.Button("🔗 Build API", variant="primary", elem_classes="primary-btn")
1025
  with gr.Column(scale=1):
1026
- api_output = gr.Code(label="API Code", lines=14, elem_classes="code-container")
1027
 
1028
- # Footer
1029
- gr.HTML("""
1030
- <div class="footer">
1031
- <p>🔒 Running 100% locally via Ollama • Your code never leaves your machine</p>
1032
- <p style="margin-top: 8px; opacity: 0.7;">Built with ❤️ using Gradio</p>
1033
- </div>
1034
- """)
1035
 
1036
- # ===== EVENT HANDLERS =====
1037
-
1038
  def respond(message, history, model, temp, tokens):
1039
  history = history or []
1040
- for updated_history in chat_stream(message, history, model, temp, tokens):
1041
- yield updated_history, ""
1042
 
1043
  msg.submit(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
1044
  send.click(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
1045
  clear.click(lambda: [], None, chatbot)
1046
- transcribe_btn.click(transcribe_audio, audio_input, msg)
1047
 
1048
  gen_btn.click(generate_code, [gen_prompt, gen_lang, model_dropdown, gen_temp, max_tokens], gen_output)
1049
  explain_btn.click(explain_code, [explain_input, model_dropdown, explain_detail, max_tokens], explain_output)
1050
  fix_btn.click(fix_code, [fix_input, fix_error, model_dropdown, max_tokens], fix_output)
1051
  review_btn.click(review_code, [review_input, model_dropdown, max_tokens], review_output)
1052
  convert_btn.click(convert_code, [convert_input, convert_from, convert_to, model_dropdown, max_tokens], convert_output)
1053
- test_btn.click(generate_tests, [test_input, test_lang, test_framework, model_dropdown, max_tokens], test_output)
1054
  doc_btn.click(document_code, [doc_input, doc_lang, doc_style, model_dropdown, max_tokens], doc_output)
1055
  opt_btn.click(optimize_code, [opt_input, opt_lang, opt_focus, model_dropdown, max_tokens], opt_output)
1056
  regex_btn.click(build_regex, [regex_desc, model_dropdown, max_tokens], regex_output)
1057
- api_btn.click(build_api, [api_desc, api_framework, model_dropdown, max_tokens], api_output)
1058
 
1059
  demo.launch(server_name="0.0.0.0", server_port=7860)
 
1
  import gradio as gr
2
  import requests
3
  import json
4
+ import time
5
  from faster_whisper import WhisperModel
6
 
7
  OLLAMA_URL = "http://localhost:11434"
8
+ MAX_RETRIES = 3
9
+ TIMEOUT = 300
10
 
11
  MODELS = {
12
  "⭐ Qwen3 30B-A3B (Best)": "hf.co/bartowski/Qwen_Qwen3-30B-A3B-GGUF:Q4_K_M",
 
22
  }
23
 
24
  MODEL_INFO = {
25
+ "⭐ Qwen3 30B-A3B (Best)": "🏆 Best quality • MoE 30B/3B active",
26
  "Qwen2.5 Coder 7B": "⚖️ Balanced • Great for most tasks",
27
  "Qwen2.5 Coder 3B": "🚀 Fast & capable • Recommended",
28
  "Qwen2.5 Coder 1.5B (Fast)": "⚡ Fastest • Simple tasks",
29
  "DeepSeek Coder 6.7B": "🧠 Complex logic • Algorithms",
30
  "DeepSeek Coder 1.3B (Fast)": "⚡ Quick completions",
31
+ "StarCoder2 7B": "🐙 GitHub trained",
32
  "StarCoder2 3B": "🐙 Fast GitHub style",
33
  "CodeGemma 7B": "🔷 Google • Strong docs",
34
  "CodeGemma 2B (Fast)": "🔷 Quick & efficient",
 
41
  "HTML/CSS", "SQL", "Bash", "PowerShell", "Lua"
42
  ]
43
 
44
+ # Whisper init
45
+ whisper_model = None
46
+ try:
47
+ print("Loading Whisper...")
48
+ whisper_model = WhisperModel("tiny", device="cpu", compute_type="int8")
49
+ print("✅ Whisper ready!")
50
+ except Exception as e:
51
+ print(f"❌ Whisper failed: {e}")
52
 
53
+ # ===== HELPERS =====
54
+
55
def check_ollama():
    """Return True if the local Ollama server answers a tag listing.

    Cheap liveness probe used before issuing generation requests.
    """
    try:
        r = requests.get(f"{OLLAMA_URL}/api/tags", timeout=5)
        return r.status_code == 200
    except requests.RequestException:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate instead of being reported as "server down".
        return False
61
 
62
def get_status():
    """Return a short, emoji-prefixed status string for the Ollama server."""
    try:
        r = requests.get(f"{OLLAMA_URL}/api/tags", timeout=5)
        if r.status_code == 200:
            models = r.json().get("models", [])
            return f"🟢 Online • {len(models)} models"
    except requests.RequestException:
        # Narrowed from a bare `except:`; any network failure simply
        # means the server is unreachable — fall through to "Offline".
        pass
    return "🔴 Offline"
71
 
72
def get_model_info(model_name):
    """Look up the one-line description for *model_name*; "" when unknown."""
    description = MODEL_INFO.get(model_name)
    return description if description is not None else ""
74
 
75
def validate_input(text, name="Input"):
    """Validate free-text user input.

    Returns (True, None) when *text* is non-blank and at most 100,000
    characters; otherwise (False, <user-facing warning string>).
    """
    stripped = text.strip() if text else ""
    if not stripped:
        return False, f"⚠️ {name} cannot be empty."
    if len(text) > 100000:
        return False, f"⚠️ {name} too long (max 100KB)."
    return True, None
81
+
82
def transcribe_audio(audio):
    """Transcribe an audio file with Whisper.

    Returns "" when no audio was recorded, the transcribed text on success,
    or a user-facing ❌/⚠️ status string so the result can be shown directly
    in the chat textbox.
    """
    if audio is None:
        return ""
    if whisper_model is None:
        return "❌ Whisper not available."
    try:
        segments, _ = whisper_model.transcribe(audio)
        text = " ".join(seg.text for seg in segments).strip()
        return text if text else "⚠️ No speech detected."
    except Exception as e:
        # Fixed: the message previously began with a stray space and lacked
        # the ❌ marker used by every other error string in this module.
        return f"❌ Transcription failed: {str(e)[:50]}"
93
 
94
def call_ollama(model_name, prompt, temperature=0.7, max_tokens=2048):
    """Send one non-streaming generation request to the local Ollama server.

    Args:
        model_name: Display name; mapped through MODELS to an Ollama tag.
        prompt: Full prompt text to generate from.
        temperature: Sampling temperature forwarded in Ollama "options".
        max_tokens: Token budget (Ollama "num_predict" option).

    Returns:
        The model's response text, or a user-facing ❌/⚠️ status string.
    """
    if not check_ollama():
        return "❌ **Ollama not running.** Please wait for it to start."

    # Unknown display names fall back to a small default model tag.
    model = MODELS.get(model_name, "qwen2.5-coder:3b")

    for attempt in range(MAX_RETRIES):
        try:
            r = requests.post(
                f"{OLLAMA_URL}/api/generate",
                json={"model": model, "prompt": prompt, "stream": False,
                      "options": {"temperature": temperature, "num_predict": max_tokens}},
                timeout=TIMEOUT
            )

            if r.status_code == 200:
                response = r.json().get("response", "")
                return response if response.strip() else "⚠️ Empty response. Try rephrasing."
            elif r.status_code == 404:
                return f"❌ **Model not found:** `{model}`"
            else:
                return f"❌ **Error {r.status_code}**"

        except requests.exceptions.Timeout:
            # Only timeouts are retried; back off briefly between attempts.
            if attempt < MAX_RETRIES - 1:
                time.sleep(2)
                continue
            return "❌ **Timeout.** Try smaller model."
        except requests.exceptions.ConnectionError:
            return "❌ **Connection failed.**"
        except Exception as e:
            # Truncate arbitrary error text so the UI message stays short.
            return f"❌ **Error:** {str(e)[:50]}"

    return "❌ **Max retries reached.**"
128
 
129
def extract_code(text):
    """Return the contents of the first ``` fenced block in *text*.

    Drops the language-tag line when present. Returns *text* unchanged when
    it is falsy or contains no fence.
    """
    if not text or "```" not in text:
        return text
    # Removed a dead bare `try/except: pass`: these are pure string
    # operations on an already-validated str and cannot raise here.
    parts = text.split("```")
    if len(parts) >= 2:
        code = parts[1]
        if "\n" in code:
            # First line inside the fence is the language tag (e.g. "python").
            code = code.split("\n", 1)[-1]
        return code.strip()
    return text
142
 
143
  # ===== CORE FUNCTIONS =====
144
 
145
  def chat_stream(message, history, model_name, temperature, max_tokens):
146
+ valid, error = validate_input(message, "Message")
147
+ if not valid:
148
+ yield history + [[message, error]]
149
+ return
150
+
151
+ if not check_ollama():
152
+ yield history + [[message, "❌ **Ollama not running.**"]]
153
  return
154
 
155
  model = MODELS.get(model_name, "qwen2.5-coder:3b")
156
+ messages = [{"role": "system", "content": "You are an expert coding assistant. Use markdown code blocks."}]
157
 
158
  for user_msg, assistant_msg in history:
159
  messages.append({"role": "user", "content": user_msg})
 
165
  try:
166
  response = requests.post(
167
  f"{OLLAMA_URL}/api/chat",
168
+ json={"model": model, "messages": messages, "stream": True,
169
+ "options": {"temperature": temperature, "num_predict": max_tokens}},
170
+ stream=True, timeout=TIMEOUT
171
  )
172
+
173
+ if response.status_code != 200:
174
+ yield history + [[message, f"❌ **Error {response.status_code}**"]]
175
+ return
176
 
177
  full = ""
178
  for line in response.iter_lines():
 
184
  yield history + [[message, full]]
185
  except:
186
  continue
187
+
188
+ except requests.exceptions.Timeout:
189
+ yield history + [[message, "❌ **Timeout.**"]]
190
  except Exception as e:
191
+ yield history + [[message, f"❌ **Error:** {str(e)[:50]}"]]
192
 
193
def generate_code(prompt, language, model_name, temperature, max_tokens):
    """Generate *language* code from a natural-language description."""
    ok, err = validate_input(prompt, "Description")
    if not ok:
        return err

    request = (
        f"Write {language} code for:\n\n{prompt}\n\n"
        "Requirements: Clean code, comments, handle edge cases. Output ONLY code in markdown block."
    )
    result = call_ollama(model_name, request, temperature, max_tokens)
    # Pass ❌/⚠️ status messages through untouched; strip fences otherwise.
    if result.startswith(("❌", "⚠️")):
        return result
    return extract_code(result)
204
 
205
def explain_code(code, model_name, detail_level, max_tokens):
    """Ask the model to explain *code* at the requested detail level."""
    ok, err = validate_input(code, "Code")
    if not ok:
        return err

    instructions = {
        "Brief": "Give 2-3 sentence explanation.",
        "Normal": "Explain the code with main logic.",
        "Detailed": "Detailed explanation with complexity and improvements."
    }
    # Unknown detail levels fall back to the "Normal" instruction.
    instruction = instructions.get(detail_level, instructions["Normal"])
    prompt = f"{instruction}\n\nCode:\n{code}"
    return call_ollama(model_name, prompt, 0.5, max_tokens)
217
 
218
def fix_code(code, error_msg, model_name, max_tokens):
    """Ask the model to repair *code*, optionally guided by *error_msg*."""
    ok, err_text = validate_input(code, "Code")
    if not ok:
        return err_text

    # Blank/whitespace error descriptions get a generic placeholder.
    if error_msg and error_msg.strip():
        described_error = error_msg
    else:
        described_error = "Not working as expected"
    prompt = f"Fix this code and explain what was wrong.\n\nCode:\n{code}\n\nError: {described_error}"
    return call_ollama(model_name, prompt, 0.3, max_tokens)
226
 
227
def review_code(code, model_name, max_tokens):
    """Run a five-point review (quality, bugs, performance, security, suggestions)."""
    ok, err = validate_input(code, "Code")
    if not ok:
        return err

    checklist = "1. Code Quality\n2. Bugs\n3. Performance\n4. Security\n5. Suggestions"
    prompt = f"Review this code for:\n{checklist}\n\nCode:\n{code}"
    return call_ollama(model_name, prompt, 0.4, max_tokens)
238
 
239
def convert_code(code, from_lang, to_lang, model_name, max_tokens):
    """Translate *code* from one programming language to another."""
    ok, err = validate_input(code, "Code")
    if not ok:
        return err
    if from_lang == to_lang:
        return "⚠️ Same language selected."

    prompt = f"Convert this {from_lang} to {to_lang}. Output ONLY code.\n\n{from_lang}:\n{code}"
    result = call_ollama(model_name, prompt, 0.3, max_tokens)
    # Status messages pass through unchanged; real output is de-fenced.
    if result.startswith(("❌", "⚠️")):
        return result
    return extract_code(result)
249
 
250
def generate_tests(code, language, framework, model_name, max_tokens):
    """Generate unit tests for *code*, defaulting the framework per language.

    Fixed: a whitespace-only *framework* now falls back to the default
    (pytest for Python, Jest otherwise) instead of being used verbatim —
    consistent with how fix_code treats blank error messages.
    """
    valid, error = validate_input(code, "Code")
    if not valid:
        return error

    fw = framework.strip() if framework else ""
    if not fw:
        fw = "pytest" if language == "Python" else "Jest"
    prompt = f"Generate unit tests for this {language} code using {fw}. Output ONLY test code.\n\n{code}"
    result = call_ollama(model_name, prompt, 0.3, max_tokens)
    return result if result.startswith(("❌", "⚠️")) else extract_code(result)
259
 
260
def document_code(code, language, style, model_name, max_tokens):
    """Add documentation (docstrings/comments/README) to *code*.

    README output keeps the full markdown response; other styles extract
    the fenced code block. Fixed: ⚠️ status messages (e.g. "Empty response")
    are now passed through unmodified like in the other tools, instead of
    being run through extract_code.
    """
    valid, error = validate_input(code, "Code")
    if not valid:
        return error

    prompt = f"Add {style.lower()} to this {language} code.\n\n{code}"
    result = call_ollama(model_name, prompt, 0.4, max_tokens)
    if style == "README" or result.startswith(("❌", "⚠️")):
        return result
    return extract_code(result)
 
 
268
 
269
def optimize_code(code, language, focus, model_name, max_tokens):
    """Ask the model to optimize *code* for the chosen focus area."""
    ok, err = validate_input(code, "Code")
    if not ok:
        return err

    goal = focus.lower()
    prompt = f"Optimize this {language} code for {goal}. Explain changes.\n\n{code}"
    return call_ollama(model_name, prompt, 0.3, max_tokens)
276
 
277
def build_regex(description, model_name, max_tokens):
    """Build a regex (with explanation and examples) from a description."""
    ok, err = validate_input(description, "Description")
    if not ok:
        return err

    prompt = (
        f"Create regex for: {description}\n\n"
        "Provide pattern, explanation, examples, and Python code."
    )
    return call_ollama(model_name, prompt, 0.3, max_tokens)
284
 
285
def build_api(description, framework, model_name, max_tokens):
    """Generate a REST API endpoint for *framework* from a description.

    Fixed: ⚠️ status messages are now passed through unmodified, matching
    every other tool, instead of being run through extract_code.
    """
    valid, error = validate_input(description, "Description")
    if not valid:
        return error

    prompt = f"Create REST API using {framework}:\n\n{description}\n\nInclude validation and error handling."
    result = call_ollama(model_name, prompt, 0.3, max_tokens)
    return result if result.startswith(("❌", "⚠️")) else extract_code(result)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
293
 
294
+ # ===== UI (Gradio 6.0 compatible) =====
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
295
 
296
+ with gr.Blocks(title="Axon v6") as demo:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
297
 
298
+ gr.Markdown("# 🔥 Axon v6\n### AI Coding Assistant • 10 Models • 9 Tools • 100% Local")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
299
 
300
+ status = gr.Markdown(value=get_status, every=5)
 
 
301
 
302
+ with gr.Row():
303
+ model_dropdown = gr.Dropdown(choices=list(MODELS.keys()), value="Qwen2.5 Coder 3B", label="🤖 Model", scale=3)
 
 
 
 
 
 
304
  temperature = gr.Slider(0, 1, value=0.7, step=0.1, label="🌡️ Creativity", scale=2)
305
  max_tokens = gr.Slider(256, 8192, value=2048, step=256, label="📏 Max Tokens", scale=2)
306
 
307
+ model_info = gr.Markdown(value="🚀 Fast & capable • Recommended")
308
+ model_dropdown.change(get_model_info, model_dropdown, model_info)
309
 
310
+ with gr.Tabs():
311
 
312
+ with gr.TabItem("💬 Chat"):
313
+ chatbot = gr.Chatbot(height=450)
 
314
  with gr.Row():
315
+ msg = gr.Textbox(placeholder="Ask anything...", show_label=False, scale=8)
316
+ send = gr.Button("Send ➤", variant="primary", scale=1)
 
 
 
 
317
  with gr.Row():
318
+ audio = gr.Audio(sources=["microphone"], type="filepath", label="🎤", scale=2)
319
+ transcribe = gr.Button("🎤 Transcribe", scale=1)
320
+ clear = gr.Button("🗑️ Clear", scale=1)
 
 
 
 
 
 
 
321
 
322
+ with gr.TabItem("⚡ Generate"):
 
323
  with gr.Row():
324
  with gr.Column(scale=1):
325
+ gen_prompt = gr.Textbox(label="📝 Describe what to build", lines=4)
 
 
 
 
326
  with gr.Row():
327
+ gen_lang = gr.Dropdown(LANGUAGES, value="Python", label="Language", scale=2)
328
  gen_temp = gr.Slider(0, 1, value=0.3, step=0.1, label="🌡️", scale=1)
329
+ gen_btn = gr.Button("⚡ Generate", variant="primary")
330
  with gr.Column(scale=2):
331
+ gen_output = gr.Code(label="Code", language="python", lines=18)
332
 
333
+ with gr.TabItem("🔍 Explain"):
 
334
  with gr.Row():
335
  with gr.Column(scale=1):
336
+ explain_input = gr.Code(label="📋 Code", lines=12)
337
+ explain_detail = gr.Radio(["Brief", "Normal", "Detailed"], value="Normal", label="Detail")
338
+ explain_btn = gr.Button("🔍 Explain", variant="primary")
 
 
 
339
  with gr.Column(scale=1):
340
+ explain_output = gr.Markdown(label="Explanation")
341
 
342
+ with gr.TabItem("🔧 Debug"):
 
343
  with gr.Row():
344
  with gr.Column(scale=1):
345
+ fix_input = gr.Code(label="🐛 Buggy Code", lines=10)
346
+ fix_error = gr.Textbox(label="❌ Error", lines=2)
347
+ fix_btn = gr.Button("🔧 Fix", variant="primary")
 
 
 
 
348
  with gr.Column(scale=1):
349
+ fix_output = gr.Markdown(label="Solution")
350
 
351
+ with gr.TabItem("📋 Review"):
 
352
  with gr.Row():
353
  with gr.Column(scale=1):
354
+ review_input = gr.Code(label="📋 Code", lines=14)
355
+ review_btn = gr.Button("📋 Review", variant="primary")
356
  with gr.Column(scale=1):
357
+ review_output = gr.Markdown(label="Review")
358
 
359
+ with gr.TabItem("🔄 Convert"):
 
360
  with gr.Row():
361
  with gr.Column(scale=1):
362
+ convert_input = gr.Code(label="📥 Source", lines=12)
363
  with gr.Row():
364
+ convert_from = gr.Dropdown(LANGUAGES, value="Python", label="From")
365
+ convert_to = gr.Dropdown(LANGUAGES, value="JavaScript", label="To")
366
+ convert_btn = gr.Button("🔄 Convert", variant="primary")
 
367
  with gr.Column(scale=1):
368
+ convert_output = gr.Code(label="📤 Result", lines=12)
369
 
370
+ with gr.TabItem("🧪 Test"):
 
371
  with gr.Row():
372
  with gr.Column(scale=1):
373
+ test_input = gr.Code(label="📋 Code", lines=12)
374
  with gr.Row():
375
+ test_lang = gr.Dropdown(LANGUAGES[:12], value="Python", label="Language")
376
+ test_fw = gr.Textbox(label="Framework", placeholder="pytest")
377
+ test_btn = gr.Button("🧪 Generate Tests", variant="primary")
378
  with gr.Column(scale=1):
379
+ test_output = gr.Code(label="Tests", lines=12)
380
 
381
+ with gr.TabItem("📝 Document"):
 
382
  with gr.Row():
383
  with gr.Column(scale=1):
384
+ doc_input = gr.Code(label="📋 Code", lines=12)
385
  with gr.Row():
386
+ doc_lang = gr.Dropdown(LANGUAGES, value="Python", label="Language")
387
+ doc_style = gr.Dropdown(["Docstrings", "Comments", "Both", "README"], value="Both", label="Style")
388
+ doc_btn = gr.Button("📝 Document", variant="primary")
 
 
 
389
  with gr.Column(scale=1):
390
+ doc_output = gr.Code(label="Documented", lines=12)
391
 
392
+ with gr.TabItem("🚀 Optimize"):
 
393
  with gr.Row():
394
  with gr.Column(scale=1):
395
+ opt_input = gr.Code(label="📋 Code", lines=12)
396
  with gr.Row():
397
+ opt_lang = gr.Dropdown(LANGUAGES, value="Python", label="Language")
398
+ opt_focus = gr.Dropdown(["All", "Performance", "Readability", "Memory"], value="All", label="Focus")
399
+ opt_btn = gr.Button("🚀 Optimize", variant="primary")
 
 
 
400
  with gr.Column(scale=1):
401
+ opt_output = gr.Markdown(label="Optimized")
402
 
403
+ with gr.TabItem("🛠️ Tools"):
404
+ gr.Markdown("### 🎯 Regex Builder")
 
 
 
405
  with gr.Row():
406
  with gr.Column(scale=1):
407
+ regex_desc = gr.Textbox(label="Describe pattern", lines=2)
408
+ regex_btn = gr.Button("🎯 Build Regex", variant="primary")
 
 
 
 
409
  with gr.Column(scale=1):
410
+ regex_output = gr.Markdown(label="Pattern")
411
 
412
+ gr.Markdown("---\n### 🔗 API Builder")
 
 
 
413
  with gr.Row():
414
  with gr.Column(scale=1):
415
+ api_desc = gr.Textbox(label="Describe endpoint", lines=2)
416
+ api_fw = gr.Dropdown(["FastAPI", "Express", "Flask", "Gin"], value="FastAPI", label="Framework")
417
+ api_btn = gr.Button("🔗 Build API", variant="primary")
 
 
 
 
 
 
 
418
  with gr.Column(scale=1):
419
+ api_output = gr.Code(label="API Code", lines=12)
420
 
421
+ gr.Markdown("<center>🔒 100% Local • Robust Error Handling</center>")
 
 
 
 
 
 
422
 
423
+ # Events
 
424
    def respond(message, history, model, temp, tokens):
        # Bridge between Gradio events and chat_stream: re-yields each
        # streamed history state, plus "" as the second output so the
        # input textbox is cleared while the answer streams in.
        history = history or []
        for updated in chat_stream(message, history, model, temp, tokens):
            yield updated, ""
428
 
429
  msg.submit(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
430
  send.click(respond, [msg, chatbot, model_dropdown, temperature, max_tokens], [chatbot, msg])
431
  clear.click(lambda: [], None, chatbot)
432
+ transcribe.click(transcribe_audio, audio, msg)
433
 
434
  gen_btn.click(generate_code, [gen_prompt, gen_lang, model_dropdown, gen_temp, max_tokens], gen_output)
435
  explain_btn.click(explain_code, [explain_input, model_dropdown, explain_detail, max_tokens], explain_output)
436
  fix_btn.click(fix_code, [fix_input, fix_error, model_dropdown, max_tokens], fix_output)
437
  review_btn.click(review_code, [review_input, model_dropdown, max_tokens], review_output)
438
  convert_btn.click(convert_code, [convert_input, convert_from, convert_to, model_dropdown, max_tokens], convert_output)
439
+ test_btn.click(generate_tests, [test_input, test_lang, test_fw, model_dropdown, max_tokens], test_output)
440
  doc_btn.click(document_code, [doc_input, doc_lang, doc_style, model_dropdown, max_tokens], doc_output)
441
  opt_btn.click(optimize_code, [opt_input, opt_lang, opt_focus, model_dropdown, max_tokens], opt_output)
442
  regex_btn.click(build_regex, [regex_desc, model_dropdown, max_tokens], regex_output)
443
+ api_btn.click(build_api, [api_desc, api_fw, model_dropdown, max_tokens], api_output)
444
 
445
  demo.launch(server_name="0.0.0.0", server_port=7860)