vedaco committed on
Commit
c657783
·
verified ·
1 Parent(s): 4ce71b0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +158 -137
app.py CHANGED
@@ -1,4 +1,4 @@
1
- """Gradio App for Veda Programming Assistant - Fixed Distillation"""
2
 
3
  import gradio as gr
4
  import tensorflow as tf
@@ -7,13 +7,15 @@ import json
7
  import re
8
  import ast
9
  import operator as op
 
 
10
 
11
  from model import VedaProgrammingLLM
12
  from tokenizer import VedaTokenizer
13
  from database import db
14
  from train import VedaTrainer
15
  from teacher import teacher
16
- from config import MODEL_DIR, DISTILLATION_ENABLED
17
 
18
 
19
  # --------- Globals ----------
@@ -22,6 +24,14 @@ tokenizer = None
22
  conversation_history = []
23
  current_conv_id = -1
24
 
 
 
 
 
 
 
 
 
25
 
26
  # --------- Helpers ----------
27
  def extract_text(message):
@@ -114,7 +124,7 @@ def is_good_response(response: str) -> bool:
114
  response = response.strip()
115
 
116
  # Too short
117
- if len(response) < 20:
118
  return False
119
 
120
  # Contains gibberish patterns
@@ -125,27 +135,40 @@ def is_good_response(response: str) -> bool:
125
  r'=\s+=\s+=',
126
  r'\[\.\]',
127
  r'return\s+if\s+is',
128
- r'\s{10,}', # Too many spaces
129
- r'(\w)\1{5,}', # Repeated characters
 
 
 
 
 
 
 
130
  ]
131
 
132
  for pattern in gibberish_patterns:
133
  if re.search(pattern, response):
134
  return False
135
 
136
- # Too many special characters compared to letters
137
  letters = sum(1 for c in response if c.isalpha())
138
  special = sum(1 for c in response if c in '[]{}()=<>|\\')
139
  if letters > 0 and special / letters > 0.5:
140
  return False
141
 
142
- # Check for common error phrases
 
 
 
 
 
143
  error_phrases = [
144
  "i'm not sure",
145
  "i don't know",
146
  "could you try rephrasing",
147
  "error:",
148
  "cannot understand",
 
149
  ]
150
 
151
  response_lower = response.lower()
@@ -156,6 +179,76 @@ def is_good_response(response: str) -> bool:
156
  return True
157
 
158
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
159
  # --------- Model Init ----------
160
  def initialize():
161
  global model, tokenizer
@@ -257,14 +350,13 @@ def get_student_response(user_input: str, temperature: float = 0.7, max_tokens:
257
  return clean_response(response)
258
 
259
  except Exception as e:
260
- print(f"Student model error: {e}")
261
  return ""
262
 
263
 
264
  def get_teacher_response(user_input: str) -> str:
265
- """Get response from teacher model (Dolphin Mistral)"""
266
  try:
267
- # Build conversation history for teacher
268
  conv_history = []
269
  for msg in conversation_history[-4:]:
270
  conv_history.append({"role": "user", "content": msg["user"]})
@@ -278,47 +370,39 @@ def get_teacher_response(user_input: str) -> str:
278
  return response if response else ""
279
 
280
  except Exception as e:
281
- print(f"Teacher model error: {e}")
282
  return ""
283
 
284
 
285
  def generate_response(user_input: str, temperature: float = 0.7, max_tokens: int = 200) -> str:
286
- """Generate a response - uses teacher if student fails"""
287
  global current_conv_id, conversation_history
288
 
289
  user_input = extract_text(user_input).strip()
290
  if not user_input:
291
  return "Please type a message!"
292
 
293
- # 1) Try math first
294
  math_ans = try_math_answer(user_input)
295
  if math_ans is not None:
296
  conversation_history.append({"user": user_input, "assistant": math_ans})
297
  current_conv_id = db.save_conversation(user_input, math_ans)
298
  return math_ans
299
 
300
- # 2) Try student model
301
- print(f"[Student] Generating response for: {user_input[:50]}...")
302
  student_response = get_student_response(user_input, temperature, max_tokens)
303
 
304
- # 3) Check if student response is good
305
  if is_good_response(student_response):
306
- print("[Student] Response is good quality, using it.")
307
  final_response = student_response
308
- source = "student"
309
  else:
310
- # 4) Student failed, ask teacher
311
- print("[Student] Response is poor quality, asking teacher...")
312
- print(f"[Student Bad Response]: {student_response[:100]}...")
313
-
314
  teacher_response = get_teacher_response(user_input)
315
 
316
  if teacher_response:
317
- print("[Teacher] Got good response from teacher!")
318
  final_response = teacher_response
319
- source = "teacher"
320
 
321
- # Save for future training
322
  db.save_distillation_data(
323
  user_input=user_input,
324
  teacher_response=teacher_response,
@@ -326,22 +410,15 @@ def generate_response(user_input: str, temperature: float = 0.7, max_tokens: int
326
  quality_score=1.0,
327
  )
328
  else:
329
- # Teacher also failed, use student response anyway
330
- print("[Teacher] No response from teacher, using student response.")
331
- final_response = student_response if student_response else "I'm sorry, I couldn't generate a good response. Please try again."
332
- source = "student"
333
 
334
- # 5) Save and return
335
  if not final_response:
336
- final_response = "I'm having trouble responding. Please try asking in a different way."
337
 
338
  conversation_history.append({"user": user_input, "assistant": final_response})
339
  current_conv_id = db.save_conversation(user_input, final_response)
340
 
341
- # Add indicator if from teacher
342
- if source == "teacher":
343
- final_response = f"πŸŽ“ {final_response}"
344
-
345
  return final_response
346
 
347
 
@@ -363,14 +440,14 @@ def respond(message, history, temperature, max_tokens):
363
  def feedback_good():
364
  if current_conv_id > 0:
365
  db.update_feedback(current_conv_id, 1)
366
- return "πŸ‘ Thanks! This helps me learn."
367
  return ""
368
 
369
 
370
  def feedback_bad():
371
  if current_conv_id > 0:
372
  db.update_feedback(current_conv_id, -1)
373
- return "πŸ‘Ž Thanks for feedback. I'll improve!"
374
  return ""
375
 
376
 
@@ -380,84 +457,34 @@ def clear_chat():
380
  return [], "Chat cleared."
381
 
382
 
383
- def retrain_with_distillation(epochs):
384
- """Retrain using teacher knowledge"""
385
- global model, tokenizer
386
-
387
- # Get user-approved conversations
388
- good_convs = db.get_good_conversations()
389
- extra_data = ""
390
- for conv in good_convs:
391
- extra_data += f"<USER> {conv['user_input']}\n"
392
- extra_data += f"<ASSISTANT> {conv['assistant_response']}\n\n"
393
-
394
- # Get distillation data (teacher responses)
395
- unused_distill = db.get_unused_distillation_data()
396
- distillation_data = ""
397
- for item in unused_distill:
398
- distillation_data += f"<USER> {item['user_input']}\n"
399
- distillation_data += f"<ASSISTANT> {item['teacher_response']}\n\n"
400
-
401
- total_samples = len(good_convs) + len(unused_distill)
402
-
403
- if total_samples == 0:
404
- return "❌ No training data available. Chat more and rate responses!"
405
-
406
- trainer = VedaTrainer()
407
- history = trainer.train(
408
- epochs=int(epochs),
409
- extra_data=extra_data,
410
- distillation_data=distillation_data,
411
- )
412
-
413
- model = trainer.model
414
- tokenizer = trainer.tokenizer
415
-
416
- # Mark distillation data as used
417
- if unused_distill:
418
- ids = [item["id"] for item in unused_distill]
419
- db.mark_distillation_used(ids)
420
-
421
- loss = history.history["loss"][-1]
422
-
423
- db.save_training_history(
424
- training_type="distillation",
425
- samples_used=total_samples,
426
- epochs=int(epochs),
427
- final_loss=loss,
428
- )
429
-
430
- return f"""βœ… Training Complete!
431
-
432
- πŸ“Š **Results:**
433
- - Loss: {loss:.4f}
434
- - User samples: {len(good_convs)}
435
- - Teacher samples: {len(unused_distill)}
436
- - Total epochs: {epochs}
437
-
438
- Your model has learned from the teacher!
439
- """
440
-
441
-
442
  def get_stats():
443
  stats = db.get_stats()
444
- teacher_available = teacher.is_available()
 
 
 
 
 
 
 
 
445
 
446
  return f"""## πŸ“Š Statistics
447
 
448
  ### Conversations
449
  | Metric | Count |
450
  |--------|-------|
451
- | πŸ’¬ Total | {stats['total']} |
452
- | πŸ‘ Positive | {stats['positive']} |
453
- | πŸ‘Ž Negative | {stats['negative']} |
454
 
455
- ### πŸŽ“ Distillation
456
  | Metric | Value |
457
  |--------|-------|
458
- | Teacher Available | {'βœ… Yes' if teacher_available else '❌ No'} |
459
- | Teacher Samples | {stats.get('distillation_total', 0)} |
460
- | Ready to Train | {stats.get('distillation_unused', 0)} |
 
461
  """
462
 
463
 
@@ -465,30 +492,32 @@ def get_stats():
465
  print("=" * 50)
466
  print("Starting Veda Programming Assistant...")
467
  print("=" * 50)
 
468
  initialize()
469
- print("Checking teacher availability...")
470
- if teacher.is_available():
471
- print("βœ… Teacher model (Dolphin Mistral) is available!")
472
- else:
473
- print("❌ Teacher model not available - check API key")
 
 
 
474
  print("=" * 50)
475
  print("Ready!")
476
  print("=" * 50)
477
 
478
 
479
- # --------- UI ----------
480
  with gr.Blocks(title="Veda Programming Assistant") as demo:
481
  gr.Markdown("""
482
  # πŸ•‰οΈ Veda Programming Assistant
483
 
484
  I can help you with **coding**, **programming concepts**, and **math**!
485
-
486
- *Responses marked with πŸŽ“ come from an advanced AI teacher.*
487
  """)
488
 
489
  with gr.Tabs():
490
  with gr.TabItem("πŸ’¬ Chat"):
491
- chatbot = gr.Chatbot(label="Conversation", height=400, value=[])
492
 
493
  with gr.Row():
494
  msg = gr.Textbox(
@@ -504,11 +533,11 @@ I can help you with **coding**, **programming concepts**, and **math**!
504
  max_tokens = gr.Slider(50, 400, 200, step=50, label="Response length")
505
 
506
  with gr.Row():
507
- good_btn = gr.Button("πŸ‘ Good", variant="secondary")
508
- bad_btn = gr.Button("πŸ‘Ž Bad", variant="secondary")
509
  clear_btn = gr.Button("πŸ—‘οΈ Clear", variant="secondary")
510
 
511
- feedback_msg = gr.Textbox(label="Status", lines=1, interactive=False)
512
 
513
  send_btn.click(respond, [msg, chatbot, temperature, max_tokens], [msg, chatbot])
514
  msg.submit(respond, [msg, chatbot, temperature, max_tokens], [msg, chatbot])
@@ -516,7 +545,7 @@ I can help you with **coding**, **programming concepts**, and **math**!
516
  bad_btn.click(feedback_bad, outputs=feedback_msg)
517
  clear_btn.click(clear_chat, outputs=[chatbot, feedback_msg])
518
 
519
- gr.Markdown("### πŸ’‘ Examples")
520
  gr.Examples(
521
  examples=[
522
  ["Hello! What can you do?"],
@@ -525,34 +554,26 @@ I can help you with **coding**, **programming concepts**, and **math**!
525
  ["Explain recursion"],
526
  ["Write bubble sort"],
527
  ["2+2=?"],
528
- ["What is the difference between list and tuple?"],
 
529
  ],
530
  inputs=msg,
531
  )
532
 
533
- with gr.TabItem("πŸŽ“ Training"):
534
- gr.Markdown("""
535
- ### Improve the Model
536
-
537
- The model learns from:
538
- 1. **Your feedback** - Rate responses πŸ‘ or πŸ‘Ž
539
- 2. **Teacher knowledge** - Learns from advanced AI
540
-
541
- Click below to train with collected data.
542
- """)
543
-
544
- train_epochs = gr.Slider(5, 30, 15, step=1, label="Training Epochs")
545
- train_btn = gr.Button("πŸš€ Train Model", variant="primary")
546
- train_output = gr.Markdown()
547
-
548
- train_btn.click(retrain_with_distillation, inputs=train_epochs, outputs=train_output)
549
-
550
- with gr.TabItem("πŸ“Š Statistics"):
551
  stats_out = gr.Markdown()
552
  refresh_btn = gr.Button("πŸ”„ Refresh")
553
  refresh_btn.click(get_stats, outputs=stats_out)
 
 
 
 
 
 
 
554
 
555
- gr.Markdown("---\n**Veda Programming Assistant** | Made with ❀️")
556
 
557
 
558
  if __name__ == "__main__":
 
1
+ """Veda Programming Assistant - Auto Learning (Hidden Teacher)"""
2
 
3
  import gradio as gr
4
  import tensorflow as tf
 
7
  import re
8
  import ast
9
  import operator as op
10
+ import threading
11
+ import time
12
 
13
  from model import VedaProgrammingLLM
14
  from tokenizer import VedaTokenizer
15
  from database import db
16
  from train import VedaTrainer
17
  from teacher import teacher
18
+ from config import MODEL_DIR
19
 
20
 
21
  # --------- Globals ----------
 
24
  conversation_history = []
25
  current_conv_id = -1
26
 
27
+ # Auto-training settings
28
+ AUTO_TRAIN_ENABLED = True
29
+ AUTO_TRAIN_MIN_SAMPLES = 10 # Train after this many teacher responses
30
+ AUTO_TRAIN_INTERVAL = 1800 # Check every 30 minutes (in seconds)
31
+ AUTO_TRAIN_EPOCHS = 10
32
+ is_training = False
33
+ last_train_time = 0
34
+
35
 
36
  # --------- Helpers ----------
37
  def extract_text(message):
 
124
  response = response.strip()
125
 
126
  # Too short
127
+ if len(response) < 30:
128
  return False
129
 
130
  # Contains gibberish patterns
 
135
  r'=\s+=\s+=',
136
  r'\[\.\]',
137
  r'return\s+if\s+is',
138
+ r'\s{10,}',
139
+ r'(\w)\1{5,}',
140
+ r'\[\s*\]',
141
+ r'def\s+def',
142
+ r'class\s+class',
143
+ r'return\s+return',
144
+ r'if\s+if',
145
+ r'\(\s*\)',
146
+ r'=\s*=\s*=',
147
  ]
148
 
149
  for pattern in gibberish_patterns:
150
  if re.search(pattern, response):
151
  return False
152
 
153
+ # Too many special characters
154
  letters = sum(1 for c in response if c.isalpha())
155
  special = sum(1 for c in response if c in '[]{}()=<>|\\')
156
  if letters > 0 and special / letters > 0.5:
157
  return False
158
 
159
+ # Too many brackets without proper code
160
+ brackets = response.count('[') + response.count(']') + response.count('{') + response.count('}')
161
+ if brackets > 20 and 'def ' not in response and 'class ' not in response:
162
+ return False
163
+
164
+ # Check for error phrases
165
  error_phrases = [
166
  "i'm not sure",
167
  "i don't know",
168
  "could you try rephrasing",
169
  "error:",
170
  "cannot understand",
171
+ "not sure how to respond",
172
  ]
173
 
174
  response_lower = response.lower()
 
179
  return True
180
 
181
 
182
+ # --------- Auto Training ----------
183
+ def auto_train_background():
184
+ """Background thread that automatically trains when enough data collected"""
185
+ global model, tokenizer, is_training, last_train_time
186
+
187
+ while True:
188
+ time.sleep(60) # Check every minute
189
+
190
+ if not AUTO_TRAIN_ENABLED:
191
+ continue
192
+
193
+ if is_training:
194
+ continue
195
+
196
+ # Check if enough time passed since last training
197
+ if time.time() - last_train_time < AUTO_TRAIN_INTERVAL:
198
+ continue
199
+
200
+ # Check if we have enough samples
201
+ try:
202
+ unused = db.get_unused_distillation_data()
203
+ if len(unused) >= AUTO_TRAIN_MIN_SAMPLES:
204
+ print(f"\n[Auto-Train] Starting training with {len(unused)} samples...")
205
+ is_training = True
206
+
207
+ # Prepare training data
208
+ good_convs = db.get_good_conversations()
209
+ extra_data = ""
210
+ for conv in good_convs:
211
+ extra_data += f"<USER> {conv['user_input']}\n"
212
+ extra_data += f"<ASSISTANT> {conv['assistant_response']}\n\n"
213
+
214
+ distillation_data = ""
215
+ for item in unused:
216
+ distillation_data += f"<USER> {item['user_input']}\n"
217
+ distillation_data += f"<ASSISTANT> {item['teacher_response']}\n\n"
218
+
219
+ # Train
220
+ trainer = VedaTrainer()
221
+ history = trainer.train(
222
+ epochs=AUTO_TRAIN_EPOCHS,
223
+ extra_data=extra_data,
224
+ distillation_data=distillation_data,
225
+ )
226
+
227
+ # Update global model
228
+ model = trainer.model
229
+ tokenizer = trainer.tokenizer
230
+
231
+ # Mark as used
232
+ ids = [item["id"] for item in unused]
233
+ db.mark_distillation_used(ids)
234
+
235
+ loss = history.history["loss"][-1]
236
+ db.save_training_history(
237
+ training_type="auto",
238
+ samples_used=len(unused) + len(good_convs),
239
+ epochs=AUTO_TRAIN_EPOCHS,
240
+ final_loss=loss,
241
+ )
242
+
243
+ last_train_time = time.time()
244
+ is_training = False
245
+ print(f"[Auto-Train] Completed! Loss: {loss:.4f}")
246
+
247
+ except Exception as e:
248
+ print(f"[Auto-Train] Error: {e}")
249
+ is_training = False
250
+
251
+
252
  # --------- Model Init ----------
253
  def initialize():
254
  global model, tokenizer
 
350
  return clean_response(response)
351
 
352
  except Exception as e:
353
+ print(f"Student error: {e}")
354
  return ""
355
 
356
 
357
  def get_teacher_response(user_input: str) -> str:
358
+ """Get response from teacher model (hidden from user)"""
359
  try:
 
360
  conv_history = []
361
  for msg in conversation_history[-4:]:
362
  conv_history.append({"role": "user", "content": msg["user"]})
 
370
  return response if response else ""
371
 
372
  except Exception as e:
373
+ print(f"Teacher error: {e}")
374
  return ""
375
 
376
 
377
  def generate_response(user_input: str, temperature: float = 0.7, max_tokens: int = 200) -> str:
378
+ """Generate response - seamlessly uses teacher when needed (hidden from user)"""
379
  global current_conv_id, conversation_history
380
 
381
  user_input = extract_text(user_input).strip()
382
  if not user_input:
383
  return "Please type a message!"
384
 
385
+ # 1) Math first
386
  math_ans = try_math_answer(user_input)
387
  if math_ans is not None:
388
  conversation_history.append({"user": user_input, "assistant": math_ans})
389
  current_conv_id = db.save_conversation(user_input, math_ans)
390
  return math_ans
391
 
392
+ # 2) Try student
 
393
  student_response = get_student_response(user_input, temperature, max_tokens)
394
 
395
+ # 3) Check quality
396
  if is_good_response(student_response):
 
397
  final_response = student_response
 
398
  else:
399
+ # 4) Silently ask teacher (user doesn't know!)
 
 
 
400
  teacher_response = get_teacher_response(user_input)
401
 
402
  if teacher_response:
 
403
  final_response = teacher_response
 
404
 
405
+ # Save for auto-training (silent learning)
406
  db.save_distillation_data(
407
  user_input=user_input,
408
  teacher_response=teacher_response,
 
410
  quality_score=1.0,
411
  )
412
  else:
413
+ final_response = student_response if student_response else "I'm having trouble understanding. Could you rephrase that?"
 
 
 
414
 
 
415
  if not final_response:
416
+ final_response = "I couldn't generate a response. Please try again."
417
 
418
  conversation_history.append({"user": user_input, "assistant": final_response})
419
  current_conv_id = db.save_conversation(user_input, final_response)
420
 
421
+ # NO indicator - user doesn't know if it's teacher or student!
 
 
 
422
  return final_response
423
 
424
 
 
440
def feedback_good():
    """Record a πŸ‘ rating for the most recent conversation.

    Returns a short confirmation string, or "" when there is no
    saved conversation to rate yet.
    """
    if current_conv_id <= 0:
        return ""
    db.update_feedback(current_conv_id, 1)
    return "πŸ‘ Thanks!"
445
 
446
 
447
def feedback_bad():
    """Record a πŸ‘Ž rating for the most recent conversation.

    Returns a short confirmation string, or "" when there is no
    saved conversation to rate yet.
    """
    if current_conv_id <= 0:
        return ""
    db.update_feedback(current_conv_id, -1)
    return "πŸ‘Ž Thanks for feedback!"
452
 
453
 
 
457
  return [], "Chat cleared."
458
 
459
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
460
def get_stats():
    """Build the markdown statistics panel for the Stats tab.

    Reads aggregate counters from db.get_stats() and the module-level
    auto-training state (AUTO_TRAIN_ENABLED, is_training).
    """
    stats = db.get_stats()

    # Learning progress: fraction of collected teacher samples already
    # consumed by training. Guarded against division by zero.
    total_teacher = stats.get('distillation_total', 0)
    used_teacher = total_teacher - stats.get('distillation_unused', 0)

    if total_teacher > 0:
        learning_progress = (used_teacher / total_teacher) * 100
    else:
        learning_progress = 0

    # Fix: learning_progress was computed but never displayed; show it in
    # the Learning Progress table so the guard above has a purpose.
    return f"""## πŸ“Š Statistics

### Conversations
| Metric | Count |
|--------|-------|
| πŸ’¬ Total Chats | {stats['total']} |
| πŸ‘ Helpful | {stats['positive']} |
| πŸ‘Ž Needs Work | {stats['negative']} |

### 🧠 Learning Progress
| Metric | Value |
|--------|-------|
| Knowledge Gained | {used_teacher} lessons |
| Learning Queue | {stats.get('distillation_unused', 0)} pending |
| Learning Progress | {learning_progress:.0f}% |
| Auto-Training | {'βœ… Active' if AUTO_TRAIN_ENABLED else '❌ Disabled'} |
| Currently Training | {'πŸ”„ Yes' if is_training else 'βœ… Ready'} |
"""
489
 
490
 
 
492
  print("=" * 50)
493
  print("Starting Veda Programming Assistant...")
494
  print("=" * 50)
495
+
496
  initialize()
497
+
498
+ # Start auto-training background thread
499
+ if AUTO_TRAIN_ENABLED:
500
+ print("Starting auto-learning background process...")
501
+ train_thread = threading.Thread(target=auto_train_background, daemon=True)
502
+ train_thread.start()
503
+ print("Auto-learning enabled!")
504
+
505
  print("=" * 50)
506
  print("Ready!")
507
  print("=" * 50)
508
 
509
 
510
+ # --------- UI (Simple - No Training Tab) ----------
511
  with gr.Blocks(title="Veda Programming Assistant") as demo:
512
  gr.Markdown("""
513
  # πŸ•‰οΈ Veda Programming Assistant
514
 
515
  I can help you with **coding**, **programming concepts**, and **math**!
 
 
516
  """)
517
 
518
  with gr.Tabs():
519
  with gr.TabItem("πŸ’¬ Chat"):
520
+ chatbot = gr.Chatbot(label="Conversation", height=450, value=[])
521
 
522
  with gr.Row():
523
  msg = gr.Textbox(
 
533
  max_tokens = gr.Slider(50, 400, 200, step=50, label="Response length")
534
 
535
  with gr.Row():
536
+ good_btn = gr.Button("πŸ‘ Helpful", variant="secondary")
537
+ bad_btn = gr.Button("πŸ‘Ž Not Helpful", variant="secondary")
538
  clear_btn = gr.Button("πŸ—‘οΈ Clear", variant="secondary")
539
 
540
+ feedback_msg = gr.Textbox(label="", lines=1, interactive=False, show_label=False)
541
 
542
  send_btn.click(respond, [msg, chatbot, temperature, max_tokens], [msg, chatbot])
543
  msg.submit(respond, [msg, chatbot, temperature, max_tokens], [msg, chatbot])
 
545
  bad_btn.click(feedback_bad, outputs=feedback_msg)
546
  clear_btn.click(clear_chat, outputs=[chatbot, feedback_msg])
547
 
548
+ gr.Markdown("### πŸ’‘ Try asking:")
549
  gr.Examples(
550
  examples=[
551
  ["Hello! What can you do?"],
 
554
  ["Explain recursion"],
555
  ["Write bubble sort"],
556
  ["2+2=?"],
557
+ ["What is a list in Python?"],
558
+ ["How do I read a file?"],
559
  ],
560
  inputs=msg,
561
  )
562
 
563
+ with gr.TabItem("πŸ“Š Stats"):
564
+ gr.Markdown("### How is Veda doing?")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
565
  stats_out = gr.Markdown()
566
  refresh_btn = gr.Button("πŸ”„ Refresh")
567
  refresh_btn.click(get_stats, outputs=stats_out)
568
+
569
+ gr.Markdown("""
570
+ ---
571
+ **πŸ’‘ Tip:** Rate responses to help Veda learn faster!
572
+ - πŸ‘ = This was helpful
573
+ - πŸ‘Ž = This needs improvement
574
+ """)
575
 
576
+ gr.Markdown("---\n**Veda Programming Assistant** | Always learning, always improving!")
577
 
578
 
579
  if __name__ == "__main__":