vedaco commited on
Commit
19602e4
·
verified ·
1 Parent(s): 3096f4a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +394 -162
app.py CHANGED
@@ -1,239 +1,471 @@
 
 
1
  import gradio as gr
2
- import tensorflow as tf
3
- import numpy as np
4
  import os
5
  import json
 
6
 
7
  from model import VedaProgrammingLLM
8
  from tokenizer import VedaTokenizer
 
 
 
9
  from train import VedaTrainer, SAMPLE_CODE
 
 
 
 
10
 
11
- # Global state
12
- model = None
13
- tokenizer = None
14
 
15
- def initialize_model():
16
- """Initialize or load model"""
17
- global model, tokenizer
 
18
 
19
- model_path = "veda_model"
20
- config_file = os.path.join(model_path, "config.json")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
- try:
23
- if os.path.exists(config_file):
24
- print("Loading saved model...")
25
-
26
- with open(config_file, 'r') as f:
27
- config = json.load(f)
28
-
29
- tokenizer = VedaTokenizer()
30
- tokenizer.load(os.path.join(model_path, "tokenizer.json"))
31
-
32
- model = VedaProgrammingLLM(
33
- vocab_size=config['vocab_size'],
34
- max_length=config['max_length'],
35
- d_model=config['d_model'],
36
- num_heads=config['num_heads'],
37
- num_layers=config['num_layers'],
38
- ff_dim=config['ff_dim']
39
- )
40
-
41
- dummy = tf.zeros((1, config['max_length']), dtype=tf.int32)
42
- model(dummy)
43
- model.load_weights(os.path.join(model_path, "weights.h5"))
44
- print("Model loaded!")
45
  else:
46
- print("Training new model...")
47
- trainer = VedaTrainer(
48
- data_path="programming.txt",
49
- vocab_size=3000,
50
- max_length=128,
51
- batch_size=8
52
- )
53
- trainer.train(epochs=5, save_path=model_path)
54
- model = trainer.model
55
- tokenizer = trainer.tokenizer
56
- print("Model trained!")
57
-
58
- except Exception as e:
59
- print(f"Error: {e}")
60
- print("Creating fresh model...")
61
- trainer = VedaTrainer()
62
- trainer.train(epochs=5)
63
- model = trainer.model
64
- tokenizer = trainer.tokenizer
65
 
66
- def generate_code(prompt: str, max_tokens: int, temperature: float, top_k: int) -> str:
67
- """Generate code from prompt"""
68
- global model, tokenizer
 
 
 
 
 
 
69
 
70
- if model is None or tokenizer is None:
71
- return "Model not loaded. Please wait..."
72
 
73
  try:
74
  if not prompt.strip():
75
- return "Please enter a prompt."
76
-
77
- tokens = tokenizer.encode(prompt)
78
- if len(tokens) == 0:
79
- tokens = [2] # START token
80
 
81
- generated = model.generate(
82
- tokens,
83
- max_new_tokens=int(max_tokens),
 
84
  temperature=float(temperature),
 
85
  top_k=int(top_k)
86
  )
87
 
88
- result = tokenizer.decode(generated)
89
- return result
90
 
91
- except Exception as e:
92
- return f"Error: {str(e)}"
93
-
94
- def train_on_data(training_data: str, epochs: int) -> str:
95
- """Train model on provided data"""
96
- global model, tokenizer
97
-
98
- try:
99
- with open("programming.txt", 'w') as f:
100
- f.write(training_data)
101
-
102
- trainer = VedaTrainer(
103
- data_path="programming.txt",
104
- vocab_size=3000,
105
- max_length=128,
106
- batch_size=8
107
  )
108
 
109
- history = trainer.train(epochs=int(epochs), save_path="veda_model")
110
-
111
- model = trainer.model
112
- tokenizer = trainer.tokenizer
113
-
114
- final_loss = history.history['loss'][-1]
115
- final_acc = history.history.get('accuracy', [0])[-1]
116
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
117
  return f"""βœ… Training Complete!
118
 
119
- Loss: {final_loss:.4f}
120
- Accuracy: {final_acc:.4f}
121
- Epochs: {epochs}
122
- Vocab Size: {tokenizer.vocabulary_size}
 
123
  """
124
- except Exception as e:
125
- return f"❌ Training Error: {str(e)}"
126
 
127
- def get_model_info() -> str:
128
- """Get model information"""
129
- global model, tokenizer
 
130
 
131
- if model is None:
132
- return "No model loaded."
 
 
 
 
 
133
 
134
- config = model.get_config()
135
- params = model.count_params()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
136
 
137
  return f"""## πŸ•‰οΈ Veda Programming LLM
138
 
 
 
139
  | Property | Value |
140
  |----------|-------|
141
- | Vocabulary Size | {config['vocab_size']} |
142
- | Max Length | {config['max_length']} |
143
- | Model Dimension | {config['d_model']} |
144
- | Attention Heads | {config['num_heads']} |
145
- | Transformer Layers | {config['num_layers']} |
146
- | FFN Dimension | {config['ff_dim']} |
147
- | **Total Parameters** | **{params:,}** |
 
 
 
 
 
 
 
 
148
  """
149
 
150
- # Build interface
151
  def create_app():
152
- with gr.Blocks(title="Veda Programming", theme=gr.themes.Soft()) as app:
 
 
 
 
 
 
 
 
 
 
 
153
 
154
  gr.Markdown("""
155
  # πŸ•‰οΈ Veda Programming LLM
156
- ### TensorFlow-based Code Generation Model
 
 
157
  """)
158
 
159
  with gr.Tabs():
160
- # Generation Tab
161
  with gr.TabItem("πŸ’» Generate Code"):
162
  with gr.Row():
163
- with gr.Column():
164
  prompt = gr.Textbox(
165
- label="Code Prompt",
166
- placeholder="def fibonacci(",
167
- lines=3,
168
- value="def calculate_sum("
169
  )
170
 
171
  with gr.Row():
172
- max_tokens = gr.Slider(10, 200, value=50, step=5, label="Max Tokens")
173
- temperature = gr.Slider(0.1, 1.5, value=0.8, step=0.1, label="Temperature")
 
 
 
 
 
 
174
 
175
- top_k = gr.Slider(1, 100, value=40, step=5, label="Top-K")
 
 
 
 
 
 
 
 
176
 
177
- gen_btn = gr.Button("πŸš€ Generate", variant="primary")
178
 
179
- with gr.Column():
180
- output = gr.Code(label="Generated Code", language="python", lines=12)
 
 
 
 
 
 
 
 
 
 
 
 
 
181
 
 
182
  gen_btn.click(
183
  generate_code,
184
- inputs=[prompt, max_tokens, temperature, top_k],
185
- outputs=output
 
 
 
 
 
 
 
186
  )
187
 
 
 
 
 
 
 
 
 
188
  gr.Examples(
189
  examples=[
190
- ["def fibonacci(n):", 60, 0.7, 40],
191
- ["def bubble_sort(arr):", 80, 0.8, 40],
192
- ["class Calculator:", 100, 0.7, 50],
193
- ["def binary_search(", 70, 0.8, 40],
194
  ],
195
- inputs=[prompt, max_tokens, temperature, top_k],
196
- outputs=output,
197
- fn=generate_code
198
  )
199
 
200
- # Training Tab
201
- with gr.TabItem("πŸŽ“ Train Model"):
202
- training_input = gr.Textbox(
203
- label="Training Code",
204
- placeholder="Paste Python code...",
205
- lines=12,
206
- value=SAMPLE_CODE[:1500]
207
- )
208
-
209
- epochs_slider = gr.Slider(1, 20, value=5, step=1, label="Epochs")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
210
 
211
- train_btn = gr.Button("🎯 Train Model", variant="primary")
212
- train_output = gr.Textbox(label="Training Results", lines=8)
213
 
214
- train_btn.click(
215
- train_on_data,
216
- inputs=[training_input, epochs_slider],
217
- outputs=train_output
218
- )
 
 
 
 
 
219
 
220
- # Info Tab
221
  with gr.TabItem("ℹ️ Model Info"):
222
  info_output = gr.Markdown()
223
- refresh_btn = gr.Button("πŸ”„ Refresh")
224
- refresh_btn.click(get_model_info, outputs=info_output)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
225
 
226
  gr.Markdown("""
227
  ---
228
- **Veda Programming LLM** | Built with TensorFlow & Gradio
 
229
  """)
230
 
231
  return app
232
 
233
- # Main
234
- print("πŸ•‰οΈ Initializing Veda Programming LLM...")
235
- initialize_model()
236
-
237
- print("πŸš€ Starting Gradio...")
238
- app = create_app()
239
- app.launch(server_name="0.0.0.0", server_port=7860)
 
 
 
 
 
1
+ """Gradio interface for Veda Programming LLM with continuous learning"""
2
+
3
  import gradio as gr
 
 
4
  import os
5
  import json
6
+ from datetime import datetime
7
 
8
  from model import VedaProgrammingLLM
9
  from tokenizer import VedaTokenizer
10
+ from data_collector import collector
11
+ from continuous_trainer import trainer
12
+ from database import db
13
  from train import VedaTrainer, SAMPLE_CODE
14
+ from config import (
15
+ MODEL_DIR, DEFAULT_TEMPERATURE, DEFAULT_MAX_TOKENS,
16
+ DEFAULT_REPETITION_PENALTY, DEFAULT_TOP_K
17
+ )
18
 
19
+ # Current interaction tracking
20
+ current_interaction_id = None
 
21
 
22
+ def initialize():
23
+ """Initialize the system"""
24
+ print("πŸ•‰οΈ Initializing Veda Programming LLM...")
25
+ print("=" * 50)
26
 
27
+ # Try to load existing model
28
+ if trainer.load_model():
29
+ print("βœ… Existing model loaded")
30
+ else:
31
+ print("πŸ“š Training initial model...")
32
+ # Initial training
33
+ initial_trainer = VedaTrainer(
34
+ data_path="programming.txt",
35
+ vocab_size=5000,
36
+ max_length=256,
37
+ batch_size=8
38
+ )
39
+ initial_trainer.train(epochs=10, save_path=MODEL_DIR)
40
+
41
+ # Load the trained model into continuous trainer
42
+ trainer.load_model()
43
 
44
+ # Start auto-training scheduler
45
+ trainer.start_auto_training()
46
+
47
+ print("=" * 50)
48
+ print("βœ… System ready!")
49
+
50
def clean_output(text: str) -> str:
    """Collapse runs of blank lines so at most two consecutive blanks survive."""
    kept = []
    blanks_in_row = 0

    for raw in text.split('\n'):
        if not raw.strip():
            blanks_in_row += 1
            if blanks_in_row > 2:
                # Drop the third and later blank lines in a run.
                continue
        else:
            blanks_in_row = 0
        kept.append(raw)

    return '\n'.join(kept)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66
 
67
def generate_code(
    prompt: str,
    max_tokens: int,
    temperature: float,
    repetition_penalty: float,
    top_k: int
) -> tuple:
    """Run the model on *prompt* and log the interaction.

    Returns ``(generated_text, interaction_id)``; the id is ``-1`` when
    nothing was recorded (model missing, empty prompt, or an error).
    """
    global current_interaction_id

    # The model may still be loading/training right after startup.
    if trainer.model is None:
        return "⏳ Model loading...", -1

    try:
        if not prompt.strip():
            return "⚠️ Please enter a prompt.", -1

        raw = trainer.generate(
            prompt=prompt,
            max_tokens=int(max_tokens),
            temperature=float(temperature),
            repetition_penalty=float(repetition_penalty),
            top_k=int(top_k)
        )
        tidy = clean_output(raw)

        # Persist the exchange so user feedback can be attached to it later.
        current_interaction_id = collector.collect_interaction(
            prompt=prompt,
            generated_code=tidy,
            temperature=temperature,
            max_tokens=max_tokens
        )
        return tidy, current_interaction_id

    except Exception as exc:
        import traceback
        traceback.print_exc()
        return f"❌ Error: {str(exc)}", -1
109
+
110
def submit_feedback(interaction_id: int, is_positive: bool, edited_code: str = None):
    """Record a 👍/👎 rating (plus an optional edited version of the code) for one interaction."""
    if interaction_id < 0:
        return "⚠️ No interaction to rate"

    # Only forward an edit when it contains something other than whitespace.
    edit = edited_code if edited_code and edited_code.strip() else None
    collector.record_feedback(
        interaction_id=interaction_id,
        is_positive=is_positive,
        edited_code=edit,
    )

    parts = [
        f"{'👍' if is_positive else '👎'} Feedback recorded! Thank you for helping improve the model.",
        f"📊 Approved samples pending training: {collector.get_pending_count()}",
    ]
    if trainer.should_retrain():
        parts.append("🔄 Enough samples collected - model will be retrained soon!")

    return "\n".join(parts)
131
+
132
def positive_feedback(interaction_id, code):
    """Thumbs-up handler: forward a positive rating for the interaction."""
    return submit_feedback(int(interaction_id), True, code)
134
+
135
def negative_feedback(interaction_id, code):
    """Thumbs-down handler: forward a negative rating for the interaction."""
    return submit_feedback(int(interaction_id), False, code)
137
+
138
def manual_train(epochs: int):
    """Kick off a training run on the collected approved samples (unless one is already running)."""
    if trainer.is_training:
        return "⏳ Training already in progress..."

    outcome = trainer.train(epochs=int(epochs))

    # Surface the failure message as-is when the run did not succeed.
    if outcome['status'] != 'success':
        return f"❌ Training Error: {outcome['message']}"

    return f"""✅ Training Complete!

📊 Results:
- Version: {outcome['version']}
- Loss: {outcome['loss']:.4f}
- Accuracy: {outcome['accuracy']:.4f}
- Samples Used: {outcome['samples_used']}
"""
156
 
157
def add_training_code(code: str, category: str):
    """Append a user-contributed snippet (tagged with *category*) to the training dataset."""
    if not code.strip():
        return "⚠️ Please enter some code"

    collector.add_training_sample(code, category)
    return "✅ Code added to training data!\n" + f"Category: {category}"
164
+
165
def get_statistics():
    """Render system-wide usage and training statistics as a markdown report."""
    usage = collector.get_statistics()
    state = trainer.get_status()

    return f"""## 📊 System Statistics

### Model Status
| Property | Value |
|----------|-------|
| 🤖 Model Version | {state['model_version']} |
| 🔄 Currently Training | {'Yes' if state['is_training'] else 'No'} |
| 📈 Training Progress | {state['training_progress']:.0f}% |
| ⏰ Last Training | {state['last_training'] or 'Never'} |

### Learning Data
| Metric | Count |
|--------|-------|
| 💬 Total Interactions | {usage['total_interactions']} |
| 👍 Positive Feedback | {usage['positive_feedback']} |
| 👎 Negative Feedback | {usage['negative_feedback']} |
| ✅ Approved Samples | {usage['approved_samples']} |
| 📚 Pending for Training | {state['pending_samples']} |
| 🎯 Min Samples to Retrain | {state['min_samples_for_training']} |

### Training History
| Metric | Value |
|--------|-------|
| 🔄 Total Training Runs | {usage['training_runs']} |
| 📝 Code Samples | {usage['code_samples']} |

### Last 7 Days
| Metric | Count |
|--------|-------|
| 🔢 Generations | {usage['recent_generations']} |
| 👍 Positive | {usage['recent_positive']} |
| 👎 Negative | {usage['recent_negative']} |
| 📈 Approval Rate | {usage['approval_rate']:.1f}% |
"""
204
+
205
def get_recent_interactions():
    """Markdown summary of the ten most recent generations and their ratings."""
    rows = db.get_recent_interactions(limit=10)
    if not rows:
        return "No interactions yet."

    chunks = ["## Recent Interactions\n\n"]
    for row in rows:
        # feedback: >0 approved, <0 rejected, otherwise still unrated.
        # NOTE(review): assumes 'feedback' is always an int (0 when unrated) — confirm in db layer.
        if row['feedback'] > 0:
            verdict = "👍"
        elif row['feedback'] < 0:
            verdict = "👎"
        else:
            verdict = "⏳"
        chunks.append(f"""### {row['timestamp']}
**Prompt:** `{row['prompt'][:50]}...`
**Feedback:** {verdict}

---
""")

    return "".join(chunks)
224
+
225
def get_training_history():
    """Markdown table of the last ten training runs."""
    runs = db.get_training_history(limit=10)
    if not runs:
        return "No training history yet."

    lines = [
        "## Training History\n",
        "| Date | Version | Samples | Loss | Accuracy |",
        "|------|---------|---------|------|----------|",
    ]
    for run in runs:
        # timestamp[:10] keeps just the ISO date portion.
        lines.append(
            f"| {run['timestamp'][:10]} | {run['model_version']} | "
            f"{run['samples_used']} | {run['final_loss']:.4f} | {run['final_accuracy']:.4f} |"
        )

    return "\n".join(lines) + "\n"
241
+
242
def get_model_info():
    """Markdown description of the current model's architecture and features."""
    if trainer.model is None:
        return "⏳ Model not loaded"

    cfg = trainer.model.get_config()
    n_params = trainer.model.count_params()

    return f"""## 🕉️ Veda Programming LLM

### Architecture

| Property | Value |
|----------|-------|
| 📚 Vocabulary Size | {cfg['vocab_size']:,} |
| 📏 Max Sequence Length | {cfg['max_length']} |
| 🧠 Model Dimension | {cfg['d_model']} |
| 👁️ Attention Heads | {cfg['num_heads']} |
| 📦 Transformer Layers | {cfg['num_layers']} |
| 🔧 FFN Dimension | {cfg['ff_dim']} |
| ⚡ **Total Parameters** | **{n_params:,}** |

### Features
- ✅ Continuous Learning from User Feedback
- ✅ Automatic Retraining
- ✅ Repetition Penalty
- ✅ Top-K & Top-P Sampling
- ✅ Temperature Control
- ✅ Model Versioning
"""
272
 
273
# Create the interface
def create_app():
    """Build and return the Gradio Blocks UI (generation, training, stats, info tabs)."""
    with gr.Blocks(
        title="Veda Programming LLM",
        theme=gr.themes.Soft(),
        css="""
        .feedback-btn { min-width: 100px; }
        .positive { background-color: #4CAF50 !important; }
        .negative { background-color: #f44336 !important; }
        """
    ) as app:

        # Hidden state for interaction tracking; -1 means "nothing to rate yet".
        interaction_id = gr.State(value=-1)

        gr.Markdown("""
        # 🕉️ Veda Programming LLM
        ### AI Code Generation with Continuous Learning

        This model learns from your feedback! Rate generated code to help improve it.
        """)

        with gr.Tabs():
            # ============ Generation Tab ============
            with gr.TabItem("💻 Generate Code"):
                with gr.Row():
                    with gr.Column(scale=1):
                        prompt = gr.Textbox(
                            label="📝 Code Prompt",
                            placeholder="Enter your code prompt...",
                            lines=4,
                            value="def fibonacci(n):"
                        )

                        with gr.Row():
                            max_tokens = gr.Slider(
                                10, 300, value=DEFAULT_MAX_TOKENS,
                                step=10, label="📏 Max Tokens"
                            )
                            temperature = gr.Slider(
                                0.1, 1.5, value=DEFAULT_TEMPERATURE,
                                step=0.1, label="🌡️ Temperature"
                            )

                        with gr.Row():
                            repetition_penalty = gr.Slider(
                                1.0, 2.0, value=DEFAULT_REPETITION_PENALTY,
                                step=0.1, label="🔄 Repetition Penalty"
                            )
                            top_k = gr.Slider(
                                10, 100, value=DEFAULT_TOP_K,
                                step=5, label="🎯 Top-K"
                            )

                        gen_btn = gr.Button("🚀 Generate Code", variant="primary", size="lg")

                    with gr.Column(scale=1):
                        # interactive=True so users can fix the code before rating it.
                        output = gr.Code(
                            label="📄 Generated Code (Edit if needed before rating)",
                            language="python",
                            lines=15,
                            interactive=True
                        )

                        gr.Markdown("### 📊 Rate this output to help improve the model:")

                        with gr.Row():
                            good_btn = gr.Button("👍 Good", variant="primary", elem_classes=["feedback-btn", "positive"])
                            bad_btn = gr.Button("👎 Bad", variant="secondary", elem_classes=["feedback-btn", "negative"])

                        feedback_output = gr.Textbox(label="Feedback Status", lines=2)

                # Wire up generation: writes both the code and the interaction id state.
                gen_btn.click(
                    generate_code,
                    inputs=[prompt, max_tokens, temperature, repetition_penalty, top_k],
                    outputs=[output, interaction_id]
                )

                # Wire up feedback: the (possibly edited) output is sent back with the rating.
                good_btn.click(
                    positive_feedback,
                    inputs=[interaction_id, output],
                    outputs=feedback_output
                )

                bad_btn.click(
                    negative_feedback,
                    inputs=[interaction_id, output],
                    outputs=feedback_output
                )

                # Examples
                gr.Markdown("### 💡 Example Prompts")
                gr.Examples(
                    examples=[
                        ["def fibonacci(n):", 100, 0.7, 1.2, 50],
                        ["def bubble_sort(arr):", 120, 0.7, 1.2, 50],
                        ["class Calculator:", 150, 0.8, 1.3, 40],
                        ["def binary_search(arr, target):", 100, 0.7, 1.2, 50],
                    ],
                    inputs=[prompt, max_tokens, temperature, repetition_penalty, top_k]
                )

            # ============ Training Tab ============
            with gr.TabItem("🎓 Training"):
                with gr.Row():
                    with gr.Column():
                        gr.Markdown("### 🔄 Manual Training")
                        gr.Markdown("Trigger training on collected approved samples.")

                        train_epochs = gr.Slider(1, 20, value=5, step=1, label="Epochs")
                        train_btn = gr.Button("🎯 Start Training", variant="primary")
                        train_output = gr.Textbox(label="Training Output", lines=8)

                        train_btn.click(manual_train, inputs=[train_epochs], outputs=train_output)

                    with gr.Column():
                        gr.Markdown("### 📝 Add Training Code")
                        gr.Markdown("Contribute code directly to the training dataset.")

                        code_input = gr.Textbox(
                            label="Code",
                            placeholder="Paste your Python code here...",
                            lines=10
                        )

                        category = gr.Dropdown(
                            choices=["function", "class", "algorithm", "utility", "other"],
                            value="function",
                            label="Category"
                        )

                        add_btn = gr.Button("➕ Add to Training Data")
                        add_output = gr.Textbox(label="Status")

                        add_btn.click(add_training_code, inputs=[code_input, category], outputs=add_output)

            # ============ Statistics Tab ============
            with gr.TabItem("📊 Statistics"):
                stats_output = gr.Markdown()
                refresh_stats = gr.Button("🔄 Refresh Statistics")
                refresh_stats.click(get_statistics, outputs=stats_output)

                gr.Markdown("---")

                with gr.Row():
                    with gr.Column():
                        interactions_output = gr.Markdown()
                        refresh_interactions = gr.Button("🔄 Refresh Interactions")
                        refresh_interactions.click(get_recent_interactions, outputs=interactions_output)

                    with gr.Column():
                        history_output = gr.Markdown()
                        refresh_history = gr.Button("🔄 Refresh History")
                        refresh_history.click(get_training_history, outputs=history_output)

            # ============ Model Info Tab ============
            with gr.TabItem("ℹ️ Model Info"):
                info_output = gr.Markdown()
                refresh_info = gr.Button("🔄 Refresh Info")
                refresh_info.click(get_model_info, outputs=info_output)

                gr.Markdown("""
                ### 🧠 How Continuous Learning Works

                1. **You generate code** using the model
                2. **You rate the output** (👍 or 👎)
                3. **Good outputs are saved** for training
                4. **When enough samples collect**, the model retrains
                5. **The model improves** based on your feedback!

                ### 💡 Tips

                - Rate outputs honestly to help the model learn
                - Edit code before rating if it's close but not perfect
                - The more you use it, the better it gets!
                - Contribute your own code samples for faster learning
                """)

        gr.Markdown("""
        ---
        **🕉️ Veda Programming LLM** | Continuous Learning System |
        Built with TensorFlow & Gradio
        """)

    return app
460
 
461
def _main():
    """Script entry point: initialize the system, then serve the UI."""
    initialize()

    print("\n🚀 Starting Gradio Interface...")
    ui = create_app()
    ui.launch(
        server_name="0.0.0.0",  # listen on all interfaces (container-friendly)
        server_port=7860,
        show_error=True,
    )


# Main execution
if __name__ == "__main__":
    _main()