LeroyDyer committed on
Commit be6ceb5 · verified · 1 Parent(s): 3fa0b25

Update app.py

Files changed (1):
  app.py (+1072 −430)

app.py CHANGED
@@ -1,111 +1,185 @@
-# File: lcars_enhanced_interface.py

 import asyncio
 import json
 import os
 import time
 import uuid
-from typing import Dict, List, Any, Optional
 from dataclasses import dataclass
 import threading
-import pyttsx3
-import re
-from pathlib import Path

 import gradio as gr
 from rich.console import Console
-from openai import OpenAI, AsyncOpenAI

-# --- Configuration ---
-LOCAL_BASE_URL = "http://localhost:1234/v1"
-LOCAL_API_KEY = "not-needed"

-# HuggingFace Spaces configuration
-HF_INFERENCE_URL = "https://api-inference.huggingface.co/models/"
-HF_API_KEY = os.getenv("HF_API_KEY", "")

-# Available model options
-MODEL_OPTIONS = {
-    "Local LM Studio": LOCAL_BASE_URL,
-    "Codellama 7B": "codellama/CodeLlama-7b-hf",
-    "Mistral 7B": "mistralai/Mistral-7B-v0.1",
-    "Llama 2 7B": "meta-llama/Llama-2-7b-chat-hf",
-    "Falcon 7B": "tiiuae/falcon-7b-instruct"
-}

-DEFAULT_TEMPERATURE = 0.7
-DEFAULT_MAX_TOKENS = 5000

-console = Console()

-# --- Canvas Artifact Dataclass ---
 @dataclass
-class CanvasArtifact:
-    id: str
-    type: str  # 'code', 'diagram', 'text', 'image'
-    content: str
-    title: str
-    timestamp: float
-    metadata: Dict[str, Any]

-# --- Enhanced LLMAgent with Canvas Support ---
-class EnhancedLLMAgent:
-    def __init__(self, model_id: str = "local-model", system_prompt: str = None,
-                 base_url: str = LOCAL_BASE_URL, api_key: str = LOCAL_API_KEY,
-                 use_huggingface: bool = False):
-
-        self.use_huggingface = use_huggingface
         self.model_id = model_id
-        self.system_prompt = system_prompt or """You are an advanced AI development assistant operating in a Star Trek LCARS interface.
-You specialize in code generation, analysis, and collaborative development.
-Always provide practical, executable code solutions when appropriate.
-Format code responses clearly with proper markdown code blocks and explain your reasoning."""
-
-        if use_huggingface:
-            # Use HuggingFace Inference API
-            self.base_url = HF_INFERENCE_URL
-            self.api_key = HF_API_KEY
-            self.client = None
-            console.log("[green]🚀 Using HuggingFace Inference API[/green]")
-        else:
-            # Use local LM Studio
-            self.base_url = base_url
-            self.api_key = api_key
-            self.client = OpenAI(base_url=base_url, api_key=api_key)
-            console.log(f"[green]🚀 Using Local LM Studio: {base_url}[/green]")
-
-        # Enhanced conversation and canvas management
-        self.conversations: Dict[str, List[Dict]] = {}
-        self.canvas_artifacts: Dict[str, List[CanvasArtifact]] = {}
-        self.max_history_length = 50

         # Speech synthesis
         try:
             self.tts_engine = pyttsx3.init()
             self.setup_tts()
             self.speech_enabled = True
-            console.log("[green]TTS engine initialized successfully[/green]")
         except Exception as e:
-            console.log(f"[red]TTS initialization failed: {e}[/red]")
             self.speech_enabled = False
-
     def setup_tts(self):
         """Configure text-to-speech engine"""
-        try:
             voices = self.tts_engine.getProperty('voices')
             if voices:
-                # Try to find a better voice
-                for voice in voices:
-                    if 'female' in voice.name.lower() or 'zira' in voice.name.lower():
-                        self.tts_engine.setProperty('voice', voice.id)
-                        break
-                else:
-                    self.tts_engine.setProperty('voice', voices[0].id)
-
-            self.tts_engine.setProperty('rate', 180)
-            self.tts_engine.setProperty('volume', 1.0)
-        except Exception as e:
-            console.log(f"[red]TTS setup error: {e}[/red]")

     def speak(self, text: str):
         """Convert text to speech in a non-blocking way"""
@@ -114,81 +188,657 @@ class EnhancedLLMAgent:
         def _speak():
             try:
-                # Clean text for speech
                 clean_text = re.sub(r'```.*?```', '', text, flags=re.DOTALL)
                 clean_text = re.sub(r'`.*?`', '', clean_text)
-                clean_text = re.sub(r'\n+', '. ', clean_text)
-                clean_text = re.sub(r'\s+', ' ', clean_text)
                 clean_text = clean_text.strip()
-
-                if clean_text and len(clean_text) > 10:
-                    console.log(f"[blue]Speaking: {clean_text[:100]}...[/blue]")
-                    self.tts_engine.say(clean_text[:400])
                 self.tts_engine.runAndWait()
             except Exception as e:
                 console.log(f"[red]TTS Error: {e}[/red]")

         thread = threading.Thread(target=_speak, daemon=True)
         thread.start()
-    async def _local_inference(self, messages: List[Dict]) -> str:
-        """Use local LM Studio"""
         try:
-            async_client = AsyncOpenAI(base_url=self.base_url, api_key=self.api_key)
-            response = await async_client.chat.completions.create(
                 model=self.model_id,
                 messages=messages,
-                temperature=0.7,
-                max_tokens=DEFAULT_MAX_TOKENS
             )
-            return response.choices[0].message.content
         except Exception as e:
-            return f"Local inference error: {str(e)}"
-
-    async def _hf_inference(self, messages: List[Dict]) -> str:
-        """Use HuggingFace Inference API"""
         try:
-            import requests
-            # Convert to HF format
-            prompt = self._convert_messages_to_prompt(messages)
-
-            headers = {
-                "Authorization": f"Bearer {self.api_key}",
-                "Content-Type": "application/json"
-            }
-
-            payload = {
-                "inputs": prompt,
-                "parameters": {
-                    "max_new_tokens": DEFAULT_MAX_TOKENS,
-                    "temperature": 0.7,
-                    "do_sample": True,
-                    "return_full_text": False
-                }
-            }
-
-            model_url = f"{self.base_url}{self.model_id}"
-            response = requests.post(model_url, headers=headers, json=payload, timeout=30)
-            response.raise_for_status()
-            result = response.json()
-            return result[0]['generated_text']
         except Exception as e:
-            return f"HuggingFace API Error: {str(e)}"

-    def _convert_messages_to_prompt(self, messages: List[Dict]) -> str:
-        """Convert conversation messages to a single prompt for HF"""
-        prompt = ""
         for msg in messages:
-            if msg["role"] == "system":
-                prompt += f"System: {msg['content']}\n\n"
-            elif msg["role"] == "user":
-                prompt += f"User: {msg['content']}\n\n"
-            elif msg["role"] == "assistant":
-                prompt += f"Assistant: {msg['content']}\n\n"
-        prompt += "Assistant:"
-        return prompt
-
-    def add_artifact_to_canvas(self, conversation_id: str, content: str, artifact_type: str = "code", title: str = None):
         """Add artifacts to the collaborative canvas"""
         if conversation_id not in self.canvas_artifacts:
             self.canvas_artifacts[conversation_id] = []
@@ -203,99 +853,71 @@ class EnhancedLLMAgent:
         )

         self.canvas_artifacts[conversation_id].append(artifact)
         console.log(f"[green]Added artifact to canvas: {artifact.title}[/green]")
         return artifact
-
     def get_canvas_context(self, conversation_id: str) -> str:
         """Get formatted canvas context for LLM prompts"""
         if conversation_id not in self.canvas_artifacts or not self.canvas_artifacts[conversation_id]:
             return ""

         context_lines = ["\n=== COLLABORATIVE CANVAS ARTIFACTS ==="]
-        for artifact in self.canvas_artifacts[conversation_id][-10:]:
             context_lines.append(f"\n--- {artifact.title} [{artifact.type.upper()}] ---")
             preview = artifact.content[:500] + "..." if len(artifact.content) > 500 else artifact.content
             context_lines.append(preview)

         return "\n".join(context_lines) + "\n=================================\n"
-
-    async def chat_with_canvas(self, message: str, conversation_id: str = "default", include_canvas: bool = True) -> str:
-        """Enhanced chat that works with both local and HF"""
-        if conversation_id not in self.conversations:
-            self.conversations[conversation_id] = []
-
-        # Build messages with system prompt and canvas context
-        messages = [{"role": "system", "content": self.system_prompt}]
-
-        # Include canvas context if requested
         if include_canvas:
             canvas_context = self.get_canvas_context(conversation_id)
             if canvas_context:
-                messages.append({"role": "system", "content": f"Current collaborative canvas state:\n{canvas_context}"})
-
-        # Add conversation history
-        for msg in self.conversations[conversation_id][-self.max_history_length:]:
-            messages.append(msg)
-
-        # Add current message
-        messages.append({"role": "user", "content": message})

         try:
-            if self.use_huggingface:
-                response_text = await self._hf_inference(messages)
-            else:
-                response_text = await self._local_inference(messages)
-
-            # Update conversation history
-            self.conversations[conversation_id].extend([
-                {"role": "user", "content": message},
-                {"role": "assistant", "content": response_text}
-            ])

             # Auto-extract and add code artifacts to canvas
-            self._extract_artifacts_to_canvas(response_text, conversation_id)

-            return response_text

         except Exception as e:
             error_msg = f"Error in chat_with_canvas: {str(e)}"
             console.log(f"[red]{error_msg}[/red]")
             return error_msg
-
     def _extract_artifacts_to_canvas(self, response: str, conversation_id: str):
         """Automatically extract code blocks and add to canvas"""
-        code_blocks = re.findall(r'```(?:\w+)?\n(.*?)```', response, re.DOTALL)
-        for i, code_block in enumerate(code_blocks):
-            if len(code_block.strip()) > 10:
-                lang_match = re.search(r'```(\w+)\n', response)
-                lang = lang_match.group(1) if lang_match else "unknown"
-
                 self.add_artifact_to_canvas(
                     conversation_id,
                     code_block.strip(),
                     "code",
-                    f"code_snippet_{lang}_{len(self.canvas_artifacts.get(conversation_id, [])) + 1}"
                 )
-
-    def clear_conversation(self, conversation_id: str = "default"):
-        """Clear conversation but keep canvas artifacts"""
-        if conversation_id in self.conversations:
-            self.conversations[conversation_id] = []
-        console.log(f"[yellow]Cleared conversation: {conversation_id}[/yellow]")
-
-    def clear_canvas(self, conversation_id: str = "default"):
-        """Clear canvas artifacts"""
-        if conversation_id in self.canvas_artifacts:
-            self.canvas_artifacts[conversation_id] = []
-        console.log(f"[yellow]Cleared canvas: {conversation_id}[/yellow]")
-
     def get_canvas_summary(self, conversation_id: str) -> List[Dict]:
         """Get summary of canvas artifacts for display"""
         if conversation_id not in self.canvas_artifacts:
             return []

         artifacts = []
-        for artifact in reversed(self.canvas_artifacts[conversation_id]):
             artifacts.append({
                 "id": artifact.id,
                 "type": artifact.type.upper(),
@@ -305,7 +927,7 @@ class EnhancedLLMAgent:
             })

         return artifacts
-
     def get_artifact_by_id(self, conversation_id: str, artifact_id: str) -> Optional[CanvasArtifact]:
         """Get specific artifact by ID"""
         if conversation_id not in self.canvas_artifacts:
@@ -315,44 +937,43 @@ class EnhancedLLMAgent:
             if artifact.id == artifact_id:
                 return artifact
         return None

-    def update_config(self, base_url: str, api_key: str, model_id: str, temperature: float, max_tokens: int):
-        """Update agent configuration"""
-        self.base_url = base_url
-        self.api_key = api_key
-        self.model_id = model_id
-        console.log(f"[blue]Updated config: {model_id} @ {base_url}[/blue]")
-
-    @staticmethod
-    async def fetch_available_models(base_url: str, api_key: str, use_huggingface: bool = False) -> List[str]:
-        """Fetch available models - works for both local and HF"""
-        if use_huggingface:
-            # Return popular HF models
-            return list(MODEL_OPTIONS.keys())[1:]  # Skip "Local LM Studio"
-        else:
-            # Fetch from local LM Studio
-            try:
-                console.log(f"[blue]Fetching models from {base_url}[/blue]")
-                async_client = AsyncOpenAI(base_url=base_url, api_key=api_key)
-                models = await async_client.models.list()
-                model_list = [model.id for model in models.data]
-                console.log(f"[green]Found {len(model_list)} local models[/green]")
-                return model_list
-            except Exception as e:
-                console.log(f"[red]Error fetching local models: {e}[/red]")
-                return ["local-model"]
-# --- LCARS Styled Gradio Interface ---
 class LcarsInterface:
-    def __init__(self):
-        # Start with HuggingFace by default for Spaces
-        self.use_huggingface = True
-        self.agent = EnhancedLLMAgent(use_huggingface=self.use_huggingface)
         self.current_conversation = "default"
-
     def create_interface(self):
         """Create the full LCARS-styled interface"""

         lcars_css = """
         :root {
             --lcars-orange: #FF9900;
@@ -370,8 +991,6 @@ class LcarsInterface:
             background: var(--lcars-black);
             color: var(--lcars-orange);
             font-family: 'Antonio', 'LCD', 'Courier New', monospace;
-            margin: 0;
-            padding: 0;
         }

         .gradio-container {
@@ -393,88 +1012,122 @@ class LcarsInterface:
             border-radius: 0 60px 0 0;
             margin: -20px -20px 20px -20px;
             border-bottom: 6px solid var(--lcars-blue);
         }

         .lcars-title {
-            font-size: 2.5em;
             font-weight: bold;
             color: var(--lcars-black);
             margin: 0;
         }

         .lcars-subtitle {
-            font-size: 1.2em;
             color: var(--lcars-black);
             margin: 10px 0 0 0;
         }

         .lcars-panel {
-            background: rgba(66, 66, 66, 0.9);
-            border: 2px solid var(--lcars-orange);
-            border-radius: 0 20px 0 20px;
-            padding: 15px;
-            margin-bottom: 15px;
         }

         .lcars-button {
-            background: var(--lcars-orange);
             color: var(--lcars-black) !important;
             border: none !important;
-            border-radius: 0 15px 0 15px !important;
-            padding: 10px 20px !important;
             font-family: inherit !important;
             font-weight: bold !important;
-            margin: 5px !important;
         }

         .lcars-button:hover {
-            background: var(--lcars-red) !important;
         }

         .lcars-input {
             background: var(--lcars-black) !important;
             color: var(--lcars-orange) !important;
             border: 2px solid var(--lcars-blue) !important;
-            border-radius: 0 10px 0 10px !important;
-            padding: 10px !important;
         }

         .lcars-chatbot {
             background: var(--lcars-black) !important;
-            border: 2px solid var(--lcars-purple) !important;
-            border-radius: 0 15px 0 15px !important;
         }

         .status-indicator {
             display: inline-block;
-            width: 12px;
-            height: 12px;
             border-radius: 50%;
             background: var(--lcars-red);
-            margin-right: 8px;
         }

         .status-online {
             background: var(--lcars-blue);
-            animation: pulse 2s infinite;
         }

         @keyframes pulse {
-            0% { opacity: 1; }
-            50% { opacity: 0.5; }
-            100% { opacity: 1; }
         }
         """

         with gr.Blocks(css=lcars_css, theme=gr.themes.Default(), title="LCARS Terminal") as interface:

             with gr.Column(elem_classes="lcars-container"):
-                # Header
                 with gr.Row(elem_classes="lcars-header"):
                     gr.Markdown("""
                     <div style="text-align: center; width: 100%;">
-                        <div class="lcars-title">🚀 LCARS TERMINAL</div>
-                        <div class="lcars-subtitle">STARFLEET AI DEVELOPMENT CONSOLE</div>
                         <div style="margin-top: 10px;">
                             <span class="status-indicator status-online"></span>
                             <span style="color: var(--lcars-black); font-weight: bold;">SYSTEM ONLINE</span>
@@ -482,108 +1135,66 @@ class LcarsInterface:
                         </div>
                     </div>
                     """)

-                # Connection Type Selector
-                with gr.Row(elem_classes="lcars-panel"):
-                    gr.Markdown("### 🌐 CONNECTION TYPE")
-                    connection_type = gr.Radio(
-                        choices=["HuggingFace Inference", "Local LM Studio"],
-                        value="HuggingFace Inference",
-                        label="Select Connection Type",
-                        elem_classes="lcars-input"
-                    )
-
-                # Main Content
                 with gr.Row():
-                    # Left Sidebar
-                    with gr.Column(scale=1):
-                        # Configuration Panel
-                        with gr.Column(elem_classes="lcars-panel"):
-                            gr.Markdown("### 🔧 CONFIGURATION")
-
-                            # Connection-specific settings
-                            with gr.Row(visible=False) as local_settings:
-                                base_url = gr.Textbox(
-                                    value=LOCAL_BASE_URL,
-                                    label="LM Studio URL",
-                                    elem_classes="lcars-input"
-                                )
-                                api_key = gr.Textbox(
-                                    value=LOCAL_API_KEY,
-                                    label="API Key",
-                                    type="password",
-                                    elem_classes="lcars-input"
-                                )
-
-                            with gr.Row(visible=True) as hf_settings:
-                                hf_api_key = gr.Textbox(
-                                    value=HF_API_KEY,
-                                    label="HuggingFace API Key",
-                                    type="password",
-                                    elem_classes="lcars-input",
-                                    placeholder="Get from https://huggingface.co/settings/tokens"
-                                )
-
-                            with gr.Row():
-                                model_dropdown = gr.Dropdown(
-                                    choices=list(MODEL_OPTIONS.keys())[1:],
-                                    value=list(MODEL_OPTIONS.keys())[1],
-                                    label="AI Model",
-                                    elem_classes="lcars-input"
-                                )
-                                fetch_models_btn = gr.Button("📡 Fetch Models", elem_classes="lcars-button")
-
-                            with gr.Row():
-                                temperature = gr.Slider(0.0, 2.0, value=0.7, label="Temperature")
-                                max_tokens = gr.Slider(128, 8192, value=2000, step=128, label="Max Tokens")
-
-                            with gr.Row():
-                                update_config_btn = gr.Button("💾 Apply Config", elem_classes="lcars-button")
-                                speech_toggle = gr.Checkbox(value=True, label="🔊 Speech Output")
-
-                        # Canvas Artifacts
                         with gr.Column(elem_classes="lcars-panel"):
-                            gr.Markdown("### 🎨 CANVAS ARTIFACTS")
-                            artifact_display = gr.JSON(label="")
                             with gr.Row():
                                 refresh_artifacts_btn = gr.Button("🔄 Refresh", elem_classes="lcars-button")
                                 clear_canvas_btn = gr.Button("🗑️ Clear Canvas", elem_classes="lcars-button")

-                    # Main Content Area
                     with gr.Column(scale=2):
-                        # Code Canvas
                         with gr.Accordion("💻 COLLABORATIVE CODE CANVAS", open=True):
                             code_editor = gr.Code(
-                                value="# Welcome to LCARS Collaborative Canvas\n\nprint('Hello, Starfleet!')",
                                 language="python",
-                                lines=15,
-                                label=""
                             )

                             with gr.Row():
-                                load_to_chat_btn = gr.Button("💬 Discuss Code", elem_classes="lcars-button")
-                                analyze_btn = gr.Button("🔍 Analyze", elem_classes="lcars-button")
-                                optimize_btn = gr.Button("⚡ Optimize", elem_classes="lcars-button")

                         # Chat Interface
                         with gr.Column(elem_classes="lcars-panel"):
-                            gr.Markdown("### 💬 MISSION LOG")
-                            chatbot = gr.Chatbot(label="", height=300)

                             with gr.Row():
                                 message_input = gr.Textbox(
                                     placeholder="Enter your command or query...",
                                     show_label=False,
                                     lines=2,
                                     scale=4
                                 )
-                                send_btn = gr.Button("🚀 SEND", elem_classes="lcars-button", scale=1)

-                            # Status
                             with gr.Row():
                                 status_display = gr.Textbox(
-                                    value="LCARS terminal operational. Awaiting commands.",
                                     label="Status",
-                                    max_lines=2
                                 )
                                 with gr.Column(scale=0):
                                     clear_chat_btn = gr.Button("🗑️ Clear Chat", elem_classes="lcars-button")
@@ -591,141 +1202,172 @@ class LcarsInterface:

             # === EVENT HANDLERS ===

-            def switch_connection(connection_type):
-                if connection_type == "Local LM Studio":
-                    return [
-                        gr.update(visible=True),
-                        gr.update(visible=False),
-                        gr.update(choices=["Fetching local models..."], value="Fetching local models...")
-                    ]
-                else:
-                    return [
-                        gr.update(visible=False),
-                        gr.update(visible=True),
-                        gr.update(choices=list(MODEL_OPTIONS.keys())[1:], value=list(MODEL_OPTIONS.keys())[1])
-                    ]
-
-            async def fetch_models_updated(connection_type, base_url_val, api_key_val, hf_api_key_val):
-                if connection_type == "Local LM Studio":
-                    models = await EnhancedLLMAgent.fetch_available_models(
-                        base_url_val, api_key_val, use_huggingface=False
-                    )
-                else:
-                    models = await EnhancedLLMAgent.fetch_available_models(
-                        "", hf_api_key_val, use_huggingface=True
-                    )
-
-                if models:
-                    return gr.update(choices=models, value=models[0])
-                return gr.update(choices=["No models found"])
-
-            def update_agent_connection(connection_type, model_id, base_url_val, api_key_val, hf_api_key_val):
-                use_hf = connection_type == "HuggingFace Inference"
-                self.use_huggingface = use_hf
-
-                if use_hf:
-                    self.agent = EnhancedLLMAgent(
-                        model_id=model_id,
-                        use_huggingface=True,
-                        api_key=hf_api_key_val
-                    )
-                    return f"✅ Switched to HuggingFace: {model_id}"
-                else:
-                    self.agent = EnhancedLLMAgent(
-                        model_id=model_id,
-                        base_url=base_url_val,
-                        api_key=api_key_val,
-                        use_huggingface=False
-                    )
-                    return f"✅ Switched to Local: {base_url_val}"
-
-            async def process_message(message, history, speech_enabled):
                 if not message.strip():
                     return "", history, "Please enter a message"

                 history = history + [[message, None]]

                 try:
-                    response = await self.agent.chat_with_canvas(
-                        message, self.current_conversation, include_canvas=True
                     )
                     history[-1][1] = response

-                    if speech_enabled and self.agent.speech_enabled:
-                        self.agent.speak(response)

-                    artifacts = self.agent.get_canvas_summary(self.current_conversation)
                     status = f"✅ Response received. Canvas artifacts: {len(artifacts)}"
                     return "", history, status, artifacts

                 except Exception as e:
                     error_msg = f"❌ Error: {str(e)}"
                     history[-1][1] = error_msg
-                    return "", history, error_msg, self.agent.get_canvas_summary(self.current_conversation)
-
-            def get_artifacts():
-                return self.agent.get_canvas_summary(self.current_conversation)
-
-            def clear_canvas():
-                self.agent.clear_canvas(self.current_conversation)
-                return [], "✅ Canvas cleared"
-
             def clear_chat():
-                self.agent.clear_conversation(self.current_conversation)
                 return [], "✅ Chat cleared"
-
             def new_session():
-                self.agent.clear_conversation(self.current_conversation)
-                self.agent.clear_canvas(self.current_conversation)
-                return [], "# New session started\n\nprint('Ready!')", "🆕 New session started", []
-
-            # Connect events
-            connection_type.change(switch_connection, inputs=connection_type,
-                                   outputs=[local_settings, hf_settings, model_dropdown])
-
-            fetch_models_btn.click(fetch_models_updated,
-                                   inputs=[connection_type, base_url, api_key, hf_api_key],
-                                   outputs=model_dropdown)
-
-            model_dropdown.change(update_agent_connection,
-                                  inputs=[connection_type, model_dropdown, base_url, api_key, hf_api_key],
-                                  outputs=status_display)
-
-            send_btn.click(process_message,
-                           inputs=[message_input, chatbot, speech_toggle],
-                           outputs=[message_input, chatbot, status_display, artifact_display])
-
-            message_input.submit(process_message,
-                                 inputs=[message_input, chatbot, speech_toggle],
-                                 outputs=[message_input, chatbot, status_display, artifact_display])
-
-            refresh_artifacts_btn.click(get_artifacts, outputs=artifact_display)
-            clear_canvas_btn.click(clear_canvas, outputs=[artifact_display, status_display])
-            clear_chat_btn.click(clear_chat, outputs=[chatbot, status_display])
-            new_session_btn.click(new_session, outputs=[chatbot, code_editor, status_display, artifact_display])
-
             interface.load(get_artifacts, outputs=artifact_display)

         return interface

-# --- Main Application ---
-def main():
-    console.log("[bold blue]🚀 Starting LCARS Terminal...[/bold blue]")
-
-    is_space = os.getenv('SPACE_ID') is not None
-
-    if is_space:
-        console.log("[green]🌐 Detected HuggingFace Space[/green]")
-    else:
-        console.log("[blue]💻 Running locally[/blue]")
-
-    interface = LcarsInterface()
     demo = interface.create_interface()
-
-    demo.launch(
-        share=is_space
-    )
-
-if __name__ == "__main__":
-    main()
+# File: enhanced_gradio_interface.py

 import asyncio
+from collections import defaultdict
 import json
 import os
+import re
 import time
 import uuid
+import queue
+import traceback
+from typing import Coroutine, Dict, List, Any, Optional, Callable
 from dataclasses import dataclass
+from queue import Queue, Empty
+from threading import Lock, Event, Thread
 import threading
+from concurrent.futures import ThreadPoolExecutor

 import gradio as gr
+from openai import AsyncOpenAI, OpenAI
+import pyttsx3
 from rich.console import Console

+BASE_URL = "http://localhost:1234/v1"
+BASE_API_KEY = "not-needed"
+BASE_CLIENT = AsyncOpenAI(
+    base_url=BASE_URL,
+    api_key=BASE_API_KEY
+)  # Global async client
+BASEMODEL_ID = "leroydyer/qwen/qwen3-0.6b-q4_k_m.gguf"  # Global state for selected model ID
+CLIENT = OpenAI(
+    base_url=BASE_URL,
+    api_key=BASE_API_KEY
+)  # Global sync client
+# --- Global Variables ---
+console = Console()
+# Example global clients if needed elsewhere; adjust to your setup:
+# BASE_CLIENT = AsyncOpenAI(base_url=DEFAULT_BASE_URL, api_key=DEFAULT_API_KEY)
+# CLIENT = OpenAI(base_url=DEFAULT_BASE_URL, api_key=DEFAULT_API_KEY)

+# --- Dataclasses (copied from your original code or imported) ---
+@dataclass
+class LLMMessage:
+    role: str
+    content: str
+    message_id: str = None
+    conversation_id: str = None
+    timestamp: float = None
+    metadata: Dict[str, Any] = None
+
+    def __post_init__(self):
+        if self.message_id is None:
+            self.message_id = str(uuid.uuid4())
+        if self.timestamp is None:
+            self.timestamp = time.time()
+        if self.metadata is None:
+            self.metadata = {}

+@dataclass
+class LLMRequest:
+    message: LLMMessage
+    response_event: str = None
+    callback: Callable = None
+
+    def __post_init__(self):
+        if self.response_event is None:
+            self.response_event = f"llm_response_{self.message.message_id}"

 @dataclass
+class LLMResponse:
+    message: LLMMessage
+    request_id: str
+    success: bool = True
+    error: str = None

+# --- Event Manager (copied from your original code or imported) ---
+class EventManager:
+    def __init__(self):
+        self._handlers = defaultdict(list)
+        self._lock = threading.Lock()
+
+    def register(self, event: str, handler: Callable):
+        with self._lock:
+            self._handlers[event].append(handler)
+
+    def unregister(self, event: str, handler: Callable):
+        with self._lock:
+            if event in self._handlers and handler in self._handlers[event]:
+                self._handlers[event].remove(handler)
+
+    def raise_event(self, event: str, data: Any):
+        with self._lock:
+            handlers = self._handlers[event][:]
+        # Call handlers outside the lock so a handler that raises another
+        # event (as _handle_internal_response does) cannot deadlock.
+        for handler in handlers:
+            try:
+                handler(data)
+            except Exception as e:
+                console.log(f"Error in event handler for {event}: {e}", style="bold red")
+
+EVENT_MANAGER = EventManager()
+
+def RegisterEvent(event: str, handler: Callable):
+    EVENT_MANAGER.register(event, handler)
+
+def RaiseEvent(event: str, data: Any):
+    EVENT_MANAGER.raise_event(event, data)
+
+def UnregisterEvent(event: str, handler: Callable):
+    EVENT_MANAGER.unregister(event, handler)
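
A minimal sketch of the event flow (illustrative only; the event name and handler here are hypothetical):

    def on_greeting(data):
        # Hypothetical handler: print whatever payload arrives.
        print("got event payload:", data)

    RegisterEvent("greeting", on_greeting)
    RaiseEvent("greeting", {"text": "hello"})   # -> got event payload: {'text': 'hello'}
    UnregisterEvent("greeting", on_greeting)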
+class LLMAgent:
+    """Main agent driver!
+    An agent for handling multiple messages at once:
+    it provides a message-queuing service as well as an async chat method for easy
+    integration with console applications as well as UIs!"""
+    def __init__(
+        self,
+        model_id: str = BASEMODEL_ID,
+        system_prompt: str = None,
+        max_queue_size: int = 1000,
+        max_retries: int = 3,
+        timeout: int = 30000,  # seconds; passed to asyncio.wait_for in chat()
+        max_tokens: int = 5000,
+        temperature: float = 0.3,
+        base_url: str = "http://localhost:1234/v1",
+        api_key: str = "not-needed",
+        generate_fn: Callable[[List[Dict[str, str]]], Coroutine[Any, Any, str]] = None
+    ):
         self.model_id = model_id
+        self.system_prompt = system_prompt or "You are a helpful AI assistant."
+        self.request_queue = Queue(maxsize=max_queue_size)
+        self.max_retries = max_retries
+        self.timeout = timeout
+        self.is_running = False
+        self._stop_event = Event()
+        self.processing_thread = None
+
+        # Conversation tracking
+        self.conversations: Dict[str, List[LLMMessage]] = {}
+        self.max_history_length = 20
+        self._generate = generate_fn or self._default_generate
+        self.api_key = api_key
+        self.base_url = base_url
+        self.max_tokens = max_tokens
+        self.temperature = temperature
+        self.async_client = self.CreateClient(base_url, api_key)
+
+        # Active requests waiting for responses
+        self.pending_requests: Dict[str, LLMRequest] = {}
+        self.pending_requests_lock = Lock()

+        # Register internal event handlers
+        self._register_event_handlers()
         # Speech synthesis
         try:
             self.tts_engine = pyttsx3.init()
             self.setup_tts()
             self.speech_enabled = True
         except Exception as e:
+            console.log(f"[yellow]TTS not available: {e}[/yellow]")
             self.speech_enabled = False
+
+        console.log("[bold green]🚀 Enhanced LLM Agent Initialized[/bold green]")
+
+        # Start the processing thread immediately
+        self.start()

     def setup_tts(self):
         """Configure text-to-speech engine"""
+        if hasattr(self, 'tts_engine'):
             voices = self.tts_engine.getProperty('voices')
             if voices:
+                self.tts_engine.setProperty('voice', voices[0].id)
+                self.tts_engine.setProperty('rate', 150)
+                self.tts_engine.setProperty('volume', 0.8)
     def speak(self, text: str):
         """Convert text to speech in a non-blocking way"""

         def _speak():
             try:
+                # Clean text for speech (remove markdown, code blocks)
                 clean_text = re.sub(r'```.*?```', '', text, flags=re.DOTALL)
                 clean_text = re.sub(r'`.*?`', '', clean_text)
                 clean_text = clean_text.strip()
+                if clean_text:
+                    self.tts_engine.say(clean_text)
                     self.tts_engine.runAndWait()
+                else:
+                    self.tts_engine.say(text)
+                    self.tts_engine.runAndWait()
             except Exception as e:
                 console.log(f"[red]TTS Error: {e}[/red]")

         thread = threading.Thread(target=_speak, daemon=True)
         thread.start()
+
+    async def _default_generate(self, messages: List[Dict[str, str]]) -> str:
+        """Default generate function if none provided"""
+        return await self.openai_generate(messages)
+
+    def _register_event_handlers(self):
+        """Register internal event handlers for response routing"""
+        RegisterEvent("llm_internal_response", self._handle_internal_response)
+
+    def _handle_internal_response(self, response: LLMResponse):
+        """Route responses to the appropriate request handlers"""
+        console.log(f"[bold cyan]Handling internal response for: {response.request_id}[/bold cyan]")
+
+        request = None
+        with self.pending_requests_lock:
+            if response.request_id in self.pending_requests:
+                request = self.pending_requests[response.request_id]
+                del self.pending_requests[response.request_id]
+                console.log(f"Found pending request for: {response.request_id}")
+            else:
+                console.log(f"No pending request found for: {response.request_id}", style="yellow")
+                return
+
+        # Raise the specific response event
+        if request.response_event:
+            console.log(f"[bold green]Raising event: {request.response_event}[/bold green]")
+            RaiseEvent(request.response_event, response)
+
+        # Call callback if provided
+        if request.callback:
+            try:
+                console.log(f"[bold yellow]Calling callback for: {response.request_id}[/bold yellow]")
+                request.callback(response)
+            except Exception as e:
+                console.log(f"Error in callback: {e}", style="bold red")
+
+    def _add_to_conversation_history(self, conversation_id: str, message: LLMMessage):
+        """Add message to conversation history"""
+        if conversation_id not in self.conversations:
+            self.conversations[conversation_id] = []
+
+        self.conversations[conversation_id].append(message)
+
+        # Trim history if too long
+        if len(self.conversations[conversation_id]) > self.max_history_length * 2:
+            self.conversations[conversation_id] = self.conversations[conversation_id][-(self.max_history_length * 2):]
+
+    def _build_messages_from_conversation(self, conversation_id: str, new_message: LLMMessage) -> List[Dict[str, str]]:
+        """Build message list from conversation history"""
+        messages = []
+
+        # Add system prompt
+        if self.system_prompt:
+            messages.append({"role": "system", "content": self.system_prompt})
+
+        # Add conversation history
+        if conversation_id in self.conversations:
+            for msg in self.conversations[conversation_id][-self.max_history_length:]:
+                messages.append({"role": msg.role, "content": msg.content})
+
+        # Add the new message
+        messages.append({"role": new_message.role, "content": new_message.content})
+
+        return messages
+
+    def _process_llm_request(self, request: LLMRequest):
+        """Process a single LLM request"""
+        console.log(f"[bold green]Processing LLM request: {request.message.message_id}[/bold green]")
+        try:
+            # Build messages for LLM
+            messages = self._build_messages_from_conversation(
+                request.message.conversation_id or "default",
+                request.message
+            )
+
+            console.log(f"Calling LLM with {len(messages)} messages")
+
+            # Call LLM - use a sync call for thread compatibility
+            response_content = self._call_llm_sync(messages)
+
+            console.log(f"[bold green]LLM response received: {response_content[:80]}...[/bold green]")
+
+            # Create response message
+            response_message = LLMMessage(
+                role="assistant",
+                content=response_content,
+                conversation_id=request.message.conversation_id,
+                metadata={"request_id": request.message.message_id}
+            )
+
+            # Update conversation history
+            self._add_to_conversation_history(
+                request.message.conversation_id or "default",
+                request.message
+            )
+            self._add_to_conversation_history(
+                request.message.conversation_id or "default",
+                response_message
+            )
+
+            # Create and send response
+            response = LLMResponse(
+                message=response_message,
+                request_id=request.message.message_id,
+                success=True
+            )
+
+            console.log(f"[bold blue]Sending internal response for: {request.message.message_id}[/bold blue]")
+            RaiseEvent("llm_internal_response", response)
+
+        except Exception as e:
+            console.log(f"[bold red]Error processing LLM request: {e}[/bold red]")
+            traceback.print_exc()
+            # Create error response
+            error_response = LLMResponse(
+                message=LLMMessage(
+                    role="system",
+                    content=f"Error: {str(e)}",
+                    conversation_id=request.message.conversation_id
+                ),
+                request_id=request.message.message_id,
+                success=False,
+                error=str(e)
+            )
+
+            RaiseEvent("llm_internal_response", error_response)
+
+    def _call_llm_sync(self, messages: List[Dict[str, str]]) -> str:
+        """Sync call to the LLM with retry logic"""
+        console.log(f"Making LLM call to {self.model_id}")
+        for attempt in range(self.max_retries):
+            try:
+                response = CLIENT.chat.completions.create(
+                    model=self.model_id,
+                    messages=messages,
+                    temperature=self.temperature,
+                    max_tokens=self.max_tokens
+                )
+                content = response.choices[0].message.content
+                console.log(f"LLM call successful, response length: {len(content)}")
+                return content
+            except Exception as e:
+                console.log(f"LLM call attempt {attempt + 1} failed: {e}")
+                if attempt == self.max_retries - 1:
+                    raise e
+                # Wait before retry (simple linear backoff)
+                time.sleep(1 + attempt)
+
+    def _process_queue(self):
+        """Main queue processing loop"""
+        console.log("[bold cyan]LLM Agent queue processor started[/bold cyan]")
+        while not self._stop_event.is_set():
+            try:
+                request = self.request_queue.get(timeout=1.0)
+                if request:
+                    console.log(f"Got request from queue: {request.message.message_id}")
+                    self._process_llm_request(request)
+                    self.request_queue.task_done()
+            except Empty:
+                continue
+            except Exception as e:
+                console.log(f"Error in queue processing: {e}", style="bold red")
+                traceback.print_exc()
+        console.log("[bold cyan]LLM Agent queue processor stopped[/bold cyan]")
+
+    def send_message(
+        self,
+        content: str,
+        role: str = "user",
+        conversation_id: str = None,
+        response_event: str = None,
+        callback: Callable = None,
+        metadata: Dict = None
+    ) -> str:
+        """Send a message to the LLM and get the response via events"""
+        if not self.is_running:
+            raise RuntimeError("LLM Agent is not running. Call start() first.")
+
+        # Create message
+        message = LLMMessage(
+            role=role,
+            content=content,
+            conversation_id=conversation_id,
+            metadata=metadata or {}
+        )
+
+        # Create request
+        request = LLMRequest(
+            message=message,
+            response_event=response_event,
+            callback=callback
+        )
+
+        # Store in pending requests BEFORE adding to queue
+        with self.pending_requests_lock:
+            self.pending_requests[message.message_id] = request
+            console.log(f"Added to pending requests: {message.message_id}")
+
+        # Add to queue
+        try:
+            self.request_queue.put(request, timeout=5.0)
+            console.log(f"[bold magenta]Message queued: {message.message_id}, Content: {content[:50]}...[/bold magenta]")
+            return message.message_id
+        except queue.Full:
+            console.log("[bold red]Queue full, cannot send message[/bold red]")
+            with self.pending_requests_lock:
+                if message.message_id in self.pending_requests:
+                    del self.pending_requests[message.message_id]
+            raise RuntimeError("LLM Agent queue is full")
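
A minimal sketch of the fire-and-forget path (the prompt and handler are illustrative; assumes the local server is up, so the queue thread started in __init__ can service the request):

    agent = LLMAgent()  # the queue processor starts in __init__

    def on_reply(resp: LLMResponse):
        # Invoked from the processing thread once the LLM answers.
        print("assistant said:", resp.message.content if resp.success else resp.error)

    msg_id = agent.send_message("Say hello in one sentence.", callback=on_reply)
    time.sleep(10)  # crude wait; the callback fires asynchronously from the worker thread
    agent.stop()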
+
+    async def chat(self, messages: List[Dict[str, str]]) -> str:
+        """
+        Async chat method that sends a message via the queue and returns the response string.
+        This is the main method you should use.
+        """
+        # Create a future for the response
+        loop = asyncio.get_running_loop()
+        response_future = loop.create_future()
+
+        def chat_callback(response: LLMResponse):
+            """Callback when the LLM responds - thread-safe"""
+            console.log("[bold yellow]✓ CHAT CALLBACK TRIGGERED![/bold yellow]")
+
+            if not response_future.done():
+                if response.success:
+                    content = response.message.content
+                    console.log(f"Callback received content: {content[:80]}...")
+                    # Schedule setting the future result on the main event loop
+                    loop.call_soon_threadsafe(response_future.set_result, content)
+                else:
+                    console.log(f"Error in response: {response.error}")
+                    error_msg = f"❌ Error: {response.error}"
+                    loop.call_soon_threadsafe(response_future.set_result, error_msg)
+            else:
+                console.log("[bold red]Future already done, ignoring callback[/bold red]")
+
+        console.log("Sending message to LLM agent...")
+
+        # Extract the actual message content from the messages list
+        user_message = ""
+        for msg in messages:
+            if msg.get("role") == "user":
+                user_message = msg.get("content", "")
+                break
+
+        if not user_message.strip():
+            return ""
+
+        # Send message with callback using the queue system
+        try:
+            message_id = self.send_message(
+                content=user_message,
+                conversation_id="default",
+                callback=chat_callback
+            )
+
+            console.log(f"Message sent with ID: {message_id}, waiting for response...")
+
+            # Wait for the response and return it
+            try:
+                response = await asyncio.wait_for(response_future, timeout=self.timeout)
+                console.log(f"[bold green]✓ Chat complete! Response length: {len(response)}[/bold green]")
+                return response
+
+            except asyncio.TimeoutError:
+                console.log("[bold red]Response timeout[/bold red]")
+                # Clean up the pending request
+                with self.pending_requests_lock:
+                    if message_id in self.pending_requests:
+                        del self.pending_requests[message_id]
+                return "❌ Response timeout - check if the LLM server is running"
+
+        except Exception as e:
+            console.log(f"[bold red]Error sending message: {e}[/bold red]")
+            traceback.print_exc()
+            return f"❌ Error sending message: {e}"
+
+    def start(self):
+        """Start the LLM agent"""
+        if not self.is_running:
+            self.is_running = True
+            self._stop_event.clear()
+            self.processing_thread = Thread(target=self._process_queue, daemon=True)
+            self.processing_thread.start()
+            console.log("[bold green]LLM Agent started[/bold green]")
+
+    def stop(self):
+        """Stop the LLM agent"""
+        console.log("Stopping LLM Agent...")
+        self._stop_event.set()
+        if self.processing_thread and self.processing_thread.is_alive():
+            self.processing_thread.join(timeout=10)
+        self.is_running = False
+        console.log("LLM Agent stopped")
+
+    def get_conversation_history(self, conversation_id: str = "default") -> List[LLMMessage]:
+        """Get conversation history"""
+        return self.conversations.get(conversation_id, [])[:]
+
+    def clear_conversation(self, conversation_id: str = "default"):
+        """Clear conversation history"""
+        if conversation_id in self.conversations:
+            del self.conversations[conversation_id]
+
+    async def _chat(self, messages: List[Dict[str, str]]) -> str:
+        return await self._generate(messages)
+
+    @staticmethod
+    async def openai_generate(messages: List[Dict[str, str]], max_tokens: int = 8096, temperature: float = 0.4, model: str = BASEMODEL_ID, tools=None) -> str:
+        """Static method for generating responses using the OpenAI API"""
+        try:
+            resp = await BASE_CLIENT.chat.completions.create(
+                model=model,
+                messages=messages,
+                temperature=temperature,
+                max_tokens=max_tokens,
+                tools=tools
+            )
+            response_text = resp.choices[0].message.content or ""
+            return response_text
+        except Exception as e:
+            console.log(f"[bold red]Error in openai_generate: {e}[/bold red]")
+            return f"[LLM_Agent Error - openai_generate: {str(e)}]"
+
+    async def _call_(self, messages: List[Dict[str, str]]) -> str:
+        """Internal call method using the instance client"""
+        try:
+            resp = await self.async_client.chat.completions.create(
                 model=self.model_id,
                 messages=messages,
+                temperature=self.temperature,
+                max_tokens=self.max_tokens
             )
+            response_text = resp.choices[0].message.content or ""
+            return response_text
         except Exception as e:
+            console.log(f"[bold red]Error in _call_: {e}[/bold red]")
+            return f"[LLM_Agent Error - _call_: {str(e)}]"
+
+    @staticmethod
+    def CreateClient(base_url: str, api_key: str) -> AsyncOpenAI:
+        '''Create the async OpenAI client required for multitasking'''
+        return AsyncOpenAI(
+            base_url=base_url,
+            api_key=api_key
+        )
+
+    @staticmethod
+    async def fetch_available_models(base_url: str, api_key: str) -> List[str]:
+        """Fetches available models from the OpenAI API."""
         try:
+            async_client = AsyncOpenAI(base_url=base_url, api_key=api_key)
+            models = await async_client.models.list()
+            model_choices = [model.id for model in models.data]
+            return model_choices
         except Exception as e:
+            console.log(f"[bold red]LLM_Agent Error fetching models: {e}[/bold red]")
+            return ["LLM_Agent Error fetching models"]
+
+    def get_models(self) -> List[str]:
+        """Get available models using instance credentials (blocking; do not call from a running event loop, since it uses asyncio.run)"""
+        return asyncio.run(self.fetch_available_models(self.base_url, self.api_key))
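
A quick sketch of discovering which model IDs the server exposes (assumes the endpoint implements the OpenAI /models route, as LM Studio does; call it from synchronous code only):

    agent = LLMAgent()
    print(agent.get_models())  # e.g. ['leroydyer/qwen/qwen3-0.6b-q4_k_m.gguf', ...]
    agent.stop()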
+
+    def get_queue_size(self) -> int:
+        """Get current queue size"""
+        return self.request_queue.qsize()
+
+    def get_pending_requests_count(self) -> int:
+        """Get number of pending requests"""
+        with self.pending_requests_lock:
+            return len(self.pending_requests)
+
+    def get_status(self) -> Dict[str, Any]:
+        """Get agent status information"""
+        return {
+            "is_running": self.is_running,
+            "queue_size": self.get_queue_size(),
+            "pending_requests": self.get_pending_requests_count(),
+            "conversations_count": len(self.conversations),
+            "model": self.model_id
+        }
+
+class AI_Agent:
+    def __init__(self, model_id: str, system_prompt: str = "You are a helpful assistant. Respond concisely in 1-2 sentences.", history: List[Dict] = None):
+        self.model_id = model_id
+        self.system_prompt = system_prompt
+        self.history = history or []
+        self.conversation_id = f"conv_{uuid.uuid4().hex[:8]}"
+
+        # Create agent instance
+        self.client = LLMAgent(
+            model_id=model_id,
+            system_prompt=self.system_prompt,
+            generate_fn=LLMAgent.openai_generate
+        )
+
+        console.log(f"[bold green]✓ AI_Agent initialized with model: {model_id}[/bold green]")
+
+    async def call_llm(self, messages: List[Dict], use_history: bool = True) -> str:
+        """
+        Send messages to the LLM and get a response.
+        Args:
+            messages: List of message dicts with 'role' and 'content'
+            use_history: Whether to include conversation history
+        Returns:
+            str: LLM response
+        """
+        try:
+            console.log(f"[bold yellow]Sending {len(messages)} messages to LLM (use_history: {use_history})...[/bold yellow]")
+
+            # Enhance messages based on history setting
+            enhanced_messages = await self._enhance_messages(messages, use_history)
+
+            response = await self.client.chat(enhanced_messages)
+            console.log(f"[bold green]✓ Response received ({len(response)} chars)[/bold green]")
+
+            # Update conversation history ONLY if we're using history
+            if use_history:
+                self._update_history(messages, response)
+
+            return response
+
+        except Exception as e:
+            console.log(f"[bold red]✗ ERROR: {e}[/bold red]")
+            traceback.print_exc()
+            return f"Error: {str(e)}"
+
+    async def _enhance_messages(self, messages: List[Dict], use_history: bool) -> List[Dict]:
+        """Enhance messages with the system prompt and optional history"""
+        enhanced = []
+
+        # Add system prompt if not already in messages
+        has_system = any(msg.get('role') == 'system' for msg in messages)
+        if not has_system and self.system_prompt:
+            enhanced.append({"role": "system", "content": self.system_prompt})
+
+        # Add conversation history only if requested
+        if use_history and self.history:
+            enhanced.extend(self.history[-10:])  # Last 10 messages for context
+
+        # Add current messages
+        enhanced.extend(messages)
+
+        return enhanced
+
+    def _update_history(self, messages: List[Dict], response: str):
+        """Update conversation history with the new exchange"""
+        # Add user messages to history
         for msg in messages:
+            if msg.get('role') in ['user', 'assistant']:
+                self.history.append(msg)
+
+        # Add assistant response to history
+        self.history.append({"role": "assistant", "content": response})
+
+        # Keep history manageable (last 20 exchanges)
+        if len(self.history) > 40:  # 20 user + 20 assistant messages
+            self.history = self.history[-40:]
+
+    async def simple_query(self, query: str) -> str:
+        """Simple one-shot query method - NO history/context"""
+        messages = [{"role": "user", "content": query}]
+        return await self.call_llm(messages, use_history=False)
+
+    async def multi_turn_chat(self, user_input: str) -> str:
+        """Multi-turn chat that maintains context across calls"""
+        messages = [{"role": "user", "content": user_input}]
+        response = await self.call_llm(messages, use_history=True)
+        return response
676
+
677
+ def get_conversation_summary(self) -> Dict:
678
+ """Get conversation summary"""
679
+ return {
680
+ "conversation_id": self.conversation_id,
681
+ "total_messages": len(self.history),
682
+ "user_messages": len([msg for msg in self.history if msg.get('role') == 'user']),
683
+ "assistant_messages": len([msg for msg in self.history if msg.get('role') == 'assistant']),
684
+ "recent_exchanges": self.history[-4:] if self.history else []
685
+ }
686
+
687
+ def clear_history(self):
688
+ """Clear conversation history"""
689
+ self.history.clear()
690
+ console.log("[bold yellow]Conversation history cleared[/bold yellow]")
691
+
692
+ def update_system_prompt(self, new_prompt: str):
693
+ """Update the system prompt"""
694
+ self.system_prompt = new_prompt
695
+ console.log(f"[bold blue]System prompt updated[/bold blue]")
696
+
697
+ def stop(self):
698
+ """Stop the client gracefully"""
699
+ if hasattr(self, 'client') and self.client:
700
+ self.client.stop()
701
+ console.log("[bold yellow]MyAgent client stopped[/bold yellow]")
+
+    async def contextual_query(self, query: str, context_messages: List[Dict] = None,
+                               context_text: str = None, context_files: List[str] = None) -> str:
+        """
+        Query with specific context; does not update the main history.
+
+        Args:
+            query: The user question
+            context_messages: List of message dicts for context
+            context_text: Plain-text context (will be converted to a system message)
+            context_files: List of file paths to read and include as context
+        """
+        messages = []
+
+        # Add system prompt
+        if self.system_prompt:
+            messages.append({"role": "system", "content": self.system_prompt})
+
+        # Handle different context types
+        if context_messages:
+            messages.extend(context_messages)
+
+        if context_text:
+            messages.append({"role": "system", "content": f"Additional context: {context_text}"})
+
+        if context_files:
+            file_context = await self._read_files_context(context_files)
+            if file_context:
+                messages.append({"role": "system", "content": f"File contents:\n{file_context}"})
+
+        # Add the actual query
+        messages.append({"role": "user", "content": query})
+
+        return await self.call_llm(messages, use_history=False)
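
A sketch of a context-scoped question (the file path and question are hypothetical; the main history stays untouched because the call runs with use_history=False):

    async def demo_context():
        bot = AI_Agent(model_id=BASEMODEL_ID)
        answer = await bot.contextual_query(
            "Summarize the TODOs in this file.",
            context_files=["app.py"],  # hypothetical path
        )
        print(answer)
        bot.stop()

    asyncio.run(demo_context())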
+
+    async def _read_files_context(self, file_paths: List[str]) -> str:
+        """Read multiple files and return them as a context string"""
+        contexts = []
+        for file_path in file_paths:
+            try:
+                if os.path.exists(file_path):
+                    with open(file_path, 'r', encoding='utf-8') as f:
+                        content = f.read()
+                    contexts.append(f"--- {os.path.basename(file_path)} ---\n{content}")
+                else:
+                    console.log(f"[bold yellow]File not found: {file_path}[/bold yellow]")
+            except Exception as e:
+                console.log(f"[bold red]Error reading file {file_path}: {e}[/bold red]")
+
+        return "\n\n".join(contexts) if contexts else ""
+
+    async def query_with_code_context(self, query: str, code_snippets: List[str] = None,
+                                      code_files: List[str] = None) -> str:
+        """
+        Specialized contextual query for code-related questions
+        """
+        code_context = "CODE CONTEXT:\n"
+
+        if code_snippets:
+            for i, snippet in enumerate(code_snippets, 1):
+                code_context += f"\nSnippet {i}:\n```\n{snippet}\n```\n"
+
+        if code_files:
+            # Read code files and include them
+            for file_path in code_files:
+                if file_path.endswith(('.py', '.js', '.java', '.cpp', '.c', '.html', '.css')):
+                    code_context += f"\nFile: {file_path}\n```\n"
+                    try:
+                        with open(file_path, 'r') as f:
+                            code_context += f.read()
+                    except Exception as e:
+                        code_context += f"Error reading file: {e}"
+                    code_context += "\n```\n"
+
+        return await self.contextual_query(query, context_text=code_context)
+
+    async def multi_context_query(self, query: str, contexts: Dict[str, Any]) -> str:
+        """
+        Advanced contextual query with multiple context types.
+
+        Args:
+            query: The user question
+            contexts: Dict with various context types
+                - 'messages': List of message dicts
+                - 'text': Plain-text context
+                - 'files': List of file paths
+                - 'urls': List of URLs
+                - 'code': List of code snippets or files
+                - 'metadata': Any additional metadata
+        """
+        all_context_messages = []
+
+        # Build context from different sources
+        if contexts.get('text'):
+            all_context_messages.append({"role": "system", "content": f"Context: {contexts['text']}"})
+
+        if contexts.get('messages'):
+            all_context_messages.extend(contexts['messages'])
+
+        if contexts.get('files'):
+            file_context = await self._read_files_context(contexts['files'])
+            if file_context:
+                all_context_messages.append({"role": "system", "content": f"File Contents:\n{file_context}"})
+
+        if contexts.get('code'):
+            code_context = "\n".join([f"Code snippet {i}:\n```\n{code}\n```"
+                                      for i, code in enumerate(contexts['code'], 1)])
+            all_context_messages.append({"role": "system", "content": f"Code Context:\n{code_context}"})
+
+        if contexts.get('metadata'):
+            all_context_messages.append({"role": "system", "content": f"Metadata: {contexts['metadata']}"})
+
+        return await self.contextual_query(query, context_messages=all_context_messages)
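
A sketch of mixing several context sources in one call (all values are hypothetical placeholders):

    contexts = {
        "text": "The project targets Python 3.10.",
        "code": ["def greet():\n    return 'hello'"],
        "metadata": {"ticket": "HYPOTHETICAL-123"},
    }
    # inside an async function, with bot = AI_Agent(...):
    # answer = await bot.multi_context_query("Is greet() compatible?", contexts)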
+
+# --- Canvas Artifact Support ---
+@dataclass
+class CanvasArtifact:
+    id: str
+    type: str  # 'code', 'diagram', 'text', 'image'
+    content: str
+    title: str
+    timestamp: float
+    metadata: Dict[str, Any]
+
+class EnhancedAIAgent:
+    """
+    Wrapper around your AI_Agent that adds canvas/artifact management
+    without modifying the original agent.
+    """
+    def __init__(self, ai_agent):
+        self.agent = ai_agent
+        self.canvas_artifacts: Dict[str, List[CanvasArtifact]] = {}
+        self.max_canvas_artifacts = 50
+        console.log("[bold green]✓ Enhanced AI Agent wrapper initialized[/bold green]")
+
+    def add_artifact_to_canvas(self, conversation_id: str, content: str,
+                               artifact_type: str = "code", title: str = None):
         """Add artifacts to the collaborative canvas"""
         if conversation_id not in self.canvas_artifacts:
             self.canvas_artifacts[conversation_id] = []
 
         ...
         )
 
         self.canvas_artifacts[conversation_id].append(artifact)
+
+        # Keep only the most recent artifacts
+        if len(self.canvas_artifacts[conversation_id]) > self.max_canvas_artifacts:
+            self.canvas_artifacts[conversation_id] = self.canvas_artifacts[conversation_id][-self.max_canvas_artifacts:]
+
         console.log(f"[green]Added artifact to canvas: {artifact.title}[/green]")
         return artifact
+
     def get_canvas_context(self, conversation_id: str) -> str:
         """Get formatted canvas context for LLM prompts"""
         if conversation_id not in self.canvas_artifacts or not self.canvas_artifacts[conversation_id]:
             return ""
 
         context_lines = ["\n=== COLLABORATIVE CANVAS ARTIFACTS ==="]
+        for artifact in self.canvas_artifacts[conversation_id][-10:]:  # last 10 artifacts
             context_lines.append(f"\n--- {artifact.title} [{artifact.type.upper()}] ---")
             preview = artifact.content[:500] + "..." if len(artifact.content) > 500 else artifact.content
             context_lines.append(preview)
 
         return "\n".join(context_lines) + "\n=================================\n"
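+    # The rendered context block looks roughly like this (sketch):
+    #
+    #     === COLLABORATIVE CANVAS ARTIFACTS ===
+    #     --- code_snippet_python_1 [CODE] ---
+    #     print('hello')
+    #     =================================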
+
+    async def chat_with_canvas(self, message: str, conversation_id: str = "default",
+                               include_canvas: bool = True) -> str:
+        """Enhanced chat that prepends canvas context to the user message"""
+        # Build the outgoing message, optionally prefixed with canvas artifacts
+        full_message = message
 
         if include_canvas:
             canvas_context = self.get_canvas_context(conversation_id)
             if canvas_context:
+                full_message = f"{canvas_context}\n\nUser Query: {message}"
 
         try:
+            # Delegate to the original agent's multi_turn_chat method
+            response = await self.agent.multi_turn_chat(full_message)
 
             # Auto-extract and add code artifacts to canvas
+            self._extract_artifacts_to_canvas(response, conversation_id)
 
+            return response
 
         except Exception as e:
             error_msg = f"Error in chat_with_canvas: {str(e)}"
             console.log(f"[red]{error_msg}[/red]")
             return error_msg
+
     def _extract_artifacts_to_canvas(self, response: str, conversation_id: str):
         """Automatically extract code blocks and add to canvas"""
+        # Find all fenced code blocks, capturing an optional language tag
+        code_blocks = re.findall(r'```(?:(\w+)\n)?(.*?)```', response, re.DOTALL)
+        for lang, code_block in code_blocks:
+            if len(code_block.strip()) > 10:  # only keep substantial blocks
                 self.add_artifact_to_canvas(
                     conversation_id,
                     code_block.strip(),
                     "code",
+                    f"code_snippet_{lang or 'unknown'}_{len(self.canvas_artifacts.get(conversation_id, [])) + 1}"
                 )
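+        # The extraction regex in action (sketch):
+        #     re.findall(r'```(?:(\w+)\n)?(.*?)```',
+        #                "Here:\n```python\nprint('hi')\n```", re.DOTALL)
+        #     -> [('python', "print('hi')\n")]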
+
     def get_canvas_summary(self, conversation_id: str) -> List[Dict]:
         """Get summary of canvas artifacts for display"""
         if conversation_id not in self.canvas_artifacts:
             return []
 
         artifacts = []
+        for artifact in reversed(self.canvas_artifacts[conversation_id]):  # newest first
             artifacts.append({
                 "id": artifact.id,
                 "type": artifact.type.upper(),
                 ...
             })
 
         return artifacts
+
     def get_artifact_by_id(self, conversation_id: str, artifact_id: str) -> Optional[CanvasArtifact]:
         """Get specific artifact by ID"""
         if conversation_id not in self.canvas_artifacts:
             return None
 
         for artifact in self.canvas_artifacts[conversation_id]:
             if artifact.id == artifact_id:
                 return artifact
         return None
+
+    def clear_canvas(self, conversation_id: str = "default"):
+        """Clear canvas artifacts"""
+        if conversation_id in self.canvas_artifacts:
+            self.canvas_artifacts[conversation_id] = []
+            console.log(f"[yellow]Cleared canvas: {conversation_id}[/yellow]")
+
+    def get_latest_code_artifact(self, conversation_id: str) -> Optional[str]:
+        """Get the most recent code artifact content"""
+        if conversation_id not in self.canvas_artifacts:
+            return None
+
+        for artifact in reversed(self.canvas_artifacts[conversation_id]):
+            if artifact.type == "code":
+                return artifact.content
+        return None
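+
+# Minimal usage sketch for the wrapper (assumes an `AI_Agent` exposing an async
+# `multi_turn_chat(message)` method, which chat_with_canvas relies on):
+#
+#     enhanced = EnhancedAIAgent(AI_Agent(model_id="your-model"))
+#     reply = asyncio.run(enhanced.chat_with_canvas("Write fizzbuzz", "default"))
+#     print(enhanced.get_canvas_summary("default"))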
 
 
 class LcarsInterface:
+    """LCARS-styled Gradio interface for your AI_Agent"""
+
+    def __init__(self, ai_agent):
+        """
+        Initialize the interface with your AI_Agent instance.
+
+        Args:
+            ai_agent: Instance of your AI_Agent class
+        """
+        self.enhanced_agent = EnhancedAIAgent(ai_agent)
         self.current_conversation = "default"
+        self.processing_lock = threading.Lock()
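+        # The lock is intended to serialise concurrent requests (sketch):
+        #     with self.processing_lock:
+        #         ... handle one request at a time ...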
+        console.log("[bold cyan]✓ LCARS Interface initialized[/bold cyan]")
+
     def create_interface(self):
         """Create the full LCARS-styled interface"""
 
+        # Enhanced LCARS CSS
         lcars_css = """
         :root {
             --lcars-orange: #FF9900;
             ...
             background: var(--lcars-black);
             color: var(--lcars-orange);
             font-family: 'Antonio', 'LCD', 'Courier New', monospace;
         }
 
         .gradio-container {
             ...
             border-radius: 0 60px 0 0;
             margin: -20px -20px 20px -20px;
             border-bottom: 6px solid var(--lcars-blue);
+            box-shadow: 0 4px 20px rgba(255, 153, 0, 0.3);
         }
 
         .lcars-title {
+            font-size: 3em;
             font-weight: bold;
             color: var(--lcars-black);
+            text-shadow: 3px 3px 6px rgba(255, 255, 255, 0.4);
             margin: 0;
+            letter-spacing: 2px;
         }
 
         .lcars-subtitle {
+            font-size: 1.4em;
             color: var(--lcars-black);
             margin: 10px 0 0 0;
+            font-weight: bold;
         }
 
         .lcars-panel {
+            background: linear-gradient(135deg, rgba(66, 66, 66, 0.9), rgba(40, 40, 40, 0.9));
+            border: 3px solid var(--lcars-orange);
+            border-radius: 0 25px 0 25px;
+            padding: 20px;
+            margin-bottom: 20px;
+            box-shadow: 0 4px 15px rgba(255, 153, 0, 0.2);
         }
 
         .lcars-button {
+            background: linear-gradient(135deg, var(--lcars-orange), var(--lcars-red)) !important;
             color: var(--lcars-black) !important;
             border: none !important;
+            border-radius: 0 20px 0 20px !important;
+            padding: 12px 24px !important;
             font-family: inherit !important;
             font-weight: bold !important;
+            font-size: 1.1em !important;
+            cursor: pointer !important;
+            transition: all 0.3s ease !important;
+            margin: 8px !important;
+            box-shadow: 0 4px 8px rgba(255, 153, 0, 0.3) !important;
         }
 
         .lcars-button:hover {
+            background: linear-gradient(135deg, var(--lcars-red), var(--lcars-orange)) !important;
+            transform: translateY(-2px) !important;
+            box-shadow: 0 6px 12px rgba(255, 153, 0, 0.4) !important;
         }
 
         .lcars-input {
             background: var(--lcars-black) !important;
             color: var(--lcars-orange) !important;
             border: 2px solid var(--lcars-blue) !important;
+            border-radius: 0 15px 0 15px !important;
+            padding: 12px !important;
+            font-family: inherit !important;
+            font-size: 1.1em !important;
         }
 
         .lcars-chatbot {
             background: var(--lcars-black) !important;
+            border: 3px solid var(--lcars-purple) !important;
+            border-radius: 0 20px 0 20px !important;
+            min-height: 400px;
+            max-height: 500px;
+        }
+
+        .lcars-code-editor {
+            background: var(--lcars-black) !important;
+            color: var(--lcars-pale-blue) !important;
+            border: 3px solid var(--lcars-blue) !important;
+            border-radius: 0 20px 0 20px !important;
+            font-family: 'Fira Code', 'Courier New', monospace !important;
+            font-size: 1em !important;
         }
 
         .status-indicator {
             display: inline-block;
+            width: 16px;
+            height: 16px;
             border-radius: 50%;
             background: var(--lcars-red);
+            margin-right: 12px;
+            box-shadow: 0 0 10px currentColor;
         }
 
         .status-online {
             background: var(--lcars-blue);
+            animation: pulse 1.5s infinite;
         }
 
         @keyframes pulse {
+            0% { transform: scale(1); opacity: 1; }
+            50% { transform: scale(1.1); opacity: 0.7; }
+            100% { transform: scale(1); opacity: 1; }
+        }
+
+        .panel-title {
+            color: var(--lcars-yellow) !important;
+            font-size: 1.4em !important;
+            font-weight: bold !important;
+            margin-bottom: 15px !important;
+            border-bottom: 2px solid var(--lcars-orange);
+            padding-bottom: 8px;
         }
         """
 
         with gr.Blocks(css=lcars_css, theme=gr.themes.Default(), title="LCARS Terminal") as interface:
 
             with gr.Column(elem_classes="lcars-container"):
+                # Header Section
                 with gr.Row(elem_classes="lcars-header"):
                     gr.Markdown("""
                     <div style="text-align: center; width: 100%;">
+                        <div class="lcars-title">🚀 LCARS AI TERMINAL</div>
+                        <div class="lcars-subtitle">ADVANCED AI DEVELOPMENT CONSOLE</div>
                         <div style="margin-top: 10px;">
                             <span class="status-indicator status-online"></span>
                             <span style="color: var(--lcars-black); font-weight: bold;">SYSTEM ONLINE</span>
                         </div>
                     </div>
                     """)
 
+                # Main Content Area
                 with gr.Row():
+                    # Left Sidebar - Canvas Artifacts
+                    with gr.Column(scale=1, min_width=400):
                         with gr.Column(elem_classes="lcars-panel"):
+                            gr.Markdown("### 🎨 CANVAS ARTIFACTS", elem_classes="panel-title")
+                            artifact_display = gr.JSON(
+                                label="",
+                                elem_id="artifact-display"
+                            )
                             with gr.Row():
                                 refresh_artifacts_btn = gr.Button("🔄 Refresh", elem_classes="lcars-button")
                                 clear_canvas_btn = gr.Button("🗑️ Clear Canvas", elem_classes="lcars-button")
+                                load_latest_btn = gr.Button("📥 Load Latest", elem_classes="lcars-button")
 
+                    # Main Content - Chat and Code Canvas
                     with gr.Column(scale=2):
+                        # Collaborative Code Canvas
                         with gr.Accordion("💻 COLLABORATIVE CODE CANVAS", open=True):
                             code_editor = gr.Code(
+                                value="# Welcome to LCARS Collaborative Canvas\n# Your code artifacts will appear here\n\nprint('Hello, Starfleet!')",
                                 language="python",
+                                lines=20,
+                                label="",
+                                elem_classes="lcars-code-editor"
                             )
 
                             with gr.Row():
+                                discuss_code_btn = gr.Button("💬 Discuss This Code", elem_classes="lcars-button")
+                                analyze_code_btn = gr.Button("🔍 Analyze", elem_classes="lcars-button")
+                                optimize_code_btn = gr.Button("⚡ Optimize", elem_classes="lcars-button")
+                                document_code_btn = gr.Button("📚 Document", elem_classes="lcars-button")
 
                         # Chat Interface
                         with gr.Column(elem_classes="lcars-panel"):
+                            gr.Markdown("### 💬 MISSION LOG", elem_classes="panel-title")
+                            chatbot = gr.Chatbot(
+                                label="",
+                                elem_classes="lcars-chatbot",
+                                show_label=False,
+                                height=400
+                            )
 
                             with gr.Row():
                                 message_input = gr.Textbox(
                                     placeholder="Enter your command or query...",
                                     show_label=False,
                                     lines=2,
+                                    elem_classes="lcars-input",
                                     scale=4
                                 )
+                                send_btn = gr.Button("🚀 TRANSMIT", elem_classes="lcars-button", scale=1)
 
+                # Status and Controls
                 with gr.Row():
                     status_display = gr.Textbox(
+                        value=f"LCARS terminal operational. Model: {self.enhanced_agent.agent.model_id}",
                         label="Status",
+                        max_lines=2,
+                        elem_classes="lcars-input"
                     )
                     with gr.Column(scale=0):
                         clear_chat_btn = gr.Button("🗑️ Clear Chat", elem_classes="lcars-button")
                         new_session_btn = gr.Button("🆕 New Session", elem_classes="lcars-button")  # restored: referenced by the wiring below
 
             # === EVENT HANDLERS ===
 
+            def get_artifacts():
+                """Get current canvas artifacts"""
+                return self.enhanced_agent.get_canvas_summary(self.current_conversation)
+
+            def clear_canvas():
+                """Clear the canvas"""
+                self.enhanced_agent.clear_canvas(self.current_conversation)
+                return [], "✅ Canvas cleared"
+
+            def load_latest_artifact_to_canvas():
+                """Load the most recent code artifact into the canvas"""
+                latest_code = self.enhanced_agent.get_latest_code_artifact(self.current_conversation)
+                if latest_code:
+                    return latest_code, "✅ Latest artifact loaded"
+                return "# No code artifacts available", "⚠️ No artifacts found"
+
+            async def process_message(message, history):
+                """Process a chat message"""
                 if not message.strip():
                     return "", history, "Please enter a message", get_artifacts()
 
+                # Add user message to history
                 history = history + [[message, None]]
 
                 try:
+                    # Get AI response using the enhanced agent
+                    response = await self.enhanced_agent.chat_with_canvas(
+                        message,
+                        self.current_conversation,
+                        include_canvas=True
                     )
 
+                    # Update history with the response
                     history[-1][1] = response
 
+                    # Get updated artifacts
+                    artifacts = get_artifacts()
 
                     status = f"✅ Response received. Canvas artifacts: {len(artifacts)}"
                     return "", history, status, artifacts
 
                 except Exception as e:
                     error_msg = f"❌ Error: {str(e)}"
                     history[-1][1] = error_msg
+                    return "", history, error_msg, get_artifacts()
+
+            def create_code_query(code, query_template):
+                """Create a query about code"""
+                if not code.strip():
+                    return "Please provide some code first"
+                return query_template.format(code=code)
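+            # e.g. create_code_query("x = 1", "Explain:\n{code}") -> "Explain:\nx = 1"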
+
+            def discuss_code(code):
+                return create_code_query(code, "Let's discuss this code:\n```python\n{code}\n```")
+
+            def analyze_code(code):
+                return create_code_query(code, "Perform a comprehensive analysis of this code:\n```python\n{code}\n```")
+
+            def optimize_code(code):
+                return create_code_query(code, "Optimize this code for performance and best practices:\n```python\n{code}\n```")
+
+            def document_code(code):
+                return create_code_query(code, "Generate comprehensive documentation for this code:\n```python\n{code}\n```")
+
             def clear_chat():
+                """Clear chat history"""
+                self.enhanced_agent.agent.clear_history()
                 return [], "✅ Chat cleared"
+
             def new_session():
+                """Start a new session"""
+                self.enhanced_agent.agent.clear_history()
+                self.enhanced_agent.clear_canvas(self.current_conversation)
+                return [], "# New collaborative session started\n\nprint('Ready for development!')", "🆕 New session started", []
+
+            # Connect event handlers
+            send_btn.click(
+                process_message,
+                inputs=[message_input, chatbot],
+                outputs=[message_input, chatbot, status_display, artifact_display]
+            )
+
+            message_input.submit(
+                process_message,
+                inputs=[message_input, chatbot],
+                outputs=[message_input, chatbot, status_display, artifact_display]
+            )
+
+            discuss_code_btn.click(
+                discuss_code,
+                inputs=code_editor,
+                outputs=message_input
+            )
+
+            analyze_code_btn.click(
+                analyze_code,
+                inputs=code_editor,
+                outputs=message_input
+            )
+
+            optimize_code_btn.click(
+                optimize_code,
+                inputs=code_editor,
+                outputs=message_input
+            )
+
+            document_code_btn.click(
+                document_code,
+                inputs=code_editor,
+                outputs=message_input
+            )
+
+            refresh_artifacts_btn.click(
+                get_artifacts,
+                outputs=artifact_display
+            )
+
+            clear_canvas_btn.click(
+                clear_canvas,
+                outputs=[artifact_display, status_display]
+            )
+
+            load_latest_btn.click(
+                load_latest_artifact_to_canvas,
+                outputs=[code_editor, status_display]
+            )
+
+            clear_chat_btn.click(
+                clear_chat,
+                outputs=[chatbot, status_display]
+            )
+
+            new_session_btn.click(
+                new_session,
+                outputs=[chatbot, code_editor, status_display, artifact_display]
+            )
+
+            # Initialize artifacts on load
             interface.load(get_artifacts, outputs=artifact_display)
 
         return interface
 
+
+# --- Example Usage ---
+if __name__ == "__main__":
+    """
+    Example of how to use this interface with your AI_Agent.
+
+    Uncomment and modify based on your actual import paths:
+    """
+
+    # # Create your agent instance
+    # my_agent = AI_Agent(
+    #     model_id="leroydyer/qwen/qwen3-0.6b-q4_k_m.gguf",
+    #     system_prompt="You are a helpful AI development assistant."
+    # )
+
+    # # Create and launch the interface
+    # interface = LcarsInterface(my_agent)
+    # demo = interface.create_interface()
+    # demo.launch(share=False, show_error=True)
+
+    console.log("[bold yellow]⚠️ Please uncomment and configure the main block with your AI_Agent[/bold yellow]")
+    console.log("[bold cyan]Example:[/bold cyan]")
+    console.log("    from your_module import AI_Agent")
+    console.log("    my_agent = AI_Agent(model_id='your-model', system_prompt='...')")
+    console.log("    interface = LcarsInterface(my_agent)")
+    console.log("    demo = interface.create_interface()")
+    console.log("    demo.launch()")