LeroyDyer committed on
Commit
87d431f
·
verified ·
1 Parent(s): 0df7f2b

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +1637 -0
app.py ADDED
@@ -0,0 +1,1637 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File: lcars_enhanced_interface.py
2
+
3
+ import asyncio
4
+ import json
5
+ import os
6
+ import time
7
+ import uuid
8
+ from typing import Dict, List, Any, Optional
9
+ from dataclasses import dataclass
10
+ import threading
11
+ import pyttsx3
12
+ import re
13
+ from pathlib import Path
14
+
15
+ import gradio as gr
16
+ from rich.console import Console
17
+ from openai import OpenAI, AsyncOpenAI
18
+ import asyncio
19
+ from collections import defaultdict
20
+ import json
21
+ import os
22
+ import queue
23
+ import traceback
24
+ import uuid
25
+ from typing import Dict, List, Any, Optional, Callable, Coroutine
26
+ from dataclasses import dataclass
27
+ from queue import Queue, Empty
28
+ from threading import Lock, Event, Thread
29
+ import threading
30
+ from concurrent.futures import ThreadPoolExecutor
31
+ import time
32
+ from openai import OpenAI, AsyncOpenAI
33
+ from rich.console import Console
34
+ import gradio as gr
35
+ import pyttsx3
36
+ import re
37
+ from pathlib import Path
38
#############################################################
# --- Connection / model configuration ---
# NOTE(review): this block previously defined the DEFAULT_* constants twice
# with conflicting temperatures (0.3, then 0.7). The later definition won at
# import time, so a single group with temperature 0.7 is kept here.
BASE_URL = "http://localhost:1234/v1"
BASE_API_KEY = "not-needed"
# Async client shared by static helpers (LLMAgent.openai_generate).
BASE_CLIENT = AsyncOpenAI(
    base_url=BASE_URL,
    api_key=BASE_API_KEY
)  # Global state for client
BASEMODEL_ID = "leroydyer/qwen/qwen3-0.6b-q4_k_m.gguf"  # Global state for selected model ID
# Sync client used by LLMAgent._call_llm_sync from the worker thread.
CLIENT = OpenAI(
    base_url=BASE_URL,
    api_key=BASE_API_KEY)
# --- Configuration ---
DEFAULT_BASE_URL = "http://localhost:1234/v1"
DEFAULT_API_KEY = "not-needed"
DEFAULT_MODEL_ID = "leroydyer/qwen/qwen3-0.6b-q4_k_m.gguf"
DEFAULT_TEMPERATURE = 0.7
DEFAULT_MAX_TOKENS = 5000
# Shared rich console used for all logging in this module.
console = Console()
64
class EventManager:
    """Thread-safe publish/subscribe registry mapping event names to handlers."""

    def __init__(self):
        # event name -> list of handler callables; defaultdict avoids key checks.
        self._handlers = defaultdict(list)
        self._lock = threading.Lock()

    def register(self, event: str, handler: Callable):
        """Subscribe *handler* to *event*."""
        with self._lock:
            self._handlers[event].append(handler)

    def unregister(self, event: str, handler: Callable):
        """Remove *handler* from *event*; silently ignores unknown pairs."""
        with self._lock:
            registered = self._handlers.get(event)
            if registered is not None and handler in registered:
                registered.remove(handler)

    def raise_event(self, event: str, data: Any):
        """Invoke every handler registered for *event* with *data*.

        The handler list is snapshotted under the lock so handlers may
        (un)register during dispatch without corrupting iteration; failures in
        one handler are logged and do not stop the others.
        """
        with self._lock:
            snapshot = list(self._handlers[event])
        for subscriber in snapshot:
            try:
                subscriber(data)
            except Exception as e:
                console.log(f"Error in event handler for {event}: {e}", style="bold red")
83
+
84
# Process-wide singleton event bus used by LLMAgent for response routing.
EVENT_MANAGER = EventManager()

def RegisterEvent(event: str, handler: Callable):
    """Module-level convenience wrapper around EVENT_MANAGER.register."""
    EVENT_MANAGER.register(event, handler)

def RaiseEvent(event: str, data: Any):
    """Module-level convenience wrapper around EVENT_MANAGER.raise_event."""
    EVENT_MANAGER.raise_event(event, data)

def UnregisterEvent(event: str, handler: Callable):
    """Module-level convenience wrapper around EVENT_MANAGER.unregister."""
    EVENT_MANAGER.unregister(event, handler)
91
@dataclass
class LLMMessage:
    """A single chat message flowing through the agent queue/event system."""
    role: str        # "system" | "user" | "assistant" (as used elsewhere in this file)
    content: str
    message_id: Optional[str] = None       # auto-generated UUID when omitted
    conversation_id: Optional[str] = None  # callers fall back to "default" when None
    timestamp: Optional[float] = None      # auto-set to time.time() when omitted
    metadata: Optional[Dict[str, Any]] = None  # auto-set to {} when omitted

    def __post_init__(self):
        # Fill defaults that need a fresh value per instance (cannot be
        # dataclass field defaults: UUID/time must differ per message and a
        # dict default would be shared).
        if self.message_id is None:
            self.message_id = str(uuid.uuid4())
        if self.timestamp is None:
            self.timestamp = time.time()
        if self.metadata is None:
            self.metadata = {}
106
+
107
@dataclass
class LLMRequest:
    """A queued unit of work: the message plus how to deliver its answer."""
    message: LLMMessage
    response_event: Optional[str] = None  # event name raised with the LLMResponse
    callback: Optional[Callable] = None   # optional direct callback(LLMResponse)

    def __post_init__(self):
        # Default to a per-message event name so each request gets a unique
        # response channel on the event bus.
        if self.response_event is None:
            self.response_event = f"llm_response_{self.message.message_id}"
115
+
116
@dataclass
class LLMResponse:
    """Result of processing one LLMRequest, routed back via the event bus."""
    message: LLMMessage        # assistant message (or system error message on failure)
    request_id: str            # message_id of the originating request
    success: bool = True
    error: Optional[str] = None  # populated only when success is False
122
+
123
class LLMAgent:
    """Main agent driver.

    Queue-backed agent able to service multiple messages at once: requests are
    pushed onto a bounded queue, processed on a background worker thread, and
    answers are delivered back through the global event bus and/or per-request
    callbacks, which makes it easy to integrate with both console applications
    and UIs.
    """
    def __init__(
        self,
        model_id: str = DEFAULT_MODEL_ID,
        system_prompt: Optional[str] = None,
        max_queue_size: int = 1000,
        max_retries: int = 3,
        timeout: int = 30000,
        max_tokens: int = 5000,
        temperature: float = 0.3,
        base_url: str = "http://localhost:1234/v1",
        api_key: str = "not-needed",
        generate_fn: Optional[Callable[[List[Dict[str, str]]], Coroutine[Any, Any, str]]] = None
    ):
        """Build the agent and immediately start its worker thread.

        generate_fn: optional async override for response generation; defaults
        to the OpenAI-compatible client call.
        """
        self.model_id = model_id
        self.system_prompt = system_prompt or "You are a helpful AI assistant."
        self.request_queue = Queue(maxsize=max_queue_size)
        self.max_retries = max_retries
        # NOTE(review): timeout is consumed by chat() via asyncio.wait_for,
        # i.e. interpreted as SECONDS; the default 30000 reads like a
        # milliseconds value (~8.3 hours as seconds) - confirm intent.
        self.timeout = timeout
        self.is_running = False
        self._stop_event = Event()
        self.processing_thread = None
        # Conversation tracking: conversation_id -> ordered message list.
        self.conversations: Dict[str, List[LLMMessage]] = {}
        self.max_history_length = 20
        self._generate = generate_fn or self._default_generate
        self.api_key = api_key
        self.base_url = base_url
        self.max_tokens = max_tokens
        self.temperature = temperature
        self.async_client = self.CreateClient(base_url, api_key)
        # Active requests waiting for responses, keyed by message_id.
        self.pending_requests: Dict[str, LLMRequest] = {}
        self.pending_requests_lock = Lock()
        # Register internal event handlers
        self._register_event_handlers()
        # Start the processing thread immediately
        self.start()

    async def _default_generate(self, messages: List[Dict[str, str]]) -> str:
        """Default generate function if none provided."""
        return await self.openai_generate(messages)

    def _register_event_handlers(self):
        """Register internal event handlers for response routing."""
        RegisterEvent("llm_internal_response", self._handle_internal_response)

    def _handle_internal_response(self, response: LLMResponse):
        """Route a finished LLMResponse to its waiting request.

        Pops the matching entry from pending_requests (under the lock), then
        raises the request's response event and invokes its callback, if any.
        Unmatched responses are logged and dropped.
        """
        console.log(f"[bold cyan]Handling internal response for: {response.request_id}[/bold cyan]")
        request = None
        with self.pending_requests_lock:
            if response.request_id in self.pending_requests:
                request = self.pending_requests[response.request_id]
                del self.pending_requests[response.request_id]
                console.log(f"Found pending request for: {response.request_id}")
            else:
                console.log(f"No pending request found for: {response.request_id}", style="yellow")
                return
        # Raise the specific response event
        if request.response_event:
            console.log(f"[bold green]Raising event: {request.response_event}[/bold green]")
            RaiseEvent(request.response_event, response)
        # Call callback if provided
        if request.callback:
            try:
                console.log(f"[bold yellow]Calling callback for: {response.request_id}[/bold yellow]")
                request.callback(response)
            except Exception as e:
                console.log(f"Error in callback: {e}", style="bold red")

    def _add_to_conversation_history(self, conversation_id: str, message: LLMMessage):
        """Append *message* to the conversation, trimming to a bounded length."""
        if conversation_id not in self.conversations:
            self.conversations[conversation_id] = []
        self.conversations[conversation_id].append(message)
        # Trim history if too long (x2: history holds user AND assistant turns).
        if len(self.conversations[conversation_id]) > self.max_history_length * 2:
            self.conversations[conversation_id] = self.conversations[conversation_id][-(self.max_history_length * 2):]

    def _build_messages_from_conversation(self, conversation_id: str, new_message: LLMMessage) -> List[Dict[str, str]]:
        """Build the OpenAI-style message list: system prompt + history + new message."""
        messages = []
        # Add system prompt
        if self.system_prompt:
            messages.append({"role": "system", "content": self.system_prompt})
        # Add conversation history
        if conversation_id in self.conversations:
            for msg in self.conversations[conversation_id][-self.max_history_length:]:
                messages.append({"role": msg.role, "content": msg.content})
        # Add the new message
        messages.append({"role": new_message.role, "content": new_message.content})
        return messages

    def _process_llm_request(self, request: LLMRequest):
        """Process a single LLM request on the worker thread.

        Calls the model synchronously, records both sides of the exchange in
        the conversation history, and publishes the result (or an error
        response) on the internal event bus.
        """
        console.log(f"[bold green]Processing LLM request: {request.message.message_id}[/bold green]")
        try:
            # Build messages for LLM
            messages = self._build_messages_from_conversation(
                request.message.conversation_id or "default",
                request.message
            )
            console.log(f"Calling LLM with {len(messages)} messages")
            # Call LLM - Use sync call for thread compatibility
            response_content = self._call_llm_sync(messages)
            console.log(f"[bold green]LLM response received: {response_content}...[/bold green]")
            # Create response message
            response_message = LLMMessage(
                role="assistant",
                content=response_content,
                conversation_id=request.message.conversation_id,
                metadata={"request_id": request.message.message_id}
            )
            # Update conversation history
            self._add_to_conversation_history(
                request.message.conversation_id or "default",
                request.message
            )
            self._add_to_conversation_history(
                request.message.conversation_id or "default",
                response_message
            )
            # Create and send response
            response = LLMResponse(
                message=response_message,
                request_id=request.message.message_id,
                success=True
            )
            console.log(f"[bold blue]Sending internal response for: {request.message.message_id}[/bold blue]")
            RaiseEvent("llm_internal_response", response)
        except Exception as e:
            console.log(f"[bold red]Error processing LLM request: {e}[/bold red]")
            traceback.print_exc()
            # Create error response so the waiting caller is unblocked.
            error_response = LLMResponse(
                message=LLMMessage(
                    role="system",
                    content=f"Error: {str(e)}",
                    conversation_id=request.message.conversation_id
                ),
                request_id=request.message.message_id,
                success=False,
                error=str(e)
            )
            RaiseEvent("llm_internal_response", error_response)

    def _call_llm_sync(self, messages: List[Dict[str, str]]) -> str:
        """Sync call to the LLM with retry logic.

        Uses the module-level sync CLIENT (not the instance's async client) so
        it is safe to call from the worker thread. Retries up to max_retries
        with a 1s pause; the last failure is re-raised.
        """
        console.log(f"Making LLM call to {self.model_id}")
        for attempt in range(self.max_retries):
            try:
                response = CLIENT.chat.completions.create(
                    model=self.model_id,
                    messages=messages,
                    temperature=self.temperature,
                    max_tokens=self.max_tokens
                )
                content = response.choices[0].message.content
                console.log(f"LLM call successful, response length: {len(content)}")
                return content
            except Exception as e:
                console.log(f"LLM call attempt {attempt + 1} failed: {e}")
                if attempt == self.max_retries - 1:
                    raise e
                time.sleep(1)  # Wait before retry

    def _process_queue(self):
        """Main queue processing loop run by the worker thread.

        Polls with a 1s timeout so _stop_event is checked regularly; any
        per-request error is logged and the loop keeps running.
        """
        console.log("[bold cyan]LLM Agent queue processor started[/bold cyan]")
        while not self._stop_event.is_set():
            try:
                request = self.request_queue.get(timeout=1.0)
                if request:
                    console.log(f"Got request from queue: {request.message.message_id}")
                    self._process_llm_request(request)
                    self.request_queue.task_done()
            except Empty:
                continue
            except Exception as e:
                console.log(f"Error in queue processing: {e}", style="bold red")
                traceback.print_exc()
        console.log("[bold cyan]LLM Agent queue processor stopped[/bold cyan]")

    def send_message(
        self,
        content: str,
        role: str = "user",
        conversation_id: Optional[str] = None,
        response_event: Optional[str] = None,
        callback: Optional[Callable] = None,
        metadata: Optional[Dict] = None
    ) -> str:
        """Queue a message for the LLM; the answer arrives via events/callback.

        Returns the new message_id. Raises RuntimeError when the agent is
        stopped or the queue stays full for 5 seconds.
        """
        if not self.is_running:
            raise RuntimeError("LLM Agent is not running. Call start() first.")
        # Create message
        message = LLMMessage(
            role=role,
            content=content,
            conversation_id=conversation_id,
            metadata=metadata or {}
        )
        # Create request
        request = LLMRequest(
            message=message,
            response_event=response_event,
            callback=callback
        )
        # Store in pending requests BEFORE adding to queue, so the worker can
        # never answer a request that is not yet tracked.
        with self.pending_requests_lock:
            self.pending_requests[message.message_id] = request
            console.log(f"Added to pending requests: {message.message_id}")
        # Add to queue
        try:
            self.request_queue.put(request, timeout=5.0)
            console.log(f"[bold magenta]Message queued: {message.message_id}, Content: {content[:50]}...[/bold magenta]")
            return message.message_id
        except queue.Full:
            console.log(f"[bold red]Queue full, cannot send message[/bold red]")
            # Roll back the pending-request entry added above.
            with self.pending_requests_lock:
                if message.message_id in self.pending_requests:
                    del self.pending_requests[message.message_id]
            raise RuntimeError("LLM Agent queue is full")

    async def chat(self, messages: List[Dict[str, str]]) -> str:
        """
        Async chat method that sends message via queue and returns response string.
        This is the main method you should use.

        Only the first "user" entry of *messages* is sent; the worker thread's
        callback resolves an asyncio future via call_soon_threadsafe.
        """
        # Create future for the response
        loop = asyncio.get_event_loop()
        response_future = loop.create_future()

        def chat_callback(response: LLMResponse):
            """Callback when LLM responds - thread-safe."""
            console.log(f"[bold yellow]βœ“ CHAT CALLBACK TRIGGERED![/bold yellow]")
            if not response_future.done():
                if response.success:
                    content = response.message.content
                    console.log(f"Callback received content: {content}...")
                    # Schedule setting the future result on the main event loop
                    loop.call_soon_threadsafe(response_future.set_result, content)
                else:
                    console.log(f"Error in response: {response.error}")
                    error_msg = f"❌ Error: {response.error}"
                    loop.call_soon_threadsafe(response_future.set_result, error_msg)
            else:
                console.log(f"[bold red]Future already done, ignoring callback[/bold red]")

        console.log(f"Sending message to LLM agent...")
        # Extract the actual message content from the messages list
        user_message = ""
        for msg in messages:
            if msg.get("role") == "user":
                user_message = msg.get("content", "")
                break
        if not user_message.strip():
            return ""
        # Send message with callback using the queue system
        try:
            message_id = self.send_message(
                content=user_message,
                conversation_id="default",
                callback=chat_callback
            )
            console.log(f"Message sent with ID: {message_id}, waiting for response...")
            # Wait for the response and return it
            try:
                response = await asyncio.wait_for(response_future, timeout=self.timeout)
                console.log(f"[bold green]βœ“ Chat complete! Response length: {len(response)}[/bold green]")
                return response
            except asyncio.TimeoutError:
                console.log("[bold red]Response timeout[/bold red]")
                # Clean up the pending request
                with self.pending_requests_lock:
                    if message_id in self.pending_requests:
                        del self.pending_requests[message_id]
                return "❌ Response timeout - check if LLM server is running"
        except Exception as e:
            console.log(f"[bold red]Error sending message: {e}[/bold red]")
            traceback.print_exc()
            return f"❌ Error sending message: {e}"

    def start(self):
        """Start the LLM agent's worker thread (idempotent)."""
        if not self.is_running:
            self.is_running = True
            self._stop_event.clear()
            self.processing_thread = Thread(target=self._process_queue, daemon=True)
            self.processing_thread.start()
            console.log("[bold green]LLM Agent started[/bold green]")

    def stop(self):
        """Stop the LLM agent, waiting up to 10s for the worker to finish."""
        console.log("Stopping LLM Agent...")
        self._stop_event.set()
        if self.processing_thread and self.processing_thread.is_alive():
            self.processing_thread.join(timeout=10)
        self.is_running = False
        console.log("LLM Agent stopped")

    def get_conversation_history(self, conversation_id: str = "default") -> List[LLMMessage]:
        """Get a shallow copy of the conversation history."""
        return self.conversations.get(conversation_id, [])[:]

    def clear_conversation(self, conversation_id: str = "default"):
        """Clear conversation history."""
        if conversation_id in self.conversations:
            del self.conversations[conversation_id]

    async def _chat(self, messages: List[Dict[str, str]]) -> str:
        # Thin async wrapper over the pluggable generate function.
        return await self._generate(messages)

    @staticmethod
    async def openai_generate(messages: List[Dict[str, str]], max_tokens: int = 8096, temperature: float = 0.4, model: str = DEFAULT_MODEL_ID, tools=None) -> str:
        """Static method for generating responses using OpenAI API.

        Uses the module-level BASE_CLIENT; errors are returned as a tagged
        string rather than raised.
        """
        try:
            resp = await BASE_CLIENT.chat.completions.create(
                model=model,
                messages=messages,
                temperature=temperature,
                max_tokens=max_tokens,
                tools=tools
            )
            response_text = resp.choices[0].message.content or ""
            return response_text
        except Exception as e:
            console.log(f"[bold red]Error in openai_generate: {e}[/bold red]")
            return f"[LLM_Agent Error - openai_generate: {str(e)}]"

    async def _call_(self, messages: List[Dict[str, str]]) -> str:
        """Internal call method using instance client.

        Like openai_generate but bound to this instance's model/temperature/
        max_tokens; errors are returned as a tagged string rather than raised.
        """
        try:
            resp = await self.async_client.chat.completions.create(
                model=self.model_id,
                messages=messages,
                temperature=self.temperature,
                max_tokens=self.max_tokens
            )
            response_text = resp.choices[0].message.content or ""
            return response_text
        except Exception as e:
            console.log(f"[bold red]Error in _call_: {e}[/bold red]")
            return f"[LLM_Agent Error - _call_: {str(e)}]"

    @staticmethod
    def CreateClient(base_url: str, api_key: str) -> AsyncOpenAI:
        '''Create async OpenAI Client required for multi tasking'''
        return AsyncOpenAI(
            base_url=base_url,
            api_key=api_key
        )

    @staticmethod
    async def fetch_available_models(base_url: str, api_key: str) -> List[str]:
        """Fetches available models from the OpenAI API."""
        try:
            async_client = AsyncOpenAI(base_url=base_url, api_key=api_key)
            models = await async_client.models.list()
            model_choices = [model.id for model in models.data]
            return model_choices
        except Exception as e:
            console.log(f"[bold red]LLM_Agent Error fetching models: {e}[/bold red]")
            return ["LLM_Agent Error fetching models"]

    def get_models(self) -> List[str]:
        """Get available models using instance credentials.

        NOTE(review): asyncio.run raises if called from a running event loop,
        so this helper is only safe from synchronous contexts.
        """
        return asyncio.run(self.fetch_available_models(self.base_url, self.api_key))

    def get_queue_size(self) -> int:
        """Get current queue size."""
        return self.request_queue.qsize()

    def get_pending_requests_count(self) -> int:
        """Get number of pending requests."""
        with self.pending_requests_lock:
            return len(self.pending_requests)

    def get_status(self) -> Dict[str, Any]:
        """Get agent status information as a plain dict (UI-friendly)."""
        return {
            "is_running": self.is_running,
            "queue_size": self.get_queue_size(),
            "pending_requests": self.get_pending_requests_count(),
            "conversations_count": len(self.conversations),
            "model": self.model_id
        }
512
+
513
+ # --- Enhanced Canvas Management ---
514
@dataclass
class CanvasArtifact:
    """One item on the collaborative canvas (extracted code, text, etc.)."""
    id: str            # short 8-char id derived from a UUID
    type: str          # 'code', 'diagram', 'text', 'image'
    content: str
    title: str
    timestamp: float   # creation time (time.time())
    metadata: Dict[str, Any]  # e.g. {"conversation_id": ...}
522
+
523
class EnhancedLLMAgent(LLMAgent):
    """LLMAgent extended with a per-conversation collaborative canvas.

    Artifacts (mainly fenced code blocks extracted from assistant replies) are
    stored per conversation and can be re-injected into the prompt as context.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Enhanced canvas management: conversation_id -> ordered artifact list.
        self.canvas_artifacts: Dict[str, List[CanvasArtifact]] = {}

    def add_artifact_to_canvas(self, conversation_id: str, content: str, artifact_type: str = "code", title: str = None):
        """Append a new artifact to the conversation's canvas and return it."""
        if conversation_id not in self.canvas_artifacts:
            self.canvas_artifacts[conversation_id] = []
        artifact = CanvasArtifact(
            id=str(uuid.uuid4())[:8],
            type=artifact_type,
            content=content,
            title=title or f"{artifact_type}_{len(self.canvas_artifacts[conversation_id]) + 1}",
            timestamp=time.time(),
            metadata={"conversation_id": conversation_id}
        )
        self.canvas_artifacts[conversation_id].append(artifact)
        return artifact

    def get_canvas_context(self, conversation_id: str) -> str:
        """Render the most recent artifacts as a text block for prompt context."""
        if conversation_id not in self.canvas_artifacts or not self.canvas_artifacts[conversation_id]:
            return ""
        context_lines = ["\n=== COLLABORATIVE CANVAS ARTIFACTS ==="]
        for artifact in self.canvas_artifacts[conversation_id][-10:]:  # Last 10 artifacts
            context_lines.append(f"\n--- {artifact.title} [{artifact.type.upper()}] ---")
            # Truncate long artifacts so the context stays prompt-sized.
            context_lines.append(artifact.content[:500] + "..." if len(artifact.content) > 500 else artifact.content)
        return "\n".join(context_lines) + "\n=================================\n"

    def _extract_artifacts_to_canvas(self, response: str, conversation_id: str):
        """Extract fenced code blocks from *response* onto the canvas.

        FIX: the language tag is now captured together with each block, so
        every artifact is labelled with its own language. Previously the tag
        was re-searched from the start of the whole response inside the loop,
        labelling every block with the FIRST fence's language.
        """
        for lang_tag, code_block in re.findall(r'```(\w+)?\n(.*?)```', response, re.DOTALL):
            if len(code_block.strip()) > 10:  # ignore trivial snippets
                lang = lang_tag.lower() if lang_tag else "python"
                self.add_artifact_to_canvas(
                    conversation_id,
                    code_block.strip(),
                    "code",
                    f"Extracted Code ({lang})"
                )

    def get_canvas_summary(self, conversation_id: str) -> List[Dict]:
        """Return UI-friendly dicts for the canvas artifacts, newest first."""
        if conversation_id not in self.canvas_artifacts:
            return []
        return [
            {
                "id": artifact.id,
                "type": artifact.type.upper(),
                "title": artifact.title,
                "preview": artifact.content[:100] + "..." if len(artifact.content) > 100 else artifact.content,
                "timestamp": time.strftime("%H:%M", time.localtime(artifact.timestamp))
            }
            for artifact in reversed(self.canvas_artifacts[conversation_id])  # Newest first
        ]

    def get_artifact_by_id(self, conversation_id: str, artifact_id: str):
        """Return the artifact with *artifact_id*, or None when absent."""
        if conversation_id not in self.canvas_artifacts:
            return None
        for artifact in self.canvas_artifacts[conversation_id]:
            if artifact.id == artifact_id:
                return artifact
        return None

    def clear_canvas(self, conversation_id: str = "default"):
        """Remove all artifacts for the conversation (keeps the key)."""
        if conversation_id in self.canvas_artifacts:
            self.canvas_artifacts[conversation_id] = []

    async def chat_with_canvas(self, message: str, conversation_id: str = "default", include_canvas: bool = True) -> str:
        """Enhanced chat that includes canvas context.

        Builds the prompt from system prompt (+ optional canvas context) and
        conversation history, calls the instance client directly, records both
        turns, and auto-extracts any fenced code into the canvas. Errors are
        returned as a string, not raised.
        """
        # Build messages with system prompt and canvas context
        messages = [{"role": "system", "content": self.system_prompt}]

        # Include canvas context if requested
        if include_canvas:
            canvas_context = self.get_canvas_context(conversation_id)
            if canvas_context:
                messages.append({"role": "system", "content": f"Current collaborative canvas state:\n{canvas_context}"})

        # Add conversation history
        for msg in self.conversations.get(conversation_id, [])[-self.max_history_length:]:
            messages.append({"role": msg.role, "content": msg.content})

        # Add current message
        messages.append({"role": "user", "content": message})

        try:
            response = await self._call_(messages)

            # Update conversation history
            user_msg = LLMMessage(role="user", content=message, conversation_id=conversation_id)
            self._add_to_conversation_history(conversation_id, user_msg)

            response_msg = LLMMessage(role="assistant", content=response, conversation_id=conversation_id)
            self._add_to_conversation_history(conversation_id, response_msg)

            # Auto-extract and add code artifacts to canvas
            self._extract_artifacts_to_canvas(response, conversation_id)

            return response

        except Exception as e:
            error_msg = f"Error in chat_with_canvas: {str(e)}"
            console.log(f"[red]{error_msg}[/red]")
            return error_msg
628
+
629
+
630
+
631
+ # --- Enhanced LLMAgent with Canvas Support ---
632
+ class AI_Agent:
633
    def __init__(self, model_id: str, system_prompt: str = "You are a helpful assistant. Respond concisely in 1-2 sentences.", history: Optional[List[Dict]] = None):
        """Create a high-level agent wrapping a queue-backed LLMAgent client.

        history: optional pre-seeded conversation (list of role/content dicts);
        note the passed list is kept by reference when non-empty.
        """
        self.model_id = model_id
        self.system_prompt = system_prompt
        self.history = history or []
        self.conversation_id = f"conv_{uuid.uuid4().hex[:8]}"

        # Create agent instance (LLMAgent starts its worker thread on construction).
        self.client = LLMAgent(
            model_id=model_id,
            system_prompt=self.system_prompt,
            generate_fn=LLMAgent.openai_generate
        )

        # NOTE(review): log message says "MyAgent" although the class is
        # AI_Agent - looks like a leftover from a rename.
        console.log(f"[bold green]βœ“ MyAgent initialized with model: {model_id}[/bold green]")
647
+
648
    async def call_llm(self, messages: List[Dict], use_history: bool = True) -> str:
        """
        Send messages to LLM and get response.

        Args:
            messages: List of message dicts with 'role' and 'content'
            use_history: Whether to include conversation history
        Returns:
            str: LLM response (or an "Error: ..." string; never raises)
        """
        try:
            console.log(f"[bold yellow]Sending {len(messages)} messages to LLM (use_history: {use_history})...[/bold yellow]")

            # Enhance messages based on history setting (system prompt + optional history).
            enhanced_messages = await self._enhance_messages(messages, use_history)

            response = await self.client.chat(enhanced_messages)
            console.log(f"[bold green]βœ“ Response received ({len(response)} chars)[/bold green]")

            # Update conversation history ONLY if we're using history
            if use_history:
                self._update_history(messages, response)

            return response

        except Exception as e:
            console.log(f"[bold red]βœ— ERROR: {e}[/bold red]")
            traceback.print_exc()
            return f"Error: {str(e)}"
676
+
677
+ async def _enhance_messages(self, messages: List[Dict], use_history: bool) -> List[Dict]:
678
+ """Enhance messages with system prompt and optional history"""
679
+ enhanced = []
680
+
681
+ # Add system prompt if not already in messages
682
+ has_system = any(msg.get('role') == 'system' for msg in messages)
683
+ if not has_system and self.system_prompt:
684
+ enhanced.append({"role": "system", "content": self.system_prompt})
685
+
686
+ # Add conversation history only if requested
687
+ if use_history and self.history:
688
+ enhanced.extend(self.history[-10:]) # Last 10 messages for context
689
+
690
+ # Add current messages
691
+ enhanced.extend(messages)
692
+
693
+ return enhanced
694
+
695
+ def _update_history(self, messages: List[Dict], response: str):
696
+ """Update conversation history with new exchange"""
697
+ # Add user messages to history
698
+ for msg in messages:
699
+ if msg.get('role') in ['user', 'assistant']:
700
+ self.history.append(msg)
701
+
702
+ # Add assistant response to history
703
+ self.history.append({"role": "assistant", "content": response})
704
+
705
+ # Keep history manageable (last 20 exchanges)
706
+ if len(self.history) > 40: # 20 user + 20 assistant messages
707
+ self.history = self.history[-40:]
708
+
709
    async def simple_query(self, query: str) -> str:
        """Simple one-shot query method - NO history/context.

        The exchange is neither prefixed with stored history nor recorded
        into it (use_history=False on both sides).
        """
        messages = [{"role": "user", "content": query}]
        return await self.call_llm(messages, use_history=False)
713
+
714
+ async def multi_turn_chat(self, user_input: str) -> str:
715
+ """Multi-turn chat that maintains context across calls"""
716
+ messages = [{"role": "user", "content": user_input}]
717
+ response = await self.call_llm(messages, use_history=True)
718
+ return response
719
+
720
+
721
+ def get_conversation_summary(self) -> Dict:
722
+ """Get conversation summary"""
723
+ return {
724
+ "conversation_id": self.conversation_id,
725
+ "total_messages": len(self.history),
726
+ "user_messages": len([msg for msg in self.history if msg.get('role') == 'user']),
727
+ "assistant_messages": len([msg for msg in self.history if msg.get('role') == 'assistant']),
728
+ "recent_exchanges": self.history[-4:] if self.history else []
729
+ }
730
+
731
    def clear_history(self):
        """Clear conversation history.

        Empties the in-place history list (aliases stay valid) and logs the
        action to the rich console.
        """
        self.history.clear()
        console.log("[bold yellow]Conversation history cleared[/bold yellow]")
735
+
736
+ def update_system_prompt(self, new_prompt: str):
737
+ """Update the system prompt"""
738
+ self.system_prompt = new_prompt
739
+ console.log(f"[bold blue]System prompt updated[/bold blue]")
740
+
741
+ def stop(self):
742
+ """Stop the client gracefully"""
743
+ if hasattr(self, 'client') and self.client:
744
+ self.client.stop()
745
+ console.log("[bold yellow]MyAgent client stopped[/bold yellow]")
746
+ async def contextual_query(self, query: str, context_messages: List[Dict] = None,
747
+ context_text: str = None, context_files: List[str] = None) -> str:
748
+ """
749
+ Query with specific context but doesn't update main history
750
+
751
+ Args:
752
+ query: The user question
753
+ context_messages: List of message dicts for context
754
+ context_text: Plain text context (will be converted to system message)
755
+ context_files: List of file paths to read and include as context
756
+ """
757
+ messages = []
758
+
759
+ # Add system prompt
760
+ if self.system_prompt:
761
+ messages.append({"role": "system", "content": self.system_prompt})
762
+
763
+ # Handle different context types
764
+ if context_messages:
765
+ messages.extend(context_messages)
766
+
767
+ if context_text:
768
+ messages.append({"role": "system", "content": f"Additional context: {context_text}"})
769
+
770
+ if context_files:
771
+ file_context = await self._read_files_context(context_files)
772
+ if file_context:
773
+ messages.append({"role": "system", "content": f"File contents:\n{file_context}"})
774
+
775
+ # Add the actual query
776
+ messages.append({"role": "user", "content": query})
777
+
778
+ return await self.call_llm(messages, use_history=False)
779
+
780
+ async def _read_files_context(self, file_paths: List[str]) -> str:
781
+ """Read multiple files and return as context string"""
782
+ contexts = []
783
+ for file_path in file_paths:
784
+ try:
785
+ if os.path.exists(file_path):
786
+ with open(file_path, 'r', encoding='utf-8') as f:
787
+ content = f.read()
788
+ contexts.append(f"--- {os.path.basename(file_path)} ---\n{content}")
789
+ else:
790
+ console.log(f"[bold yellow]File not found: {file_path}[/bold yellow]")
791
+ except Exception as e:
792
+ console.log(f"[bold red]Error reading file {file_path}: {e}[/bold red]")
793
+
794
+ return "\n\n".join(contexts) if contexts else ""
795
+
796
+
797
+ async def query_with_code_context(self, query: str, code_snippets: List[str] = None,
798
+ code_files: List[str] = None) -> str:
799
+ """
800
+ Specialized contextual query for code-related questions
801
+ """
802
+ code_context = "CODE CONTEXT:\n"
803
+
804
+ if code_snippets:
805
+ for i, snippet in enumerate(code_snippets, 1):
806
+ code_context += f"\nSnippet {i}:\n```\n{snippet}\n```\n"
807
+
808
+ if code_files:
809
+ # Read code files and include them
810
+ for file_path in code_files:
811
+ if file_path.endswith(('.py', '.js', '.java', '.cpp', '.c', '.html', '.css')):
812
+ code_context += f"\nFile: {file_path}\n```\n"
813
+ try:
814
+ with open(file_path, 'r') as f:
815
+ code_context += f.read()
816
+ except Exception as e:
817
+ code_context += f"Error reading file: {e}"
818
+ code_context += "\n```\n"
819
+
820
+ return await self.contextual_query(query, context_text=code_context)
821
+
822
+ async def multi_context_query(self, query: str, contexts: Dict[str, Any]) -> str:
823
+ """
824
+ Advanced contextual query with multiple context types
825
+
826
+ Args:
827
+ query: The user question
828
+ contexts: Dict with various context types
829
+ - 'messages': List of message dicts
830
+ - 'text': Plain text context
831
+ - 'files': List of file paths
832
+ - 'urls': List of URLs
833
+ - 'code': List of code snippets or files
834
+ - 'metadata': Any additional metadata
835
+ """
836
+ all_context_messages = []
837
+
838
+ # Build context from different sources
839
+ if contexts.get('text'):
840
+ all_context_messages.append({"role": "system", "content": f"Context: {contexts['text']}"})
841
+
842
+ if contexts.get('messages'):
843
+ all_context_messages.extend(contexts['messages'])
844
+
845
+ if contexts.get('files'):
846
+ file_context = await self._read_files_context(contexts['files'])
847
+ if file_context:
848
+ all_context_messages.append({"role": "system", "content": f"File Contents:\n{file_context}"})
849
+
850
+ if contexts.get('code'):
851
+ code_context = "\n".join([f"Code snippet {i}:\n```\n{code}\n```"
852
+ for i, code in enumerate(contexts['code'], 1)])
853
+ all_context_messages.append({"role": "system", "content": f"Code Context:\n{code_context}"})
854
+
855
+ if contexts.get('metadata'):
856
+ all_context_messages.append({"role": "system", "content": f"Metadata: {contexts['metadata']}"})
857
+
858
+ return await self.contextual_query(query, context_messages=all_context_messages)
859
+
860
+
861
+ # --- Enhanced LLMAgent with Canvas Support ---
862
@dataclass
class CanvasArtifact:
    """A single item stored on the collaborative canvas."""
    id: str  # short unique identifier (uuid4 prefix)
    type: str  # 'code', 'diagram', 'text', 'image'
    content: str  # raw artifact payload
    title: str  # display name shown in the UI
    timestamp: float  # creation time from time.time()
    metadata: Dict[str, Any]  # extra info, e.g. owning conversation_id
870
+
871
class EnhancedLLMAgent:
    """LLM agent with per-conversation history, a collaborative canvas and TTS.

    Keeps chat histories and CanvasArtifact lists keyed by conversation id,
    auto-extracts fenced code blocks from replies onto the canvas, and can
    speak replies aloud via pyttsx3 when available.

    Fixes over the previous revision:
    - ``setup_tts`` was defined twice; the second definition silently
      overrode the first and re-initialized the engine that ``__init__``
      had already created. There is now a single ``setup_tts`` that owns
      engine creation.
    - ``_extract_artifacts_to_canvas`` reused the first code fence's
      language tag for every block; tags are now captured per block.
    - ``update_config`` accepted but ignored ``temperature``/``max_tokens``
      and never rebuilt the sync client; it now applies all settings.
    """

    def __init__(self, model_id: str = DEFAULT_MODEL_ID, system_prompt: str = None,
                 base_url: str = DEFAULT_BASE_URL, api_key: str = DEFAULT_API_KEY):
        self.model_id = model_id
        self.system_prompt = system_prompt or """You are an advanced AI development assistant operating in a Star Trek LCARS interface.
You specialize in code generation, analysis, and collaborative development.
Always provide practical, executable code solutions when appropriate.
Format code responses clearly with proper markdown code blocks and explain your reasoning."""
        self.base_url = base_url
        self.api_key = api_key
        self.client = OpenAI(base_url=base_url, api_key=api_key)

        # Per-conversation chat history and canvas artifacts, keyed by id.
        self.conversations: Dict[str, List[Dict]] = {}
        self.canvas_artifacts: Dict[str, List[CanvasArtifact]] = {}
        self.max_history_length = 50

        # Generation settings; updated via update_config.
        self.temperature = 0.7
        self.max_tokens = DEFAULT_MAX_TOKENS

        # Speech synthesis: setup_tts owns engine creation and sets
        # self.speech_enabled accordingly.
        self.speech_enabled = False
        self.setup_tts()

        console.log("[bold green]🚀 Enhanced LLM Agent Initialized[/bold green]")

    def setup_tts(self):
        """Initialize and configure the text-to-speech engine.

        Prefers a female/'zira' voice when one is available, otherwise the
        first voice. Sets ``self.speech_enabled`` to reflect whether TTS
        is usable on this host.
        """
        try:
            self.tts_engine = pyttsx3.init()
            voices = self.tts_engine.getProperty('voices')
            if voices:
                # Try to find a better voice; fall back to the first one.
                for voice in voices:
                    if 'female' in voice.name.lower() or 'zira' in voice.name.lower():
                        self.tts_engine.setProperty('voice', voice.id)
                        break
                else:
                    self.tts_engine.setProperty('voice', voices[0].id)

            self.tts_engine.setProperty('rate', 180)  # Slightly faster
            self.tts_engine.setProperty('volume', 1.0)  # Maximum volume
            self.speech_enabled = True
            console.log("[green]TTS engine initialized successfully[/green]")
        except Exception as e:
            console.log(f"[red]TTS initialization failed: {e}[/red]")
            self.speech_enabled = False

    def speak(self, text: str):
        """Convert text to speech on a daemon thread (non-blocking).

        Fenced and inline code is stripped so only prose is voiced; if
        stripping leaves nothing, the raw text is spoken instead.
        """
        if not getattr(self, 'speech_enabled', False):
            return

        def _speak():
            try:
                # Clean text for speech (remove markdown code blocks).
                clean_text = re.sub(r'```.*?```', '', text, flags=re.DOTALL)
                clean_text = re.sub(r'`.*?`', '', clean_text)
                clean_text = clean_text.strip()
                self.tts_engine.say(clean_text if clean_text else text)
                self.tts_engine.runAndWait()
            except Exception as e:
                console.log(f"[red]TTS Error: {e}[/red]")

        threading.Thread(target=_speak, daemon=True).start()

    def add_artifact_to_canvas(self, conversation_id: str, content: str, artifact_type: str = "code", title: str = None):
        """Add an artifact to the collaborative canvas and return it."""
        if conversation_id not in self.canvas_artifacts:
            self.canvas_artifacts[conversation_id] = []

        artifact = CanvasArtifact(
            id=str(uuid.uuid4())[:8],
            type=artifact_type,
            content=content,
            title=title or f"{artifact_type}_{len(self.canvas_artifacts[conversation_id]) + 1}",
            timestamp=time.time(),
            metadata={"conversation_id": conversation_id}
        )

        self.canvas_artifacts[conversation_id].append(artifact)
        console.log(f"[green]Added artifact to canvas: {artifact.title}[/green]")
        return artifact

    def get_canvas_context(self, conversation_id: str) -> str:
        """Format the last 10 canvas artifacts as context for LLM prompts.

        Returns "" when the conversation has no artifacts. Content is
        truncated to 500 characters per artifact to bound prompt size.
        """
        if conversation_id not in self.canvas_artifacts or not self.canvas_artifacts[conversation_id]:
            return ""

        context_lines = ["\n=== COLLABORATIVE CANVAS ARTIFACTS ==="]
        for artifact in self.canvas_artifacts[conversation_id][-10:]:
            context_lines.append(f"\n--- {artifact.title} [{artifact.type.upper()}] ---")
            preview = artifact.content[:500] + "..." if len(artifact.content) > 500 else artifact.content
            context_lines.append(preview)

        return "\n".join(context_lines) + "\n=================================\n"

    async def chat_with_canvas(self, message: str, conversation_id: str = "default", include_canvas: bool = True) -> str:
        """Run one chat turn, optionally grounding the LLM in canvas state.

        Updates the conversation history and auto-extracts fenced code
        blocks in the reply onto the canvas. Returns the reply text, or a
        human-readable error string on failure.
        """
        if conversation_id not in self.conversations:
            self.conversations[conversation_id] = []

        # System prompt first, then (optionally) a snapshot of the canvas.
        messages = [{"role": "system", "content": self.system_prompt}]
        if include_canvas:
            canvas_context = self.get_canvas_context(conversation_id)
            if canvas_context:
                messages.append({"role": "system", "content": f"Current collaborative canvas state:\n{canvas_context}"})

        # Bounded slice of prior turns plus the new user message.
        messages.extend(self.conversations[conversation_id][-self.max_history_length:])
        messages.append({"role": "user", "content": message})

        try:
            # Use the async client for better performance.
            async_client = AsyncOpenAI(base_url=self.base_url, api_key=self.api_key)
            response = await async_client.chat.completions.create(
                model=self.model_id,
                messages=messages,
                temperature=self.temperature,
                max_tokens=self.max_tokens
            )

            response_text = response.choices[0].message.content

            # Update conversation history with the completed exchange.
            self.conversations[conversation_id].extend([
                {"role": "user", "content": message},
                {"role": "assistant", "content": response_text}
            ])

            # Auto-extract code artifacts from the reply onto the canvas.
            self._extract_artifacts_to_canvas(response_text, conversation_id)

            return response_text

        except Exception as e:
            error_msg = f"Error in chat_with_canvas: {str(e)}"
            console.log(f"[red]{error_msg}[/red]")
            return error_msg

    def _extract_artifacts_to_canvas(self, response: str, conversation_id: str):
        """Extract fenced code blocks from *response* onto the canvas.

        The language tag is captured per fence (previously the first
        fence's tag was reused for every block). Blocks of 10 characters
        or fewer are ignored as trivial.
        """
        for lang, code_block in re.findall(r'```(\w+)?\n(.*?)```', response, re.DOTALL):
            code = code_block.strip()
            if len(code) > 10:
                self.add_artifact_to_canvas(
                    conversation_id,
                    code,
                    "code",
                    f"code_snippet_{lang or 'unknown'}_{len(self.canvas_artifacts.get(conversation_id, [])) + 1}"
                )

    def clear_conversation(self, conversation_id: str = "default"):
        """Clear conversation history but keep canvas artifacts."""
        if conversation_id in self.conversations:
            self.conversations[conversation_id] = []
            console.log(f"[yellow]Cleared conversation: {conversation_id}[/yellow]")

    def clear_canvas(self, conversation_id: str = "default"):
        """Clear canvas artifacts for a conversation."""
        if conversation_id in self.canvas_artifacts:
            self.canvas_artifacts[conversation_id] = []
            console.log(f"[yellow]Cleared canvas: {conversation_id}[/yellow]")

    def get_canvas_summary(self, conversation_id: str) -> List[Dict]:
        """Return display-friendly summaries of canvas artifacts, newest first."""
        if conversation_id not in self.canvas_artifacts:
            return []

        artifacts = []
        for artifact in reversed(self.canvas_artifacts[conversation_id]):
            artifacts.append({
                "id": artifact.id,
                "type": artifact.type.upper(),
                "title": artifact.title,
                "preview": artifact.content[:100] + "..." if len(artifact.content) > 100 else artifact.content,
                "timestamp": time.strftime("%H:%M:%S", time.localtime(artifact.timestamp))
            })

        return artifacts

    def get_artifact_by_id(self, conversation_id: str, artifact_id: str) -> Optional[CanvasArtifact]:
        """Look up a specific artifact by its id; None when absent."""
        if conversation_id not in self.canvas_artifacts:
            return None

        for artifact in self.canvas_artifacts[conversation_id]:
            if artifact.id == artifact_id:
                return artifact
        return None

    @staticmethod
    async def fetch_available_models(base_url: str, api_key: str) -> List[str]:
        """Fetch model ids from the API; falls back to a placeholder on error."""
        try:
            console.log(f"[blue]Fetching models from {base_url}[/blue]")
            async_client = AsyncOpenAI(base_url=base_url, api_key=api_key)
            models = await async_client.models.list()
            model_list = [model.id for model in models.data]
            console.log(f"[green]Found {len(model_list)} models[/green]")
            return model_list
        except Exception as e:
            console.log(f"[red]Error fetching models: {e}[/red]")
            return ["default-model"]

    def update_config(self, base_url: str, api_key: str, model_id: str, temperature: float, max_tokens: int):
        """Apply new connection and generation settings.

        Also rebuilds the sync client so the new endpoint/key take effect;
        temperature and max_tokens are now stored and used by
        chat_with_canvas instead of being silently discarded.
        """
        self.base_url = base_url
        self.api_key = api_key
        self.model_id = model_id
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.client = OpenAI(base_url=base_url, api_key=api_key)
        console.log(f"[blue]Updated config: {model_id} @ {base_url}[/blue]")
1109
+
1110
+ # --- LCARS Styled Gradio Interface ---
1111
class LcarsInterface:
    """LCARS-styled Gradio front-end for an EnhancedLLMAgent.

    Fix over the previous revision: the empty-message early return in
    ``process_message`` produced 3 values while the bound Gradio handlers
    declare 4 outputs, which raised at runtime; it now returns a 4-tuple.
    """

    def __init__(self, agent: EnhancedLLMAgent):
        self.agent = agent
        # All UI events operate on this single conversation id.
        self.current_conversation = "default"

    def create_interface(self):
        """Build and return the full LCARS-styled gr.Blocks interface."""

        # Enhanced LCARS CSS with proper Star Trek styling
        lcars_css = """
        :root {
            --lcars-orange: #FF9900;
            --lcars-red: #FF0033;
            --lcars-blue: #6699FF;
            --lcars-purple: #CC99FF;
            --lcars-pale-blue: #99CCFF;
            --lcars-black: #000000;
            --lcars-dark-blue: #3366CC;
            --lcars-gray: #424242;
            --lcars-yellow: #FFFF66;
        }

        body {
            background: var(--lcars-black);
            color: var(--lcars-orange);
            font-family: 'Antonio', 'LCD', 'Courier New', monospace;
            margin: 0;
            padding: 0;
        }

        .gradio-container {
            background: var(--lcars-black) !important;
            min-height: 100vh;
        }

        .lcars-container {
            background: var(--lcars-black);
            border: 4px solid var(--lcars-orange);
            border-radius: 0 30px 0 0;
            min-height: 100vh;
            padding: 20px;
        }

        .lcars-header {
            background: linear-gradient(90deg, var(--lcars-red), var(--lcars-orange));
            padding: 20px 40px;
            border-radius: 0 60px 0 0;
            margin: -20px -20px 20px -20px;
            border-bottom: 6px solid var(--lcars-blue);
            box-shadow: 0 4px 20px rgba(255, 153, 0, 0.3);
        }

        .lcars-title {
            font-size: 3em;
            font-weight: bold;
            color: var(--lcars-black);
            text-shadow: 3px 3px 6px rgba(255, 255, 255, 0.4);
            margin: 0;
            letter-spacing: 2px;
        }

        .lcars-subtitle {
            font-size: 1.4em;
            color: var(--lcars-black);
            margin: 10px 0 0 0;
            font-weight: bold;
        }

        .lcars-panel {
            background: linear-gradient(135deg, rgba(66, 66, 66, 0.9), rgba(40, 40, 40, 0.9));
            border: 3px solid var(--lcars-orange);
            border-radius: 0 25px 0 25px;
            padding: 20px;
            margin-bottom: 20px;
            box-shadow: 0 4px 15px rgba(255, 153, 0, 0.2);
        }

        .lcars-button {
            background: linear-gradient(135deg, var(--lcars-orange), var(--lcars-red));
            color: var(--lcars-black) !important;
            border: none !important;
            border-radius: 0 20px 0 20px !important;
            padding: 12px 24px !important;
            font-family: inherit !important;
            font-weight: bold !important;
            font-size: 1.1em !important;
            cursor: pointer !important;
            transition: all 0.3s ease !important;
            margin: 8px !important;
            box-shadow: 0 4px 8px rgba(255, 153, 0, 0.3) !important;
        }

        .lcars-button:hover {
            background: linear-gradient(135deg, var(--lcars-red), var(--lcars-orange)) !important;
            transform: translateY(-2px) !important;
            box-shadow: 0 6px 12px rgba(255, 153, 0, 0.4) !important;
        }

        .lcars-input {
            background: var(--lcars-black) !important;
            color: var(--lcars-orange) !important;
            border: 2px solid var(--lcars-blue) !important;
            border-radius: 0 15px 0 15px !important;
            padding: 12px !important;
            font-family: inherit !important;
            font-size: 1.1em !important;
        }

        .lcars-chatbot {
            background: var(--lcars-black) !important;
            border: 3px solid var(--lcars-purple) !important;
            border-radius: 0 20px 0 20px !important;
            min-height: 400px;
            max-height: 500px;
        }

        .lcars-code-editor {
            background: var(--lcars-black) !important;
            color: var(--lcars-pale-blue) !important;
            border: 3px solid var(--lcars-blue) !important;
            border-radius: 0 20px 0 20px !important;
            font-family: 'Fira Code', 'Courier New', monospace !important;
            font-size: 1em !important;
        }

        .user-message {
            background: linear-gradient(135deg, rgba(102, 153, 255, 0.2), rgba(51, 102, 204, 0.2)) !important;
            border-left: 6px solid var(--lcars-blue) !important;
            padding: 12px !important;
            margin: 8px 0 !important;
            border-radius: 0 15px 0 15px !important;
        }

        .assistant-message {
            background: linear-gradient(135deg, rgba(255, 153, 0, 0.2), rgba(255, 102, 0, 0.2)) !important;
            border-left: 6px solid var(--lcars-orange) !important;
            padding: 12px !important;
            margin: 8px 0 !important;
            border-radius: 0 15px 0 15px !important;
        }

        .artifact-item {
            background: linear-gradient(135deg, rgba(204, 153, 255, 0.15), rgba(153, 102, 204, 0.15));
            border: 2px solid var(--lcars-purple);
            padding: 10px;
            margin: 6px 0;
            border-radius: 0 12px 0 12px;
            cursor: pointer;
            transition: all 0.3s ease;
        }

        .artifact-item:hover {
            background: linear-gradient(135deg, rgba(204, 153, 255, 0.3), rgba(153, 102, 204, 0.3));
            transform: translateX(5px);
        }

        .status-indicator {
            display: inline-block;
            width: 16px;
            height: 16px;
            border-radius: 50%;
            background: var(--lcars-red);
            margin-right: 12px;
            box-shadow: 0 0 10px currentColor;
        }

        .status-online {
            background: var(--lcars-blue);
            animation: pulse 1.5s infinite;
        }

        @keyframes pulse {
            0% { transform: scale(1); opacity: 1; }
            50% { transform: scale(1.1); opacity: 0.7; }
            100% { transform: scale(1); opacity: 1; }
        }

        .panel-title {
            color: var(--lcars-yellow) !important;
            font-size: 1.4em !important;
            font-weight: bold !important;
            margin-bottom: 15px !important;
            border-bottom: 2px solid var(--lcars-orange);
            padding-bottom: 8px;
        }

        .gradio-accordion {
            border: 2px solid var(--lcars-orange) !important;
            border-radius: 0 20px 0 20px !important;
            margin-bottom: 20px !important;
        }

        .gradio-accordion .label {
            background: linear-gradient(90deg, var(--lcars-orange), var(--lcars-red)) !important;
            color: var(--lcars-black) !important;
            font-size: 1.3em !important;
            font-weight: bold !important;
            padding: 15px 20px !important;
        }
        """

        with gr.Blocks(css=lcars_css, theme=gr.themes.Default(), title="LCARS Terminal") as interface:

            with gr.Column(elem_classes="lcars-container"):
                # Header Section
                with gr.Row(elem_classes="lcars-header"):
                    gr.Markdown("""
                    <div style="text-align: center; width: 100%;">
                        <div class="lcars-title">🚀 LCARS TERMINAL v4.2</div>
                        <div class="lcars-subtitle">STARFLEET AI DEVELOPMENT CONSOLE</div>
                        <div style="margin-top: 10px;">
                            <span class="status-indicator status-online"></span>
                            <span style="color: var(--lcars-black); font-weight: bold;">SYSTEM ONLINE</span>
                        </div>
                    </div>
                    """)

                # Main Content Area
                with gr.Row():
                    # Left Sidebar - Controls and Configuration
                    with gr.Column(scale=1, min_width=400):
                        # Configuration Panel
                        with gr.Column(elem_classes="lcars-panel"):
                            gr.Markdown("### 🔧 SYSTEM CONFIGURATION", elem_classes="panel-title")

                            with gr.Row():
                                base_url = gr.Textbox(
                                    value=DEFAULT_BASE_URL,
                                    label="API Base URL",
                                    elem_classes="lcars-input"
                                )
                                api_key = gr.Textbox(
                                    value=DEFAULT_API_KEY,
                                    label="API Key",
                                    type="password",
                                    elem_classes="lcars-input"
                                )

                            with gr.Row():
                                model_dropdown = gr.Dropdown(
                                    choices=["Fetching models..."],
                                    value="default-model",
                                    label="AI Model",
                                    elem_classes="lcars-input"
                                )
                                fetch_models_btn = gr.Button("📡 Fetch Models", elem_classes="lcars-button")

                            with gr.Row():
                                temperature = gr.Slider(
                                    0.0, 2.0,
                                    value=0.7,
                                    label="Temperature",
                                    elem_classes="lcars-input"
                                )
                                max_tokens = gr.Slider(
                                    128, 8192,
                                    value=2000,
                                    step=128,
                                    label="Max Tokens",
                                    elem_classes="lcars-input"
                                )

                            with gr.Row():
                                update_config_btn = gr.Button("💾 Apply Config", elem_classes="lcars-button")
                                speech_toggle = gr.Checkbox(value=True, label="🔊 Speech Output")

                        # Canvas Artifacts Panel
                        with gr.Column(elem_classes="lcars-panel"):
                            gr.Markdown("### 🎨 CANVAS ARTIFACTS", elem_classes="panel-title")
                            artifact_display = gr.JSON(
                                label="",
                                elem_id="artifact-display"
                            )
                            with gr.Row():
                                refresh_artifacts_btn = gr.Button("🔄 Refresh", elem_classes="lcars-button")
                                clear_canvas_btn = gr.Button("🗑️ Clear Canvas", elem_classes="lcars-button")

                    # Main Content - Chat and Code Canvas
                    with gr.Column(scale=2):
                        # Collaborative Code Canvas
                        with gr.Accordion("💻 COLLABORATIVE CODE CANVAS", open=True):
                            code_editor = gr.Code(
                                value="# Welcome to LCARS Collaborative Canvas\n# Your code artifacts will appear here\n\nprint('Hello, Starfleet!')",
                                language="python",
                                lines=20,
                                label="",
                                elem_classes="lcars-code-editor"
                            )

                            with gr.Row():
                                load_to_chat_btn = gr.Button("💬 Discuss This Code", elem_classes="lcars-button")
                                analyze_btn = gr.Button("🔍 Analyze Code", elem_classes="lcars-button")
                                optimize_btn = gr.Button("⚡ Optimize", elem_classes="lcars-button")
                                document_btn = gr.Button("📚 Document", elem_classes="lcars-button")

                        # Chat Interface
                        with gr.Column(elem_classes="lcars-panel"):
                            gr.Markdown("### 💬 MISSION LOG", elem_classes="panel-title")
                            chatbot = gr.Chatbot(
                                label="",
                                elem_classes="lcars-chatbot",
                                show_label=False,
                                height=400
                            )

                            with gr.Row():
                                message_input = gr.Textbox(
                                    placeholder="Enter your command or query...",
                                    show_label=False,
                                    lines=2,
                                    elem_classes="lcars-input",
                                    scale=4
                                )
                                send_btn = gr.Button("🚀 TRANSMIT", elem_classes="lcars-button", scale=1)

                            # Status and Controls
                            with gr.Row():
                                status_display = gr.Textbox(
                                    value="LCARS terminal operational. Awaiting commands.",
                                    label="Status",
                                    max_lines=2,
                                    elem_classes="lcars-input"
                                )
                                with gr.Column(scale=0):
                                    clear_chat_btn = gr.Button("🗑️ Clear Chat", elem_classes="lcars-button")
                                    new_session_btn = gr.Button("🆕 New Session", elem_classes="lcars-button")

            # === EVENT HANDLERS ===

            async def fetch_and_update_models(base_url, api_key):
                """Fetch models and update dropdown"""
                try:
                    models = await EnhancedLLMAgent.fetch_available_models(base_url, api_key)
                    if models:
                        return gr.update(choices=models, value=models[0])
                    else:
                        return gr.update(choices=["No models found"], value="No models found")
                except Exception as e:
                    console.log(f"[red]Error fetching models: {e}[/red]")
                    return gr.update(choices=[f"Error: {str(e)}"], value=f"Error: {str(e)}")

            def update_agent_config(base_url, api_key, model_id, temperature_val, max_tokens_val):
                """Update agent configuration"""
                try:
                    self.agent.update_config(base_url, api_key, model_id, temperature_val, max_tokens_val)
                    return f"✅ Configuration updated: {model_id}"
                except Exception as e:
                    return f"❌ Config error: {str(e)}"

            def get_artifacts():
                """Get current canvas artifacts"""
                return self.agent.get_canvas_summary(self.current_conversation)

            def clear_canvas():
                """Clear the canvas"""
                self.agent.clear_canvas(self.current_conversation)
                return [], "✅ Canvas cleared"

            async def process_message(message, history, speech_enabled):
                """Process a chat message.

                Always returns a 4-tuple matching the bound outputs:
                (message_input, chatbot, status_display, artifact_display).
                """
                if not message.strip():
                    # FIX: previously returned only 3 values, which broke
                    # the 4-output event binding at runtime.
                    return "", history, "Please enter a message", get_artifacts()

                # Add user message to history
                history = history + [[message, None]]

                try:
                    # Get AI response
                    response = await self.agent.chat_with_canvas(
                        message,
                        self.current_conversation,
                        include_canvas=True
                    )

                    # Update history with response
                    history[-1][1] = response

                    # Speech synthesis if enabled
                    if speech_enabled and self.agent.speech_enabled:
                        self.agent.speak(response)

                    # Get updated artifacts
                    artifacts = get_artifacts()

                    status = f"✅ Response received. Canvas artifacts: {len(artifacts)}"
                    return "", history, status, artifacts

                except Exception as e:
                    error_msg = f"❌ Error: {str(e)}"
                    history[-1][1] = error_msg
                    return "", history, error_msg, get_artifacts()

            def load_code_to_chat(code):
                """Load code from canvas into chat"""
                if not code.strip():
                    return ""
                return f"Please analyze this code:\n```python\n{code}\n```"

            def analyze_code(code):
                """Quick analysis of code"""
                if not code.strip():
                    return "Please provide some code to analyze"
                return f"Perform a comprehensive analysis of this code:\n```python\n{code}\n```"

            def optimize_code(code):
                """Quick optimization request"""
                if not code.strip():
                    return "Please provide some code to optimize"
                return f"Optimize this code for performance and best practices:\n```python\n{code}\n```"

            def document_code(code):
                """Quick documentation request"""
                if not code.strip():
                    return "Please provide some code to document"
                return f"Generate comprehensive documentation for this code:\n```python\n{code}\n```"

            def clear_chat():
                """Clear chat history"""
                self.agent.clear_conversation(self.current_conversation)
                return [], "✅ Chat cleared"

            def new_session():
                """Start new session"""
                self.agent.clear_conversation(self.current_conversation)
                self.agent.clear_canvas(self.current_conversation)
                return [], "# New collaborative session started\n\nprint('Ready for development!')", "🆕 New session started", []

            # Connect event handlers
            fetch_models_btn.click(
                fetch_and_update_models,
                inputs=[base_url, api_key],
                outputs=model_dropdown
            )

            update_config_btn.click(
                update_agent_config,
                inputs=[base_url, api_key, model_dropdown, temperature, max_tokens],
                outputs=status_display
            )

            send_btn.click(
                process_message,
                inputs=[message_input, chatbot, speech_toggle],
                outputs=[message_input, chatbot, status_display, artifact_display]
            )

            message_input.submit(
                process_message,
                inputs=[message_input, chatbot, speech_toggle],
                outputs=[message_input, chatbot, status_display, artifact_display]
            )

            load_to_chat_btn.click(
                load_code_to_chat,
                inputs=code_editor,
                outputs=message_input
            )

            analyze_btn.click(
                analyze_code,
                inputs=code_editor,
                outputs=message_input
            )

            optimize_btn.click(
                optimize_code,
                inputs=code_editor,
                outputs=message_input
            )

            document_btn.click(
                document_code,
                inputs=code_editor,
                outputs=message_input
            )

            refresh_artifacts_btn.click(
                get_artifacts,
                outputs=artifact_display
            )

            clear_canvas_btn.click(
                clear_canvas,
                outputs=[artifact_display, status_display]
            )

            clear_chat_btn.click(
                clear_chat,
                outputs=[chatbot, status_display]
            )

            new_session_btn.click(
                new_session,
                outputs=[chatbot, code_editor, status_display, artifact_display]
            )

            # Initialize artifacts on load
            interface.load(get_artifacts, outputs=artifact_display)

        return interface
1611
+
1612
+ # --- Main Application ---
1613
def main():
    """Entry point: build the agent, assemble the UI, and launch it."""
    console.log("[bold blue]🚀 Starting LCARS Enhanced Interface...[/bold blue]")

    try:
        # Wire the enhanced agent into the LCARS front-end.
        agent = EnhancedLLMAgent()
        ui = LcarsInterface(agent)
        demo = ui.create_interface()

        console.log("[bold green]✅ LCARS Interface Ready - Launching...[/bold green]")
        demo.launch(share=True, show_error=True, inbrowser=True)
    except Exception as e:
        # Log the failure, then propagate so the process exits non-zero.
        console.log(f"[bold red]Failed to start application: {e}[/bold red]")
        raise
1633
+
1634
+
1635
+
1636
# Script entry point: launch the LCARS interface when run directly.
if __name__ == "__main__":
    main()