diff --git "a/app.py" "b/app.py"
--- "a/app.py"
+++ "b/app.py"
@@ -15,54 +15,14 @@
 from pathlib import Path
 import gradio as gr
 from rich.console import Console
 from openai import OpenAI, AsyncOpenAI
-import asyncio
-from collections import defaultdict
-import json
 import os
-import queue
-import traceback
 import uuid
 from typing import Dict, List, Any, Optional, Callable, Coroutine
 from dataclasses import dataclass
-from queue import Queue, Empty
-from threading import Lock, Event, Thread
 import threading
-from concurrent.futures import ThreadPoolExecutor
 import time
-from openai import OpenAI, AsyncOpenAI
-from rich.console import Console
-import gradio as gr
 import pyttsx3
 import re
-from pathlib import Path
-#############################################################
-BASE_URL="http://localhost:1234/v1"
-BASE_API_KEY="not-needed"
-BASE_CLIENT = AsyncOpenAI(
-    base_url=BASE_URL,
-    api_key=BASE_API_KEY
-) # Global state for client
-BASEMODEL_ID = "https://huggingface.co/LeroyDyer/_Starfleet_II_-Q4_K_S-GGUF/resolve/main/_starfleet_ii_-q4_k_s.gguf" # Global state for selected model ID
-CLIENT = OpenAI(
-    base_url=BASE_URL,
-    api_key=BASE_API_KEY)
-# --- Configuration ---
-DEFAULT_BASE_URL = "http://localhost:1234/v1"
-DEFAULT_API_KEY = "not-needed"
-DEFAULT_MODEL_ID = "https://huggingface.co/LeroyDyer/_Starfleet_II_-Q4_K_S-GGUF/resolve/main/_starfleet_ii_-q4_k_s.gguf"
-DEFAULT_TEMPERATURE = 0.3
-DEFAULT_MAX_TOKENS = 5000
-
-# Add this configuration section at the top
-import os
-# Configuration that works for both local and HuggingFace Spaces
+# --- Configuration ---
 LOCAL_BASE_URL = "http://localhost:1234/v1"
 LOCAL_API_KEY = "not-needed"
-# HuggingFace Spaces configuration - using free inference endpoints
+# HuggingFace Spaces configuration
 HF_INFERENCE_URL = "https://api-inference.huggingface.co/models/"
-HF_API_KEY = os.getenv("HF_API_KEY", "") # Set this in Spaces secrets
+HF_API_KEY = os.getenv("HF_API_KEY", "")
 
 # Available model options
 MODEL_OPTIONS = {
@@ -72,460 +32,13 @@ MODEL_OPTIONS = {
     "Llama 2 7B": "meta-llama/Llama-2-7b-chat-hf",
     "Falcon 7B": "tiiuae/falcon-7b-instruct"
 }
-console = Console()
-class EventManager:
-    def __init__(self):
-        self._handlers = defaultdict(list)
-        self._lock = threading.Lock()
-    def register(self, event: str, handler: Callable):
-        with self._lock:
-            self._handlers[event].append(handler)
-    def unregister(self, event: str, handler: Callable):
-        with self._lock:
-            if event in self._handlers and handler in self._handlers[event]:
-                self._handlers[event].remove(handler)
-    def raise_event(self, event: str, data: Any):
-        with self._lock:
-            handlers = self._handlers[event][:]
-        for handler in handlers:
-            try:
-                handler(data)
-            except Exception as e:
-                console.log(f"Error in event handler for {event}: {e}", style="bold red")
-
-EVENT_MANAGER = EventManager()
-def RegisterEvent(event: str, handler: Callable):
-    EVENT_MANAGER.register(event, handler)
-def RaiseEvent(event: str, data: Any):
-    EVENT_MANAGER.raise_event(event, data)
-def UnregisterEvent(event: str, handler: Callable):
-    EVENT_MANAGER.unregister(event, handler)
-@dataclass
-class LLMMessage:
-    role: str
-    content: str
-    message_id: str = None
-    conversation_id: str = None
-    timestamp: float = None
-    metadata: Dict[str, Any] = None
-    def __post_init__(self):
-        if self.message_id is None:
-            self.message_id = str(uuid.uuid4())
-        if self.timestamp is None:
-            self.timestamp = time.time()
-        if self.metadata is None:
-            self.metadata = {}
-
-@dataclass
-class LLMRequest:
-    message: LLMMessage
-    response_event: str = None
-    callback: Callable = None
-    def __post_init__(self):
-        if self.response_event is None:
-            self.response_event = f"llm_response_{self.message.message_id}"
-
-@dataclass
-class LLMResponse:
-    message: LLMMessage
-    request_id: str
-    success: bool = True
-    error: str = None
-
-class LLMAgent:
-    """Main Agent Driver !
-    Agent For Multiple messages at once ,
-    has a message queing service as well as agenerator method for easy intergration with console
-    applications as well as ui !"""
-    def __init__(
-        self,
-        model_id: str = DEFAULT_MODEL_ID,
-        system_prompt: str = None,
-        max_queue_size: int = 1000,
-        max_retries: int = 3,
-        timeout: int = 30000,
-        max_tokens: int = 5000,
-        temperature: float = 0.3,
-        base_url: str = "http://localhost:1234/v1",
-        api_key: str = "not-needed",
-        generate_fn: Callable[[List[Dict[str, str]]], Coroutine[Any, Any, str]] = None
-    ):
-        self.model_id = model_id
-        self.system_prompt = system_prompt or "You are a helpful AI assistant."
-        self.request_queue = Queue(maxsize=max_queue_size)
-        self.max_retries = max_retries
-        self.timeout = timeout
-        self.is_running = False
-        self._stop_event = Event()
-        self.processing_thread = None
-        # Conversation tracking
-        self.conversations: Dict[str, List[LLMMessage]] = {}
-        self.max_history_length = 20
-        self._generate = generate_fn or self._default_generate
-        self.api_key = api_key
-        self.base_url = base_url
-        self.max_tokens = max_tokens
-        self.temperature = temperature
-        self.async_client = self.CreateClient(base_url, api_key)
-        # Active requests waiting for responses
-        self.pending_requests: Dict[str, LLMRequest] = {}
-        self.pending_requests_lock = Lock()
-        # Register internal event handlers
-        self._register_event_handlers()
-        # Start the processing thread immediately
-        self.start()
-
-    async def _default_generate(self, messages: List[Dict[str, str]]) -> str:
-        """Default generate function if none provided"""
-        return await self.openai_generate(messages)
-
-    def _register_event_handlers(self):
-        """Register internal event handlers for response routing"""
-        RegisterEvent("llm_internal_response", self._handle_internal_response)
-
-    def _handle_internal_response(self, response: LLMResponse):
-        """Route responses to the appropriate request handlers"""
-        console.log(f"[bold cyan]Handling internal response for: {response.request_id}[/bold cyan]")
-        request = None
-        with self.pending_requests_lock:
-            if response.request_id in self.pending_requests:
-                request = self.pending_requests[response.request_id]
-                del self.pending_requests[response.request_id]
-                console.log(f"Found pending request for: {response.request_id}")
-            else:
-                console.log(f"No pending request found for: {response.request_id}", style="yellow")
-                return
-        # Raise the specific response event
-        if request.response_event:
-            console.log(f"[bold green]Raising event: {request.response_event}[/bold green]")
-            RaiseEvent(request.response_event, response)
-        # Call callback if provided
-        if request.callback:
-            try:
-                console.log(f"[bold yellow]Calling callback for: {response.request_id}[/bold yellow]")
-                request.callback(response)
-            except Exception as e:
-                console.log(f"Error in callback: {e}", style="bold red")
-
-    def _add_to_conversation_history(self, conversation_id: str, message: LLMMessage):
-        """Add message to conversation history"""
-        if conversation_id not in self.conversations:
-            self.conversations[conversation_id] = []
-        self.conversations[conversation_id].append(message)
-        # Trim history if too long
-        if len(self.conversations[conversation_id]) > self.max_history_length * 2:
-            self.conversations[conversation_id] = self.conversations[conversation_id][-(self.max_history_length * 2):]
-
-    def _build_messages_from_conversation(self, conversation_id: str, new_message: LLMMessage) -> List[Dict[str, str]]:
-        """Build message list from conversation history"""
-        messages = []
-        # Add system prompt
-        if self.system_prompt:
-            messages.append({"role": "system", "content": self.system_prompt})
-        # Add conversation history
-        if conversation_id in self.conversations:
-            for msg in self.conversations[conversation_id][-self.max_history_length:]:
-                messages.append({"role": msg.role, "content": msg.content})
-        # Add the new message
-        messages.append({"role": new_message.role, "content": new_message.content})
-        return messages
-
-    def _process_llm_request(self, request: LLMRequest):
-        """Process a single LLM request"""
-        console.log(f"[bold green]Processing LLM request: {request.message.message_id}[/bold green]")
-        try:
-            # Build messages for LLM
-            messages = self._build_messages_from_conversation(
-                request.message.conversation_id or "default",
-                request.message
-            )
-            console.log(f"Calling LLM with {len(messages)} messages")
-            # Call LLM - Use sync call for thread compatibility
-            response_content = self._call_llm_sync(messages)
-            console.log(f"[bold green]LLM response received: {response_content}...[/bold green]")
-            # Create response message
-            response_message = LLMMessage(
-                role="assistant",
-                content=response_content,
-                conversation_id=request.message.conversation_id,
-                metadata={"request_id": request.message.message_id}
-            )
-            # Update conversation history
-            self._add_to_conversation_history(
-                request.message.conversation_id or "default",
-                request.message
-            )
-            self._add_to_conversation_history(
-                request.message.conversation_id or "default",
-                response_message
-            )
-            # Create and send response
-            response = LLMResponse(
-                message=response_message,
-                request_id=request.message.message_id,
-                success=True
-            )
-            console.log(f"[bold blue]Sending internal response for: {request.message.message_id}[/bold blue]")
-            RaiseEvent("llm_internal_response", response)
-        except Exception as e:
-            console.log(f"[bold red]Error processing LLM request: {e}[/bold red]")
-            traceback.print_exc()
-            # Create error response
-            error_response = LLMResponse(
-                message=LLMMessage(
-                    role="system",
-                    content=f"Error: {str(e)}",
-                    conversation_id=request.message.conversation_id
-                ),
-                request_id=request.message.message_id,
-                success=False,
-                error=str(e)
-            )
-            RaiseEvent("llm_internal_response", error_response)
-
-    def _call_llm_sync(self, messages: List[Dict[str, str]]) -> str:
-        """Sync call to the LLM with retry logic"""
-        console.log(f"Making LLM call to {self.model_id}")
-        for attempt in range(self.max_retries):
-            try:
-                response = CLIENT.chat.completions.create(
-                    model=self.model_id,
-                    messages=messages,
-                    temperature=self.temperature,
-                    max_tokens=self.max_tokens
-                )
-                content = response.choices[0].message.content
-                console.log(f"LLM call successful, response length: {len(content)}")
-                return content
-            except Exception as e:
-                console.log(f"LLM call attempt {attempt + 1} failed: {e}")
-                if attempt == self.max_retries - 1:
-                    raise e
-                time.sleep(1)  # Wait before retry
-    def _process_queue(self):
-        """Main queue processing loop"""
-        console.log("[bold cyan]LLM Agent queue processor started[/bold cyan]")
-        while not self._stop_event.is_set():
-            try:
-                request = self.request_queue.get(timeout=1.0)
-                if request:
-                    console.log(f"Got request from queue: {request.message.message_id}")
-                    self._process_llm_request(request)
-                    self.request_queue.task_done()
-            except Empty:
-                continue
-            except Exception as e:
-                console.log(f"Error in queue processing: {e}", style="bold red")
-                traceback.print_exc()
-        console.log("[bold cyan]LLM Agent queue processor stopped[/bold cyan]")
-
-    def send_message(
-        self,
-        content: str,
-        role: str = "user",
-        conversation_id: str = None,
-        response_event: str = None,
-        callback: Callable = None,
-        metadata: Dict = None
-    ) -> str:
-        """Send a message to the LLM and get response via events"""
-        if not self.is_running:
-            raise RuntimeError("LLM Agent is not running. Call start() first.")
-        # Create message
-        message = LLMMessage(
-            role=role,
-            content=content,
-            conversation_id=conversation_id,
-            metadata=metadata or {}
-        )
-        # Create request
-        request = LLMRequest(
-            message=message,
-            response_event=response_event,
-            callback=callback
-        )
-        # Store in pending requests BEFORE adding to queue
-        with self.pending_requests_lock:
-            self.pending_requests[message.message_id] = request
-            console.log(f"Added to pending requests: {message.message_id}")
-        # Add to queue
-        try:
-            self.request_queue.put(request, timeout=5.0)
-            console.log(f"[bold magenta]Message queued: {message.message_id}, Content: {content[:50]}...[/bold magenta]")
-            return message.message_id
-        except queue.Full:
-            console.log(f"[bold red]Queue full, cannot send message[/bold red]")
-            with self.pending_requests_lock:
-                if message.message_id in self.pending_requests:
-                    del self.pending_requests[message.message_id]
-            raise RuntimeError("LLM Agent queue is full")
-
-    async def chat(self, messages: List[Dict[str, str]]) -> str:
-        """
-        Async chat method that sends message via queue and returns response string.
-        This is the main method you should use.
-        """
-        # Create future for the response
-        loop = asyncio.get_event_loop()
-        response_future = loop.create_future()
-        def chat_callback(response: LLMResponse):
-            """Callback when LLM responds - thread-safe"""
-            console.log(f"[bold yellow]✓ CHAT CALLBACK TRIGGERED![/bold yellow]")
-            if not response_future.done():
-                if response.success:
-                    content = response.message.content
-                    console.log(f"Callback received content: {content}...")
-                    # Schedule setting the future result on the main event loop
-                    loop.call_soon_threadsafe(response_future.set_result, content)
-                else:
-                    console.log(f"Error in response: {response.error}")
-                    error_msg = f"❌ Error: {response.error}"
-                    loop.call_soon_threadsafe(response_future.set_result, error_msg)
-            else:
-                console.log(f"[bold red]Future already done, ignoring callback[/bold red]")
-        console.log(f"Sending message to LLM agent...")
-        # Extract the actual message content from the messages list
-        user_message = ""
-        for msg in messages:
-            if msg.get("role") == "user":
-                user_message = msg.get("content", "")
-                break
-        if not user_message.strip():
-            return ""
-        # Send message with callback using the queue system
-        try:
-            message_id = self.send_message(
-                content=user_message,
-                conversation_id="default",
-                callback=chat_callback
-            )
-            console.log(f"Message sent with ID: {message_id}, waiting for response...")
-            # Wait for the response and return it
-            try:
-                response = await asyncio.wait_for(response_future, timeout=self.timeout)
-                console.log(f"[bold green]✓ Chat complete! Response length: {len(response)}[/bold green]")
-                return response
-            except asyncio.TimeoutError:
-                console.log("[bold red]Response timeout[/bold red]")
-                # Clean up the pending request
-                with self.pending_requests_lock:
-                    if message_id in self.pending_requests:
-                        del self.pending_requests[message_id]
-                return "❌ Response timeout - check if LLM server is running"
-        except Exception as e:
-            console.log(f"[bold red]Error sending message: {e}[/bold red]")
-            traceback.print_exc()
-            return f"❌ Error sending message: {e}"
-
-    def start(self):
-        """Start the LLM agent"""
-        if not self.is_running:
-            self.is_running = True
-            self._stop_event.clear()
-            self.processing_thread = Thread(target=self._process_queue, daemon=True)
-            self.processing_thread.start()
-            console.log("[bold green]LLM Agent started[/bold green]")
-
-    def stop(self):
-        """Stop the LLM agent"""
-        console.log("Stopping LLM Agent...")
-        self._stop_event.set()
-        if self.processing_thread and self.processing_thread.is_alive():
-            self.processing_thread.join(timeout=10)
-        self.is_running = False
-        console.log("LLM Agent stopped")
-
-    def get_conversation_history(self, conversation_id: str = "default") -> List[LLMMessage]:
-        """Get conversation history"""
-        return self.conversations.get(conversation_id, [])[:]
-
-    def clear_conversation(self, conversation_id: str = "default"):
-        """Clear conversation history"""
-        if conversation_id in self.conversations:
-            del self.conversations[conversation_id]
-
-    async def _chat(self, messages: List[Dict[str, str]]) -> str:
-        return await self._generate(messages)
-
-    @staticmethod
-    async def openai_generate(messages: List[Dict[str, str]], max_tokens: int = 8096, temperature: float = 0.4, model: str = DEFAULT_MODEL_ID, tools=None) -> str:
-        """Static method for generating responses using OpenAI API"""
-        try:
-            resp = await BASE_CLIENT.chat.completions.create(
-                model=model,
-                messages=messages,
-                temperature=temperature,
-                max_tokens=max_tokens,
-                tools=tools
-            )
-            response_text = resp.choices[0].message.content or ""
-            return response_text
-        except Exception as e:
-            console.log(f"[bold red]Error in openai_generate: {e}[/bold red]")
-            return f"[LLM_Agent Error - openai_generate: {str(e)}]"
-
-    async def _call_(self, messages: List[Dict[str, str]]) -> str:
-        """Internal call method using instance client"""
-        try:
-            resp = await self.async_client.chat.completions.create(
-                model=self.model_id,
-                messages=messages,
-                temperature=self.temperature,
-                max_tokens=self.max_tokens
-            )
-            response_text = resp.choices[0].message.content or ""
-            return response_text
-        except Exception as e:
-            console.log(f"[bold red]Error in _call_: {e}[/bold red]")
-            return f"[LLM_Agent Error - _call_: {str(e)}]"
-
-    @staticmethod
-    def CreateClient(base_url: str, api_key: str) -> AsyncOpenAI:
-        '''Create async OpenAI Client required for multi tasking'''
-        return AsyncOpenAI(
-            base_url=base_url,
-            api_key=api_key
-        )
-
-    @staticmethod
-    async def fetch_available_models(base_url: str, api_key: str) -> List[str]:
-        """Fetches available models from the OpenAI API."""
-        try:
-            async_client = AsyncOpenAI(base_url=base_url, api_key=api_key)
-            models = await async_client.models.list()
-            model_choices = [model.id for model in models.data]
-            return model_choices
-        except Exception as e:
-            console.log(f"[bold red]LLM_Agent Error fetching models: {e}[/bold red]")
-            return ["LLM_Agent Error fetching models"]
-
-    def get_models(self) -> List[str]:
-        """Get available models using instance credentials"""
-        return asyncio.run(self.fetch_available_models(self.base_url, self.api_key))
-
-    def get_queue_size(self) -> int:
-        """Get current queue size"""
-        return self.request_queue.qsize()
-
-    def get_pending_requests_count(self) -> int:
-        """Get number of pending requests"""
-        with self.pending_requests_lock:
-            return len(self.pending_requests)
-
-    def get_status(self) -> Dict[str, Any]:
-        """Get agent status information"""
-        return {
-            "is_running": self.is_running,
-            "queue_size": self.get_queue_size(),
-            "pending_requests": self.get_pending_requests_count(),
-            "conversations_count": len(self.conversations),
-            "model": self.model_id
-        }
-
+DEFAULT_TEMPERATURE = 0.7
+DEFAULT_MAX_TOKENS = 5000
 console = Console()
-# --- Enhanced LLMAgent with Canvas Support ---
+# --- Canvas Artifact Dataclass ---
 @dataclass
 class CanvasArtifact:
     id: str
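Net of the hunks above, the module-level configuration is reduced to the local LM Studio endpoint, the HuggingFace Inference API base URL, and the `MODEL_OPTIONS` table. A minimal sketch of how a selection from that table could be resolved to connection settings; the `resolve_endpoint` helper and the value of the `"Local LM Studio"` entry are assumptions (the diff shows only the table's last two entries, and a later comment implies `"Local LM Studio"` is its first key):

```python
import os

LOCAL_BASE_URL = "http://localhost:1234/v1"
LOCAL_API_KEY = "not-needed"
HF_INFERENCE_URL = "https://api-inference.huggingface.co/models/"
HF_API_KEY = os.getenv("HF_API_KEY", "")  # set in Spaces secrets

MODEL_OPTIONS = {
    "Local LM Studio": "local-model",  # assumed first entry; not shown in the diff
    "Llama 2 7B": "meta-llama/Llama-2-7b-chat-hf",
    "Falcon 7B": "tiiuae/falcon-7b-instruct",
}

def resolve_endpoint(choice: str) -> tuple[str, str, str]:
    """Map a MODEL_OPTIONS key to (base_url, api_key, model_id). Hypothetical helper."""
    model_id = MODEL_OPTIONS[choice]
    if choice == "Local LM Studio":
        return LOCAL_BASE_URL, LOCAL_API_KEY, model_id
    # Everything else is routed to the HF Inference API with the Spaces token.
    return HF_INFERENCE_URL, HF_API_KEY, model_id
```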
@@ -535,23 +48,24 @@ class CanvasArtifact:
     timestamp: float
     metadata: Dict[str, Any]
 
+# --- Enhanced LLMAgent with Canvas Support ---
 class EnhancedLLMAgent:
-    def __init__(self, model_id: str = DEFAULT_MODEL_ID, system_prompt: str = None,
-                 base_url: str = LOCAL_BASE_URL, api_key: str = LOCAL_API_KEY, use_huggingface: bool = False):
+    def __init__(self, model_id: str = "local-model", system_prompt: str = None,
+                 base_url: str = LOCAL_BASE_URL, api_key: str = LOCAL_API_KEY,
+                 use_huggingface: bool = False):
+
+        self.use_huggingface = use_huggingface
         self.model_id = model_id
         self.system_prompt = system_prompt or """You are an advanced AI development assistant operating in a Star Trek LCARS interface.
 You specialize in code generation, analysis, and collaborative development.
 Always provide practical, executable code solutions when appropriate.
 Format code responses clearly with proper markdown code blocks and explain your reasoning."""
-        self.base_url = base_url
-        self.api_key = api_key
-        self.client = OpenAI(base_url=base_url, api_key=api_key)
-        self.use_huggingface = use_huggingface
+
         if use_huggingface:
             # Use HuggingFace Inference API
-            self.base_url = "https://api-inference.huggingface.co/models/"
+            self.base_url = HF_INFERENCE_URL
             self.api_key = HF_API_KEY
-            self.client = None # We'll use requests for HF
+            self.client = None
             console.log("[green]🚀 Using HuggingFace Inference API[/green]")
         else:
             # Use local LM Studio
@@ -559,7 +73,7 @@ class EnhancedLLMAgent:
             self.api_key = api_key
             self.client = OpenAI(base_url=base_url, api_key=api_key)
             console.log(f"[green]🚀 Using Local LM Studio: {base_url}[/green]")
-        
+
         # Enhanced conversation and canvas management
         self.conversations: Dict[str, List[Dict]] = {}
         self.canvas_artifacts: Dict[str, List[CanvasArtifact]] = {}
@@ -570,11 +84,28 @@
             self.tts_engine = pyttsx3.init()
             self.setup_tts()
             self.speech_enabled = True
+            console.log("[green]TTS engine initialized successfully[/green]")
         except Exception as e:
-            console.log(f"[yellow]TTS not available: {e}[/yellow]")
+            console.log(f"[red]TTS initialization failed: {e}[/red]")
             self.speech_enabled = False
-
-        console.log("[bold green]🚀 Enhanced LLM Agent Initialized[/bold green]")
+
+    def setup_tts(self):
+        """Configure text-to-speech engine"""
+        try:
+            voices = self.tts_engine.getProperty('voices')
+            if voices:
+                # Try to find a better voice
+                for voice in voices:
+                    if 'female' in voice.name.lower() or 'zira' in voice.name.lower():
+                        self.tts_engine.setProperty('voice', voice.id)
+                        break
+                else:
+                    self.tts_engine.setProperty('voice', voices[0].id)
+
+            self.tts_engine.setProperty('rate', 180)
+            self.tts_engine.setProperty('volume', 1.0)
+        except Exception as e:
console.log(f"[red]TTS setup error: {e}[/red]") def speak(self, text: str): """Convert text to speech in a non-blocking way""" @@ -583,88 +114,80 @@ class EnhancedLLMAgent: def _speak(): try: - # Clean text for speech (remove markdown, code blocks) + # Clean text for speech clean_text = re.sub(r'```.*?```', '', text, flags=re.DOTALL) clean_text = re.sub(r'`.*?`', '', clean_text) + clean_text = re.sub(r'\n+', '. ', clean_text) + clean_text = re.sub(r'\s+', ' ', clean_text) clean_text = clean_text.strip() - if clean_text: - self.tts_engine.say(clean_text) # Limit length + + if clean_text and len(clean_text) > 10: + console.log(f"[blue]Speaking: {clean_text[:100]}...[/blue]") + self.tts_engine.say(clean_text[:400]) self.tts_engine.runAndWait() - else: - self.tts_engine.say(text) # Limit length - self.tts_engine.runAndWait() except Exception as e: console.log(f"[red]TTS Error: {e}[/red]") thread = threading.Thread(target=_speak, daemon=True) thread.start() - def setup_tts(self): - """Configure text-to-speech engine""" - try: - self.tts_engine = pyttsx3.init() - voices = self.tts_engine.getProperty('voices') - if voices: - # Try to find a better voice - for voice in voices: - if 'female' in voice.name.lower() or 'zira' in voice.name.lower(): - self.tts_engine.setProperty('voice', voice.id) - break - else: - self.tts_engine.setProperty('voice', voices[0].id) - - self.tts_engine.setProperty('rate', 180) # Slightly faster - self.tts_engine.setProperty('volume', 1.0) # Maximum volume - self.speech_enabled = True - console.log("[green]TTS engine initialized successfully[/green]") - except Exception as e: - console.log(f"[red]TTS initialization failed: {e}[/red]") - self.speech_enabled = False async def _local_inference(self, messages: List[Dict]) -> str: """Use local LM Studio""" - async_client = AsyncOpenAI(base_url=self.base_url, api_key=self.api_key) - response = await async_client.chat.completions.create( - model=self.model_id, - messages=messages, - temperature=0.7, - max_tokens=DEFAULT_MAX_TOKENS - ) - return response.choices[0].message.content + try: + async_client = AsyncOpenAI(base_url=self.base_url, api_key=self.api_key) + response = await async_client.chat.completions.create( + model=self.model_id, + messages=messages, + temperature=0.7, + max_tokens=DEFAULT_MAX_TOKENS + ) + return response.choices[0].message.content + except Exception as e: + return f"Local inference error: {str(e)}" async def _hf_inference(self, messages: List[Dict]) -> str: """Use HuggingFace Inference API""" - import requests - import json - - # Convert to HF format - prompt = self._convert_messages_to_prompt(messages) - - headers = { - "Authorization": f"Bearer {self.api_key}", - "Content-Type": "application/json" - } - - payload = { - "inputs": prompt, - "parameters": { - "max_new_tokens": DEFAULT_MAX_TOKENS, - "temperature": 0.7, - "do_sample": True, - "return_full_text": False - } - } - - # Use the selected model - model_url = f"{self.base_url}{self.model_id}" - try: - response = requests.post(model_url, headers=headers, json=payload) + import requests + # Convert to HF format + prompt = self._convert_messages_to_prompt(messages) + + headers = { + "Authorization": f"Bearer {self.api_key}", + "Content-Type": "application/json" + } + + payload = { + "inputs": prompt, + "parameters": { + "max_new_tokens": DEFAULT_MAX_TOKENS, + "temperature": 0.7, + "do_sample": True, + "return_full_text": False + } + } + + model_url = f"{self.base_url}{self.model_id}" + response = requests.post(model_url, headers=headers, 
@@ -689,7 +212,7 @@
             return ""
 
         context_lines = ["\n=== COLLABORATIVE CANVAS ARTIFACTS ==="]
-        for artifact in self.canvas_artifacts[conversation_id][-10:]: # Last 10 artifacts
+        for artifact in self.canvas_artifacts[conversation_id][-10:]:
             context_lines.append(f"\n--- {artifact.title} [{artifact.type.upper()}] ---")
             preview = artifact.content[:500] + "..." if len(artifact.content) > 500 else artifact.content
             context_lines.append(preview)
@@ -697,7 +220,7 @@
         return "\n".join(context_lines) + "\n=================================\n"
 
     async def chat_with_canvas(self, message: str, conversation_id: str = "default", include_canvas: bool = True) -> str:
-        """Enhanced chat that includes canvas context"""
+        """Enhanced chat that works with both local and HF"""
         if conversation_id not in self.conversations:
             self.conversations[conversation_id] = []
@@ -718,16 +241,10 @@
         messages.append({"role": "user", "content": message})
 
         try:
-            # Use async client for better performance
-            async_client = AsyncOpenAI(base_url=self.base_url, api_key=self.api_key)
-            response = await async_client.chat.completions.create(
-                model=self.model_id,
-                messages=messages,
-                temperature=0.7,
-                max_tokens=DEFAULT_MAX_TOKENS
-            )
-
-            response_text = response.choices[0].message.content
+            if self.use_huggingface:
+                response_text = await self._hf_inference(messages)
+            else:
+                response_text = await self._local_inference(messages)
 
             # Update conversation history
             self.conversations[conversation_id].extend([
                 {"role": "user", "content": message},
                 {"role": "assistant", "content": response_text}
             ])
 
             # Auto-extract and add code artifacts to canvas
             self._extract_artifacts_to_canvas(response_text, conversation_id)
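With both backends hidden behind `chat_with_canvas`, the same call now drives either inference path. A minimal driver sketch, assuming the patched file is saved as `app.py` and (for the local path) an LM Studio server is serving the OpenAI-compatible API on `localhost:1234`:

```python
import asyncio

from app import EnhancedLLMAgent  # assumes the patched file is importable as app

async def main():
    # Local path; EnhancedLLMAgent(use_huggingface=True) with HF_API_KEY set
    # would route the identical call through _hf_inference instead.
    agent = EnhancedLLMAgent(model_id="local-model", use_huggingface=False)
    reply = await agent.chat_with_canvas(
        "Write a Python function that reverses a string.",
        conversation_id="demo",
    )
    print(reply)
    # Fenced code blocks in the reply are auto-extracted into the canvas store:
    print(len(agent.canvas_artifacts.get("demo", [])), "artifact(s) captured")

asyncio.run(main())
```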
"default"): - """Clear canvas artifacts""" - if conversation_id in self.canvas_artifacts: - self.canvas_artifacts[conversation_id] = [] - console.log(f"[yellow]Cleared canvas: {conversation_id}[/yellow]") - - def get_canvas_summary(self, conversation_id: str) -> List[Dict]: - """Get summary of canvas artifacts for display""" - if conversation_id not in self.canvas_artifacts: - return [] - - artifacts = [] - for artifact in reversed(self.canvas_artifacts[conversation_id]): # Newest first - artifacts.append({ - "id": artifact.id, - "type": artifact.type.upper(), - "title": artifact.title, - "preview": artifact.content[:100] + "..." if len(artifact.content) > 100 else artifact.content, - "timestamp": time.strftime("%H:%M:%S", time.localtime(artifact.timestamp)) - }) - - return artifacts - - def get_artifact_by_id(self, conversation_id: str, artifact_id: str) -> Optional[CanvasArtifact]: - """Get specific artifact by ID""" - if conversation_id not in self.canvas_artifacts: - return None - - for artifact in self.canvas_artifacts[conversation_id]: - if artifact.id == artifact_id: - return artifact - return None - - @staticmethod - async def fetch_available_models(base_url: str, api_key: str) -> List[str]: - """Fetch available models from the API""" - try: - console.log(f"[blue]Fetching models from {base_url}[/blue]") - async_client = AsyncOpenAI(base_url=base_url, api_key=api_key) - models = await async_client.models.list() - model_list = [model.id for model in models.data] - console.log(f"[green]Found {len(model_list)} models[/green]") - return model_list - except Exception as e: - console.log(f"[red]Error fetching models: {e}[/red]") - return ["default-model"] - - def update_config(self, base_url: str, api_key: str, model_id: str, temperature: float, max_tokens: int): - """Update agent configuration""" - self.base_url = base_url - self.api_key = api_key - self.model_id = model_id - console.log(f"[blue]Updated config: {model_id} @ {base_url}[/blue]") - @staticmethod - async def fetch_available_models(base_url: str, api_key: str, use_huggingface: bool = False) -> List[str]: - """Fetch available models - works for both local and HF""" - if use_huggingface: - # Return popular HF models - return list(MODEL_OPTIONS.keys())[1:] # Skip "Local LM Studio" - else: - # Fetch from local LM Studio - try: - console.log(f"[blue]Fetching models from {base_url}[/blue]") - async_client = AsyncOpenAI(base_url=base_url, api_key=api_key) - models = await async_client.models.list() - model_list = [model.id for model in models.data] - console.log(f"[green]Found {len(model_list)} local models[/green]") - return model_list - except Exception as e: - console.log(f"[red]Error fetching local models: {e}[/red]") - return ["local-model"] # Fallback - async def chat_with_canvas(self, message: str, conversation_id: str = "default", include_canvas: bool = True) -> str: - """Enhanced chat that works with both local and HF""" - if conversation_id not in self.conversations: - self.conversations[conversation_id] = [] - - # Build messages with system prompt and canvas context - messages = [{"role": "system", "content": self.system_prompt}] - - # Include canvas context if requested - if include_canvas: - canvas_context = self.get_canvas_context(conversation_id) - if canvas_context: - messages.append({"role": "system", "content": f"Current collaborative canvas state:\n{canvas_context}"}) - - # Add conversation history - for msg in self.conversations[conversation_id][-self.max_history_length:]: - messages.append(msg) - - # Add current 
-        # Add current message
-        messages.append({"role": "user", "content": message})
-
-        try:
-            if self.use_huggingface:
-                response_text = await self._hf_inference(messages)
-            else:
-                response_text = await self._local_inference(messages)
-
-            # Update conversation history
-            self.conversations[conversation_id].extend([
-                {"role": "user", "content": message},
-                {"role": "assistant", "content": response_text}
-            ])
-
-            # Auto-extract and add code artifacts to canvas
-            self._extract_artifacts_to_canvas(response_text, conversation_id)
-
-            return response_text
-
-        except Exception as e:
-            error_msg = f"Error in chat_with_canvas: {str(e)}"
-            console.log(f"[red]{error_msg}[/red]")
-            return error_msg
-    def _convert_messages_to_prompt(self, messages: List[Dict]) -> str:
-        """Convert conversation messages to a single prompt for HF"""
-        prompt = ""
-        for msg in messages:
-            if msg["role"] == "system":
-                prompt += f"System: {msg['content']}\n\n"
-            elif msg["role"] == "user":
-                prompt += f"User: {msg['content']}\n\n"
-            elif msg["role"] == "assistant":
-                prompt += f"Assistant: {msg['content']}\n\n"
-        prompt += "Assistant:"
-        return prompt
-
-# --- LCARS Styled Gradio Interface ---
-class LcarsInterface:
-    def __init__(self, agent: EnhancedLLMAgent):
-        self.agent = agent
-        self.current_conversation = "default"
-
-    def create_interface(self):
-        """Create the full LCARS-styled interface"""
-
-        # Enhanced LCARS CSS with proper Star Trek styling
-        lcars_css = """
-        :root {
-            --lcars-orange: #FF9900;
-            --lcars-red: #FF0033;
-            --lcars-blue: #6699FF;
-            --lcars-purple: #CC99FF;
-            --lcars-pale-blue: #99CCFF;
-            --lcars-black: #000000;
-            --lcars-dark-blue: #3366CC;
-            --lcars-gray: #424242;
-            --lcars-yellow: #FFFF66;
-        }
-
-        body {
-            background: var(--lcars-black);
-            color: var(--lcars-orange);
-            font-family: 'Antonio', 'LCD', 'Courier New', monospace;
-            margin: 0;
-            padding: 0;
-        }
-
-        .gradio-container {
-            background: var(--lcars-black) !important;
-            min-height: 100vh;
-        }
-
-        .lcars-container {
-            background: var(--lcars-black);
-            border: 4px solid var(--lcars-orange);
-            border-radius: 0 30px 0 0;
-            min-height: 100vh;
-            padding: 20px;
-        }
-
-        .lcars-header {
-            background: linear-gradient(90deg, var(--lcars-red), var(--lcars-orange));
-            padding: 20px 40px;
-            border-radius: 0 60px 0 0;
-            margin: -20px -20px 20px -20px;
-            border-bottom: 6px solid var(--lcars-blue);
-            box-shadow: 0 4px 20px rgba(255, 153, 0, 0.3);
-        }
-
-        .lcars-title {
-            font-size: 3em;
-            font-weight: bold;
-            color: var(--lcars-black);
-            text-shadow: 3px 3px 6px rgba(255, 255, 255, 0.4);
-            margin: 0;
-            letter-spacing: 2px;
-        }
-
-        .lcars-subtitle {
-            font-size: 1.4em;
-            color: var(--lcars-black);
-            margin: 10px 0 0 0;
-            font-weight: bold;
-        }
-
-        .lcars-panel {
-            background: linear-gradient(135deg, rgba(66, 66, 66, 0.9), rgba(40, 40, 40, 0.9));
-            border: 3px solid var(--lcars-orange);
-            border-radius: 0 25px 0 25px;
-            padding: 20px;
-            margin-bottom: 20px;
-            box-shadow: 0 4px 15px rgba(255, 153, 0, 0.2);
-        }
-
-        .lcars-button {
-            background: linear-gradient(135deg, var(--lcars-orange), var(--lcars-red));
-            color: var(--lcars-black) !important;
-            border: none !important;
-            border-radius: 0 20px 0 20px !important;
-            padding: 12px 24px !important;
-            font-family: inherit !important;
-            font-weight: bold !important;
-            font-size: 1.1em !important;
-            cursor: pointer !important;
-            transition: all 0.3s ease !important;
-            margin: 8px !important;
-            box-shadow: 0 4px 8px rgba(255, 153, 0, 0.3) !important;
-        }
-
-        .lcars-button:hover {
-            background: linear-gradient(135deg, var(--lcars-red), var(--lcars-orange)) !important;
-            transform: translateY(-2px) !important;
-            box-shadow: 0 6px 12px rgba(255, 153, 0, 0.4) !important;
-        }
-
-        .lcars-input {
-            background: var(--lcars-black) !important;
-            color: var(--lcars-orange) !important;
-            border: 2px solid var(--lcars-blue) !important;
-            border-radius: 0 15px 0 15px !important;
-            padding: 12px !important;
-            font-family: inherit !important;
-            font-size: 1.1em !important;
-        }
-
-        .lcars-chatbot {
-            background: var(--lcars-black) !important;
-            border: 3px solid var(--lcars-purple) !important;
-            border-radius: 0 20px 0 20px !important;
-            min-height: 400px;
-            max-height: 500px;
-        }
-
-        .lcars-code-editor {
-            background: var(--lcars-black) !important;
-            color: var(--lcars-pale-blue) !important;
-            border: 3px solid var(--lcars-blue) !important;
-            border-radius: 0 20px 0 20px !important;
-            font-family: 'Fira Code', 'Courier New', monospace !important;
-            font-size: 1em !important;
-        }
-
-        .user-message {
-            background: linear-gradient(135deg, rgba(102, 153, 255, 0.2), rgba(51, 102, 204, 0.2)) !important;
-            border-left: 6px solid var(--lcars-blue) !important;
-            padding: 12px !important;
-            margin: 8px 0 !important;
-            border-radius: 0 15px 0 15px !important;
-        }
-
-        .assistant-message {
-            background: linear-gradient(135deg, rgba(255, 153, 0, 0.2), rgba(255, 102, 0, 0.2)) !important;
-            border-left: 6px solid var(--lcars-orange) !important;
-            padding: 12px !important;
-            margin: 8px 0 !important;
-            border-radius: 0 15px 0 15px !important;
-        }
-
-        .artifact-item {
-            background: linear-gradient(135deg, rgba(204, 153, 255, 0.15), rgba(153, 102, 204, 0.15));
-            border: 2px solid var(--lcars-purple);
-            padding: 10px;
-            margin: 6px 0;
-            border-radius: 0 12px 0 12px;
-            cursor: pointer;
-            transition: all 0.3s ease;
-        }
-
-        .artifact-item:hover {
-            background: linear-gradient(135deg, rgba(204, 153, 255, 0.3), rgba(153, 102, 204, 0.3));
-            transform: translateX(5px);
-        }
-
-        .status-indicator {
-            display: inline-block;
-            width: 16px;
-            height: 16px;
-            border-radius: 50%;
-            background: var(--lcars-red);
-            margin-right: 12px;
-            box-shadow: 0 0 10px currentColor;
-        }
-
-        .status-online {
-            background: var(--lcars-blue);
-            animation: pulse 1.5s infinite;
-        }
-
-        @keyframes pulse {
-            0% { transform: scale(1); opacity: 1; }
-            50% { transform: scale(1.1); opacity: 0.7; }
-            100% { transform: scale(1); opacity: 1; }
-        }
-
-        .panel-title {
-            color: var(--lcars-yellow) !important;
-            font-size: 1.4em !important;
-            font-weight: bold !important;
-            margin-bottom: 15px !important;
-            border-bottom: 2px solid var(--lcars-orange);
-            padding-bottom: 8px;
-        }
-
-        .gradio-accordion {
-            border: 2px solid var(--lcars-orange) !important;
-            border-radius: 0 20px 0 20px !important;
-            margin-bottom: 20px !important;
-        }
-
-        .gradio-accordion .label {
-            background: linear-gradient(90deg, var(--lcars-orange), var(--lcars-red)) !important;
-            color: var(--lcars-black) !important;
-            font-size: 1.3em !important;
-            font-weight: bold !important;
-            padding: 15px 20px !important;
-        }
-        """
-
-        with gr.Blocks(css=lcars_css, theme=gr.themes.Default(), title="LCARS Terminal") as interface:
-
-            with gr.Column(elem_classes="lcars-container"):
-                # Header Section
-                with gr.Row(elem_classes="lcars-header"):
-                    gr.Markdown("""
-
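One behavioral detail of the artifact auto-extraction that `chat_with_canvas` calls (the helper's body appears in the removed block above, so this assumes an equivalent copy survives elsewhere in the file): the language sniff `re.search(r'```(\w+)\n', response)` scans the whole response from the start, so when a reply contains several fenced blocks every artifact is tagged with the first block's language. A standalone repro of the two regexes as they appear in the patch:

```python
import re

response = "intro\n```python\nprint('a')\n```\nmore\n```sql\nSELECT 1;\n```\n"

code_blocks = re.findall(r'```(?:\w+)?\n(.*?)```', response, re.DOTALL)
for code_block in code_blocks:
    # Same sniff as the patch: it searches the whole response,
    # so it always reports the FIRST block's language.
    lang_match = re.search(r'```(\w+)\n', response)
    lang = lang_match.group(1) if lang_match else "unknown"
    print(lang, repr(code_block.strip()))
# python "print('a')"
# python 'SELECT 1;'
```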