- π₯οΈ Analyze an entire directory/project
+ # Save code snippets as artifacts
+ for i, snippet in enumerate(parsed_response.code_snippets):
+ agent.add_artifact(
+ conversation_id=conversation_id,
+ artifact_type="code",
+ content=snippet['code'],
+ title=f"code_snippet_{snippet['language']}_{i}",
+ metadata={
+ "language": snippet['language'],
+ "description": snippet.get('description', ''),
+ "source": "llm_response"
+ }
+ )
+ artifacts_created.append(f"code_snippet_{i}")
+
+ # Save thinking as a text artifact if substantial
+ if len(parsed_response.thinking) > 50:
+ agent.add_artifact(
+ conversation_id=conversation_id,
+ artifact_type="text",
+ content=parsed_response.thinking,
+ title="reasoning_process",
+ metadata={"type": "reasoning", "source": "llm_response"}
+ )
+ artifacts_created.append("reasoning")
+
+ return artifacts_created
def process_lcars_message(message, history, speak_response=False):
    """Process a user message through the LLMAgent and parse the response.

    Args:
        message: Raw user input text.
        history: Chat history as a list of [user_msg, bot_msg] pairs.
        speak_response: When True (and the agent has speech enabled), the
            main response content is also spoken aloud.

    Returns:
        Tuple of (cleared_input, new_history, status, artifacts, thinking).
    """
    if not message.strip():
        # BUG FIX: this path previously returned only 4 values while every
        # other path returns 5 — that mismatch would break the caller's
        # 5-way unpacking. A fifth element ("" for thinking) is added.
        return "", history, "Please enter a message", [], ""

    try:
        # Show the user message immediately with an empty bot slot.
        new_history = history + [[message, ""]]

        # Query the agent and parse the structured response.
        raw_response = agent.direct_chat(message, agent.current_conversation)
        parsed_response = parse_llm_response(raw_response)

        # Persist artifacts (code snippets, reasoning) extracted from the response.
        artifacts_created = extract_artifacts_from_response(parsed_response, agent.current_conversation)

        # Build display content, appending any code snippets as fenced blocks.
        display_content = parsed_response.main_content
        if parsed_response.code_snippets:
            display_content += "\n\n**Code Snippets Generated:**"
            for snippet in parsed_response.code_snippets:
                display_content += f"\n```{snippet['language']}\n{snippet['code']}\n```"

        new_history[-1][1] = display_content

        # Optional text-to-speech of the main content only (not the code).
        if speak_response and agent.speech_enabled:
            agent.speak(parsed_response.main_content)

        artifacts = agent.get_canvas_summary(agent.current_conversation)
        status = f"✅ Response parsed. Artifacts created: {len(artifacts_created)} | Total: {len(artifacts)}"

        return "", new_history, status, artifacts, parsed_response.thinking

    except Exception as e:
        error_msg = f"❌ Error: {str(e)}"
        new_history = history + [[message, error_msg]]
        return "", new_history, error_msg, agent.get_canvas_summary(agent.current_conversation), ""
+
def update_chat_display(history):
    """Convert chat history to formatted HTML for the session-history panel.

    Args:
        history: List of [user_msg, bot_msg] pairs.

    Returns:
        An HTML string rendering every exchange, or a placeholder string
        when the history is empty.
    """
    # NOTE(review): the original markup literals were corrupted in this file
    # (strings split mid-line); the structure below is reconstructed from the
    # surviving fragments — confirm against the project's CSS classes.
    if not history:
        return "<div class='chat-history'>No messages yet</div>"

    html = "<div class='chat-history'>"
    for user_msg, bot_msg in history:
        html += f"""
        <div class='chat-exchange'>
            <div class='user-message'>👤 You: {user_msg}</div>
            <div class='bot-message'>🖖 L.C.A.R.S: {bot_msg}</div>
        </div>
        """
    html += "</div>"
    return html
+
def update_artifacts_display():
    """Render the most recent session artifacts as HTML.

    Returns:
        HTML listing up to the last 10 artifacts of the current
        conversation, or a placeholder when none exist.
    """
    # NOTE(review): the original markup literals were corrupted in this file;
    # the structure below is reconstructed — confirm against project styling.
    artifacts = agent.get_canvas_artifacts(agent.current_conversation)
    if not artifacts:
        return "<div class='artifacts'>No artifacts generated yet</div>"

    html = "<div class='artifacts'>"
    # Show only the last 10 artifacts, but number them with their GLOBAL
    # index so the "#id" shown here matches the ID accepted by
    # execute_code_artifact (the old code numbered within the slice).
    start = max(0, len(artifacts) - 10)
    for i, artifact in enumerate(artifacts[-10:], start=start):
        type_icon = {
            "code": "💻",
            "text": "📝",
            "diagram": "📊",
            "image": "🖼️",
        }.get(artifact.type, "📄")
        preview = artifact.content[:150]
        if len(artifact.content) > 150:
            preview += "..."
        html += f"""
        <div class='artifact'>
            <div>{type_icon} {artifact.title} (#{i})</div>
            <div>Type: {artifact.type} | Time: {time.ctime(artifact.timestamp)}</div>
            <div>{preview}</div>
        </div>
        """
    html += "</div>"
    return html
+
def get_plain_text_response(history):
    """Return the latest bot reply from *history* as a markdown panel string."""
    header = "## π€ L.C.A.R.S Response"
    if not history:
        return f"{header}\n\n*Awaiting your query...*"

    *_, last_exchange = history
    # A non-empty second slot means the bot has answered; otherwise we are
    # still waiting on the model.
    has_reply = len(last_exchange) >= 2 and last_exchange[1]
    if has_reply:
        return f"{header}\n\n{last_exchange[1]}"
    return f"{header}\n\n*Processing...*"
+
def execute_code_artifact(artifact_id, current_code):
    """Load a code artifact by numeric id into the code editor.

    Args:
        artifact_id: String or int index into the current conversation's
            artifact list.
        current_code: Code currently in the editor; returned unchanged on
            any error so the user's work is never lost.

    Returns:
        Tuple of (status_markdown, editor_code).
    """
    try:
        artifacts = agent.get_canvas_artifacts(agent.current_conversation)
        if not artifacts:
            return "No artifacts available", current_code

        # Guard clauses replace the original deeply nested try/if ladder;
        # each error path returns the same message as before.
        try:
            artifact_idx = int(artifact_id)
        except ValueError:
            return "❌ Please enter a valid numeric artifact ID", current_code

        if not (0 <= artifact_idx < len(artifacts)):
            return f"❌ Invalid artifact ID. Available: 0-{len(artifacts)-1}", current_code

        artifact = artifacts[artifact_idx]
        if artifact.type != "code":
            return f"❌ Artifact {artifact_idx} is not code (type: {artifact.type})", current_code

        # Return the code so it loads into the editor component.
        display_text = (
            f"## 📋 Loaded Artifact #{artifact_idx}\n\n"
            f"**Title:** {artifact.title}\n\n"
            f"**Code:**\n```python\n{artifact.content}\n```"
        )
        return display_text, artifact.content

    except Exception as e:
        return f"❌ Error: {str(e)}", current_code
+
def create_code_artifact(code, description, language):
    """Save the editor's current code as a new artifact.

    Args:
        code: Source code text from the editor.
        description: Optional human-readable title; auto-named when empty.
        language: Language tag stored in the artifact metadata.

    Returns:
        Tuple of (status_message, code) — code is echoed back unchanged so
        the editor keeps its contents.
    """
    try:
        if not code.strip():
            return "❌ No code provided", code

        agent.add_artifact(
            conversation_id=agent.current_conversation,
            artifact_type="code",
            content=code,
            # Auto-name from the current artifact count when no description given.
            title=description or f"Code_{len(agent.get_canvas_artifacts(agent.current_conversation))}",
            metadata={"language": language, "description": description},
        )

        artifacts_count = len(agent.get_canvas_artifacts(agent.current_conversation))
        # Restored: this success string was split across lines by file corruption.
        return f"✅ Code artifact saved! Total artifacts: {artifacts_count}", code

    except Exception as e:
        return f"❌ Error saving artifact: {str(e)}", code
+
def clear_current_chat():
    """Clear the current conversation and reset all chat-related displays.

    Returns:
        Tuple matching the Gradio outputs: (history, plain_text, status,
        chat_display_html, artifacts_html, thinking_text).
    """
    agent.clear_conversation(agent.current_conversation)
    empty_history = []
    # Restored: this status string was split across lines by file corruption.
    status_msg = "✅ Chat cleared"
    plain_text = "## π€ L.C.A.R.S Response\n\n*Chat cleared*"
    chat_display = update_chat_display(empty_history)
    artifacts_display = update_artifacts_display()

    return empty_history, plain_text, status_msg, chat_display, artifacts_display, ""
+
def new_session():
    """Begin a brand-new session: wipe the conversation, the canvas, and
    reset every display to its starting state."""
    agent.clear_conversation(agent.current_conversation)
    agent.clear_canvas(agent.current_conversation)

    starter_code = "# New L.C.A.R.S Session Started\nprint('π Local Computer Advanced Reasoning System Online')\nprint('π€ All systems nominal - Ready for collaboration')"
    fresh_history = []

    # Output order must match the Gradio wiring:
    # history, code, plain_text, status, chat_html, artifacts_html, thinking
    return (
        fresh_history,
        starter_code,
        "## π€ L.C.A.R.S Response\n\n*New session started*",
        "π New session started",
        update_chat_display(fresh_history),
        update_artifacts_display(),
        "",
    )
def update_model_settings(base_url, api_key, model_id, temperature, max_tokens):
    """Apply new connection/model settings to the agent and rebuild its client.

    Args:
        base_url: API endpoint base URL.
        api_key: API key (may be a placeholder for local models).
        model_id: Model identifier to use for completions.
        temperature: Sampling temperature (coerced to float).
        max_tokens: Maximum tokens per completion (coerced to int).

    Returns:
        A status string describing success or the error encountered.
    """
    try:
        agent.base_url = base_url
        agent.api_key = api_key
        agent.model_id = model_id
        agent.temperature = float(temperature)
        agent.max_tokens = int(max_tokens)

        # Recreate the client so the new endpoint/key actually take effect.
        agent.async_client = agent.CreateClient(base_url, api_key)

        # Restored: this success string was split across lines by file corruption.
        return f"✅ Model settings updated: {model_id} | Temp: {temperature} | Max tokens: {max_tokens}"
    except Exception as e:
        return f"❌ Error updating settings: {str(e)}"
+
async def fetch_models(base_url, api_key):
    """Fetch available models from the API and return a refreshed dropdown."""
    try:
        model_list = await agent.fetch_available_models(base_url, api_key)
        selected = model_list[0] if model_list else ""
        return gr.Dropdown(choices=model_list, value=selected)
    except Exception as e:
        # Keep the UI alive on fetch failure: log and return an empty dropdown.
        print(f"Error fetching models: {e}")
        return gr.Dropdown(choices=[], value="")
+
+# Create the Gradio interface
+with gr.Blocks(
+ title="π L.C.A.R.S - Local Computer Advanced Reasoning System",
+ theme='Yntec/HaleyCH_Theme_Orange_Green',
+ css=custom_css
+ ) as demo:
+
+ # State management
+ history_state = gr.State([])
+ with gr.Sidebar(label = "Settings"):
+ gr.HTML("
βοΈ MODEL SETTINGS
")
- # Artifact Management Column
- with gr.Column():
- gr.HTML(f"
π§± ARTIFACT MANAGEMENT
")
-
- artifact_status = gr.Textbox(label="Artifact Status", value="Ready", interactive=False)
-
- with gr.Row():
- artifact_session_input = gr.Textbox(
- label="Artifact Session Name",
- placeholder="Leave empty to load all artifacts",
- scale=2
- )
- merge_artifacts_checkbox = gr.Checkbox(
- label="Merge Artifacts",
- value=True,
- info="Add to current artifacts instead of replacing"
- )
-
- with gr.Row():
- load_artifacts_btn = gr.Button("π Load Artifacts", variant="huggingface")
- #save_artifacts_btn = gr.Button("πΎ Save Artifacts", variant="huggingface")
-
- artifact_dropdown = gr.Dropdown(
- label="Available Artifact Files",
- choices=["none"],
- interactive=True,
- info="Select artifact file to load"
+ with gr.Accordion("π§ Configuration", open=True):
+ base_url = gr.Textbox(
+ value=agent.base_url,
+ label="Base URL",
+ placeholder="http://localhost:1234/v1"
)
-
- with gr.Row():
- load_all_artifacts_btn = gr.Button("π Load All Artifacts", variant="huggingface")
- refresh_artifacts_btn = gr.Button("π Refresh Artifacts", variant="huggingface")
-
-
- with gr.TabItem(label="π Session & Artifact Browser"):
- with gr.Row():
- session_info = gr.JSON(label="Session Details", value=[], elem_classes=["metadata-display"])
- artifact_info = gr.JSON(label="Artifact Details", value=[], elem_classes=["metadata-display"])
-
-
-
-
-
-
-
-
-
- with gr.Tab(label="π Directory to JSON Extractor", elem_id="directory_extractor_tab"):
-
- def ExtractDirectoryToJson(directory_path="dump", extension='.txt', json_file_path="_Data.json"):
- def extract_data_from_files(directory_path, Extension='.md', max_seq_length=2048):
- import os
- import json
- Json_list = []
-
- # Check if directory exists
- if not os.path.exists(directory_path):
- return f"Error: Directory '{directory_path}' does not exist."
-
- # Check if directory is empty
- if not os.listdir(directory_path):
- return f"Error: Directory '{directory_path}' is empty."
-
- # Iterate over each file in the directory
- for file_name in os.listdir(directory_path):
- if file_name.endswith(Extension):
- file_path = os.path.join(directory_path, file_name)
- try:
- with open(file_path, 'r', encoding='utf-8') as file:
- chunk = file.read()
- chunks = [chunk[i:i+max_seq_length] for i in range(0, len(chunk), max_seq_length)]
- for text_seg in chunks:
- Json_list.append({'DocumentTitle': file_name, 'Text': text_seg})
- except Exception as e:
- return f"Error reading file {file_name}: {str(e)}"
- return Json_list
-
- def save_to_json(data, json_file):
- import os
- import json
- try:
- with open(json_file, 'w') as f:
- json.dump(data, f, indent=4)
- return True, None
- except Exception as e:
- return False, str(e)
-
- # Extract file contents
- _data = extract_data_from_files(directory_path, Extension=extension)
-
- # Check if we got an error message instead of data
- if isinstance(_data, str):
- return _data
-
- # Save data to JSON file
- success, error = save_to_json(_data, json_file_path)
- if success:
- return f"File extraction completed. JSON file saved to: {json_file_path}"
- else:
- return f"Error saving JSON file: {error}"
-
- # Gradio Interface
- def process_directory(directory_path, extension, json_filename):
- # Validate inputs
- if not directory_path:
- return "Please select a directory"
- if not extension:
- return "Please specify a file extension"
- if not json_filename:
- json_filename = "_Data.json"
-
- # Ensure extension starts with a dot
- if not extension.startswith('.'):
- extension = '.' + extension
-
- # Call the main function
- result = ExtractDirectoryToJson(
- directory_path=directory_path,
- extension=extension,
- json_file_path=json_filename
- )
- return result
-
-
-
- FileTypes=['.txt','.md','.json','.py']
-
- gr.Markdown("## Directory to JSON Extractor")
- gr.Markdown("Extract text from files in a directory and save as JSON")
-
- with gr.Row():
- directory_input = gr.Textbox(label="Directory Path", placeholder="Path to directory containing files")
- directory_btn = gr.Button("Browse")
-
- with gr.Row():
- extension_input = gr.Dropdown(label="File Extension",choices=FileTypes, interactive=True)
- json_output = gr.Textbox(label="Output JSON Filename", value="_Data.json")
-
- submitProcessDirectory_btn = gr.Button("Process Directory")
- output_text = gr.JSON(label="Output")
-
-
-
- # Main Crawling Tab
- with gr.Tab("π Crawl Repository"):
- with gr.Row():
- with gr.Column(scale=1):
- gr.Markdown("## π― Source Configuration")
-
- source_type = gr.Dropdown(
- choices=["GitHub", "Local", "Hugging Face"],
- label="Source Type",
- value="GitHub"
+ api_key = gr.Textbox(
+ value=agent.api_key,
+ label="API Key",
+ placeholder="not-needed for local models",
+ type="password"
)
-
- # GitHub settings
- with gr.Group(visible=True) as github_group:
- repo_url = gr.Textbox(
- label="GitHub Repository URL",
- placeholder="https://github.com/owner/repo",
- value=""
- )
- github_token = gr.Textbox(
- label="GitHub Token (optional)",
- type="password",
- placeholder="ghp_..."
- )
-
- # Local settings
- with gr.Group(visible=False) as local_group:
- local_path = gr.Textbox(
- label="Local Directory Path",
- placeholder="/path/to/directory",
- value=""
- )
-
- # Hugging Face settings
- with gr.Group(visible=False) as hf_group:
- hf_repo_id = gr.Textbox(
- label="Hugging Face Repository ID",
- placeholder="microsoft/DialoGPT-medium",
- value=""
- )
- hf_repo_type = gr.Dropdown(
- choices=["model", "dataset", "space"],
- label="Repository Type",
- value="model"
- )
- hf_token = gr.Textbox(
- label="Hugging Face Token (optional)",
- type="password",
- placeholder="hf_..."
- )
-
- gr.Markdown("## βοΈ Crawling Options")
-
- max_file_size = gr.Number(
- label="Max File Size (MB)",
- value=1,
+ model_id = gr.Dropdown(
+ value=agent.model_id,
+ label="Model",
+ choices=[agent.model_id],
+ allow_custom_value=True
+ )
+ temperature = gr.Slider(
+ value=agent.temperature,
minimum=0.1,
- maximum=100
+ maximum=2.0,
+ step=0.1,
+ label="Temperature"
)
-
- include_patterns = gr.Textbox(
- label="Include Patterns (comma-separated)",
- placeholder="*.py, *.js, *.md",
- value=""
+ max_tokens = gr.Slider(
+ value=agent.max_tokens,
+ minimum=100,
+ maximum=10000,
+ step=100,
+ label="Max Tokens"
)
- exclude_patterns = gr.Textbox(
- label="Exclude Patterns (comma-separated)",
- placeholder="*.pyc, __pycache__/*, .git/*",
- value=""
- )
+ with gr.Row():
+ update_settings_btn = gr.Button("π Update Settings", variant="primary")
+ fetch_models_btn = gr.Button("π Fetch Models", variant="secondary")
- use_relative_paths = gr.Checkbox(
- label="Use Relative Paths",
- value=True
+ # ============================================
+ # HEADER SECTION
+ # ============================================
+ with gr.Row():
+ with gr.Column(scale=1):
+ gr.Image(
+ value="https://cdn-avatars.huggingface.co/v1/production/uploads/65d883893a52cd9bcd8ab7cf/tRsCJlHNZo1D02kBTmfy9.jpeg",
+ elem_id="lcars_logo",
+ height=200,
+ show_download_button=False,
+ container=False,
+ width=200
+ )
+ with gr.Column(scale=3):
+ gr.HTML(f"""
+
+ π₯οΈ L.C.A.R.S - Local Computer Advanced Reasoning System
+
USS Enterprise β’ NCC-1701-D β’ Starfleet Command
+
+ """)
+
+ # ============================================
+ # MAIN INTERFACE TABS
+ # ============================================
+ with gr.Tabs():
+
+ # ============================================
+ # L.C.A.R.S MAIN CHAT TAB (Enhanced)
+ # ============================================
+ with gr.TabItem(label="π€ L.C.A.R.S Chat Intelligence", elem_id="lcars_main_tab"):
+ with gr.Row():
+ # LEFT COLUMN - INPUT & CONTROLS
+ with gr.Column(scale=2):
+ gr.HTML("
π§ REASONING PROCESS
")
+ with gr.Accordion(label="π§ AI Reasoning & Thinking", open=True):
+ thinking_display = gr.Markdown(
+ value="*AI reasoning will appear here during processing...*",
+ label="Thought Process",
+ show_label=True,
+ height=200
)
-
- crawl_btn = gr.Button("π Start Crawling", variant="primary", size="lg")
- with gr.Column(scale=2):
- gr.Markdown("## π Results")
-
- results_summary = gr.Textbox(
- label="Crawling Summary",
- lines=8,
- interactive=False
- )
-
- file_list = gr.Dataframe(
- label="Files Found",
- headers=["File Path", "Size (chars)", "Type", "Lines"],
- interactive=False,
- wrap=True
- )
-
- # File Browser Tab
- with gr.Tab("π File Browser"):
- with gr.Row():
- with gr.Column(scale=1):
- gr.Markdown("## π File Selection")
-
- selected_file = gr.Dropdown(
- label="Select File",
- choices=[],
- interactive=True,
- allow_custom_value=True
+ # Main chat input
+ message = gr.Textbox(
+ show_copy_button=True,
+ lines=3,
+ label="π¬ Ask L.C.A.R.S",
+ placeholder="Enter your message to the Local Computer Advanced Reasoning System..."
+ )
+
+ # Control buttons
+ with gr.Row():
+ submit_btn = gr.Button("π Ask L.C.A.R.S", variant="primary", size="lg")
+ clear_btn = gr.Button("ποΈ Clear Chat", variant="secondary")
+ new_session_btn = gr.Button("π New Session", variant="secondary")
+
+ # Audio controls
+ with gr.Row():
+ speak_response = gr.Checkbox(label="π Speak Response", value=False)
+
+ # Quick Actions
+ with gr.Accordion(label="β‘ Utility Quick Actions", open=False):
+ with gr.Row():
+ artifact_id_input = gr.Textbox(
+ label="Artifact ID",
+ placeholder="Artifact ID (0, 1, 2...)",
+ scale=2
+ )
+ execute_artifact_btn = gr.Button("π Load Artifact", variant="primary")
+
+ # MIDDLE COLUMN - RESPONSES
+ with gr.Column(scale=2):
+ gr.HTML("
SYSTEM RESPONSE
")
+
+ with gr.Accordion(label="π€ L.C.A.R.S Response", open=True):
+ plain_text_output = gr.Markdown(
+ value="## π€ L.C.A.R.S Response\n\n*Awaiting your query...*",
+ container=True,
+ show_copy_button=True,
+ label="AI Response",
+ height=300
)
-
- load_btn = gr.Button("π Load File", variant="secondary")
- save_btn = gr.Button("πΎ Save Changes", variant="primary")
-
- save_status = gr.Textbox(
- label="Status",
- lines=2,
- interactive=False
+
+ execution_output = gr.Markdown(
+ value="*Execution results will appear here*",
+ label="Execution Results",
+ height=150
+ )
+
+ status_display = gr.Textbox(
+ value="System ready",
+ label="Status",
+ interactive=False
+ )
+
+ gr.HTML("
Current Session
")
+
+ # Enhanced Chat History Display
+ with gr.Accordion(label="π Session Chat History", open=True):
+ chat_history_display = gr.HTML(
+ value="
No messages yet
",
+ label="Full Session History",
+ show_label=True
)
+
+ # RIGHT COLUMN - ENHANCED CODE ARTIFACTS
+ with gr.Column(scale=2):
+ gr.HTML("
π§± ENHANCED CODE ARTIFACTS WORKSHOP
")
- with gr.Column(scale=3):
- gr.Markdown("## βοΈ File Editor")
-
- file_content = gr.Textbox(
- label="File Content",
- lines=25,
+ with gr.Accordion(label="π§± Code Artifacts Workshop", open=True):
+ # Enhanced Code Editor with save functionality
+ code_artifacts = gr.Code(
+ language="python",
+ label="Generated Code & Artifacts",
+ lines=15,
interactive=True,
- show_copy_button=True,
- placeholder="Select a file to view its content..."
+ show_line_numbers=True,
+ elem_id="code_editor",
+ value="# Welcome to L.C.A.R.S Code Workshop\n# Write or generate code here\n\nprint('π L.C.A.R.S Code Workshop Active')"
)
-
- # Export Tab
- with gr.Tab("π€ Export"):
- with gr.Row():
- with gr.Column():
- gr.Markdown("## πΎ Export Options")
- export_format = gr.Dropdown(
- choices=["JSON", "File List", "Summary Report"],
- label="Export Format",
- value="JSON"
- )
+ # Enhanced Artifact Controls
+ with gr.Row():
+ artifact_description = gr.Textbox(
+ label="Artifact Description",
+ placeholder="Brief description of the code...",
+ scale=2
+ )
+ artifact_language = gr.Dropdown(
+ choices=["python", "javascript", "html", "css", "bash", "sql", "json"],
+ value="python",
+ label="Language",
+ scale=1
+ )
- export_btn = gr.Button("π Generate Export", variant="primary")
+ with gr.Row():
+ execute_code_btn = gr.Button("βΆοΈ Execute Code", variant="primary")
+ create_artifact_btn = gr.Button("πΎ Save Artifact", variant="primary")
- export_output = gr.Textbox(
- label="Export Output",
- lines=20,
- show_copy_button=True,
- interactive=False
+ # Artifacts Display
+ with gr.Accordion(label="π Current Session Artifacts", open=True):
+ artifacts_display = gr.HTML(
+ value="
No artifacts generated yet
",
+ label="Generated Artifacts Timeline",
+ show_label=True
)
- with gr.Tab("π§ Task Planning"):
-
- with gr.Column(scale = 1):
-
- with gr.Column():
- with gr.Row():
- sub_task_output = gr.Textbox(lines=4,show_label=True,container=False,label="Sub Tasks")
- gr.HTML(f"
Task Strategy and Planning
")
- with gr.Accordion("Task Graph", open=False):
- task_graph_img = gr.Image(label="Task Reasoning Graph")
- with gr.Row():
- graph_btn = gr.Button("Visualize Task Graph",variant="huggingface")
- with gr.Row():
- with gr.Column():
- task_input = gr.Textbox(lines = 10,label="Enter Task Description",placeholder = "Write a BPE Tokenizer in VB.NET")
-
-
- with gr.Column():
- gr.HTML(f"
Generated Code
")
-
- with gr.Accordion("Generated Code", open=False):
-
- task_code_output = gr.Code(show_label=True,container=True,label="Task Code Generated",language='python')
-
- with gr.Row():
- complexity_btn = gr.Button("Analyze Complexity", variant="huggingface")
- decompose_btn = gr.Button("Decompose Task", variant="huggingface")
- workflow_btn = gr.Button("Generate Workflow", variant="huggingface")
- with gr.Row():
- GeneratePlan_btn = gr.Button("Generate plan", variant="huggingface")
- GenerateTaskCode_btn = gr.Button("Generate code", variant="huggingface")
-
- with gr.Row():
-
- with gr.Tabs():
-
- with gr.Tab("Complexity"):
- gr.HTML(f"
Task Complexity
")
-
- complexity_output = gr.Markdown(show_label=True,max_height=600,container=True,show_copy_button = True,label="Task Complexity")
-
- with gr.Tab("Planning"):
- gr.HTML(f"
Sub Task Planning
")
-
- decompose_output = gr.Markdown(show_label=True,container=True,show_copy_button = True,label="Task Analysis")
-
- with gr.Tab("WorkFlow"):
- gr.HTML(f"
Task Work-Flow
")
-
- workflow_output = gr.Markdown(show_label=True,container=True,label="Task WorkFlow")
-
-
-
-
-
- # Footer
- gr.HTML(f"""
-
- π L.C.A.R.S - Enhanced Local Computer Advanced Reasoning System v3.0 β’ Starfleet Command β’ Computer Core Online
-
- """)
-
-
+ # ============================================
+ # EVENT HANDLERS - WITH PARSED RESPONSE SUPPORT
+ # ============================================
+
+ # Main chat functionality
+ def handle_message(message, history, speak_response):
+ # Process the message
+ cleaned_message, new_history, status_msg, artifacts, thinking = process_lcars_message(message, history, speak_response)
+
+ # Update all displays
+ plain_text = get_plain_text_response(new_history)
+ chat_display = update_chat_display(new_history)
+ artifacts_html = update_artifacts_display()
+
+ # Format thinking for display
+ thinking_display_content = f"## π§ AI Reasoning\n\n{thinking}" if thinking else "*No reasoning content extracted*"
+
+ # Return in correct order for outputs
+ return cleaned_message, new_history, plain_text, status_msg, chat_display, artifacts_html, thinking_display_content
+
+ submit_btn.click(
+ fn=handle_message,
+ inputs=[message, history_state, speak_response],
+ outputs=[
+ message, # 0 - cleaned message input
+ history_state, # 1 - updated history state
+ plain_text_output, # 2 - markdown response (string)
+ status_display, # 3 - status message (string)
+ chat_history_display, # 4 - HTML display
+ artifacts_display, # 5 - HTML display
+ thinking_display # 6 - thinking markdown
+ ]
+ )
+
+ message.submit(
+ fn=handle_message,
+ inputs=[message, history_state, speak_response],
+ outputs=[
+ message,
+ history_state,
+ plain_text_output,
+ status_display,
+ chat_history_display,
+ artifacts_display,
+ thinking_display
+ ]
+ )
+
+ # Clear chat
+ clear_btn.click(
+ fn=clear_current_chat,
+ outputs=[
+ history_state, # 0 - empty history list
+ plain_text_output, # 1 - markdown string
+ status_display, # 2 - status string
+ chat_history_display, # 3 - HTML string
+ artifacts_display, # 4 - HTML string
+ thinking_display # 5 - thinking markdown
+ ]
+ )
+
+ # New session
+ new_session_btn.click(
+ fn=new_session,
+ outputs=[
+ history_state, # 0 - empty history list
+ code_artifacts, # 1 - code string
+ plain_text_output, # 2 - markdown string
+ status_display, # 3 - status string
+ chat_history_display, # 4 - HTML string
+ artifacts_display, # 5 - HTML string
+ thinking_display # 6 - thinking markdown
+ ]
+ )
+
+ # Artifact operations
+ create_artifact_btn.click(
+ fn=create_code_artifact,
+ inputs=[code_artifacts, artifact_description, artifact_language],
+ outputs=[execution_output, code_artifacts]
+ )
+
+ execute_artifact_btn.click(
+ fn=execute_code_artifact,
+ inputs=[artifact_id_input, code_artifacts],
+ outputs=[execution_output, code_artifacts]
+ )
+ execute_code_btn.click(
+ fn=execute_current_code,
+ inputs=[code_artifacts],
+ outputs=[execution_output, code_artifacts]
+ )
+
+ # Model settings
+ update_settings_btn.click(
+ fn=update_model_settings,
+ inputs=[base_url, api_key, model_id, temperature, max_tokens],
+ outputs=[status_display]
+ )
+
+ fetch_models_btn.click(
+ fn=fetch_models,
+ inputs=[base_url, api_key],
+ outputs=[model_id]
+ )
if __name__ == "__main__":
    # Bring the agent online before serving the UI.
    agent.start()

    for banner_line in (
        "π L.C.A.R.S Agent Started!",
        f"π€ Model: {agent.model_id}",
        f"π Base URL: {agent.base_url}",
        f"π¬ Default Conversation: {agent.current_conversation}",
    ):
        print(banner_line)

    # Serve on all interfaces with a public share link.
    demo.launch(share=True, server_name="0.0.0.0", server_port=7860)
\ No newline at end of file