precison9 committed on
Commit
2ce66a8
·
verified ·
1 Parent(s): 153ac71

Add ui.py — complete Gradio UI with 6 tabs

Browse files
Files changed (1) hide show
  1. multeclaw/ui.py +562 -0
multeclaw/ui.py ADDED
@@ -0,0 +1,562 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Multeclaw Gradio UI — complete multi-tab interface with chat, settings, status, and tools.
3
+ """
4
+
5
+ import json
6
+ import time
7
+ import os
8
+ from datetime import datetime
9
+
10
+ import gradio as gr
11
+
12
+ from multeclaw.config import MODEL_REGISTRY, SYSTEM_PROMPTS, BUILT_IN_TOOLS, Provider
13
+ from multeclaw.agent import MulteclawAgent, TaskRouter
14
+
# ─── Global Agent Instance ─────────────────────────────────────────────────────
# One shared agent for the whole process; every UI handler below closes over it.
agent = MulteclawAgent()

# ─── CSS ───────────────────────────────────────────────────────────────────────
# Page styling for the app (header banner, status colors, chatbot sizing,
# settings cards). Intended to be attached to the Gradio app via its css option.
CSS = """
/* Layout */
.gradio-container { max-width: 1500px !important; margin: auto; }

/* Header */
.header-banner {
    background: linear-gradient(135deg, #0f0c29, #302b63, #24243e);
    border-radius: 16px;
    padding: 24px 32px;
    margin-bottom: 16px;
    color: white;
    text-align: center;
}
.header-banner h1 { margin: 0; font-size: 2em; }
.header-banner p { margin: 4px 0 0 0; opacity: 0.8; font-size: 0.95em; }

/* Model badge */
.model-badge {
    display: inline-block;
    padding: 2px 10px;
    border-radius: 12px;
    font-size: 11px;
    font-weight: 600;
    letter-spacing: 0.5px;
}

/* Status indicators */
.status-ok { color: #4ade80 !important; }
.status-err { color: #f87171 !important; }
.status-warn { color: #fbbf24 !important; }

/* Chatbot tweaks */
#main-chatbot { min-height: 520px; }
#main-chatbot .message { max-width: 85% !important; }

/* Settings cards */
.settings-card {
    border: 1px solid var(--border-color-primary);
    border-radius: 12px;
    padding: 16px;
    margin-bottom: 12px;
}

/* Footer */
.footer-text { text-align: center; opacity: 0.5; font-size: 0.8em; margin-top: 16px; }
"""

# ─── JS ────────────────────────────────────────────────────────────────────────
# Script injected into the page <head>; sets the browser tab title once the
# DOM is ready.
HEAD_JS = """
<script>
document.addEventListener('DOMContentLoaded', () => {
    document.title = 'Multeclaw — AI Agent System';
});
</script>
"""
# ─── Chat Handler ──────────────────────────────────────────────────────────────
def chat_respond(
    message: str,
    history: list[dict],
    model_name: str,
    persona: str,
    custom_system_prompt: str,
    temperature: float,
    max_tokens: int,
    enable_tools: bool,
    enable_planning: bool,
):
    """Stream a reply from the agent for the Gradio ChatInterface.

    Yields the accumulated reply text after every chunk so the UI renders a
    live-updating message. If the stream fails, the partial text plus a
    formatted error marker is yielded as the final value instead of raising.
    """
    # Ignore empty / whitespace-only submissions.
    if not message.strip():
        yield ""
        return

    # "Custom" persona uses the free-form prompt box; any other persona is
    # looked up in the canned prompts, falling back to the default agent one.
    system_prompt = (
        custom_system_prompt
        if persona == "Custom"
        else SYSTEM_PROMPTS.get(persona, SYSTEM_PROMPTS["Multeclaw Agent"])
    )

    accumulated = ""
    try:
        stream = agent.chat_stream(
            message=message,
            history=history,
            model_name=model_name,
            system_prompt=system_prompt,
            temperature=temperature,
            max_tokens=int(max_tokens),
            enable_tools=enable_tools,
            enable_planning=enable_planning,
        )
        for piece in stream:
            accumulated += piece
            yield accumulated
    except Exception as e:
        # Surface the failure inline rather than crashing the UI stream.
        yield accumulated + f"\n\n❌ **Error**: {type(e).__name__}: {str(e)}"
119
+
120
+
# ─── Settings Handlers ─────────────────────────────────────────────────────────
def save_api_keys(openai_key, anthropic_key, hf_key, groq_key, ollama_url):
    """Store submitted provider API keys on the agent's client (in-memory only).

    Returns a ``(status_text, dropdown_update)`` pair so the Settings tab can
    show what was saved and refresh the Chat tab's model dropdown to reflect
    the newly usable providers. Blank fields are skipped.
    """
    results = []
    # (provider id, submitted value, human-readable label) — one row per
    # key-based provider; keeps the save logic in one loop instead of four
    # copy-pasted if-blocks.
    key_fields = [
        ("openai", openai_key, "OpenAI"),
        ("anthropic", anthropic_key, "Anthropic"),
        ("huggingface", hf_key, "HuggingFace"),
        ("groq", groq_key, "Groq"),
    ]
    for provider, key, label in key_fields:
        if key:
            agent.client.set_api_key(provider, key)
            results.append(f"✅ {label} key saved")
    # Ollama is URL-based rather than key-based, so it is handled separately.
    if ollama_url:
        agent.client.set_ollama_url(ollama_url)
        results.append(f"✅ Ollama URL set to {ollama_url}")

    if not results:
        return "⚠️ No keys provided.", get_available_models()

    return "\n".join(results), get_available_models()
145
+
146
+
def test_connections():
    """Probe every configured provider and return the results as pretty JSON."""
    report = agent.client.check_connections()
    return json.dumps(report, indent=2)
151
+
152
+
def get_available_models():
    """Build a dropdown update whose choices are the currently usable models."""
    models = agent.client.get_available_models()
    return gr.update(choices=models)
156
+
157
+
# ─── Status Handlers ───────────────────────────────────────────────────────────
def get_status():
    """Snapshot the agent's runtime state as a pretty-printed JSON string."""
    mem = agent.memory
    models = agent.client.get_available_models()
    # First conversation entry carries the session start timestamp, if any.
    started = mem.conversation[0]["timestamp"] if mem.conversation else "N/A"
    snapshot = {
        "available_models": models,
        "total_models_configured": len(models),
        "conversation_messages": len(mem.conversation),
        "tool_calls_made": len(mem.tool_results),
        "plans_executed": len(mem.task_plans),
        "session_started": started,
    }
    return json.dumps(snapshot, indent=2)
172
+
173
+
def get_agent_logs():
    """Return the most recent 100 log entries from the agent."""
    recent = agent.get_logs(100)
    return recent
176
+
177
+
def clear_all():
    """Wipe the agent's memory and report completion to the UI."""
    agent.clear_memory()
    return "🗑️ All memory and logs cleared."
181
+
182
+
def get_model_info(model_name: str):
    """Render a Markdown capability card for *model_name*.

    Returns a placeholder prompt when the name is not in the registry.
    """
    spec = MODEL_REGISTRY.get(model_name)
    if spec is None:
        return "Select a model to see details."
    return f"""### {spec.display_name}
| Property | Value |
|----------|-------|
| **Provider** | {spec.provider.value.title()} |
| **Model ID** | `{spec.model_id}` |
| **Tier** | {spec.tier.title()} |
| **Context Window** | {spec.context_window:,} tokens |
| **Max Output** | {spec.max_tokens:,} tokens |
| **Streaming** | {'✅' if spec.supports_streaming else '❌'} |
| **Tool Use** | {'✅' if spec.supports_tools else '❌'} |

{spec.description}"""
200
+
201
+
# ─── Build the UI ──────────────────────────────────────────────────────────────
# Shared Gradio theme for the whole app (indigo/purple/slate, Inter +
# JetBrains Mono fonts).
THEME = gr.themes.Soft(
    primary_hue=gr.themes.colors.indigo,
    secondary_hue=gr.themes.colors.purple,
    neutral_hue=gr.themes.colors.slate,
    font=[gr.themes.GoogleFont("Inter"), "system-ui", "sans-serif"],
    font_mono=[gr.themes.GoogleFont("JetBrains Mono"), "monospace"],
)
210
+
211
+
def build_ui() -> gr.Blocks:
    """Assemble the complete 6-tab Multeclaw interface and return the Blocks app.

    Tabs: Chat, Settings (API keys), Models (registry table), Tools (docs +
    quick tester), Status (state/logs), and About.

    FIX: ``THEME``, ``CSS``, and ``HEAD_JS`` are now bound on the
    ``gr.Blocks(...)`` constructor — that is where Gradio accepts them;
    ``Blocks.launch()`` has no ``theme``/``css``/``head`` parameters, so the
    previous arrangement (passing them in ``main()``'s ``launch`` call) never
    applied them and raises ``TypeError``.
    """
    with gr.Blocks(
        title="Multeclaw — AI Agent System",
        theme=THEME,
        css=CSS,
        head=HEAD_JS,
        fill_height=True,
    ) as demo:

        # ─── Header ───────────────────────────────────────────────────────
        gr.HTML("""
        <div class="header-banner">
            <h1>🦅 Multeclaw</h1>
            <p>Multi-Model AI Agent System — GPT · Claude · Llama · Mistral · Qwen · Groq · Ollama</p>
        </div>
        """)

        # ─── Main Tabs ────────────────────────────────────────────────────
        with gr.Tabs(selected="chat") as tabs:

            # ═══════════════════════════════════════════════════════════════
            # TAB 1: CHAT
            # ═══════════════════════════════════════════════════════════════
            with gr.Tab("💬 Chat", id="chat"):
                with gr.Row():
                    # Left sidebar — model, persona, sampling, agent toggles.
                    with gr.Column(scale=1, min_width=280):
                        gr.Markdown("### 🤖 Model")
                        model_selector = gr.Dropdown(
                            choices=list(MODEL_REGISTRY.keys()),
                            value="GPT-4o Mini",
                            label="Select Model",
                            interactive=True,
                            elem_id="model-selector",
                        )
                        model_info_display = gr.Markdown(
                            value=get_model_info("GPT-4o Mini"),
                            elem_id="model-info",
                        )
                        model_selector.change(
                            fn=get_model_info,
                            inputs=[model_selector],
                            outputs=[model_info_display],
                        )

                        gr.Markdown("### 🎭 Persona")
                        persona_selector = gr.Dropdown(
                            choices=list(SYSTEM_PROMPTS.keys()),
                            value="Multeclaw Agent",
                            label="System Persona",
                            interactive=True,
                        )
                        custom_prompt = gr.Textbox(
                            label="Custom System Prompt",
                            placeholder="Enter your custom system prompt here...",
                            lines=3,
                            visible=False,
                        )
                        # The free-form prompt box is only shown for "Custom".
                        persona_selector.change(
                            fn=lambda p: gr.update(visible=(p == "Custom")),
                            inputs=[persona_selector],
                            outputs=[custom_prompt],
                        )

                        gr.Markdown("### ⚙️ Parameters")
                        temperature = gr.Slider(
                            minimum=0.0, maximum=2.0, value=0.7, step=0.05,
                            label="Temperature",
                            info="0 = deterministic, 2 = very creative",
                        )
                        max_tokens = gr.Slider(
                            minimum=256, maximum=16384, value=4096, step=256,
                            label="Max Tokens",
                        )

                        gr.Markdown("### 🛠️ Agent Features")
                        enable_tools = gr.Checkbox(
                            value=True, label="Enable Tools",
                            info="Calculator, code execution, file I/O",
                        )
                        enable_planning = gr.Checkbox(
                            value=True, label="Enable Planning",
                            info="Auto-decompose complex tasks into steps",
                        )

                    # Right side — streaming chat surface.
                    with gr.Column(scale=3):
                        chat_interface = gr.ChatInterface(
                            fn=chat_respond,
                            chatbot=gr.Chatbot(
                                elem_id="main-chatbot",
                                height=580,
                                placeholder="🦅 **Multeclaw is ready.** Select a model, configure your settings, and start chatting.\n\nI support GPT, Claude, Llama, Mistral, Qwen, Groq, and Ollama models.",
                            ),
                            additional_inputs=[
                                model_selector,
                                persona_selector,
                                custom_prompt,
                                temperature,
                                max_tokens,
                                enable_tools,
                                enable_planning,
                            ],
                            examples=[
                                ["Explain quantum computing in simple terms"],
                                ["Write a Python function to find all prime numbers up to n"],
                                ["Calculate the compound interest on $10,000 at 7% for 20 years"],
                                ["Step by step, analyze the pros and cons of microservices vs monolith"],
                                ["Create a complete REST API design for a task management app"],
                            ],
                            save_history=True,
                            editable=True,
                            concurrency_limit=5,
                        )

            # ═══════════════════════════════════════════════════════════════
            # TAB 2: API KEYS & SETTINGS
            # ═══════════════════════════════════════════════════════════════
            with gr.Tab("🔑 Settings", id="settings"):
                gr.Markdown("### API Key Configuration")
                gr.Markdown("Enter your API keys below. Keys are stored in-memory only — they are **never** saved to disk.")

                with gr.Row():
                    with gr.Column():
                        gr.Markdown("#### Cloud Providers")
                        openai_key = gr.Textbox(
                            label="OpenAI API Key",
                            type="password",
                            placeholder="sk-...",
                            info="For GPT-4o, GPT-4o Mini, GPT-4 Turbo",
                        )
                        anthropic_key = gr.Textbox(
                            label="Anthropic API Key",
                            type="password",
                            placeholder="sk-ant-...",
                            info="For Claude 4 Opus, Sonnet, Haiku",
                        )
                        hf_key = gr.Textbox(
                            label="HuggingFace Token",
                            type="password",
                            placeholder="hf_...",
                            info="For Llama, Qwen, Mistral via HF Inference",
                        )
                        groq_key = gr.Textbox(
                            label="Groq API Key",
                            type="password",
                            placeholder="gsk_...",
                            info="For ultra-fast Llama 3, Mixtral inference",
                        )

                    with gr.Column():
                        gr.Markdown("#### Local Models")
                        ollama_url = gr.Textbox(
                            label="Ollama Server URL",
                            value="http://localhost:11434",
                            info="URL of your local Ollama instance",
                        )

                gr.Markdown("#### Actions")
                with gr.Row():
                    save_btn = gr.Button("💾 Save Keys", variant="primary", size="lg")
                    test_btn = gr.Button("🔌 Test Connections", variant="secondary", size="lg")

                save_status = gr.Textbox(label="Status", interactive=False, lines=5)
                connection_results = gr.Code(label="Connection Test Results", language="json", lines=10)

                # Saving keys also refreshes the Chat tab's model dropdown.
                save_btn.click(
                    fn=save_api_keys,
                    inputs=[openai_key, anthropic_key, hf_key, groq_key, ollama_url],
                    outputs=[save_status, model_selector],
                )
                test_btn.click(
                    fn=test_connections,
                    inputs=[],
                    outputs=[connection_results],
                )

            # ═══════════════════════════════════════════════════════════════
            # TAB 3: MODELS
            # ═══════════════════════════════════════════════════════════════
            with gr.Tab("📦 Models", id="models"):
                gr.Markdown("### Model Registry")
                gr.Markdown("All supported models and their capabilities.")

                # Static capability table, built once at UI-construction time.
                model_rows = []
                for name, m in MODEL_REGISTRY.items():
                    model_rows.append([
                        name,
                        m.provider.value.title(),
                        m.tier.title(),
                        f"{m.context_window:,}",
                        "✅" if m.supports_streaming else "❌",
                        "✅" if m.supports_tools else "❌",
                        m.description,
                    ])

                gr.Dataframe(
                    value=model_rows,
                    headers=["Model", "Provider", "Tier", "Context", "Streaming", "Tools", "Description"],
                    datatype=["str", "str", "str", "str", "str", "str", "str"],
                    interactive=False,
                    wrap=True,
                )

            # ═══════════════════════════════════════════════════════════════
            # TAB 4: TOOLS
            # ═══════════════════════════════════════════════════════════════
            with gr.Tab("🛠️ Tools", id="tools"):
                gr.Markdown("### Built-in Agent Tools")
                gr.Markdown("These tools are available when **Enable Tools** is checked in Chat settings.")

                # One collapsible card per tool, rendering its JSON-schema params.
                for name, tool in BUILT_IN_TOOLS.items():
                    with gr.Accordion(f"🔧 {name}", open=False):
                        gr.Markdown(f"**Description:** {tool['description']}")
                        params = tool["parameters"]["properties"]
                        required = tool["parameters"].get("required", [])
                        param_text = ""
                        for pname, pdef in params.items():
                            req = " *(required)*" if pname in required else ""
                            param_text += f"- `{pname}` ({pdef['type']}): {pdef.get('description', '')}{req}\n"
                        gr.Markdown(f"**Parameters:**\n{param_text}")

                gr.Markdown("---")
                gr.Markdown("### Quick Tool Test")
                with gr.Row():
                    test_tool_name = gr.Dropdown(
                        choices=list(BUILT_IN_TOOLS.keys()),
                        value="calculator",
                        label="Tool",
                    )
                    test_tool_input = gr.Textbox(
                        label="Input (JSON arguments)",
                        value='{"expression": "2**10 + sqrt(144)"}',
                        lines=2,
                    )
                test_tool_btn = gr.Button("▶️ Run Tool", variant="primary")
                test_tool_output = gr.Code(label="Result", language="json")

                def run_tool_test(tool_name, input_json):
                    """Parse the JSON args and run the selected tool; errors come back as JSON too."""
                    try:
                        args = json.loads(input_json)
                        result = agent.tools.execute(tool_name, args)
                        return json.dumps(result, indent=2)
                    except json.JSONDecodeError as e:
                        return json.dumps({"error": f"Invalid JSON: {str(e)}"}, indent=2)
                    except Exception as e:
                        return json.dumps({"error": str(e)}, indent=2)

                test_tool_btn.click(
                    fn=run_tool_test,
                    inputs=[test_tool_name, test_tool_input],
                    outputs=[test_tool_output],
                )

            # ═══════════════════════════════════════════════════════════════
            # TAB 5: STATUS & LOGS
            # ═══════════════════════════════════════════════════════════════
            with gr.Tab("📊 Status", id="status"):
                gr.Markdown("### Agent Status & Monitoring")

                with gr.Row():
                    with gr.Column():
                        status_display = gr.Code(label="Agent Status", language="json", lines=12)
                        refresh_status_btn = gr.Button("🔄 Refresh Status", variant="secondary")
                        refresh_status_btn.click(fn=get_status, outputs=[status_display])

                    with gr.Column():
                        logs_display = gr.Textbox(
                            label="Agent Logs",
                            lines=15,
                            interactive=False,
                        )
                        with gr.Row():
                            refresh_logs_btn = gr.Button("🔄 Refresh Logs")
                            clear_btn = gr.Button("🗑️ Clear All", variant="stop")

                        refresh_logs_btn.click(fn=get_agent_logs, outputs=[logs_display])
                        clear_btn.click(fn=clear_all, outputs=[logs_display])

            # ═══════════════════════════════════════════════════════════════
            # TAB 6: ABOUT
            # ═══════════════════════════════════════════════════════════════
            with gr.Tab("ℹ️ About", id="about"):
                gr.Markdown("""
### 🦅 Multeclaw — Multi-Model AI Agent System

**Multeclaw** is a complete AI agent framework that connects to multiple LLM providers through a single interface.

---

#### 🏗️ Architecture

| Component | Role |
|-----------|------|
| **Router** | Classifies tasks → direct, tool-assisted, multi-step, code, analysis |
| **Planner** | Decomposes complex objectives into executable step sequences |
| **Executor** | Runs LLM completions and tool calls with streaming |
| **Memory** | Tracks conversation history, tool results, and session context |
| **Tool System** | Sandboxed calculator, code execution, file I/O, web search |
| **Safety Layer** | Input/output content filtering |
| **Repair Loop** | Retries failed tool calls with error context |

#### 🤖 Supported Providers

| Provider | Models | Key Feature |
|----------|--------|-------------|
| **OpenAI** | GPT-4o, GPT-4o Mini, GPT-4 Turbo | Strong reasoning, tool calling |
| **Anthropic** | Claude 4 Opus, Sonnet, 3.5 Haiku | Deep analysis, long context |
| **HuggingFace** | Llama 3, Qwen 2.5, Mistral | Open models via inference API |
| **Groq** | Llama 3 70B, Mixtral 8x7B | Ultra-fast inference |
| **Ollama** | Any local model | Privacy, offline use |

#### 🔄 Agent Loops

1. **Reasoning Loop** — LLM processes the request with full context
2. **Tool Loop** — LLM decides to call tools, results fed back iteratively
3. **Planning Loop** — Complex tasks decomposed into steps, executed sequentially
4. **Repair Loop** — Failed operations retried with error context

#### 🛡️ Safety

- Input content filtering blocks harmful requests
- Code execution is sandboxed with timeout limits
- API keys stored in-memory only, never persisted to disk
- File operations restricted to safe paths

---

*Built with Gradio, OpenAI SDK, Anthropic SDK, HuggingFace Hub, and LiteLLM.*
""")

        # Footer
        gr.HTML('<p class="footer-text">Multeclaw v1.0 — Multi-Model AI Agent System</p>')

    return demo
545
+
546
+
# ─── Entry Point ───────────────────────────────────────────────────────────────
def main():
    """Build the Multeclaw UI and serve it on all interfaces at port 7860.

    FIX: the previous version passed ``theme=``, ``css=``, and ``head=`` to
    ``demo.launch()``. Those are ``gr.Blocks()`` constructor parameters, not
    ``launch()`` parameters, so the call raised ``TypeError`` at startup —
    they belong in ``build_ui()``'s ``gr.Blocks(...)`` call instead.
    """
    demo = build_ui()
    demo.launch(
        server_name="0.0.0.0",  # listen on all interfaces, not just localhost
        server_port=7860,
        share=False,
        show_error=True,
    )


if __name__ == "__main__":
    main()