# AI_MAGI / magi_web_interface.py
# Uploaded by LordPBA via huggingface_hub (commit 76fd9ac, verified)
"""
MAGI System - Web Interface v2.0
Neon Genesis Evangelion AI Simulation
Gradio web interface for the MAGI multi-agent system
"""
import gradio as gr
import sys
import os
import io
import re
import threading
import queue
from contextlib import redirect_stdout, redirect_stderr
from pathlib import Path
from datetime import datetime
from typing import Tuple, Generator
# Add parent directory to path
sys.path.insert(0, str(Path(__file__).parent))
# Import MAGI system
from Main_core_002 import analyze_question
# Evangelion-themed CSS
# Custom stylesheet injected via gr.Blocks(css=...): dark NERV-style palette,
# monospace typography, centered text by default, with a left-aligned override
# for the #live-logs textbox so streamed logs stay readable.
EVANGELION_CSS: str = """
/* NERV/MAGI Theme - Evangelion Style */
.gradio-container {
font-family: 'Courier New', monospace !important;
background: linear-gradient(135deg, #0a0e1a 0%, #1a1f2e 100%) !important;
}
.contain {
background: rgba(26, 31, 46, 0.95) !important;
border: 2px solid #d32f2f !important;
border-radius: 0px !important;
}
h1, h2, h3, h4, h5, h6, .centered-markdown {
color: #ff6f00 !important;
font-family: 'Courier New', monospace !important;
text-transform: uppercase !important;
letter-spacing: 2px !important;
text-shadow: 0 0 10px rgba(211, 47, 47, 0.5) !important;
text-align: center !important;
}
.output-markdown, .gr-textbox, .gradio-markdown, .gradio-label, .gradio-status {
text-align: center !important;
}
.tab-nav button {
background: #1a1f2e !important;
color: #00bcd4 !important;
border: 1px solid #d32f2f !important;
font-weight: bold !important;
}
.tab-nav button.selected {
background: #d32f2f !important;
color: white !important;
border: 2px solid #ff6f00 !important;
}
textarea, input {
background: #0a0e1a !important;
color: #00ff41 !important;
border: 1px solid #00bcd4 !important;
font-family: 'Courier New', monospace !important;
}
.output-markdown {
background: #0a0e1a !important;
color: #00ff41 !important;
border: 1px solid #d32f2f !important;
padding: 20px !important;
font-family: 'Courier New', monospace !important;
text-align: center !important;
}
button {
background: linear-gradient(135deg, #d32f2f 0%, #ff6f00 100%) !important;
color: white !important;
border: none !important;
font-weight: bold !important;
text-transform: uppercase !important;
letter-spacing: 1px !important;
box-shadow: 0 0 20px rgba(211, 47, 47, 0.5) !important;
}
button:hover {
box-shadow: 0 0 30px rgba(255, 111, 0, 0.8) !important;
}
.progress-bar {
background: #d32f2f !important;
}
footer {
color: #00bcd4 !important;
text-align: center !important;
}
/* Override alignment for live logs for readability */
#live-logs textarea {
text-align: left !important;
font-family: 'Courier New', monospace !important;
white-space: pre-wrap !important;
}
"""
def process_magi_query_stream(
    question: str,
    provider: str = "Groq",
    ollama_model: str = "",
    enable_search: bool = False,
    temperature: float = 0.5,
    clean_logs: bool = True,
) -> Generator[Tuple[str, str, str], None, None]:
    """
    Run a MAGI analysis in a background thread and stream progress to the UI.

    Yields successive ``(result_text, status_message, live_logs)`` tuples so
    the Gradio frontend can update the result pane, status line and live-log
    box while the analysis runs.

    Args:
        question: The user's question; blank/whitespace input short-circuits
            with an error tuple.
        provider: Provider label from the dropdown ("Groq", "OpenAI",
            "Ollama (local)").
        ollama_model: Model name, only meaningful when provider is Ollama.
        enable_search: Whether internet search is enabled for the agents.
        temperature: Sampling temperature forwarded to the backend.
        clean_logs: When True, strip ANSI escape codes and carriage returns
            from captured output before showing it.
    """
    result_text = ""
    log_text = ""

    # Guard clause: reject empty / whitespace-only input immediately.
    if not question or not question.strip():
        yield ("❌ ERROR: Please enter a question.", "⚠️ No input provided", "")
        return

    # Normalize the dropdown label to the backend's provider identifier.
    provider_lower = provider.lower()
    if provider_lower == "ollama (local)":
        provider_lower = "ollama"

    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    # NOTE: content kept flush-left inside the f-string so no indentation
    # leaks into the user-visible banner. Box-drawing characters repaired
    # from a previous encoding corruption.
    header = f"""
╔══════════════════════════════════════════════════════════════════╗
║                       MAGI SYSTEM ANALYSIS                       ║
║                 Multi-Agent General Intelligence                 ║
╚══════════════════════════════════════════════════════════════════╝
⏰ Timestamp: {timestamp}
❓ Question: {question}
🤖 Provider: {provider}
🌐 Search: {"Enabled" if enable_search else "Disabled"}
🌡️ Temperature: {temperature}
🦙 Ollama Model: {ollama_model if provider_lower == "ollama" else "-"}
{'='*70}
EXECUTING THREE-PERSPECTIVE ANALYSIS...
{'='*70}
"""
    # Immediately show header in logs, before the worker starts.
    log_text += header
    yield (result_text, "🚀 Analysis started...", log_text)

    # Queue carrying captured stdout/stderr chunks from the worker thread;
    # a None sentinel signals completion. (String annotation keeps this
    # valid on interpreters where Queue[str | None] cannot be subscripted
    # at runtime.)
    q: "queue.Queue[str | None]" = queue.Queue()

    class QueueWriter(io.TextIOBase):
        """File-like object that forwards every non-empty write to ``q``."""

        def write(self, s: str) -> int:
            if s:
                q.put(s)
            return len(s)

    ansi_escape = re.compile(r"\x1b\[[0-?]*[ -/]*[@-~]")

    def sanitize(chunk: str) -> str:
        """Strip ANSI color/control sequences and carriage returns."""
        chunk = ansi_escape.sub("", chunk)
        return chunk.replace("\r", "")

    # Shared holder so the worker thread can hand back its result or error.
    analysis_result_holder = {"result": None, "error": None}

    def worker():
        """Run the analysis with stdout/stderr redirected into the queue."""
        try:
            with redirect_stdout(QueueWriter()), redirect_stderr(QueueWriter()):
                res = analyze_question(
                    question=question,
                    provider=provider_lower,
                    ollama_model=ollama_model,
                    enable_search=enable_search,
                    temperature=temperature
                )
            analysis_result_holder["result"] = res
        except Exception as e:  # noqa: BLE001 - surface any backend failure in the UI
            analysis_result_holder["error"] = e
        finally:
            q.put(None)  # Sentinel: always signal completion, even on error.

    t = threading.Thread(target=worker, daemon=True)
    t.start()

    # Drain the queue, appending chunks to the log and streaming UI updates.
    while True:
        try:
            item = q.get(timeout=0.2)
        except queue.Empty:
            # Heartbeat: re-yield unchanged texts to keep the UI responsive.
            yield (result_text, "⏳ Running analysis...", log_text)
            continue
        if item is None:
            break
        chunk = sanitize(item) if clean_logs else item
        log_text += chunk
        # Cap the buffer so a chatty backend cannot grow the log unboundedly.
        if len(log_text) > 200_000:
            log_text = log_text[-200_000:]
        yield (result_text, "⏳ Running analysis...", log_text)

    # The sentinel is posted in the worker's finally-block, so this join
    # returns essentially immediately; it just tidies up the thread.
    t.join()

    # Thread finished: prepare final outputs.
    if analysis_result_holder["error"] is not None:
        e = analysis_result_holder["error"]
        result_text = f"""
╔══════════════════════════════════════════════════════════════════╗
║                              ERROR                               ║
╚══════════════════════════════════════════════════════════════════╝
❌ An error occurred during MAGI analysis:
{str(e)}
Please check:
- Your API keys are configured in config/.env
- You have a stable internet connection (if using cloud providers)
- The question is not empty
"""
        yield (result_text, f"❌ Error: {str(e)}", log_text)
        return

    res = analysis_result_holder["result"]
    # Robustness: the backend is expected to return a mapping with a "result"
    # key (see the success path's original res["result"] access); fall back
    # to the raw string form instead of crashing on an unexpected shape.
    if isinstance(res, dict) and "result" in res:
        body = res["result"]
    else:
        body = str(res)
    result_text = header + "\n" + body + "\n\n" + "=" * 70
    # Report the actual completion time, not the analysis start timestamp.
    done_at = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    yield (result_text, f"✅ Analysis completed successfully at {done_at}", log_text)
def create_magi_interface():
    """Create the Gradio interface for MAGI system.

    Builds the themed Blocks layout (question input, advanced settings,
    example questions, live-log/result/status outputs) and wires the event
    handlers for analysis, provider switching and clearing.

    Returns:
        The assembled ``gr.Blocks`` interface, ready to ``launch()``.
    """
    with gr.Blocks(css=EVANGELION_CSS, title="MAGI System", theme=gr.themes.Base()) as interface:
        # Header
        gr.Markdown("""
# 🔺 MAGI SYSTEM 🔺
## Multi-Agent General Intelligence
### *Based on Neon Genesis Evangelion*
---
The MAGI system consists of three AI agents, each representing a different aspect of Dr. Naoko Akagi's personality:
- **MELCHIOR-1**: Scientific analysis (logic and data)
- **BALTHASAR-2**: Ethical evaluation (emotions and morals)
- **CASPER-3**: Practical assessment (social and real-world)
All three perspectives are synthesized to provide comprehensive analysis.
""", elem_classes="centered-markdown")
        # Main interface
        with gr.Row():
            with gr.Column(scale=2):
                # Input section
                question_input = gr.Textbox(
                    label="🎯 Enter Your Question",
                    placeholder="What question would you like the MAGI system to analyze?",
                    lines=3
                )
                # Settings
                with gr.Accordion("⚙️ Advanced Settings", open=False):
                    provider_dropdown = gr.Dropdown(
                        choices=["Groq", "OpenAI", "Ollama (local)"],
                        value="Groq",
                        label="LLM Provider",
                        info="Groq is free and fast, OpenAI requires paid API key, Ollama runs locally"
                    )
                    # Hidden unless the Ollama provider is selected (see
                    # update_ollama_visibility below).
                    ollama_model_input = gr.Textbox(
                        label="Ollama Model Name (if using Ollama)",
                        placeholder="e.g. llama3, phi3, mistral, ...",
                        visible=False
                    )
                    search_checkbox = gr.Checkbox(
                        label="Enable Internet Search",
                        value=False,
                        info="Requires SERPER_API_KEY in .env file"
                    )
                    temperature_slider = gr.Slider(
                        minimum=0.0,
                        maximum=1.0,
                        value=0.5,
                        step=0.1,
                        label="Temperature",
                        info="Higher = more creative, Lower = more focused"
                    )
                    clean_logs_checkbox = gr.Checkbox(
                        label="Clean colored logs (strip ANSI)",
                        value=True,
                        info="Recommended for readable logs"
                    )
                # Action buttons
                with gr.Row():
                    analyze_btn = gr.Button("🚀 EXECUTE MAGI ANALYSIS", variant="primary", size="lg")
                    clear_btn = gr.Button("🗑️ Clear", variant="secondary")
                # Example questions now placed under the buttons in the left column
                gr.Examples(
                    examples=[
                        ["Should we deploy EVA Unit-01 against the approaching Angel despite Shinji's unstable sync ratio?"],
                        ["Is it ethical to proceed with the Human Instrumentality Project to eliminate individual suffering?"],
                        ["Should NERV prioritize civilian evacuation or Angel neutralization during an active attack on Tokyo-3?"],
                        ["What is the acceptable risk threshold for activating a Dummy Plug system in combat operations?"],
                        ["Should we collaborate with SEELE's directives or maintain autonomous control over NERV operations?"]
                    ],
                    inputs=question_input,
                    label="💡 Example Questions"
                )
            with gr.Column(scale=3):
                # Output section
                logs_output = gr.Textbox(
                    # FIX: label previously contained a mojibake replacement
                    # character ("�️"); restored to a real emoji.
                    label="🖥️ Live Logs",
                    lines=18,
                    max_lines=40,
                    interactive=False,
                    show_copy_button=True,
                    value="",
                    elem_id="live-logs",  # targeted by the CSS left-align override
                )
                result_output = gr.Textbox(
                    label="📊 MAGI Analysis Result",
                    lines=16,
                    max_lines=30,
                    show_copy_button=True,
                    elem_classes="centered-markdown"
                )
                status_output = gr.Textbox(
                    label="ℹ️ Status",
                    lines=1,
                    interactive=False,
                    elem_classes="centered-markdown"
                )
        # Footer
        gr.Markdown("""
---
**MAGI System v2.0** | Powered by CrewAI & Groq
*"The truth lies in the synthesis of three perspectives"*
🔴 NERV Systems Division | 🟠 MAGI Supercomputer Array
""")
        # Event handlers
        def update_ollama_visibility(provider):
            # Show the model-name box only for the local Ollama provider.
            return gr.update(visible=(provider == "Ollama (local)"))

        provider_dropdown.change(
            fn=update_ollama_visibility,
            inputs=provider_dropdown,
            outputs=ollama_model_input
        )
        # Generator fn: Gradio streams each yielded tuple into the outputs.
        analyze_btn.click(
            fn=process_magi_query_stream,
            inputs=[question_input, provider_dropdown, ollama_model_input, search_checkbox, temperature_slider, clean_logs_checkbox],
            outputs=[result_output, status_output, logs_output]
        )
        clear_btn.click(
            fn=lambda: ("", "", "", ""),
            inputs=None,
            outputs=[question_input, result_output, status_output, logs_output]
        )
    return interface
def main():
    """Print the startup banner, build the MAGI interface and serve it."""
    banner = "=" * 70
    startup_messages = (
        banner,
        "MAGI SYSTEM - WEB INTERFACE STARTING",
        banner,
        "\n🔺 Initializing NERV MAGI Supercomputer Array...",
        "🔸 Loading: MELCHIOR-1 (Scientific)",
        "🔸 Loading: BALTHASAR-2 (Ethical)",
        "🔸 Loading: CASPER-3 (Practical)",
        "\n✅ All systems operational",
        "🌐 Launching web interface...\n",
    )
    for message in startup_messages:
        print(message)

    magi_app = create_magi_interface()
    # Launch with custom settings
    magi_app.launch(
        server_name="0.0.0.0",  # Allow external access
        server_port=7862,       # Different port to avoid conflict
        share=True,             # Create public link
        inbrowser=True,         # Open in browser automatically
        show_error=True
    )


if __name__ == "__main__":
    main()