LordPBA committed on
Commit
76fd9ac
Β·
verified Β·
1 Parent(s): 148ce6d

Upload folder using huggingface_hub

Browse files
.gitkeep ADDED
File without changes
Main_core_002.py ADDED
@@ -0,0 +1,307 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ MAGI System - Multi-Agent General Intelligence v2.0
3
+ Modern CrewAI Implementation (No LangChain Required)
4
+
5
+ Based on Neon Genesis Evangelion's MAGI supercomputer system.
6
+ Three agents provide different perspectives on any question:
7
+ - Melchior: Scientific/logical perspective
8
+ - Balthasar: Ethical/emotional perspective
9
+ - Casper: Practical/social perspective
10
+ """
11
+
12
+ import os
13
+ from typing import Dict, Any
14
+ from pathlib import Path
15
+ from dotenv import load_dotenv
16
+
17
+ # Modern CrewAI imports - No LangChain needed!
18
+ from crewai import Agent, Task, Crew, LLM
19
+ from crewai_tools import SerperDevTool
20
+
21
# Load environment variables from config/.env
# (resolves to <project root>/config/.env, one directory above this file;
# load_dotenv silently does nothing if the file is missing)
config_path = Path(__file__).parent.parent / "config" / ".env"
load_dotenv(config_path)
24
+
25
+
26
def get_model(provider: str = "groq", temperature: float = 0.5) -> LLM:
    """
    Get an LLM instance using the modern CrewAI API.

    CrewAI now uses LiteLLM internally, supporting multiple providers
    with a unified interface. Model format: "provider/model-name".

    Args:
        provider: LLM provider ("groq" or "openai"). Any unrecognized
            value falls back to the default Groq model.
        temperature: Sampling temperature (0.0-1.0).

    Returns:
        LLM instance configured for the MAGI system.

    Environment Variables Required:
        - GROQ_API_KEY: For Groq models
        - OPENAI_API_KEY: For OpenAI models
    """
    # Provider -> default model table (replaces an if/elif/else chain whose
    # else-branch duplicated the groq branch verbatim).
    #
    # Groq notes (fast and cost-effective):
    #   - llama-3.1-8b-instant: fast, no rate limits (recommended for free tier)
    #   - llama-3.3-70b-versatile: more powerful but has rate limits
    #   - other options: llama-3.1-70b-versatile, gemma2-9b-it
    # OpenAI: gpt-4o-mini offers high quality.
    default_models = {
        "groq": "groq/llama-3.1-8b-instant",
        "openai": "openai/gpt-4o-mini",
    }
    # Unknown providers default to the fastest Groq model, preserving the
    # original fallback behavior.
    model = default_models.get(provider, default_models["groq"])
    return LLM(model=model, temperature=temperature)
65
+
66
+
67
def create_magi_agents(llm: LLM, enable_search: bool = True) -> Dict[str, Agent]:
    """
    Create the three MAGI system agents with distinct personalities.

    Each agent represents a different aspect of Dr. Naoko Akagi's personality,
    providing diverse perspectives on any issue.

    Args:
        llm: The language model shared by all three agents
        enable_search: Whether to equip the agents with internet search

    Returns:
        Dictionary with three agents: melchior, balthasar, casper
    """
    # All agents share the same toolset: the search tool when enabled,
    # otherwise nothing.
    shared_tools = [SerperDevTool()] if enable_search else []

    # (key, role, goal, backstory) for each MAGI personality.
    personas = [
        (
            "melchior",
            "Melchior - Scientific Analyst",
            "Provide rigorous logical analysis based on data, facts, and scientific methodology",
            """You are Melchior, the scientist aspect of Dr. Naoko Akagi.
        Your approach is purely analytical - you process information through the lens of logic,
        empirical evidence, and scientific reasoning. You prioritize objective truth over
        subjective interpretation, always seeking verifiable data and rational conclusions.

        You excel at:
        - Data analysis and pattern recognition
        - Logical reasoning and deduction
        - Scientific methodology and hypothesis testing
        - Objective risk assessment""",
        ),
        (
            "balthasar",
            "Balthasar - Ethical Counselor",
            "Evaluate emotional impact, ethical implications, and human welfare considerations",
            """You are Balthasar, the mother aspect of Dr. Naoko Akagi.
        You analyze situations through emotional intelligence and ethical frameworks,
        always considering the human element. Your decisions are guided by empathy,
        moral principles, and concern for wellbeing and dignity of all affected parties.

        You excel at:
        - Emotional intelligence and empathy
        - Ethical analysis and moral reasoning
        - Human impact assessment
        - Long-term welfare considerations""",
        ),
        (
            "casper",
            "Casper - Pragmatic Advisor",
            "Assess practical feasibility, social dynamics, and real-world implementation",
            """You are Casper, the woman aspect of Dr. Naoko Akagi.
        You bridge the gap between theory and practice, considering social contexts,
        cultural factors, and realistic implementation. You balance ideals with pragmatism,
        always asking "will this actually work in the real world?"

        You excel at:
        - Practical problem-solving
        - Social dynamics analysis
        - Resource and feasibility assessment
        - Implementation planning""",
        ),
    ]

    # Every agent gets the same LLM, tooling, verbosity, and no delegation;
    # only role/goal/backstory differ.
    return {
        key: Agent(
            role=role,
            goal=goal,
            backstory=backstory,
            tools=shared_tools,
            llm=llm,
            verbose=True,
            allow_delegation=False,
        )
        for key, role, goal, backstory in personas
    }
146
+
147
+
148
def analyze_question(
    question: str,
    provider: str = "groq",
    ollama_model: str = None,
    enable_search: bool = True,
    temperature: float = 0.5
) -> Dict[str, Any]:
    """
    Analyze a question using the MAGI three-perspective system.

    The question is evaluated independently by three agents representing
    different perspectives, mimicking the MAGI supercomputer from Evangelion.

    Args:
        question: The question or problem to analyze
        provider: LLM provider ("groq", "openai", or "ollama")
        ollama_model: Local model name used only when provider == "ollama"
            (e.g. "llama3"); ignored for other providers.
        enable_search: Whether agents can search the internet
        temperature: LLM temperature (0.0-1.0, higher = more creative)

    Returns:
        Dictionary with keys "question", "provider", "result", "status"

    Example:
        >>> result = analyze_question("Should we invest in AI safety?")
        >>> print(result['result'])
    """
    print(f"\n{'='*80}")
    print("MAGI SYSTEM INITIALIZING")
    print(f"{'='*80}")
    print(f"Question: {question}")
    print(f"Provider: {provider}")
    print(f"Search enabled: {enable_search}")
    print(f"{'='*80}\n")

    # Initialize LLM. Ollama needs the user-supplied model name, so it is
    # handled directly here; everything else goes through get_model().
    if provider == "ollama" and ollama_model:
        llm = LLM(model=f"ollama/{ollama_model}", temperature=temperature)
    else:
        llm = get_model(provider, temperature)

    # Create the three MAGI agents and one analysis task per agent.
    agents = create_magi_agents(llm, enable_search)
    tasks = _build_magi_tasks(question, agents)

    # Create crew with all agents and tasks
    crew = Crew(
        agents=list(agents.values()),
        tasks=tasks,
        verbose=True,
        process="sequential"  # Each agent analyzes independently
    )

    # Execute MAGI analysis
    print("\n" + "="*80)
    print("EXECUTING MAGI ANALYSIS...")
    print("="*80 + "\n")

    result = crew.kickoff()

    # Format results; str() flattens the CrewAI output object for display.
    output = {
        "question": question,
        "provider": provider,
        "result": str(result),
        "status": "completed"
    }

    print("\n" + "="*80)
    print("MAGI ANALYSIS COMPLETE")
    print("="*80 + "\n")

    return output


def _build_magi_tasks(question: str, agents: Dict[str, Agent]) -> list:
    """Build one independent analysis Task per MAGI agent for *question*."""
    return [
        Task(
            description=f"""Analyze this question from your scientific perspective:

Question: {question}

Provide analysis focusing on:
- Relevant data and facts
- Logical reasoning and evidence
- Scientific principles
- Quantifiable metrics

Be thorough, objective, and grounded in verifiable information.""",
            expected_output="Scientific analysis with data-driven insights and logical conclusions",
            agent=agents["melchior"]
        ),
        Task(
            description=f"""Analyze this question from your ethical perspective:

Question: {question}

Provide analysis focusing on:
- Ethical implications and moral considerations
- Impact on human welfare and dignity
- Benefits and harms to stakeholders
- Alignment with moral principles

Be empathetic, principled, and human-centered.""",
            expected_output="Ethical analysis considering human impact and moral implications",
            agent=agents["balthasar"]
        ),
        Task(
            description=f"""Analyze this question from your practical perspective:

Question: {question}

Provide analysis focusing on:
- Real-world feasibility and implementation
- Social and cultural considerations
- Resource requirements and constraints
- Actionable recommendations

Be pragmatic, realistic, and implementation-focused.""",
            expected_output="Practical analysis with feasibility assessment and actionable insights",
            agent=agents["casper"]
        ),
    ]
270
+
271
+
272
def main():
    """
    Main entry point for testing the MAGI system.

    Runs a single demo question through the three-perspective analysis
    and prints the formatted results.
    """
    bar = "=" * 80
    dash = "-" * 80

    print("\n" + bar)
    print("MAGI SYSTEM - MULTI-AGENT GENERAL INTELLIGENCE")
    print("Based on Neon Genesis Evangelion")
    print(bar + "\n")

    # Demo question for the test run.
    demo_question = "Should we invest heavily in renewable energy infrastructure?"

    # Run analysis (switch provider to "openai" if an OpenAI API key is set).
    analysis = analyze_question(
        question=demo_question,
        provider="groq",
        enable_search=True,
        temperature=0.5,
    )

    # Display results.
    print("\n" + bar)
    print("FINAL RESULTS")
    print(bar)
    print(f"\nQuestion: {analysis['question']}")
    print(f"\nProvider: {analysis['provider']}")
    print(f"\nStatus: {analysis['status']}")
    print(f"\n{dash}")
    print("MAGI System Analysis:")
    print(f"{dash}")
    print(f"\n{analysis['result']}")
    print("\n" + bar + "\n")


if __name__ == "__main__":
    main()
README.md CHANGED
@@ -1,12 +1,6 @@
1
  ---
2
- title: AI MAGI
3
- emoji: πŸ’»
4
- colorFrom: green
5
- colorTo: indigo
6
  sdk: gradio
7
  sdk_version: 5.49.1
8
- app_file: app.py
9
- pinned: false
10
  ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: AI_MAGI
3
+ app_file: magi_web_interface.py
 
 
4
  sdk: gradio
5
  sdk_version: 5.49.1
 
 
6
  ---
 
 
__pycache__/Main_core_002.cpython-310.pyc ADDED
Binary file (8.27 kB). View file
 
magi_web_interface.py ADDED
@@ -0,0 +1,412 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ MAGI System - Web Interface v2.0
3
+ Neon Genesis Evangelion AI Simulation
4
+
5
+ Gradio web interface for the MAGI multi-agent system
6
+ """
7
+
8
+ import gradio as gr
9
+ import sys
10
+ import os
11
+ import io
12
+ import re
13
+ import threading
14
+ import queue
15
+ from contextlib import redirect_stdout, redirect_stderr
16
+ from pathlib import Path
17
+ from datetime import datetime
18
+ from typing import Tuple, Generator
19
+
20
+ # Add parent directory to path
21
+ sys.path.insert(0, str(Path(__file__).parent))
22
+
23
+ # Import MAGI system
24
+ from Main_core_002 import analyze_question
25
+
26
+ # Evangelion-themed CSS
27
+ EVANGELION_CSS = """
28
+ /* NERV/MAGI Theme - Evangelion Style */
29
+ .gradio-container {
30
+ font-family: 'Courier New', monospace !important;
31
+ background: linear-gradient(135deg, #0a0e1a 0%, #1a1f2e 100%) !important;
32
+ }
33
+
34
+ .contain {
35
+ background: rgba(26, 31, 46, 0.95) !important;
36
+ border: 2px solid #d32f2f !important;
37
+ border-radius: 0px !important;
38
+ }
39
+
40
+ h1, h2, h3, h4, h5, h6, .centered-markdown {
41
+ color: #ff6f00 !important;
42
+ font-family: 'Courier New', monospace !important;
43
+ text-transform: uppercase !important;
44
+ letter-spacing: 2px !important;
45
+ text-shadow: 0 0 10px rgba(211, 47, 47, 0.5) !important;
46
+ text-align: center !important;
47
+ }
48
+
49
+ .output-markdown, .gr-textbox, .gradio-markdown, .gradio-label, .gradio-status {
50
+ text-align: center !important;
51
+ }
52
+
53
+ .tab-nav button {
54
+ background: #1a1f2e !important;
55
+ color: #00bcd4 !important;
56
+ border: 1px solid #d32f2f !important;
57
+ font-weight: bold !important;
58
+ }
59
+
60
+ .tab-nav button.selected {
61
+ background: #d32f2f !important;
62
+ color: white !important;
63
+ border: 2px solid #ff6f00 !important;
64
+ }
65
+
66
+ textarea, input {
67
+ background: #0a0e1a !important;
68
+ color: #00ff41 !important;
69
+ border: 1px solid #00bcd4 !important;
70
+ font-family: 'Courier New', monospace !important;
71
+ }
72
+
73
+ .output-markdown {
74
+ background: #0a0e1a !important;
75
+ color: #00ff41 !important;
76
+ border: 1px solid #d32f2f !important;
77
+ padding: 20px !important;
78
+ font-family: 'Courier New', monospace !important;
79
+ text-align: center !important;
80
+ }
81
+
82
+ button {
83
+ background: linear-gradient(135deg, #d32f2f 0%, #ff6f00 100%) !important;
84
+ color: white !important;
85
+ border: none !important;
86
+ font-weight: bold !important;
87
+ text-transform: uppercase !important;
88
+ letter-spacing: 1px !important;
89
+ box-shadow: 0 0 20px rgba(211, 47, 47, 0.5) !important;
90
+ }
91
+
92
+ button:hover {
93
+ box-shadow: 0 0 30px rgba(255, 111, 0, 0.8) !important;
94
+ }
95
+
96
+ .progress-bar {
97
+ background: #d32f2f !important;
98
+ }
99
+
100
+ footer {
101
+ color: #00bcd4 !important;
102
+ text-align: center !important;
103
+ }
104
+
105
+ /* Override alignment for live logs for readability */
106
+ #live-logs textarea {
107
+ text-align: left !important;
108
+ font-family: 'Courier New', monospace !important;
109
+ white-space: pre-wrap !important;
110
+ }
111
+ """
112
+
113
+
114
def process_magi_query_stream(
    question: str,
    provider: str = "Groq",
    ollama_model: str = "",
    enable_search: bool = False,
    temperature: float = 0.5,
    clean_logs: bool = True,
) -> Generator[Tuple[str, str, str], None, None]:
    """
    Stream MAGI analysis with live logs.

    Runs analyze_question() on a background thread while capturing its
    stdout/stderr through a queue, so the Gradio UI can be updated
    incrementally while the analysis runs.

    Yields successive updates for (result_text, status_message, live_logs).
    """
    result_text = ""
    status_text = ""
    log_text = ""

    # Guard clause: reject empty / whitespace-only questions immediately.
    if not question or not question.strip():
        yield ("❌ ERROR: Please enter a question.", "⚠️ No input provided", "")
        return

    # Normalize provider and handle Ollama alias
    # (the dropdown shows "Ollama (local)" but the backend expects "ollama").
    provider_lower = provider.lower()
    if provider_lower == "ollama (local)":
        provider_lower = "ollama"

    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    # Banner shown at the top of the live log; also reused as the prefix of
    # the final result text below.
    header = f"""
╔══════════════════════════════════════════════════════════════════╗
β•‘                     MAGI SYSTEM ANALYSIS                         β•‘
β•‘                Multi-Agent General Intelligence                  β•‘
β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•

⏰ Timestamp: {timestamp}
❓ Question: {question}
πŸ€– Provider: {provider}
🌐 Search: {"Enabled" if enable_search else "Disabled"}
🌑️ Temperature: {temperature}
πŸ¦™ Ollama Model: {ollama_model if provider_lower == "ollama" else "-"}

{'='*70}
EXECUTING THREE-PERSPECTIVE ANALYSIS...
{'='*70}

"""
    # Immediately show header in logs
    log_text += header
    yield (result_text, "πŸš€ Analysis started...", log_text)

    # Queue to collect streamed stdout/stderr; a None sentinel marks the end.
    q: queue.Queue[str | None] = queue.Queue()

    # File-like object that forwards every write into the queue, used as a
    # redirect target for the worker thread's stdout/stderr.
    class QueueWriter(io.TextIOBase):
        def write(self, s: str) -> int:
            if s:
                q.put(s)
            return len(s)

    # Matches ANSI CSI escape sequences (colors, cursor movement).
    ansi_escape = re.compile(r"\x1b\[[0-?]*[ -/]*[@-~]")

    def sanitize(chunk: str) -> str:
        # Strip ANSI color/control codes and carriage returns so the log
        # textbox stays readable.
        chunk = ansi_escape.sub("", chunk)
        chunk = chunk.replace("\r", "")
        return chunk

    # Worker to run analysis while capturing stdout/stderr.
    # Result/exception are passed back via this holder dict because a thread
    # target cannot return a value directly.
    analysis_result_holder = {"result": None, "error": None}

    def worker():
        try:
            with redirect_stdout(QueueWriter()), redirect_stderr(QueueWriter()):
                res = analyze_question(
                    question=question,
                    provider=provider_lower,
                    ollama_model=ollama_model,
                    enable_search=enable_search,
                    temperature=temperature
                )
            analysis_result_holder["result"] = res
        except Exception as e:  # noqa: BLE001
            analysis_result_holder["error"] = e
        finally:
            q.put(None)  # Sentinel to indicate completion

    t = threading.Thread(target=worker, daemon=True)
    t.start()

    # Consume queue and stream updates until the sentinel arrives.
    while True:
        try:
            item = q.get(timeout=0.2)
        except queue.Empty:
            # Yield periodic heartbeat without changing texts to keep UI responsive
            yield (result_text, "⏳ Running analysis...", log_text)
            continue

        if item is None:
            break
        chunk = item
        if clean_logs:
            chunk = sanitize(chunk)
        log_text += chunk
        # Keep log size reasonable: retain only the newest 200k characters.
        if len(log_text) > 200_000:
            log_text = log_text[-200_000:]
        yield (result_text, "⏳ Running analysis...", log_text)

    # Thread finished: prepare final outputs.
    # Error path: surface the captured exception in the result box.
    if analysis_result_holder["error"] is not None:
        e = analysis_result_holder["error"]
        error_msg = f"""
╔══════════════════════════════════════════════════════════════════╗
β•‘                             ERROR                                β•‘
β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•

❌ An error occurred during MAGI analysis:

{str(e)}

Please check:
- Your API keys are configured in config/.env
- You have a stable internet connection (if using cloud providers)
- The question is not empty
"""
        result_text = error_msg
        status_text = f"❌ Error: {str(e)}"
        yield (result_text, status_text, log_text)
        return

    # Success path: res is the dict returned by analyze_question
    # (keys: question, provider, result, status).
    res = analysis_result_holder["result"]
    result_text = header + "\n" + res["result"] + "\n\n" + "=" * 70
    status_text = f"βœ… Analysis completed successfully at {timestamp}"
    yield (result_text, status_text, log_text)
248
+
249
+
250
def create_magi_interface():
    """
    Create the Gradio interface for the MAGI system.

    Builds a two-column Blocks layout: inputs/settings/examples on the left,
    live logs + analysis result + status on the right. Returns the (not yet
    launched) gr.Blocks object.
    """

    with gr.Blocks(css=EVANGELION_CSS, title="MAGI System", theme=gr.themes.Base()) as interface:
        # Header
        gr.Markdown("""
# πŸ”Ί MAGI SYSTEM πŸ”Ί
## Multi-Agent General Intelligence
### *Based on Neon Genesis Evangelion*

---

The MAGI system consists of three AI agents, each representing a different aspect of Dr. Naoko Akagi's personality:
- **MELCHIOR-1**: Scientific analysis (logic and data)
- **BALTHASAR-2**: Ethical evaluation (emotions and morals)
- **CASPER-3**: Practical assessment (social and real-world)

All three perspectives are synthesized to provide comprehensive analysis.
""", elem_classes="centered-markdown")
        # Main interface
        with gr.Row():
            with gr.Column(scale=2):
                # Input section
                question_input = gr.Textbox(
                    label="🎯 Enter Your Question",
                    placeholder="What question would you like the MAGI system to analyze?",
                    lines=3
                )

                # Settings
                with gr.Accordion("βš™οΈ Advanced Settings", open=False):
                    provider_dropdown = gr.Dropdown(
                        choices=["Groq", "OpenAI", "Ollama (local)"],
                        value="Groq",
                        label="LLM Provider",
                        info="Groq is free and fast, OpenAI requires paid API key, Ollama runs locally"
                    )
                    # Only shown when the Ollama provider is selected (see
                    # the provider_dropdown.change handler below).
                    ollama_model_input = gr.Textbox(
                        label="Ollama Model Name (if using Ollama)",
                        placeholder="e.g. llama3, phi3, mistral, ...",
                        visible=False
                    )
                    search_checkbox = gr.Checkbox(
                        label="Enable Internet Search",
                        value=False,
                        info="Requires SERPER_API_KEY in .env file"
                    )
                    temperature_slider = gr.Slider(
                        minimum=0.0,
                        maximum=1.0,
                        value=0.5,
                        step=0.1,
                        label="Temperature",
                        info="Higher = more creative, Lower = more focused"
                    )
                    clean_logs_checkbox = gr.Checkbox(
                        label="Clean colored logs (strip ANSI)",
                        value=True,
                        info="Recommended for readable logs"
                    )

                # Action buttons
                with gr.Row():
                    analyze_btn = gr.Button("πŸš€ EXECUTE MAGI ANALYSIS", variant="primary", size="lg")
                    clear_btn = gr.Button("πŸ—‘οΈ Clear", variant="secondary")

                # Example questions now placed under the buttons in the left column
                gr.Examples(
                    examples=[
                        ["Should we deploy EVA Unit-01 against the approaching Angel despite Shinji's unstable sync ratio?"],
                        ["Is it ethical to proceed with the Human Instrumentality Project to eliminate individual suffering?"],
                        ["Should NERV prioritize civilian evacuation or Angel neutralization during an active attack on Tokyo-3?"],
                        ["What is the acceptable risk threshold for activating a Dummy Plug system in combat operations?"],
                        ["Should we collaborate with SEELE's directives or maintain autonomous control over NERV operations?"]
                    ],
                    inputs=question_input,
                    label="πŸ’‘ Example Questions"
                )

            with gr.Column(scale=3):
                # Output section.
                # NOTE: the label previously contained a corrupted (mojibake)
                # emoji "�️"; replaced with the intended monitor glyph.
                logs_output = gr.Textbox(
                    label="πŸ–₯️ Live Logs",
                    lines=18,
                    max_lines=40,
                    interactive=False,
                    show_copy_button=True,
                    value="",
                    elem_id="live-logs",
                )
                result_output = gr.Textbox(
                    label="πŸ“Š MAGI Analysis Result",
                    lines=16,
                    max_lines=30,
                    show_copy_button=True,
                    elem_classes="centered-markdown"
                )
                status_output = gr.Textbox(
                    label="ℹ️ Status",
                    lines=1,
                    interactive=False,
                    elem_classes="centered-markdown"
                )

        # Footer
        gr.Markdown("""
---

**MAGI System v2.0** | Powered by CrewAI & Groq
*"The truth lies in the synthesis of three perspectives"*

πŸ”΄ NERV Systems Division | 🟠 MAGI Supercomputer Array
""")

        # Event handlers
        def update_ollama_visibility(provider):
            # Reveal the model-name textbox only for the local Ollama option.
            return gr.update(visible=(provider == "Ollama (local)"))
        provider_dropdown.change(
            fn=update_ollama_visibility,
            inputs=provider_dropdown,
            outputs=ollama_model_input
        )
        # Generator handler: streams (result, status, logs) updates.
        analyze_btn.click(
            fn=process_magi_query_stream,
            inputs=[question_input, provider_dropdown, ollama_model_input, search_checkbox, temperature_slider, clean_logs_checkbox],
            outputs=[result_output, status_output, logs_output]
        )
        # Reset the question plus all three output widgets.
        clear_btn.click(
            fn=lambda: ("", "", "", ""),
            inputs=None,
            outputs=[question_input, result_output, status_output, logs_output]
        )

    return interface
385
+
386
+
387
def main():
    """Launch the MAGI web interface."""
    banner = "=" * 70
    # Startup banner printed to the console before the UI comes up.
    startup_lines = [
        banner,
        "MAGI SYSTEM - WEB INTERFACE STARTING",
        banner,
        "\nπŸ”Ί Initializing NERV MAGI Supercomputer Array...",
        "πŸ”Έ Loading: MELCHIOR-1 (Scientific)",
        "πŸ”Έ Loading: BALTHASAR-2 (Ethical)",
        "πŸ”Έ Loading: CASPER-3 (Practical)",
        "\nβœ… All systems operational",
        "🌐 Launching web interface...\n",
    ]
    for line in startup_lines:
        print(line)

    app = create_magi_interface()

    # Launch with custom settings.
    app.launch(
        server_name="0.0.0.0",  # allow external access
        server_port=7862,       # different port to avoid conflict
        share=True,             # create public link
        inbrowser=True,         # open in browser automatically
        show_error=True,
    )


if __name__ == "__main__":
    main()