GhufranAI commited on
Commit
f1fca86
·
verified ·
1 Parent(s): 71a474d

Upload multi_agent_system.py

Browse files
Files changed (1) hide show
  1. multi_agent_system.py +707 -0
multi_agent_system.py ADDED
@@ -0,0 +1,707 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Multi-Agent Research Assistant with LangGraph (HUGGINGFACE COMPATIBLE)
3
+ ======================================================================
4
+
5
+ Adapted for HuggingFace models that don't support bind_tools() or with_structured_output()
6
+ Uses: Manual tool calling with prompt engineering + JSON parsing with error handling
7
+ Supports: Both text-generation and conversational task types
8
+
9
+ Installation:
10
+ pip install langgraph langchain langchain-community langchain-huggingface pydantic numexpr
11
+ """
12
+
13
+ import operator
14
+ import re
15
+ import json
16
+ from typing import Annotated, List, Optional, TypedDict, Literal
17
+ from pydantic import BaseModel, Field, ValidationError
18
+ import numexpr as ne
19
+
20
+ # LangGraph imports
21
+ from langgraph.graph import StateGraph, END
22
+
23
+ # LangChain imports
24
+ from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
25
+ from langchain_core.tools import tool
26
+ from langchain_core.messages import HumanMessage
27
+
28
+
29
+ # ═══════════════════════════════════════════════════════════════════
30
+ # 1. PYDANTIC SCHEMAS
31
+ # ═══════════════════════════════════════════════════════════════════
32
+
33
class ResearchOutput(BaseModel):
    """Structured output from Researcher agent.

    Produced by ResearcherAgent and read downstream by the analyst and
    writer nodes through the shared AgentState.
    """
    answer: str = Field(description="The direct answer to the question")
    sources_used: List[str] = Field(description="List of tools/sources consulted")
    # ge/le make pydantic reject values outside [0.0, 1.0] at validation time.
    confidence: float = Field(description="Confidence score 0-1", ge=0, le=1)
38
+
39
+
40
class AnalysisOutput(BaseModel):
    """Structured output from Analyst agent.

    Parsed (with fallbacks) from free-form LLM text by safe_parse_pydantic.
    """
    key_points: List[str] = Field(description="2-3 key points")
    implications: str = Field(description="Why this matters")
44
+
45
+
46
class ReportOutput(BaseModel):
    """Structured output from Writer agent.

    The final artifact shown to the user; regenerated on each revision pass.
    """
    title: str = Field(description="Report title")
    content: str = Field(description="Main report content")
50
+
51
+
52
class CritiqueOutput(BaseModel):
    """Structured output from Critic agent.

    `needs_revision` drives the conditional edge back to the writer node.
    """
    score: float = Field(description="Quality score 0-10", ge=0, le=10)
    needs_revision: bool = Field(description="Whether revision is needed")
56
+
57
+
58
+ # ═══════════════════════════════════════════════════════════════════
59
+ # 2. SHARED STATE
60
+ # ═══════════════════════════════════════════════════════════════════
61
+
62
class AgentState(TypedDict):
    """Shared state for all agents.

    A single mutable dict flows through the LangGraph workflow; each node
    fills in its own output slot and advances `current_step`.
    """
    # The user's original question (set once at workflow start).
    question: str
    # Filled by ResearcherAgent.
    research_output: Optional[ResearchOutput]
    # Filled by AnalystAgent.
    analysis_output: Optional[AnalysisOutput]
    # Filled by WriterAgent; overwritten on each revision pass.
    report_output: Optional[ReportOutput]
    # Filled by CriticAgent; consumed by route_critique.
    critique_output: Optional[CritiqueOutput]
    # Number of writer passes completed so far.
    report_iterations: int
    # Upper bound on writer passes, enforced by the critic's heuristic.
    max_iterations: int
    # Human-readable progress marker (e.g. "research_complete").
    current_step: str
72
+
73
+
74
+ # ═══════════════════════════════════════════════════════════════════
75
+ # 3. TOOLS
76
+ # ═══════════════════════════════════════════════════════════════════
77
+
78
@tool
def calculator(expression: str) -> str:
    """
    Perform safe mathematical calculations.

    Args:
        expression: A mathematical expression like "2+2" or "(10*5)+3"

    Returns:
        The evaluated result as a string, or an "Error: ..." message.
        Never raises, so the agent loop always gets a usable string back.
    """
    try:
        expression = expression.strip()
        # Guard against empty input -- numexpr raises a confusing internal
        # error on "" otherwise.
        if not expression:
            return "Error: Empty expression"
        # Character whitelist keeps identifiers/function calls out of
        # numexpr (defense in depth against expression injection).
        allowed = set("0123456789+-*/(). ")
        if not all(c in allowed for c in expression):
            return "Error: Invalid characters"
        result = ne.evaluate(expression)
        # ne.evaluate returns a 0-d numpy array; .item() converts it to a
        # plain Python scalar so str() yields a clean "4" / "52.0".
        return str(result.item())
    except Exception as e:
        return f"Error: {str(e)}"
95
+
96
+
97
@tool
def search_knowledge(query: str) -> str:
    """
    Search for general knowledge information.

    Args:
        query: The search query or topic
    """
    # Tiny in-memory "knowledge base": topic substring -> canned summary.
    knowledge = {
        "ai": "Artificial Intelligence (AI) is the simulation of human intelligence by machines. Key applications include machine learning, natural language processing, computer vision, and robotics. AI systems can learn from data, recognize patterns, and make decisions.",
        "artificial intelligence": "Artificial Intelligence (AI) is the simulation of human intelligence by machines. Key applications include machine learning, natural language processing, computer vision, and robotics. AI systems can learn from data, recognize patterns, and make decisions.",
        "machine learning": "Machine Learning is a subset of AI that enables systems to learn and improve from experience without being explicitly programmed. It uses algorithms to identify patterns in data and make predictions.",
        "python": "Python is a high-level, interpreted programming language known for its simplicity and readability. It's widely used in web development, data science, AI, automation, and scientific computing.",
        "data science": "Data Science is an interdisciplinary field that uses scientific methods, algorithms, and systems to extract knowledge and insights from structured and unstructured data.",
    }

    normalized = query.lower()
    # First topic whose key occurs as a substring of the query wins
    # (dict order is insertion order, matching the original lookup).
    hit = next((text for topic, text in knowledge.items() if topic in normalized), None)
    if hit is not None:
        return hit

    return f"Information about '{query}' would require web search or domain expertise. This is a general knowledge topic."
119
+
120
+
121
+ # ═══════════════════════════════════════════════════════════════════
122
+ # 4. TOOL EXECUTOR (Manual Implementation)
123
+ # ═══════════════════════════════════════════════════════════════════
124
+
125
class ToolExecutor:
    """Manually execute tools based on LLM requests.

    Replaces bind_tools() for endpoints that lack native tool calling:
    the LLM is prompted to emit "USE_TOOL: name(args)" and this class
    detects and dispatches those calls.
    """

    def __init__(self, tools):
        # Map tool name -> tool object for O(1) dispatch by name.
        self.tools = {t.name: t for t in tools}

    def detect_tool_call(self, text: str) -> Optional[tuple]:
        """Detect if text contains a tool call request.

        Returns:
            (tool_name, arguments) on a match, otherwise None.
        """
        # Pattern: USE_TOOL: tool_name(arguments)
        # Greedy ".*" captures up to the LAST ")" on the line so nested
        # parentheses survive: "calculator((10*5)+3)" -> "(10*5)+3".
        # (The previous non-greedy form truncated at the first ")".)
        pattern = r'USE_TOOL:\s*(\w+)\((.*)\)'
        match = re.search(pattern, text, re.IGNORECASE)

        if match:
            tool_name = match.group(1)
            arguments = match.group(2).strip('"\'')
            return (tool_name, arguments)

        # Alternative pattern: tool_name: arguments (single line)
        for tool_name in self.tools.keys():
            if f"{tool_name}:" in text.lower():
                # re.escape guards against regex metacharacters in names.
                pattern = rf'{re.escape(tool_name)}:\s*([^\n]+)'
                match = re.search(pattern, text, re.IGNORECASE)
                if match:
                    arguments = match.group(1).strip('"\'')
                    return (tool_name, arguments)

        return None

    def execute(self, tool_name: str, arguments: str) -> str:
        """Execute a tool with given arguments.

        Errors are returned as "Error: ..." strings rather than raised,
        so the calling agent can feed them back into the conversation.
        """
        if tool_name not in self.tools:
            return f"Error: Tool '{tool_name}' not found"

        try:
            # Call the raw function to bypass LangChain's tool-call schema.
            result = self.tools[tool_name].func(arguments)
            return result
        except Exception as e:
            return f"Error executing {tool_name}: {str(e)}"
165
+
166
+
167
+ # ═══════════════════════════════════════════════════════════════════
168
+ # 5. JSON PARSER WITH ERROR HANDLING
169
+ # ═══════════════════════════════════════════════════════════════════
170
+
171
def extract_json(text: str) -> Optional[dict]:
    """Extract JSON from text with multiple strategies.

    Strategy 1 looks inside fenced code blocks (``` or ```json); strategy 2
    scans the raw text for brace-delimited objects (handles one nesting
    level). Returns the first successfully parsed non-empty dict, or None.
    """

    # Strategy 1: Find JSON in code blocks. Try EVERY fenced candidate,
    # not just the first -- an earlier malformed fence should not mask a
    # valid one later in the response.
    json_pattern = r'```(?:json)?\s*(\{.*?\})\s*```'
    matches = re.findall(json_pattern, text, re.DOTALL)
    for candidate in matches:
        try:
            return json.loads(candidate)
        except json.JSONDecodeError:
            # Narrowed from a bare except: only parse failures are expected.
            continue

    # Strategy 2: Find JSON without code blocks.
    json_pattern = r'\{[^{}]*(?:\{[^{}]*\}[^{}]*)*\}'
    matches = re.findall(json_pattern, text, re.DOTALL)
    for match in matches:
        try:
            parsed = json.loads(match)
            if isinstance(parsed, dict) and len(parsed) > 0:
                return parsed
        except json.JSONDecodeError:
            continue

    return None
195
+
196
+
197
def safe_parse_pydantic(text: str, model: type, fallback_data: dict) -> BaseModel:
    """Safely parse LLM text into a Pydantic model, degrading gracefully.

    Strategies, in order:
      1. extract_json() on the text, validated against ``model``;
      2. the raw text parsed directly as JSON;
      3. ``fallback_data`` as-is;
      4. ``fallback_data`` filtered to the model's declared fields.

    Args:
        text: Raw LLM response text.
        model: The Pydantic model CLASS to instantiate (annotation fixed
            from ``BaseModel`` instance to ``type``).
        fallback_data: Field values used when the text is unparseable.

    Raises only if even the filtered fallback misses required fields.
    """

    # Try to extract JSON embedded in the response text.
    json_data = extract_json(text)

    if json_data:
        try:
            return model(**json_data)
        except ValidationError:
            pass

    # Try parsing the text directly as a JSON document.
    # Narrowed from a bare except: pydantic raises ValidationError for
    # both invalid JSON and schema mismatches; ValueError covers older
    # json-level failures.
    try:
        return model.model_validate_json(text)
    except (ValidationError, ValueError):
        pass

    # Fallback: build the model from the caller-supplied defaults.
    try:
        return model(**fallback_data)
    except ValidationError:
        # Last resort: drop keys the model does not declare and retry.
        return model(**{k: v for k, v in fallback_data.items() if k in model.model_fields})
221
+
222
+
223
+ # ═══════════════════════════════════════════════════════════════════
224
+ # 6. LLM FACTORY
225
+ # ═══════════════════════════════════════════════════════════════════
226
+
227
class LLMFactory:
    """Factory for creating LLM instances."""

    # Previously hard-coded model; now overridable via create_llm(repo_id=...).
    DEFAULT_REPO_ID = "meta-llama/Llama-3.1-8B-Instruct"

    @staticmethod
    def create_llm(token: str, temperature: float = 0.3,
                   repo_id: str = DEFAULT_REPO_ID):
        """Create base LLM with conversational support.

        Args:
            token: HuggingFace Hub API token.
            temperature: Sampling temperature.
            repo_id: Model repository id (new, backward-compatible
                parameter; defaults to the value that was hard-coded).

        Returns:
            A ChatHuggingFace wrapper when the conversational endpoint can
            be built, otherwise a plain HuggingFaceEndpoint fallback.
        """
        # Single source of truth for generation settings so the primary
        # and fallback endpoints cannot silently drift apart.
        endpoint_kwargs = dict(
            repo_id=repo_id,
            huggingfacehub_api_token=token,
            temperature=temperature,
            max_new_tokens=1000,
            top_p=0.9,
            repetition_penalty=1.1,
        )

        try:
            # Try using ChatHuggingFace wrapper for conversational models.
            endpoint = HuggingFaceEndpoint(
                task="conversational",  # Specify conversational task
                **endpoint_kwargs,
            )
            # Wrap with ChatHuggingFace for proper message handling.
            return ChatHuggingFace(llm=endpoint)

        except Exception as e:
            print(f"⚠️ ChatHuggingFace failed, trying standard endpoint: {e}")
            # Fallback to the standard (text-generation) endpoint.
            return HuggingFaceEndpoint(**endpoint_kwargs)
260
+
261
+
262
+ # ═══════════════════════════════════════════════════════════════════
263
+ # 7. AGENT NODES
264
+ # ═══════════════════════════════════════════════════════════════════
265
+
266
class ResearcherAgent:
    """Researcher with manual tool calling.

    Prompts the LLM to emit "USE_TOOL: name(args)", executes the detected
    tool via ToolExecutor, then asks the LLM to synthesize a final answer
    from the tool result. Falls back to raw LLM knowledge when no tool
    call is detected.
    """

    def __init__(self, llm, tool_executor):
        self.llm = llm
        self.tool_executor = tool_executor

    def __call__(self, state: AgentState) -> AgentState:
        """Research node with tool execution.

        Mutates and returns `state`, setting `research_output` and
        advancing `current_step` to "research_complete".
        """

        print("\n🔍 RESEARCHER AGENT")

        question = state["question"]

        # Determine which tool to use
        prompt = f"""You are a research assistant. Answer this question: {question}

Available tools:
- calculator: For math operations (e.g., "2+2", "(10*5)+3")
- search_knowledge: For information lookup (e.g., "artificial intelligence", "python")

Instructions:
1. If the question involves math/calculation, respond with: USE_TOOL: calculator(expression)
2. If the question needs information, respond with: USE_TOOL: search_knowledge(topic)
3. Replace 'expression' or 'topic' with the actual query

Examples:
- For "what is 2+2": USE_TOOL: calculator(2+2)
- For "what is AI": USE_TOOL: search_knowledge(artificial intelligence)

Your response:"""

        # Get LLM response (handle both chat and text models)
        try:
            # Try chat-style invocation first
            if hasattr(self.llm, 'invoke'):
                response_obj = self.llm.invoke([HumanMessage(content=prompt)])
                # Extract content from response (chat models return a
                # message object; raw endpoints may return a string).
                if hasattr(response_obj, 'content'):
                    response = response_obj.content
                else:
                    response = str(response_obj)
            else:
                # Legacy text-completion interface: call the LLM directly.
                response = self.llm(prompt)
        except Exception as e:
            print(f" ⚠️ LLM error: {e}")
            # Fallback: try direct call with the bare prompt string.
            try:
                response = str(self.llm.invoke(prompt))
            except:
                # Final fallback: keep the pipeline alive with an error text.
                response = f"Error: Unable to get LLM response for: {question}"

        print(f" LLM Response: {response[:200]}...")

        # Check for tool call
        tool_call = self.tool_executor.detect_tool_call(response)

        if tool_call:
            tool_name, arguments = tool_call
            print(f" 🔧 Executing: {tool_name}({arguments})")

            # Execute tool
            tool_result = self.tool_executor.execute(tool_name, arguments)
            print(f" ✅ Tool Result: {tool_result}")

            # Synthesize final answer from the tool result.
            synthesis_prompt = f"""Based on this tool result, provide a clear answer to: {question}

Tool used: {tool_name}
Tool result: {tool_result}

Provide a direct, concise answer."""

            try:
                if hasattr(self.llm, 'invoke'):
                    answer_obj = self.llm.invoke([HumanMessage(content=synthesis_prompt)])
                    answer = answer_obj.content if hasattr(answer_obj, 'content') else str(answer_obj)
                else:
                    answer = self.llm(synthesis_prompt)
            except:
                # Synthesis failed: surface the raw tool result instead.
                answer = f"The answer is: {tool_result}"

            sources = [tool_name]
        else:
            # No tool needed, use LLM knowledge (the raw response is the answer).
            answer = response
            sources = ["LLM Knowledge"]

        # Create research output; tool-backed answers get higher confidence.
        research_output = ResearchOutput(
            answer=answer.strip(),
            sources_used=sources,
            confidence=0.9 if tool_call else 0.7
        )

        state["research_output"] = research_output
        state["current_step"] = "research_complete"
        print(f" ✅ Answer: {answer[:100]}...")

        return state
366
+
367
+
368
class AnalystAgent:
    """Analyzes research"""

    def __init__(self, llm):
        # LLM handle shared with the other agents.
        self.llm = llm

    def __call__(self, state: "AgentState") -> "AgentState":
        """Analysis node: distill the research answer into key insights."""

        print("\n📊 ANALYST AGENT")

        research = state["research_output"]

        # Defaults used when the LLM reply cannot be parsed into the schema.
        fallback = {
            "key_points": [research.answer[:100]],
            "implications": "Direct answer provided"
        }

        prompt = f"""Analyze this answer and extract key insights.

Question: {state['question']}
Answer: {research.answer}

Provide your analysis in JSON format:
{{
"key_points": ["point 1", "point 2"],
"implications": "why this matters"
}}

Analysis:"""

        # Prefer the chat-style .invoke() API; on any failure, substitute
        # a minimal valid JSON payload so parsing below always has input.
        llm = self.llm
        try:
            if hasattr(llm, 'invoke'):
                raw = llm.invoke([HumanMessage(content=prompt)])
                reply = raw.content if hasattr(raw, 'content') else str(raw)
            else:
                reply = llm(prompt)
        except Exception as e:
            print(f" ⚠️ LLM error: {e}")
            reply = '{"key_points": ["Analysis unavailable"], "implications": "Direct answer provided"}'

        # Parse the reply, falling back to the defaults above.
        analysis_output = safe_parse_pydantic(reply, AnalysisOutput, fallback)

        state["analysis_output"] = analysis_output
        state["current_step"] = "analysis_complete"
        print(f" ✅ Extracted {len(analysis_output.key_points)} key points")

        return state
417
+
418
+
419
class WriterAgent:
    """Creates reports.

    May run multiple times: the critic can route back here for revisions,
    each pass incrementing `report_iterations`.
    """

    def __init__(self, llm):
        self.llm = llm

    def __call__(self, state: AgentState) -> AgentState:
        """Writing node: turn the research answer + analysis into a report."""

        print(f"\n✍️ WRITER AGENT (Iteration {state['report_iterations'] + 1})")

        research = state["research_output"]
        analysis = state["analysis_output"]

        prompt = f"""Write a clear, professional report.

Question: {state['question']}
Answer: {research.answer}
Key Points: {', '.join(analysis.key_points)}

Create a report in JSON format:
{{
"title": "descriptive title",
"content": "detailed explanation with the answer and key points"
}}

Report:"""

        try:
            # Chat-style invocation when available; plain call otherwise.
            if hasattr(self.llm, 'invoke'):
                response_obj = self.llm.invoke([HumanMessage(content=prompt)])
                response = response_obj.content if hasattr(response_obj, 'content') else str(response_obj)
            else:
                response = self.llm(prompt)
        except Exception as e:
            print(f" ⚠️ LLM error: {e}")
            # Empty response forces safe_parse_pydantic onto the fallback.
            response = ""

        # Parse with fallback: a deterministic report assembled directly
        # from the research answer and key points.
        fallback = {
            "title": state['question'],
            "content": f"Question: {state['question']}\n\nAnswer: {research.answer}\n\nKey Points:\n" + "\n".join(f"• {point}" for point in analysis.key_points)
        }

        report_output = safe_parse_pydantic(response, ReportOutput, fallback)

        state["report_output"] = report_output
        # Count this pass; the critic caps passes at max_iterations.
        state["report_iterations"] += 1
        state["current_step"] = "report_complete"
        print(f" ✅ Report created: {len(report_output.content)} chars")

        return state
471
+
472
+
473
class CriticAgent:
    """Reviews reports"""

    def __init__(self, llm):
        # Kept for interface parity with the other agents; the critique
        # itself is purely heuristic and never calls the LLM.
        self.llm = llm

    def __call__(self, state: "AgentState") -> "AgentState":
        """Critique node: heuristic quality score plus revision decision."""

        print("\n🎯 CRITIC AGENT")

        report = state["report_output"]

        # Simple heuristic-based scoring for reliability:
        # start from a base of 8.0 and apply bonuses/penalties.
        bonus = 0.0

        # Reward reports that actually contain the researched answer.
        if state["research_output"].answer.lower() in report.content.lower():
            bonus += 1.0

        # Reward non-trivial content length.
        if len(report.content) > 100:
            bonus += 0.5

        # First iteration is penalized slightly to allow one revision.
        penalty = 1.0 if state["report_iterations"] == 1 else 0.0

        # Clamp into the schema's valid [0, 10] range.
        score = max(0.0, min(10.0, 8.0 + bonus - penalty))

        # Revise only while under the quality bar AND under the pass limit.
        needs_revision = (
            score < 8.0
            and state["report_iterations"] < state["max_iterations"]
        )

        critique_output = CritiqueOutput(score=score, needs_revision=needs_revision)

        state["critique_output"] = critique_output
        state["current_step"] = "critique_complete"
        print(f" ✅ Score: {score}/10 | Revision needed: {needs_revision}")

        return state
518
+
519
+
520
+ # ═══════════════════════════════════════════════════════════════════
521
+ # 8. CONDITIONAL ROUTING
522
+ # ═══════════════════════════════════════════════════════════════════
523
+
524
def route_critique(state: "AgentState") -> Literal["revise", "finish"]:
    """Route from critic: loop the report back to the writer or end the run."""
    critique = state["critique_output"]

    # Guard clause: the common approved case exits first.
    if not critique.needs_revision:
        print(f"\n✅ Report approved (Score: {critique.score}/10)")
        return "finish"

    print(f"\n🔄 Revision needed (Score: {critique.score}/10)")
    return "revise"
534
+
535
+
536
+ # ═══════════════════════════════════════════════════════════════════
537
+ # 9. MAIN SYSTEM
538
+ # ═══════════════════════════════════════════════════════════════════
539
+
540
class MultiAgentSystem:
    """Multi-agent system compatible with HuggingFace models.

    Wires four agents (researcher -> analyst -> writer -> critic) into a
    LangGraph workflow with a conditional revise loop from critic back to
    writer.
    """

    def __init__(self, token: str, max_iterations: int = 2):
        # max_iterations bounds the writer/critic revision loop.
        self.max_iterations = max_iterations

        print("\n" + "="*70)
        print("🤖 INITIALIZING MULTI-AGENT SYSTEM (HUGGINGFACE COMPATIBLE)")
        print("="*70)

        # Create tools and executor
        tools = [calculator, search_knowledge]
        self.tool_executor = ToolExecutor(tools)
        print(f"🛠️ Loaded {len(tools)} tools: {[t.name for t in tools]}")

        # Create LLM (one shared instance for all agents)
        print("📡 Creating LLM...")
        self.llm = LLMFactory.create_llm(token)
        print(" ✅ LLM ready")

        # Initialize agents
        print("🤖 Initializing agents...")
        self.researcher = ResearcherAgent(self.llm, self.tool_executor)
        self.analyst = AnalystAgent(self.llm)
        self.writer = WriterAgent(self.llm)
        self.critic = CriticAgent(self.llm)
        print(" ✅ All agents ready")

        # Build graph
        print("🔗 Building workflow...")
        self.graph = self._build_graph()
        print(" ✅ Graph compiled")

        print("\n✅ System ready!\n")

    def _build_graph(self) -> StateGraph:
        """Build and compile the workflow graph.

        Returns the compiled graph, ready for .invoke().
        """

        workflow = StateGraph(AgentState)

        # Add nodes (each agent instance is callable on AgentState)
        workflow.add_node("researcher", self.researcher)
        workflow.add_node("analyst", self.analyst)
        workflow.add_node("writer", self.writer)
        workflow.add_node("critic", self.critic)

        # Set entry point
        workflow.set_entry_point("researcher")

        # Add edges (linear pipeline up to the critic)
        workflow.add_edge("researcher", "analyst")
        workflow.add_edge("analyst", "writer")
        workflow.add_edge("writer", "critic")

        # Conditional edge from critic: revise loops back to the writer.
        workflow.add_conditional_edges(
            "critic",
            route_critique,
            {
                "revise": "writer",
                "finish": END
            }
        )

        return workflow.compile()

    def research(self, question: str) -> dict:
        """Execute the full research workflow for one question.

        Returns the final AgentState dict, or None on error.
        """

        print("="*70)
        print(f"📋 QUESTION: {question}")
        print("="*70)

        # Fresh state per question; counters reset so revision limits apply.
        initial_state = AgentState(
            question=question,
            research_output=None,
            analysis_output=None,
            report_output=None,
            critique_output=None,
            report_iterations=0,
            max_iterations=self.max_iterations,
            current_step="start"
        )

        try:
            final_state = self.graph.invoke(initial_state)

            print("\n" + "="*70)
            print("✅ WORKFLOW COMPLETE")
            print("="*70)

            if final_state.get("critique_output"):
                print(f"Final score: {final_state['critique_output'].score}/10")

            return final_state

        except Exception as e:
            # Print the full traceback for debugging, but return None so
            # the CLI loop keeps running.
            print(f"\n❌ Error: {e}")
            import traceback
            traceback.print_exc()
            return None
641
+
642
+
643
+ # ═══════════════════════════════════════════════════════════════════
644
+ # 10. CLI INTERFACE
645
+ # ═══════════════════════════════════════════════════════════════════
646
+
647
def cli_demo():
    """Command-line demo.

    Prompts for a HuggingFace token, builds the MultiAgentSystem, then
    loops reading questions until the user enters quit/exit/q.
    """

    print("""
╔══════════════════════════════════════════════════════════════════════════╗
║ MULTI-AGENT SYSTEM ║
║ Manual tool calling + JSON parsing with fallbacks ║
╚══════════════════════════════════════════════════════════════════════════╝
""")

    token = input("Enter your Hugging Face token: ").strip()

    if not token:
        print("❌ Token required!")
        return

    try:
        system = MultiAgentSystem(token=token, max_iterations=2)
    except Exception as e:
        # Initialization can fail on bad tokens or unreachable endpoints;
        # show the traceback and exit rather than looping with no system.
        print(f"❌ Initialization failed: {e}")
        import traceback
        traceback.print_exc()
        return

    print("\n💡 Try questions like:")
    print(" • what is 2+2")
    print(" • calculate (15*3)+7")
    print(" • what is artificial intelligence")
    print(" • what is machine learning")

    # Main REPL loop: one full workflow run per question.
    while True:
        print("\n" + "="*70)
        question = input("\n🤔 Enter question (or 'quit'): ").strip()

        if question.lower() in ['quit', 'exit', 'q']:
            print("\n👋 Goodbye!")
            break

        if not question:
            continue

        final_state = system.research(question)

        # Only show output when the workflow produced a report.
        if final_state and final_state.get("report_output"):
            print("\n" + "="*70)
            print("📄 FINAL REPORT")
            print("="*70)

            report = final_state["report_output"]
            print(f"\n📌 {report.title}")
            print(f"\n{report.content}")

            print("\n" + "="*70)
            print("🎯 QUALITY SCORE")
            print("="*70)
            critique = final_state["critique_output"]
            print(f"Score: {critique.score}/10")
704
+
705
+
706
# Run the interactive CLI only when executed as a script (not on import).
if __name__ == "__main__":
    cli_demo()