GhufranAI committed on
Commit
3e31e00
·
verified ·
1 Parent(s): 7c3354c

Upload 3 files

Browse files
Files changed (3) hide show
  1. app.py +591 -0
  2. multi_agent_assistant.py +913 -0
  3. requirements.txt +8 -0
app.py ADDED
@@ -0,0 +1,591 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Streamlit UI for Multi-Agent Research Assistant (Tavily Version)
3
+ =================================================================
4
+
5
+ Features:
6
+ - Clean, professional interface
7
+ - Real-time agent execution visualization
8
+ - Interactive tool selection
9
+ - Source citations with links
10
+ - Export reports
11
+ - Session history
12
+
13
+ Run: streamlit run app.py
14
+ """
15
+
16
+ import streamlit as st
17
+ from datetime import datetime
18
+ import json
19
+ import time
20
+
21
+ # Import your multi-agent system
22
+ from multi_agent_assistant import (
23
+ MultiAgentSystem,
24
+ Config,
25
+ TAVILY_AVAILABLE
26
+ )
27
+
28
+ # ═══════════════════════════════════════════════════════════════════════════
29
+ # PAGE CONFIG
30
+ # ═══════════════════════════════════════════════════════════════════════════
31
+
32
+ st.set_page_config(
33
+ page_title="Multi-Agent Research Assistant",
34
+ page_icon="🤖",
35
+ layout="wide",
36
+ initial_sidebar_state="expanded"
37
+ )
38
+
39
+ # Custom CSS
40
+ st.markdown("""
41
+ <style>
42
+ .main-header {
43
+ font-size: 2.5rem;
44
+ font-weight: bold;
45
+ color: #1f77b4;
46
+ text-align: center;
47
+ margin-bottom: 1rem;
48
+ }
49
+ .sub-header {
50
+ font-size: 1.2rem;
51
+ color: #666;
52
+ text-align: center;
53
+ margin-bottom: 2rem;
54
+ }
55
+ .agent-box {
56
+ padding: 1rem;
57
+ border-radius: 0.5rem;
58
+ border-left: 4px solid;
59
+ margin: 1rem 0;
60
+ }
61
+ .researcher { border-color: #1f77b4; background-color: #e3f2fd; }
62
+ .analyst { border-color: #ff7f0e; background-color: #fff3e0; }
63
+ .writer { border-color: #2ca02c; background-color: #e8f5e9; }
64
+ .critic { border-color: #d62728; background-color: #ffebee; }
65
+ .source-card {
66
+ padding: 1rem;
67
+ border-radius: 0.5rem;
68
+ background-color: #f5f5f5;
69
+ margin: 0.5rem 0;
70
+ }
71
+ .metric-card {
72
+ padding: 1rem;
73
+ border-radius: 0.5rem;
74
+ background-color: #ffffff;
75
+ border: 1px solid #e0e0e0;
76
+ text-align: center;
77
+ }
78
+ </style>
79
+ """, unsafe_allow_html=True)
80
+
81
+ # ═══════════════════════════════════════════════════════════════════════════
82
+ # SESSION STATE INITIALIZATION
83
+ # ═══════════════════════════════════════════════════════════════════════════
84
+
85
+ if 'system' not in st.session_state:
86
+ st.session_state.system = None
87
+ if 'history' not in st.session_state:
88
+ st.session_state.history = []
89
+ if 'current_research' not in st.session_state:
90
+ st.session_state.current_research = None
91
+ if 'agent_logs' not in st.session_state:
92
+ st.session_state.agent_logs = []
93
+
94
+
95
+ # ═══════════════════════════════════════════════════════════════════════════
96
+ # HELPER FUNCTIONS
97
+ # ═══════════════════════════════════════════════════════════════════════════
98
+
99
def initialize_system(hf_token: str, tavily_key: str):
    """Build the MultiAgentSystem and stash it in Streamlit session state.

    Returns True on success; on failure shows the error in the UI and
    returns False.
    """
    try:
        with st.spinner("🚀 Initializing Multi-Agent System..."):
            st.session_state.system = MultiAgentSystem(
                hf_token=hf_token,
                tavily_key=tavily_key,
                max_iterations=2,
            )
        return True
    except Exception as exc:  # surface any construction failure to the user
        st.error(f"Initialization failed: {str(exc)}")
        return False
113
+
114
+
115
def display_agent_activity(step: str, agent_name: str, content: str):
    """Render a colored activity card for one agent in the Streamlit page."""
    # CSS class per agent; unknown names fall back to the researcher style.
    css_by_agent = {
        "Researcher": "researcher",
        "Analyst": "analyst",
        "Writer": "writer",
        "Critic": "critic",
    }
    css = css_by_agent.get(agent_name, "researcher")

    st.markdown(f"""
    <div class="agent-box {css}">
        <strong>🤖 {agent_name} Agent</strong><br/>
        <small>{content}</small>
    </div>
    """, unsafe_allow_html=True)
133
+
134
+
135
def format_report(report_output, research_output, critique_output):
    """Render the final report: title, body, metadata cards, web references."""

    st.markdown("---")
    st.markdown("## 📄 Research Report")

    # Title and body of the generated report.
    st.markdown(f"### {report_output.title}")
    st.markdown(report_output.content)

    # Metadata summary cards.
    st.markdown("---")
    st.markdown("### 📊 Research Metadata")

    cards = [
        ("Sources", ', '.join(research_output.sources_used)),
        ("Confidence", f"{research_output.confidence*100:.0f}%"),
        ("Quality Score", f"{critique_output.score:.1f}/10"),
    ]
    for column, (label, value) in zip(st.columns(3), cards):
        with column:
            st.markdown(f"""
    <div class="metric-card">
        <h4>{label}</h4>
        <p>{value}</p>
    </div>
    """, unsafe_allow_html=True)

    # Clickable web references, if any were used.
    if research_output.web_sources:
        st.markdown("### 🌐 Web References")
        for i, source in enumerate(research_output.web_sources, 1):
            st.markdown(f"""
    <div class="source-card">
        <strong>{i}. {source['title']}</strong><br/>
        <a href="{source['url']}" target="_blank">{source['url']}</a>
    </div>
    """, unsafe_allow_html=True)
187
+
188
+
189
def export_report(report_output, research_output):
    """Build a downloadable Markdown version of the report plus metadata."""

    content = f"""# {report_output.title}

{report_output.content}

---

## Metadata

- **Sources:** {', '.join(research_output.sources_used)}
- **Confidence:** {research_output.confidence*100:.0f}%
- **Generated:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}

"""

    # Append numbered markdown links when the researcher used the web.
    if research_output.web_sources:
        refs = [
            f"{i}. [{src['title']}]({src['url']})"
            for i, src in enumerate(research_output.web_sources, 1)
        ]
        content += "\n## Web References\n\n" + "\n".join(refs) + "\n"

    return content
212
+
213
+
214
+ # ═══════════════════════════════════════════════════════════════════════════
215
+ # SIDEBAR
216
+ # ═══════════════════════════════════════════════════════════════════════════
217
+
218
+ with st.sidebar:
219
+ st.markdown("# ⚙️ Configuration")
220
+
221
+ # API Keys
222
+ st.markdown("## 🔑 API Keys")
223
+
224
+ hf_token = st.text_input(
225
+ "Hugging Face Token",
226
+ type="password",
227
+ value=Config.HF_TOKEN if Config.HF_TOKEN else "",
228
+ help="Get from: https://huggingface.co/settings/tokens"
229
+ )
230
+
231
+ tavily_key = st.text_input(
232
+ "Tavily API Key",
233
+ type="password",
234
+ value=Config.TAVILY_API_KEY if Config.TAVILY_API_KEY else "",
235
+ help="Get FREE key from: https://tavily.com/"
236
+ )
237
+
238
+ if st.button("🚀 Initialize System", type="primary", use_container_width=True):
239
+ if not hf_token or not tavily_key:
240
+ st.error("Both tokens required!")
241
+ else:
242
+ if initialize_system(hf_token, tavily_key):
243
+ st.success("✅ System Ready!")
244
+
245
+ st.markdown("---")
246
+
247
+ # System Status
248
+ st.markdown("## 📊 System Status")
249
+
250
+ if st.session_state.system:
251
+ st.success("🟢 Online")
252
+ st.info(f"📚 Queries: {len(st.session_state.history)}")
253
+ else:
254
+ st.error("🔴 Offline")
255
+
256
+ if not TAVILY_AVAILABLE:
257
+ st.warning("⚠️ Tavily not installed")
258
+
259
+ st.markdown("---")
260
+
261
+ # Example queries
262
+ st.markdown("## 💡 Example Queries")
263
+
264
+ examples = {
265
+ "Math": "what is 125*8+47",
266
+ "Knowledge": "explain deep learning",
267
+ "Current Events": "latest AI news December 2025",
268
+ "Web Search": "current Bitcoin price"
269
+ }
270
+
271
+ for category, query in examples.items():
272
+ if st.button(f"{category}", use_container_width=True):
273
+ st.session_state.example_query = query
274
+
275
+ st.markdown("---")
276
+
277
+ # Clear history
278
+ if st.button("🗑️ Clear History", use_container_width=True):
279
+ st.session_state.history = []
280
+ st.session_state.current_research = None
281
+ st.rerun()
282
+
283
+ st.markdown("---")
284
+
285
+ # About
286
+ with st.expander("ℹ️ About"):
287
+ st.markdown("""
288
+ **Multi-Agent Research Assistant**
289
+
290
+ An Agentic AI system with:
291
+ - 🔍 Tavily web search
292
+ - 🧮 Calculator tool
293
+ - 📚 Knowledge base
294
+ - 🤖 4 specialized agents
295
+ - ♻️ Iterative refinement
296
+
297
+ **Tools:**
298
+ - LangGraph (orchestration)
299
+ - Tavily (AI-optimized search)
300
+ - Llama 3.1 8B (reasoning)
301
+
302
+ **Version:** 2.0
303
+ """)
304
+
305
+
306
+ # ═══════════════════════════════════════════════════════════════════════════
307
+ # MAIN CONTENT
308
+ # ═══════════════════════════════════════════════════════════════════════════
309
+
310
+ # Header
311
+ st.markdown('<div class="main-header">🤖 Multi-Agent Research Assistant</div>', unsafe_allow_html=True)
312
+ st.markdown('<div class="sub-header">Powered by Tavily AI-Optimized Search & Agentic AI With LangGraph</div>', unsafe_allow_html=True)
313
+
314
+ # Check system status
315
+ if not st.session_state.system:
316
+ st.warning("⚠️ Please initialize the system using the sidebar")
317
+
318
+ col1, col2, col3 = st.columns(3)
319
+
320
+ with col1:
321
+ st.markdown("""
322
+ ### 🔑 Step 1: Get API Keys
323
+
324
+ **Hugging Face (FREE)**
325
+ - [Get token](https://huggingface.co/settings/tokens)
326
+ - No credit card needed
327
+
328
+ **Tavily (FREE)**
329
+ - [Get key](https://tavily.com/)
330
+ - 1,000 searches/month free
331
+ """)
332
+
333
+ with col2:
334
+ st.markdown("""
335
+ ### ⚙️ Step 2: Initialize
336
+
337
+ 1. Enter tokens in sidebar
338
+ 2. Click "Initialize System"
339
+ 3. Wait ~10 seconds
340
+ 4. Start researching!
341
+ """)
342
+
343
+ with col3:
344
+ st.markdown("""
345
+ ### 💡 Step 3: Ask Questions
346
+
347
+ Try:
348
+ - Math calculations
349
+ - General knowledge
350
+ - Current events
351
+ - Web research
352
+ """)
353
+
354
+ st.stop()
355
+
356
+ # Main Interface
357
+ st.markdown("## 🔍 Research Query")
358
+
359
+ # Query input
360
+ query_col, button_col = st.columns([4, 1])
361
+
362
+ with query_col:
363
+ # Check if example query exists
364
+ default_query = st.session_state.get('example_query', '')
365
+ if default_query:
366
+ query = st.text_input(
367
+ "What would you like to research?",
368
+ value=default_query,
369
+ placeholder="e.g., latest AI developments, what is 25*4, explain machine learning"
370
+ )
371
+ # Clear example query after use
372
+ del st.session_state.example_query
373
+ else:
374
+ query = st.text_input(
375
+ "What would you like to research?",
376
+ placeholder="e.g., latest AI developments, what is 25*4, explain machine learning"
377
+ )
378
+
379
+ with button_col:
380
+ st.markdown("<br/>", unsafe_allow_html=True)
381
+ research_button = st.button("🚀 Research", type="primary", use_container_width=True)
382
+
383
+ # Execute research
384
+ if research_button and query:
385
+
386
+ st.markdown("---")
387
+ st.markdown("## 🤖 Agent Activity")
388
+
389
+ # Progress container
390
+ progress_placeholder = st.empty()
391
+ agent_placeholder = st.empty()
392
+
393
+ try:
394
+ # Show progress
395
+ with progress_placeholder:
396
+ progress_bar = st.progress(0)
397
+ status_text = st.empty()
398
+
399
+ # Execute research with progress updates
400
+ with st.spinner("🔍 Research in progress..."):
401
+
402
+ # Agent 1: Researcher
403
+ status_text.text("🔍 Researcher Agent: Gathering information...")
404
+ progress_bar.progress(25)
405
+
406
+ final_state = st.session_state.system.research(query)
407
+
408
+ # Agent 2: Analyst
409
+ status_text.text("📊 Analyst Agent: Analyzing findings...")
410
+ progress_bar.progress(50)
411
+ time.sleep(0.5)
412
+
413
+ # Agent 3: Writer
414
+ status_text.text("✍️ Writer Agent: Creating report...")
415
+ progress_bar.progress(75)
416
+ time.sleep(0.5)
417
+
418
+ # Agent 4: Critic
419
+ status_text.text("🎯 Critic Agent: Quality check...")
420
+ progress_bar.progress(100)
421
+ time.sleep(0.5)
422
+
423
+ # Clear progress
424
+ progress_placeholder.empty()
425
+
426
+ if final_state and final_state.get("report_output"):
427
+
428
+ # Display agent summary
429
+ with agent_placeholder:
430
+ st.success("✅ Research Complete!")
431
+
432
+ col1, col2, col3, col4 = st.columns(4)
433
+
434
+ with col1:
435
+ st.markdown("**🔍 Researcher**")
436
+ st.caption("Information gathered")
437
+
438
+ with col2:
439
+ st.markdown("**📊 Analyst**")
440
+ st.caption("Insights extracted")
441
+
442
+ with col3:
443
+ st.markdown("**✍️ Writer**")
444
+ st.caption("Report created")
445
+
446
+ with col4:
447
+ st.markdown("**🎯 Critic**")
448
+ st.caption(f"Score: {final_state['critique_output'].score:.1f}/10")
449
+
450
+ # Store in session
451
+ st.session_state.current_research = final_state
452
+
453
+ # Add to history
454
+ st.session_state.history.append({
455
+ "timestamp": datetime.now(),
456
+ "query": query,
457
+ "result": final_state
458
+ })
459
+
460
+ # Display report
461
+ format_report(
462
+ final_state["report_output"],
463
+ final_state["research_output"],
464
+ final_state["critique_output"]
465
+ )
466
+
467
+ # Export options
468
+ st.markdown("---")
469
+ st.markdown("### 📥 Export")
470
+
471
+ col1, col2, col3 = st.columns([1, 1, 2])
472
+
473
+ with col1:
474
+ report_text = export_report(
475
+ final_state["report_output"],
476
+ final_state["research_output"]
477
+ )
478
+
479
+ st.download_button(
480
+ label="📄 Download Markdown",
481
+ data=report_text,
482
+ file_name=f"research_report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.md",
483
+ mime="text/markdown",
484
+ use_container_width=True
485
+ )
486
+
487
+ with col2:
488
+ report_json = json.dumps({
489
+ "query": query,
490
+ "report": final_state["report_output"].dict(),
491
+ "research": final_state["research_output"].dict(),
492
+ "critique": final_state["critique_output"].dict(),
493
+ "timestamp": datetime.now().isoformat()
494
+ }, indent=2)
495
+
496
+ st.download_button(
497
+ label="📊 Download JSON",
498
+ data=report_json,
499
+ file_name=f"research_data_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json",
500
+ mime="application/json",
501
+ use_container_width=True
502
+ )
503
+
504
+ else:
505
+ st.error("❌ Research failed. Please try again.")
506
+
507
+ except Exception as e:
508
+ st.error(f"❌ Error during research: {str(e)}")
509
+ st.exception(e)
510
+
511
+ # Display current research if exists
512
+ elif st.session_state.current_research:
513
+ st.markdown("---")
514
+ st.info("💡 Previous research result shown below. Ask a new question above!")
515
+
516
+ final_state = st.session_state.current_research
517
+
518
+ format_report(
519
+ final_state["report_output"],
520
+ final_state["research_output"],
521
+ final_state["critique_output"]
522
+ )
523
+
524
+ # ═══════════════════════════════════════════════════════════════════════════
525
+ # HISTORY TAB
526
+ # ═══════════════════════════════════════════════════════════════════════════
527
+
528
+ if st.session_state.history:
529
+ st.markdown("---")
530
+ st.markdown("## 📚 Research History")
531
+
532
+ for i, item in enumerate(reversed(st.session_state.history)):
533
+ with st.expander(
534
+ f"🔍 {item['query'][:60]}... - {item['timestamp'].strftime('%H:%M:%S')}",
535
+ expanded=(i == 0)
536
+ ):
537
+ if item['result'] and item['result'].get('report_output'):
538
+
539
+ col1, col2 = st.columns([3, 1])
540
+
541
+ with col1:
542
+ st.markdown(f"**Question:** {item['query']}")
543
+ st.markdown(f"**Answer:** {item['result']['research_output'].answer[:200]}...")
544
+
545
+ with col2:
546
+ st.metric("Quality", f"{item['result']['critique_output'].score:.1f}/10")
547
+ st.metric("Confidence", f"{item['result']['research_output'].confidence*100:.0f}%")
548
+
549
+ if st.button(f"📄 View Full Report #{len(st.session_state.history)-i}", key=f"view_{i}"):
550
+ st.session_state.current_research = item['result']
551
+ st.rerun()
552
+
553
+
554
+ # ═══════════════════════════════════════════════════════════════════════════
555
+ # FOOTER
556
+ # ═══════════════════════════════════════════════════════════════════════════
557
+
558
+ st.markdown("---")
559
+
560
+ footer_col1, footer_col2, footer_col3 = st.columns(3)
561
+
562
+ with footer_col1:
563
+ st.markdown("""
564
+ **🤖 Agentic AI System**
565
+ - Autonomous tool selection
566
+ - Multi-agent collaboration
567
+ - Iterative refinement
568
+ """)
569
+
570
+ with footer_col2:
571
+ st.markdown("""
572
+ **🛠️ Technologies**
573
+ - LangGraph
574
+ - Tavily Search
575
+ - Llama 3.1 8B
576
+ """)
577
+
578
+ with footer_col3:
579
+ st.markdown("""
580
+ **📊 Capabilities**
581
+ - Web search
582
+ - Calculations
583
+ - Knowledge base
584
+ - Real-time info
585
+ """)
586
+
587
+ st.markdown("""
588
+ <div style='text-align: center; color: gray; padding: 2rem;'>
589
+ <small>Multi-Agent Research Assistant | Powered by Tavily & LangGraph</small>
590
+ </div>
591
+ """, unsafe_allow_html=True)
multi_agent_assistant.py ADDED
@@ -0,0 +1,913 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Multi-Agent Research Assistant
3
+ ======================================================================
4
+
5
+
6
+ Installation:
7
+ pip install langgraph langchain langchain-community langchain-huggingface pydantic numexpr tavily-python
8
+ """
9
+
10
import operator
import re
import json
from typing import Annotated, List, Optional, TypedDict, Literal
from pydantic import BaseModel, Field, ValidationError
import numexpr as ne
from datetime import datetime

# LangGraph
from langgraph.graph import StateGraph, END

# LangChain
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
from langchain_core.tools import tool
from langchain_core.messages import HumanMessage

# Tavily (optional). NOTE: an unconditional `from tavily import TavilyClient`
# used to precede this guard, which made the except-ImportError branch
# unreachable — a missing tavily package crashed the import of this module
# before TAVILY_AVAILABLE could ever be set to False. The guarded import
# below is now the only one.
try:
    from tavily import TavilyClient
    TAVILY_AVAILABLE = True
except ImportError:
    print("⚠️ Install tavily: pip install tavily-python")
    TAVILY_AVAILABLE = False
34
+
35
+
36
+ # ═══════════════════════════════════════════════════════════════════════════
37
+ # CONFIGURATION
38
+ # ═══════════════════════════════════════════════════════════════════════════
39
+
40
class Config:
    """Central place for the API credentials used across the system.

    Both values default to empty strings; the Streamlit UI overwrites them
    with user-supplied tokens at runtime.
    """
    # Hugging Face Inference API token (https://huggingface.co/settings/tokens)
    HF_TOKEN = ""
    # Tavily web-search API key (https://tavily.com/)
    TAVILY_API_KEY = ""
44
+
45
+
46
+ # ═══════════════════════════════════════════════════════════════════════════
47
+ # PYDANTIC SCHEMAS
48
+ # ═══════════════════════════════════════════════════════════════════════════
49
+
50
class ResearchOutput(BaseModel):
    """Structured result produced by the Researcher agent."""
    answer: str = Field(description="Direct answer to question")
    sources_used: List[str] = Field(description="Tools/sources consulted")
    confidence: float = Field(description="Confidence 0-1", ge=0, le=1)
    web_sources: Optional[List[dict]] = Field(default=None, description="Web sources with URLs")
    needs_web_search: bool = Field(default=False, description="Whether web search is needed")
    retry_count: int = Field(default=0, description="Number of retry attempts")
57
+
58
+
59
class AnalysisOutput(BaseModel):
    """Structured result produced by the Analyst agent."""
    key_points: List[str] = Field(description="2-4 key insights")
    implications: str = Field(description="Why this matters")
62
+
63
+
64
class ReportOutput(BaseModel):
    """Structured result produced by the Writer agent."""
    title: str = Field(description="Report title")
    content: str = Field(description="Full report content")
67
+
68
+
69
class CritiqueOutput(BaseModel):
    """Structured verdict produced by the Critic agent."""
    score: float = Field(description="Quality score 0-10", ge=0, le=10)
    needs_revision: bool = Field(description="Whether revision needed")
    needs_research_retry: bool = Field(default=False, description="Whether research needs retry")
    feedback: str = Field(description="Specific feedback")
    reasoning: str = Field(description="Why this score was given")
75
+
76
+
77
+ # ═══════════════════════════════════════════════════════════════════════════
78
+ # AGENT STATE
79
+ # ═══════════════════════════════════════════════════════════════════════════
80
+
81
class AgentState(TypedDict):
    """Shared state dictionary passed between LangGraph nodes.

    Each agent reads the fields it needs and fills in its own *_output slot;
    the iteration counters drive the critic's retry/revision loop.
    """
    question: str                             # the user's original query
    research_output: Optional[ResearchOutput]
    analysis_output: Optional[AnalysisOutput]
    report_output: Optional[ReportOutput]
    critique_output: Optional[CritiqueOutput]
    report_iterations: int                    # writer revision passes so far
    research_iterations: int                  # researcher retry passes so far
    max_iterations: int                       # cap on either loop
    current_step: str                         # label of the node last executed
91
+
92
+
93
+ # ═══════════════════════════════════════════════════════════════════════════
94
+ # TOOLS
95
+ # ═══════════════════════════════════════════════════════════════════════════
96
+
97
@tool
def calculator(expression: str) -> str:
    """Perform mathematical calculations."""
    try:
        expr = expression.strip()
        # Character whitelist keeps numexpr from evaluating anything
        # other than plain arithmetic.
        if any(ch not in "0123456789+-*/(). " for ch in expr):
            return "Error: Invalid characters"
        return str(float(ne.evaluate(expr)))
    except Exception as e:
        return f"Error: {str(e)}"
109
+
110
+
111
@tool
def search_knowledge(query: str) -> str:
    """Search internal knowledge base."""
    # Static topic -> summary lookup; matching is substring-based in
    # either direction so "what is ai" and "ai" both hit the "ai" entry.
    knowledge = {
        "ai": "AI (Artificial Intelligence) simulates human intelligence in machines through machine learning, neural networks, and deep learning.",
        "machine learning": "Machine Learning is a subset of AI enabling systems to learn from data without explicit programming. Types: supervised, unsupervised, reinforcement learning.",
        "python": "Python is a high-level programming language created by Guido van Rossum (1991). Used in web development, data science, AI/ML, automation.",
        "deep learning": "Deep Learning uses multi-layered neural networks to learn hierarchical data representations. Requires large datasets and GPUs.",
        "nlp": "Natural Language Processing enables computers to understand and generate human language using transformers like BERT, GPT.",
        "data science": "Data Science extracts insights from data using statistics, programming, and domain expertise.",
        "blockchain": "Blockchain is distributed ledger technology ensuring secure, transparent transactions through cryptographic hashing.",
        "quantum computing": "Quantum Computing uses quantum mechanical phenomena (superposition, entanglement) for computation.",
        "cloud computing": "Cloud Computing delivers computing services over the internet. Models: IaaS, PaaS, SaaS.",
        "cybersecurity": "Cybersecurity protects systems, networks, and data from digital attacks."
    }

    needle = query.lower()
    for topic, summary in knowledge.items():
        if topic in needle or needle in topic:
            return summary

    return f"No information in knowledge base for '{query}'. This query likely needs web search for current information."
133
+
134
+
135
@tool
def web_search(query: str, max_results: int = 5) -> str:
    """Search the web using Tavily AI-optimized search."""
    # Fail fast with actionable messages when the dependency or key is missing.
    if not TAVILY_AVAILABLE:
        return "Error: Tavily not installed. Run: pip install tavily-python"

    if not Config.TAVILY_API_KEY or Config.TAVILY_API_KEY == "":
        return "Error: TAVILY_API_KEY not set. Get free key from https://tavily.com/"

    try:
        client = TavilyClient(api_key=Config.TAVILY_API_KEY)
        response = client.search(
            query=query,
            search_depth="advanced",
            max_results=max_results,
        )

        # Treat a missing/empty "results" field the same as no hits.
        hits = (response or {}).get("results") or []
        if not hits:
            return f"No results found for: {query}"

        blocks = []
        for rank, item in enumerate(hits, 1):
            blocks.append(
                f"{rank}. {item.get('title', 'No title')}\n"
                f" {item.get('content', 'No content')}\n"
                f" Source: {item.get('url', 'No URL')}\n"
                f" Relevance: {item.get('score', 0):.2f}"
            )

        final_output = "\n\n".join(blocks)

        # Tavily may return a synthesized answer; surface it first.
        if response.get("answer"):
            final_output = f"Quick Answer: {response['answer']}\n\n" + final_output

        return final_output

    except Exception as e:
        return f"Web search error: {str(e)}"
178
+
179
+
180
+ # ═══════════════════════════════════════════════════════════════════════════
181
+ # TOOL EXECUTOR
182
+ # ═══════════════════════════════════════════════════════════════════════════
183
+
184
class ToolExecutor:
    """Dispatches tool calls that the LLM requests in plain text."""

    def __init__(self, tools):
        # Index tools by name for O(1) dispatch.
        self.tools = {t.name: t for t in tools}

    def detect_tool_call(self, text: str) -> Optional[tuple]:
        """Return (tool_name, argument) if *text* contains a tool request.

        Recognizes the primary `USE_TOOL: name(args)` protocol first, then a
        looser `name: args` single-line fallback for each registered tool.
        Returns None when neither form is present.
        """
        m = re.search(r'USE_TOOL:\s*(\w+)\((.*?)\)', text, re.IGNORECASE)
        if m:
            return (m.group(1), m.group(2).strip('"\''))

        lowered = text.lower()
        for name in self.tools:
            if f"{name}:" in lowered:
                m = re.search(rf'{name}:\s*([^\n]+)', text, re.IGNORECASE)
                if m:
                    return (name, m.group(1).strip('"\''))

        return None

    def execute(self, tool_name: str, arguments: str) -> str:
        """Run the named tool; failures come back as strings, never raised."""
        if tool_name not in self.tools:
            return f"Error: Unknown tool '{tool_name}'"

        try:
            return self.tools[tool_name].func(arguments)
        except Exception as e:
            return f"Error executing {tool_name}: {str(e)}"
216
+
217
+
218
+ # ═══════════════════════════════════════════════════════════════════════════
219
+ # HELPER FUNCTIONS
220
+ # ═══════════════════════════════════════════════════════════════════════════
221
+
222
def detect_insufficient_answer(answer: str) -> bool:
    """Return True when the model's answer admits it lacks the information.

    Matches (case-insensitively) a fixed set of "I don't know"-style
    phrases; used to decide whether a web-search retry is warranted.
    """
    signals = [
        r"i don't know",
        r"i do not know",
        r"i don't have information",
        r"i cannot provide",
        r"i'm not sure",
        r"i am not sure",
        r"no information available",
        r"beyond my knowledge",
        r"i lack information",
        r"insufficient information",
        r"unable to answer",
        r"cannot answer",
        r"don't have access to",
        r"my knowledge cutoff",
        r"as of my last update",
    ]

    text = answer.lower()
    for pattern in signals:
        if re.search(pattern, text):
            return True
    return False
245
+
246
+
247
def extract_json(text: str) -> Optional[dict]:
    """Extract the first JSON object found in *text*.

    Tries a fenced ```json code block first; if that is absent or fails to
    parse, falls back to scanning for brace-delimited objects (the regex
    tolerates one level of nesting). Empty objects are skipped. Returns
    None when nothing usable parses.

    Fix: the original used bare ``except:`` clauses, which would also
    swallow SystemExit/KeyboardInterrupt; only JSONDecodeError is a
    legitimate parse failure here.
    """
    # Preferred: a ```json ... ``` fenced block.
    fenced = re.findall(r'```(?:json)?\s*(\{.*?\})\s*```', text, re.DOTALL)
    if fenced:
        try:
            return json.loads(fenced[0])
        except json.JSONDecodeError:
            pass  # fall through to the raw brace scan

    # Fallback: raw { ... } objects anywhere in the text.
    candidates = re.findall(r'\{[^{}]*(?:\{[^{}]*\}[^{}]*)*\}', text, re.DOTALL)
    for candidate in candidates:
        try:
            parsed = json.loads(candidate)
        except json.JSONDecodeError:
            continue
        if isinstance(parsed, dict) and parsed:
            return parsed

    return None
268
+
269
+
270
def safe_parse_pydantic(text: str, model, fallback: dict):
    """Parse LLM output into a Pydantic model instance, degrading gracefully.

    Attempts, in order:
      1. JSON extracted from *text* via extract_json()
      2. *text* itself as JSON (model.model_validate_json)
      3. the caller-supplied *fallback* dict
      4. *fallback* filtered down to the model's declared fields

    Args:
        text: raw LLM response, possibly containing embedded JSON.
        model: the Pydantic model CLASS to instantiate (the original
            annotation ``model: BaseModel`` described an instance, which
            was misleading and has been dropped).
        fallback: default field values used when parsing fails.

    Fix: the original used bare ``except:`` clauses; these now catch
    Exception at most, so SystemExit/KeyboardInterrupt propagate.
    """
    json_data = extract_json(text)

    if json_data:
        try:
            return model(**json_data)
        except ValidationError:
            pass  # extracted JSON doesn't fit the schema; keep trying

    try:
        return model.model_validate_json(text)
    except Exception:  # pydantic raises ValidationError or ValueError here
        pass

    try:
        return model(**fallback)
    except Exception:
        # Last resort: drop any fallback keys the model doesn't declare.
        return model(**{k: v for k, v in fallback.items() if k in model.model_fields})
289
+
290
+
291
+ # ═══════════════════════════════════════════════════════════════════════════
292
+ # LLM FACTORY
293
+ # ═══════════════════════════════════════════════════════════════════════════
294
+
295
class LLMFactory:
    """Builds the chat model shared by all agents."""

    @staticmethod
    def create_llm(token: str, temperature: float = 0.3):
        """Create a Llama 3.1 8B Instruct model on the HF Inference API.

        Prefers the ChatHuggingFace wrapper (conversational task); falls
        back to the bare HuggingFaceEndpoint when the wrapper cannot be
        constructed for this account/model.

        Args:
            token: Hugging Face API token.
            temperature: sampling temperature for both variants.

        Fix: the original used a bare ``except:``, which would also catch
        KeyboardInterrupt/SystemExit; narrowed to Exception.
        """
        try:
            endpoint = HuggingFaceEndpoint(
                repo_id="meta-llama/Llama-3.1-8B-Instruct",
                huggingfacehub_api_token=token,
                temperature=temperature,
                max_new_tokens=1500,
                top_p=0.9,
                repetition_penalty=1.1,
                task="conversational",
            )
            return ChatHuggingFace(llm=endpoint)
        except Exception:
            # Fallback: plain endpoint without the chat wrapper.
            return HuggingFaceEndpoint(
                repo_id="meta-llama/Llama-3.1-8B-Instruct",
                huggingfacehub_api_token=token,
                temperature=temperature,
                max_new_tokens=1500,
            )
316
+
317
+
318
+ # ═══════════════════════════════════════════════════════════════════════════
319
+ # ENHANCED RESEARCHER AGENT (with retry logic)
320
+ # ═══════════════════════════════════════════════════════════════════════════
321
+
322
class ResearcherAgent:
    """Enhanced Researcher with automatic web search retry.

    Asks the LLM to pick a tool (calculator / knowledge base / web search),
    executes the detected tool call, synthesizes an answer from the result,
    and flags insufficient answers so the critic can route the workflow back
    here with web search forced.
    """

    def __init__(self, llm, tool_executor):
        # llm: chat model used for tool selection and answer synthesis.
        # tool_executor: detects `USE_TOOL: name(args)` patterns and runs them.
        self.llm = llm
        self.tool_executor = tool_executor

    def __call__(self, state: AgentState) -> AgentState:
        """Run one research pass; writes `research_output` into the state."""
        print("\n🔍 RESEARCHER AGENT")

        question = state["question"]
        retry_count = state.get("research_iterations", 0)

        # Check if this is a retry from critic
        force_web_search = False
        if retry_count > 0:
            print(f" 🔄 RETRY #{retry_count} - Forcing web search")
            force_web_search = True

        # Initial tool selection prompt
        if force_web_search:
            # Force web search on retry
            prompt = f"""IMPORTANT: Previous answer was insufficient. Use web search to find current information.

Question: {question}

You MUST use web search for this query.

To use web search: USE_TOOL: web_search({question})

Your response:"""
        else:
            # Normal tool selection
            prompt = f"""You are a research assistant. Answer: {question}

Available tools:
1. calculator(expression) - Math operations
2. search_knowledge(topic) - Internal knowledge base (for general facts, not current events)
3. web_search(query) - Real-time web search (USE THIS for current events, recent news, 2025 info, "who won", "latest")

CRITICAL: Use web_search for:
- Questions with "2025", "current", "recent", "latest", "today", "who won"
- Elections, news, prices, events
- Anything that requires up-to-date information

To use tool: USE_TOOL: tool_name(arguments)

Your response:"""

        try:
            # Chat models expose .invoke; raw endpoints are plain callables.
            if hasattr(self.llm, 'invoke'):
                response_obj = self.llm.invoke([HumanMessage(content=prompt)])
                response = response_obj.content if hasattr(response_obj, 'content') else str(response_obj)
            else:
                response = self.llm(prompt)
        except Exception as e:
            print(f" ⚠️ Error: {e}")
            response = f"Error processing: {question}"

        print(f" LLM: {response[:150]}...")

        # Execute tool if detected
        tool_call = self.tool_executor.detect_tool_call(response)
        web_sources = []
        needs_web_search = False

        if tool_call:
            tool_name, arguments = tool_call
            print(f" 🔧 Tool: {tool_name}({arguments})")

            tool_result = self.tool_executor.execute(tool_name, arguments)
            print(f" ✅ Result: {tool_result[:200]}...")

            # Check if knowledge base says it needs web search
            if tool_name == "search_knowledge" and "needs web search" in tool_result.lower():
                print(f" ⚠️ Knowledge base insufficient - flagging for web search")
                needs_web_search = True

            # Extract sources from web search output (title + URL pairs).
            if tool_name == "web_search":
                url_pattern = r'Source: (https?://[^\s]+)'
                urls = re.findall(url_pattern, tool_result)

                title_pattern = r'\d+\.\s+([^\n]+)'
                titles = re.findall(title_pattern, tool_result)

                # Keep at most 3 references; pad missing titles.
                web_sources = [
                    {"title": titles[i] if i < len(titles) else "No title", "url": url}
                    for i, url in enumerate(urls[:3])
                ]

            # Synthesize answer
            synthesis_prompt = f"""Based on this information, provide a comprehensive answer to: {question}

Tool: {tool_name}
Information:
{tool_result}

Provide clear answer:"""

            try:
                if hasattr(self.llm, 'invoke'):
                    answer_obj = self.llm.invoke([HumanMessage(content=synthesis_prompt)])
                    answer = answer_obj.content if hasattr(answer_obj, 'content') else str(answer_obj)
                else:
                    answer = self.llm(synthesis_prompt)
            # was a bare `except:` — narrowed so KeyboardInterrupt/SystemExit propagate
            except Exception:
                answer = f"From {tool_name}: {tool_result[:500]}"

            sources = [tool_name]
            # Web search is considered more reliable than other tools.
            confidence = 0.9 if tool_name == "web_search" else 0.85
        else:
            # No tool used - LLM knowledge only
            answer = response
            sources = ["LLM Knowledge"]
            confidence = 0.7
            print(f" ℹ️ Using LLM knowledge only")

        # Check if answer is insufficient ("I don't know"-style responses).
        if detect_insufficient_answer(answer):
            print(f" ⚠️ INSUFFICIENT ANSWER DETECTED")
            needs_web_search = True
            confidence = 0.3

        research_output = ResearchOutput(
            answer=answer.strip(),
            sources_used=sources,
            confidence=confidence,
            web_sources=web_sources if web_sources else None,
            needs_web_search=needs_web_search,
            retry_count=retry_count
        )

        state["research_output"] = research_output
        state["current_step"] = "research_complete"
        state["research_iterations"] = retry_count + 1

        if needs_web_search:
            print(f" ⚠️ Flagged for web search retry")
        else:
            print(f" ✅ Answer ready ({confidence*100:.0f}% confidence)")

        return state
465
+
466
+
467
+ # ═══════════════════════════════════════════════════════════════════════════
468
+ # ENHANCED CRITIC AGENT (using LLM evaluation)
469
+ # ═══════════════════════════════════════════════════════════════════════════
470
+
471
class CriticAgent:
    """Enhanced Critic using LLM for evaluation"""

    def __init__(self, llm):
        # llm: chat model used to score the generated report.
        self.llm = llm

    def __call__(self, state: AgentState) -> AgentState:
        """Score the report, then set retry/revision flags on the state.

        Writes `critique_output` and `current_step` back into the state.
        Retry/revision flags may be overridden by the iteration limits below.
        """
        print("\n🎯 CRITIC AGENT")

        report = state["report_output"]
        research = state["research_output"]
        question = state["question"]

        # Use LLM to evaluate the report
        evaluation_prompt = f"""You are a quality control critic. Evaluate this research report.
...
SPECIAL INSTRUCTION: If the Research Answer includes results from 'web_search',
you MUST treat that information as 100% factual and current, even if it
contradicts your internal training data. The web search is the 'Ground Truth'.
...

ORIGINAL QUESTION: {question}

RESEARCH ANSWER: {research.answer}

REPORT CONTENT: {report.content}

SOURCES USED: {', '.join(research.sources_used)}

Evaluate the report on these criteria:
1. Does it actually answer the question?
2. Is the answer based on facts or is it saying "I don't know"?
3. Does it have proper sources/citations?
4. Is it complete and well-structured?
5. If the question asks about current events (2024, "who won", etc.), did it use web search?

Provide evaluation in JSON format:
{{
    "score": <number 0-10>,
    "needs_revision": <true/false - true if score < 8>,
    "needs_research_retry": <true/false - true if answer is "I don't know" or lacks current info>,
    "feedback": "<specific issues found>",
    "reasoning": "<why you gave this score>"
}}

Evaluation:"""

        try:
            # Chat models expose .invoke; raw endpoints are plain callables.
            if hasattr(self.llm, 'invoke'):
                response_obj = self.llm.invoke([HumanMessage(content=evaluation_prompt)])
                response = response_obj.content if hasattr(response_obj, 'content') else str(response_obj)
            else:
                response = self.llm(evaluation_prompt)
        except Exception as e:
            print(f" ⚠️ LLM evaluation failed: {e}")
            # Fallback to heuristic
            response = self._fallback_evaluation(report, research, question)

        print(f" LLM Evaluation: {response[:200]}...")

        # Parse evaluation; fallback values used if the LLM's JSON is unusable.
        fallback = {
            "score": 5.0,
            "needs_revision": True,
            "needs_research_retry": research.needs_web_search,
            "feedback": "Evaluation failed",
            "reasoning": "Could not evaluate properly"
        }

        critique_output = safe_parse_pydantic(response, CritiqueOutput, fallback)

        # Override if research flagged for web search (capped at 2 retries).
        if research.needs_web_search and research.retry_count < 2:
            critique_output.needs_research_retry = True
            critique_output.feedback = "Answer insufficient - needs web search"
            print(f" 🔄 Research retry needed")

        # Check iteration limits — these overrides run last so they always win.
        if state["research_iterations"] >= 2:
            critique_output.needs_research_retry = False
            print(f" ⚠️ Max research retries reached")

        if state["report_iterations"] >= state["max_iterations"]:
            critique_output.needs_revision = False
            print(f" ⚠️ Max report revisions reached")

        state["critique_output"] = critique_output
        state["current_step"] = "critique_complete"

        print(f" ✅ Score: {critique_output.score:.1f}/10")
        print(f" 📝 Feedback: {critique_output.feedback[:100]}")

        return state

    def _fallback_evaluation(self, report, research, question):
        """Fallback heuristic evaluation if LLM fails"""
        # Returns a JSON string shaped like the LLM's expected evaluation so
        # the same safe_parse_pydantic path can consume it.

        score = 5.0
        feedback = []

        # Check if answer seems insufficient
        if detect_insufficient_answer(research.answer):
            score = 3.0
            feedback.append("Answer is insufficient or says 'I don't know'")
        else:
            score = 7.0

        # Check sources — web citations earn a bonus point.
        if research.web_sources:
            score += 1.0

        # Check length — minimal substance check on the report body.
        if len(report.content) > 200:
            score += 0.5

        # Clamp to the 0-10 scale.
        score = min(10.0, max(0.0, score))

        needs_retry = detect_insufficient_answer(research.answer) or research.needs_web_search

        return json.dumps({
            "score": score,
            "needs_revision": score < 8.0,
            "needs_research_retry": needs_retry,
            "feedback": " | ".join(feedback) if feedback else "Heuristic evaluation",
            "reasoning": "Fallback evaluation used"
        })
597
+
598
+
599
+ # ═══════════════════════════════════════════════════════════════════════════
600
+ # OTHER AGENTS
601
+ # ═══════════════════════════════════════════════════════════════════════════
602
+
603
class AnalystAgent:
    """Extracts key points and implications from the researcher's answer."""

    def __init__(self, llm):
        # llm: chat model used to distill insights from the research answer.
        self.llm = llm

    def __call__(self, state: AgentState) -> AgentState:
        """Write an AnalysisOutput into state["analysis_output"]."""
        print("\n📊 ANALYST AGENT")

        research = state["research_output"]

        prompt = f"""Extract key insights from this research.

Question: {state['question']}
Answer: {research.answer}

Provide analysis in JSON:
{{
    "key_points": ["insight 1", "insight 2", "insight 3"],
    "implications": "why this matters"
}}"""

        try:
            # Chat models expose .invoke; raw endpoints are plain callables.
            if hasattr(self.llm, 'invoke'):
                response_obj = self.llm.invoke([HumanMessage(content=prompt)])
                response = response_obj.content if hasattr(response_obj, 'content') else str(response_obj)
            else:
                response = self.llm(prompt)
        # was a bare `except:` — narrowed so KeyboardInterrupt/SystemExit propagate
        except Exception:
            # Empty JSON forces safe_parse_pydantic down the fallback path.
            response = '{}'

        fallback = {
            "key_points": [research.answer[:100]],
            "implications": "Research findings provided"
        }

        analysis_output = safe_parse_pydantic(response, AnalysisOutput, fallback)
        state["analysis_output"] = analysis_output
        state["current_step"] = "analysis_complete"
        print(f" ✅ {len(analysis_output.key_points)} insights extracted")

        return state
643
+
644
+
645
class WriterAgent:
    """Turns the research answer and analysis into a formatted report."""

    def __init__(self, llm):
        # llm: chat model used to draft the report.
        self.llm = llm

    def __call__(self, state: AgentState) -> AgentState:
        """Write a ReportOutput into the state and bump report_iterations."""
        print(f"\n✍️ WRITER AGENT (Iteration {state['report_iterations'] + 1})")

        research = state["research_output"]
        analysis = state["analysis_output"]

        # Optional block listing web citations, appended to the prompt.
        sources_text = ""
        if research.web_sources:
            sources_text = "\n\nWeb Sources:\n" + "\n".join(
                f"- {s['title']}: {s['url']}" for s in research.web_sources
            )

        prompt = f"""Create professional research report.

Question: {state['question']}
Answer: {research.answer}
Insights: {', '.join(analysis.key_points)}
Sources: {', '.join(research.sources_used)}{sources_text}

JSON format:
{{
    "title": "clear title",
    "content": "executive summary + findings + insights + implications + sources"
}}"""

        try:
            # Chat models expose .invoke; raw endpoints are plain callables.
            if hasattr(self.llm, 'invoke'):
                response_obj = self.llm.invoke([HumanMessage(content=prompt)])
                response = response_obj.content if hasattr(response_obj, 'content') else str(response_obj)
            else:
                response = self.llm(prompt)
        # was a bare `except:` — narrowed so KeyboardInterrupt/SystemExit propagate
        except Exception:
            # Empty response forces safe_parse_pydantic down the fallback path.
            response = ""

        # Markdown report assembled locally in case the LLM output is unusable.
        fallback_content = f"""# {state['question']}

## Answer
{research.answer}

## Key Insights
{chr(10).join(f'• {p}' for p in analysis.key_points)}

## Implications
{analysis.implications}

## Sources
{', '.join(research.sources_used)}"""

        if research.web_sources:
            fallback_content += "\n\n## References\n" + "\n".join(
                f"• [{s['title']}]({s['url']})" for s in research.web_sources
            )

        fallback = {"title": state['question'], "content": fallback_content}

        report_output = safe_parse_pydantic(response, ReportOutput, fallback)
        state["report_output"] = report_output
        state["report_iterations"] += 1
        state["current_step"] = "report_complete"
        print(f" ✅ Report: {len(report_output.content)} chars")

        return state
711
+
712
+
713
+ # ═══════════════════════════════════════════════════════════════════════════
714
+ # ENHANCED ROUTING (with research retry)
715
+ # ═══════════════════════════════════════════════════════════════════════════
716
+
717
def route_critique(state: AgentState) -> Literal["retry_research", "revise", "finish"]:
    """Pick the next workflow node from the critic's verdict.

    Priority order: (1) redo research with web search when the critic flagged
    the answer as insufficient, (2) send a low-scoring report back to the
    writer, (3) otherwise approve and finish.
    """
    verdict = state["critique_output"]

    if verdict.needs_research_retry:
        decision = "retry_research"
        banner = "\n🔄 ROUTING: Retry research with web search"
    elif verdict.needs_revision:
        decision = "revise"
        banner = f"\n🔄 ROUTING: Revise report (Score: {verdict.score:.1f}/10)"
    else:
        decision = "finish"
        banner = f"\n✅ ROUTING: Approve (Score: {verdict.score:.1f}/10)"

    print(banner)
    return decision
734
+
735
+
736
+ # ═══════════════════════════════════════════════════════════════════════════
737
+ # MAIN SYSTEM
738
+ # ═══════════════════════════════════════════════════════════════════════════
739
+
740
class MultiAgentSystem:
    """Enhanced Multi-Agent System with Intelligent Retry

    Orchestrates researcher -> analyst -> writer -> critic through a
    LangGraph StateGraph, with conditional edges that can loop back to the
    researcher (web-search retry) or the writer (report revision).
    """

    def __init__(self, hf_token: str, tavily_key: str, max_iterations: int = 2):
        # Credentials are stashed on the shared Config so tools can read them.
        Config.HF_TOKEN = hf_token
        Config.TAVILY_API_KEY = tavily_key
        # Upper bound on report revision loops (see CriticAgent's limit check).
        self.max_iterations = max_iterations

        print("\n" + "="*70)
        print("🤖 ENHANCED AGENTIC AI SYSTEM V3")
        print("="*70)
        print("NEW: Intelligent retry with web search")
        print("NEW: LLM-based critic evaluation")
        print("="*70)

        # Tools
        tools = [calculator, search_knowledge, web_search]
        self.tool_executor = ToolExecutor(tools)
        print(f"🛠️ Tools: {[t.name for t in tools]}")

        # LLM — shared by all four agents.
        print("📡 Initializing LLM...")
        self.llm = LLMFactory.create_llm(hf_token)

        # Agents
        print("🤖 Creating agents...")
        self.researcher = ResearcherAgent(self.llm, self.tool_executor)
        self.analyst = AnalystAgent(self.llm)
        self.writer = WriterAgent(self.llm)
        self.critic = CriticAgent(self.llm)

        # Build graph
        self.graph = self._build_graph()

        print("\n✅ System Ready with Enhanced Features!")

    def _build_graph(self):
        """Wire the four agents into a compiled LangGraph workflow."""
        workflow = StateGraph(AgentState)

        workflow.add_node("researcher", self.researcher)
        workflow.add_node("analyst", self.analyst)
        workflow.add_node("writer", self.writer)
        workflow.add_node("critic", self.critic)

        # Linear pipeline up to the critic; the critic decides what's next.
        workflow.set_entry_point("researcher")
        workflow.add_edge("researcher", "analyst")
        workflow.add_edge("analyst", "writer")
        workflow.add_edge("writer", "critic")

        # Enhanced routing with research retry
        workflow.add_conditional_edges(
            "critic",
            route_critique,
            {
                "retry_research": "researcher",  # NEW: Retry research
                "revise": "writer",
                "finish": END
            })
        return workflow.compile()

    def research(self, question: str) -> dict:
        """Run the full workflow for *question*.

        Returns the final graph state (dict-like) on success, or None if the
        graph raised — callers must handle the None case.
        """
        print("="*70)
        print(f"📋 RESEARCH QUESTION: {question}")
        print("="*70)

        # Fresh state: all agent outputs empty, counters zeroed.
        initial_state = AgentState(
            question=question,
            research_output=None,
            analysis_output=None,
            report_output=None,
            critique_output=None,
            report_iterations=0,
            research_iterations=0,
            max_iterations=self.max_iterations,
            current_step="start"
        )

        try:
            final_state = self.graph.invoke(initial_state)

            print("\n" + "="*70)
            print("✅ RESEARCH COMPLETE")
            print("="*70)

            if final_state.get("critique_output"):
                critique = final_state["critique_output"]
                print(f"Final Score: {critique.score:.1f}/10")
                print(f"Research Retries: {final_state.get('research_iterations', 0)}")
                print(f"Report Revisions: {final_state['report_iterations']}")

            return final_state
        except Exception as e:
            print(f"\n❌ Error: {e}")
            import traceback
            traceback.print_exc()
            return None
836
+
837
+
838
+ #═══════════════════════════════════════════════════════════════════════════
839
+ #CLI DEMO
840
+ #═══════════════════════════════════════════════════════════════════════════
841
+
842
def cli_demo():
    """Interactive command-line demo: prompt for tokens, then answer questions
    in a loop until the user types 'quit'/'exit'/'q'."""
    print("""
╔══════════════════════════════════════════════════════════════════════╗
║ ENHANCED AGENTIC AI SYSTEM ║
║ WITH LANGGRAPH AND TAVILY- AI SEARCH ║
╚══════════════════════════════════════════════════════════════════════╝

""")

    # Both credentials are mandatory: HF for the LLM, Tavily for web search.
    hf_token = input("Hugging Face Token: ").strip()
    tavily_key = input("Tavily API Key: ").strip()

    if not hf_token or not tavily_key:
        print("❌ Both tokens required!")
        return

    try:
        system = MultiAgentSystem(hf_token, tavily_key, max_iterations=2)
    except Exception as e:
        print(f"❌ Init failed: {e}")
        return

    print("\n💡 Try these queries to test retry logic:")
    print(" • who won 2024 elections (will retry with web search)")
    print(" • latest AI news December 2024 (uses web search first)")
    print(" • explain machine learning (uses knowledge base)")
    print(" • what is 25*4+10 (uses calculator)")

    # Main REPL loop.
    while True:
        print("\n" + "="*70)
        question = input("\n🤔 Your question (or 'quit'): ").strip()

        if question.lower() in ['quit', 'exit', 'q']:
            print("\n👋 Goodbye!")
            break

        if not question:
            continue

        # research() returns the final graph state, or None on failure.
        final_state = system.research(question)

        if final_state and final_state.get("report_output"):
            print("\n" + "="*70)
            print("📄 RESEARCH REPORT")
            print("="*70)

            report = final_state["report_output"]
            print(f"\n📌 {report.title}\n")
            print(report.content)

            # Metadata: sources, confidence, retries, web references.
            if final_state.get("research_output"):
                research = final_state["research_output"]
                print("\n" + "-"*70)
                print("📊 METADATA")
                print("-"*70)
                print(f"Sources: {', '.join(research.sources_used)}")
                print(f"Confidence: {research.confidence*100:.0f}%")
                print(f"Research Retries: {research.retry_count}")

                if research.web_sources:
                    print(f"\n🌐 Web References:")
                    for i, source in enumerate(research.web_sources, 1):
                        print(f" {i}. {source['title']}")
                        print(f" {source['url']}")

            critique = final_state["critique_output"]
            print(f"\n🎯 Quality Score: {critique.score:.1f}/10")
            print(f"📝 Feedback: {critique.feedback}")
910
+
911
+
912
if __name__ == "__main__":
    # Launch the interactive CLI when executed as a script (not on import).
    cli_demo()
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ langgraph
2
+ langchain
3
+ langchain-community
4
+ langchain-huggingface
5
+ pydantic
6
+ numexpr
7
+ tavily-python
8
+ streamlit