# Final_Assignment_Template / test_new_multi_agent_system.py
# (Hugging Face Space listing residue removed: author "Humanlearning",
#  commit f844f16 "updated agent" — kept here as a comment so the file parses.)
"""
Test script for the new LangGraph Multi-Agent System
This script tests the complete workflow:
- Lead Agent orchestration
- Research Agent information gathering
- Code Agent computational tasks
- Answer Formatter GAIA compliance
- Memory system integration
"""
import asyncio
import os
from dotenv import load_dotenv  # python-dotenv: copies key=value pairs into os.environ
from langgraph_agent_system import run_agent_system  # project-local multi-agent entry point
# Load environment variables
load_dotenv("env.local")  # NOTE(review): reads "env.local", not the conventional ".env" — confirm filename
async def test_simple_factual_question():
    """Exercise the system with a plain factual lookup (research-agent path)."""
    print("🧪 Testing simple factual question...")
    question = "What is the capital of Maharashtra?"
    answer = await run_agent_system(
        query=question, user_id="test_user_1", session_id="test_session_1"
    )
    # Echo the exchange, then a visual divider.
    for line in (f"Query: {question}", f"Result: {answer}", "-" * 50):
        print(line)
    return answer
async def test_computational_question():
    """Drive the system with arithmetic that should route through the code agent."""
    print("🧪 Testing computational question...")
    question = "What is 25 + 17 * 3?"
    outcome = await run_agent_system(query=question,
                                     user_id="test_user_2",
                                     session_id="test_session_2")
    print(f"Query: {question}")
    print(f"Result: {outcome}")
    print("-" * 50)  # divider between test transcripts
    return outcome
async def test_complex_question():
    """Pose a question that needs both research and computation to answer."""
    print("🧪 Testing complex question...")
    divider = "-" * 50
    question = "How many seconds are there in a week? Show the calculation."
    response = await run_agent_system(
        query=question,
        user_id="test_user_3",
        session_id="test_session_3",
    )
    print(f"Query: {question}")
    print(f"Result: {response}")
    print(divider)
    return response
async def test_list_question():
    """Ask for a short list to check list-shaped answers come back intact."""
    print("🧪 Testing list question...")
    prompt = "What are the first 3 prime numbers?"
    reply = await run_agent_system(
        query=prompt, user_id="test_user_4", session_id="test_session_4"
    )
    transcript = (f"Query: {prompt}", f"Result: {reply}", "-" * 50)
    for entry in transcript:
        print(entry)
    return reply
async def run_all_tests():
    """Run every test coroutine in sequence and print a pass/fail summary.

    Each test is awaited inside its own try/except so one failure does not
    abort the remaining tests; exceptions are recorded as FAIL entries.
    """
    print("🚀 Starting Multi-Agent System Tests")
    print("=" * 60)
    tests = [
        test_simple_factual_question,
        test_computational_question,
        test_complex_question,
        test_list_question,
    ]
    results = []
    for test_func in tests:
        try:
            result = await test_func()
            results.append(("PASS", test_func.__name__, result))
        except Exception as e:
            print(f"❌ {test_func.__name__} failed: {e}")
            results.append(("FAIL", test_func.__name__, str(e)))
    # Summary
    print("=" * 60)
    print("🏁 Test Results Summary:")
    for status, test_name, result in results:
        status_emoji = "✅" if status == "PASS" else "❌"
        print(f"{status_emoji} {test_name}: {status}")
        if status == "PASS":
            # str() first: run_agent_system may return a non-string (e.g. a
            # dict); slicing such an object directly would crash the summary.
            print(f" Result: {str(result)[:100]}...")
    passed = sum(1 for status, _, _ in results if status == "PASS")
    total = len(results)
    print(f"\n📊 Tests passed: {passed}/{total}")
if __name__ == "__main__":
# Check environment setup
required_env_vars = ["GROQ_API_KEY", "LANGFUSE_PUBLIC_KEY", "LANGFUSE_SECRET_KEY", "LANGFUSE_HOST"]
missing_vars = [var for var in required_env_vars if not os.getenv(var)]
if missing_vars:
print(f"❌ Missing required environment variables: {missing_vars}")
print("Please set up your environment variables in env.local")
exit(1)
# Run tests
asyncio.run(run_all_tests())