#!/usr/bin/env python3
"""
Test the Gemini tool calling configuration for document editor
"""
import os
import sys
import asyncio
from dotenv import load_dotenv
# Make the project root importable so `agent_api` and `agents.*` resolve
# when this test is run directly from the tests/ directory.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from agent_api import LLMConfig
from langchain_openai import ChatOpenAI
from langchain_google_genai import ChatGoogleGenerativeAI
# Load .env from the current working directory; override=False means values
# already present in the process environment take precedence over the file.
load_dotenv(dotenv_path=".env", override=False)
async def test_llm_config():
    """Smoke-test the dual-LLM (OpenAI + Gemini) tool-calling configuration.

    Runs a sequence of checks, printing a human-readable report and
    returning early on the first failure:
      1. Required environment variables (API keys / model names) are set.
      2. ``LLMConfig`` initializes and exposes the expected LLM types.
      3. Each LLM answers a trivial prompt (proves connectivity + auth).
      4. ``DocumentEditorAgent`` accepts both LLMs and binds its tools.

    Returns:
        None. Results are reported via stdout only.
    """
    print("=" * 80)
    print("πŸ§ͺ TESTING GEMINI TOOL CALLING CONFIGURATION")
    print("=" * 80)

    if not _report_environment():
        return

    llm_config = _build_llm_config()
    if llm_config is None:
        return

    if not _verify_llm_types(llm_config):
        return

    # Round-trip a trivial prompt through each LLM; the shared helper keeps
    # the two near-identical try/except blocks from being duplicated.
    print("\nπŸ§ͺ Testing LLM invocation...")
    if not await _invoke_llm("OpenAI", llm_config.openai_llm, "Say 'OpenAI LLM works!'"):
        return
    if not await _invoke_llm("Gemini", llm_config.gemini_llm, "Say 'Gemini LLM works!'"):
        return

    if not _check_doc_editor(llm_config):
        return

    _print_summary()


def _report_environment():
    """Print the relevant environment variables; True iff both API keys are set."""
    google_api_key = os.getenv("GOOGLE_API_KEY")
    gemini_model = os.getenv("GEMINI_TOOL_MODEL")
    openai_key = os.getenv("OPENAI_API_KEY")
    openai_model = os.getenv("LLM_MODEL")
    print("\nπŸ“‹ Environment Configuration:")
    print(f" GOOGLE_API_KEY: {'βœ… Set' if google_api_key else '❌ Missing'}")
    print(f" GEMINI_TOOL_MODEL: {gemini_model or '❌ Not set'}")
    print(f" OPENAI_API_KEY: {'βœ… Set' if openai_key else '❌ Missing'}")
    print(f" LLM_MODEL: {openai_model or '❌ Not set'}")
    if not google_api_key or not openai_key:
        print("\n⚠️ Skipping test - required environment variables not configured")
        print(" Required: GOOGLE_API_KEY and OPENAI_API_KEY")
        return False
    return True


def _build_llm_config():
    """Instantiate ``LLMConfig``; return it, or None (with traceback) on failure."""
    print("\nπŸš€ Initializing LLMConfig...")
    try:
        llm_config = LLMConfig()
    except Exception as e:
        print(f"❌ Failed to initialize LLMConfig: {str(e)}")
        import traceback
        traceback.print_exc()
        return None
    print("βœ… LLMConfig initialized successfully")
    return llm_config


def _verify_llm_types(llm_config):
    """Check that the two LLM attributes have the expected concrete classes."""
    print("\nπŸ” LLM Types:")
    print(f" llm_config.openai_llm type: {type(llm_config.openai_llm).__name__}")
    print(f" llm_config.gemini_llm type: {type(llm_config.gemini_llm).__name__}")
    is_openai = isinstance(llm_config.openai_llm, ChatOpenAI)
    is_gemini = isinstance(llm_config.gemini_llm, ChatGoogleGenerativeAI)
    print("\nβœ… Type Verification:")
    print(f" OpenAI LLM is ChatOpenAI: {is_openai}")
    print(f" Gemini LLM is ChatGoogleGenerativeAI: {is_gemini}")
    if not is_openai or not is_gemini:
        print("\n❌ Type verification failed!")
        return False
    return True


async def _invoke_llm(label, llm, prompt):
    """Send *prompt* to *llm*, print a truncated response; True on success."""
    try:
        print(f"\n Testing {label} LLM...")
        response = await llm.ainvoke(prompt)
        # Truncate to 50 chars so a verbose model reply doesn't flood the log.
        print(f" βœ… {label} LLM response: {response.content[:50]}...")
        return True
    except Exception as e:
        print(f" ❌ {label} LLM failed: {str(e)}")
        return False


def _check_doc_editor(llm_config):
    """Construct DocumentEditorAgent with both LLMs and report its tool binding.

    OpenAI is wired as the summary LLM, Gemini as the tool-calling LLM —
    mirroring the production configuration this script exists to validate.
    """
    print("\nπŸ§ͺ Testing DocumentEditorAgent initialization...")
    try:
        # Imported lazily so the earlier, cheaper checks can still run (and
        # report) when the agents package itself fails to import.
        from agents.doc_editor import DocumentEditorAgent
        doc_editor = DocumentEditorAgent(
            llm=llm_config.openai_llm,
            llm_tool_calling=llm_config.gemini_llm
        )
        print("βœ… DocumentEditorAgent initialized successfully")
        print(f" - Uses {type(doc_editor.llm).__name__} for summary")
        print(f" - Uses {type(doc_editor.llm_tool_calling).__name__} for tool calling")
        print("\nπŸ” Tool binding check:")
        print(f" - Tools count: {len(doc_editor.tools)}")
        print(f" - Tool names: {[t.name for t in doc_editor.tools]}")
        print(f" - Has llm_with_tools: {hasattr(doc_editor, 'llm_with_tools')}")
        return True
    except Exception as e:
        print(f"❌ DocumentEditorAgent initialization failed: {str(e)}")
        import traceback
        traceback.print_exc()
        return False


def _print_summary():
    """Print the final all-checks-passed banner."""
    print("\n" + "=" * 80)
    print("βœ… ALL TESTS PASSED")
    print("=" * 80)
    print("\nπŸ“Š Summary:")
    print(" - LLMConfig: βœ… Working")
    print(" - OpenAI LLM: βœ… Working")
    print(" - Gemini LLM: βœ… Working")
    print(" - DocumentEditorAgent: βœ… Initialized with both LLMs")
    print(" - Tool calling: βœ… Configured with Gemini")
    print(" - Summary generation: βœ… Configured with OpenAI")
if __name__ == "__main__":
    # Entry point: drive the async smoke test to completion when the file
    # is executed as a script (asyncio.run creates and closes the loop).
    asyncio.run(test_llm_config())