#!/usr/bin/env python3
"""
Test the Gemini tool calling configuration for document editor
"""
import os
import sys
import asyncio
from dotenv import load_dotenv
# Add parent directory to path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from agent_api import LLMConfig
from langchain_openai import ChatOpenAI
from langchain_google_genai import ChatGoogleGenerativeAI
# Load environment variables
load_dotenv(dotenv_path=".env", override=False)
async def test_llm_config():
    """Verify LLMConfig wires both OpenAI and Gemini LLMs for the doc editor.

    Steps, in order (each failure prints a message and returns early):
      1. Report which required environment variables are set; skip the test
         entirely if GOOGLE_API_KEY or OPENAI_API_KEY is missing.
      2. Initialize ``LLMConfig`` and confirm its two LLM attributes have the
         expected concrete types (ChatOpenAI / ChatGoogleGenerativeAI).
      3. Smoke-test each LLM with a trivial async prompt.
      4. Construct ``DocumentEditorAgent`` with both LLMs and report its
         tool-binding state.

    Returns:
        None. Results are reported via ``print``; this is a manual smoke
        test, not a pytest-style assertion suite.
    """
    print("=" * 80)
    print("TESTING GEMINI TOOL CALLING CONFIGURATION")
    print("=" * 80)

    # --- Step 1: environment configuration ---
    google_api_key = os.getenv("GOOGLE_API_KEY")
    gemini_model = os.getenv("GEMINI_TOOL_MODEL")
    openai_key = os.getenv("OPENAI_API_KEY")
    openai_model = os.getenv("LLM_MODEL")

    print("\nEnvironment Configuration:")
    print(f"   GOOGLE_API_KEY: {'[SET]' if google_api_key else '[MISSING]'}")
    print(f"   GEMINI_TOOL_MODEL: {gemini_model or '[NOT SET]'}")
    print(f"   OPENAI_API_KEY: {'[SET]' if openai_key else '[MISSING]'}")
    print(f"   LLM_MODEL: {openai_model or '[NOT SET]'}")

    if not google_api_key or not openai_key:
        print("\nWARNING: Skipping test - required environment variables not configured")
        print("   Required: GOOGLE_API_KEY and OPENAI_API_KEY")
        return

    # --- Step 2: LLMConfig initialization and type verification ---
    print("\nInitializing LLMConfig...")
    try:
        llm_config = LLMConfig()
        print("LLMConfig initialized successfully")
    except Exception as e:
        print(f"FAILED to initialize LLMConfig: {e}")
        import traceback
        traceback.print_exc()
        return

    print("\nLLM Types:")
    print(f"   llm_config.openai_llm type: {type(llm_config.openai_llm).__name__}")
    print(f"   llm_config.gemini_llm type: {type(llm_config.gemini_llm).__name__}")

    is_openai = isinstance(llm_config.openai_llm, ChatOpenAI)
    is_gemini = isinstance(llm_config.gemini_llm, ChatGoogleGenerativeAI)
    print("\nType Verification:")
    print(f"   OpenAI LLM is ChatOpenAI: {is_openai}")
    print(f"   Gemini LLM is ChatGoogleGenerativeAI: {is_gemini}")
    if not is_openai or not is_gemini:
        print("\nType verification FAILED!")
        return

    # --- Step 3: smoke-test both LLMs with a trivial prompt ---
    print("\nTesting LLM invocation...")
    try:
        print("\n   Testing OpenAI LLM...")
        response = await llm_config.openai_llm.ainvoke("Say 'OpenAI LLM works!'")
        print(f"   OpenAI LLM response: {response.content[:50]}...")
    except Exception as e:
        print(f"   OpenAI LLM FAILED: {e}")
        return

    try:
        print("\n   Testing Gemini LLM...")
        response = await llm_config.gemini_llm.ainvoke("Say 'Gemini LLM works!'")
        print(f"   Gemini LLM response: {response.content[:50]}...")
    except Exception as e:
        print(f"   Gemini LLM FAILED: {e}")
        return

    # --- Step 4: DocumentEditorAgent construction and tool binding ---
    print("\nTesting DocumentEditorAgent initialization...")
    try:
        # Imported here (not at module top) so the earlier LLM checks can run
        # even if the agents package has an import-time problem.
        from agents.doc_editor import DocumentEditorAgent
        doc_editor = DocumentEditorAgent(
            llm=llm_config.openai_llm,           # summary generation
            llm_tool_calling=llm_config.gemini_llm,  # tool calling
        )
        print("DocumentEditorAgent initialized successfully")
        print(f"   - Uses {type(doc_editor.llm).__name__} for summary")
        print(f"   - Uses {type(doc_editor.llm_tool_calling).__name__} for tool calling")

        print("\nTool binding check:")
        print(f"   - Tools count: {len(doc_editor.tools)}")
        print(f"   - Tool names: {[t.name for t in doc_editor.tools]}")
        print(f"   - Has llm_with_tools: {hasattr(doc_editor, 'llm_with_tools')}")
    except Exception as e:
        print(f"DocumentEditorAgent initialization FAILED: {e}")
        import traceback
        traceback.print_exc()
        return

    # --- Summary banner: only reached if every step above succeeded ---
    print("\n" + "=" * 80)
    print("ALL TESTS PASSED")
    print("=" * 80)
    print("\nSummary:")
    print("   - LLMConfig: Working")
    print("   - OpenAI LLM: Working")
    print("   - Gemini LLM: Working")
    print("   - DocumentEditorAgent: Initialized with both LLMs")
    print("   - Tool calling: Configured with Gemini")
    print("   - Summary generation: Configured with OpenAI")
if __name__ == "__main__":
asyncio.run(test_llm_config()) |