#!/usr/bin/env python3
"""
Test script to verify bug fixes for document editor tools
Tests that:
1. All internal functions exist and have __name__ attribute
2. tools_real contains only functions (no StructuredTools)
3. Workflow builds successfully
"""
import sys
import os
from pathlib import Path
# Add parent directory to path
sys.path.insert(0, str(Path(__file__).parent.parent))
def test_internal_functions_exist():
    """Verify every internal editor tool function exists and exposes __name__."""
    print("\n" + "=" * 80)
    print("TEST 1: Internal Functions Exist and Have __name__")
    print("=" * 80)
    from utils.editor_tools import (
        _replace_html, _add_html, _delete_html, _inspect_document, _attempt_completion
    )
    candidates = [
        ("_replace_html", _replace_html),
        ("_add_html", _add_html),
        ("_delete_html", _delete_html),
        ("_inspect_document", _inspect_document),
        ("_attempt_completion", _attempt_completion),
    ]
    ok = True
    for label, fn in candidates:
        try:
            # A StructuredTool wrapper would raise AttributeError here.
            print(f"βœ… {label}: __name__ = '{fn.__name__}'")
        except AttributeError as e:
            print(f"❌ {label}: {e}")
            ok = False
    return ok
def test_tools_real_are_functions():
    """Verify agent.tools_real holds plain functions, not StructuredTool objects."""
    print("\n" + "=" * 80)
    print("TEST 2: tools_real Contains Only Functions")
    print("=" * 80)
    # Import agent - this will fail if tools_real has StructuredTools
    try:
        from agents.doc_editor import DocumentEditorAgent

        # Minimal stand-in LLM: just enough interface to construct the agent.
        class MockLLM:
            def bind_tools(self, tools):
                return self

            async def ainvoke(self, messages):
                from langchain_core.messages import AIMessage
                return AIMessage(content="Test response")

        agent = DocumentEditorAgent(llm=MockLLM())
        print(f"βœ… Agent initialized successfully")
        print(f"πŸ“¦ tools_real has {len(agent.tools_real)} items")
        # Every entry must expose __name__, i.e. be a real function.
        success = True
        for idx, entry in enumerate(agent.tools_real):
            try:
                print(f"βœ… Tool {idx}: {entry.__name__} (has __name__)")
            except AttributeError as e:
                print(f"❌ Tool {idx}: {e}")
                success = False
        return success
    except Exception as e:
        print(f"❌ Failed to initialize agent: {e}")
        import traceback
        traceback.print_exc()
        return False
def test_workflow_builds():
    """Verify the agent's workflow graph compiles without raising."""
    print("\n" + "=" * 80)
    print("TEST 3: Workflow Builds Successfully")
    print("=" * 80)
    try:
        from agents.doc_editor import DocumentEditorAgent

        # Minimal stand-in LLM; only the methods the agent calls are stubbed.
        class MockLLM:
            def bind_tools(self, tools):
                return self

            async def ainvoke(self, messages):
                from langchain_core.messages import AIMessage
                return AIMessage(content="Test response")

        DocumentEditorAgent(llm=MockLLM())
        print(f"βœ… Workflow built successfully")
        # CompiledStateGraph doesn't expose nodes()/edges() directly; building
        # without an exception is the success criterion here.
        return True
    except Exception as e:
        print(f"❌ Failed to build workflow: {e}")
        import traceback
        traceback.print_exc()
        return False
def test_tools_callable():
    """Verify the internal editor tools can be awaited and return result dicts.

    Runs _replace_html, _inspect_document and _attempt_completion against
    trivial inputs via asyncio.run. _replace_html is allowed to fail softly
    (plain-text input may be rejected as invalid HTML); the other two tools
    must report ok=True.

    Returns:
        bool: True if every tool ran without raising and the strict tools
        reported success.
    """
    print("\n" + "=" * 80)
    print("TEST 4: Internal Tools Are Callable")
    print("=" * 80)
    # _add_html/_delete_html were imported but never exercised; import only
    # what this test actually calls.
    from utils.editor_tools import (
        _replace_html, _inspect_document, _attempt_completion
    )
    import asyncio

    async def run_replace():
        # Plain-text document: a failure result is tolerated below.
        return await _replace_html(
            doc_text="Hello World",
            search="World",
            replace="Universe",
            expected_matches=1
        )

    async def run_inspect():
        return await _inspect_document(doc_text="Test document")

    async def run_attempt():
        return await _attempt_completion(message="Test complete")

    all_passed = True

    # _replace_html gets special soft-fail handling: a non-ok result only
    # warns (expected for non-HTML input) and does not fail the test.
    print("\nTesting _replace_html...")
    try:
        result = asyncio.run(run_replace())
        if result.get("ok"):
            print(f"βœ… _replace_html returned: {result}")
        else:
            print(f"⚠️ _replace_html failed (expected for invalid HTML): {result}")
    except Exception as e:
        print(f"❌ _replace_html error: {e}")
        all_passed = False

    # The remaining tools must succeed outright; run them data-driven to
    # avoid the previous copy-pasted try/except blocks.
    for tool_name, coro_factory in [
        ("_inspect_document", run_inspect),
        ("_attempt_completion", run_attempt),
    ]:
        print(f"\nTesting {tool_name}...")
        try:
            result = asyncio.run(coro_factory())
            if result.get("ok"):
                print(f"βœ… {tool_name} returned: {result}")
            else:
                print(f"❌ {tool_name} failed: {result}")
                all_passed = False
        except Exception as e:
            print(f"❌ {tool_name} error: {e}")
            all_passed = False
    return all_passed
if __name__ == "__main__":
    print("\n" + "=" * 80)
    print("BUG FIX VERIFICATION TESTS")
    print("=" * 80)

    # Run each verification test in order, keyed by its function name.
    suite = [
        test_internal_functions_exist,   # Test 1: internal functions exist
        test_tools_real_are_functions,   # Test 2: tools_real holds functions
        test_workflow_builds,            # Test 3: workflow builds
        test_tools_callable,             # Test 4: tools are callable
    ]
    results = {fn.__name__: fn() for fn in suite}

    # Summary
    print("\n" + "=" * 80)
    print("TEST SUMMARY")
    print("=" * 80)
    passed = sum(results.values())
    total = len(results)
    for test_name, outcome in results.items():
        print(f"{'βœ… PASSED' if outcome else '❌ FAILED'}: {test_name}")
    print("\n" + "=" * 80)
    if passed == total:
        print(f"βœ… ALL TESTS PASSED ({passed}/{total})")
    else:
        print(f"❌ SOME TESTS FAILED ({passed}/{total} passed)")
    print("=" * 80 + "\n")
    # Exit nonzero so CI fails when any test does.
    sys.exit(0 if passed == total else 1)