# CyberLegalAIendpoint/tests/test_logging_doc_editor.py
# Author: Charles Grandjean — commit 8cc8e89 ("solve tests")
#!/usr/bin/env python3
"""
Test script to verify detailed logging in document editor
"""
import json
import logging
import sys
import os
import asyncio
from datetime import datetime
from pathlib import Path
# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent))
from utils.editor_tools import replace, add, delete, attempt_completion
from agents.doc_editor import DocumentEditorAgent
from langchain_openai import ChatOpenAI
# Configure detailed logging: timestamped, level-aligned records on stdout so
# test output and library log lines interleave in one stream.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s | %(levelname)-8s | %(name)s | %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    stream=sys.stdout
)
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
def test_tool_logging():
    """Exercise the replace/add/delete editor tools and print their results.

    Runs three sequential sub-tests against a small TipTap JSON document:

    1. ``replace`` — swap ``"test"`` for ``"example"``.
    2. ``add`` — insert a bold version suffix after the (already replaced)
       heading text; only runs if sub-test 1 succeeded.
    3. ``delete`` — remove the original heading text (operates on the
       *original* document, not the output of the earlier edits).

    Each tool returns a dict with an ``ok`` flag, an ``error`` message on
    failure, and the updated ``doc_text`` on success. Results are printed,
    not asserted, so the run never aborts early.
    """
    print("\n" + "=" * 80)
    print("TEST 1: Testing Tool Logging")
    print("=" * 80)
    # Sample TipTap JSON: one centered level-1 heading and one paragraph.
    doc = {
        "type": "doc",
        "content": [
            {
                "type": "heading",
                "attrs": {"level": 1, "textAlign": "center"},
                "content": [{"type": "text", "text": "Document de test"}]
            },
            {
                "type": "paragraph",
                "content": [{"type": "text", "text": "Ceci est un test du document editor."}]
            }
        ]
    }
    # Canonical serialization (sorted keys, fixed indent) so the tools'
    # literal string searches match deterministically.
    doc_text = json.dumps(doc, ensure_ascii=False, sort_keys=True, indent=2)
    print(f"\nπŸ“ Initial document: {len(doc_text)} bytes")
    print("\nInitial document:")  # plain string; was an f-string with no placeholders (F541)
    print(doc_text)
    # Test 1: Replace
    print("\n" + "-" * 80)
    print("TEST 1.1: Replace Tool")
    print("-" * 80)
    result = replace.invoke({
        "doc_text": doc_text,
        "search": '"text": "test"',
        "replace": '"text": "example"',
        "expected_matches": 1
    })
    print(f"\nResult: {json.dumps(result, indent=2)}")
    if result.get("ok"):
        print("\nβœ… Replace test PASSED")
        print(f"Updated document ({len(result['doc_text'])} bytes):")
        print(result['doc_text'])
    else:
        print(f"\n❌ Replace test FAILED: {result.get('error')}")
    # Test 2: Add — chained on the replace output, so only meaningful
    # (and only run) when Test 1.1 succeeded.
    print("\n" + "-" * 80)
    print("TEST 1.2: Add Tool")
    print("-" * 80)
    if result.get("ok"):
        new_doc_text = result['doc_text']
        add_result = add.invoke({
            "doc_text": new_doc_text,
            # Anchor reflects the Test 1.1 replacement ("test" -> "example").
            "anchor_search": '"text": "Document de example"',
            "insert": ',\n {"type": "text", "marks": [{"type": "bold"}], "text": " - Version 1.0"}',
            "position": "after",
            "expected_matches": 1
        })
        print(f"\nResult: {json.dumps(add_result, indent=2)}")
        if add_result.get("ok"):
            print("\nβœ… Add test PASSED")
            print(f"Updated document ({len(add_result['doc_text'])} bytes):")
            print(add_result['doc_text'])
        else:
            print(f"\n❌ Add test FAILED: {add_result.get('error')}")
    # Test 3: Delete — intentionally uses the ORIGINAL doc_text so the
    # heading text "Document de test" still exists to be deleted.
    print("\n" + "-" * 80)
    print("TEST 1.3: Delete Tool")
    print("-" * 80)
    delete_result = delete.invoke({
        "doc_text": doc_text,
        "search": '"text": "Document de test"',
        "expected_matches": 1
    })
    print(f"\nResult: {json.dumps(delete_result, indent=2)}")
    if delete_result.get("ok"):
        print("\nβœ… Delete test PASSED")
    else:
        print(f"\n❌ Delete test FAILED: {delete_result.get('error')}")
async def test_agent_logging():
    """Exercise the DocumentEditorAgent end-to-end and print its result.

    Builds a one-paragraph TipTap document and asks the agent to replace
    'test' with 'example'. Skips silently (with a warning) when
    ``OPENAI_API_KEY`` is unset, since the agent makes real LLM calls.

    Environment variables read:
        OPENAI_API_KEY   — required to run; test is skipped without it.
        LLM_MODEL        — model name (default ``gpt-4o-mini``).
        LLM_BINDING_HOST — API base URL (default the public OpenAI endpoint).

    Any exception is caught at this top-level boundary, printed with its
    traceback, and swallowed so the overall script keeps going.
    """
    print("\n" + "=" * 80)
    print("TEST 2: Testing Agent Logging")
    print("=" * 80)
    # Sample document: a single paragraph.
    doc = {
        "type": "doc",
        "content": [
            {
                "type": "paragraph",
                "content": [{"type": "text", "text": "Ceci est un document de test."}]
            }
        ]
    }
    doc_text = json.dumps(doc, ensure_ascii=False, sort_keys=True, indent=2)
    print(f"\nπŸ“ Initial document: {len(doc_text)} bytes")
    print("πŸ“‹ Instruction: Replace 'test' with 'example'")  # plain string (was F541 f-string)
    # Check if we have an API key; without it the agent cannot run.
    if not os.getenv("OPENAI_API_KEY"):
        print("\n⚠️ OPENAI_API_KEY not set, skipping agent test")
        return
    try:
        # Initialize LLM with env-configurable model and endpoint.
        llm = ChatOpenAI(
            model=os.getenv("LLM_MODEL", "gpt-4o-mini"),
            api_key=os.getenv("OPENAI_API_KEY"),
            base_url=os.getenv("LLM_BINDING_HOST", "https://api.openai.com/v1")
        )
        # Initialize agent
        agent = DocumentEditorAgent(llm=llm)
        # Run agent; max_iterations bounds the edit loop.
        result = await agent.edit_document(
            doc_text=doc_text,
            user_instruction="Replace 'test' with 'example'",
            max_iterations=3
        )
        print("\nπŸ“Š Agent Result:")  # plain string (was F541 f-string)
        print(f"   βœ… Success: {result['success']}")
        print(f"   πŸ”„ Iterations: {result['iteration_count']}")
        print(f"   πŸ’¬ Message: {result['message']}")
        if result['success']:
            print("\nβœ… Agent test PASSED")  # plain string (was F541 f-string)
            print(f"πŸ“ Final document size: {len(result['doc_text'])} bytes")
        else:
            print("\n❌ Agent test FAILED")  # plain string (was F541 f-string)
    except Exception as e:
        # Top-level boundary for a manual test script: report and continue.
        print(f"\n❌ Agent test failed with error: {e}")
        import traceback
        traceback.print_exc()
if __name__ == "__main__":
    # Script entry point: run the tool-level logging tests and frame the
    # output with banners and start/end timestamps.
    banner = "=" * 80
    print("\n" + banner)
    print("DOCUMENT EDITOR LOGGING TEST")
    print(banner)
    print(f"Started at: {datetime.now().isoformat()}")

    # Tool logging tests (no network needed).
    test_tool_logging()

    # Agent logging test makes real LLM calls; disabled by default.
    # Uncomment to test with actual LLM calls
    # asyncio.run(test_agent_logging())

    print("\n" + banner)
    print("TEST COMPLETED")
    print(banner)
    print(f"Ended at: {datetime.now().isoformat()}")