File size: 5,903 Bytes
de9078a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8cc8e89
 
de9078a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
#!/usr/bin/env python3
"""
Test script to verify detailed logging in document editor
"""

import json
import logging
import sys
import os
import asyncio
from datetime import datetime
from pathlib import Path

# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent))

from utils.editor_tools import replace, add, delete, attempt_completion
from agents.doc_editor import DocumentEditorAgent
from langchain_openai import ChatOpenAI

# Configure detailed logging
# Root-logger setup for the whole test run: INFO level, timestamped
# pipe-delimited format, written to stdout so tool/agent log lines
# interleave with the print() output below in chronological order.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s | %(levelname)-8s | %(name)s | %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    stream=sys.stdout
)

# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)

def test_tool_logging():
    """Exercise the replace, add and delete editor tools and print their results."""

    def _banner(title, rule="-"):
        # Shared section header: blank line, 80-char rule, title, rule.
        print("\n" + rule * 80)
        print(title)
        print(rule * 80)

    _banner("TEST 1: Testing Tool Logging", "=")

    # Minimal TipTap document: a centered level-1 heading plus one paragraph.
    sample_doc = {
        "type": "doc",
        "content": [
            {
                "type": "heading",
                "attrs": {"level": 1, "textAlign": "center"},
                "content": [{"type": "text", "text": "Document de test"}]
            },
            {
                "type": "paragraph",
                "content": [{"type": "text", "text": "Ceci est un test du document editor."}]
            }
        ]
    }

    # Canonical serialized form the tools operate on (stable key order).
    canonical = json.dumps(sample_doc, ensure_ascii=False, sort_keys=True, indent=2)

    print(f"\nπŸ“ Initial document: {len(canonical)} bytes")
    print("\nInitial document:")
    print(canonical)

    # --- Test 1.1: replace -------------------------------------------------
    _banner("TEST 1.1: Replace Tool")
    replace_result = replace.invoke({
        "doc_text": canonical,
        "search": '"text": "test"',
        "replace": '"text": "example"',
        "expected_matches": 1
    })
    print(f"\nResult: {json.dumps(replace_result, indent=2)}")

    if replace_result.get("ok"):
        print("\nβœ… Replace test PASSED")
        print(f"Updated document ({len(replace_result['doc_text'])} bytes):")
        print(replace_result['doc_text'])
    else:
        print(f"\n❌ Replace test FAILED: {replace_result.get('error')}")

    # --- Test 1.2: add (anchors on text produced by the replace step) ------
    _banner("TEST 1.2: Add Tool")
    if replace_result.get("ok"):
        add_result = add.invoke({
            "doc_text": replace_result['doc_text'],
            "anchor_search": '"text": "Document de example"',
            "insert": ',\n    {"type": "text", "marks": [{"type": "bold"}], "text": " - Version 1.0"}',
            "position": "after",
            "expected_matches": 1
        })
        print(f"\nResult: {json.dumps(add_result, indent=2)}")

        if add_result.get("ok"):
            print("\nβœ… Add test PASSED")
            print(f"Updated document ({len(add_result['doc_text'])} bytes):")
            print(add_result['doc_text'])
        else:
            print(f"\n❌ Add test FAILED: {add_result.get('error')}")

    # --- Test 1.3: delete (runs against the pristine document) -------------
    _banner("TEST 1.3: Delete Tool")
    delete_result = delete.invoke({
        "doc_text": canonical,
        "search": '"text": "Document de test"',
        "expected_matches": 1
    })
    print(f"\nResult: {json.dumps(delete_result, indent=2)}")

    if delete_result.get("ok"):
        print("\nβœ… Delete test PASSED")
    else:
        print(f"\n❌ Delete test FAILED: {delete_result.get('error')}")

async def test_agent_logging():
    """Drive DocumentEditorAgent end-to-end on a tiny document (requires an API key)."""
    print("\n" + "=" * 80)
    print("TEST 2: Testing Agent Logging")
    print("=" * 80)

    # One-paragraph TipTap document for the agent to edit.
    source_doc = {
        "type": "doc",
        "content": [
            {
                "type": "paragraph",
                "content": [{"type": "text", "text": "Ceci est un document de test."}]
            }
        ]
    }

    source_text = json.dumps(source_doc, ensure_ascii=False, sort_keys=True, indent=2)

    print(f"\nπŸ“ Initial document: {len(source_text)} bytes")
    print("πŸ“‹ Instruction: Replace 'test' with 'example'")

    # Without credentials the agent cannot reach the LLM; bail out early.
    if not os.getenv("OPENAI_API_KEY"):
        print("\n⚠️ OPENAI_API_KEY not set, skipping agent test")
        return

    try:
        # LLM client; model and endpoint are overridable via environment.
        chat_model = ChatOpenAI(
            model=os.getenv("LLM_MODEL", "gpt-4o-mini"),
            api_key=os.getenv("OPENAI_API_KEY"),
            base_url=os.getenv("LLM_BINDING_HOST", "https://api.openai.com/v1")
        )

        editor = DocumentEditorAgent(llm=chat_model)

        # Run a single edit instruction, capped at three iterations.
        outcome = await editor.edit_document(
            doc_text=source_text,
            user_instruction="Replace 'test' with 'example'",
            max_iterations=3
        )

        print(f"\nπŸ“Š Agent Result:")
        print(f"  βœ… Success: {outcome['success']}")
        print(f"  πŸ”„ Iterations: {outcome['iteration_count']}")
        print(f"  πŸ’¬ Message: {outcome['message']}")

        if outcome['success']:
            print(f"\nβœ… Agent test PASSED")
            print(f"πŸ“ Final document size: {len(outcome['doc_text'])} bytes")
        else:
            print(f"\n❌ Agent test FAILED")

    except Exception as e:
        # Surface the full traceback so failures in the agent stack are debuggable.
        print(f"\n❌ Agent test failed with error: {e}")
        import traceback
        traceback.print_exc()

if __name__ == "__main__":
    print("\n" + "=" * 80)
    print("DOCUMENT EDITOR LOGGING TEST")
    print("=" * 80)
    print(f"Started at: {datetime.now().isoformat()}")
    
    # Test tools logging
    test_tool_logging()
    
    # Test agent logging (if API key available)
    # Uncomment to test with actual LLM calls
    # asyncio.run(test_agent_logging())
    
    print("\n" + "=" * 80)
    print("TEST COMPLETED")
    print("=" * 80)
    print(f"Ended at: {datetime.now().isoformat()}")