File size: 20,983 Bytes
80ebded
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
# =============================================================
# File: backend/tests/test_retry_system.py
# =============================================================
"""
Comprehensive tests for autonomous retry and self-correction system.

Tests:
1. RAG retry with low scores (threshold adjustment + query expansion)
2. Web search retry with empty results (query rewriting)
3. Safe tool call retry mechanism
4. Rule safe message rewriting
5. Integration tests with reasoning traces
6. Analytics logging verification
"""

import sys
from pathlib import Path
import pytest
from unittest.mock import AsyncMock, MagicMock, patch
import asyncio

# Add backend directory to Python path
backend_dir = Path(__file__).parent.parent
sys.path.insert(0, str(backend_dir))

# Detect pytest availability and fall back to no-op stand-ins so the module
# can still be imported/run as a plain script without pytest installed.
try:
    import pytest  # BUG FIX: probe the import here; see note below.
    HAS_PYTEST = True
except ImportError:
    # BUG FIX: the original `try` body only assigned HAS_PYTEST = True and
    # contained no import, so ImportError could never be raised and this
    # whole fallback was unreachable dead code.
    HAS_PYTEST = False

    class MockMark:
        def asyncio(self, func):
            # Stand-in for @pytest.mark.asyncio: return the function unchanged.
            return func

    class MockPytest:
        mark = MockMark()

        def fixture(self, func):
            # Stand-in for @pytest.fixture: return the function unchanged.
            return func

    pytest = MockPytest()

from api.services.agent_orchestrator import AgentOrchestrator
from api.models.agent import AgentRequest
from api.models.redflag import RedFlagMatch


# =============================================================
# FIXTURES
# =============================================================

@pytest.fixture
def mock_orchestrator():
    """Build an AgentOrchestrator whose external collaborators are all mocked."""
    orchestrator = AgentOrchestrator(
        rag_mcp_url="http://fake:8001",
        web_mcp_url="http://fake:8002",
        admin_mcp_url="http://fake:8003",
        llm_backend="ollama"
    )

    # Replace every external dependency with a mock so tests stay offline.
    for attr in ("mcp", "analytics", "llm", "redflag"):
        setattr(orchestrator, attr, MagicMock())

    return orchestrator


# =============================================================
# RAG RETRY TESTS
# =============================================================

@pytest.mark.asyncio
async def test_rag_with_repair_high_score_no_retry(mock_orchestrator):
    """A single high-scoring RAG hit must be accepted without any retry."""
    good_hit = {"results": [{"text": "relevant content", "score": 0.85}]}
    mock_orchestrator.mcp.call_rag = AsyncMock(return_value=good_hit)

    trace = []
    outcome = await mock_orchestrator.rag_with_repair(
        query="test query",
        tenant_id="tenant1",
        reasoning_trace=trace,
        user_id="user1",
    )

    # Exactly one backend call proves no retry path fired.
    assert mock_orchestrator.mcp.call_rag.call_count == 1
    assert outcome["results"][0]["score"] == 0.85


@pytest.mark.asyncio
async def test_rag_with_repair_low_score_retry_threshold(mock_orchestrator):
    """A score below 0.30 must trigger one retry with the threshold dropped to 0.15."""
    responses = [
        {"results": [{"text": "low relevance", "score": 0.25}]},
        {"results": [{"text": "better match", "score": 0.45}]},
    ]
    mock_orchestrator.mcp.call_rag = AsyncMock(side_effect=responses)

    trace = []
    await mock_orchestrator.rag_with_repair(
        query="test query",
        tenant_id="tenant1",
        original_threshold=0.3,
        reasoning_trace=trace,
        user_id="user1",
    )

    # Initial call plus one threshold-relaxed retry.
    assert mock_orchestrator.mcp.call_rag.call_count == 2

    # The retry is expected to relax the threshold to 0.15.
    retry_kwargs = mock_orchestrator.mcp.call_rag.call_args_list[1].kwargs
    assert retry_kwargs.get("threshold") == 0.15

    # The trace must record that a retry happened.
    assert any("retry" in str(step).lower() for step in trace)


@pytest.mark.asyncio
async def test_rag_with_repair_expand_query(mock_orchestrator):
    """When the threshold retry also scores low, the query must be expanded."""
    mock_orchestrator.mcp.call_rag = AsyncMock(side_effect=[
        {"results": [{"text": "low", "score": 0.12}]},        # initial: very low
        {"results": [{"text": "still low", "score": 0.10}]},  # threshold retry: still low
        {"results": [{"text": "better", "score": 0.35}]},     # expanded query: acceptable
    ])

    trace = []
    await mock_orchestrator.rag_with_repair(
        query="test",
        tenant_id="tenant1",
        original_threshold=0.3,
        reasoning_trace=trace,
        user_id="user1",
    )

    # Three calls: initial + threshold retry + expanded-query retry.
    assert mock_orchestrator.mcp.call_rag.call_count == 3

    # The trace must mention the expansion step ("expand" also matches "expanded").
    assert any("expand" in str(step).lower() for step in trace)

    # Each retry should also have been logged to analytics.
    assert mock_orchestrator.analytics.log_tool_usage.call_count > 1


@pytest.mark.asyncio
async def test_rag_with_repair_no_results(mock_orchestrator):
    """An empty RAG result set must not crash the repair loop."""
    mock_orchestrator.mcp.call_rag = AsyncMock(return_value={"results": []})

    outcome = await mock_orchestrator.rag_with_repair(
        query="test query",
        tenant_id="tenant1",
        reasoning_trace=[],
        user_id="user1",
    )

    # Whatever the retry policy did, a well-formed dict must come back.
    assert isinstance(outcome, dict)
    assert "results" in outcome


# =============================================================
# WEB SEARCH RETRY TESTS
# =============================================================

@pytest.mark.asyncio
async def test_web_with_repair_has_results_no_retry(mock_orchestrator):
    """A non-empty web result set must be accepted without retrying."""
    hit = {"title": "Result 1", "snippet": "Content", "url": "http://example.com"}
    mock_orchestrator.mcp.call_web = AsyncMock(return_value={"results": [hit]})

    outcome = await mock_orchestrator.web_with_repair(
        query="normal query",
        tenant_id="tenant1",
        reasoning_trace=[],
        user_id="user1",
    )

    # A single call with results means the retry machinery stayed idle.
    assert mock_orchestrator.mcp.call_web.call_count == 1
    assert len(outcome["results"]) > 0


@pytest.mark.asyncio
async def test_web_with_repair_empty_results_retry(mock_orchestrator):
    """Empty web results must trigger retries with rewritten queries."""
    mock_orchestrator.mcp.call_web = AsyncMock(side_effect=[
        {"results": []},  # initial attempt: nothing found
        {"results": []},  # first rewrite: still nothing
        {"results": [{"title": "Found", "snippet": "Result", "url": "http://example.com"}]},
    ])

    trace = []
    await mock_orchestrator.web_with_repair(
        query="obscure query xyz",
        tenant_id="tenant1",
        reasoning_trace=trace,
        user_id="user1",
    )

    # At least one retry beyond the initial call (up to 2 rewrites allowed).
    assert mock_orchestrator.mcp.call_web.call_count >= 2

    # The trace must record the retry attempts.
    assert any("retry" in str(step).lower() for step in trace)

    # call_web is invoked positionally as (tenant_id, query): collect the queries.
    issued_queries = [
        call.args[1]
        for call in mock_orchestrator.mcp.call_web.call_args_list
        if len(call.args) > 1
    ]

    # Original query plus at least one rewrite must have been issued.
    assert len(issued_queries) >= 2
    # At least one rewrite should use one of the known rewrite templates.
    assert any(
        "best explanation" in str(q).lower() or "facts summary" in str(q).lower()
        for q in issued_queries if q
    )


@pytest.mark.asyncio
async def test_web_with_repair_analytics_logging(mock_orchestrator):
    """Web repair retry attempts must be reported to analytics."""
    mock_orchestrator.mcp.call_web = AsyncMock(side_effect=[
        {"results": []},
        {"results": [{"title": "Result", "snippet": "Content"}]},
    ])

    await mock_orchestrator.web_with_repair(
        query="test",
        tenant_id="tenant1",
        user_id="user1",
    )

    # At least one analytics event must have been emitted.
    assert mock_orchestrator.analytics.log_tool_usage.called


# =============================================================
# SAFE TOOL CALL TESTS
# =============================================================

@pytest.mark.asyncio
async def test_safe_tool_call_success_first_attempt(mock_orchestrator):
    """A tool that succeeds immediately must be invoked exactly once."""
    tool = AsyncMock(return_value={"success": True, "data": "result"})

    outcome = await mock_orchestrator.safe_tool_call(
        tool_fn=tool,
        params={"param1": "value1"},
        max_retries=2,
        tool_name="test_tool",
        tenant_id="tenant1",
        user_id="user1",
    )

    # One call, and the tool's payload passes through unchanged.
    assert tool.call_count == 1
    assert outcome["success"] is True
    assert outcome["data"] == "result"


@pytest.mark.asyncio
async def test_safe_tool_call_retry_on_failure(mock_orchestrator):
    """A first-attempt exception must be retried and the retry recorded."""
    tool = AsyncMock(side_effect=[
        Exception("First failure"),
        {"success": True, "data": "recovered"},
    ])

    trace = []
    outcome = await mock_orchestrator.safe_tool_call(
        tool_fn=tool,
        params={},
        max_retries=2,
        tool_name="test_tool",
        tenant_id="tenant1",
        user_id="user1",
        reasoning_trace=trace,
    )

    # Two attempts: the failure plus the recovering retry.
    assert tool.call_count == 2
    assert outcome["success"] is True

    # The trace must mention the retry.
    assert any("retry" in str(step).lower() for step in trace)


@pytest.mark.asyncio
async def test_safe_tool_call_exhausts_retries(mock_orchestrator):
    """When every attempt raises, an error result must come back after max_retries."""
    tool = AsyncMock(side_effect=Exception("Always fails"))

    outcome = await mock_orchestrator.safe_tool_call(
        tool_fn=tool,
        params={},
        max_retries=2,
        tool_name="test_tool",
        tenant_id="tenant1",
        user_id="user1",
        reasoning_trace=[],
    )

    # Exactly max_retries attempts, then give up with an error payload.
    assert tool.call_count == 2
    assert "error" in outcome

    # The failures must have been logged to analytics.
    assert mock_orchestrator.analytics.log_tool_usage.called


@pytest.mark.asyncio
async def test_safe_tool_call_fallback_params(mock_orchestrator):
    """On retry, safe_tool_call must switch to the supplied fallback params."""
    seen_params = []

    async def recording_tool(**kwargs):
        # Remember what we were called with; fail once, then succeed.
        seen_params.append(dict(kwargs))
        if len(seen_params) == 1:
            raise Exception("First attempt failed")
        return {"success": True, "params": kwargs}

    outcome = await mock_orchestrator.safe_tool_call(
        tool_fn=recording_tool,
        params={"param1": "value1"},
        max_retries=2,
        fallback_params={"param1": "fallback_value"},
        tool_name="test_tool",
        tenant_id="tenant1",
    )

    # Two attempts: original params first, fallback params on the retry.
    assert len(seen_params) == 2
    assert seen_params[0]["param1"] == "value1"
    assert seen_params[1]["param1"] == "fallback_value"
    assert outcome["success"] is True


# =============================================================
# RULE SAFE MESSAGE TESTS
# =============================================================

@pytest.mark.asyncio
async def test_rule_safe_message_no_violations(mock_orchestrator):
    """A message with no red-flag hits must pass through untouched."""
    mock_orchestrator.redflag.check = AsyncMock(return_value=[])

    outcome = await mock_orchestrator.rule_safe_message(
        user_message="Normal message",
        tenant_id="tenant1",
    )

    # Untouched message, and exactly one rules check.
    assert outcome == "Normal message"
    assert mock_orchestrator.redflag.check.call_count == 1


@pytest.mark.asyncio
async def test_rule_safe_message_rewrites_violation(mock_orchestrator):
    """A violating message must be rewritten by the LLM and re-checked."""
    violation = RedFlagMatch(
        rule_id="1",
        pattern="salary",
        severity="high",
        description="salary access",
        matched_text="salary",
    )

    # First check flags the original; second check clears the rewrite.
    mock_orchestrator.redflag.check = AsyncMock(side_effect=[[violation], []])
    mock_orchestrator.llm.simple_call = AsyncMock(
        return_value="This is a compliant version of your request about compensation"
    )

    trace = []
    outcome = await mock_orchestrator.rule_safe_message(
        user_message="I want to see salary info",
        tenant_id="tenant1",
        reasoning_trace=trace,
    )

    # Two rule checks: the original message, then the rewritten one.
    assert mock_orchestrator.redflag.check.call_count == 2

    # The LLM must have produced the rewrite.
    assert mock_orchestrator.llm.simple_call.called

    # The returned text must be the rewrite, not the unsafe original.
    assert "compliant" in outcome.lower() or outcome != "I want to see salary info"

    # The rewrite must be visible in the reasoning trace.
    assert any("rewrite" in str(step).lower() for step in trace)


@pytest.mark.asyncio
async def test_rule_safe_message_brief_rule_no_rewrite(mock_orchestrator):
    """Brief-response rules are handled elsewhere, so no rewrite should occur."""
    brief_rule = RedFlagMatch(
        rule_id="1",
        pattern="greeting",
        severity="low",
        description="greeting",
        matched_text="hi",
    )
    mock_orchestrator.redflag.check = AsyncMock(return_value=[brief_rule])

    outcome = await mock_orchestrator.rule_safe_message(
        user_message="Hi there",
        tenant_id="tenant1",
    )

    # Brief rules must leave the message untouched.
    assert outcome == "Hi there"


@pytest.mark.asyncio
async def test_rule_safe_message_llm_failure_fallback(mock_orchestrator):
    """If the LLM rewrite raises, the original message must be returned."""
    violation = RedFlagMatch(
        rule_id="1",
        pattern="blocked",
        severity="high",
        description="blocked",
        matched_text="blocked",
    )
    mock_orchestrator.redflag.check = AsyncMock(return_value=[violation])
    mock_orchestrator.llm.simple_call = AsyncMock(side_effect=Exception("LLM failed"))

    original = "I want blocked content"
    outcome = await mock_orchestrator.rule_safe_message(
        user_message=original,
        tenant_id="tenant1",
    )

    # Rewrite failure must fall back to the unmodified input.
    assert outcome == original


# =============================================================
# INTEGRATION TESTS
# =============================================================

@pytest.mark.asyncio
async def test_rag_integration_reasoning_trace(mock_orchestrator):
    """RAG retry activity must surface in the reasoning trace."""
    mock_orchestrator.mcp.call_rag = AsyncMock(side_effect=[
        {"results": [{"text": "low", "score": 0.20}]},
        {"results": [{"text": "better", "score": 0.50}]},
    ])

    trace = []
    await mock_orchestrator.rag_with_repair(
        query="test",
        tenant_id="tenant1",
        reasoning_trace=trace,
        user_id="user1",
    )

    # The serialized trace should mention the retry or the threshold change.
    rendered = str(trace).lower()
    assert "retry" in rendered or "threshold" in rendered


@pytest.mark.asyncio
async def test_web_integration_reasoning_trace(mock_orchestrator):
    """Web retry activity must surface in the reasoning trace."""
    mock_orchestrator.mcp.call_web = AsyncMock(side_effect=[
        {"results": []},
        {"results": [{"title": "Result", "snippet": "Content"}]},
    ])

    trace = []
    await mock_orchestrator.web_with_repair(
        query="test",
        tenant_id="tenant1",
        reasoning_trace=trace,
        user_id="user1",
    )

    # The serialized trace should mention the retry or the query rewrite.
    rendered = str(trace).lower()
    assert "retry" in rendered or "rewritten" in rendered


@pytest.mark.asyncio
async def test_analytics_logging_on_retries(mock_orchestrator):
    """RAG retries must be mirrored into the analytics service."""
    mock_orchestrator.mcp.call_rag = AsyncMock(side_effect=[
        {"results": [{"text": "low", "score": 0.25}]},
        {"results": [{"text": "better", "score": 0.45}]},
    ])

    await mock_orchestrator.rag_with_repair(
        query="test",
        tenant_id="tenant1",
        user_id="user1",
    )

    # Tool usage is logged for the initial call and the retry.
    assert mock_orchestrator.analytics.log_tool_usage.call_count > 0

    # The search itself is logged as a RAG search event.
    assert mock_orchestrator.analytics.log_rag_search.called


@pytest.mark.asyncio
async def test_full_agent_flow_with_retry(mock_orchestrator):
    """Test full agent flow integrates retry system.

    Drives AgentOrchestrator.handle() end to end with mocked intent,
    selector, red-flag, RAG and LLM layers, and verifies that the RAG
    retry fires and is reflected in the response's reasoning trace.
    """
    # Setup mocks for a full agent request
    mock_orchestrator.intent = MagicMock()
    mock_orchestrator.intent.classify = AsyncMock(return_value="rag")

    mock_orchestrator.selector = MagicMock()
    from api.models.agent import AgentDecision
    mock_orchestrator.selector.select = AsyncMock(return_value=AgentDecision(
        action="call_tool",
        tool="rag",
        tool_input={"query": "test query"},
        reason="test"
    ))

    mock_orchestrator.redflag.check = AsyncMock(return_value=[])

    # First RAG call scores below threshold; the retry recovers.
    mock_orchestrator.mcp.call_rag = AsyncMock(side_effect=[
        {"results": [{"text": "low relevance", "score": 0.25}]},
        {"results": [{"text": "better match", "score": 0.50}]}
    ])

    mock_orchestrator.llm.simple_call = AsyncMock(return_value="Final answer")

    # Create request
    req = AgentRequest(
        tenant_id="tenant1",
        user_id="user1",
        message="test query"
    )

    # Handle request
    response = await mock_orchestrator.handle(req)

    # Verify retry happened (2 RAG calls)
    assert mock_orchestrator.mcp.call_rag.call_count == 2

    # Verify response is generated
    assert response.text == "Final answer"

    # BUG FIX: the original computed trace_str but never asserted on it, so
    # the trace check was silently a no-op. Assert the documented intent:
    # the trace must contain retry- or repair-related steps.
    trace_str = str(response.reasoning_trace).lower()
    assert "retry" in trace_str or "repair" in trace_str


# =============================================================
# EDGE CASES
# =============================================================

@pytest.mark.asyncio
async def test_rag_repair_edge_case_exactly_threshold(mock_orchestrator):
    """A score exactly equal to the threshold must not trigger a retry."""
    mock_orchestrator.mcp.call_rag = AsyncMock(return_value={
        "results": [{"text": "content", "score": 0.30}],  # boundary value
    })

    await mock_orchestrator.rag_with_repair(
        query="test",
        tenant_id="tenant1",
        original_threshold=0.3,
        reasoning_trace=[],
        user_id="user1",
    )

    # score >= threshold, so a single call suffices.
    assert mock_orchestrator.mcp.call_rag.call_count == 1


@pytest.mark.asyncio
async def test_web_repair_all_retries_fail(mock_orchestrator):
    """Persistently empty web results must still yield a well-formed response."""
    mock_orchestrator.mcp.call_web = AsyncMock(return_value={"results": []})

    outcome = await mock_orchestrator.web_with_repair(
        query="very obscure query",
        tenant_id="tenant1",
        reasoning_trace=[],
        user_id="user1",
    )

    # Retries were attempted even though none succeeded.
    assert mock_orchestrator.mcp.call_web.call_count >= 2

    # A dict comes back regardless of the empty outcome.
    assert isinstance(outcome, dict)


if __name__ == "__main__":
    # Allow invoking this file directly instead of via the pytest CLI.
    print("Running retry system tests...")
    pytest_args = [__file__, "-v", "--tb=short"]
    pytest.main(pytest_args)