Commit
Β·
7c51be5
1
Parent(s):
cc5dfc8
fix: Address all CodeRabbit review nitpicks
Browse files
- Extract magic number 45 to class constant _EST_SECONDS_PER_ROUND (advanced.py)
- Add @pytest.mark.unit to test_advanced_p2_dead_zones.py
- Remove debug print statements from test
- Add pytestmark = pytest.mark.unit to test_magentic_judge_termination.py
- Fix import ordering (stdlib before third-party)
- Add fallback p=0.15 for progress events without iteration (app.py)
- Refactor max_iters lookup with getattr chains to reduce statement count
- Fix SPEC_16 markdown: list spacing (`1.` vs `1. `) and code block languages
All 327 tests pass.
docs/specs/SPEC_16_UNIFIED_CHAT_CLIENT_ARCHITECTURE.md
CHANGED
|
@@ -12,15 +12,15 @@ Eliminate the Simple Mode / Advanced Mode parallel universe by implementing a pl
|
|
| 12 |
|
| 13 |
## Strategic Goals
|
| 14 |
|
| 15 |
-
1.
|
| 16 |
-
2.
|
| 17 |
-
3.
|
| 18 |
|
| 19 |
## Problem Statement
|
| 20 |
|
| 21 |
### Current Architecture: Two Parallel Universes
|
| 22 |
|
| 23 |
-
```
|
| 24 |
User Query
|
| 25 |
↓
|
| 26 |
├── Has API Key? ──Yes──→ Advanced Mode (488 lines)
|
|
@@ -42,7 +42,7 @@ User Query
|
|
| 42 |
|
| 43 |
### Architecture After Implementation
|
| 44 |
|
| 45 |
-
```
|
| 46 |
User Query
|
| 47 |
↓
|
| 48 |
└──→ Advanced Mode (unified)
|
|
@@ -55,7 +55,7 @@ User Query
|
|
| 55 |
|
| 56 |
### New Files
|
| 57 |
|
| 58 |
-
```
|
| 59 |
src/
|
| 60 |
├── clients/
|
| 61 |
│   ├── __init__.py
|
|
@@ -250,9 +250,9 @@ def has_gemini_key(self) -> bool:
|
|
| 250 |
|
| 251 |
## Why This is "Elegant"
|
| 252 |
|
| 253 |
-
1.
|
| 254 |
-
2.
|
| 255 |
-
3.
|
| 256 |
|
| 257 |
---
|
| 258 |
|
|
|
|
| 12 |
|
| 13 |
## Strategic Goals
|
| 14 |
|
| 15 |
+
1. **Namespace Neutrality**: Decouple the core orchestrator from the `OpenAI` namespace. The system should speak `ChatClient`, not `OpenAIChatClient`.
|
| 16 |
+
2. **Full-Stack Provider Chain**: Prioritize providers that offer both LLM and Embeddings (OpenAI, Gemini, HuggingFace+Local) to ensure a unified environment.
|
| 17 |
+
3. **Fragmentation Reduction**: Remove "LLM-only" providers (Anthropic) that force complex hybrid dependency chains (e.g., Anthropic LLM + OpenAI Embeddings).
|
| 18 |
|
| 19 |
## Problem Statement
|
| 20 |
|
| 21 |
### Current Architecture: Two Parallel Universes
|
| 22 |
|
| 23 |
+
```text
|
| 24 |
User Query
|
| 25 |
↓
|
| 26 |
├── Has API Key? ──Yes──→ Advanced Mode (488 lines)
|
|
|
|
| 42 |
|
| 43 |
### Architecture After Implementation
|
| 44 |
|
| 45 |
+
```text
|
| 46 |
User Query
|
| 47 |
↓
|
| 48 |
└──→ Advanced Mode (unified)
|
|
|
|
| 55 |
|
| 56 |
### New Files
|
| 57 |
|
| 58 |
+
```text
|
| 59 |
src/
|
| 60 |
├── clients/
|
| 61 |
│   ├── __init__.py
|
|
|
|
| 250 |
|
| 251 |
## Why This is "Elegant"
|
| 252 |
|
| 253 |
+
1. **One System**: We stop maintaining two parallel universes.
|
| 254 |
+
2. **Dependency Injection**: The specific LLM provider is injected, not hardcoded.
|
| 255 |
+
3. **Full-Stack Alignment**: We prioritize providers (OpenAI, Gemini) that own the whole vertical (LLM + Embeddings), reducing environment complexity.
|
| 256 |
|
| 257 |
---
|
| 258 |
|
src/app.py
CHANGED
|
@@ -245,20 +245,14 @@ async def research_agent(
|
|
| 245 |
elif event.type == "thinking":
|
| 246 |
progress(0.1, desc="Multi-agent reasoning...")
|
| 247 |
elif event.type == "progress":
|
| 248 |
-
#
|
| 249 |
-
p =
|
| 250 |
-
max_iters =
|
| 251 |
-
|
| 252 |
-
|
| 253 |
-
elif hasattr(orchestrator, "config") and hasattr(
|
| 254 |
-
orchestrator.config, "max_iterations"
|
| 255 |
-
):
|
| 256 |
-
max_iters = orchestrator.config.max_iterations
|
| 257 |
-
|
| 258 |
if event.iteration:
|
| 259 |
# Map 0..max to 0.2..0.9
|
| 260 |
p = 0.2 + (0.7 * (min(event.iteration, max_iters) / max_iters))
|
| 261 |
-
|
| 262 |
progress(p, desc=event.message)
|
| 263 |
|
| 264 |
# BUG FIX: Handle streaming events separately to avoid token-by-token spam
|
|
|
|
| 245 |
elif event.type == "thinking":
|
| 246 |
progress(0.1, desc="Multi-agent reasoning...")
|
| 247 |
elif event.type == "progress":
|
| 248 |
+
# Calculate progress percentage (fallback to 0.15 for events without iteration)
|
| 249 |
+
p = 0.15
|
| 250 |
+
max_iters = getattr(orchestrator, "_max_rounds", None) or getattr(
|
| 251 |
+
getattr(orchestrator, "config", None), "max_iterations", 10
|
| 252 |
+
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 253 |
if event.iteration:
|
| 254 |
# Map 0..max to 0.2..0.9
|
| 255 |
p = 0.2 + (0.7 * (min(event.iteration, max_iters) / max_iters))
|
|
|
|
| 256 |
progress(p, desc=event.message)
|
| 257 |
|
| 258 |
# BUG FIX: Handle streaming events separately to avoid token-by-token spam
|
src/orchestrators/advanced.py
CHANGED
|
@@ -64,6 +64,9 @@ class AdvancedOrchestrator(OrchestratorProtocol):
|
|
| 64 |
- Configurable timeouts and round limits
|
| 65 |
"""
|
| 66 |
|
|
|
|
|
|
|
|
|
|
| 67 |
def __init__(
|
| 68 |
self,
|
| 69 |
max_rounds: int | None = None,
|
|
@@ -168,7 +171,7 @@ The final output should be a structured research report."""
|
|
| 168 |
def _get_progress_message(self, iteration: int) -> str:
|
| 169 |
"""Generate progress message with time estimation."""
|
| 170 |
rounds_remaining = max(self._max_rounds - iteration, 0)
|
| 171 |
-
est_seconds = rounds_remaining *
|
| 172 |
if est_seconds >= 60:
|
| 173 |
est_display = f"{est_seconds // 60}m {est_seconds % 60}s"
|
| 174 |
else:
|
|
|
|
| 64 |
- Configurable timeouts and round limits
|
| 65 |
"""
|
| 66 |
|
| 67 |
+
# Estimated seconds per coordination round (for progress UI)
|
| 68 |
+
_EST_SECONDS_PER_ROUND: int = 45
|
| 69 |
+
|
| 70 |
def __init__(
|
| 71 |
self,
|
| 72 |
max_rounds: int | None = None,
|
|
|
|
| 171 |
def _get_progress_message(self, iteration: int) -> str:
|
| 172 |
"""Generate progress message with time estimation."""
|
| 173 |
rounds_remaining = max(self._max_rounds - iteration, 0)
|
| 174 |
+
est_seconds = rounds_remaining * self._EST_SECONDS_PER_ROUND
|
| 175 |
if est_seconds >= 60:
|
| 176 |
est_display = f"{est_seconds // 60}m {est_seconds % 60}s"
|
| 177 |
else:
|
tests/unit/agents/test_magentic_judge_termination.py
CHANGED
|
@@ -2,8 +2,12 @@
|
|
| 2 |
|
| 3 |
from unittest.mock import patch
|
| 4 |
|
|
|
|
|
|
|
| 5 |
from src.agents.magentic_agents import create_judge_agent
|
| 6 |
|
|
|
|
|
|
|
| 7 |
|
| 8 |
def test_judge_agent_has_termination_instructions() -> None:
|
| 9 |
"""Judge agent must be created with explicit instructions for early termination."""
|
|
|
|
| 2 |
|
| 3 |
from unittest.mock import patch
|
| 4 |
|
| 5 |
+
import pytest
|
| 6 |
+
|
| 7 |
from src.agents.magentic_agents import create_judge_agent
|
| 8 |
|
| 9 |
+
pytestmark = pytest.mark.unit
|
| 10 |
+
|
| 11 |
|
| 12 |
def test_judge_agent_has_termination_instructions() -> None:
|
| 13 |
"""Judge agent must be created with explicit instructions for early termination."""
|
tests/unit/orchestrators/test_advanced_p2_dead_zones.py
CHANGED
|
@@ -6,6 +6,7 @@ from src.orchestrators.advanced import AdvancedOrchestrator
|
|
| 6 |
|
| 7 |
|
| 8 |
@pytest.mark.asyncio
|
|
|
|
| 9 |
async def test_advanced_initialization_events():
|
| 10 |
"""Verify granular progress events are emitted during initialization."""
|
| 11 |
# Mock dependencies
|
|
@@ -45,10 +46,6 @@ async def test_advanced_initialization_events():
|
|
| 45 |
messages = [e.message for e in events]
|
| 46 |
types = [e.type for e in events]
|
| 47 |
|
| 48 |
-
print("\nEvents received:")
|
| 49 |
-
for t, m in zip(types, messages, strict=True):
|
| 50 |
-
print(f"- {t}: {m}")
|
| 51 |
-
|
| 52 |
# Expected sequence:
|
| 53 |
# 1. started
|
| 54 |
# 2. progress (Loading embedding...)
|
|
|
|
| 6 |
|
| 7 |
|
| 8 |
@pytest.mark.asyncio
|
| 9 |
+
@pytest.mark.unit
|
| 10 |
async def test_advanced_initialization_events():
|
| 11 |
"""Verify granular progress events are emitted during initialization."""
|
| 12 |
# Mock dependencies
|
|
|
|
| 46 |
messages = [e.message for e in events]
|
| 47 |
types = [e.type for e in events]
|
| 48 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 49 |
# Expected sequence:
|
| 50 |
# 1. started
|
| 51 |
# 2. progress (Loading embedding...)
|