Spaces:
Running
Running
Commit
·
7d28de6
1
Parent(s):
ff16d8e
fix: address CodeRabbit review feedback
Browse files
Fixes from CodeRabbit analysis:
- docs/to_do/DEEP_RESEARCH_ROADMAP.md: Fix MagenticBuilder → MagenticOrchestrator
- docs/to_do/DEEP_RESEARCH_ROADMAP.md: Add note clarifying RAGService vs EmbeddingService
- src/services/llamaindex_rag.py: Add migration note for collection name change
- tests/unit/utils/test_exceptions.py: Add pytestmark = pytest.mark.unit
- docs/implementation/12_phase_mcp_server.md: Update all bioRxiv refs → Europe PMC
All 127 tests pass, 0 warnings.
docs/implementation/12_phase_mcp_server.md
CHANGED
|
@@ -130,15 +130,15 @@ Each function follows the MCP tool contract:
|
|
| 130 |
- Formatted string returns
|
| 131 |
"""
|
| 132 |
|
| 133 |
-
from src.tools.biorxiv import BioRxivTool
|
| 134 |
from src.tools.clinicaltrials import ClinicalTrialsTool
|
|
|
|
| 135 |
from src.tools.pubmed import PubMedTool
|
| 136 |
|
| 137 |
|
| 138 |
# Singleton instances (avoid recreating on each call)
|
| 139 |
_pubmed = PubMedTool()
|
| 140 |
_trials = ClinicalTrialsTool()
|
| 141 |
-
|
| 142 |
|
| 143 |
|
| 144 |
async def search_pubmed(query: str, max_results: int = 10) -> str:
|
|
@@ -202,10 +202,10 @@ async def search_clinical_trials(query: str, max_results: int = 10) -> str:
|
|
| 202 |
return "\n".join(formatted)
|
| 203 |
|
| 204 |
|
| 205 |
-
async def
|
| 206 |
-
"""Search
|
| 207 |
|
| 208 |
-
Searches
|
| 209 |
Note: Preprints are NOT peer-reviewed but contain the latest findings.
|
| 210 |
|
| 211 |
Args:
|
|
@@ -217,10 +217,10 @@ async def search_biorxiv(query: str, max_results: int = 10) -> str:
|
|
| 217 |
"""
|
| 218 |
max_results = max(1, min(50, max_results))
|
| 219 |
|
| 220 |
-
results = await
|
| 221 |
|
| 222 |
if not results:
|
| 223 |
-
return f"No
|
| 224 |
|
| 225 |
formatted = [f"## Preprint Results for: {query}\n"]
|
| 226 |
for i, evidence in enumerate(results, 1):
|
|
@@ -236,7 +236,7 @@ async def search_biorxiv(query: str, max_results: int = 10) -> str:
|
|
| 236 |
async def search_all_sources(query: str, max_per_source: int = 5) -> str:
|
| 237 |
"""Search all biomedical sources simultaneously.
|
| 238 |
|
| 239 |
-
Performs parallel search across PubMed, ClinicalTrials.gov, and
|
| 240 |
This is the most comprehensive search option for drug repurposing research.
|
| 241 |
|
| 242 |
Args:
|
|
@@ -253,10 +253,10 @@ async def search_all_sources(query: str, max_per_source: int = 5) -> str:
|
|
| 253 |
# Run all searches in parallel
|
| 254 |
pubmed_task = search_pubmed(query, max_per_source)
|
| 255 |
trials_task = search_clinical_trials(query, max_per_source)
|
| 256 |
-
|
| 257 |
|
| 258 |
-
pubmed_results, trials_results,
|
| 259 |
-
pubmed_task, trials_task,
|
| 260 |
)
|
| 261 |
|
| 262 |
formatted = [f"# Comprehensive Search: {query}\n"]
|
|
@@ -272,10 +272,10 @@ async def search_all_sources(query: str, max_per_source: int = 5) -> str:
|
|
| 272 |
else:
|
| 273 |
formatted.append(f"## Clinical Trials\n*Error: {trials_results}*\n")
|
| 274 |
|
| 275 |
-
if isinstance(
|
| 276 |
-
formatted.append(
|
| 277 |
else:
|
| 278 |
-
formatted.append(f"## Preprints\n*Error: {
|
| 279 |
|
| 280 |
return "\n---\n".join(formatted)
|
| 281 |
```
|
|
@@ -294,12 +294,12 @@ import gradio as gr
|
|
| 294 |
from src.agent_factory.judges import JudgeHandler, MockJudgeHandler
|
| 295 |
from src.mcp_tools import (
|
| 296 |
search_all_sources,
|
| 297 |
-
|
| 298 |
search_clinical_trials,
|
| 299 |
search_pubmed,
|
| 300 |
)
|
| 301 |
from src.orchestrator_factory import create_orchestrator
|
| 302 |
-
from src.tools.
|
| 303 |
from src.tools.clinicaltrials import ClinicalTrialsTool
|
| 304 |
from src.tools.pubmed import PubMedTool
|
| 305 |
from src.tools.search_handler import SearchHandler
|
|
@@ -325,7 +325,7 @@ def create_demo() -> Any:
|
|
| 325 |
## AI-Powered Drug Repurposing Research Agent
|
| 326 |
|
| 327 |
Ask questions about potential drug repurposing opportunities.
|
| 328 |
-
The agent searches PubMed, ClinicalTrials.gov, and
|
| 329 |
|
| 330 |
**Example questions:**
|
| 331 |
- "What drugs could be repurposed for Alzheimer's disease?"
|
|
@@ -381,13 +381,13 @@ def create_demo() -> Any:
|
|
| 381 |
|
| 382 |
with gr.Tab("Preprints"):
|
| 383 |
gr.Interface(
|
| 384 |
-
fn=
|
| 385 |
inputs=[
|
| 386 |
gr.Textbox(label="Query", placeholder="long covid treatment"),
|
| 387 |
gr.Slider(1, 50, value=10, step=1, label="Max Results"),
|
| 388 |
],
|
| 389 |
outputs=gr.Markdown(label="Results"),
|
| 390 |
-
api_name="
|
| 391 |
)
|
| 392 |
|
| 393 |
with gr.Tab("Search All"):
|
|
@@ -406,7 +406,7 @@ def create_demo() -> Any:
|
|
| 406 |
**Note**: This is a research tool and should not be used for medical decisions.
|
| 407 |
Always consult healthcare professionals for medical advice.
|
| 408 |
|
| 409 |
-
Built with PydanticAI + PubMed, ClinicalTrials.gov &
|
| 410 |
|
| 411 |
**MCP Server**: Available at `/gradio_api/mcp/` for Claude Desktop integration
|
| 412 |
""")
|
|
@@ -444,7 +444,7 @@ import pytest
|
|
| 444 |
|
| 445 |
from src.mcp_tools import (
|
| 446 |
search_all_sources,
|
| 447 |
-
|
| 448 |
search_clinical_trials,
|
| 449 |
search_pubmed,
|
| 450 |
)
|
|
@@ -525,18 +525,18 @@ class TestSearchClinicalTrials:
|
|
| 525 |
assert "Clinical Trials" in result
|
| 526 |
|
| 527 |
|
| 528 |
-
class
|
| 529 |
-
"""Tests for
|
| 530 |
|
| 531 |
@pytest.mark.asyncio
|
| 532 |
async def test_returns_formatted_string(self, mock_evidence: Evidence) -> None:
|
| 533 |
"""Should return formatted markdown string."""
|
| 534 |
-
mock_evidence.citation.source = "
|
| 535 |
|
| 536 |
-
with patch("src.mcp_tools.
|
| 537 |
mock_tool.search = AsyncMock(return_value=[mock_evidence])
|
| 538 |
|
| 539 |
-
result = await
|
| 540 |
|
| 541 |
assert isinstance(result, str)
|
| 542 |
assert "Preprint Results" in result
|
|
@@ -550,11 +550,11 @@ class TestSearchAllSources:
|
|
| 550 |
"""Should combine results from all sources."""
|
| 551 |
with patch("src.mcp_tools.search_pubmed", new_callable=AsyncMock) as mock_pubmed, \
|
| 552 |
patch("src.mcp_tools.search_clinical_trials", new_callable=AsyncMock) as mock_trials, \
|
| 553 |
-
patch("src.mcp_tools.
|
| 554 |
|
| 555 |
mock_pubmed.return_value = "## PubMed Results"
|
| 556 |
mock_trials.return_value = "## Clinical Trials"
|
| 557 |
-
|
| 558 |
|
| 559 |
result = await search_all_sources("metformin", 5)
|
| 560 |
|
|
@@ -568,11 +568,11 @@ class TestSearchAllSources:
|
|
| 568 |
"""Should handle partial failures gracefully."""
|
| 569 |
with patch("src.mcp_tools.search_pubmed", new_callable=AsyncMock) as mock_pubmed, \
|
| 570 |
patch("src.mcp_tools.search_clinical_trials", new_callable=AsyncMock) as mock_trials, \
|
| 571 |
-
patch("src.mcp_tools.
|
| 572 |
|
| 573 |
mock_pubmed.return_value = "## PubMed Results"
|
| 574 |
mock_trials.side_effect = Exception("API Error")
|
| 575 |
-
|
| 576 |
|
| 577 |
result = await search_all_sources("metformin", 5)
|
| 578 |
|
|
@@ -599,10 +599,10 @@ class TestMCPDocstrings:
|
|
| 599 |
assert search_clinical_trials.__doc__ is not None
|
| 600 |
assert "Args:" in search_clinical_trials.__doc__
|
| 601 |
|
| 602 |
-
def
|
| 603 |
"""Docstring must have Args section for MCP schema generation."""
|
| 604 |
-
assert
|
| 605 |
-
assert "Args:" in
|
| 606 |
|
| 607 |
def test_search_all_sources_has_args_section(self) -> None:
|
| 608 |
"""Docstring must have Args section for MCP schema generation."""
|
|
@@ -817,14 +817,14 @@ Phase 12 is **COMPLETE** when:
|
|
| 817 |
β Gradio MCP Server β
|
| 818 |
β /gradio_api/mcp/ β
|
| 819 |
β ββββββββββββββββ ββββββββββββββββ ββββββββββββββββ βββββββββββ β
|
| 820 |
-
β βsearch_pubmed β βsearch_trials β β
|
| 821 |
β β β β β β β βall β β
|
| 822 |
β ββββββββ¬ββββββββ ββββββββ¬ββββββββ ββββββββ¬ββββββββ ββββββ¬βββββ β
|
| 823 |
βββββββββββΌβββββββββββββββββΌβββββββββββββββββΌβββββββββββββββΌβββββββ
|
| 824 |
β β β β
|
| 825 |
▼                ▼                ▼              ▼
|
| 826 |
ββββββββββββ βββββοΏ½οΏ½ββββββ ββββββββββββ (calls all)
|
| 827 |
-
βPubMedToolβ βTrials β β
|
| 828 |
β β βTool β βTool β
|
| 829 |
ββββββββββββ ββββββββββββ ββββββββββββ
|
| 830 |
```
|
|
|
|
| 130 |
- Formatted string returns
|
| 131 |
"""
|
| 132 |
|
|
|
|
| 133 |
from src.tools.clinicaltrials import ClinicalTrialsTool
|
| 134 |
+
from src.tools.europepmc import EuropePMCTool
|
| 135 |
from src.tools.pubmed import PubMedTool
|
| 136 |
|
| 137 |
|
| 138 |
# Singleton instances (avoid recreating on each call)
|
| 139 |
_pubmed = PubMedTool()
|
| 140 |
_trials = ClinicalTrialsTool()
|
| 141 |
+
_europepmc = EuropePMCTool()
|
| 142 |
|
| 143 |
|
| 144 |
async def search_pubmed(query: str, max_results: int = 10) -> str:
|
|
|
|
| 202 |
return "\n".join(formatted)
|
| 203 |
|
| 204 |
|
| 205 |
+
async def search_europepmc(query: str, max_results: int = 10) -> str:
|
| 206 |
+
"""Search Europe PMC for preprint and open access research.
|
| 207 |
|
| 208 |
+
Searches Europe PMC for preprints and open access papers.
|
| 209 |
Note: Preprints are NOT peer-reviewed but contain the latest findings.
|
| 210 |
|
| 211 |
Args:
|
|
|
|
| 217 |
"""
|
| 218 |
max_results = max(1, min(50, max_results))
|
| 219 |
|
| 220 |
+
results = await _europepmc.search(query, max_results)
|
| 221 |
|
| 222 |
if not results:
|
| 223 |
+
return f"No Europe PMC results found for: {query}"
|
| 224 |
|
| 225 |
formatted = [f"## Preprint Results for: {query}\n"]
|
| 226 |
for i, evidence in enumerate(results, 1):
|
|
|
|
| 236 |
async def search_all_sources(query: str, max_per_source: int = 5) -> str:
|
| 237 |
"""Search all biomedical sources simultaneously.
|
| 238 |
|
| 239 |
+
Performs parallel search across PubMed, ClinicalTrials.gov, and Europe PMC.
|
| 240 |
This is the most comprehensive search option for drug repurposing research.
|
| 241 |
|
| 242 |
Args:
|
|
|
|
| 253 |
# Run all searches in parallel
|
| 254 |
pubmed_task = search_pubmed(query, max_per_source)
|
| 255 |
trials_task = search_clinical_trials(query, max_per_source)
|
| 256 |
+
europepmc_task = search_europepmc(query, max_per_source)
|
| 257 |
|
| 258 |
+
pubmed_results, trials_results, europepmc_results = await asyncio.gather(
|
| 259 |
+
pubmed_task, trials_task, europepmc_task, return_exceptions=True
|
| 260 |
)
|
| 261 |
|
| 262 |
formatted = [f"# Comprehensive Search: {query}\n"]
|
|
|
|
| 272 |
else:
|
| 273 |
formatted.append(f"## Clinical Trials\n*Error: {trials_results}*\n")
|
| 274 |
|
| 275 |
+
if isinstance(europepmc_results, str):
|
| 276 |
+
formatted.append(europepmc_results)
|
| 277 |
else:
|
| 278 |
+
formatted.append(f"## Preprints\n*Error: {europepmc_results}*\n")
|
| 279 |
|
| 280 |
return "\n---\n".join(formatted)
|
| 281 |
```
|
|
|
|
| 294 |
from src.agent_factory.judges import JudgeHandler, MockJudgeHandler
|
| 295 |
from src.mcp_tools import (
|
| 296 |
search_all_sources,
|
| 297 |
+
search_europepmc,
|
| 298 |
search_clinical_trials,
|
| 299 |
search_pubmed,
|
| 300 |
)
|
| 301 |
from src.orchestrator_factory import create_orchestrator
|
| 302 |
+
from src.tools.europepmc import EuropePMCTool
|
| 303 |
from src.tools.clinicaltrials import ClinicalTrialsTool
|
| 304 |
from src.tools.pubmed import PubMedTool
|
| 305 |
from src.tools.search_handler import SearchHandler
|
|
|
|
| 325 |
## AI-Powered Drug Repurposing Research Agent
|
| 326 |
|
| 327 |
Ask questions about potential drug repurposing opportunities.
|
| 328 |
+
The agent searches PubMed, ClinicalTrials.gov, and Europe PMC preprints.
|
| 329 |
|
| 330 |
**Example questions:**
|
| 331 |
- "What drugs could be repurposed for Alzheimer's disease?"
|
|
|
|
| 381 |
|
| 382 |
with gr.Tab("Preprints"):
|
| 383 |
gr.Interface(
|
| 384 |
+
fn=search_europepmc,
|
| 385 |
inputs=[
|
| 386 |
gr.Textbox(label="Query", placeholder="long covid treatment"),
|
| 387 |
gr.Slider(1, 50, value=10, step=1, label="Max Results"),
|
| 388 |
],
|
| 389 |
outputs=gr.Markdown(label="Results"),
|
| 390 |
+
api_name="search_europepmc",
|
| 391 |
)
|
| 392 |
|
| 393 |
with gr.Tab("Search All"):
|
|
|
|
| 406 |
**Note**: This is a research tool and should not be used for medical decisions.
|
| 407 |
Always consult healthcare professionals for medical advice.
|
| 408 |
|
| 409 |
+
Built with PydanticAI + PubMed, ClinicalTrials.gov & Europe PMC
|
| 410 |
|
| 411 |
**MCP Server**: Available at `/gradio_api/mcp/` for Claude Desktop integration
|
| 412 |
""")
|
|
|
|
| 444 |
|
| 445 |
from src.mcp_tools import (
|
| 446 |
search_all_sources,
|
| 447 |
+
search_europepmc,
|
| 448 |
search_clinical_trials,
|
| 449 |
search_pubmed,
|
| 450 |
)
|
|
|
|
| 525 |
assert "Clinical Trials" in result
|
| 526 |
|
| 527 |
|
| 528 |
+
class TestSearchEuropePMC:
|
| 529 |
+
"""Tests for search_europepmc MCP tool."""
|
| 530 |
|
| 531 |
@pytest.mark.asyncio
|
| 532 |
async def test_returns_formatted_string(self, mock_evidence: Evidence) -> None:
|
| 533 |
"""Should return formatted markdown string."""
|
| 534 |
+
mock_evidence.citation.source = "europepmc" # type: ignore
|
| 535 |
|
| 536 |
+
with patch("src.mcp_tools._europepmc") as mock_tool:
|
| 537 |
mock_tool.search = AsyncMock(return_value=[mock_evidence])
|
| 538 |
|
| 539 |
+
result = await search_europepmc("preprint search", 10)
|
| 540 |
|
| 541 |
assert isinstance(result, str)
|
| 542 |
assert "Preprint Results" in result
|
|
|
|
| 550 |
"""Should combine results from all sources."""
|
| 551 |
with patch("src.mcp_tools.search_pubmed", new_callable=AsyncMock) as mock_pubmed, \
|
| 552 |
patch("src.mcp_tools.search_clinical_trials", new_callable=AsyncMock) as mock_trials, \
|
| 553 |
+
patch("src.mcp_tools.search_europepmc", new_callable=AsyncMock) as mock_europepmc:
|
| 554 |
|
| 555 |
mock_pubmed.return_value = "## PubMed Results"
|
| 556 |
mock_trials.return_value = "## Clinical Trials"
|
| 557 |
+
mock_europepmc.return_value = "## Preprints"
|
| 558 |
|
| 559 |
result = await search_all_sources("metformin", 5)
|
| 560 |
|
|
|
|
| 568 |
"""Should handle partial failures gracefully."""
|
| 569 |
with patch("src.mcp_tools.search_pubmed", new_callable=AsyncMock) as mock_pubmed, \
|
| 570 |
patch("src.mcp_tools.search_clinical_trials", new_callable=AsyncMock) as mock_trials, \
|
| 571 |
+
patch("src.mcp_tools.search_europepmc", new_callable=AsyncMock) as mock_europepmc:
|
| 572 |
|
| 573 |
mock_pubmed.return_value = "## PubMed Results"
|
| 574 |
mock_trials.side_effect = Exception("API Error")
|
| 575 |
+
mock_europepmc.return_value = "## Preprints"
|
| 576 |
|
| 577 |
result = await search_all_sources("metformin", 5)
|
| 578 |
|
|
|
|
| 599 |
assert search_clinical_trials.__doc__ is not None
|
| 600 |
assert "Args:" in search_clinical_trials.__doc__
|
| 601 |
|
| 602 |
+
def test_search_europepmc_has_args_section(self) -> None:
|
| 603 |
"""Docstring must have Args section for MCP schema generation."""
|
| 604 |
+
assert search_europepmc.__doc__ is not None
|
| 605 |
+
assert "Args:" in search_europepmc.__doc__
|
| 606 |
|
| 607 |
def test_search_all_sources_has_args_section(self) -> None:
|
| 608 |
"""Docstring must have Args section for MCP schema generation."""
|
|
|
|
| 817 |
β Gradio MCP Server β
|
| 818 |
β /gradio_api/mcp/ β
|
| 819 |
β ββββββββββββββββ ββββββββββββββββ ββββββββββββββββ βββββββββββ β
|
| 820 |
+
β βsearch_pubmed β βsearch_trials β βsearch_epmc β βsearch_ β β
|
| 821 |
β β β β β β β βall β β
|
| 822 |
β ββββββββ¬ββββββββ ββββββββ¬ββββββββ ββββββββ¬ββββββββ ββββββ¬βββββ β
|
| 823 |
βββββββββββΌβββββββββββββββββΌβββββββββββββββββΌβββββββββββββββΌβββββββ
|
| 824 |
β β β β
|
| 825 |
▼                ▼                ▼              ▼
|
| 826 |
ββββββββββββ βββββοΏ½οΏ½ββββββ ββββββββββββ (calls all)
|
| 827 |
+
βPubMedToolβ βTrials β βEuropePMC β
|
| 828 |
β β βTool β βTool β
|
| 829 |
ββββββββββββ ββββββββββββ ββββββββββββ
|
| 830 |
```
|
docs/to_do/DEEP_RESEARCH_ROADMAP.md
CHANGED
|
@@ -12,7 +12,7 @@ We already have:
|
|
| 12 |
| Multi-agent orchestration | `orchestrator_magentic.py` | Working |
|
| 13 |
| SearchAgent, JudgeAgent, HypothesisAgent, ReportAgent | `agents/magentic_agents.py` | Working |
|
| 14 |
| HuggingFace free tier | `agent_factory/judges.py` (HFInferenceJudgeHandler) | Working |
|
| 15 |
-
| Budget constraints |
|
| 16 |
| Simple mode (linear) | `orchestrator.py` | Working |
|
| 17 |
|
| 18 |
## What Deep Research Adds
|
|
@@ -200,8 +200,13 @@ async def test_deep_orchestrator_runs_parallel():
|
|
| 200 |
|
| 201 |
**Goal**: Semantic search over accumulated evidence.
|
| 202 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 203 |
```python
|
| 204 |
-
# src/services/rag.py
|
| 205 |
|
| 206 |
class RAGService:
|
| 207 |
"""
|
|
|
|
| 12 |
| Multi-agent orchestration | `orchestrator_magentic.py` | Working |
|
| 13 |
| SearchAgent, JudgeAgent, HypothesisAgent, ReportAgent | `agents/magentic_agents.py` | Working |
|
| 14 |
| HuggingFace free tier | `agent_factory/judges.py` (HFInferenceJudgeHandler) | Working |
|
| 15 |
+
| Budget constraints | MagenticOrchestrator (max_round_count, max_stall_count) | Built-in |
|
| 16 |
| Simple mode (linear) | `orchestrator.py` | Working |
|
| 17 |
|
| 18 |
## What Deep Research Adds
|
|
|
|
| 200 |
|
| 201 |
**Goal**: Semantic search over accumulated evidence.
|
| 202 |
|
| 203 |
+
> **Note**: We already have `src/services/embeddings.py` (EmbeddingService) which provides
|
| 204 |
+
> ChromaDB + sentence-transformers with `add_evidence()` and `search_similar()` methods.
|
| 205 |
+
> The code below is illustrative - in practice, extend EmbeddingService or use it directly.
|
| 206 |
+
> See also: `src/services/llamaindex_rag.py` for OpenAI-based RAG (different use case).
|
| 207 |
+
|
| 208 |
```python
|
| 209 |
+
# src/services/rag.py (illustrative - use EmbeddingService instead)
|
| 210 |
|
| 211 |
class RAGService:
|
| 212 |
"""
|
src/services/llamaindex_rag.py
CHANGED
|
@@ -1,6 +1,10 @@
|
|
| 1 |
"""LlamaIndex RAG service for evidence retrieval and indexing.
|
| 2 |
|
| 3 |
Requires optional dependencies: uv sync --extra modal
|
|
|
|
|
|
|
|
|
|
|
|
|
| 4 |
"""
|
| 5 |
|
| 6 |
from typing import Any
|
|
@@ -34,7 +38,8 @@ class LlamaIndexRAGService:
|
|
| 34 |
Initialize LlamaIndex RAG service.
|
| 35 |
|
| 36 |
Args:
|
| 37 |
-
collection_name: Name of the ChromaDB collection
|
|
|
|
| 38 |
persist_dir: Directory to persist ChromaDB data
|
| 39 |
embedding_model: OpenAI embedding model (defaults to settings.openai_embedding_model)
|
| 40 |
similarity_top_k: Number of top results to retrieve
|
|
|
|
| 1 |
"""LlamaIndex RAG service for evidence retrieval and indexing.
|
| 2 |
|
| 3 |
Requires optional dependencies: uv sync --extra modal
|
| 4 |
+
|
| 5 |
+
Migration Note (v1.0 rebrand):
|
| 6 |
+
Default collection_name changed from "deepcritical_evidence" to "deepboner_evidence".
|
| 7 |
+
To preserve existing data, explicitly pass collection_name="deepcritical_evidence".
|
| 8 |
"""
|
| 9 |
|
| 10 |
from typing import Any
|
|
|
|
| 38 |
Initialize LlamaIndex RAG service.
|
| 39 |
|
| 40 |
Args:
|
| 41 |
+
collection_name: Name of the ChromaDB collection (default changed from
|
| 42 |
+
"deepcritical_evidence" to "deepboner_evidence" in v1.0 rebrand)
|
| 43 |
persist_dir: Directory to persist ChromaDB data
|
| 44 |
embedding_model: OpenAI embedding model (defaults to settings.openai_embedding_model)
|
| 45 |
similarity_top_k: Number of top results to retrieve
|
tests/unit/utils/test_exceptions.py
CHANGED
|
@@ -1,5 +1,9 @@
|
|
| 1 |
"""Unit tests for custom exceptions."""
|
| 2 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
from src.utils.exceptions import (
|
| 4 |
ConfigurationError,
|
| 5 |
DeepBonerError,
|
|
|
|
| 1 |
"""Unit tests for custom exceptions."""
|
| 2 |
|
| 3 |
+
import pytest
|
| 4 |
+
|
| 5 |
+
pytestmark = pytest.mark.unit
|
| 6 |
+
|
| 7 |
from src.utils.exceptions import (
|
| 8 |
ConfigurationError,
|
| 9 |
DeepBonerError,
|