Spaces:
Running
Running
refactor: resolve pylint errors and warnings across api and src routers
Browse files
- Enforced explicit exception chaining (raise ... from exc) in ask/search/analyze routes
- Fixed dynamic method call references pointing to llm_config missing members
- Patched missing explicit utf-8 encodings in file handlers via BiomarkerValidator
- api/app/routes/biomarkers.py +3 -3
- src/biomarker_validator.py +1 -1
- src/main.py +4 -4
- src/routers/analyze.py +2 -2
- src/routers/ask.py +1 -1
- src/routers/search.py +1 -1
api/app/routes/biomarkers.py
CHANGED
|
@@ -34,7 +34,7 @@ async def list_biomarkers():
|
|
| 34 |
# Load biomarker references
|
| 35 |
config_path = Path(__file__).parent.parent.parent.parent / "config" / "biomarker_references.json"
|
| 36 |
|
| 37 |
-
with open(config_path) as f:
|
| 38 |
config_data = json.load(f)
|
| 39 |
|
| 40 |
biomarkers_data = config_data.get("biomarkers", {})
|
|
@@ -73,8 +73,8 @@ async def list_biomarkers():
|
|
| 73 |
biomarkers=biomarkers_list, total_count=len(biomarkers_list), timestamp=datetime.now().isoformat()
|
| 74 |
)
|
| 75 |
|
| 76 |
-
except FileNotFoundError:
|
| 77 |
-
raise HTTPException(status_code=500, detail="Biomarker configuration file not found")
|
| 78 |
|
| 79 |
except Exception as e:
|
| 80 |
raise HTTPException(status_code=500, detail=f"Failed to load biomarkers: {e!s}") from e
|
|
|
|
| 34 |
# Load biomarker references
|
| 35 |
config_path = Path(__file__).parent.parent.parent.parent / "config" / "biomarker_references.json"
|
| 36 |
|
| 37 |
+
with open(config_path, encoding="utf-8") as f:
|
| 38 |
config_data = json.load(f)
|
| 39 |
|
| 40 |
biomarkers_data = config_data.get("biomarkers", {})
|
|
|
|
| 73 |
biomarkers=biomarkers_list, total_count=len(biomarkers_list), timestamp=datetime.now().isoformat()
|
| 74 |
)
|
| 75 |
|
| 76 |
+
except FileNotFoundError as exc:
|
| 77 |
+
raise HTTPException(status_code=500, detail="Biomarker configuration file not found") from exc
|
| 78 |
|
| 79 |
except Exception as e:
|
| 80 |
raise HTTPException(status_code=500, detail=f"Failed to load biomarkers: {e!s}") from e
|
src/biomarker_validator.py
CHANGED
|
@@ -15,7 +15,7 @@ class BiomarkerValidator:
|
|
| 15 |
def __init__(self, reference_file: str = "config/biomarker_references.json"):
|
| 16 |
"""Load biomarker reference ranges from JSON file"""
|
| 17 |
ref_path = Path(__file__).parent.parent / reference_file
|
| 18 |
-
with open(ref_path) as f:
|
| 19 |
self.references = json.load(f)["biomarkers"]
|
| 20 |
|
| 21 |
def validate_biomarker(
|
|
|
|
| 15 |
def __init__(self, reference_file: str = "config/biomarker_references.json"):
|
| 16 |
"""Load biomarker reference ranges from JSON file"""
|
| 17 |
ref_path = Path(__file__).parent.parent / reference_file
|
| 18 |
+
with open(ref_path, encoding="utf-8") as f:
|
| 19 |
self.references = json.load(f)["biomarkers"]
|
| 20 |
|
| 21 |
def validate_biomarker(
|
src/main.py
CHANGED
|
@@ -101,12 +101,12 @@ async def lifespan(app: FastAPI):
|
|
| 101 |
|
| 102 |
# --- Agentic RAG service ---
|
| 103 |
try:
|
| 104 |
-
from src.llm_config import
|
| 105 |
from src.services.agents.agentic_rag import AgenticRAGService
|
| 106 |
from src.services.agents.context import AgenticContext
|
| 107 |
|
| 108 |
if app.state.opensearch_client and app.state.embedding_service:
|
| 109 |
-
llm =
|
| 110 |
ctx = AgenticContext(
|
| 111 |
llm=llm,
|
| 112 |
embedding_service=app.state.embedding_service,
|
|
@@ -136,11 +136,11 @@ async def lifespan(app: FastAPI):
|
|
| 136 |
|
| 137 |
# --- Extraction service (for natural language input) ---
|
| 138 |
try:
|
| 139 |
-
from src.llm_config import
|
| 140 |
from src.services.extraction.service import make_extraction_service
|
| 141 |
|
| 142 |
try:
|
| 143 |
-
llm =
|
| 144 |
except Exception as e:
|
| 145 |
logger.warning("Failed to get LLM for extraction, will use fallback: %s", e)
|
| 146 |
llm = None
|
|
|
|
| 101 |
|
| 102 |
# --- Agentic RAG service ---
|
| 103 |
try:
|
| 104 |
+
from src.llm_config import get_chat_model
|
| 105 |
from src.services.agents.agentic_rag import AgenticRAGService
|
| 106 |
from src.services.agents.context import AgenticContext
|
| 107 |
|
| 108 |
if app.state.opensearch_client and app.state.embedding_service:
|
| 109 |
+
llm = get_chat_model()
|
| 110 |
ctx = AgenticContext(
|
| 111 |
llm=llm,
|
| 112 |
embedding_service=app.state.embedding_service,
|
|
|
|
| 136 |
|
| 137 |
# --- Extraction service (for natural language input) ---
|
| 138 |
try:
|
| 139 |
+
from src.llm_config import get_chat_model
|
| 140 |
from src.services.extraction.service import make_extraction_service
|
| 141 |
|
| 142 |
try:
|
| 143 |
+
llm = get_chat_model()
|
| 144 |
except Exception as e:
|
| 145 |
logger.warning("Failed to get LLM for extraction, will use fallback: %s", e)
|
| 146 |
llm = None
|
src/routers/analyze.py
CHANGED
|
@@ -124,7 +124,7 @@ async def _run_guild_analysis(
|
|
| 124 |
raise HTTPException(
|
| 125 |
status_code=500,
|
| 126 |
detail=f"Analysis pipeline error: {exc}",
|
| 127 |
-
)
|
| 128 |
|
| 129 |
elapsed = (time.time() - t0) * 1000
|
| 130 |
|
|
@@ -159,7 +159,7 @@ async def analyze_natural(body: NaturalAnalysisRequest, request: Request):
|
|
| 159 |
extracted = await extraction_svc.extract_biomarkers(body.message)
|
| 160 |
except Exception as exc:
|
| 161 |
logger.exception("Biomarker extraction failed: %s", exc)
|
| 162 |
-
raise HTTPException(status_code=422, detail=f"Could not extract biomarkers: {exc}")
|
| 163 |
|
| 164 |
patient_ctx = body.patient_context.model_dump(exclude_none=True) if body.patient_context else {}
|
| 165 |
return await _run_guild_analysis(request, extracted, patient_ctx, extracted_biomarkers=extracted)
|
|
|
|
| 124 |
raise HTTPException(
|
| 125 |
status_code=500,
|
| 126 |
detail=f"Analysis pipeline error: {exc}",
|
| 127 |
+
) from exc
|
| 128 |
|
| 129 |
elapsed = (time.time() - t0) * 1000
|
| 130 |
|
|
|
|
| 159 |
extracted = await extraction_svc.extract_biomarkers(body.message)
|
| 160 |
except Exception as exc:
|
| 161 |
logger.exception("Biomarker extraction failed: %s", exc)
|
| 162 |
+
raise HTTPException(status_code=422, detail=f"Could not extract biomarkers: {exc}") from exc
|
| 163 |
|
| 164 |
patient_ctx = body.patient_context.model_dump(exclude_none=True) if body.patient_context else {}
|
| 165 |
return await _run_guild_analysis(request, extracted, patient_ctx, extracted_biomarkers=extracted)
|
src/routers/ask.py
CHANGED
|
@@ -41,7 +41,7 @@ async def ask_medical_question(body: AskRequest, request: Request):
|
|
| 41 |
)
|
| 42 |
except Exception as exc:
|
| 43 |
logger.exception("Agentic RAG failed: %s", exc)
|
| 44 |
-
raise HTTPException(status_code=500, detail=f"RAG pipeline error: {exc}")
|
| 45 |
|
| 46 |
elapsed = (time.time() - t0) * 1000
|
| 47 |
|
|
|
|
| 41 |
)
|
| 42 |
except Exception as exc:
|
| 43 |
logger.exception("Agentic RAG failed: %s", exc)
|
| 44 |
+
raise HTTPException(status_code=500, detail=f"RAG pipeline error: {exc}") from exc
|
| 45 |
|
| 46 |
elapsed = (time.time() - t0) * 1000
|
| 47 |
|
src/routers/search.py
CHANGED
|
@@ -48,7 +48,7 @@ async def hybrid_search(body: SearchRequest, request: Request):
|
|
| 48 |
raise
|
| 49 |
except Exception as exc:
|
| 50 |
logger.exception("Search failed: %s", exc)
|
| 51 |
-
raise HTTPException(status_code=500, detail=f"Search error: {exc}")
|
| 52 |
|
| 53 |
elapsed = (time.time() - t0) * 1000
|
| 54 |
|
|
|
|
| 48 |
raise
|
| 49 |
except Exception as exc:
|
| 50 |
logger.exception("Search failed: %s", exc)
|
| 51 |
+
raise HTTPException(status_code=500, detail=f"Search error: {exc}") from exc
|
| 52 |
|
| 53 |
elapsed = (time.time() - t0) * 1000
|
| 54 |
|