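"""FastAPI service exposing multi-collection ICD-10 code search with
intelligent chapter detection (backed by MultiCollectionChapterRetrieval)."""
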
from fastapi import FastAPI, HTTPException, Query
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from typing import List, Optional
import time
import logging

# Import the new multi-collection system; the original neural searcher is
# only needed by the commented-out legacy endpoint below
# from neural_searcher import NeuralSearcher
from chapter_retrieval_system_v2 import MultiCollectionChapterRetrieval

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = FastAPI(
    title="ICD-10 Multi-Collection Search API",
    description="Advanced ICD-10 code search with intelligent chapter detection",
    version="2.0.0"
)

# Add CORS middleware for web frontend integration
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Configure this properly for production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
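
# A stricter production setup would enumerate origins explicitly, e.g.
# (hypothetical origin, adjust to your deployment):
# app.add_middleware(
#     CORSMiddleware,
#     allow_origins=["https://your-frontend.example.com"],
#     allow_credentials=True,
#     allow_methods=["GET", "POST"],
#     allow_headers=["*"],
# )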

# Initialize systems
try:
    # Initialize the multi-collection chapter retrieval system
    chapter_retriever = MultiCollectionChapterRetrieval()
    # The original neural searcher is kept for backward compatibility; it is
    # not needed when switching fully to the multi-collection approach.
    # neural_searcher = NeuralSearcher(collection_name="icd10_codes_chapter_3")
    logger.info("Successfully initialized search systems")
except Exception as e:
    logger.error(f"Failed to initialize search systems: {e}")
    chapter_retriever = None
    # neural_searcher = None

# Pydantic models for request/response validation
class SearchRequest(BaseModel):
    query: str
    limit: Optional[int] = 10
    score_threshold: Optional[float] = 0.3
    search_mode: Optional[str] = "smart"  # "smart", "all_chapters", "specific_chapters"
    target_chapters: Optional[List[str]] = None
    detailed_analysis: Optional[bool] = False
    chapters_per_sentence: Optional[int] = 2  # NEW: how many chapters to search per sentence


class ChapterInfo(BaseModel):
    chapter_id: str
    collection_name: str
    relevance_score: float
    description: str
    match_count: int
    avg_score: float
    max_score: float


class SearchResult(BaseModel):
    code: str
    title: str
    description: Optional[str] = None
    score: float
    chapter_id: Optional[str] = None
    collection: str
    source_sentence: Optional[str] = None  # NEW: tracks which sentence generated this result
    sentence_key: Optional[str] = None  # NEW: tracks the sentence identifier


class SentenceResults(BaseModel):
    sentence_text: str
    sentence_key: str
    results: List[SearchResult]
    total_results: int


class SearchResponse(BaseModel):
    query: str
    total_results: int
    search_time: float
    search_mode: str
    relevant_chapters: List[ChapterInfo]
    results: List[SearchResult]  # Kept for backward compatibility
    sentence_results: Optional[List[SentenceResults]] = None  # NEW: results grouped by sentence


class ChapterAnalysisResponse(BaseModel):
    query: str
    analysis_time: float
    chapters: List[ChapterInfo]
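
# Example request body for POST /api/search (illustrative values):
# {
#     "query": "patient reports chest pain and shortness of breath",
#     "limit": 10,
#     "score_threshold": 0.3,
#     "search_mode": "smart",
#     "chapters_per_sentence": 2
# }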

# Health check endpoint
@app.get("/health")
def health_check():
    """Health check endpoint"""
    if chapter_retriever is None:
        raise HTTPException(status_code=503, detail="Search system not initialized")
    return {"status": "healthy", "timestamp": time.time()}

# Chapter analysis endpoint
@app.get("/api/analyze-chapters", response_model=ChapterAnalysisResponse)
def analyze_chapters(
    q: str = Query(..., description="Diagnostic query string"),
    detailed: bool = Query(False, description="Include detailed chapter statistics")  # NOTE: accepted but not yet used below
):
    """
    Analyze which ICD-10 chapters are most relevant for a diagnostic query
    """
    if not chapter_retriever:
        raise HTTPException(status_code=503, detail="Chapter retrieval system not available")
    if not q or not q.strip():
        raise HTTPException(status_code=400, detail="Query parameter 'q' is required")
    try:
        start_time = time.time()
        # Perform chapter analysis
        analysis = chapter_retriever.analyze_chapters_parallel(
            q.strip(),
            sample_size_per_chapter=15,
            score_threshold=0.2
        )
        analysis_time = time.time() - start_time
        # Convert to response format
        chapters = []
        for chapter_id, stats in analysis.items():
            if stats['relevance_score'] > 0.05:  # Filter out very low relevance
                chapter_info = ChapterInfo(
                    chapter_id=chapter_id,
                    collection_name=stats['collection_name'],
                    relevance_score=stats['relevance_score'],
                    description=chapter_retriever.chapter_info.get(chapter_id, "Unknown chapter"),
                    match_count=stats['match_count'],
                    avg_score=stats['avg_score'],
                    max_score=stats['max_score']
                )
                chapters.append(chapter_info)
        return ChapterAnalysisResponse(
            query=q,
            analysis_time=analysis_time,
            chapters=chapters
        )
    except Exception as e:
        logger.error(f"Error in chapter analysis: {e}")
        raise HTTPException(status_code=500, detail=f"Chapter analysis failed: {str(e)}")

# Smart search endpoint (main search functionality)
@app.post("/api/search", response_model=SearchResponse)
def search_smart(request: SearchRequest):
    """
    Advanced search with intelligent chapter detection and targeted searching
    """
    return _perform_search(request)


@app.get("/api/search", response_model=SearchResponse)
def search_smart_get(
    q: str = Query(..., description="Diagnostic query string"),
    limit: int = Query(10, ge=1, le=100, description="Maximum number of results"),
    score_threshold: float = Query(0.3, ge=0.0, le=1.0, description="Minimum similarity score"),
    search_mode: str = Query("smart", description="Search mode: smart, all_chapters, specific_chapters"),
    target_chapters: Optional[str] = Query(None, description="Comma-separated list of target chapters (for specific_chapters mode)"),
    detailed_analysis: bool = Query(False, description="Include detailed chapter analysis"),
    chapters_per_sentence: int = Query(2, ge=1, le=5, description="Number of chapters to search per sentence")  # NEW
):
    """
    Advanced search with intelligent chapter detection (GET version)
    """
    # Parse target_chapters if provided
    parsed_chapters = None
    if target_chapters:
        parsed_chapters = [ch.strip() for ch in target_chapters.split(",") if ch.strip()]
    request = SearchRequest(
        query=q,
        limit=limit,
        score_threshold=score_threshold,
        search_mode=search_mode,
        target_chapters=parsed_chapters,
        detailed_analysis=detailed_analysis,
        chapters_per_sentence=chapters_per_sentence  # NEW
    )
    return _perform_search(request)
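
# Example call (assuming the server runs locally on port 8000, as configured below):
#   curl "http://localhost:8000/api/search?q=chest%20pain&limit=5&search_mode=smart"
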
def _perform_search(request: SearchRequest) -> SearchResponse:
    """Internal search logic - UPDATED to return top responses for each sentence"""
    if not chapter_retriever:
        raise HTTPException(status_code=503, detail="Search system not available")
    if not request.query or not request.query.strip():
        raise HTTPException(status_code=400, detail="Query is required")
    try:
        start_time = time.time()
        query = request.query.strip()
        # Initialize response data
        relevant_chapters = []
        results = []
        sentence_results = []  # NEW: for sentence-based results
        # Flattened results, initialized here so the non-smart modes below
        # do not hit a NameError when this list is read after the branch.
        all_results = []
        if request.search_mode == "smart":
            # Smart search: auto-identify chapters, then search them sentence by sentence
            logger.info(f"Performing sentence-based smart search for: '{query}'")
            # First, analyze chapters if detailed analysis is requested
            if request.detailed_analysis:
                analysis = chapter_retriever.analyze_chapters_parallel(query)
                for chapter_id, stats in analysis.items():
                    if stats['relevance_score'] > 0.1:
                        chapter_info = ChapterInfo(
                            chapter_id=chapter_id,
                            collection_name=stats['collection_name'],
                            relevance_score=stats['relevance_score'],
                            description=chapter_retriever.chapter_info.get(chapter_id, "Unknown"),
                            match_count=stats['match_count'],
                            avg_score=stats['avg_score'],
                            max_score=stats['max_score']
                        )
                        relevant_chapters.append(chapter_info)
            # Perform the sentence-based targeted search
            search_results = chapter_retriever.search_targeted_chapters(
                query,
                target_chapters=request.target_chapters,
                results_per_sentence=request.limit,  # Use the full limit per sentence
                chapters_per_sentence=request.chapters_per_sentence
            )
            # NEW: process results by sentence instead of flattening immediately
            sentence_result_map = {}  # Results tracked per sentence
            # Group results by sentence
            for chapter_id, chapter_data in search_results.items():
                for sentence_key, sentence_data in chapter_data.items():
                    sentence_text = sentence_data['text']
                    # Initialize the sentence entry if it does not exist yet
                    if sentence_key not in sentence_result_map:
                        sentence_result_map[sentence_key] = {
                            'text': sentence_text,
                            'results': []
                        }
                    # Add results for this sentence
                    for result in sentence_data['results']:
                        # Create an enriched result carrying chapter/sentence metadata
                        enriched_result = {
                            **result,
                            'chapter_id': chapter_id,
                            'source_sentence': sentence_text,
                            'sentence_key': sentence_key
                        }
                        # Add to the sentence-specific results
                        sentence_result_map[sentence_key]['results'].append(enriched_result)
                        # Also add to the flattened results for backward compatibility
                        all_results.append(enriched_result)
            # NEW: create sentence-based result objects
            for sentence_key, sentence_data in sentence_result_map.items():
                # Sort this sentence's results by score
                sentence_data['results'].sort(key=lambda x: x['score'], reverse=True)
                # Apply the score threshold and the per-sentence limit
                filtered_sentence_results = [
                    r for r in sentence_data['results']
                    if r['score'] >= request.score_threshold
                ][:request.limit]
                # Convert to SearchResult objects
                sentence_search_results = []
                for result in filtered_sentence_results:
                    payload = result['payload']
                    search_result = SearchResult(
                        code=payload.get('code', 'N/A'),
                        title=payload.get('title', 'N/A'),
                        description=payload.get('description'),
                        score=result['score'],
                        chapter_id=result.get('chapter_id'),
                        collection=result['collection'],
                        source_sentence=result.get('source_sentence'),
                        sentence_key=result.get('sentence_key')
                    )
                    sentence_search_results.append(search_result)
                # Only include sentences that produced results
                if sentence_search_results:
                    sentence_result_obj = SentenceResults(
                        sentence_text=sentence_data['text'],
                        sentence_key=sentence_key,
                        results=sentence_search_results,
                        total_results=len(sentence_search_results)
                    )
                    sentence_results.append(sentence_result_obj)
            # Sort sentences by the average score of their results
            sentence_results.sort(
                key=lambda x: sum(r.score for r in x.results) / len(x.results) if x.results else 0,
                reverse=True
            )
            # Trim the flattened results for backward compatibility
            all_results.sort(key=lambda x: x['score'], reverse=True)
            all_results = all_results[:request.limit]
        elif request.search_mode == "all_chapters":
            # Other search modes keep the original logic; similar sentence-based
            # handling could be implemented here if needed.
            logger.info("All chapters search mode - using original logic")
            # ... implement if needed
        elif request.search_mode == "specific_chapters":
            # Handle specific chapters mode
            logger.info("Specific chapters search mode - using original logic")
            # ... implement if needed
        else:
            raise HTTPException(status_code=400, detail=f"Unknown search mode: {request.search_mode}")
        # Convert the flattened results to response format (for backward compatibility)
        for result in all_results:
            if result['score'] >= request.score_threshold:
                payload = result['payload']
                search_result = SearchResult(
                    code=payload.get('code', 'N/A'),
                    title=payload.get('title', 'N/A'),
                    description=payload.get('description'),
                    score=result['score'],
                    chapter_id=result.get('chapter_id'),
                    collection=result['collection'],
                    source_sentence=result.get('source_sentence'),
                    sentence_key=result.get('sentence_key')
                )
                results.append(search_result)
        search_time = time.time() - start_time
        logger.info(
            f"Sentence-based search completed: {len(results)} total results, "
            f"{len(sentence_results)} sentences in {search_time:.3f}s"
        )
        # Debug output
        logger.info("Sentence results breakdown:")
        for sent_result in sentence_results:
            logger.info(f"  '{sent_result.sentence_text}': {sent_result.total_results} results")
        return SearchResponse(
            query=query,
            total_results=len(results),
            search_time=search_time,
            search_mode=request.search_mode,
            relevant_chapters=relevant_chapters,
            results=results,  # Flattened results for backward compatibility
            sentence_results=sentence_results  # NEW: results organized by sentence
        )
    except HTTPException:
        # Let deliberate HTTP errors (e.g. the 400 for an unknown search mode)
        # propagate instead of being converted into a 500 below.
        raise
    except Exception as e:
        logger.error(f"Search error: {e}")
        raise HTTPException(status_code=500, detail=f"Search failed: {str(e)}")
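
# Illustrative (abridged) shape of a SearchResponse for search_mode="smart";
# all field values here are placeholders:
# {
#     "query": "...",
#     "total_results": 3,
#     "search_time": 0.42,
#     "search_mode": "smart",
#     "relevant_chapters": [...],
#     "results": [...],
#     "sentence_results": [
#         {"sentence_text": "...", "sentence_key": "...", "results": [...], "total_results": 2}
#     ]
# }
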
# Backward compatibility endpoint (the original endpoint)
# @app.get("/api/search/legacy")
# def search_legacy(q: str):
#     """
#     Legacy search endpoint for backward compatibility.
#     Uses the original neural searcher.
#     """
#     # if not neural_searcher:
#     #     raise HTTPException(status_code=503, detail="Legacy search system not available")
#     if not q or not q.strip():
#         raise HTTPException(status_code=400, detail="Query parameter 'q' is required")
#     try:
#         result = neural_searcher.search(text=q.strip())
#         return {"result": result}
#     except Exception as e:
#         logger.error(f"Legacy search error: {e}")
#         raise HTTPException(status_code=500, detail=f"Legacy search failed: {str(e)}")

# Get available chapters
@app.get("/api/chapters")
def get_available_chapters():
    """
    Get the list of available ICD-10 chapters and their descriptions
    """
    if not chapter_retriever:
        raise HTTPException(status_code=503, detail="Chapter system not available")
    try:
        chapter_collections = chapter_retriever.get_chapter_collections()
        chapters = []
        for chapter_id, collection_name in chapter_collections.items():
            description = chapter_retriever.chapter_info.get(chapter_id, "Unknown chapter")
            chapters.append({
                "chapter_id": chapter_id,
                "collection_name": collection_name,
                "description": description
            })
        return {
            "total_chapters": len(chapters),
            "chapters": chapters
        }
    except Exception as e:
        logger.error(f"Error getting chapters: {e}")
        raise HTTPException(status_code=500, detail=f"Failed to get chapters: {str(e)}")
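
# Illustrative response shape (actual chapter IDs and descriptions depend on
# the loaded collections):
# {
#     "total_chapters": 22,
#     "chapters": [
#         {"chapter_id": "...", "collection_name": "...", "description": "..."}
#     ]
# }
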
# Get search suggestions/autocomplete (optional enhancement)
@app.get("/api/suggest")
def get_search_suggestions(
    q: str = Query(..., min_length=2, description="Partial query for suggestions"),
    limit: int = Query(5, ge=1, le=20, description="Maximum number of suggestions")
):
    """
    Get search suggestions based on a partial query.
    This is a simple implementation - you might want to enhance this.
    """
    # Simple keyword-based suggestions; a real implementation might use a
    # more sophisticated approach.
    common_terms = [
        "chest pain", "shortness of breath", "diabetes", "hypertension",
        "pneumonia", "fracture", "depression", "anxiety", "fever",
        "headache", "abdominal pain", "nausea", "vomiting", "infection",
        "cancer", "tumor", "heart attack", "stroke", "asthma"
    ]
    query_lower = q.lower().strip()
    suggestions = [term for term in common_terms if query_lower in term.lower()]
    return {"suggestions": suggestions[:limit]}
if __name__ == "__main__":
    import uvicorn

    # Run with more configuration options
    uvicorn.run(
        app,
        host="0.0.0.0",
        port=8000,
        log_level="info",
        access_log=True
    )