Spaces:
Running
Running
| """ | |
| Medical Query Router for Lung Cancer AI Advisor | |
| """ | |
| import asyncio | |
| import inspect | |
| from fastapi import APIRouter, HTTPException | |
| from fastapi.responses import StreamingResponse | |
| from pydantic import BaseModel, Field | |
| import sys | |
| import os | |
| # Add src to path for imports | |
| sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) | |
| from core.agent import safe_run_agent, safe_run_agent_streaming | |
# Router for all medical-query endpoints; the "medical" tag groups them
# under one section in the generated OpenAPI/Swagger docs.
router = APIRouter(tags=["medical"])
class QueryRequest(BaseModel):
    """
    Request model for medical queries.

    Example:
        {
            "query": "What are the early symptoms of lung cancer?",
            "session_id": "user_123_session_456"
        }
    """

    # Free-text medical question; forwarded verbatim to the agent.
    query: str = Field(
        ...,
        description="The medical question or query about lung cancer",
        example="Give me the options for first line treatment for NSCLC?"
    )
    # Conversation key: reusing the same id lets the agent keep context
    # across multiple requests.
    # NOTE(review): `example=` is the pydantic-v1 Field kwarg (deprecated in
    # v2 in favour of `examples=[...]`) — confirm the installed pydantic
    # major version before migrating.
    session_id: str = Field(
        ...,
        description="Unique session identifier for conversation continuity. Use the same `session_id` to maintain context across multiple queries. Format: `user_{user_id}_session_{timestamp}`",
        example="user_123_session_1699612345"
    )
# NOTE(review): no @router.post(...) decorator is visible in this chunk, so
# this handler is never attached to `router` here — confirm registration
# happens elsewhere (or that the decorator was lost in extraction).
async def ask(request: QueryRequest):
    """
    Process a lung cancer-related medical query and return a complete response.

    The AI agent intelligently selects appropriate tools and data sources to provide
    accurate, evidence-based information about lung cancer.

    Request Body:
    - `query` (required): Your medical question about lung cancer
    - `session_id` (required): Unique identifier to maintain conversation context

    Response:
    - `response`: Complete AI-generated answer in markdown format
    - `session_id`: Echo of the session identifier used

    Example Request:
        {
            "query": "What are the early symptoms of lung cancer?",
            "session_id": "user_123_session_1699612345"
        }

    Example Response:
        {
            "response": "Early symptoms of lung cancer may include...",
            "session_id": "user_123_session_1699612345"
        }

    Frontend Integration Tips:
    - Use the same `session_id` for follow-up questions to maintain context
    - Display response in markdown renderer for better formatting
    - Show loading state while waiting for response
    - Handle 500 errors gracefully with user-friendly messages

    Args:
        request: QueryRequest containing query and session_id

    Returns:
        Dictionary with response text and session_id

    Raises:
        HTTPException: 500 if query processing fails
    """
    try:
        response = await safe_run_agent(user_input=request.query, session_id=request.session_id)
        return {"response": response, "session_id": request.session_id}
    except HTTPException:
        # Already a well-formed HTTP error (possibly raised downstream) —
        # re-raise as-is instead of wrapping it into a generic 500.
        raise
    except Exception as e:
        # Chain the original exception (`from e`) so server logs keep the
        # real traceback while the client sees a generic 500 message.
        raise HTTPException(
            status_code=500,
            detail=f"Error processing medical query: {str(e)}"
        ) from e
# Dedent the docstring so OpenAPI/Redoc renderers don't treat the
# indented lines as a markdown code block (leading 4-space indentation).
# cleandoc() is idempotent, so applying it again elsewhere is harmless.
ask.__doc__ = inspect.cleandoc(ask.__doc__ or "")
# NOTE(review): as with `ask`, no @router decorator is visible in this chunk —
# confirm this streaming endpoint is registered with `router` elsewhere.
async def ask_stream(request: QueryRequest):
    """
    Process a lung cancer-related medical query with real-time streaming response.

    Recommended for frontend use - Provides better user experience by streaming
    the response as it's generated, similar to ChatGPT.

    Request Body:
    - `query` (required): Your medical question about lung cancer
    - `session_id` (required): Unique identifier to maintain conversation context

    Response:
    - Streaming text/markdown content
    - Response is sent in chunks as it's generated
    - Connection stays open until response is complete

    Example Request:
        {
            "query": "Explain the difference between small cell and non-small cell lung cancer",
            "session_id": "user_123_session_1699612345"
        }

    Frontend Integration Tips:
    - Use the same `session_id` for follow-up questions to maintain context
    - Display response in markdown renderer for better formatting
    - Render markdown progressively as chunks arrive
    - Show typing indicator while streaming
    - Handle errors gracefully with user-friendly messages

    Args:
        request: QueryRequest containing query and session_id

    Returns:
        StreamingResponse with text/markdown content

    Raises:
        HTTPException: 500 if query processing fails
    """
    async def event_stream():
        # Defined before the try so the error path below can always flush it.
        chunk_buffer = ""
        try:
            async for chunk in safe_run_agent_streaming(user_input=request.query, session_id=request.session_id):
                chunk_buffer += chunk
                # Coalesce tiny model deltas into >=10-char chunks for
                # smoother client-side rendering.
                if len(chunk_buffer) >= 10:  # Adjust this value as needed
                    yield chunk_buffer
                    chunk_buffer = ""
                    await asyncio.sleep(0.01)  # Small delay for smoother streaming
            # Flush whatever remains once the agent finishes.
            if chunk_buffer:
                yield chunk_buffer
        except Exception as e:
            # Bug fix: previously any content still sitting in the buffer was
            # silently dropped when an error occurred mid-stream; flush it
            # first so the client keeps the partial output.
            if chunk_buffer:
                yield chunk_buffer
            # Errors are reported in-band: the HTTP status is already 200 by
            # the time streaming has started, so raising would be useless.
            yield f"Error: {str(e)}"
    return StreamingResponse(event_stream(), media_type="text/markdown")
# Dedent streaming endpoint docstring for proper Markdown rendering in docs
# (same treatment as the non-streaming `ask` endpoint; cleandoc is idempotent).
ask_stream.__doc__ = inspect.cleandoc(ask_stream.__doc__ or "")