File size: 5,647 Bytes
2a8faae
0176a31
2a8faae
 
0176a31
2a8faae
 
0176a31
2a8faae
 
 
 
 
 
 
 
 
 
 
0176a31
2a8faae
0176a31
20953d6
0176a31
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2a8faae
0176a31
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2a8faae
0176a31
 
2a8faae
 
 
 
 
 
 
0176a31
 
 
2a8faae
0176a31
 
 
 
 
 
 
2a8faae
0176a31
 
 
 
 
 
 
 
 
 
 
 
 
20953d6
0176a31
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20953d6
0176a31
 
 
 
 
 
 
2a8faae
 
 
 
0176a31
2a8faae
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0176a31
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
"""
Medical Query Router for Lung Cancer AI Advisor
"""
import asyncio
import inspect
from fastapi import APIRouter, HTTPException
from fastapi.responses import StreamingResponse
from pydantic import BaseModel, Field
import sys
import os

# Make the project's `src` directory importable when this module is loaded
# outside the package context: appends the directory two levels above this
# file's folder to sys.path so `core.agent` resolves.
# NOTE(review): mutating sys.path at import time is fragile — prefer
# package-relative imports or installing the project; confirm before removing.
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))

from core.agent import safe_run_agent, safe_run_agent_streaming

# All endpoints below are grouped under the "medical" tag in the OpenAPI docs.
router = APIRouter(tags=["medical"])


class QueryRequest(BaseModel):
    """
    Request model for medical queries
    
    Example:
        {
            "query": "What are the early symptoms of lung cancer?",
            "session_id": "user_123_session_456"
        }
    """
    # Free-text question forwarded to the AI agent.
    query: str = Field(
        default=...,
        description="The medical question or query about lung cancer",
        example="Give me the options for first line treatment for NSCLC?",
    )
    # Conversation key: callers reuse the same id to keep context across queries.
    session_id: str = Field(
        default=...,
        description="Unique session identifier for conversation continuity. Use the same `session_id` to maintain context across multiple queries. Format: `user_{user_id}_session_{timestamp}`",
        example="user_123_session_1699612345",
    )


@router.post(
    "/ask",
    summary="Ask a lung cancer question",
)
async def ask(request: QueryRequest):
    """
    Process a lung cancer-related medical query and return a complete response.

    The AI agent intelligently selects appropriate tools and data sources to provide
    accurate, evidence-based information about lung cancer.

    Request Body:
    - `query` (required): Your medical question about lung cancer
    - `session_id` (required): Unique identifier to maintain conversation context

    Response:
    - `response`: Complete AI-generated answer in markdown format
    - `session_id`: Echo of the session identifier used

    Example Request:
        {
            "query": "What are the early symptoms of lung cancer?",
            "session_id": "user_123_session_1699612345"
        }

    Example Response:
        {
            "response": "Early symptoms of lung cancer may include...\n\n**Common Early Signs:**\n- Persistent cough...",
            "session_id": "user_123_session_1699612345"
        }

    Frontend Integration Tips:
    - Use the same `session_id` for follow-up questions to maintain context
    - Display response in markdown renderer for better formatting
    - Show loading state while waiting for response
    - Handle 500 errors gracefully with user-friendly messages

    Args:
        request: QueryRequest containing query and session_id

    Returns:
        Dictionary with response text and session_id

    Raises:
        HTTPException: 500 if query processing fails
    """
    # Docstring above is uniformly indented so inspect.cleandoc (applied at
    # module level) can strip the common margin; previously the summary lines
    # kept a 4-space indent and rendered as a markdown code block in the docs.
    try:
        response = await safe_run_agent(user_input=request.query, session_id=request.session_id)
        return {"response": response, "session_id": request.session_id}
    except HTTPException:
        # Don't mask deliberate HTTP errors raised downstream with a generic 500.
        raise
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Error processing medical query: {str(e)}"
        ) from e

# Dedent the docstring so OpenAPI/Redoc renderers don't treat the
# indented lines as a markdown code block (leading 4-space indentation).
# inspect.cleandoc strips the margin in place on __doc__, which the
# docs UI presumably reads when building the endpoint description.
ask.__doc__ = inspect.cleandoc(ask.__doc__ or "")


@router.post(
    "/ask/stream",
    summary="Ask a lung cancer question with streaming response",
)
async def ask_stream(request: QueryRequest):
    """
    Process a lung cancer-related medical query with real-time streaming response.
    
    Recommended for frontend use - Provides better user experience by streaming
    the response as it's generated, similar to ChatGPT.
    
    Request Body:
    - `query` (required): Your medical question about lung cancer
    - `session_id` (required): Unique identifier to maintain conversation context
    
    Response:
    - Streaming text/markdown content
    - Response is sent in chunks as it's generated
    - Connection stays open until response is complete
    
    
    Example Request:
        {
            "query": "Explain the difference between small cell and non-small cell lung cancer",
            "session_id": "user_123_session_1699612345"
        }
    
    Frontend Integration Tips:
    - Use the same `session_id` for follow-up questions to maintain context
    - Display response in markdown renderer for better formatting
    - Show loading state while waiting for response
    - Render markdown progressively as chunks arrive
    - Show typing indicator while streaming
    - Handle 500 errors gracefully with user-friendly messages

    Args:
        request: QueryRequest containing query and session_id
        
    Returns:
        StreamingResponse with text/markdown content
    
    Raises:
        HTTPException: 500 if query processing fails
    """
    # Re-chunk the agent's output: flush once at least this many characters
    # have accumulated, so the client sees smooth, medium-sized updates.
    flush_threshold = 10

    async def event_stream():
        try:
            pending = ""
            async for piece in safe_run_agent_streaming(user_input=request.query, session_id=request.session_id):
                pending += piece
                if len(pending) < flush_threshold:
                    continue
                yield pending
                pending = ""
                # Brief pause between flushes for smoother progressive rendering.
                await asyncio.sleep(0.01)

            # Flush whatever remains once the agent finishes.
            if pending:
                yield pending

        except Exception as e:
            # Headers are already sent on an open stream, so errors are
            # surfaced in-band as stream text rather than as an HTTP status.
            yield f"Error: {str(e)}"

    return StreamingResponse(event_stream(), media_type="text/markdown")

# Dedent streaming endpoint docstring for proper Markdown rendering in docs
# (same cleandoc treatment as the non-streaming /ask endpoint above).
ask_stream.__doc__ = inspect.cleandoc(ask_stream.__doc__ or "")