File size: 9,803 Bytes
6dc9d46
 
 
 
 
 
696f787
6dc9d46
 
696f787
 
 
6dc9d46
 
 
696f787
6dc9d46
 
 
9659593
6dc9d46
 
 
 
 
696f787
6dc9d46
 
 
9659593
6dc9d46
 
9659593
6dc9d46
 
 
9659593
6dc9d46
9659593
696f787
9659593
 
 
696f787
6dc9d46
9659593
 
696f787
6dc9d46
 
696f787
6dc9d46
 
696f787
6dc9d46
696f787
aefac4f
696f787
6dc9d46
9659593
aefac4f
 
 
 
 
 
9659593
aefac4f
 
9659593
696f787
6dc9d46
 
 
 
 
9659593
 
 
 
aefac4f
9659593
 
6dc9d46
696f787
6dc9d46
aefac4f
6dc9d46
 
 
696f787
9659593
696f787
6dc9d46
9659593
6dc9d46
 
696f787
6dc9d46
9659593
 
 
696f787
6dc9d46
 
 
 
 
 
696f787
9659593
 
 
 
 
6dc9d46
 
 
 
 
 
 
 
9659593
 
 
 
 
6dc9d46
 
 
 
 
 
9659593
 
 
 
696f787
6dc9d46
696f787
6dc9d46
9659593
 
 
 
 
 
 
 
696f787
6dc9d46
696f787
6dc9d46
 
 
696f787
6dc9d46
9659593
696f787
6dc9d46
696f787
6dc9d46
 
9659593
696f787
6dc9d46
9659593
696f787
6dc9d46
 
 
696f787
6dc9d46
9659593
 
 
 
 
 
6dc9d46
 
 
9659593
6dc9d46
 
696f787
6dc9d46
 
9659593
 
 
 
696f787
6dc9d46
696f787
6dc9d46
 
9659593
696f787
6dc9d46
 
9659593
6dc9d46
 
9659593
6dc9d46
9659593
6dc9d46
 
696f787
6dc9d46
9659593
 
 
 
 
 
 
696f787
6dc9d46
9659593
 
 
 
 
 
 
696f787
6dc9d46
696f787
 
6dc9d46
 
696f787
6dc9d46
9659593
696f787
6dc9d46
9659593
6dc9d46
696f787
6dc9d46
696f787
6dc9d46
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
"""
MediGuard AI RAG-Helper
Clinical Guidelines Agent - Retrieves evidence-based recommendations
"""

import re
from pathlib import Path, PurePosixPath, PureWindowsPath

from langchain_core.prompts import ChatPromptTemplate

from src.llm_config import llm_config
from src.state import AgentOutput, GuildState


class ClinicalGuidelinesAgent:
    """Agent that retrieves clinical guidelines and recommendations using RAG.

    Workflow: retrieve guideline documents for the predicted disease, ask the
    LLM for recommendations grounded in those documents, and parse the reply
    into three buckets: immediate actions, lifestyle changes, and monitoring.
    """

    # Heading keywords the LLM is prompted to emit (see the system prompt in
    # _generate_recommendations), mapped to our recommendation buckets.
    # Order matches the original if/elif dispatch priority.
    _SECTION_KEYWORDS = {
        "immediate_actions": ("IMMEDIATE", "URGENT"),
        "lifestyle_changes": ("LIFESTYLE", "CHANGES", "DIET"),
        "monitoring": ("MONITORING", "TRACK"),
    }

    def __init__(self, retriever):
        """
        Initialize with a retriever for clinical guidelines.

        Args:
            retriever: Vector store retriever for guidelines documents
        """
        self.retriever = retriever
        # Uses the shared "explainer" model from the project-wide LLM config.
        self.llm = llm_config.explainer

    def recommend(self, state: GuildState) -> GuildState:
        """
        Retrieve clinical guidelines and generate recommendations.

        Args:
            state: Current guild state. Reads "model_prediction" and "sop";
                "biomarker_analysis" is optional.

        Returns:
            Partial state update: {"agent_outputs": [AgentOutput]} — assumed
            to be merged into the guild state by the graph runtime.
        """
        print("\n" + "=" * 70)
        print("EXECUTING: Clinical Guidelines Agent (RAG)")
        print("=" * 70)

        model_prediction = state["model_prediction"]
        disease = model_prediction["disease"]
        confidence = model_prediction["confidence"]

        # Biomarker analysis may be absent or None; default to no alerts.
        biomarker_analysis = state.get("biomarker_analysis") or {}
        safety_alerts = biomarker_analysis.get("safety_alerts", [])

        # Retrieve guidelines
        print(f"\nRetrieving clinical guidelines for {disease}...")

        query = f"""What are the clinical practice guidelines for managing {disease}? 
        Include lifestyle modifications, monitoring recommendations, and when to seek medical care."""

        docs = self.retriever.invoke(query)

        print(f"Retrieved {len(docs)} guideline documents")

        # When the SOP requires PDF citations and retrieval found nothing,
        # refuse to fabricate advice and defer to a healthcare provider.
        if state["sop"].require_pdf_citations and not docs:
            recommendations = {
                "immediate_actions": [
                    "Insufficient evidence available in the knowledge base. Please consult a healthcare provider."
                ],
                "lifestyle_changes": [],
                "monitoring": [],
                "citations": [],
            }
        else:
            recommendations = self._generate_recommendations(disease, docs, safety_alerts, confidence, state)

        # Package findings for downstream aggregation.
        output = AgentOutput(
            agent_name="Clinical Guidelines",
            findings={
                "disease": disease,
                "immediate_actions": recommendations["immediate_actions"],
                "lifestyle_changes": recommendations["lifestyle_changes"],
                "monitoring": recommendations["monitoring"],
                "guideline_citations": recommendations["citations"],
                "safety_priority": len(safety_alerts) > 0,
                "citations_missing": state["sop"].require_pdf_citations and not docs,
            },
        )

        # Progress summary for the console log.
        print("\nRecommendations generated")
        print(f"  - Immediate actions: {len(recommendations['immediate_actions'])}")
        print(f"  - Lifestyle changes: {len(recommendations['lifestyle_changes'])}")
        print(f"  - Monitoring recommendations: {len(recommendations['monitoring'])}")

        return {"agent_outputs": [output]}

    def _generate_recommendations(
        self, disease: str, docs: list, safety_alerts: list, confidence: float, state: GuildState
    ) -> dict:
        """Generate structured recommendations using LLM and guidelines.

        Falls back to _get_default_recommendations if the LLM call fails.
        Always attaches a "citations" key derived from the retrieved docs.
        """

        # Format retrieved guidelines into one context string, each chunk
        # prefixed with its source so the LLM can ground its answer.
        guidelines_context = "\n\n---\n\n".join(
            [f"Source: {doc.metadata.get('source', 'Unknown')}\n\n{doc.page_content}" for doc in docs]
        )

        # Surface at most three critical alerts so the prompt stays short.
        safety_context = ""
        if safety_alerts:
            safety_context = "\n**CRITICAL SAFETY ALERTS:**\n"
            for alert in safety_alerts[:3]:
                safety_context += f"- {alert.get('biomarker', 'Unknown')}: {alert.get('message', '')}\n"

        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    """You are a clinical decision support system providing evidence-based recommendations.
            Based on clinical practice guidelines, provide actionable recommendations for patient self-assessment.
            
            Structure your response with these sections:
            1. IMMEDIATE_ACTIONS: Urgent steps (especially if safety alerts present)
            2. LIFESTYLE_CHANGES: Diet, exercise, and behavioral modifications
            3. MONITORING: What to track and how often
            
            Make recommendations specific, actionable, and guideline-aligned. 
            Always emphasize consulting healthcare professionals for diagnosis and treatment.""",
                ),
                (
                    "human",
                    """Disease: {disease}
            Prediction Confidence: {confidence:.1%}
            {safety_context}
            
            Clinical Guidelines Context:
            {guidelines}
            
            Please provide structured recommendations for patient self-assessment.""",
                ),
            ]
        )

        chain = prompt | self.llm

        try:
            response = chain.invoke(
                {
                    "disease": disease,
                    "confidence": confidence,
                    "safety_context": safety_context,
                    "guidelines": guidelines_context,
                }
            )

            recommendations = self._parse_recommendations(response.content)

        except Exception as e:
            # Best-effort: never let an LLM outage block the pipeline.
            print(f"Warning: LLM recommendation generation failed: {e}")
            recommendations = self._get_default_recommendations(disease, safety_alerts)

        # Add citations
        recommendations["citations"] = self._extract_citations(docs)

        return recommendations

    def _parse_recommendations(self, content: str) -> dict:
        """Parse the LLM response into structured recommendation lists.

        Returns a dict with "immediate_actions", "lifestyle_changes" and
        "monitoring" lists (no "citations" key — the caller adds it).
        """
        recommendations = {"immediate_actions": [], "lifestyle_changes": [], "monitoring": []}

        current_section = None

        for line in content.split("\n"):
            line_stripped = line.strip()
            if not line_stripped:
                continue
            line_upper = line_stripped.upper()

            # A line is only treated as a section heading when it looks like
            # one: it ends with a colon (after trailing markdown decoration)
            # or it is short with no lowercase letters. Without this guard a
            # content bullet such as "Track blood pressure weekly" would
            # hijack the current section (it contains the keyword TRACK).
            candidate = line_stripped.strip("*#_ ")
            looks_like_heading = candidate.rstrip("*_ ").endswith(":") or (
                len(candidate) <= 40 and line_stripped == line_upper
            )

            section = None
            if looks_like_heading:
                for name, keywords in self._SECTION_KEYWORDS.items():
                    if any(keyword in line_upper for keyword in keywords):
                        section = name
                        break

            if section:
                current_section = section
            elif current_section:
                # Strip bullet markers and "1." / "2)" enumeration prefixes
                # without eating digits that belong to the content itself
                # (e.g. "30 minutes of exercise" keeps its "30").
                cleaned = re.sub(r"^\s*(?:[•\-*]+\s*)?(?:\d+[.)]\s*)?", "", line_stripped)
                if len(cleaned) > 10:  # minimum length filter drops fragments
                    recommendations[current_section].append(cleaned)

        # If no recognizable sections were found, distribute the first few
        # sentences across the buckets so callers always get something.
        if not any(recommendations.values()):
            sentences = content.split(".")
            recommendations["immediate_actions"] = [s.strip() for s in sentences[:2] if s.strip()]
            recommendations["lifestyle_changes"] = [s.strip() for s in sentences[2:4] if s.strip()]
            recommendations["monitoring"] = [s.strip() for s in sentences[4:6] if s.strip()]

        return recommendations

    def _get_default_recommendations(self, disease: str, safety_alerts: list) -> dict:
        """Provide conservative default recommendations if the LLM fails."""
        recommendations = {"immediate_actions": [], "lifestyle_changes": [], "monitoring": []}

        # Safety alerts escalate the immediate-action messaging.
        if safety_alerts:
            recommendations["immediate_actions"].append(
                "Consult healthcare provider immediately regarding critical biomarker values"
            )
            recommendations["immediate_actions"].append("Bring this report and recent lab results to your appointment")
        else:
            recommendations["immediate_actions"].append(
                f"Schedule appointment with healthcare provider to discuss {disease} findings"
            )

        # Generic lifestyle changes
        recommendations["lifestyle_changes"].extend(
            [
                "Follow a balanced, nutrient-rich diet as recommended by healthcare provider",
                "Maintain regular physical activity appropriate for your health status",
                "Track symptoms and biomarker trends over time",
            ]
        )

        # Generic monitoring
        recommendations["monitoring"].extend(
            [
                f"Regular monitoring of {disease}-related biomarkers as advised by physician",
                "Keep a health journal tracking symptoms, diet, and activities",
                "Schedule follow-up appointments as recommended",
            ]
        )

        return recommendations

    def _extract_citations(self, docs: list) -> list[str]:
        """Extract de-duplicated source file names from retrieved documents.

        Order of first appearance is preserved (deterministic, unlike the
        previous list(set(...)) which returned an arbitrary order).
        """
        citations = []

        for doc in docs:
            source = doc.metadata.get("source", "Unknown")

            # Reduce a full path to its file name. PureWindowsPath is used
            # for backslash paths so Windows-style sources split correctly
            # even when this code runs on a POSIX host (plain Path would
            # treat the backslashes as part of the name there).
            if "\\" in source:
                source = PureWindowsPath(source).name
            elif "/" in source:
                source = PurePosixPath(source).name

            citations.append(source)

        return list(dict.fromkeys(citations))


def create_clinical_guidelines_agent(retriever):
    """Build a ClinicalGuidelinesAgent wired to the given retriever.

    Args:
        retriever: Vector store retriever for guidelines documents

    Returns:
        A configured ClinicalGuidelinesAgent instance
    """
    agent = ClinicalGuidelinesAgent(retriever)
    return agent