Update agent.py
Browse files
agent.py
CHANGED
|
@@ -1,296 +1,308 @@
|
|
| 1 |
-
#
|
| 2 |
-
import
|
| 3 |
import json
|
| 4 |
import re
|
| 5 |
import os
|
| 6 |
-
import operator
|
| 7 |
import traceback
|
| 8 |
-
from
|
| 9 |
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
from
|
| 13 |
-
from langchain_core.
|
| 14 |
-
|
| 15 |
-
from
|
| 16 |
-
|
| 17 |
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
#
|
| 21 |
UMLS_API_KEY = os.environ.get("UMLS_API_KEY")
|
| 22 |
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
|
| 23 |
TAVILY_API_KEY = os.environ.get("TAVILY_API_KEY")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 24 |
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
MAX_SEARCH_RESULTS = 3
|
| 29 |
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
|
|
|
|
|
|
|
|
|
| 34 |
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
@lru_cache(maxsize=256)
def get_rxcui(drug_name: str) -> Optional[str]:
    """Resolve a drug name to its RxNorm concept identifier (RxCUI).

    Tries the exact-match /rxcui.json endpoint first, then falls back to
    /drugs.json and scans concept groups for prescribable term types.
    Results are memoized (bounded LRU) because lookups hit a remote API.

    Args:
        drug_name: Brand or generic drug name (leading/trailing whitespace tolerated).

    Returns:
        The RxCUI string, or None when the name is empty, not a string,
        not found, or any network/JSON error occurs.
    """
    if not drug_name or not isinstance(drug_name, str):
        return None
    # BUG FIX: in the original, .strip() was chained after `return None` on the
    # guard line, so it was unreachable and names were never normalized.
    drug_name = drug_name.strip()
    if not drug_name:
        return None
    print(f"RxNorm Lookup for: '{drug_name}'")
    try:
        # Primary lookup: exact name match.
        params = {"name": drug_name, "search": 1}
        response = requests.get(f"{RXNORM_API_BASE}/rxcui.json", params=params, timeout=10)
        response.raise_for_status()
        data = response.json()
        if data and "idGroup" in data and "rxnormId" in data["idGroup"]:
            rxcui = data["idGroup"]["rxnormId"][0]
            print(f" Found RxCUI: {rxcui} for '{drug_name}'")
            return rxcui
        # Fallback: /drugs.json returns concept groups keyed by term type (tty).
        params = {"name": drug_name}
        response = requests.get(f"{RXNORM_API_BASE}/drugs.json", params=params, timeout=10)
        response.raise_for_status()
        data = response.json()
        if data and "drugGroup" in data and "conceptGroup" in data["drugGroup"]:
            for group in data["drugGroup"]["conceptGroup"]:
                # Only accept prescribable term types (branded/clinical drugs,
                # packs, and ingredient-level concepts).
                if group.get("tty") in ["SBD", "SCD", "GPCK", "BPCK", "IN", "MIN", "PIN"]:
                    if "conceptProperties" in group and group["conceptProperties"]:
                        rxcui = group["conceptProperties"][0].get("rxcui")
                        if rxcui:
                            print(f" Found RxCUI (via /drugs): {rxcui} for '{drug_name}'")
                            return rxcui
        print(f" RxCUI not found for '{drug_name}'.")
        return None
    except requests.exceptions.RequestException as e:
        print(f" Error fetching RxCUI for '{drug_name}': {e}")
        return None
    except json.JSONDecodeError as e:
        print(f" Error decoding RxNorm JSON response for '{drug_name}': {e}")
        return None
    except Exception as e:
        print(f" Unexpected error in get_rxcui for '{drug_name}': {e}")
        return None
|
| 56 |
-
@lru_cache(maxsize=128)
def get_openfda_label(rxcui: Optional[str] = None, drug_name: Optional[str] = None) -> Optional[dict]:
    """Fetch the first matching FDA structured product label from openFDA.

    Builds an OR-composed search over RxCUI codes and brand/generic names,
    so either identifier is sufficient. Memoized (bounded LRU) because each
    call performs an HTTP request.

    Args:
        rxcui: RxNorm concept id, if known.
        drug_name: Brand or generic name, if known.

    Returns:
        The first label record (dict) or None when nothing matches, both
        arguments are missing, or a network/JSON error occurs.
    """
    if not rxcui and not drug_name:
        return None
    # BUG FIX: in the original, the print and `search_terms = []` were chained
    # after `return None` on the guard line, so the list was never initialized
    # and the appends below raised NameError.
    print(f"OpenFDA Label Lookup for: RXCUI={rxcui}, Name={drug_name}")
    search_terms = []
    if rxcui:
        search_terms.append(f'spl_rxnorm_code:"{rxcui}" OR openfda.rxcui:"{rxcui}"')
    if drug_name:
        search_terms.append(f'(openfda.brand_name:"{drug_name.lower()}" OR openfda.generic_name:"{drug_name.lower()}")')
    search_query = " OR ".join(search_terms)
    params = {"search": search_query, "limit": 1}  # only the top hit is used
    try:
        response = requests.get(OPENFDA_API_BASE, params=params, timeout=15)
        response.raise_for_status()
        data = response.json()
        if data and "results" in data and data["results"]:
            print(f" Found OpenFDA label for query: {search_query}")
            return data["results"][0]
        print(f" No OpenFDA label found for query: {search_query}")
        return None
    except requests.exceptions.RequestException as e:
        print(f" Error fetching OpenFDA label: {e}")
        return None
    except json.JSONDecodeError as e:
        print(f" Error decoding OpenFDA JSON response: {e}")
        return None
    except Exception as e:
        print(f" Unexpected error in get_openfda_label: {e}")
        return None
|
| 69 |
-
def search_text_list(text_list: Optional[List[str]], search_terms: List[str]) -> List[str]:
    """Scan label text sections for any of the given terms and return snippets.

    For each text item, the first matching term is excerpted with ~50 chars of
    leading and ~100 chars of trailing context, the match is bolded with
    Markdown (**term**), and the snippet is wrapped in ellipses. Only one
    snippet is reported per text item.

    Args:
        text_list: List of text sections (non-strings are skipped); may be None.
        search_terms: Terms to look for (case-insensitive); falsy terms ignored.

    Returns:
        List of "...snippet..." strings, possibly empty.
    """
    found_snippets: List[str] = []
    if not text_list or not search_terms:
        return found_snippets
    # BUG FIX: this initialization was chained after `return found_snippets`
    # on the guard line above, so it was unreachable and the loop below
    # raised NameError.
    search_terms_lower = [str(term).lower() for term in search_terms if term]
    for text_item in text_list:
        if not isinstance(text_item, str):
            continue
        # BUG FIX: likewise chained after `continue` in the original.
        text_item_lower = text_item.lower()
        for term in search_terms_lower:
            if term in text_item_lower:
                start_index = text_item_lower.find(term)
                snippet_start = max(0, start_index - 50)
                snippet_end = min(len(text_item), start_index + len(term) + 100)
                snippet = text_item[snippet_start:snippet_end]
                # Highlight only the first occurrence inside the snippet.
                snippet = re.sub(f"({re.escape(term)})", r"**\1**", snippet, count=1, flags=re.IGNORECASE)
                found_snippets.append(f"...{snippet}...")
                break  # Only report first matching term per text item
    return found_snippets
|
| 81 |
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 86 |
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
symptoms_lower = [str(s).lower() for s in symptoms if isinstance(s, str)]
|
| 96 |
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
if "syncope" in symptoms_lower: flags.append("Red Flag: Syncope (fainting).")
|
| 105 |
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 110 |
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
if spo2 is not None and spo2 <= 92:
|
| 121 |
-
flags.append(f"Red Flag: Hypoxia ({spo2}%).")
|
| 122 |
-
if bp_str:
|
| 123 |
-
bp = parse_bp(bp_str)
|
| 124 |
-
if bp:
|
| 125 |
-
if bp[0] >= 180 or bp[1] >= 110:
|
| 126 |
-
flags.append(f"Red Flag: Hypertensive Urgency/Emergency (BP: {bp_str} mmHg).")
|
| 127 |
-
if bp[0] <= 90 or bp[1] <= 60:
|
| 128 |
-
flags.append(f"Red Flag: Hypotension (BP: {bp_str} mmHg).")
|
| 129 |
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
|
| 138 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 139 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 140 |
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 152 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 153 |
|
| 154 |
-
#
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
|
|
|
|
| 160 |
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
|
| 170 |
-
|
| 171 |
-
|
| 172 |
-
if match: current_med_names_lower.append(match.group(1).lower());
|
| 173 |
-
allergies_lower = [str(a).lower().strip() for a in allergies_list if a]; print(f" Against Current Meds (names): {current_med_names_lower}"); print(f" Against Allergies: {allergies_lower}");
|
| 174 |
-
print(f" Step 1: Normalizing '{potential_prescription}'..."); potential_rxcui = get_rxcui(potential_prescription); potential_label = get_openfda_label(rxcui=potential_rxcui, drug_name=potential_prescription);
|
| 175 |
-
if not potential_rxcui and not potential_label: warnings.append(f"INFO: Could not reliably identify '{potential_prescription}'. Checks may be incomplete.");
|
| 176 |
-
print(" Step 2: Performing Allergy Check...");
|
| 177 |
-
for allergy in allergies_lower:
|
| 178 |
-
if allergy == potential_med_lower: warnings.append(f"CRITICAL ALLERGY (Name Match): Patient allergic to '{allergy}'. Potential prescription is '{potential_prescription}'.");
|
| 179 |
-
elif allergy in ["penicillin", "pcns"] and potential_med_lower in ["amoxicillin", "ampicillin", "augmentin", "piperacillin"]: warnings.append(f"POTENTIAL CROSS-ALLERGY: Patient allergic to Penicillin. High risk with '{potential_prescription}'.");
|
| 180 |
-
elif allergy == "sulfa" and potential_med_lower in ["sulfamethoxazole", "bactrim", "sulfasalazine"]: warnings.append(f"POTENTIAL CROSS-ALLERGY: Patient allergic to Sulfa. High risk with '{potential_prescription}'.");
|
| 181 |
-
elif allergy in ["nsaids", "aspirin"] and potential_med_lower in ["ibuprofen", "naproxen", "ketorolac", "diclofenac"]: warnings.append(f"POTENTIAL CROSS-ALLERGY: Patient allergic to NSAIDs/Aspirin. Risk with '{potential_prescription}'.");
|
| 182 |
-
if potential_label: contraindications = potential_label.get("contraindications"); warnings_section = potential_label.get("warnings_and_cautions") or potential_label.get("warnings");
|
| 183 |
-
if contraindications: allergy_mentions_ci = search_text_list(contraindications, allergies_lower);
|
| 184 |
-
if allergy_mentions_ci: warnings.append(f"ALLERGY RISK (Contraindication Found): Label for '{potential_prescription}' mentions contraindication potentially related to patient allergies: {'; '.join(allergy_mentions_ci)}");
|
| 185 |
-
if warnings_section: allergy_mentions_warn = search_text_list(warnings_section, allergies_lower);
|
| 186 |
-
if allergy_mentions_warn: warnings.append(f"ALLERGY RISK (Warning Found): Label for '{potential_prescription}' mentions warnings potentially related to patient allergies: {'; '.join(allergy_mentions_warn)}");
|
| 187 |
-
print(" Step 3: Performing Drug-Drug Interaction Check...");
|
| 188 |
-
if potential_rxcui or potential_label:
|
| 189 |
-
for current_med_name in current_med_names_lower:
|
| 190 |
-
if not current_med_name or current_med_name == potential_med_lower: continue; print(f" Checking interaction between '{potential_prescription}' and '{current_med_name}'..."); current_rxcui = get_rxcui(current_med_name); current_label = get_openfda_label(rxcui=current_rxcui, drug_name=current_med_name); search_terms_for_current = [current_med_name];
|
| 191 |
-
if current_rxcui: search_terms_for_current.append(current_rxcui); search_terms_for_potential = [potential_med_lower];
|
| 192 |
-
if potential_rxcui: search_terms_for_potential.append(potential_rxcui); interaction_found_flag = False;
|
| 193 |
-
if potential_label and potential_label.get("drug_interactions"): interaction_mentions = search_text_list(potential_label.get("drug_interactions"), search_terms_for_current);
|
| 194 |
-
if interaction_mentions: warnings.append(f"Potential Interaction ({potential_prescription.capitalize()} Label): Mentions '{current_med_name.capitalize()}'. Snippets: {'; '.join(interaction_mentions)}"); interaction_found_flag = True;
|
| 195 |
-
if current_label and current_label.get("drug_interactions") and not interaction_found_flag: interaction_mentions = search_text_list(current_label.get("drug_interactions"), search_terms_for_potential);
|
| 196 |
-
if interaction_mentions: warnings.append(f"Potential Interaction ({current_med_name.capitalize()} Label): Mentions '{potential_prescription.capitalize()}'. Snippets: {'; '.join(interaction_mentions)}");
|
| 197 |
-
else: warnings.append(f"INFO: Drug-drug interaction check skipped for '{potential_prescription}' as it could not be identified via RxNorm/OpenFDA.");
|
| 198 |
-
final_warnings = list(set(warnings)); status = "warning" if any("CRITICAL" in w or "Interaction" in w or "RISK" in w for w in final_warnings) else "clear";
|
| 199 |
-
if not final_warnings: status = "clear"; message = f"Interaction/Allergy check for '{potential_prescription}': {len(final_warnings)} potential issue(s) identified using RxNorm/OpenFDA." if final_warnings else f"No major interactions or allergy issues identified for '{potential_prescription}' based on RxNorm/OpenFDA lookup."; print(f"--- Interaction Check Complete ---");
|
| 200 |
-
return json.dumps({"status": status, "message": message, "warnings": final_warnings})
|
| 201 |
-
@tool("flag_risk", args_schema=FlagRiskInput)
def flag_risk(risk_description: str, urgency: str) -> str:
    """Record a clinical risk flag at the given urgency level.

    Args:
        risk_description: Free-text description of the risk being flagged.
        urgency: Urgency label supplied by the agent (validated by FlagRiskInput).

    Returns:
        JSON string: {"status": "flagged", "message": ...}.
    """
    print(f"Executing flag_risk: {risk_description}, Urgency: {urgency}")
    return json.dumps({"status": "flagged", "message": f"Risk '{risk_description}' flagged with {urgency} urgency."})
|
| 204 |
-
# Tavily web search tool, capped at MAX_SEARCH_RESULTS hits per query.
search_tool = TavilySearchResults(max_results=MAX_SEARCH_RESULTS, name="tavily_search_results")
# Registry of every tool the agent is allowed to call.
all_tools = [order_lab_test, prescribe_medication, check_drug_interactions, flag_risk, search_tool]
|
| 206 |
|
| 207 |
-
|
| 208 |
-
class AgentState(TypedDict):
    """Shared state dictionary passed between LangGraph nodes."""
    # Conversation history; Annotated with operator.add so node returns append.
    messages: Annotated[list[Any], operator.add]
    # Structured intake data compiled by the UI; None until a patient is loaded.
    patient_data: Optional[dict]
    summary: Optional[str]
    # Warnings emitted by check_drug_interactions; routes the graph to reflection.
    interaction_warnings: Optional[List[str]]
|
| 209 |
-
# Single Groq-hosted chat model shared by the agent and reflection nodes.
llm = ChatGroq(temperature=AGENT_TEMPERATURE, model=AGENT_MODEL_NAME)
model_with_tools = llm.bind_tools(all_tools)  # same model with tool schemas bound for function-calling
tool_executor = ToolExecutor(all_tools)       # executes the tool calls the model emits
|
| 210 |
-
def agent_node(state: AgentState):
    """LangGraph node: invoke the tool-bound LLM on the conversation so far.

    Ensures the clinical system prompt is the first message, then calls the
    model. On failure, returns an AIMessage carrying the error text instead
    of raising, so the graph can finish the turn.

    Returns:
        {"messages": [response]} — a single-element list merged into state
        via the operator.add annotation on AgentState.messages.
    """
    print("\n---AGENT NODE---")
    current_messages = state['messages']
    # Prepend the system prompt once; it is not persisted back into state.
    if not current_messages or not isinstance(current_messages[0], SystemMessage):
        print("Prepending System Prompt.")
        current_messages = [SystemMessage(content=ClinicalPrompts.SYSTEM_PROMPT)] + current_messages
    print(f"Invoking LLM with {len(current_messages)} messages.")
    try:
        response = model_with_tools.invoke(current_messages)
        print(f"Agent Raw Response Type: {type(response)}")
        if hasattr(response, 'tool_calls') and response.tool_calls:
            print(f"Agent Response Tool Calls: {response.tool_calls}")
        else:
            print("Agent Response: No tool calls.")
    except Exception as e:
        # Degrade gracefully: surface the error as an assistant message.
        print(f"ERROR in agent_node: {e}")
        traceback.print_exc()
        error_message = AIMessage(content=f"Error: {e}")
        return {"messages": [error_message]}
    return {"messages": [response]}
|
| 219 |
-
def tool_node(state: AgentState):
    """LangGraph node: enforce safety gating, then execute the agent's tool calls.

    Safety rule: a `prescribe_medication` call is blocked (replaced with an
    error ToolMessage) unless a matching `check_drug_interactions` call for
    the same medication was requested in the same turn. Interaction-check
    calls are augmented with the patient's current medications and allergies
    from state before execution.

    Returns:
        {"messages": [...ToolMessage...], "interaction_warnings": list | None}
        — warnings are forwarded so the router can send the graph to the
        reflection node.
    """
    print("\n---TOOL NODE---")
    tool_messages = []
    last_message = state['messages'][-1]
    interaction_warnings_found = []
    if not isinstance(last_message, AIMessage) or not getattr(last_message, 'tool_calls', None):
        print("Warning: Tool node called unexpectedly.")
        return {"messages": [], "interaction_warnings": None}
    tool_calls = last_message.tool_calls
    print(f"Tool calls received: {json.dumps(tool_calls, indent=2)}")
    # Index requested prescriptions and interaction checks by medication name.
    prescriptions_requested = {}
    interaction_checks_requested = {}
    for call in tool_calls:
        tool_name = call.get('name')
        tool_args = call.get('args', {})
        if tool_name == 'prescribe_medication':
            med_name = tool_args.get('medication_name', '').lower()
            if med_name:
                prescriptions_requested[med_name] = call
        elif tool_name == 'check_drug_interactions':
            potential_med = tool_args.get('potential_prescription', '').lower()
            if potential_med:
                interaction_checks_requested[potential_med] = call
    # Block any prescription that lacks a paired interaction check.
    blocked_ids = set()
    for med_name, prescribe_call in prescriptions_requested.items():
        if med_name not in interaction_checks_requested:
            print(f"**SAFETY VIOLATION (Agent): Prescribe '{med_name}' blocked - no interaction check requested.**")
            error_msg = ToolMessage(content=json.dumps({"status": "error", "message": f"Interaction check needed for '{med_name}'."}), tool_call_id=prescribe_call['id'], name=prescribe_call['name'])
            tool_messages.append(error_msg)
            blocked_ids.add(prescribe_call['id'])
    valid_tool_calls_for_execution = [call for call in tool_calls if call['id'] not in blocked_ids]
    # Inject patient context into every interaction check.
    patient_data = state.get("patient_data", {})
    patient_meds_full = patient_data.get("medications", {}).get("current", [])
    patient_allergies = patient_data.get("allergies", [])
    for call in valid_tool_calls_for_execution:
        if call['name'] == 'check_drug_interactions':
            if 'args' not in call:
                call['args'] = {}
            # BUG FIX: in the original, the two assignments and the print were
            # chained onto the `if 'args' not in call:` line, so patient meds
            # and allergies were only injected when 'args' was missing entirely.
            call['args']['current_medications'] = patient_meds_full
            call['args']['allergies'] = patient_allergies
            print(f"Augmented interaction check args for call ID {call['id']}")
    if valid_tool_calls_for_execution:
        print(f"Attempting execution: {[c['name'] for c in valid_tool_calls_for_execution]}")
        try:
            responses = tool_executor.batch(valid_tool_calls_for_execution, return_exceptions=True)
            for call, resp in zip(valid_tool_calls_for_execution, responses):
                tool_call_id = call['id']
                tool_name = call['name']
                if isinstance(resp, Exception):
                    error_type = type(resp).__name__
                    error_str = str(resp)
                    print(f"ERROR executing tool '{tool_name}': {error_type} - {error_str}")
                    traceback.print_exc()
                    error_content = json.dumps({"status": "error", "message": f"Failed: {error_type} - {error_str}"})
                    tool_messages.append(ToolMessage(content=error_content, tool_call_id=tool_call_id, name=tool_name))
                    if isinstance(resp, AttributeError) and "'dict' object has no attribute 'tool'" in error_str:
                        print("\n *** DETECTED SPECIFIC ATTRIBUTE ERROR *** \n")
                else:
                    print(f"Tool '{tool_name}' executed.")
                    content_str = str(resp)
                    tool_messages.append(ToolMessage(content=content_str, tool_call_id=tool_call_id, name=tool_name))
                    if tool_name == "check_drug_interactions":
                        # Extract warnings so the router can trigger reflection.
                        try:
                            result_data = json.loads(content_str)
                            if result_data.get("status") == "warning" and result_data.get("warnings"):
                                print(f" Interaction check returned warnings: {result_data['warnings']}")
                                interaction_warnings_found.extend(result_data["warnings"])
                        except Exception as e:
                            print(f" Error processing interaction check result: {e}")
        except Exception as e:
            # Outer failure (e.g. batch itself raised): answer every call that
            # has not already received a ToolMessage.
            print(f"CRITICAL TOOL NODE ERROR: {e}")
            traceback.print_exc()
            error_content = json.dumps({"status": "error", "message": f"Internal error: {e}"})
            processed_ids = {msg.tool_call_id for msg in tool_messages}
            # IDIOM FIX: was a side-effect list comprehension in the original.
            for call in valid_tool_calls_for_execution:
                if call['id'] not in processed_ids:
                    tool_messages.append(ToolMessage(content=error_content, tool_call_id=call['id'], name=call['name']))
    print(f"Returning {len(tool_messages)} tool messages. Warnings: {bool(interaction_warnings_found)}")
    return {"messages": tool_messages, "interaction_warnings": interaction_warnings_found or None}
|
| 252 |
-
def reflection_node(state: AgentState):
    """LangGraph node: run a focused LLM safety review of interaction warnings.

    Locates the AIMessage whose tool calls produced the warnings (by walking
    messages newest-first and matching check_drug_interactions tool_call_ids),
    then asks the bare LLM (no tools) to review the plan. Always clears
    interaction_warnings in the returned state so reflection runs once.
    """
    print("\n---REFLECTION NODE---")
    interaction_warnings = state.get("interaction_warnings")
    if not interaction_warnings:
        print("Warning: Reflection node called without warnings.")
        return {"messages": [], "interaction_warnings": None}
    print(f"Reviewing interaction warnings: {interaction_warnings}")
    triggering_ai_message = None
    relevant_tool_call_ids = set()
    # Walk backwards: collect interaction-check ToolMessage ids, then find the
    # most recent AIMessage that issued one of those calls.
    for msg in reversed(state['messages']):
        if isinstance(msg, ToolMessage) and msg.name == "check_drug_interactions":
            relevant_tool_call_ids.add(msg.tool_call_id)
        if isinstance(msg, AIMessage) and msg.tool_calls:
            if any(tc['id'] in relevant_tool_call_ids for tc in msg.tool_calls):
                triggering_ai_message = msg
                break
    if not triggering_ai_message:
        print("Error: Could not find triggering AI message for reflection.")
        return {"messages": [AIMessage(content="Internal Error: Reflection context missing.")], "interaction_warnings": None}
    # NOTE(review): assigned but not referenced in the abbreviated prompt below;
    # presumably interpolated by the full prompt text — confirm before removing.
    original_plan_proposal_context = triggering_ai_message.content
    reflection_prompt_text = f"""You are SynapseAI, performing a critical safety review... [PROMPT OMITTED FOR BREVITY]"""  # Use full prompt
    reflection_messages = [SystemMessage(content="Perform focused safety review based on interaction warnings."), HumanMessage(content=reflection_prompt_text)]
    print("Invoking LLM for reflection...")
    try:
        reflection_response = llm.invoke(reflection_messages)
        print(f"Reflection Response: {reflection_response.content}")
        final_ai_message = AIMessage(content=reflection_response.content)
    except Exception as e:
        print(f"ERROR during reflection: {e}")
        traceback.print_exc()
        final_ai_message = AIMessage(content=f"Error during safety reflection: {e}")
    # Return reflection response and clear warnings so routing resumes normally.
    return {"messages": [final_ai_message], "interaction_warnings": None}
|
| 270 |
|
| 271 |
-
#
|
| 272 |
-
|
| 273 |
-
|
| 274 |
-
print("\n---ROUTING DECISION (Agent Output)---"); last_message = state['messages'][-1] if state['messages'] else None;
|
| 275 |
-
if not isinstance(last_message, AIMessage): return "end_conversation_turn";
|
| 276 |
-
if "Sorry, an internal error occurred" in last_message.content: return "end_conversation_turn";
|
| 277 |
-
if getattr(last_message, 'tool_calls', None): return "continue_tools"; else: return "end_conversation_turn";
|
| 278 |
-
def after_tools_router(state: AgentState) -> str:
    """Route the graph after tool execution.

    Returns "reflect_on_warnings" when the tool node surfaced interaction
    warnings, otherwise "continue_to_agent".
    """
    print("\n---ROUTING DECISION (After Tools)---")
    # Guard clause instead of if/else: warnings take priority.
    if state.get("interaction_warnings"):
        print("Routing: Warnings found -> Reflection")
        return "reflect_on_warnings"
    print("Routing: No warnings -> Agent")
    return "continue_to_agent"
|
| 283 |
|
| 284 |
-
|
| 285 |
-
|
| 286 |
-
    def __init__(self):
        """Build and compile the LangGraph state machine.

        Topology: agent -> (tools | END) via should_continue;
        tools -> (reflection | agent) via after_tools_router;
        reflection -> agent. The compiled graph is stored on self.graph_app.
        """
        workflow = StateGraph(AgentState)
        workflow.add_node("agent", agent_node)
        workflow.add_node("tools", tool_node)
        workflow.add_node("reflection", reflection_node)
        workflow.set_entry_point("agent")
        workflow.add_conditional_edges("agent", should_continue, {"continue_tools": "tools", "end_conversation_turn": END})
        workflow.add_conditional_edges("tools", after_tools_router, {"reflect_on_warnings": "reflection", "continue_to_agent": "agent"})
        workflow.add_edge("reflection", "agent")
        self.graph_app = workflow.compile()
        print("ClinicalAgent initialized and LangGraph compiled.")
|
| 292 |
-
    def invoke_turn(self, state: Dict) -> Dict:
        """Run one conversation turn through the compiled graph.

        Args:
            state: Current AgentState-shaped dict (messages, patient_data, ...).

        Returns:
            The final graph state; on failure, the input state with an
            apologetic AIMessage appended (never raises).
        """
        print(f"Invoking graph with state keys: {state.keys()}")
        try:
            # recursion_limit bounds agent<->tools loops within a single turn.
            final_state = self.graph_app.invoke(state, {"recursion_limit": 15})
            final_state.setdefault('summary', state.get('summary'))
            final_state.setdefault('interaction_warnings', None)
            return final_state
        except Exception as e:
            print(f"CRITICAL ERROR during graph invocation: {type(e).__name__} - {e}")
            traceback.print_exc()
            error_msg = AIMessage(content=f"Sorry, error occurred: {e}")
            return {"messages": state.get('messages', []) + [error_msg], "patient_data": state.get('patient_data'), "summary": state.get('summary'), "interaction_warnings": None}
|
|
|
|
| 1 |
+
# app.py
|
| 2 |
+
import streamlit as st
|
| 3 |
import json
|
| 4 |
import re
|
| 5 |
import os
|
|
|
|
| 6 |
import traceback
|
| 7 |
+
from dotenv import load_dotenv
|
| 8 |
|
| 9 |
+
# Import agent logic and message types from agent.py
|
| 10 |
+
try:
|
| 11 |
+
from agent import ClinicalAgent, AgentState, check_red_flags
|
| 12 |
+
from langchain_core.messages import HumanMessage, AIMessage, ToolMessage
|
| 13 |
+
except ImportError as e:
|
| 14 |
+
st.error(f"Failed to import from agent.py: {e}. Make sure agent.py is in the same directory.")
|
| 15 |
+
st.stop()
|
| 16 |
|
| 17 |
+
# --- Environment Variable Loading & Validation ---
|
| 18 |
+
load_dotenv()
|
| 19 |
+
# Check keys required by agent.py are present before initializing the agent
|
| 20 |
UMLS_API_KEY = os.environ.get("UMLS_API_KEY")
|
| 21 |
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
|
| 22 |
TAVILY_API_KEY = os.environ.get("TAVILY_API_KEY")
|
| 23 |
+
# Collect every required key that is unset, then abort with one clear message.
missing_keys = [
    key_name
    for key_name, key_value in (
        ("UMLS_API_KEY", UMLS_API_KEY),
        ("GROQ_API_KEY", GROQ_API_KEY),
        ("TAVILY_API_KEY", TAVILY_API_KEY),
    )
    if not key_value
]
if missing_keys:
    st.error(f"Missing required API Key(s): {', '.join(missing_keys)}. Please set them in Hugging Face Space Secrets or environment variables.")
    st.stop()
|
| 30 |
+
|
| 31 |
+
# --- App Configuration ---
|
| 32 |
+
class ClinicalAppSettings:
    """Static UI configuration for the Streamlit front end."""
    # Browser tab / page header title.
    APP_TITLE = "SynapseAI (UMLS/FDA Integrated)"
    # Streamlit page layout mode.
    PAGE_LAYOUT = "wide"
    # Display string only; the actual model is configured in agent.py.
    MODEL_NAME_DISPLAY = "Llama3-70b (via Groq)" # Defined in agent.py
|
| 36 |
+
|
| 37 |
+
# --- Streamlit UI ---
|
| 38 |
+
def main():
|
| 39 |
+
st.set_page_config(page_title=ClinicalAppSettings.APP_TITLE, layout=ClinicalAppSettings.PAGE_LAYOUT)
|
| 40 |
+
st.title(f"🩺 {ClinicalAppSettings.APP_TITLE}")
|
| 41 |
+
st.caption(f"Interactive Assistant | LangGraph/Groq/Tavily/UMLS/OpenFDA | Model: {ClinicalAppSettings.MODEL_NAME_DISPLAY}")
|
| 42 |
+
|
| 43 |
+
# Initialize session state
|
| 44 |
+
if "messages" not in st.session_state:
|
| 45 |
+
st.session_state.messages = []
|
| 46 |
+
if "patient_data" not in st.session_state:
|
| 47 |
+
st.session_state.patient_data = None
|
| 48 |
+
if "summary" not in st.session_state:
|
| 49 |
+
st.session_state.summary = None
|
| 50 |
+
|
| 51 |
+
# Initialize the agent instance only once
|
| 52 |
+
if "agent" not in st.session_state:
|
| 53 |
+
try:
|
| 54 |
+
st.session_state.agent = ClinicalAgent()
|
| 55 |
+
print("ClinicalAgent successfully initialized in Streamlit session state.")
|
| 56 |
+
except Exception as e:
|
| 57 |
+
st.error(f"Failed to initialize Clinical Agent: {e}. Check API keys and dependencies.")
|
| 58 |
+
print(f"ERROR Initializing ClinicalAgent: {e}")
|
| 59 |
+
traceback.print_exc()
|
| 60 |
+
st.stop()
|
| 61 |
+
|
| 62 |
+
# --- Patient Data Input Sidebar ---
|
| 63 |
+
with st.sidebar:
|
| 64 |
+
st.header("📄 Patient Intake Form")
|
| 65 |
+
# Input fields... (Using shorter versions for brevity, assume full fields are here)
|
| 66 |
+
st.subheader("Demographics")
|
| 67 |
+
age = st.number_input("Age", 0, 120, 55, key="sb_age")
|
| 68 |
+
sex = st.selectbox("Sex", ["Male", "Female", "Other"], key="sb_sex")
|
| 69 |
+
|
| 70 |
+
st.subheader("HPI")
|
| 71 |
+
chief_complaint = st.text_input("Chief Complaint", "Chest pain", key="sb_cc")
|
| 72 |
+
hpi_details = st.text_area("HPI Details", "55 y/o male...", height=100, key="sb_hpi")
|
| 73 |
+
symptoms = st.multiselect(
|
| 74 |
+
"Symptoms",
|
| 75 |
+
["Nausea", "Diaphoresis", "SOB", "Dizziness", "Severe Headache", "Syncope", "Hemoptysis"],
|
| 76 |
+
default=["Nausea", "Diaphoresis"],
|
| 77 |
+
key="sb_sym"
|
| 78 |
+
)
|
| 79 |
|
| 80 |
+
st.subheader("History")
|
| 81 |
+
pmh = st.text_area("PMH", "HTN, HLD, DM2, History of MI", key="sb_pmh")
|
| 82 |
+
psh = st.text_area("PSH", "Appendectomy", key="sb_psh")
|
|
|
|
| 83 |
|
| 84 |
+
st.subheader("Meds & Allergies")
|
| 85 |
+
current_meds_str = st.text_area(
|
| 86 |
+
"Current Meds",
|
| 87 |
+
"Lisinopril 10mg daily\nMetformin 1000mg BID\nWarfarin 5mg daily",
|
| 88 |
+
key="sb_meds"
|
| 89 |
+
)
|
| 90 |
+
allergies_str = st.text_area("Allergies", "Penicillin (rash), Aspirin", key="sb_allergies")
|
| 91 |
|
| 92 |
+
st.subheader("Social/Family")
|
| 93 |
+
social_history = st.text_area("SH", "Smoker", key="sb_sh")
|
| 94 |
+
family_history = st.text_area("FHx", "Father MI", key="sb_fhx")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 95 |
|
| 96 |
+
st.subheader("Vitals & Exam")
|
| 97 |
+
col1, col2 = st.columns(2)
|
| 98 |
+
with col1:
|
| 99 |
+
temp_c = st.number_input("Temp C", 35.0, 42.0, 36.8, format="%.1f", key="sb_temp")
|
| 100 |
+
hr_bpm = st.number_input("HR", 30, 250, 95, key="sb_hr")
|
| 101 |
+
rr_rpm = st.number_input("RR", 5, 50, 18, key="sb_rr")
|
| 102 |
+
with col2:
|
| 103 |
+
bp_mmhg = st.text_input("BP", "155/90", key="sb_bp")
|
| 104 |
+
spo2_percent = st.number_input("SpO2", 70, 100, 96, key="sb_spo2")
|
| 105 |
+
pain_scale = st.slider("Pain", 0, 10, 8, key="sb_pain")
|
| 106 |
+
exam_notes = st.text_area("Exam Notes", "Awake, alert...", height=68, key="sb_exam")
|
| 107 |
|
| 108 |
+
if st.button("Start/Update Consultation", key="sb_start"):
|
| 109 |
+
# Compile data...
|
| 110 |
+
current_meds_list = [med.strip() for med in current_meds_str.split('\n') if med.strip()]
|
| 111 |
+
current_med_names_only = []
|
| 112 |
+
for med in current_meds_list:
|
| 113 |
+
match = re.match(r"^\s*([a-zA-Z\-]+)", med)
|
| 114 |
+
if match:
|
| 115 |
+
current_med_names_only.append(match.group(1).lower())
|
|
|
|
| 116 |
|
| 117 |
+
allergies_list = []
|
| 118 |
+
for a in allergies_str.split(','):
|
| 119 |
+
cleaned_allergy = a.strip()
|
| 120 |
+
if cleaned_allergy:
|
| 121 |
+
match = re.match(r"^\s*([a-zA-Z\-\s/]+)(?:\s*\(.*\))?", cleaned_allergy)
|
| 122 |
+
name_part = match.group(1).strip().lower() if match else cleaned_allergy.lower()
|
| 123 |
+
allergies_list.append(name_part)
|
|
|
|
| 124 |
|
| 125 |
+
# Update patient data in session state
|
| 126 |
+
st.session_state.patient_data = {
|
| 127 |
+
"demographics": {"age": age, "sex": sex},
|
| 128 |
+
"hpi": {"chief_complaint": chief_complaint, "details": hpi_details, "symptoms": symptoms},
|
| 129 |
+
"pmh": {"conditions": pmh},
|
| 130 |
+
"psh": {"procedures": psh},
|
| 131 |
+
"medications": {"current": current_meds_list, "names_only": current_med_names_only},
|
| 132 |
+
"allergies": allergies_list,
|
| 133 |
+
"social_history": {"details": social_history},
|
| 134 |
+
"family_history": {"details": family_history},
|
| 135 |
+
"vitals": {
|
| 136 |
+
"temp_c": temp_c,
|
| 137 |
+
"hr_bpm": hr_bpm,
|
| 138 |
+
"bp_mmhg": bp_mmhg,
|
| 139 |
+
"rr_rpm": rr_rpm,
|
| 140 |
+
"spo2_percent": spo2_percent,
|
| 141 |
+
"pain_scale": pain_scale
|
| 142 |
+
},
|
| 143 |
+
"exam_findings": {"notes": exam_notes}
|
| 144 |
+
}
|
| 145 |
|
| 146 |
+
# Call check_red_flags from agent module
|
| 147 |
+
red_flags = check_red_flags(st.session_state.patient_data)
|
| 148 |
+
st.sidebar.markdown("---")
|
| 149 |
+
if red_flags:
|
| 150 |
+
st.sidebar.warning("**Initial Red Flags:**")
|
| 151 |
+
for flag in red_flags:
|
| 152 |
+
st.sidebar.warning(f"- {flag.replace('Red Flag: ', '')}")
|
| 153 |
+
else:
|
| 154 |
+
st.sidebar.success("No immediate red flags.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 155 |
|
| 156 |
+
# Reset conversation and summary on new intake
|
| 157 |
+
initial_prompt = "Initiate consultation. Review patient data and begin analysis."
|
| 158 |
+
st.session_state.messages = [HumanMessage(content=initial_prompt)]
|
| 159 |
+
st.session_state.summary = None # Reset summary
|
| 160 |
+
st.success("Patient data loaded/updated.")
|
| 161 |
+
# Rerun might be needed if the main area should clear or update based on new data
|
| 162 |
+
st.rerun()
|
| 163 |
|
| 164 |
+
# --- Main Chat Interface Area ---
|
| 165 |
+
st.header("💬 Clinical Consultation")
|
| 166 |
+
# Display loop
|
| 167 |
+
for msg in st.session_state.messages:
|
| 168 |
+
if isinstance(msg, HumanMessage):
|
| 169 |
+
with st.chat_message("user"):
|
| 170 |
+
st.markdown(msg.content)
|
| 171 |
+
elif isinstance(msg, AIMessage):
|
| 172 |
+
with st.chat_message("assistant"):
|
| 173 |
+
ai_content = msg.content
|
| 174 |
+
structured_output = None
|
| 175 |
+
try:
|
| 176 |
+
# JSON Parsing logic...
|
| 177 |
+
json_match = re.search(r"```json\s*(\{.*?\})\s*```", ai_content, re.DOTALL | re.IGNORECASE)
|
| 178 |
+
if json_match:
|
| 179 |
+
json_str = json_match.group(1)
|
| 180 |
+
prefix = ai_content[:json_match.start()].strip()
|
| 181 |
+
suffix = ai_content[json_match.end():].strip()
|
| 182 |
+
if prefix:
|
| 183 |
+
st.markdown(prefix)
|
| 184 |
+
structured_output = json.loads(json_str)
|
| 185 |
+
if suffix:
|
| 186 |
+
st.markdown(suffix)
|
| 187 |
+
elif ai_content.strip().startswith("{") and ai_content.strip().endswith("}"):
|
| 188 |
+
structured_output = json.loads(ai_content)
|
| 189 |
+
ai_content = ""
|
| 190 |
+
else:
|
| 191 |
+
st.markdown(ai_content) # Display non-JSON content
|
| 192 |
+
except Exception as e:
|
| 193 |
+
st.markdown(ai_content)
|
| 194 |
+
print(f"Error parsing/displaying AI JSON: {e}")
|
| 195 |
|
| 196 |
+
if structured_output and isinstance(structured_output, dict):
|
| 197 |
+
# Structured JSON display logic...
|
| 198 |
+
st.divider()
|
| 199 |
+
st.subheader("📊 AI Analysis & Recommendations")
|
| 200 |
+
cols = st.columns(2)
|
| 201 |
+
with cols[0]:
|
| 202 |
+
st.markdown("**Assessment:**")
|
| 203 |
+
st.markdown(f"> {structured_output.get('assessment', 'N/A')}")
|
| 204 |
+
st.markdown("**Differential Diagnosis:**")
|
| 205 |
+
ddx = structured_output.get('differential_diagnosis', [])
|
| 206 |
+
if ddx:
|
| 207 |
+
for item in ddx:
|
| 208 |
+
likelihood = item.get('likelihood', 'Low')
|
| 209 |
+
medal = ('🥇' if likelihood.startswith('H') else '🥈' if likelihood.startswith('M') else '🥉')
|
| 210 |
+
expander_title = f"{medal} {item.get('diagnosis', 'Unknown')} ({likelihood})"
|
| 211 |
+
with st.expander(expander_title):
|
| 212 |
+
st.write(f"**Rationale:** {item.get('rationale', 'N/A')}")
|
| 213 |
+
else:
|
| 214 |
+
st.info("No DDx provided.")
|
| 215 |
+
st.markdown("**Risk Assessment:**")
|
| 216 |
+
risk = structured_output.get('risk_assessment', {})
|
| 217 |
+
flags = risk.get('identified_red_flags', [])
|
| 218 |
+
concerns = risk.get('immediate_concerns', [])
|
| 219 |
+
comps = risk.get('potential_complications', [])
|
| 220 |
+
if flags:
|
| 221 |
+
st.warning(f"**Flags:** {', '.join(flags)}")
|
| 222 |
+
if concerns:
|
| 223 |
+
st.warning(f"**Concerns:** {', '.join(concerns)}")
|
| 224 |
+
if comps:
|
| 225 |
+
st.info(f"**Potential Complications:** {', '.join(comps)}")
|
| 226 |
+
if not flags and not concerns:
|
| 227 |
+
st.success("No major risks highlighted.")
|
| 228 |
+
with cols[1]:
|
| 229 |
+
st.markdown("**Recommended Plan:**")
|
| 230 |
+
plan = structured_output.get('recommended_plan', {})
|
| 231 |
+
for section in ["investigations","therapeutics","consultations","patient_education"]:
|
| 232 |
+
st.markdown(f"_{section.replace('_',' ').capitalize()}:_")
|
| 233 |
+
items = plan.get(section)
|
| 234 |
+
if items and isinstance(items, list):
|
| 235 |
+
for it in items:
|
| 236 |
+
st.markdown(f"- {it}")
|
| 237 |
+
elif items:
|
| 238 |
+
st.markdown(f"- {items}")
|
| 239 |
+
else:
|
| 240 |
+
st.markdown("_None_")
|
| 241 |
+
st.markdown("")
|
| 242 |
+
st.markdown("**Rationale & Guideline Check:**")
|
| 243 |
+
st.markdown(f"> {structured_output.get('rationale_summary', 'N/A')}")
|
| 244 |
+
interaction_summary = structured_output.get('interaction_check_summary', "")
|
| 245 |
+
if interaction_summary:
|
| 246 |
+
st.markdown("**Interaction Check Summary:**")
|
| 247 |
+
st.markdown(f"> {interaction_summary}")
|
| 248 |
+
st.divider()
|
| 249 |
|
| 250 |
+
# Tool Call Display
|
| 251 |
+
if getattr(msg, 'tool_calls', None):
|
| 252 |
+
with st.expander("🛠️ AI requested actions", expanded=False):
|
| 253 |
+
if msg.tool_calls:
|
| 254 |
+
for tc in msg.tool_calls:
|
| 255 |
+
try:
|
| 256 |
+
st.code(
|
| 257 |
+
f"Action: {tc.get('name', 'Unknown Tool')}\nArgs: {json.dumps(tc.get('args', {}), indent=2)}",
|
| 258 |
+
language="json"
|
| 259 |
+
)
|
| 260 |
+
except Exception as display_e:
|
| 261 |
+
st.error(f"Could not display tool call args: {display_e}", icon="⚠️")
|
| 262 |
+
st.code(f"Action: {tc.get('name', 'Unknown Tool')}\nRaw Args: {tc.get('args')}")
|
| 263 |
+
else:
|
| 264 |
+
st.caption("_No actions requested._")
|
| 265 |
+
|
| 266 |
+
# --- Chat Input Logic ---
|
| 267 |
+
if prompt := st.chat_input("Your message or follow-up query..."):
|
| 268 |
+
if not st.session_state.patient_data:
|
| 269 |
+
st.warning("Please load patient data first.")
|
| 270 |
+
st.stop()
|
| 271 |
+
if 'agent' not in st.session_state or not st.session_state.agent:
|
| 272 |
+
st.error("Agent not initialized. Check logs.")
|
| 273 |
+
st.stop()
|
| 274 |
|
| 275 |
+
# Append user message and display immediately
|
| 276 |
+
user_message = HumanMessage(content=prompt)
|
| 277 |
+
st.session_state.messages.append(user_message)
|
| 278 |
+
with st.chat_message("user"):
|
| 279 |
+
st.markdown(prompt)
|
| 280 |
|
| 281 |
+
# Prepare state for the agent
|
| 282 |
+
current_state_dict = {
|
| 283 |
+
"messages": st.session_state.messages,
|
| 284 |
+
"patient_data": st.session_state.patient_data,
|
| 285 |
+
"summary": st.session_state.get("summary"),
|
| 286 |
+
"interaction_warnings": None # Start clean
|
| 287 |
+
}
|
| 288 |
|
| 289 |
+
# Invoke the agent's graph for one turn
|
| 290 |
+
with st.spinner("SynapseAI is processing..."):
|
| 291 |
+
try:
|
| 292 |
+
final_state = st.session_state.agent.invoke_turn(current_state_dict)
|
| 293 |
+
st.session_state.messages = final_state.get('messages', [])
|
| 294 |
+
st.session_state.summary = final_state.get('summary')
|
| 295 |
+
except Exception as e:
|
| 296 |
+
print(f"CRITICAL ERROR during agent invocation: {type(e).__name__} - {e}")
|
| 297 |
+
traceback.print_exc()
|
| 298 |
+
st.error(f"An error occurred during processing: {e}", icon="❌")
|
| 299 |
+
st.session_state.messages.append(AIMessage(content=f"Error during processing: {e}"))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 300 |
|
| 301 |
+
st.rerun()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 302 |
|
| 303 |
+
# Disclaimer
|
| 304 |
+
st.markdown("---")
|
| 305 |
+
st.warning("**Disclaimer:** SynapseAI is for demonstration...")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 306 |
|
| 307 |
+
if __name__ == "__main__":
|
| 308 |
+
main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|