import asyncio
import os
import csv
from typing import Literal
import chainlit as cl
import shutil
from dotenv import load_dotenv
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import UnstructuredExcelLoader, PyMuPDFLoader
from langchain_qdrant import QdrantVectorStore
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.tools import tool
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_openai import ChatOpenAI
from langgraph.graph import END, StateGraph, START
from langgraph.graph.message import MessagesState
from langgraph.prebuilt import ToolNode
from qdrant_client import QdrantClient
from qdrant_client.models import Distance, VectorParams
from langchain_core.prompts import ChatPromptTemplate
from operator import itemgetter
from langchain_core.output_parsers import StrOutputParser
from langchain_experimental.text_splitter import SemanticChunker
# Load environment variables
load_dotenv()
#####LangSmith uncomment block for LangSmith tracing
# import uuid
# os.environ["LANGCHAIN_PROJECT"] = f"HEAL-SYNC - {uuid.uuid4().hex[0:8]}"
# os.environ["LANGCHAIN_TRACING_V2"] = "true"
# print(os.environ["LANGCHAIN_PROJECT"])
###########LangSmith
# ==================== CONSTANTS ====================
# Paths and directories
UPLOAD_PATH = "./uploads"
INITIAL_EMBEDDINGS_DIR = "./initial_embeddings"
INITIAL_EMBEDDINGS_NAME = "initial_embeddings"
USER_EMBEDDINGS_NAME = "user_embeddings"
os.makedirs(UPLOAD_PATH, exist_ok=True)
# Model IDs
EMBEDDING_MODEL_ID = "pritamdeka/S-PubMedBert-MS-MARCO"
#EMBEDDING_MODEL_ID = "Snowflake/snowflake-arctic-embed-m"
general_llm_model = "gpt-4o"
chat_model = ChatOpenAI(model_name=general_llm_model)
# NIH HEAL CDE core domains
NIH_HEAL_CORE_DOMAINS = [
"Anxiety",
"Depression",
"Global satisfaction with treatment",
"Pain catastrophizing",
"Pain interference",
"Pain intensity",
"Physical functioning",
"Quality of Life (QoL)",
"Sleep",
"Substance Use Screener"
]
# ==================== EMBEDDING MODEL SETUP (flexible model selection) ====================
def get_embedding_model(model_id):
"""Creates and returns the appropriate embedding model based on the model ID."""
if "text-embedding" in model_id:
from langchain_openai import OpenAIEmbeddings
return OpenAIEmbeddings(model=model_id)
else:
return HuggingFaceEmbeddings(model_name=model_id)
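# Illustrative: get_embedding_model("text-embedding-3-small") would return an
# OpenAIEmbeddings instance, while the default PubMedBERT ID above resolves to
# a HuggingFaceEmbeddings model.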
def initialize_embedding_models():
"""Initialize a single embedding model for all document types"""
global embedding_model
# Initialize a single model for all document types
embedding_model = get_embedding_model(EMBEDDING_MODEL_ID)
print(f"Initialized embedding model: {EMBEDDING_MODEL_ID}")
# Initialize the embedding model
initialize_embedding_models()
# ==================== QDRANT SETUP ====================
# Create a global Qdrant client for the core embeddings (available to all sessions)
global_qdrant_client = QdrantClient(":memory:")
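# Note: ":memory:" runs Qdrant in-process with no persistence, so both the
# core and session collections are rebuilt from scratch on every restart.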
# Factory for session-specific Qdrant clients (one per Chainlit session)
def create_session_qdrant_client():
return QdrantClient(":memory:")
# ==================== DOCUMENT PROCESSING ====================
# Create a semantic splitter for documents
semantic_splitter = SemanticChunker(embedding_model, add_start_index=True, buffer_size=30)
# Keep the recursive splitter as a fallback option
recursive_splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=50)
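# The semantic splitter groups text by embedding similarity (slower, but the
# chunks are more coherent); the recursive splitter is a cheap size-based fallback.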
def format_docs(docs):
"""Format a list of documents into a single string."""
return "\n\n".join(doc.page_content for doc in docs)
# ==================== CORE EMBEDDINGS PROCESSING ====================
def load_and_chunk_core_reference_files():
"""Loads all .xlsx files from the initial embeddings directory and splits them into chunks."""
text_splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=50)
all_chunks = []
file_count = 0
print("Processing Excel files...")
for file in os.listdir(INITIAL_EMBEDDINGS_DIR):
if file.endswith(".xlsx"):
file_path = os.path.join(INITIAL_EMBEDDINGS_DIR, file)
try:
loader = UnstructuredExcelLoader(file_path)
documents = loader.load()
chunks = text_splitter.split_documents(documents)
for chunk in chunks:
chunk.metadata = chunk.metadata or {}
chunk.metadata["filename"] = file
chunk.metadata["type"] = "excel"
all_chunks.extend(chunks)
file_count += 1
print(f"Processed: {file} - {len(chunks)} chunks")
except Exception as e:
print(f"Error processing {file}: {str(e)}")
print(f"Processed {file_count} Excel files with a total of {len(all_chunks)} chunks.")
return all_chunks
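# Note: the loader above assumes INITIAL_EMBEDDINGS_DIR exists and holds the
# NIH HEAL CDE reference workbooks as .xlsx files; other files are ignored.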
def embed_core_reference_in_qdrant(chunks):
"""Embeds core reference chunks and stores them in the global Qdrant instance."""
global embedding_model, global_qdrant_client
if not chunks:
print("No Excel files found to process or all files were empty.")
return None
# Ensure embedding model is valid
if embedding_model is None:
print("ERROR: No embedding model available. Initializing now.")
initialize_embedding_models()
print(f"Using embedding model: {EMBEDDING_MODEL_ID}")
print("Creating vector store for core reference data...")
try:
# First, check if collection exists and delete it if it does
if INITIAL_EMBEDDINGS_NAME in [c.name for c in global_qdrant_client.get_collections().collections]:
global_qdrant_client.delete_collection(INITIAL_EMBEDDINGS_NAME)
# Create the collection with proper parameters
# Get the embedding dimension from the model
embedding_dimension = len(embedding_model.embed_query("Sample text"))
global_qdrant_client.create_collection(
collection_name=INITIAL_EMBEDDINGS_NAME,
vectors_config=VectorParams(size=embedding_dimension, distance=Distance.COSINE)
)
# Create the vector store
vector_store = QdrantVectorStore(
client=global_qdrant_client,
collection_name=INITIAL_EMBEDDINGS_NAME,
embedding=embedding_model
)
# Add documents to the vector store
vector_store.add_documents(chunks)
print(f"Successfully loaded all .xlsx files into Qdrant collection '{INITIAL_EMBEDDINGS_NAME}'.")
return vector_store
except Exception as e:
print(f"Error creating vector store: {str(e)}")
print(f"Embedding model status: {embedding_model is not None}")
return None
# ==================== RAG SETUP ====================
# RAG template for all retrievals
RAG_TEMPLATE = """\
You are a helpful and kind assistant. Use the context provided below to answer the question.
If you do not know the answer, or are unsure, say you don't know.
Query:
{question}
Context:
{context}
"""
rag_prompt = ChatPromptTemplate.from_template(RAG_TEMPLATE)
# Initialize core embeddings on application startup
core_vectorstore = None
core_retriever = None # Global core retriever
def initialize_core_reference_embeddings():
"""Loads all .xlsx files, extracts text, embeds, and stores in global Qdrant."""
global core_vectorstore, core_retriever
chunks = load_and_chunk_core_reference_files()
core_vectorstore = embed_core_reference_in_qdrant(chunks)
# Create the core retriever if vector store was created successfully
if core_vectorstore:
core_retriever = core_vectorstore.as_retriever(search_kwargs={"k": 10})
print("Core reference retriever created successfully.")
else:
print("Failed to create core reference retriever: No vector store available.")
return core_vectorstore
# Initialize core reference embeddings
core_vectorstore = initialize_core_reference_embeddings()
# Chain for retrieving from core reference embeddings
if core_retriever:
    core_reference_retrieval_chain = (
        {"context": itemgetter("question") | core_retriever | format_docs,
         "question": itemgetter("question")}
        | rag_prompt
        | chat_model
        | StrOutputParser()
    )
else:
    # Keep the name defined so later lookups fail gracefully instead of
    # raising a NameError when the core data is unavailable
    core_reference_retrieval_chain = None
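# Illustrative usage (assuming the core data loaded):
#   answer = await core_reference_retrieval_chain.ainvoke(
#       {"question": "Which instruments measure pain intensity?"})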
# ==================== PROTOCOL DOCUMENT PROCESSING ====================
async def load_and_chunk_protocol_files(files):
"""Load protocol PDF files and split them into chunks with metadata."""
print(f"Loading {len(files)} uploaded PDF files")
documents_with_metadata = []
for file in files:
print(f"Processing file: {file.name}, size: {file.size} bytes")
file_path = os.path.join(UPLOAD_PATH, file.name)
        shutil.copyfile(file.path, file_path)
        documents = []  # keep defined so the fallback below cannot hit a NameError
        try:
            loader = PyMuPDFLoader(file_path)
            documents = loader.load()
# Add source filename to metadata for all documents
for doc in documents:
doc.metadata["source"] = file.name
doc.metadata["type"] = "pdf"
# Use semantic_splitter.split_documents to preserve metadata
chunks = semantic_splitter.split_documents(documents)
documents_with_metadata.extend(chunks)
print(f"Successfully processed {file.name}, extracted {len(chunks)} chunks")
except Exception as e:
print(f"Error processing {file.name}: {str(e)}")
            # Fall back to the recursive splitter; this only helps if the PDF
            # itself loaded successfully
            if documents:
                try:
                    print(f"Falling back to recursive character splitting for {file.name}")
                    chunks = recursive_splitter.split_documents(documents)
                    documents_with_metadata.extend(chunks)
                    print(f"Fallback successful, extracted {len(chunks)} chunks")
                except Exception as fallback_error:
                    print(f"Fallback also failed: {str(fallback_error)}")
return documents_with_metadata
async def embed_protocol_in_qdrant(documents_with_metadata, session_qdrant_client, model_name=EMBEDDING_MODEL_ID):
"""Create a vector store and embed protocol chunks into session-specific Qdrant."""
global embedding_model
print(f"Using embedding model: {model_name}")
try:
# First, check if collection exists and delete it if it does
if USER_EMBEDDINGS_NAME in [c.name for c in session_qdrant_client.get_collections().collections]:
session_qdrant_client.delete_collection(USER_EMBEDDINGS_NAME)
# Create the collection with proper parameters
# Get the embedding dimension from the model
embedding_dimension = len(embedding_model.embed_query("Sample text"))
session_qdrant_client.create_collection(
collection_name=USER_EMBEDDINGS_NAME,
vectors_config=VectorParams(size=embedding_dimension, distance=Distance.COSINE)
)
# Create the vector store
user_vectorstore = QdrantVectorStore(
client=session_qdrant_client,
collection_name=USER_EMBEDDINGS_NAME,
embedding=embedding_model
)
# Add documents to the vector store
user_vectorstore.add_documents(documents_with_metadata)
print(f"Added {len(documents_with_metadata)} chunks from uploaded files to collection '{USER_EMBEDDINGS_NAME}'")
return user_vectorstore
except Exception as e:
print(f"Error creating vector store: {str(e)}")
return None
async def process_uploaded_protocol(files, session_qdrant_client, model_name=EMBEDDING_MODEL_ID):
"""Process uploaded protocol PDF files and add them to a session-specific vector store collection"""
documents_with_metadata = await load_and_chunk_protocol_files(files)
return await embed_protocol_in_qdrant(documents_with_metadata, session_qdrant_client, model_name)
# ==================== RETRIEVAL FUNCTIONS ====================
async def retrieve_from_core(query, k=5):
"""Retrieve documents from core reference database"""
global core_retriever
if not core_retriever:
print("No core retriever available")
return []
    # The shared retriever was built with k=10; build a one-off retriever when
    # the caller asks for a different k
    if k != 10:
        retriever = core_vectorstore.as_retriever(search_kwargs={"k": k})
        return await retriever.ainvoke(query)
    return await core_retriever.ainvoke(query)
async def retrieve_from_protocol(query, k=5):
"""Retrieve documents from protocol database"""
# Get the session-specific client
session_qdrant_client = cl.user_session.get("session_qdrant_client")
if not session_qdrant_client:
print("No session client available")
return []
# Check if collection exists
try:
if USER_EMBEDDINGS_NAME not in [c.name for c in session_qdrant_client.get_collections().collections]:
print("No protocol document embedded")
return []
except Exception as e:
print(f"Error checking collections: {str(e)}")
return []
# Create vector store with the session client
protocol_vectorstore = QdrantVectorStore(
client=session_qdrant_client,
collection_name=USER_EMBEDDINGS_NAME,
embedding=embedding_model
)
# Create and use retriever
protocol_retriever = protocol_vectorstore.as_retriever(search_kwargs={"k": k})
return await protocol_retriever.ainvoke(query)
# ==================== TOOL DEFINITIONS ====================
@tool
async def search_all_data(query: str, doc_type: str | None = None) -> str:
"""Search all data or filter by document type (protocol/core_reference)"""
try:
chain = await create_rag_chain(doc_type)
return await chain.ainvoke({"question": query})
except Exception as e:
return f"Error searching data: {str(e)}"
@tool
async def analyze_protocol_domains(export_csv: bool = True) -> str:
"""Analyze all NIH HEAL CDE core domains and identify instruments used in the protocol(s)."""
# Check if protocol document exists
session_qdrant_client = cl.user_session.get("session_qdrant_client")
if not session_qdrant_client or USER_EMBEDDINGS_NAME not in [c.name for c in session_qdrant_client.get_collections().collections]:
return "No protocol document has been uploaded yet."
# Get the names of the uploaded protocol files from the user session
protocol_names = cl.user_session.get("protocol_filenames", ["Unknown Protocol"])
    # Run all domain searches concurrently with asyncio.gather (imported at top)
tasks = [_search_protocol_for_instruments(domain) for domain in NIH_HEAL_CORE_DOMAINS]
results = await asyncio.gather(*tasks)
# Format the results as a markdown table
title = "NIH HEAL CDE Core Domains Analysis"
result = f"# {title}\n\n"
# Create header with all protocol names
result += "| Domain |"
for protocol_name in protocol_names:
result += f" Protocol Instrument - {protocol_name} |"
result += "\n|--------|"
for _ in protocol_names:
result += "-" * (len("Protocol Instrument - ") + 15) + "|"
result += "\n"
# Process results and build table rows
for domain_result in results:
domain = domain_result["domain"]
instruments = domain_result.get("instruments", {})
# Debug output
print(f"Domain: {domain}, Instruments: {instruments}")
result += f"| {domain} |"
for protocol_name in protocol_names:
instrument = instruments.get(protocol_name, "Not identified")
# Clean up any trailing periods or whitespace
if isinstance(instrument, str):
instrument = instrument.strip().rstrip('.')
result += f" {instrument} |"
result += "\n"
# Export to CSV if requested
csv_path = None
if export_csv:
# Create output directory if it doesn't exist
output_dir = "./outputs"
os.makedirs(output_dir, exist_ok=True)
# Full path for the CSV file
filename = "domain_analysis.csv"
csv_path = os.path.join(output_dir, filename)
# Write the data to CSV
try:
with open(csv_path, 'w', newline='') as csvfile:
# Create fieldnames with all protocol names
fieldnames = ['Domain']
for protocol_name in protocol_names:
fieldnames.append(f'Protocol Instrument - {protocol_name}')
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for domain_result in results:
domain = domain_result["domain"]
instruments = domain_result.get("instruments", {})
row = {'Domain': domain}
for protocol_name in protocol_names:
instrument = instruments.get(protocol_name, "Not identified")
if isinstance(instrument, str):
instrument = instrument.strip().rstrip('.')
row[f'Protocol Instrument - {protocol_name}'] = instrument
writer.writerow(row)
# Store the CSV path in the user session
cl.user_session.set("csv_path", csv_path)
except Exception as e:
result += f"\n\nError creating CSV file: {str(e)}"
return result
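# The returned markdown looks roughly like this (illustrative):
#   | Domain  | Protocol Instrument - study.pdf |
#   |---------|---------------------------------|
#   | Anxiety | PROMIS Anxiety Short Form       |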
async def _search_protocol_for_instruments(domain: str) -> dict:
"""Search the protocol for instruments related to a specific NIH HEAL CDE core domain."""
global embedding_model
    # Get the names of the uploaded protocol files
    protocol_names = cl.user_session.get("protocol_filenames", ["Unknown Protocol"])
    # Get the session-specific Qdrant client
    session_qdrant_client = cl.user_session.get("session_qdrant_client")
    if not session_qdrant_client:
        # Use the "instruments" key so the table-building caller surfaces the error
        return {"domain": domain, "instruments": {name: "No session Qdrant client found" for name in protocol_names}}
# Check if user collection exists
try:
# Check if collection exists
        if USER_EMBEDDINGS_NAME not in [c.name for c in session_qdrant_client.get_collections().collections]:
            return {"domain": domain, "instruments": {name: "No protocol document embedded" for name in protocol_names}}
# Create retriever for user documents
user_retriever = QdrantVectorStore(
client=session_qdrant_client,
collection_name=USER_EMBEDDINGS_NAME,
embedding=embedding_model
).as_retriever(search_kwargs={"k": 20}) # Increase k to get more documents
    except Exception as e:
        print(f"Error accessing user vector store: {str(e)}")
        return {"domain": domain, "instruments": {name: "Error accessing protocol" for name in protocol_names}}
# Create the chat model with the specified model from constants
domain_chat_model = ChatOpenAI(model_name=general_llm_model, temperature=0)
# Results for each protocol
protocol_results = {}
    # Search the core reference data for standard instruments matching this domain
    core_reference_query = f"What are standard instruments or measures for {domain}?"
    if core_reference_retrieval_chain is not None:
        core_reference_instruments = await core_reference_retrieval_chain.ainvoke({"question": core_reference_query})
    else:
        core_reference_instruments = "No core reference data available."
# Process each protocol separately
for protocol_name in protocol_names:
try:
# Search for instruments related to this domain in this specific protocol
query = f"What instrument or measure is used for {domain} in the protocol named {protocol_name}?"
# Retrieve relevant chunks from the protocol
docs = await user_retriever.ainvoke(query)
# Filter documents to only include those from this protocol
protocol_docs = [doc for doc in docs if doc.metadata.get("source") == protocol_name]
# If no documents match this protocol, try a more general search
if not protocol_docs:
print(f"No specific docs found for {protocol_name}, using all retrieved docs")
protocol_docs = docs
protocol_context = format_docs(protocol_docs)
# Use the model to identify the most likely instrument for this domain in this protocol
prompt = f"""
Based on the protocol information and known instruments, identify which instrument is being used for the domain: {domain} in the protocol: {protocol_name}
Protocol information:
{protocol_context}
Known instruments for this domain:
{core_reference_instruments}
Respond with only the name of the identified instrument. If you cannot identify a specific instrument, respond with "Not identified".
"""
instrument = await domain_chat_model.ainvoke([HumanMessage(content=prompt)])
# Store the result for this protocol
protocol_results[protocol_name] = {
"instrument": instrument.content.strip(),
"context": protocol_context
}
print(f"For {domain} in {protocol_name}: {instrument.content.strip()}")
except Exception as e:
print(f"Error identifying instrument for {domain} in {protocol_name}: {str(e)}")
protocol_results[protocol_name] = {
"instrument": "Error during identification",
"context": str(e)
}
# Combine results into a single response
combined_instruments = {}
for protocol_name, result in protocol_results.items():
combined_instruments[protocol_name] = result["instrument"]
return {
"domain": domain,
"instruments": combined_instruments,
"known_instruments": core_reference_instruments
}
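# Return shape consumed by analyze_protocol_domains:
#   {"domain": ..., "instruments": {protocol_name: instrument}, "known_instruments": ...}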
async def create_rag_chain(doc_type=None):
"""Create a RAG chain based on the document type."""
# Get the session-specific Qdrant client
session_qdrant_client = cl.user_session.get("session_qdrant_client")
# Create retrievers based on document type
if doc_type == "protocol" and session_qdrant_client:
# Check if user collection exists
try:
if USER_EMBEDDINGS_NAME in [c.name for c in session_qdrant_client.get_collections().collections]:
protocol_vectorstore = QdrantVectorStore(
client=session_qdrant_client,
collection_name=USER_EMBEDDINGS_NAME,
embedding=embedding_model
)
retriever = protocol_vectorstore.as_retriever(search_kwargs={"k": 5})
else:
raise ValueError("No protocol document embedded")
except Exception as e:
raise ValueError(f"Error accessing protocol: {str(e)}")
elif doc_type == "core_reference":
if core_vectorstore:
retriever = core_vectorstore.as_retriever(search_kwargs={"k": 5})
else:
raise ValueError("Core reference data not available")
else:
# Default: search both if available
retrievers = []
        # Add the core reference retriever if available (local name avoids
        # shadowing the module-level core_retriever)
        if core_vectorstore:
            core_ref_retriever = core_vectorstore.as_retriever(search_kwargs={"k": 3})
            retrievers.append(core_ref_retriever)
# Add protocol retriever if available
if session_qdrant_client:
try:
if USER_EMBEDDINGS_NAME in [c.name for c in session_qdrant_client.get_collections().collections]:
protocol_vectorstore = QdrantVectorStore(
client=session_qdrant_client,
collection_name=USER_EMBEDDINGS_NAME,
embedding=embedding_model
)
protocol_retriever = protocol_vectorstore.as_retriever(search_kwargs={"k": 3})
retrievers.append(protocol_retriever)
except Exception as e:
print(f"Error accessing protocol: {str(e)}")
if not retrievers:
raise ValueError("No data sources available")
        # With multiple retrievers, merge their results with equal weights
        if len(retrievers) > 1:
            from langchain.retrievers import EnsembleRetriever
            retriever = EnsembleRetriever(
                retrievers=retrievers,
                weights=[1.0 / len(retrievers)] * len(retrievers)
            )
else:
retriever = retrievers[0]
# Create and return the RAG chain with async support
return (
{"context": itemgetter("question") | retriever | format_docs,
"question": itemgetter("question")}
| rag_prompt
| chat_model
| StrOutputParser()
)
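# The returned chain mirrors core_reference_retrieval_chain above and should be
# invoked asynchronously, e.g. `await chain.ainvoke({"question": "..."})`.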
# Collect all tools - now just the two core tools
tools = [
search_all_data,
analyze_protocol_domains
]
# ==================== LANGGRAPH SETUP ====================
# LangGraph components
model = ChatOpenAI(model_name=general_llm_model, temperature=0)
# System message
system_message = """You are a helpful assistant specializing in NIH HEAL CDE protocols.
You have access to:
1. A tool to search all available data (search_all_data) - Use this to answer questions about the protocol or core reference data
2. A tool to analyze all NIH HEAL domains at once (analyze_protocol_domains) - This will identify instruments for each NIH HEAL CDE core domain, return the result in markdown, and also create a CSV file
WHEN TO USE TOOLS:
- When users ask general questions about the protocol or core reference data, use the search_all_data tool.
- When users want a complete analysis of all domains, use the analyze_protocol_domains tool.
Be specific in your tool queries to get the most relevant information.
Always use the appropriate tool before responding to questions about the protocol or core reference data.
IMPORTANT: When returning tool outputs, especially markdown tables or formatted content, preserve the exact formatting without adding any commentary, introduction, or conclusion.
"""
# Bind tools and configure models
model = model.bind_tools(tools)
tool_node = ToolNode(tools=tools)
def should_continue(state: MessagesState) -> Literal["tools", END]:
messages = state["messages"]
last_message = messages[-1]
# If the LLM makes a tool call, then we route to the "tools" node
if last_message.tool_calls:
return "tools"
# Otherwise, we end the graph (reply to the user)
return END
async def call_model(state: MessagesState):
messages = state["messages"]
# Add the system message at the beginning of the messages list
if messages and not any(isinstance(msg, SystemMessage) for msg in messages):
messages = [SystemMessage(content=system_message)] + messages
response = await model.ainvoke(messages)
# We return a list, because this will get added to the existing list
return {"messages": [response]}
# Build the graph
builder = StateGraph(MessagesState)
builder.add_node("supervisor", call_model)
builder.add_node("tools", tool_node)
builder.add_edge(START, "supervisor")
builder.add_conditional_edges(
"supervisor",
should_continue,
)
builder.add_edge("tools", "supervisor")
graph = builder.compile()
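# Graph topology:
#   START -> supervisor -> (tool call? -> tools -> supervisor)* -> END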
# ==================== CHAINLIT HANDLERS ====================
@cl.on_chat_start
async def on_chat_start():
# Create a session-specific Qdrant client
session_qdrant_client = create_session_qdrant_client()
cl.user_session.set("session_qdrant_client", session_qdrant_client)
files = await cl.AskFileMessage(
content="Please upload one or more NIH HEAL Protocols (PDFs only) for analysis.",
accept=["application/pdf"],
max_files=5, # Allow up to 5 files
max_size_mb=20,
timeout=180,
).send()
    if not files:
        await cl.Message("Error: You must upload at least one PDF file.").send()
        return
    # Store the filenames in the user session
    protocol_filenames = [file.name for file in files]
    cl.user_session.set("protocol_filenames", protocol_filenames)
    processing_msg = cl.Message(content=f"Processing {len(files)} protocol(s)...")
    await processing_msg.send()
    # Process the uploaded files with the session-specific client
    documents_with_metadata = await load_and_chunk_protocol_files(files)
    user_vectorstore = await embed_protocol_in_qdrant(documents_with_metadata, session_qdrant_client)
    if user_vectorstore:
        # Present options to the user instead of automatically running the analysis
        options_message = """
Your protocol(s) have been successfully processed! What would you like to do next?

1. Ask questions about the uploaded protocol(s)
2. Run a complete analysis of the core domain instruments used in the uploaded protocol(s).
   This will identify the instrument for each NIH HEAL CDE core domain, show the result on screen, and create a downloadable crosswalk.
"""
        await cl.Message(content=options_message).send()
    else:
        await cl.Message(content="There was an issue processing your PDF(s). Please try uploading again.").send()
@cl.on_message
async def on_message(msg: cl.Message):
    # Show a thinking indicator while the graph runs
    async with cl.Step(name="HEAL SYNC is processing your request"):
        final_answer = await process_message(msg.content)
await final_answer.send()
await handle_file_attachments()
async def process_message(content: str):
config = {"configurable": {"thread_id": cl.context.session.id}}
final_answer = cl.Message(content="")
try:
async for msg_response, metadata in graph.astream(
{"messages": [HumanMessage(content=content)]},
stream_mode="messages",
config=config
):
# Process response
if should_stream_response(msg_response, metadata):
await final_answer.stream_token(msg_response.content)
except Exception as e:
# Handle graph processing errors gracefully
await final_answer.stream_token(f"\n\nI encountered an error: {str(e)}")
return final_answer
def should_stream_response(msg_response, metadata):
    # Stream only content produced by the supervisor node; this skips tool
    # outputs and echoed human messages so they are not duplicated
    return (
        msg_response.content
        and not isinstance(msg_response, HumanMessage)
        and metadata.get("langgraph_node") == "supervisor"
    )
async def handle_file_attachments():
csv_path = cl.user_session.get("csv_path")
if not csv_path:
return
try:
file_message = cl.Message(content="Download the crosswalk here:")
await file_message.send()
await cl.File(
name="domain_analysis.csv",
path=csv_path,
display="inline"
).send(for_id=file_message.id)
# Clear the path to avoid sending it multiple times
cl.user_session.set("csv_path", None)
except Exception as e:
print(f"Error attaching CSV file: {str(e)}") |