File size: 2,512 Bytes
55e745a
 
 
 
 
 
b438b2c
55e745a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
be9a1d2
55e745a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b438b2c
 
55e745a
 
 
b438b2c
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
# Import and initialize the embedding model used by the vector store.
from langchain_huggingface import HuggingFaceEmbeddings
embed_model = HuggingFaceEmbeddings(model_name="mixedbread-ai/mxbai-embed-large-v1")
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
import gradio as gr
import pandas as pd

from langchain_groq import ChatGroq
# Create a vector store...
from langchain_chroma import Chroma
import os

# Chroma persists to the working directory so embeddings survive restarts.
vectorstore = Chroma(
    collection_name="medical_dataset_store",
    embedding_function=embed_model,
    persist_directory="./",
)

# Load the dataset to be used.
context = pd.read_csv("./drugs_side_effects_drugs_com.csv")

# Because the vector store is empty, add the context data.
# BUG FIX: iterating a DataFrame yields its *column names*, so the original
# `vectorstore.add_texts(context)` embedded only the header strings and none
# of the actual records. Serialize each row into one "column: value" document
# so the drug data itself is indexed. (Also dropped the dead no-op expression
# `vectorstore.get().keys()`.)
row_texts = context.astype(str).apply(
    lambda row: " | ".join(f"{col}: {row[col]}" for col in context.columns),
    axis=1,
).tolist()
vectorstore.add_texts(row_texts)

retriever = vectorstore.as_retriever()


# Prompt contract: answer patient questions about drug side effects using
# only the retrieved context, without citing the context explicitly.
template = """
You are a medical expert specializing in pharmacology.
Your task is to use the provided context to answer questions about drug side effects for patients.
Please follow these guidelines:
- Provide accurate and detailed answers based on the context.
- If you don't know the answer, clearly state that you don't know.
- Do not reference the context directly in your response; just provide the answer.
- Ensure your answers are clear, concise, and informative.
Context: {context}
Question: {question}
Answer:
"""
rag_prompt = PromptTemplate.from_template(template)

# Initialize the chat model; the Groq API key is read from the "medibot"
# environment variable (None if unset).
llm_model = ChatGroq(model="llama-3.3-70b-versatile", api_key=os.environ.get("medibot"))

# Wire retrieval -> prompt -> LLM -> plain-text parsing into one runnable.
# The dict fans the incoming question out to the retriever (for context)
# and passes it through unchanged as {question}.
chain_inputs = {"context": retriever, "question": RunnablePassthrough()}
rag_chain = chain_inputs | rag_prompt | llm_model | StrOutputParser()

def rag_memory_stream(message, history):
    """Stream the RAG chain's answer token-by-token for the Gradio chat UI.

    Yields the accumulated answer so far after each chunk, so the interface
    renders a progressively growing response. `history` is accepted to match
    the ChatInterface callback signature but is not used.
    """
    answer_so_far = ""
    for chunk in rag_chain.stream(message):
        answer_so_far = answer_so_far + chunk
        yield answer_so_far

# Example queries shown as clickable suggestions in the chat UI.
examples = [
    "What is a drug ?", 
    "What are the side effects of lisinopril?"
]

description = "Real-Time AI-Powered Medical Assistant: Drug Side Effect Queries Chatbot"


title = "AI-Powered Medical Chatbot :) Try me!"

# Build the streaming chat interface around the RAG callback.
demo = gr.ChatInterface(fn=rag_memory_stream,
                        type="messages",
                        title=title,
                        description=description,
                        fill_height=True,
                        examples=examples,
                        theme="glass",
)

# BUG FIX: the original called demo.launch(share=True) unconditionally at
# module level AND demo.launch() again under the __main__ guard, launching
# the app twice when run as a script (and launching on mere import). Launch
# exactly once, under the guard, keeping the shareable public link.
if __name__ == "__main__":
    demo.launch(share=True)