Spaces:
Sleeping
Sleeping
Tafazzul-Nadeeem
committed on
Commit
·
b41cccb
1
Parent(s):
d6e3b0b
Chatbot ready and deployment checked
Browse files- app.py +1 -1
- last_stable_app_on_hf.py +41 -19
app.py
CHANGED
|
@@ -167,7 +167,7 @@ with gr.Blocks() as demo:
|
|
| 167 |
##########################################################################
|
| 168 |
gr.Markdown(
|
| 169 |
"""
|
| 170 |
-
<h1 style='text-align: center; font-size:
|
| 171 |
MedScan Diagnostic Services Chatbot (Agentic AI framework powered by OpenAI)
|
| 172 |
</h1>
|
| 173 |
"""
|
|
|
|
| 167 |
##########################################################################
|
| 168 |
gr.Markdown(
|
| 169 |
"""
|
| 170 |
+
<h1 style='text-align: center; font-size: 1.5em; color: #2c3e50; margin-bottom: 0.2em;'>
|
| 171 |
MedScan Diagnostic Services Chatbot (Agentic AI framework powered by OpenAI)
|
| 172 |
</h1>
|
| 173 |
"""
|
last_stable_app_on_hf.py
CHANGED
|
@@ -4,6 +4,7 @@ import os
|
|
| 4 |
import base64
|
| 5 |
import time
|
| 6 |
import copy
|
|
|
|
| 7 |
|
| 8 |
from dotenv import load_dotenv
|
| 9 |
# Load environment variables from .env file
|
|
@@ -40,7 +41,7 @@ with gr.Blocks() as demo:
|
|
| 40 |
|
| 41 |
def agent4_get_prescription_text(messages):
|
| 42 |
"""
|
| 43 |
-
|
| 44 |
"""
|
| 45 |
prescription_text = get_prescription_text(messages)
|
| 46 |
return prescription_text
|
|
@@ -50,13 +51,17 @@ with gr.Blocks() as demo:
|
|
| 50 |
return base64.b64encode(f.read()).decode("utf-8")
|
| 51 |
|
| 52 |
def load_welcome():
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
|
|
|
| 56 |
|
| 57 |
def clear_and_load():
|
| 58 |
# Return the welcome message
|
| 59 |
-
|
|
|
|
|
|
|
|
|
|
| 60 |
|
| 61 |
def add_message(history, message):
|
| 62 |
# Send the image to the agent4_get_prescription_text
|
|
@@ -73,9 +78,13 @@ with gr.Blocks() as demo:
|
|
| 73 |
"image_url": {"url": f"data:image/jpeg;base64,{encoded_content}"}
|
| 74 |
})
|
| 75 |
history.append({"role": "user", "content": {"path": x}})
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 79 |
|
| 80 |
if message["text"] is not None:
|
| 81 |
history.append({"role": "user", "content": message["text"]})
|
|
@@ -83,6 +92,7 @@ with gr.Blocks() as demo:
|
|
| 83 |
return history, gr.MultimodalTextbox(value=None, interactive=False, file_count="multiple", placeholder="Enter message or upload file...")
|
| 84 |
|
| 85 |
def respond(history):
|
|
|
|
| 86 |
if len(history) == 2:
|
| 87 |
history.insert(0,{"role": "system", "content": openai_opening_system_message})
|
| 88 |
messages = copy.deepcopy(history)
|
|
@@ -107,27 +117,27 @@ with gr.Blocks() as demo:
|
|
| 107 |
"content": msg["content"]
|
| 108 |
}
|
| 109 |
clean_messages.append(clean_msg)
|
| 110 |
-
|
| 111 |
########################### AGENTIC WORKFLOW ##########################
|
| 112 |
# Call Agent1- the RAG Decision Agent
|
|
|
|
| 113 |
if clean_messages[-1]["role"] == "system" and "No prescription found" in clean_messages[-1]["content"]:
|
| 114 |
# If the last message is a system message with "No prescription found", skip RAG decision
|
| 115 |
rag_decision = False
|
| 116 |
elif clean_messages[-2]["role"] == "system" and "No prescription found" in clean_messages[-2]["content"]:
|
| 117 |
rag_decision = False
|
| 118 |
else:
|
| 119 |
-
rag_query = ""
|
| 120 |
# Get the last 10 messages in the format "role: <message>"
|
| 121 |
last_10 = clean_messages[-10:] if len(clean_messages) > 10 else clean_messages
|
| 122 |
rag_query = "\n".join(
|
| 123 |
f"{msg['role']}: {msg['content'][0]['text'] if isinstance(msg['content'], list) and msg['content'] and 'text' in msg['content'][0] else ''}"
|
| 124 |
for msg in last_10
|
| 125 |
)
|
|
|
|
| 126 |
rag_decision = agent1_rag_decision(rag_query)
|
| 127 |
|
| 128 |
if rag_decision == True:
|
| 129 |
#Call Agent2 - the RAG Retrieval Agent
|
| 130 |
-
top_k_results = agent2_use_rag(clean_messages[-1]["content"][0]["text"], k=
|
| 131 |
# Append the top k results to the messages
|
| 132 |
for i, result in enumerate(top_k_results):
|
| 133 |
clean_messages.append({
|
|
@@ -141,18 +151,30 @@ with gr.Blocks() as demo:
|
|
| 141 |
response = agent3_llm_agent(clean_messages)
|
| 142 |
#######################################################################
|
| 143 |
|
| 144 |
-
history.append({"role": "assistant", "content": response})
|
| 145 |
-
return history
|
| 146 |
|
| 147 |
-
|
| 148 |
-
#
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
|
|
|
|
|
|
|
|
|
| 152 |
|
| 153 |
|
| 154 |
##########################################################################
|
| 155 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 156 |
chat_input = gr.MultimodalTextbox(
|
| 157 |
interactive=True,
|
| 158 |
file_count="multiple",
|
|
|
|
| 4 |
import base64
|
| 5 |
import time
|
| 6 |
import copy
|
| 7 |
+
import re
|
| 8 |
|
| 9 |
from dotenv import load_dotenv
|
| 10 |
# Load environment variables from .env file
|
|
|
|
| 41 |
|
| 42 |
def agent4_get_prescription_text(messages):
|
| 43 |
"""
|
| 44 |
+
Openai agent to get prescription text.
|
| 45 |
"""
|
| 46 |
prescription_text = get_prescription_text(messages)
|
| 47 |
return prescription_text
|
|
|
|
| 51 |
return base64.b64encode(f.read()).decode("utf-8")
|
| 52 |
|
| 53 |
def load_welcome():
|
| 54 |
+
history = []
|
| 55 |
+
history.append({"role": "system", "content": openai_opening_system_message})
|
| 56 |
+
history.append({"role": "assistant", "content": bot_welcome_message})
|
| 57 |
+
return history
|
| 58 |
|
| 59 |
def clear_and_load():
|
| 60 |
# Return the welcome message
|
| 61 |
+
history = []
|
| 62 |
+
history.append({"role": "system", "content": openai_opening_system_message})
|
| 63 |
+
history.append({"role": "assistant", "content": bot_welcome_message})
|
| 64 |
+
return history, None
|
| 65 |
|
| 66 |
def add_message(history, message):
|
| 67 |
# Send the image to the agent4_get_prescription_text
|
|
|
|
| 78 |
"image_url": {"url": f"data:image/jpeg;base64,{encoded_content}"}
|
| 79 |
})
|
| 80 |
history.append({"role": "user", "content": {"path": x}})
|
| 81 |
+
|
| 82 |
+
# call agent4_get_prescription_text if there is an image_url in the message
|
| 83 |
+
has_image_url = any("image_url" in item for item in messages[0]["content"])
|
| 84 |
+
if has_image_url:
|
| 85 |
+
prescription_text = agent4_get_prescription_text(messages)
|
| 86 |
+
print(f"Prescription Text: {prescription_text}")
|
| 87 |
+
history.append({"role": "system", "content": prescription_text})
|
| 88 |
|
| 89 |
if message["text"] is not None:
|
| 90 |
history.append({"role": "user", "content": message["text"]})
|
|
|
|
| 92 |
return history, gr.MultimodalTextbox(value=None, interactive=False, file_count="multiple", placeholder="Enter message or upload file...")
|
| 93 |
|
| 94 |
def respond(history):
|
| 95 |
+
|
| 96 |
if len(history) == 2:
|
| 97 |
history.insert(0,{"role": "system", "content": openai_opening_system_message})
|
| 98 |
messages = copy.deepcopy(history)
|
|
|
|
| 117 |
"content": msg["content"]
|
| 118 |
}
|
| 119 |
clean_messages.append(clean_msg)
|
|
|
|
| 120 |
########################### AGENTIC WORKFLOW ##########################
|
| 121 |
# Call Agent1- the RAG Decision Agent
|
| 122 |
+
rag_query = ""
|
| 123 |
if clean_messages[-1]["role"] == "system" and "No prescription found" in clean_messages[-1]["content"]:
|
| 124 |
# If the last message is a system message with "No prescription found", skip RAG decision
|
| 125 |
rag_decision = False
|
| 126 |
elif clean_messages[-2]["role"] == "system" and "No prescription found" in clean_messages[-2]["content"]:
|
| 127 |
rag_decision = False
|
| 128 |
else:
|
|
|
|
| 129 |
# Get the last 10 messages in the format "role: <message>"
|
| 130 |
last_10 = clean_messages[-10:] if len(clean_messages) > 10 else clean_messages
|
| 131 |
rag_query = "\n".join(
|
| 132 |
f"{msg['role']}: {msg['content'][0]['text'] if isinstance(msg['content'], list) and msg['content'] and 'text' in msg['content'][0] else ''}"
|
| 133 |
for msg in last_10
|
| 134 |
)
|
| 135 |
+
|
| 136 |
rag_decision = agent1_rag_decision(rag_query)
|
| 137 |
|
| 138 |
if rag_decision == True:
|
| 139 |
#Call Agent2 - the RAG Retrieval Agent
|
| 140 |
+
top_k_results = agent2_use_rag(clean_messages[-1]["content"][0]["text"], k=5)
|
| 141 |
# Append the top k results to the messages
|
| 142 |
for i, result in enumerate(top_k_results):
|
| 143 |
clean_messages.append({
|
|
|
|
| 151 |
response = agent3_llm_agent(clean_messages)
|
| 152 |
#######################################################################
|
| 153 |
|
| 154 |
+
# history.append({"role": "assistant", "content": response})
|
| 155 |
+
# return history
|
| 156 |
|
| 157 |
+
history.append({"role": "assistant", "content": ""})
|
| 158 |
+
# Split by sentence boundaries (naive but works for most cases)
|
| 159 |
+
chunks = re.split(r'(?<=[.!?]) +', response)
|
| 160 |
+
|
| 161 |
+
for chunk in chunks:
|
| 162 |
+
history[-1]["content"] += chunk + " "
|
| 163 |
+
time.sleep(0.3)
|
| 164 |
+
yield history
|
| 165 |
|
| 166 |
|
| 167 |
##########################################################################
|
| 168 |
+
gr.Markdown(
|
| 169 |
+
"""
|
| 170 |
+
<h1 style='text-align: center; font-size: 1.5em; color: #2c3e50; margin-bottom: 0.2em;'>
|
| 171 |
+
MedScan Diagnostic Services Chatbot (Agentic AI framework powered by OpenAI)
|
| 172 |
+
</h1>
|
| 173 |
+
"""
|
| 174 |
+
)
|
| 175 |
+
chatbot = gr.Chatbot(type="messages",
|
| 176 |
+
render_markdown=True,
|
| 177 |
+
height=380)
|
| 178 |
chat_input = gr.MultimodalTextbox(
|
| 179 |
interactive=True,
|
| 180 |
file_count="multiple",
|