Tafazzul-Nadeeem committed on
Commit
7ff33ff
·
1 Parent(s): 64e0ac6

Prescription feature done

Browse files
Files changed (1) hide show
  1. app.py +29 -6
app.py CHANGED
@@ -28,12 +28,25 @@ Also, the final service rates and offers will be available at the time of bookin
28
  and may vary from the information provided here.
29
  **Can I help you with anything?**
30
  """
31
- openai_opening_message = """"You are a helpful assistant of a diagnostic
32
  services business.
33
  The system uses RAG to retrieve relevant information from a knowledge base.
34
  You can also answer questions based on the information provided by the user.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
35
  """
36
-
37
  chatbot = gr.Chatbot(type="messages")
38
  chat_input = gr.MultimodalTextbox(
39
  interactive=True,
@@ -87,7 +100,7 @@ You can also answer questions based on the information provided by the user.
87
  # print("history", history)
88
  if len(history) == 2:
89
  # print("Inside")
90
- history.insert(0,{"role": "system", "content": openai_opening_message})
91
  # print("history after insert", history)
92
  messages = copy.deepcopy(history)
93
  for i, msg in enumerate(messages):
@@ -108,6 +121,16 @@ You can also answer questions based on the information provided by the user.
108
  "image_url": {"url": f"data:image/jpeg;base64,{encoded_content}"}}]
109
  clean_messages = [] # OpenAI doesnot accept metadata or options in messages
110
  for msg in messages:
 
 
 
 
 
 
 
 
 
 
111
  clean_msg = {
112
  "role": msg["role"],
113
  "content": msg["content"]
@@ -117,11 +140,11 @@ You can also answer questions based on the information provided by the user.
117
 
118
  ########################### AGENTIC WORKFLOW ##########################
119
  # Call Agent1- the RAG Decision Agent
120
- rag_decision = agent1_rag_decision(messages[-1]["content"])
121
 
122
  if rag_decision == True:
123
  #Call Agent2 - the RAG Retrieval Agent
124
- top_k_results = agent2_use_rag(messages[-1]["content"][0]["text"], k=3)
125
  # Append the top k results to the messages
126
  for i, result in enumerate(top_k_results):
127
  clean_messages.append({
@@ -132,7 +155,7 @@ You can also answer questions based on the information provided by the user.
132
  response = agent3_llm_agent(clean_messages)
133
  else:
134
  # Call Agent3 - the LLM Agent to get query response
135
- response = agent3_llm_agent(messages)
136
  #######################################################################
137
 
138
  # print("response:", response)
 
28
  and may vary from the information provided here.
29
  **Can I help you with anything?**
30
  """
31
+ openai_opening_system_message = """"You are a helpful assistant of a diagnostic
32
  services business.
33
  The system uses RAG to retrieve relevant information from a knowledge base.
34
  You can also answer questions based on the information provided by the user.
35
+ Do not provide any medical advice or diagnosis.
36
+ """
37
+ prescription_upload_message = """The user has uploaded an image.
38
+ Check if the image contains a prescription, if not, ask the user to upload a valid prescription
39
+ image or provide details about the tests they are interested in. Do not provide
40
+ information about any other image if the image is not a prescription. Just reply that "I am
41
+ only trained to assist with prescription images and the uploaded image does not seem
42
+ to be a prescription." Do not answer any question if the uploaded
43
+ image is not a prescription.
44
+ If the image contains a prescription, extract the tests advised by the doctor
45
+ and provide details about those tests like rates and offers. For the tests
46
+ which are not in your knowledge base, say, "Not in our lab".
47
+ At last, also ask the user if You missed any test.
48
+ Do not provide any medical advice or diagnosis.
49
  """
 
50
  chatbot = gr.Chatbot(type="messages")
51
  chat_input = gr.MultimodalTextbox(
52
  interactive=True,
 
100
  # print("history", history)
101
  if len(history) == 2:
102
  # print("Inside")
103
+ history.insert(0,{"role": "system", "content": openai_opening_system_message})
104
  # print("history after insert", history)
105
  messages = copy.deepcopy(history)
106
  for i, msg in enumerate(messages):
 
121
  "image_url": {"url": f"data:image/jpeg;base64,{encoded_content}"}}]
122
  clean_messages = [] # OpenAI doesnot accept metadata or options in messages
123
  for msg in messages:
124
+ try:
125
+ if msg["content"][0]["type"] == "image_url":
126
+ clean_messages.append({
127
+ "role": "user",
128
+ "content": [{
129
+ "type": "text",
130
+ "text": prescription_upload_message
131
+ }]})
132
+ except:
133
+ pass
134
  clean_msg = {
135
  "role": msg["role"],
136
  "content": msg["content"]
 
140
 
141
  ########################### AGENTIC WORKFLOW ##########################
142
  # Call Agent1- the RAG Decision Agent
143
+ rag_decision = agent1_rag_decision(clean_messages[-1]["content"])
144
 
145
  if rag_decision == True:
146
  #Call Agent2 - the RAG Retrieval Agent
147
+ top_k_results = agent2_use_rag(clean_messages[-1]["content"][0]["text"], k=3)
148
  # Append the top k results to the messages
149
  for i, result in enumerate(top_k_results):
150
  clean_messages.append({
 
155
  response = agent3_llm_agent(clean_messages)
156
  else:
157
  # Call Agent3 - the LLM Agent to get query response
158
+ response = agent3_llm_agent(clean_messages)
159
  #######################################################################
160
 
161
  # print("response:", response)