WanIrfan committed on
Commit
d25fa7e
·
verified ·
1 Parent(s): e4c08c6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -34
app.py CHANGED
@@ -149,22 +149,26 @@ def homePage():
149
 
150
  @app.route("/medical", methods=["GET", "POST"])
151
  def medical_page():
152
- # Use session for history and document context
153
  if request.method == "GET":
154
- # Load all latest data from session (or default to empty if not found)
155
- latest_response = session.pop('latest_medical_response', {}) # POP to clear it after one display
156
 
157
  answer = latest_response.get('answer', "")
158
  thoughts = latest_response.get('thoughts', "")
159
  validation = latest_response.get('validation', "")
160
  source = latest_response.get('source', "")
161
 
162
- # Clear history only when a user first navigates (not on redirect)
163
- if not latest_response and 'medical_history' not in session:
164
- session.pop('current_medical_document', None)
 
 
 
 
 
165
 
166
  return render_template("medical_page.html",
167
- history=session.get('medical_history', []),
168
  answer=answer,
169
  thoughts=thoughts,
170
  validation=validation,
@@ -172,15 +176,12 @@ def medical_page():
172
 
173
  # POST Request
174
  answer, thoughts, validation, source = "", "", "", ""
175
- # 1. Get raw history (list of dicts) from session
176
  raw_history_list = session.get('medical_history', [])
177
- # 2. Hydrate it for the agent
178
  history_for_agent = hydrate_history(raw_history_list)
179
  current_medical_document = session.get('current_medical_document', "")
180
 
181
-
182
  try:
183
- query=standardize_query(request.form.get("query", ""))
184
  has_image = 'image' in request.files and request.files['image'].filename
185
  has_document = 'document' in request.files and request.files['document'].filename
186
  has_query = request.form.get("query") or request.form.get("question", "")
@@ -188,14 +189,12 @@ def medical_page():
188
  logger.info(f"POST request received: has_image={has_image}, has_document={has_document}, has_query={has_query}")
189
 
190
  if has_document:
191
- # Scenario 3: Query + Document
192
  logger.info("Processing Scenario 3: Query + Document with Medical Swarm")
193
  file = request.files['document']
194
  try:
195
- # Store the new document text in the session
196
  document_text = file.read().decode("utf-8")
197
  session['current_medical_document'] = document_text
198
- current_medical_document = document_text # Use the new document for this turn
199
  except UnicodeDecodeError:
200
  answer = "Error: Could not decode the uploaded document. Please ensure it is a valid text or PDF file."
201
  logger.error("Scenario 3: Document decode error")
@@ -205,15 +204,14 @@ def medical_page():
205
  answer = markdown_bold_to_html(swarm_answer)
206
 
207
  thoughts = "Swarm analysis complete. The process is orchestrated and does not use the ReAct thought process. You can now ask follow-up questions."
208
- source= "Medical Swarm"
209
- validation = (True, "Swarm output generated.") # Swarm has its own validation logic
 
210
  history_for_agent.append(HumanMessage(content=f"[Document Uploaded] Query: '{query}'"))
211
  history_for_agent.append(AIMessage(content=answer))
212
 
213
- elif has_image :
214
- #Scenario 1
215
  logger.info("Processing Multimodal RAG: Query + Image")
216
- # --- Step 1 & 2: Image Setup & Vision Analysis ---
217
  file = request.files['image']
218
  upload_dir = "Uploads"
219
  os.makedirs(upload_dir, exist_ok=True)
@@ -226,7 +224,6 @@ def medical_page():
226
  with open(image_path, "rb") as img_file:
227
  img_data = base64.b64encode(img_file.read()).decode("utf-8")
228
 
229
-
230
  vision_prompt = f"Analyze this image and identify the main subject in a single, concise sentence. The user's query is: '{query}'"
231
  message = HumanMessage(content=[
232
  {"type": "text", "text": vision_prompt},
@@ -236,18 +233,20 @@ def medical_page():
236
  visual_prediction = vision_response.content
237
  logger.info(f"Vision Prediction: {visual_prediction}")
238
 
239
- # --- Create an Enhanced Query ---
240
  enhanced_query = (
241
  f'User Query: "{query}" '
242
  f'Context from an image provided by the LLM: "{visual_prediction}" '
243
  'Based on the user\'s query and the context from LLM, provide a comprehensive answer.'
244
  )
245
- logger.info(f"Enhanced query : {enhanced_query}")
246
 
247
  agent = rag_systems['medical']
248
- if not agent: raise Exception("Medical RAG system is not loaded.")
 
 
249
  response_dict = agent.answer(enhanced_query, chat_history=history_for_agent)
250
  answer, thoughts, validation, source = parse_agent_response(response_dict)
 
251
  history_for_agent.append(HumanMessage(content=query))
252
  history_for_agent.append(AIMessage(content=answer))
253
 
@@ -257,12 +256,10 @@ def medical_page():
257
  os.remove(image_path)
258
  logger.info(f"Successfully deleted temporary image file: {image_path}")
259
  except PermissionError as e:
260
- logger.warning(f"Could not remove {image_path} after processing. "
261
- f"File may be locked by another process. Error: {e}")
262
 
263
  elif query:
264
- # --- SCENARIO 2: TEXT-ONLY QUERY OR SWARM FOLLOW-UP ---
265
- history_doc_context = history_for_agent # Use hydrated list
266
  if current_medical_document:
267
  logger.info("Processing Follow-up Query for Document")
268
  history_doc_context = [HumanMessage(content=f"We are discussing this document:\n{current_medical_document}")] + history_for_agent
@@ -270,12 +267,13 @@ def medical_page():
270
  logger.info("Processing Text RAG query for Medical domain")
271
 
272
  logger.info(f"Original Query: '{query}'")
273
- print(f"📚 Using chat history with {len(history_doc_context)} previous messages to create standalone query")
274
- standalone_query = get_standalone_question(query, history_doc_context,llm)
275
  logger.info(f"Standalone Query: '{standalone_query}'")
276
 
277
  agent = rag_systems['medical']
278
- if not agent: raise Exception("Medical RAG system is not loaded.")
 
 
279
  response_dict = agent.answer(standalone_query, chat_history=history_doc_context)
280
  answer, thoughts, validation, source = parse_agent_response(response_dict)
281
 
@@ -284,14 +282,16 @@ def medical_page():
284
 
285
  else:
286
  raise ValueError("No query or file provided.")
 
287
  except Exception as e:
288
  logger.error(f"Error on /medical page: {e}", exc_info=True)
289
  answer = f"An error occurred: {e}"
290
  thoughts = traceback.format_exc()
291
 
292
- # 4. DEHYDRATE the full history back into dicts for session saving
293
  session['medical_history'] = dehydrate_history(history_for_agent)
294
- # This (latest_response) is ALREADY JSON-serializable, so it's fine.
 
295
  session['latest_medical_response'] = {
296
  'answer': answer,
297
  'thoughts': thoughts,
@@ -300,9 +300,10 @@ def medical_page():
300
  }
301
  session.modified = True
302
 
303
- logger.info(f"DEBUG: Saving to session: ANSWER='{answer[:50]}...', THOUGHTS='{thoughts[:50]}...'")
 
 
304
 
305
- logger.debug(f"Redirecting after saving latest response.")
306
  return redirect(url_for('medical_page'))
307
 
308
  @app.route("/medical/clear")
 
149
 
150
  @app.route("/medical", methods=["GET", "POST"])
151
  def medical_page():
 
152
  if request.method == "GET":
153
+ # USE .get() instead of .pop() - don't remove it yet
154
+ latest_response = session.get('latest_medical_response', {})
155
 
156
  answer = latest_response.get('answer', "")
157
  thoughts = latest_response.get('thoughts', "")
158
  validation = latest_response.get('validation', "")
159
  source = latest_response.get('source', "")
160
 
161
+ # NOW clear it after reading (for next request)
162
+ if latest_response:
163
+ session.pop('latest_medical_response', None)
164
+ session.modified = True
165
+
166
+ # Load history
167
+ raw_history_list = session.get('medical_history', [])
168
+ history = hydrate_history(raw_history_list)
169
 
170
  return render_template("medical_page.html",
171
+ history=history, # ✅ Pass hydrated history
172
  answer=answer,
173
  thoughts=thoughts,
174
  validation=validation,
 
176
 
177
  # POST Request
178
  answer, thoughts, validation, source = "", "", "", ""
 
179
  raw_history_list = session.get('medical_history', [])
 
180
  history_for_agent = hydrate_history(raw_history_list)
181
  current_medical_document = session.get('current_medical_document', "")
182
 
 
183
  try:
184
+ query = standardize_query(request.form.get("query", ""))
185
  has_image = 'image' in request.files and request.files['image'].filename
186
  has_document = 'document' in request.files and request.files['document'].filename
187
  has_query = request.form.get("query") or request.form.get("question", "")
 
189
  logger.info(f"POST request received: has_image={has_image}, has_document={has_document}, has_query={has_query}")
190
 
191
  if has_document:
 
192
  logger.info("Processing Scenario 3: Query + Document with Medical Swarm")
193
  file = request.files['document']
194
  try:
 
195
  document_text = file.read().decode("utf-8")
196
  session['current_medical_document'] = document_text
197
+ current_medical_document = document_text
198
  except UnicodeDecodeError:
199
  answer = "Error: Could not decode the uploaded document. Please ensure it is a valid text or PDF file."
200
  logger.error("Scenario 3: Document decode error")
 
204
  answer = markdown_bold_to_html(swarm_answer)
205
 
206
  thoughts = "Swarm analysis complete. The process is orchestrated and does not use the ReAct thought process. You can now ask follow-up questions."
207
+ source = "Medical Swarm"
208
+ validation = "Swarm output generated."
209
+
210
  history_for_agent.append(HumanMessage(content=f"[Document Uploaded] Query: '{query}'"))
211
  history_for_agent.append(AIMessage(content=answer))
212
 
213
+ elif has_image:
 
214
  logger.info("Processing Multimodal RAG: Query + Image")
 
215
  file = request.files['image']
216
  upload_dir = "Uploads"
217
  os.makedirs(upload_dir, exist_ok=True)
 
224
  with open(image_path, "rb") as img_file:
225
  img_data = base64.b64encode(img_file.read()).decode("utf-8")
226
 
 
227
  vision_prompt = f"Analyze this image and identify the main subject in a single, concise sentence. The user's query is: '{query}'"
228
  message = HumanMessage(content=[
229
  {"type": "text", "text": vision_prompt},
 
233
  visual_prediction = vision_response.content
234
  logger.info(f"Vision Prediction: {visual_prediction}")
235
 
 
236
  enhanced_query = (
237
  f'User Query: "{query}" '
238
  f'Context from an image provided by the LLM: "{visual_prediction}" '
239
  'Based on the user\'s query and the context from LLM, provide a comprehensive answer.'
240
  )
241
+ logger.info(f"Enhanced query: {enhanced_query}")
242
 
243
  agent = rag_systems['medical']
244
+ if not agent:
245
+ raise Exception("Medical RAG system is not loaded.")
246
+
247
  response_dict = agent.answer(enhanced_query, chat_history=history_for_agent)
248
  answer, thoughts, validation, source = parse_agent_response(response_dict)
249
+
250
  history_for_agent.append(HumanMessage(content=query))
251
  history_for_agent.append(AIMessage(content=answer))
252
 
 
256
  os.remove(image_path)
257
  logger.info(f"Successfully deleted temporary image file: {image_path}")
258
  except PermissionError as e:
259
+ logger.warning(f"Could not remove {image_path}: {e}")
 
260
 
261
  elif query:
262
+ history_doc_context = history_for_agent
 
263
  if current_medical_document:
264
  logger.info("Processing Follow-up Query for Document")
265
  history_doc_context = [HumanMessage(content=f"We are discussing this document:\n{current_medical_document}")] + history_for_agent
 
267
  logger.info("Processing Text RAG query for Medical domain")
268
 
269
  logger.info(f"Original Query: '{query}'")
270
+ standalone_query = get_standalone_question(query, history_doc_context, llm)
 
271
  logger.info(f"Standalone Query: '{standalone_query}'")
272
 
273
  agent = rag_systems['medical']
274
+ if not agent:
275
+ raise Exception("Medical RAG system is not loaded.")
276
+
277
  response_dict = agent.answer(standalone_query, chat_history=history_doc_context)
278
  answer, thoughts, validation, source = parse_agent_response(response_dict)
279
 
 
282
 
283
  else:
284
  raise ValueError("No query or file provided.")
285
+
286
  except Exception as e:
287
  logger.error(f"Error on /medical page: {e}", exc_info=True)
288
  answer = f"An error occurred: {e}"
289
  thoughts = traceback.format_exc()
290
 
291
+ # DEHYDRATE history back to dicts
292
  session['medical_history'] = dehydrate_history(history_for_agent)
293
+
294
+ # ✅ Save the response
295
  session['latest_medical_response'] = {
296
  'answer': answer,
297
  'thoughts': thoughts,
 
300
  }
301
  session.modified = True
302
 
303
+ # ADD DEBUG LOG
304
+ logger.info(f"💾 SAVED TO SESSION - Answer length: {len(answer)}, First 100 chars: {answer[:100]}")
305
+ logger.info(f"💾 Session ID: {session.get('_id', 'NO ID')}")
306
 
 
307
  return redirect(url_for('medical_page'))
308
 
309
  @app.route("/medical/clear")