jdesiree committed on
Commit
03e3e06
·
verified ·
1 Parent(s): bdff161

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +56 -27
app.py CHANGED
@@ -1167,41 +1167,70 @@ def orchestrate_turn(user_input: str, session_id: str = "default") -> str:
1167
  log_step("Step 7: Prompt assembly", step_start)
1168
 
1169
  # ====================================================================
1170
- # STEP 8: FINAL PROMPT CONSTRUCTION
1171
  # ====================================================================
1172
- step_start = log_step("Step 8: Final prompt construction")
1173
 
1174
- # Knowledge cutoff
1175
- knowledge_cutoff = f"""
1176
-
1177
- The current year is {CURRENT_YEAR}. Your knowledge cutoff date is October 2023. If the user asks about recent events or dynamic facts, inform them you may not have the most up-to-date information and suggest referencing direct sources."""
1178
 
1179
- complete_prompt = f"""
1180
- {prompt_segments_text}
1181
-
1182
- If tools were used, context and output will be here. Ignore if empty:
1183
- Image output: {tool_img_output}
1184
- Image context: {tool_context}
1185
-
1186
- Conversation history, if available:
1187
- {conversation_history_formatted}
1188
-
1189
- Consider any context available to you:
1190
- {thinking_context}
1191
-
1192
- Here is the user's current query:
1193
- {user_input}
1194
-
1195
- {knowledge_cutoff}
1196
- """
 
 
 
 
 
 
 
 
 
1197
 
1198
- log_step("Step 8: Final prompt construction", step_start)
1199
 
1200
  # ====================================================================
1201
- # STEP 9: RESPONSE GENERATION (Phi3)
1202
  # ====================================================================
1203
  step_start = log_step("Step 9: Response generation")
1204
- raw_response = response_agent.invoke(complete_prompt)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1205
  log_step("Step 9: Response generation", step_start)
1206
 
1207
  # ====================================================================
 
1167
  log_step("Step 7: Prompt assembly", step_start)
1168
 
1169
  # ====================================================================
1170
+ # STEP 8: PREPARE RESPONSE AGENT INPUT
1171
  # ====================================================================
1172
+ step_start = log_step("Step 8: Prepare response input")
1173
 
1174
+ # Get active response prompts
1175
+ response_prompt_names = prompt_state.get_active_response_prompts()
1176
+ logger.info(f"Active prompts: {response_prompt_names}")
 
1177
 
1178
+ # Combine tool outputs for context
1179
+ # If we have tool_img_output, mention it in tool_context
1180
+ combined_tool_context = tool_context
1181
+ if tool_img_output:
1182
+ # Note: tool_img_output is HTML that will be embedded separately
1183
+ # Just note its presence in the context
1184
+ if combined_tool_context:
1185
+ combined_tool_context += "\n\nNote: A visualization has been generated for the user."
1186
+ else:
1187
+ combined_tool_context = "A visualization has been generated for the user."
1188
+
1189
+ # Build input dictionary for ResponseAgent
1190
+ # CRITICAL: Must be a Dict, NOT a string!
1191
+ input_data = {
1192
+ 'user_query': user_input,
1193
+ 'conversation_history': recent_history, # List[Dict], not formatted string!
1194
+ 'active_prompts': response_prompt_names, # List[str]
1195
+ 'thinking_context': thinking_context, # str (from thinking agents)
1196
+ 'tool_context': combined_tool_context, # str (tool usage info)
1197
+ }
1198
+
1199
+ logger.info(f"Response input prepared:")
1200
+ logger.info(f" - User query: {len(user_input)} chars")
1201
+ logger.info(f" - History: {len(recent_history)} messages")
1202
+ logger.info(f" - Active prompts: {len(response_prompt_names)} prompts")
1203
+ logger.info(f" - Thinking context: {len(thinking_context)} chars")
1204
+ logger.info(f" - Tool context: {len(combined_tool_context)} chars")
1205
 
1206
+ log_step("Step 8: Prepare response input", step_start)
1207
 
1208
  # ====================================================================
1209
+ # STEP 9: RESPONSE GENERATION (Llama-3.2-3B)
1210
  # ====================================================================
1211
  step_start = log_step("Step 9: Response generation")
1212
+
1213
+ try:
1214
+ result = response_agent.invoke(input_data)
1215
+
1216
+ # Extract response from result dict
1217
+ raw_response = result.get('response', '')
1218
+ metadata = result.get('metadata', {})
1219
+
1220
+ if not raw_response:
1221
+ logger.warning("ResponseAgent returned empty response")
1222
+ raw_response = "I apologize, but I wasn't able to generate a response. Please try again."
1223
+
1224
+ logger.info(f"✓ Generated {len(raw_response)} chars")
1225
+ if metadata:
1226
+ logger.info(f" Metadata: {metadata}")
1227
+
1228
+ except Exception as e:
1229
+ logger.error(f"Response generation failed: {e}")
1230
+ import traceback
1231
+ logger.error(traceback.format_exc())
1232
+ raw_response = "I apologize, but I encountered an error while generating a response. Please try rephrasing your question or try again."
1233
+
1234
  log_step("Step 9: Response generation", step_start)
1235
 
1236
  # ====================================================================