gabejavitt committed on
Commit
8430d9c
·
verified ·
1 Parent(s): ba1c1bf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +61 -61
app.py CHANGED
@@ -717,68 +717,68 @@ Your goal: Provide the EXACT answer in the EXACT format requested.
717
 
718
  self.graph = graph_builder.compile()
719
  print("✅ Graph compiled successfully")
720
- def __call__(self, question: str) -> str:
721
- print(f"\n--- Starting Agent Run for Question ---")
722
- print(f"Agent received question (first 100 chars): {question[:100]}...")
723
-
724
- # Initialize graph input with turn counter
725
- graph_input = {
726
- "messages": [
727
- SystemMessage(content=self.system_prompt),
728
- HumanMessage(content=question)
729
- ],
730
- "turn": 0
731
- }
732
-
733
- final_answer = "AGENT FAILED TO PRODUCE ANSWER"
734
- try:
735
- # Add config for recursion limit (LangGraph default is 25, but our turn limit is softer)
736
- config = {"recursion_limit": MAX_TURNS + 5} # Allow slightly more graph steps than turns
737
- for event in self.graph.stream(graph_input, stream_mode="values", config=config):
738
- last_message = event["messages"][-1]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
739
 
740
- # Check for final answer extraction
741
- if isinstance(last_message, AIMessage) and last_message.tool_calls:
742
- if last_message.tool_calls[0].get("name") == "final_answer_tool":
743
- final_answer = last_message.tool_calls[0]['args'].get('answer', "ERROR: FINAL_ANSWER_TOOL CALLED WITHOUT ANSWER")
744
- print(f"--- Final Answer Captured from tool call: '{final_answer}' ---")
745
- # We can break here since the graph condition should lead to END anyway
746
- break
747
-
748
- # Log other message types (optional but helpful)
749
- elif isinstance(last_message, ToolMessage):
750
- print(f"Tool Result ({last_message.tool_call_id}): {last_message.content[:500]}...")
751
- elif isinstance(last_message, AIMessage) and not last_message.tool_calls:
752
- # This is now expected (the "plan" or "think" step)
753
- print(f"AI Message (Plan/Thought): {last_message.content[:500]}...")
754
- # Don't set final_answer here anymore, only final_answer_tool counts
755
-
756
- # --- Cleaning step (Keep as is) ---
757
- cleaned_answer = str(final_answer).strip()
758
- # ... (keep existing prefix removal and fence removal logic) ...
759
- prefixes_to_remove = ["The answer is:", "Here is the answer:", "Based on the information:", "Final Answer:", "Answer:"]
760
- original_cleaned = cleaned_answer
761
- for prefix in prefixes_to_remove:
762
- if cleaned_answer.lower().startswith(prefix.lower()):
763
- potential_answer = cleaned_answer[len(prefix):].strip()
764
- if potential_answer: cleaned_answer = potential_answer; break
765
- if cleaned_answer == original_cleaned and any(cleaned_answer.lower().startswith(p.lower()) for p in prefixes_to_remove):
766
- print(f"Warning: Prefix found but not stripped: '{original_cleaned[:100]}...'")
767
- # Simple fence removal
768
- cleaned_answer = remove_fences_simple(cleaned_answer)
769
- if cleaned_answer.startswith("`") and cleaned_answer.endswith("`"):
770
- cleaned_answer = cleaned_answer[1:-1].strip()
771
- print(f"Agent returning final answer (cleaned): '{cleaned_answer}'")
772
- return cleaned_answer
773
- except Exception as e:
774
- print(f"Error running agent graph: {e}")
775
- tb_str = traceback.format_exc()
776
- print(tb_str)
777
- # Check if it was specifically our turn limit message
778
- if isinstance(e, SystemMessage) and f"maximum turn limit ({MAX_TURNS})" in str(e.content):
779
- return f"AGENT STOPPED: Reached maximum turn limit ({MAX_TURNS})."
780
- return f"AGENT GRAPH ERROR: {e}"
781
- # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
782
  # --- (Original Template Code - Mock Questions Version) ---
783
  def run_and_submit_all( profile: gr.OAuthProfile | None):
784
  """
 
717
 
718
  self.graph = graph_builder.compile()
719
  print("✅ Graph compiled successfully")
720
+ def __call__(self, question: str) -> str:
721
+ print(f"\n--- Starting Agent Run for Question ---")
722
+ print(f"Agent received question (first 100 chars): {question[:100]}...")
723
+
724
+ # Initialize graph input with turn counter
725
+ graph_input = {
726
+ "messages": [
727
+ SystemMessage(content=self.system_prompt),
728
+ HumanMessage(content=question)
729
+ ],
730
+ "turn": 0
731
+ }
732
+
733
+ final_answer = "AGENT FAILED TO PRODUCE ANSWER"
734
+ try:
735
+ # Add config for recursion limit (LangGraph default is 25, but our turn limit is softer)
736
+ config = {"recursion_limit": MAX_TURNS + 5} # Allow slightly more graph steps than turns
737
+ for event in self.graph.stream(graph_input, stream_mode="values", config=config):
738
+ last_message = event["messages"][-1]
739
+
740
+ # Check for final answer extraction
741
+ if isinstance(last_message, AIMessage) and last_message.tool_calls:
742
+ if last_message.tool_calls[0].get("name") == "final_answer_tool":
743
+ final_answer = last_message.tool_calls[0]['args'].get('answer', "ERROR: FINAL_ANSWER_TOOL CALLED WITHOUT ANSWER")
744
+ print(f"--- Final Answer Captured from tool call: '{final_answer}' ---")
745
+ # We can break here since the graph condition should lead to END anyway
746
+ break
747
+
748
+ # Log other message types (optional but helpful)
749
+ elif isinstance(last_message, ToolMessage):
750
+ print(f"Tool Result ({last_message.tool_call_id}): {last_message.content[:500]}...")
751
+ elif isinstance(last_message, AIMessage) and not last_message.tool_calls:
752
+ # This is now expected (the "plan" or "think" step)
753
+ print(f"AI Message (Plan/Thought): {last_message.content[:500]}...")
754
+ # Don't set final_answer here anymore, only final_answer_tool counts
755
 
756
+ # --- Cleaning step (Keep as is) ---
757
+ cleaned_answer = str(final_answer).strip()
758
+ # ... (keep existing prefix removal and fence removal logic) ...
759
+ prefixes_to_remove = ["The answer is:", "Here is the answer:", "Based on the information:", "Final Answer:", "Answer:"]
760
+ original_cleaned = cleaned_answer
761
+ for prefix in prefixes_to_remove:
762
+ if cleaned_answer.lower().startswith(prefix.lower()):
763
+ potential_answer = cleaned_answer[len(prefix):].strip()
764
+ if potential_answer: cleaned_answer = potential_answer; break
765
+ if cleaned_answer == original_cleaned and any(cleaned_answer.lower().startswith(p.lower()) for p in prefixes_to_remove):
766
+ print(f"Warning: Prefix found but not stripped: '{original_cleaned[:100]}...'")
767
+ # Simple fence removal
768
+ cleaned_answer = remove_fences_simple(cleaned_answer)
769
+ if cleaned_answer.startswith("`") and cleaned_answer.endswith("`"):
770
+ cleaned_answer = cleaned_answer[1:-1].strip()
771
+ print(f"Agent returning final answer (cleaned): '{cleaned_answer}'")
772
+ return cleaned_answer
773
+ except Exception as e:
774
+ print(f"Error running agent graph: {e}")
775
+ tb_str = traceback.format_exc()
776
+ print(tb_str)
777
+ # Check if it was specifically our turn limit message
778
+ if isinstance(e, SystemMessage) and f"maximum turn limit ({MAX_TURNS})" in str(e.content):
779
+ return f"AGENT STOPPED: Reached maximum turn limit ({MAX_TURNS})."
780
+ return f"AGENT GRAPH ERROR: {e}"
781
+ # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
782
  # --- (Original Template Code - Mock Questions Version) ---
783
  def run_and_submit_all( profile: gr.OAuthProfile | None):
784
  """