gabejavitt committed on
Commit
1bcb5c5
·
verified ·
1 Parent(s): 5150392

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +110 -37
app.py CHANGED
@@ -143,7 +143,7 @@ def find_file(path: str) -> Optional[Path]:
143
  # =============================================================================
144
 
145
  class PlanInput(BaseModel):
146
- question: str = Field(description="The question to create a plan for")
147
 
148
  @tool(args_schema=PlanInput)
149
  def create_plan(question: str) -> str:
@@ -157,6 +157,8 @@ def create_plan(question: str) -> str:
157
  3. What tools will you use?
158
 
159
  After calling this, execute the plan step-by-step.
 
 
160
  """
161
  print(f"πŸ“‹ Planning phase initiated for: {question[:100]}...")
162
 
@@ -172,7 +174,7 @@ Now proceed with Step 1 of your plan."""
172
 
173
 
174
  class ReflectInput(BaseModel):
175
- current_situation: str = Field(description="Brief summary of what you've tried and where you are stuck")
176
 
177
  @tool(args_schema=ReflectInput)
178
  def reflect_on_progress(current_situation: str) -> str:
@@ -186,6 +188,8 @@ def reflect_on_progress(current_situation: str) -> str:
186
  - You've taken 5+ steps without getting closer to the answer
187
 
188
  This helps you step back and reconsider your approach.
 
 
189
  """
190
  print(f"πŸ€” Reflection initiated: {current_situation[:100]}...")
191
 
@@ -527,6 +531,7 @@ def audio_transcription_tool(file_path: str) -> str:
527
  except Exception as e:
528
  return f"Error transcribing '{file_path}': {str(e)}"
529
 
 
530
  class YoutubeInput(BaseModel):
531
  video_url: str = Field(description="YouTube video URL")
532
 
@@ -720,45 +725,87 @@ def parse_tool_call_from_string(content: str, tools: List) -> List[ToolCall]:
720
  tool_input = None
721
  cleaned_str = None
722
 
723
- func_match = re.search(
724
- r"<function[(=]\s*([^)]+)\s*[)>](.*)",
 
725
  content,
726
- re.DOTALL | re.IGNORECASE
727
  )
728
 
729
- if func_match:
730
  try:
731
- tool_name = func_match.group(1).strip().replace("'", "").replace('"', '')
732
- remaining_content = func_match.group(2)
733
 
734
- json_start_index = remaining_content.find('{')
735
- if json_start_index != -1:
736
- json_str = remaining_content[json_start_index:]
737
- cleaned_str = json_str.strip()
738
- cleaned_str = ''.join(c for c in cleaned_str if c.isprintable() or c in '\n\r\t')
739
- cleaned_str = cleaned_str.strip().rstrip(',')
740
-
741
- tool_input = json.loads(cleaned_str)
742
- print(f"πŸ”§ Fallback: Parsed tool call for '{tool_name}'")
743
- else:
744
- print(f"⚠️ Fallback: Found <function> but no JSON blob.")
745
- tool_name = None
746
-
747
- except json.JSONDecodeError as e:
748
- print(f"⚠️ Fallback: json.loads failed, trying ast.literal_eval.")
 
 
 
 
749
  try:
750
- if cleaned_str:
751
- potential_input = ast.literal_eval(cleaned_str)
752
- if isinstance(potential_input, dict):
753
- tool_input = potential_input
754
- print(f"πŸ”§ Fallback: Parsed with ast.literal_eval for '{tool_name}'")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
755
  else:
756
  tool_name = None
757
- else:
758
  tool_name = None
759
- except:
760
- tool_name = None
761
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
762
  if tool_name and tool_input is not None:
763
  if any(t.name == tool_name for t in tools):
764
  tool_call = ToolCall(
@@ -987,7 +1034,7 @@ Turn 7: final_answer_tool("1.796 trillion")
987
  )
988
  messages_to_send.append(hint)
989
 
990
- # Invoke LLM
991
  max_retries = 3
992
  ai_message = None
993
  for attempt in range(max_retries):
@@ -995,12 +1042,39 @@ Turn 7: final_answer_tool("1.796 trillion")
995
  ai_message = self.llm_with_tools.invoke(messages_to_send)
996
  break
997
  except Exception as e:
998
- print(f"⚠️ LLM attempt {attempt+1}/{max_retries} failed: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
999
  if attempt == max_retries - 1:
 
1000
  ai_message = AIMessage(
1001
- content=f"Error: LLM failed after {max_retries} attempts: {e}"
1002
  )
1003
- time.sleep(2 ** attempt)
 
1004
 
1005
  # Fallback Parsing
1006
  if not ai_message.tool_calls and isinstance(ai_message.content, str) and ai_message.content.strip():
@@ -1071,8 +1145,7 @@ Turn 7: final_answer_tool("1.796 trillion")
1071
 
1072
  self.graph = graph_builder.compile()
1073
  print("βœ… Planning & Reflection Agent graph compiled successfully.")
1074
-
1075
-
1076
 
1077
  def __call__(self, question: str) -> str:
1078
  print(f"\n--- Starting Agent Run for Question ---")
 
143
  # =============================================================================
144
 
145
  class PlanInput(BaseModel):
146
+ question: str = Field(description="Brief summary of the task (keep under 100 chars)")
147
 
148
  @tool(args_schema=PlanInput)
149
  def create_plan(question: str) -> str:
 
157
  3. What tools will you use?
158
 
159
  After calling this, execute the plan step-by-step.
160
+
161
+ NOTE: Keep the question summary brief (under 100 chars) to avoid errors.
162
  """
163
  print(f"πŸ“‹ Planning phase initiated for: {question[:100]}...")
164
 
 
174
 
175
 
176
  class ReflectInput(BaseModel):
177
+ current_situation: str = Field(description="What you've tried so far (keep brief, under 100 chars)")
178
 
179
  @tool(args_schema=ReflectInput)
180
  def reflect_on_progress(current_situation: str) -> str:
 
188
  - You've taken 5+ steps without getting closer to the answer
189
 
190
  This helps you step back and reconsider your approach.
191
+
192
+ NOTE: Keep the situation summary brief (under 100 chars).
193
  """
194
  print(f"πŸ€” Reflection initiated: {current_situation[:100]}...")
195
 
 
531
  except Exception as e:
532
  return f"Error transcribing '{file_path}': {str(e)}"
533
 
534
+
535
  class YoutubeInput(BaseModel):
536
  video_url: str = Field(description="YouTube video URL")
537
 
 
725
  tool_input = None
726
  cleaned_str = None
727
 
728
+ # STRATEGY 1: Parse Groq's <function=name{...}> format
729
+ groq_match = re.search(
730
+ r"<function=(\w+)\s*(\{.*?\})\s*(?:>|</function>)",
731
  content,
732
+ re.DOTALL
733
  )
734
 
735
+ if groq_match:
736
  try:
737
+ tool_name = groq_match.group(1).strip()
738
+ json_str = groq_match.group(2).strip()
739
 
740
+ # Unescape unicode and clean up
741
+ json_str = json_str.encode().decode('unicode_escape')
742
+
743
+ tool_input = json.loads(json_str)
744
+ print(f"πŸ”§ Fallback: Parsed Groq format for '{tool_name}'")
745
+
746
+ except Exception as e:
747
+ print(f"⚠️ Fallback: Failed to parse Groq format: {e}")
748
+ tool_name = None
749
+
750
+ # STRATEGY 2: Try original <function(tool_name)>...{json_string}... format
751
+ if not tool_name:
752
+ func_match = re.search(
753
+ r"<function[(=]\s*([^)]+)\s*[)>](.*)",
754
+ content,
755
+ re.DOTALL | re.IGNORECASE
756
+ )
757
+
758
+ if func_match:
759
  try:
760
+ tool_name = func_match.group(1).strip().replace("'", "").replace('"', '')
761
+ remaining_content = func_match.group(2)
762
+
763
+ json_start_index = remaining_content.find('{')
764
+ if json_start_index != -1:
765
+ json_str = remaining_content[json_start_index:]
766
+ cleaned_str = json_str.strip()
767
+ cleaned_str = ''.join(c for c in cleaned_str if c.isprintable() or c in '\n\r\t')
768
+ cleaned_str = cleaned_str.strip().rstrip(',')
769
+
770
+ tool_input = json.loads(cleaned_str)
771
+ print(f"πŸ”§ Fallback: Parsed standard format for '{tool_name}'")
772
+ else:
773
+ print(f"⚠️ Fallback: Found <function> but no JSON blob.")
774
+ tool_name = None
775
+
776
+ except json.JSONDecodeError as e:
777
+ print(f"⚠️ Fallback: json.loads failed, trying ast.literal_eval.")
778
+ try:
779
+ if cleaned_str:
780
+ potential_input = ast.literal_eval(cleaned_str)
781
+ if isinstance(potential_input, dict):
782
+ tool_input = potential_input
783
+ print(f"πŸ”§ Fallback: Parsed with ast.literal_eval for '{tool_name}'")
784
+ else:
785
+ tool_name = None
786
  else:
787
  tool_name = None
788
+ except:
789
  tool_name = None
 
 
790
 
791
+ # STRATEGY 3: Look for simple tool mentions and create default calls
792
+ if not tool_name and content:
793
+ # Look for tool name mentions
794
+ for tool in tools:
795
+ if tool.name in content.lower():
796
+ tool_name = tool.name
797
+ # Create minimal valid input
798
+ tool_input = {}
799
+ if tool.args_schema:
800
+ schema = tool.args_schema.model_json_schema()
801
+ for prop, details in schema.get('properties', {}).items():
802
+ if prop in schema.get('required', []):
803
+ # Extract value from content if possible
804
+ tool_input[prop] = "summarized_input"
805
+ print(f"πŸ”§ Fallback: Created default call for mentioned tool '{tool_name}'")
806
+ break
807
+
808
+ # FINAL VALIDATION
809
  if tool_name and tool_input is not None:
810
  if any(t.name == tool_name for t in tools):
811
  tool_call = ToolCall(
 
1034
  )
1035
  messages_to_send.append(hint)
1036
 
1037
+ # Invoke LLM with better error handling
1038
  max_retries = 3
1039
  ai_message = None
1040
  for attempt in range(max_retries):
 
1042
  ai_message = self.llm_with_tools.invoke(messages_to_send)
1043
  break
1044
  except Exception as e:
1045
+ error_str = str(e)
1046
+ print(f"⚠️ LLM attempt {attempt+1}/{max_retries} failed: {error_str[:200]}")
1047
+
1048
+ # If it's a tool_use_failed error, try without forcing tools
1049
+ if "tool_use_failed" in error_str and attempt < max_retries - 1:
1050
+ print("πŸ”§ Retrying without strict tool enforcement...")
1051
+ try:
1052
+ # Try with a simpler LLM call
1053
+ simple_llm = ChatGroq(
1054
+ temperature=0,
1055
+ groq_api_key=os.getenv("GROQ_API_KEY"),
1056
+ model_name="llama-3.3-70b-versatile",
1057
+ max_tokens=4096,
1058
+ timeout=60
1059
+ )
1060
+ ai_message = simple_llm.invoke(messages_to_send)
1061
+ # Manually parse for tool calls
1062
+ if ai_message.content:
1063
+ parsed_calls = parse_tool_call_from_string(ai_message.content, self.tools)
1064
+ if parsed_calls:
1065
+ ai_message.tool_calls = parsed_calls
1066
+ ai_message.content = ""
1067
+ break
1068
+ except Exception as e2:
1069
+ print(f"⚠️ Simple LLM also failed: {e2}")
1070
+
1071
  if attempt == max_retries - 1:
1072
+ # Last resort: return a message asking to proceed differently
1073
  ai_message = AIMessage(
1074
+ content="I need to approach this differently. Let me try a more direct method."
1075
  )
1076
+ else:
1077
+ time.sleep(2 ** attempt)
1078
 
1079
  # Fallback Parsing
1080
  if not ai_message.tool_calls and isinstance(ai_message.content, str) and ai_message.content.strip():
 
1145
 
1146
  self.graph = graph_builder.compile()
1147
  print("βœ… Planning & Reflection Agent graph compiled successfully.")
1148
+
 
1149
 
1150
  def __call__(self, question: str) -> str:
1151
  print(f"\n--- Starting Agent Run for Question ---")