Chris committed on
Commit
73eb248
·
1 Parent(s): 6afa67b

Final 7.7.3

Browse files
Files changed (1) hide show
  1. src/agents/web_researcher.py +6 -6
src/agents/web_researcher.py CHANGED
@@ -385,8 +385,8 @@ class WebResearchAgent:
385
  Please provide a direct, precise answer based on the research findings.
386
  """
387
 
388
- # Use appropriate model for synthesis
389
- model_tier = ModelTier.COMPLEX if len(results) > 2 else ModelTier.MAIN
390
  llm_result = self.llm_client.generate(synthesis_prompt, tier=model_tier, max_tokens=300)
391
 
392
  avg_confidence = sum(confidence_scores) / len(confidence_scores) if confidence_scores else 0.5
@@ -502,7 +502,7 @@ class WebResearchAgent:
502
  """
503
 
504
  # Use appropriate model tier
505
- model_tier = ModelTier.MAIN if state.complexity_assessment == "complex" else ModelTier.ROUTER
506
  llm_result = self.llm_client.generate(analysis_prompt, tier=model_tier, max_tokens=400)
507
 
508
  if llm_result.success:
@@ -607,7 +607,7 @@ class WebResearchAgent:
607
  analysis_prompt = self._create_enhanced_analysis_prompt(state.question, search_data, search_terms)
608
 
609
  # Use appropriate model tier based on complexity
610
- model_tier = ModelTier.COMPLEX if state.complexity_assessment == "complex" else ModelTier.MAIN
611
  llm_result = self.llm_client.generate(analysis_prompt, tier=model_tier, max_tokens=600)
612
 
613
  if llm_result.success:
@@ -988,7 +988,7 @@ Provide your analysis and answer:"""
988
  Please provide a direct, accurate answer.
989
  """
990
 
991
- model_tier = ModelTier.MAIN if len(state.question) > 100 else ModelTier.ROUTER
992
  llm_result = self.llm_client.generate(analysis_prompt, tier=model_tier, max_tokens=300)
993
 
994
  if llm_result.success:
@@ -1129,7 +1129,7 @@ Provide your analysis and answer:"""
1129
  Please provide a direct answer based on the page content.
1130
  """
1131
 
1132
- model_tier = ModelTier.MAIN
1133
  llm_result = self.llm_client.generate(analysis_prompt, tier=model_tier, max_tokens=400)
1134
 
1135
  if llm_result.success:
 
385
  Please provide a direct, precise answer based on the research findings.
386
  """
387
 
388
+ # Use appropriate model tier
389
+ model_tier = ModelTier.COMPLEX # Always use 72B model for best performance
390
  llm_result = self.llm_client.generate(synthesis_prompt, tier=model_tier, max_tokens=300)
391
 
392
  avg_confidence = sum(confidence_scores) / len(confidence_scores) if confidence_scores else 0.5
 
502
  """
503
 
504
  # Use appropriate model tier
505
+ model_tier = ModelTier.COMPLEX # Always use 72B model for best performance
506
  llm_result = self.llm_client.generate(analysis_prompt, tier=model_tier, max_tokens=400)
507
 
508
  if llm_result.success:
 
607
  analysis_prompt = self._create_enhanced_analysis_prompt(state.question, search_data, search_terms)
608
 
609
  # Use appropriate model tier based on complexity
610
+ model_tier = ModelTier.COMPLEX # Always use 72B model for best performance
611
  llm_result = self.llm_client.generate(analysis_prompt, tier=model_tier, max_tokens=600)
612
 
613
  if llm_result.success:
 
988
  Please provide a direct, accurate answer.
989
  """
990
 
991
+ model_tier = ModelTier.COMPLEX # Always use 72B model for best performance
992
  llm_result = self.llm_client.generate(analysis_prompt, tier=model_tier, max_tokens=300)
993
 
994
  if llm_result.success:
 
1129
  Please provide a direct answer based on the page content.
1130
  """
1131
 
1132
+ model_tier = ModelTier.COMPLEX # Always use 72B model for best performance
1133
  llm_result = self.llm_client.generate(analysis_prompt, tier=model_tier, max_tokens=400)
1134
 
1135
  if llm_result.success: