Kackle committed on
Commit
eeb8e52
·
verified ·
1 Parent(s): f517531

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -4
app.py CHANGED
@@ -28,10 +28,11 @@ class SlpMultiAgent:
28
 
29
  # Truncate question to avoid exceeding model context length
30
  MAX_QUESTION_LENGTH = 1000
31
- short_question = question[:MAX_QUESTION_LENGTH]
32
 
 
33
  model = OpenAIServerModel(
34
- model_id="gpt-3.5-turbo",
35
  temperature=0.0,
36
  max_tokens=1500
37
  )
@@ -47,7 +48,7 @@ class SlpMultiAgent:
47
  )
48
 
49
  manager_agent = CodeAgent(
50
- model=OpenAIServerModel("gpt-3.5-turbo"),
51
  tools=[],
52
  managed_agents=[web_agent],
53
  name="ManagerAgent",
@@ -84,7 +85,7 @@ class SlpMultiAgent:
84
  return fixed_answer
85
 
86
  def check_reasoning(final_answer, agent_memory):
87
- multimodal_model = OpenAIServerModel("gpt-4",
88
  max_tokens=1500)
89
  prompt = (
90
  f"Here is a user-given task and the agent steps: {agent_memory.get_succinct_steps()}. Now here is the plot that was made."
 
28
 
29
  # Truncate question to avoid exceeding model context length
30
  MAX_QUESTION_LENGTH = 1000
31
+ short_question = question#[:MAX_QUESTION_LENGTH]
32
 
33
+ # Use GPT-4o model with larger context window
34
  model = OpenAIServerModel(
35
+ model_id="gpt-4o",
36
  temperature=0.0,
37
  max_tokens=1500
38
  )
 
48
  )
49
 
50
  manager_agent = CodeAgent(
51
+ model=OpenAIServerModel("gpt-4o"),
52
  tools=[],
53
  managed_agents=[web_agent],
54
  name="ManagerAgent",
 
85
  return fixed_answer
86
 
87
  def check_reasoning(final_answer, agent_memory):
88
+ multimodal_model = OpenAIServerModel("gpt-4o",
89
  max_tokens=1500)
90
  prompt = (
91
  f"Here is a user-given task and the agent steps: {agent_memory.get_succinct_steps()}. Now here is the plot that was made."