Andrew-Gl committed on
Commit
b5dac5a
·
1 Parent(s): 75f26ef

08.09.2025

Browse files
Files changed (2) hide show
  1. app.py +3 -2
  2. statefulagent.py +11 -8
app.py CHANGED
@@ -2,7 +2,7 @@ import os
2
  import gradio as gr
3
  import requests
4
  import pandas as pd
5
- from statefulagent import AG_Agent
6
 
7
  # (Keep Constants as is)
8
  # --- Constants ---
@@ -32,7 +32,8 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
32
 
33
  # 1. Instantiate Agent ( modify this part to create your agent)
34
  try:
35
- agent = AG_Agent()
 
36
  except Exception as e:
37
  print(f"Error instantiating agent: {e}")
38
  return f"Error initializing agent: {e}", None
 
2
  import gradio as gr
3
  import requests
4
  import pandas as pd
5
+ from statefulagent import StatefulAgent
6
 
7
  # (Keep Constants as is)
8
  # --- Constants ---
 
32
 
33
  # 1. Instantiate Agent ( modify this part to create your agent)
34
  try:
35
+ # agent = StatefulAgent('meta-llama/Llama-3.3-70B-Instruct')
36
+ agent = StatefulAgent()
37
  except Exception as e:
38
  print(f"Error instantiating agent: {e}")
39
  return f"Error initializing agent: {e}", None
statefulagent.py CHANGED
@@ -2,10 +2,10 @@ import os
2
  from smolagents import CodeAgent, InferenceClientModel
3
  from a_tools import search_tool, final_answer, image_generation_tool
4
 
5
- class AG_Agent:
6
- def __init__(self):
7
  a_model = InferenceClientModel(
8
- model_id='meta-llama/Llama-3.3-70B-Instruct',
9
  max_tokens=1024,
10
  api_key=os.environ.get("HF_API_KEY"),
11
  )
@@ -15,29 +15,32 @@ class AG_Agent:
15
  model=self.model,
16
  stream_outputs=True,
17
  code_block_tags="markdown",
 
18
  additional_authorized_imports=['requests', 'bs4','pandas','numpy',
19
  'json','datetime','geopandas','shapely'])
20
  self.context = []
21
- self.max_context_len = 20
22
- self.max_steps = 15
23
 
24
  def ask(self, question):
25
  # Refresh context and add it to prompt
26
  self.context.append({"role": "user", "content": question})
27
  if len(self.context) > self.max_context_len:
28
  self.context = self.context[-self.max_context_len:]
29
- # Combine story for prompt (optional)
30
  prompt = ""
31
  for msg in self.context:
32
  if msg["role"] == "user":
33
  prompt += f"User: {msg['content']}\n"
34
  else:
35
  prompt += f"Agent: {msg['content']}\n"
36
- prompt += f"User: {question}\nAgent:"
 
37
  # Run agent with created prompt
38
  response = self.agent.run(prompt, max_steps=self.max_steps)
39
  # Add answer to history for context
40
- self.context.append({"role": "assistant", "content": response})
 
41
  if len(self.context) > self.max_context_len:
42
  self.context = self.context[-self.max_context_len:]
43
  return response
 
2
  from smolagents import CodeAgent, InferenceClientModel
3
  from a_tools import search_tool, final_answer, image_generation_tool
4
 
5
+ class StatefulAgent:
6
+ def __init__(self, model_id = 'Qwen/Qwen2.5-Coder-32B-Instruct', context_len = 0):
7
  a_model = InferenceClientModel(
8
+ model_id=model_id,
9
  max_tokens=1024,
10
  api_key=os.environ.get("HF_API_KEY"),
11
  )
 
15
  model=self.model,
16
  stream_outputs=True,
17
  code_block_tags="markdown",
18
+ use_structured_outputs_internally=True,
19
  additional_authorized_imports=['requests', 'bs4','pandas','numpy',
20
  'json','datetime','geopandas','shapely'])
21
  self.context = []
22
+ self.max_context_len = context_len
23
+ self.max_steps = 20
24
 
25
def ask(self, question):
    """Run the agent on *question*, threading recent chat history into the prompt.

    The last ``self.max_context_len`` messages are kept as a rolling window;
    the user question and the agent's answer are appended to that history.

    Args:
        question: The user's question, inserted verbatim into the prompt.

    Returns:
        Whatever ``self.agent.run`` returns for the assembled prompt.
    """
    # Record the question, then trim history to the configured window.
    self.context.append({"role": "user", "content": question})
    if len(self.context) > self.max_context_len:
        # BUG FIX: the original used self.context[-self.max_context_len:],
        # which is a no-op when max_context_len == 0 (a [-0:] slice returns
        # the whole list), so with the constructor default the history grew
        # without bound. Slicing from a computed start index trims correctly
        # for every window size >= 0.
        self.context = self.context[len(self.context) - self.max_context_len:]
    # Flatten the rolling history into a plain-text transcript.
    prompt = ""
    for msg in self.context:
        if msg["role"] == "user":
            prompt += f"User: {msg['content']}\n"
        else:
            prompt += f"Agent: {msg['content']}\n"
    # NOTE(review): these two guards test max_steps — the agent's run-step
    # budget, which is always > 0 here — so they presumably were meant to
    # test max_context_len. Left unchanged to preserve behavior; confirm
    # intent with the author. Note the question also already appears in the
    # transcript loop above, so it is emitted twice in the prompt.
    if self.max_steps > 0:
        prompt += f"User: {question}\nAgent:"
    # Run agent with the assembled prompt, bounded by the step budget.
    response = self.agent.run(prompt, max_steps=self.max_steps)
    # Append the answer to history and trim the window again.
    if self.max_steps > 0:
        self.context.append({"role": "assistant", "content": response})
    if len(self.context) > self.max_context_len:
        self.context = self.context[len(self.context) - self.max_context_len:]
    return response