lopera47 committed on
Commit
859473a
·
verified ·
1 Parent(s): 7fce82d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -7
app.py CHANGED
@@ -12,8 +12,9 @@ from langchain_huggingface import HuggingFaceEndpoint
12
  from langchain_community.chat_models import ChatHuggingFace
13
  from langchain.llms.base import LLM
14
  from huggingface_hub import InferenceClient
15
- from smolagents import Agent
16
  from smolagents.tools import WebSearchTool
 
 
17
 
18
  # (Keep Constants as is)
19
  # --- Constants ---
@@ -37,16 +38,23 @@ class BasicAgent:
37
  print("BasicAgent initialized.")
38
 
39
  # Initialize the Agent with a text generation model (Falcon 7B instruct)
40
- self.agent = Agent.from_model(
41
- model="tiiuae/falcon-7b-instruct",
42
- model_kwargs={"temperature": 0, "max_new_tokens": 512},
43
- tools=[WebSearchTool()]
 
 
 
 
44
  )
45
 
 
46
  def __call__(self, question: str) -> str:
47
- # Run the agent on the input question and return final answer
 
48
  answer = self.agent.run(question)
49
- print(answer)
 
50
  return answer.strip()
51
 
52
  def run_and_submit_all( profile: gr.OAuthProfile | None):
 
12
  from langchain_community.chat_models import ChatHuggingFace
13
  from langchain.llms.base import LLM
14
  from huggingface_hub import InferenceClient
 
15
  from smolagents.tools import WebSearchTool
16
+ from smolagents import Agent, CodeAgent, DuckDuckGoSearchTool, InferenceClientModel
17
+
18
 
19
  # (Keep Constants as is)
20
  # --- Constants ---
 
38
  print("BasicAgent initialized.")
39
 
40
  # Initialize the Agent with a text generation model (Falcon 7B instruct)
41
+ model = InferenceClientModel(
42
+ model_name="tiiuae/falcon-7b-instruct",
43
+ temperature=0,
44
+ max_new_tokens=512
45
+ )
46
+ self.agent = CodeAgent(
47
+ tools=[DuckDuckGoSearchTool()],
48
+ model=model
49
  )
50
 
51
+
52
  def __call__(self, question: str) -> str:
53
+ print(f"Agent received question (first 50 chars): {question[:50]}...")
54
+ # Run the agent to get the answer
55
  answer = self.agent.run(question)
56
+ print(f"Agent returning answer: {answer.strip()}")
57
+ # Return the answer stripped (no extra spaces/newlines)
58
  return answer.strip()
59
 
60
  def run_and_submit_all( profile: gr.OAuthProfile | None):