mohantest committed on
Commit
15f5efd
·
verified ·
1 Parent(s): 9597b31

Update agent.py

Browse files
Files changed (1) hide show
  1. agent.py +48 -23
agent.py CHANGED
@@ -5,29 +5,54 @@ from dotenv import load_dotenv
5
  # Load environment variables (API keys)
6
  load_dotenv()
7
 
8
# Initialize the model.
# The evaluation runner may use OpenAI gpt-4o-mini (as seen in the logs);
# ensure OPENAI_API_KEY is set in the environment or a .env file.
model = OpenAIModel(model_id="gpt-4o-mini")

# You can also use HfApiModel if you prefer Hugging Face models:
# model = HfApiModel(model_id="Qwen/Qwen2.5-Coder-32B-Instruct")

# Define the web-search tool.
search_tool = DuckDuckGoSearchTool()

# Initialize the agent.
# CodeAgent is generally more powerful for answering questions because
# it can execute code to verify facts.
agent = CodeAgent(
    tools=[search_tool],
    model=model,
    add_base_tools=True,  # adds tools like 'image_generation', 'transcriber', etc. if needed
)

# This is the object the evaluation runner will likely import,
# e.g. `from myagent import agent`.
if __name__ == "__main__":
    # Test your agent locally.
    prompt = "How many studio albums were published by Mercedes Sosa?"
    response = agent.run(prompt)
    print(f"Agent Response: {response}")
 
5
  # Load environment variables (API keys)
6
  load_dotenv()
7
 
8
class CustomAgent:
    """Callable wrapper around a smolagents CodeAgent for the evaluation runner.

    The runner (app.py) instantiates this class and calls the instance with a
    question string; the call MUST return a plain string answer.
    """

    def __init__(self, model_id: str = "gpt-4o-mini", max_steps: int = 10):
        """Build the model, tools and underlying CodeAgent.

        Args:
            model_id: OpenAI model to use when OPENAI_API_KEY is available
                (default matches the model seen in the evaluation logs).
            max_steps: Upper bound on the agent's reasoning/tool steps.
        """
        # Hugging Face provides OPENAI_API_KEY as a Space secret.
        api_key = os.getenv("OPENAI_API_KEY")

        if api_key:
            # Preferred path: OpenAI model.
            self.model = OpenAIModel(model_id=model_id, api_key=api_key)
        else:
            # Fallback to a Hugging Face hosted model if no OpenAI key is set.
            self.model = HfApiModel(model_id="Qwen/Qwen2.5-Coder-32B-Instruct")

        self.search_tool = DuckDuckGoSearchTool()

        # CodeAgent can execute code to verify facts, which makes it the
        # best choice for the final project.
        self.agent = CodeAgent(
            tools=[self.search_tool],
            model=self.model,
            add_base_tools=True,
            max_steps=max_steps,
        )

    def __call__(self, question: str) -> str:
        """Answer *question* via the wrapped agent.

        Method called by the evaluation runner (app.py). It MUST return a
        string, even on failure.
        """
        try:
            # Defensive check: coerce non-string questions to str.
            if not isinstance(question, str):
                question = str(question)

            # Run the agent and capture the result.
            result = self.agent.run(question)

            # CRITICAL: always return a string.
            # agent.run may return a list; join its items if so.
            if isinstance(result, list):
                return " ".join(map(str, result))

            return str(result)

        except Exception as e:
            # Catch and return the error as text so a single failure does
            # not break the whole evaluation loop.
            return f"Agent Error: {str(e)}"
54
 
55
# Standard entry point for local testing.
if __name__ == "__main__":
    my_agent = CustomAgent()
    answer = my_agent("What is the capital of France?")
    print(answer)