AC-Angelo93 committed on
Commit
1003bb3
·
verified ·
1 Parent(s): b40a2d7

Update agent.py

Browse files
Files changed (1) hide show
  1. agent.py +65 -53
agent.py CHANGED
@@ -1,71 +1,83 @@
1
  # agent.py
2
 
3
- import os, requests
4
-
 
 
5
  from retrieval import retrieve_examples, wiki_context
6
 
7
- # Load your system prompt once
8
  SYSTEM_PROMPT = open("system_prompt.txt", "r").read().strip()
9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  class BasicAgent:
11
  def __init__(self):
12
- print("🔧 Initializing BasicAgent…")
 
13
  self.hf_token = os.environ.get("HF_TOKEN")
14
- assert self.hf_token, "HF_TOKEN not set in Secrets!"
15
- self.model_id = "HuggingFaceH4/zephyr-7b-beta"
16
- self.api_url = f"https://api-inference.huggingface.co/models/{self.model_id}"
17
- self.headers = {
18
- "Authorization": f"Bearer {self.hf_token}",
19
- "Content-Type": "application/json"
20
- }
21
 
22
- def should_use_wiki(self, q: str) -> bool:
23
- kws = ["who is", "what is", "when was", "where is", "capital", "continent", "studio album"]
24
- return any(kw in q.lower() for kw in kws)
25
-
26
- def __call__(self, question: str) -> str:
27
- print(f"📝 Question: {question}")
28
-
29
- # 1) Retrieve few-shot examples
30
- few_shot = retrieve_examples(question)
31
 
32
- # 2) Optionally retrieve wiki facts
33
- wiki_snip = ""
34
- if self.should_use_wiki(question):
35
- try:
36
- wiki_snip = wiki_context(question) + "\n\n"
37
- except Exception as e:
38
- print(f"⚠️ Wiki failed: {e}")
39
 
40
- # 3) Build the final prompt
41
- prompt = (
42
- f"[INST] {SYSTEM_PROMPT}\n\n"
43
- f"{wiki_snip}"
44
- f"### Few-Shot Examples ###\n{few_shot}\n\n"
45
- f"### Question ###\n{question}\n\n"
46
- "[/INST]"
 
47
  )
48
 
49
- # 4) Call the LLM
50
- payload = {
51
- "inputs": prompt,
52
- "parameters": {
53
- "temperature": 0.2,
54
- "max_new_tokens": 250,
55
- "return_full_text": False
56
- }
57
- }
58
  try:
59
- r = requests.post(self.api_url, headers=self.headers, json=payload, timeout=30)
60
- r.raise_for_status()
61
- out = r.json()
62
- txt = out[0].get("generated_text", "").strip()
63
  except Exception as e:
64
- print(f"❌ LLM call failed: {e}")
65
  return f"Error: {e}"
66
 
67
- # 5) Extract only the final answer
68
- if "FINAL ANSWER:" in txt:
69
- return txt.split("FINAL ANSWER:")[-1].strip()
70
- # fallback: last line
71
- return txt.splitlines()[-1].strip()
 
 
1
  # agent.py
2
 
3
+ import os
4
+ from langchain.tools import tool
5
+ from langchain import HuggingFaceHub
6
+ from langchain.agents import initialize_agent, AgentType
7
  from retrieval import retrieve_examples, wiki_context
8
 
9
# Load the system prompt once at import time.  A context manager guarantees
# the file handle is closed promptly, and an explicit encoding avoids
# locale-dependent decoding surprises on different hosts.
with open("system_prompt.txt", "r", encoding="utf-8") as _fh:
    SYSTEM_PROMPT = _fh.read().strip()
11
 
12
+ # --- 1) Define your tools with @tool ---
13
+
14
@tool
def few_shot_retriever(query: str) -> str:
    """
    Returns a few-shot snippet (top-k examples) for this question.
    """
    # NOTE: the docstring doubles as the tool description handed to the LLM
    # by langchain, so its text is kept verbatim.
    snippet = retrieve_examples(query, k=3)
    return snippet
20
+
21
@tool
def wiki_search(query: str) -> str:
    """
    Returns concatenated Wikipedia pages for factual lookup.
    """
    # NOTE: the docstring doubles as the tool description handed to the LLM
    # by langchain, so its text is kept verbatim.
    article = wiki_context(query, max_docs=1)
    return article
27
+
28
@tool
def calculator(expression: str) -> str:
    """
    Safely evaluates a math expression and returns the result.

    Only numeric literals, the binary operators + - * / // % **, and unary
    +/- are permitted.  Anything else (names, attribute access, calls) is
    rejected, so untrusted input cannot execute code.  Errors are reported
    as an "Error: ..." string rather than raised, matching the other tools.
    """
    # `eval`, even with empty __builtins__, is escapable via attribute
    # chains like ().__class__.__mro__...; walk the AST ourselves instead.
    import ast
    import operator as _op

    ops = {
        ast.Add: _op.add, ast.Sub: _op.sub, ast.Mult: _op.mul,
        ast.Div: _op.truediv, ast.FloorDiv: _op.floordiv,
        ast.Mod: _op.mod, ast.Pow: _op.pow,
        ast.UAdd: _op.pos, ast.USub: _op.neg,
    }

    def _eval(node):
        # Numbers only — bools/strings/None are deliberately rejected.
        if isinstance(node, ast.Constant) and type(node.value) in (int, float):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in ops:
            return ops[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in ops:
            return ops[type(node.op)](_eval(node.operand))
        raise ValueError("unsupported expression element")

    try:
        result = _eval(ast.parse(expression, mode="eval").body)
        return str(result)
    except Exception as e:
        return f"Error: {e}"
39
+
40
+ # --- 2) Build your agent class ---
41
+
42
class BasicAgent:
    """
    Tool-using ReAct agent.

    Wraps a HuggingFaceHub-hosted LLM with the few-shot retriever, wiki
    lookup and calculator tools, and extracts only the text after
    "FINAL ANSWER:" from the agent's output.
    """

    def __init__(self):
        print("🔧 Initializing tool-using agent…")
        # Read your HF_TOKEN from Secrets.  Fail fast with a real exception —
        # an `assert` would be silently stripped under `python -O`.
        self.hf_token = os.environ.get("HF_TOKEN")
        if not self.hf_token:
            raise RuntimeError("HF_TOKEN environment variable not set!")

        # 2a) LLM binding via HuggingFaceHub
        self.llm = HuggingFaceHub(
            repo_id="HuggingFaceH4/zephyr-7b-beta",
            model_kwargs={"temperature": 0.2, "max_new_tokens": 250},
            huggingfacehub_api_token=self.hf_token,
        )

        # 2b) Collect the decorated tools
        self.tools = [few_shot_retriever, wiki_search, calculator]

        # 2c) Initialize a React-style agent.  max_iterations bounds runaway
        # tool loops; early_stopping_method="generate" still produces an
        # answer when that limit is hit.
        self.agent = initialize_agent(
            self.tools,
            self.llm,
            agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
            verbose=False,
            max_iterations=3,
            early_stopping_method="generate",
        )

    def __call__(self, question: str) -> str:
        """
        Run the agent on *question* and return only the final answer text.

        Returns an "Error: ..." string instead of raising when the agent
        call fails, matching the tools' error-reporting convention.
        """
        # Prepend your system_prompt for clean FINAL ANSWER formatting
        prompt = f"{SYSTEM_PROMPT}\n\nQUESTION: {question}"
        try:
            answer = self.agent.run(prompt)
        except Exception as e:
            print(f"❌ Agent.run() error: {e}")
            return f"Error: {e}"

        # The agent should already obey your "FINAL ANSWER:" rule,
        # but just in case, strip out everything before it:
        if "FINAL ANSWER:" in answer:
            return answer.split("FINAL ANSWER:")[-1].strip()
        # fallback to last line
        return answer.splitlines()[-1].strip()