junaid17 committed on
Commit
ef1f54c
·
verified ·
1 Parent(s): aa4ce92

Update tools.py

Browse files
Files changed (1) hide show
  1. tools.py +45 -185
tools.py CHANGED
@@ -5,7 +5,6 @@ from langchain_community.document_loaders import PyPDFLoader
5
  from langchain_openai import OpenAIEmbeddings
6
  from langchain_community.tools import WikipediaQueryRun, ArxivQueryRun
7
  from langchain_community.utilities import WikipediaAPIWrapper, ArxivAPIWrapper
8
- from langchain_core.tools import tool
9
  from langchain_community.tools.tavily_search import TavilySearchResults
10
  from dotenv import load_dotenv
11
  import os
@@ -13,17 +12,34 @@ import requests
13
 
14
  load_dotenv()
15
 
16
- API_KEY = os.getenv("ALPHAVANTAGE_API_KEY")
17
- NEWS_API_KEY = os.getenv("NEWS_API_KEY")
18
- WEATHER_API_KEY = os.getenv("WEATHER_API_KEY")
19
- NEWS_API_KEY = os.getenv("NEWS_API_KEY")
 
20
 
21
- # -------------------------------
22
  # GLOBAL RETRIEVER
23
- # -------------------------------
24
  retriever = None
25
 
26
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  def build_vectorstore(path: str):
28
  loader = PyPDFLoader(path)
29
  docs = loader.load()
@@ -36,7 +52,10 @@ def build_vectorstore(path: str):
36
  split_docs = splitter.split_documents(docs)
37
 
38
  embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
39
- return FAISS.from_documents(split_docs, embeddings)
 
 
 
40
 
41
 
42
  def update_retriever(pdf_path: str):
@@ -45,9 +64,9 @@ def update_retriever(pdf_path: str):
45
  retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
46
 
47
 
48
- # -------------------------------
49
- # RAG TOOL
50
- # -------------------------------
51
  def create_rag_tool():
52
 
53
  @tool
@@ -55,208 +74,49 @@ def create_rag_tool():
55
  """
56
  Retrieve relevant information from uploaded documents.
57
  """
 
 
58
  if retriever is None:
59
- return "No document uploaded yet."
 
 
 
60
 
61
  docs = retriever.invoke(query)
62
 
63
  if not docs:
64
- return "No relevant information found."
65
 
66
  return "\n\n".join(d.page_content for d in docs)
67
 
68
  return rag_search
69
 
 
 
 
70
  @tool
71
  def arxiv_search(query: str) -> dict:
72
- """
73
- Search arXiv for academic papers related to the query.
74
- """
75
  try:
76
  arxiv = ArxivQueryRun(api_wrapper=ArxivAPIWrapper())
77
- results = arxiv.run(query)
78
- return {"query": query, "results": results}
79
- except Exception as e:
80
- return {"error": str(e)}
81
-
82
- @tool
83
- def calculator(first_num: float, second_num: float, operation: str) -> dict:
84
- """
85
- Perform a basic arithmetic operation on two numbers.
86
- Supported operations: add, sub, mul, div
87
- """
88
- try:
89
- if operation == "add":
90
- result = first_num + second_num
91
- elif operation == "sub":
92
- result = first_num - second_num
93
- elif operation == "mul":
94
- result = first_num * second_num
95
- elif operation == "div":
96
- if second_num == 0:
97
- return {"error": "Division by zero is not allowed"}
98
- result = first_num / second_num
99
- else:
100
- return {"error": f"Unsupported operation '{operation}'"}
101
-
102
- return {"first_num": first_num, "second_num": second_num, "operation": operation, "result": result}
103
- except Exception as e:
104
- return {"error": str(e)}
105
- @tool
106
- def tavily_search(query: str) -> dict:
107
- """
108
- Perform a web search using Tavily,
109
- also use it to get weather information,
110
- Returns up to 5 search results.
111
- """
112
- try:
113
- search = TavilySearchResults(max_results=5)
114
- results = search.run(query)
115
- return {"query": query, "results": results}
116
  except Exception as e:
117
  return {"error": str(e)}
118
 
119
 
120
- @tool
121
- def get_stock_price(symbol: str) -> dict:
122
- """
123
- Fetch latest stock price for a given symbol (e.g. 'AAPL', 'TSLA')
124
- using Alpha Vantage with API key in the URL.
125
- """
126
- url = f"https://www.alphavantage.co/query?function=GLOBAL_QUOTE&symbol={symbol}&apikey={API_KEY}"
127
- r = requests.get(url)
128
- return r.json()
129
-
130
  @tool
131
  def wikipedia_search(query: str) -> dict:
132
- """
133
- Search Wikipedia for a given query and return results.
134
- """
135
  try:
136
  wiki = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())
137
- results = wiki.run(query)
138
- return {"query": query, "results": results}
139
- except Exception as e:
140
- return {"error": str(e)}
141
-
142
- @tool
143
- def convert_currency(amount: float, from_currency: str, to_currency: str) -> dict:
144
- """
145
- Convert amount from one currency to another using Frankfurter API.
146
- Example: convert_currency(100, "USD", "EUR")
147
- """
148
- try:
149
- url = f"https://api.frankfurter.app/latest?amount={amount}&from={from_currency}&to={to_currency}"
150
- r = requests.get(url)
151
- return r.json()
152
- except Exception as e:
153
- return {"error": str(e)}
154
- @tool
155
-
156
-
157
- def unit_converter(value: float, from_unit: str, to_unit: str) -> dict:
158
- """
159
- Convert between metric/imperial units (supports: km<->miles, kg<->lbs, C<->F).
160
- Example: unit_converter(10, "km", "miles")
161
- """
162
- try:
163
- conversions = {
164
- ("km", "miles"): lambda x: x * 0.621371,
165
- ("miles", "km"): lambda x: x / 0.621371,
166
- ("kg", "lbs"): lambda x: x * 2.20462,
167
- ("lbs", "kg"): lambda x: x / 2.20462,
168
- ("C", "F"): lambda x: (x * 9/5) + 32,
169
- ("F", "C"): lambda x: (x - 32) * 5/9
170
- }
171
- if (from_unit, to_unit) not in conversions:
172
- return {"error": f"Unsupported conversion: {from_unit} -> {to_unit}"}
173
- result = conversions[(from_unit, to_unit)](value)
174
- return {"value": value, "from": from_unit, "to": to_unit, "result": result}
175
- except Exception as e:
176
- return {"error": str(e)}
177
-
178
-
179
-
180
- @tool
181
- def get_news(query: str) -> dict:
182
- """
183
- Fetch latest news headlines for a given query.
184
- Example: get_news("artificial intelligence")
185
- """
186
- try:
187
- url = f"https://newsapi.org/v2/everything?q={query}&apiKey={NEWS_API_KEY}&language=en"
188
- r = requests.get(url)
189
- return r.json()
190
- except Exception as e:
191
- return {"error": str(e)}
192
-
193
-
194
- @tool
195
- def get_joke(category: str = "Any") -> dict:
196
- """
197
- Get a random joke. Categories: Programming, Misc, Pun, Spooky, Christmas, Any
198
- Example: get_joke("Programming")
199
- """
200
- try:
201
- url = f"https://v2.jokeapi.dev/joke/{category}"
202
- r = requests.get(url)
203
- return r.json()
204
  except Exception as e:
205
  return {"error": str(e)}
206
 
207
- @tool
208
- def get_quote(tag: str = "") -> dict:
209
- """
210
- Fetch a random quote. Optionally filter by tag (e.g., 'inspirational', 'technology').
211
- Example: get_quote("inspirational")
212
- """
213
- try:
214
- url = f"https://api.quotable.io/random"
215
- if tag:
216
- url += f"?tags={tag}"
217
- r = requests.get(url)
218
- return r.json()
219
- except Exception as e:
220
- return {"error": str(e)}
221
 
222
  @tool
223
- def get_weather(city: str) -> dict:
224
- """
225
- Get current weather for a given city using WeatherAPI.com.
226
- Example: get_weather("London")
227
- """
228
  try:
229
- url = f"http://api.weatherapi.com/v1/current.json?key={WEATHER_API_KEY}&q={city}&aqi=no"
230
- r = requests.get(url)
231
- data = r.json()
232
-
233
- if "error" in data:
234
- return {"error": data["error"]["message"]}
235
-
236
- return {
237
- "location": data["location"]["name"],
238
- "country": data["location"]["country"],
239
- "temperature_c": data["current"]["temp_c"],
240
- "temperature_f": data["current"]["temp_f"],
241
- "condition": data["current"]["condition"]["text"],
242
- "humidity": data["current"]["humidity"],
243
- "wind_kph": data["current"]["wind_kph"],
244
- "wind_dir": data["current"]["wind_dir"]
245
- }
246
  except Exception as e:
247
  return {"error": str(e)}
248
-
249
-
250
 
251
- @tool
252
- def get_news(query: str) -> dict:
253
- """
254
- Fetch latest news headlines for a given query.
255
- Example: get_news("artificial intelligence")
256
- """
257
- try:
258
- url = f"https://newsapi.org/v2/everything?q={query}&apiKey={NEWS_API_KEY}&language=en"
259
- r = requests.get(url)
260
- return r.json()
261
- except Exception as e:
262
- return {"error": str(e)}
 
import os

from dotenv import load_dotenv
from langchain_community.tools import WikipediaQueryRun, ArxivQueryRun
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.utilities import WikipediaAPIWrapper, ArxivAPIWrapper
from langchain_core.tools import tool  # restored: @tool is still used by every tool below
from langchain_openai import OpenAIEmbeddings
 
12
 
13
  load_dotenv()
14
 
15
# --- Configuration -------------------------------------------------
# Directory where the FAISS index is persisted between runs.
VECTORSTORE_DIR = "data/vectorstore"
os.makedirs(VECTORSTORE_DIR, exist_ok=True)

# --- Global retriever ----------------------------------------------
# Lazily initialised by update_retriever()/load_retriever();
# stays None until a document has been indexed.
retriever = None
25
 
26
 
27
+ def load_retriever():
28
+ global retriever
29
+ try:
30
+ embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
31
+ if os.path.exists(os.path.join(VECTORSTORE_DIR, "index.faiss")):
32
+ vectorstore = FAISS.load_local(
33
+ VECTORSTORE_DIR,
34
+ embeddings,
35
+ allow_dangerous_deserialization=True
36
+ )
37
+ retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
38
+ print("✅ Vectorstore loaded from disk")
39
+ except Exception as e:
40
+ print("❌ Failed to load vectorstore:", e)
41
+
42
+
43
  def build_vectorstore(path: str):
44
  loader = PyPDFLoader(path)
45
  docs = loader.load()
 
52
  split_docs = splitter.split_documents(docs)
53
 
54
  embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
55
+ vectorstore = FAISS.from_documents(split_docs, embeddings)
56
+
57
+ vectorstore.save_local(VECTORSTORE_DIR)
58
+ return vectorstore
59
 
60
 
61
  def update_retriever(pdf_path: str):
 
64
  retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
65
 
66
 
67
+ # ==============================
68
+ # RAG TOOL (FIXED)
69
+ # ==============================
70
  def create_rag_tool():
71
 
72
  @tool
 
74
  """
75
  Retrieve relevant information from uploaded documents.
76
  """
77
+ global retriever
78
+
79
  if retriever is None:
80
+ load_retriever()
81
+
82
+ if retriever is None:
83
+ return "No document has been uploaded yet."
84
 
85
  docs = retriever.invoke(query)
86
 
87
  if not docs:
88
+ return "No relevant information found in the uploaded document."
89
 
90
  return "\n\n".join(d.page_content for d in docs)
91
 
92
  return rag_search
93
 
94
+
95
+ # ---------------- OTHER TOOLS ---------------- #
96
+
97
@tool
def arxiv_search(query: str) -> dict:
    """
    Search arXiv for academic papers related to the query.

    Args:
        query: Free-text search string forwarded to the arXiv API.

    Returns:
        {"results": <paper summaries>} on success,
        {"error": <message>} on any failure.
    """
    # NOTE: the docstring above is required — LangChain's @tool raises
    # ValueError at import time for a function with no docstring/description.
    try:
        arxiv = ArxivQueryRun(api_wrapper=ArxivAPIWrapper())
        return {"results": arxiv.run(query)}
    except Exception as e:
        # Best-effort tool: surface the failure to the agent instead of raising.
        return {"error": str(e)}
104
 
105
 
 
 
 
 
 
 
 
 
 
 
106
@tool
def wikipedia_search(query: str) -> dict:
    """
    Search Wikipedia for a given query and return results.

    Args:
        query: Free-text search string forwarded to the Wikipedia API.

    Returns:
        {"results": <article extracts>} on success,
        {"error": <message>} on any failure.
    """
    # NOTE: the docstring above is required — LangChain's @tool raises
    # ValueError at import time for a function with no docstring/description.
    try:
        wiki = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())
        return {"results": wiki.run(query)}
    except Exception as e:
        # Best-effort tool: surface the failure to the agent instead of raising.
        return {"error": str(e)}
113
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
114
 
115
@tool
def tavily_search(query: str) -> dict:
    """
    Perform a web search using Tavily; also usable for current-events
    lookups such as weather. Returns up to 5 search results.

    Args:
        query: Free-text search string forwarded to Tavily.

    Returns:
        {"results": <search results>} on success,
        {"error": <message>} on any failure.
    """
    # NOTE: the docstring above is required — LangChain's @tool raises
    # ValueError at import time for a function with no docstring/description.
    try:
        search = TavilySearchResults(max_results=5)
        return {"results": search.run(query)}
    except Exception as e:
        # Best-effort tool: surface the failure to the agent instead of raising.
        return {"error": str(e)}
 
 
122