Upload 2 files
- agent.py +152 -25
- requirements.txt +5 -0
agent.py
CHANGED
@@ -23,6 +23,22 @@ from langchain_anthropic import ChatAnthropic
 from supabase.client import Client, create_client
 
 
+import re
+from langchain_community.document_loaders import WikipediaLoader
+from langchain_core.tools import tool
+
+
+from langchain_core.tools import tool
+from youtube_transcript_api import YouTubeTranscriptApi, TranscriptsDisabled, NoTranscriptFound
+
+from langchain_core.tools import tool
+from transformers import pipeline
+
+
+import sys
+
+# Before invoking your graph:
+sys.setrecursionlimit(100)  # Increase from default 25
 
 load_dotenv()
 
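A note on the recursion-limit lines above: sys.setrecursionlimit changes CPython's interpreter limit, whose default is 1000, so setting it to 100 actually lowers it. The "default 25" in the comment matches LangGraph's per-run recursion_limit, which is raised through the run config instead. A minimal sketch, assuming the compiled graph object is named graph and uses a messages-style state:

    # LangGraph reads "recursion_limit" from the run config (its default is 25);
    # this does not touch Python's own sys recursion limit.
    result = graph.invoke(
        {"messages": [("user", "What is 2 + 2?")]},
        config={"recursion_limit": 100},
    )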
@@ -80,39 +96,32 @@ def modulus(a: int, b: int) -> int:
 
 @tool
 def wiki_search(query: str) -> str:
-    """Search Wikipedia for a query and return
-    …
-            f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}">\n{content}\n</Document>'
-        )
-    return {"wiki_results": "\n\n---\n\n".join(summaries)}
-
+    """Search Wikipedia for a query and return maximum 2 results.
+
+    Args:
+        query: The search query."""
+    search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
+    formatted_search_docs = "\n\n---\n\n".join(
+        [
+            f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
+            for doc in search_docs
+        ])
+    return {"wiki_results": formatted_search_docs}
 
 
 
 @tool
 def web_search(query: str) -> str:
-    """Search Tavily for a query and return maximum 3 results.
+    """Search Tavily for a query and return maximum 3 results.
+
+    Args:
+        query: The search query."""
     search_docs = TavilySearch(max_results=3).invoke(query=query)
-    if not search_docs:
-        return "No web search results found."
-
     formatted_search_docs = "\n\n---\n\n".join(
         [
-            f'<Document source="{doc.metadata…
-            f'{doc.page_content[:1000]}'  # truncate to 1000 chars if needed
-            f'\n</Document>'
+            f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
             for doc in search_docs
-        ]
-    )
+        ])
     return {"web_results": formatted_search_docs}
 
 
@@ -129,6 +138,36 @@ def arvix_search(query: str) -> str:
             for doc in search_docs
         ])
     return {"arvix_results": formatted_search_docs}
+
+
+@tool
+def filtered_wiki_search(query: str, start_year: int = None, end_year: int = None) -> dict:
+    """Search Wikipedia for a query and filter results by year if provided."""
+    search_docs = WikipediaLoader(query=query, load_max_docs=5).load()
+
+    def contains_year(text, start, end):
+        years = re.findall(r'\b(19\d{2}|20\d{2})\b', text)
+        for y in years:
+            y_int = int(y)
+            if start <= y_int <= end:
+                return True
+        return False
+
+    filtered_docs = []
+    for doc in search_docs:
+        if start_year and end_year:
+            if contains_year(doc.page_content, start_year, end_year):
+                filtered_docs.append(doc)
+        else:
+            filtered_docs.append(doc)
+
+    formatted_search_docs = "\n\n---\n\n".join(
+        [
+            f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
+            for doc in filtered_docs
+        ])
+    return {"wiki_results": formatted_search_docs}
+
 
 
 @tool
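A hedged usage sketch for the new tool (the query and years are illustrative); note that filtering only applies when both start_year and end_year are passed, because of the `if start_year and end_year` check:

    # LangChain tools with multiple arguments take a dict via .invoke().
    out = filtered_wiki_search.invoke(
        {"query": "Summer Olympics", "start_year": 2000, "end_year": 2012}
    )
    print(out["wiki_results"][:200])  # formatted, year-filtered excerpts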
@@ -141,6 +180,87 @@ def wolfram_alpha_query(query: str) -> str:
         return next(res.results).text
     except StopIteration:
         return "No result found."
+
+
+
+
+@tool
+def youtube_transcript(url: str) -> str:
+    """Fetch YouTube transcript text from a video URL."""
+    try:
+        video_id = url.split("v=")[-1].split("&")[0]
+        transcript_list = YouTubeTranscriptApi.get_transcript(video_id)
+        transcript = " ".join([segment['text'] for segment in transcript_list])
+        return transcript
+    except (TranscriptsDisabled, NoTranscriptFound):
+        return "Transcript not available for this video."
+    except Exception as e:
+        return f"Error fetching transcript: {str(e)}"
+
+
+
+
+
+translator = pipeline("translation", model="Helsinki-NLP/opus-mt-mul-en")
+
+@tool
+def translate_to_english(text: str) -> str:
+    """Translate input text in any language to English."""
+    try:
+        # HuggingFace translation expects a list of strings
+        translated = translator(text, max_length=512)
+        return translated[0]['translation_text']
+    except Exception as e:
+        return f"Translation error: {str(e)}"
+
+
+
+from langchain_core.tools import tool
+import sympy
+
+@tool
+def solve_algebraic_expression(expression: str) -> str:
+    """Solve or simplify the given algebraic expression."""
+    try:
+        expr = sympy.sympify(expression)
+        simplified = sympy.simplify(expr)
+        return str(simplified)
+    except Exception as e:
+        return f"Error solving expression: {str(e)}"
+
+
+from langchain_core.tools import tool
+
+@tool
+def run_python_code(code: str) -> str:
+    """Execute python code and return the result of variable 'result' if defined."""
+    try:
+        local_vars = {}
+        exec(code, {}, local_vars)
+        if 'result' in local_vars:
+            return str(local_vars['result'])
+        else:
+            return "Code executed successfully but no 'result' variable found."
+    except Exception as e:
+        return f"Error executing code: {str(e)}"
+
+
+from langchain_core.tools import tool
+import requests
+
+@tool
+def wikidata_query(sparql_query: str) -> str:
+    """Run a SPARQL query against Wikidata and return the JSON results."""
+    endpoint = "https://query.wikidata.org/sparql"
+    headers = {"Accept": "application/sparql-results+json"}
+    try:
+        response = requests.get(endpoint, params={"query": sparql_query}, headers=headers)
+        response.raise_for_status()
+        data = response.json()
+        return str(data)  # Or format as needed
+    except Exception as e:
+        return f"Error querying Wikidata: {str(e)}"
+
 
 
 
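One caveat on youtube_transcript: YouTubeTranscriptApi.get_transcript is the classmethod interface of youtube-transcript-api releases before 1.0; the 1.x releases moved to an instance API. A version-tolerant sketch, where the fallback branch is an assumption about the 1.x interface and should be verified against the installed release:

    def fetch_transcript_text(video_id: str) -> str:
        # Pre-1.0 API: returns a list of {"text": ...} dicts.
        if hasattr(YouTubeTranscriptApi, "get_transcript"):
            segments = YouTubeTranscriptApi.get_transcript(video_id)
            return " ".join(seg["text"] for seg in segments)
        # Assumed 1.x API: fetch() yields snippet objects with a .text attribute.
        fetched = YouTubeTranscriptApi().fetch(video_id)
        return " ".join(snippet.text for snippet in fetched)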
@@ -172,16 +292,23 @@ retriever_tool = create_retriever_tool(
 
 
 tools = [
+
     multiply,
     add,
     subtract,
     divide,
     modulus,
     wiki_search,
+    filtered_wiki_search,
     web_search,
     arvix_search,
     wolfram_alpha_query,
-    retriever_tool
+    retriever_tool,
+    youtube_transcript,
+    translate_to_english,
+    solve_algebraic_expression,
+    run_python_code,
+    wikidata_query
 ]
 
 # Build graph function
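A quick sanity check for run_python_code (the code string is illustrative; note that exec runs with no sandboxing here, so the tool should only see trusted input):

    # The tool returns the value of a variable named `result`, if the code defines one.
    out = run_python_code.invoke({"code": "result = sum(range(10))"})
    print(out)  # "45"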
requirements.txt
CHANGED
@@ -21,3 +21,8 @@ python-dotenv
 sentence-transformers
 openai
 anthropic
+
+youtube-transcript-api
+transformers
+sympy
+requests
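Since these entries are unpinned, pip will pull the latest releases; if the agent code relies on the pre-1.0 YouTubeTranscriptApi.get_transcript classmethod, pinning is the safer option. A sketch (the exact version is an assumption and should match whatever the code was tested against):

    youtube-transcript-api==0.6.2  # a pre-1.0 release with the classmethod API; verify before pinning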