Update app.py
Browse files
app.py
CHANGED
|
@@ -127,34 +127,23 @@ class BasicAgent:
|
|
| 127 |
return None
|
| 128 |
|
| 129 |
# --- Robust Wikipedia Tool ---
|
| 130 |
-
|
| 131 |
try:
|
| 132 |
-
# Step 1: Clean query and search for titles
|
| 133 |
query = query.strip(' "').replace('TOOL:', '').replace('INPUT:', '')
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
"action": "query",
|
| 137 |
-
|
| 138 |
-
"srsearch": query,
|
| 139 |
-
"format": "json",
|
| 140 |
-
"srlimit": 1
|
| 141 |
-
}
|
| 142 |
-
r = requests.get(search_url, params=params, timeout=10).json()
|
| 143 |
|
| 144 |
if not r.get("query", {}).get("search"):
|
| 145 |
-
return "No
|
| 146 |
|
| 147 |
title = r["query"]["search"][0]["title"]
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
sum_url = f"https://en.wikipedia.org{title.replace(' ', '_')}"
|
| 152 |
-
sum_r = requests.get(sum_url, timeout=10).json()
|
| 153 |
-
|
| 154 |
-
extract = sum_r.get('extract', 'No detailed summary available.')
|
| 155 |
-
return f"Information found for '{title}': {extract}"
|
| 156 |
except Exception as e:
|
| 157 |
-
return f"
|
| 158 |
|
| 159 |
def youtube_captions(self, url):
|
| 160 |
try:
|
|
@@ -168,84 +157,56 @@ class BasicAgent:
|
|
| 168 |
|
| 169 |
def execute_tool(self, tool, input_data, file_url):
|
| 170 |
input_data = input_data.strip(' "')
|
| 171 |
-
#
|
| 172 |
target = file_url if (not input_data or "http" not in input_data) else input_data
|
| 173 |
|
| 174 |
-
|
| 175 |
-
return self.wiki_search(input_data)
|
| 176 |
-
|
| 177 |
-
|
| 178 |
-
|
| 179 |
-
try:
|
| 180 |
r = requests.get(target, timeout=20)
|
| 181 |
-
|
| 182 |
-
|
| 183 |
-
|
| 184 |
-
return f"
|
| 185 |
-
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
|
| 189 |
-
|
| 190 |
-
|
| 191 |
-
|
| 192 |
-
|
| 193 |
-
|
| 194 |
-
return f"OCR Text from image: {pytesseract.image_to_string(Image.open('temp_file'))}"
|
| 195 |
-
return f"Excel Data: {pd.read_excel('temp_file').to_string()[:5000]}"
|
| 196 |
-
except Exception as e:
|
| 197 |
-
return f"File error: {str(e)}"
|
| 198 |
-
|
| 199 |
-
# Fallback for search/scrape
|
| 200 |
-
if tool == "scrape_page":
|
| 201 |
-
try:
|
| 202 |
-
r = requests.get(input_data, timeout=10)
|
| 203 |
-
return BeautifulSoup(r.text, "html.parser").get_text()[:4000]
|
| 204 |
-
except:
|
| 205 |
-
return "Could not scrape the page."
|
| 206 |
-
|
| 207 |
return f"Unknown tool: {tool}"
|
| 208 |
|
| 209 |
|
| 210 |
|
| 211 |
# 2. Handle web/search tools
|
| 212 |
|
| 213 |
-
if tool == "scrape_page":
|
| 214 |
-
try:
|
| 215 |
-
r = requests.get(input_data, timeout=15)
|
| 216 |
-
return BeautifulSoup(r.text, "html.parser").get_text()[:6000]
|
| 217 |
-
except: return "Web scraping failed."
|
| 218 |
if tool == "youtube_captions":
|
| 219 |
return self.youtube_captions(input_data)
|
| 220 |
|
| 221 |
|
| 222 |
def agent_loop(self, question, file_url):
|
| 223 |
memory = ""
|
| 224 |
-
context = f"File URL: {file_url}" if file_url else "No file provided."
|
| 225 |
-
|
| 226 |
for step in range(5):
|
| 227 |
-
prompt = f"""You are a
|
| 228 |
-
{
|
| 229 |
-
|
| 230 |
-
|
| 231 |
-
|
| 232 |
-
|
| 233 |
-
|
| 234 |
-
|
| 235 |
-
|
| 236 |
-
|
| 237 |
-
- History: {memory}
|
| 238 |
-
|
| 239 |
-
Output Format:
|
| 240 |
-
TOOL: tool_name
|
| 241 |
-
INPUT: tool_input
|
| 242 |
-
OR
|
| 243 |
-
FINAL: answer"""
|
| 244 |
|
| 245 |
response = self.client.chat.completions.create(
|
| 246 |
model="gpt-4o-mini",
|
| 247 |
temperature=0,
|
| 248 |
-
messages=[{"role": "system", "content": "You are a
|
| 249 |
{"role": "user", "content": prompt}]
|
| 250 |
)
|
| 251 |
|
|
@@ -254,18 +215,16 @@ FINAL: answer"""
|
|
| 254 |
|
| 255 |
if "FINAL:" in resp: return resp.split("FINAL:")[-1].strip()
|
| 256 |
|
| 257 |
-
|
| 258 |
-
|
| 259 |
-
|
| 260 |
-
|
| 261 |
-
|
| 262 |
-
|
| 263 |
-
|
| 264 |
-
|
| 265 |
-
|
| 266 |
-
|
| 267 |
-
except:
|
| 268 |
-
memory += f"\nStep {step}: Parsing error."
|
| 269 |
|
| 270 |
return "No answer found."
|
| 271 |
|
|
|
|
| 127 |
return None
|
| 128 |
|
| 129 |
# --- Robust Wikipedia Tool ---
|
| 130 |
+
def wiki_search(self, query):
|
| 131 |
try:
|
|
|
|
| 132 |
query = query.strip(' "').replace('TOOL:', '').replace('INPUT:', '')
|
| 133 |
+
# Step 1: Search
|
| 134 |
+
r = requests.get("https://en.wikipedia.org", params={
|
| 135 |
+
"action": "query", "list": "search", "srsearch": query, "format": "json"
|
| 136 |
+
}, timeout=10).json()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 137 |
|
| 138 |
if not r.get("query", {}).get("search"):
|
| 139 |
+
return "No Wikipedia page found. Try searching just 'Mercedes Sosa' or 'Dinosaur list'."
|
| 140 |
|
| 141 |
title = r["query"]["search"][0]["title"]
|
| 142 |
+
# Step 2: Summary
|
| 143 |
+
sum_r = requests.get(f"https://en.wikipedia.org{title.replace(' ', '_')}", timeout=10).json()
|
| 144 |
+
return f"Data from Wiki ({title}): {sum_r.get('extract', 'No summary found.')}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 145 |
except Exception as e:
|
| 146 |
+
return f"Wiki Tool Error: {str(e)}"
|
| 147 |
|
| 148 |
def youtube_captions(self, url):
|
| 149 |
try:
|
|
|
|
| 157 |
|
| 158 |
def execute_tool(self, tool, input_data, file_url):
|
| 159 |
input_data = input_data.strip(' "')
|
| 160 |
+
# FIX: If agent asks for 'none' or gives a generic string for a file tool, use the real file_url
|
| 161 |
target = file_url if (not input_data or "http" not in input_data) else input_data
|
| 162 |
|
| 163 |
+
try:
|
| 164 |
+
if tool == "wiki_search": return self.wiki_search(input_data)
|
| 165 |
+
|
| 166 |
+
if tool in ["read_image", "read_excel", "read_audio"]:
|
| 167 |
+
if not target: return "Error: No file URL provided by the system for this task."
|
|
|
|
| 168 |
r = requests.get(target, timeout=20)
|
| 169 |
+
ext = target.split('.')[-1].lower()
|
| 170 |
+
with open(f"temp.{ext}", "wb") as f: f.write(r.content)
|
| 171 |
+
|
| 172 |
+
if tool == "read_image": return f"Text in Image: {pytesseract.image_to_string(Image.open(f'temp.{ext}'))}"
|
| 173 |
+
if tool == "read_excel": return f"Excel Data: {pd.read_excel(f'temp.{ext}').to_string()[:4000]}"
|
| 174 |
+
if tool == "read_audio": return f"Transcript: {self.audio_model.transcribe(f'temp.{ext}')['text']}"
|
| 175 |
+
|
| 176 |
+
if tool == "scrape_page":
|
| 177 |
+
if "http" not in input_data: return "Error: Please provide a full URL to scrape."
|
| 178 |
+
soup = BeautifulSoup(requests.get(input_data, timeout=10).text, "html.parser")
|
| 179 |
+
return f"Webpage Text: {soup.get_text()[:4000]}"
|
| 180 |
+
except Exception as e:
|
| 181 |
+
return f"Tool Execution Error: {str(e)}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 182 |
return f"Unknown tool: {tool}"
|
| 183 |
|
| 184 |
|
| 185 |
|
| 186 |
# 2. Handle web/search tools
|
| 187 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 188 |
if tool == "youtube_captions":
|
| 189 |
return self.youtube_captions(input_data)
|
| 190 |
|
| 191 |
|
| 192 |
def agent_loop(self, question, file_url):
|
| 193 |
memory = ""
|
|
|
|
|
|
|
| 194 |
for step in range(5):
|
| 195 |
+
prompt = f"""You are a GAIA solver.
|
| 196 |
+
FILE_URL: {file_url if file_url else 'None provided'}
|
| 197 |
+
QUESTION: {question}
|
| 198 |
+
HISTORY: {memory}
|
| 199 |
+
|
| 200 |
+
INSTRUCTIONS:
|
| 201 |
+
1. To see the chess board/image, use TOOL: read_image with INPUT: {file_url if file_url else 'none'}.
|
| 202 |
+
2. For Mercedes Sosa/Dinosaurs, use TOOL: wiki_search.
|
| 203 |
+
3. If the question is backwards, decode it, then answer.
|
| 204 |
+
4. Respond with TOOL: name and INPUT: data OR FINAL: answer."""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 205 |
|
| 206 |
response = self.client.chat.completions.create(
|
| 207 |
model="gpt-4o-mini",
|
| 208 |
temperature=0,
|
| 209 |
+
messages=[{"role": "system", "content": "You are a helpful assistant. Use tools to find facts."},
|
| 210 |
{"role": "user", "content": prompt}]
|
| 211 |
)
|
| 212 |
|
|
|
|
| 215 |
|
| 216 |
if "FINAL:" in resp: return resp.split("FINAL:")[-1].strip()
|
| 217 |
|
| 218 |
+
t_match = re.search(r"TOOL:\s*(\w+)", resp, re.I)
|
| 219 |
+
i_match = re.search(r"INPUT:\s*(.*)", resp, re.I)
|
| 220 |
+
|
| 221 |
+
if t_match and i_match:
|
| 222 |
+
t_name = t_match.group(1).lower().strip()
|
| 223 |
+
t_input = i_match.group(1).strip()
|
| 224 |
+
result = self.execute_tool(t_name, t_input, file_url)
|
| 225 |
+
memory += f"\nStep {step}: {t_name} returned -> {result[:1000]}"
|
| 226 |
+
else:
|
| 227 |
+
memory += f"\nStep {step}: {resp} (Note: Use TOOL/INPUT format to use tools)"
|
|
|
|
|
|
|
| 228 |
|
| 229 |
return "No answer found."
|
| 230 |
|