Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -38,18 +38,40 @@ def answer_nfl_question_tool(question: str ) -> str:
|
|
| 38 |
query = f"{question}"
|
| 39 |
print(f"🔍 Searching DuckDuckGo with query: {query}") # Debug
|
| 40 |
with DDGS() as ddgs:
|
| 41 |
-
|
| 42 |
|
| 43 |
-
print(f"📊 Found {len(
|
| 44 |
|
| 45 |
-
if not
|
| 46 |
return ["❌ We haven't founded answers to this question."]
|
| 47 |
|
| 48 |
# Show results
|
| 49 |
-
for r in
|
| 50 |
print(f"📌 {r['title']} - {r['href']}")
|
| 51 |
|
| 52 |
-
return [f"{r['title']} - {r['href']}" for r in
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 53 |
|
| 54 |
|
| 55 |
@tool
|
|
|
|
| 38 |
query = f"{question}"
|
| 39 |
print(f"🔍 Searching DuckDuckGo with query: {query}") # Debug
|
| 40 |
with DDGS() as ddgs:
|
| 41 |
+
search_results = list(ddgs.text(query, max_results=5)) # Fetch the first 5 results
|
| 42 |
|
| 43 |
+
print(f"📊 Found {len(search_results)} results") # Debug
|
| 44 |
|
| 45 |
+
if not search_results:
|
| 46 |
return ["❌ We haven't founded answers to this question."]
|
| 47 |
|
| 48 |
# Show results
|
| 49 |
+
for r in search_results:
|
| 50 |
print(f"📌 {r['title']} - {r['href']}")
|
| 51 |
|
| 52 |
+
#return [f"{r['title']} - {r['href']}" for r in search_results]
|
| 53 |
+
|
| 54 |
+
# Extract URLs from the markdown-formatted search results
|
| 55 |
+
import re
|
| 56 |
+
urls = re.findall(r'\((https?://[^\)]+)\)', search_results)
|
| 57 |
+
|
| 58 |
+
# Visit each webpage and collect content
|
| 59 |
+
detailed_results = []
|
| 60 |
+
detailed_results.append(f"Search Results for: {search_query}\n")
|
| 61 |
+
detailed_results.append("=" * 50 + "\n")
|
| 62 |
+
|
| 63 |
+
visit_webpage_tool = VisitWebpageTool()
|
| 64 |
+
|
| 65 |
+
for i, url in enumerate(urls[:num_pages_to_visit], 1):
|
| 66 |
+
try:
|
| 67 |
+
page_content = visit_webpage_tool.forward(url)
|
| 68 |
+
detailed_results.append(f"\nSource {i}: {url}\n")
|
| 69 |
+
detailed_results.append("-" * 30 + "\n")
|
| 70 |
+
detailed_results.append(page_content[:1000] + "...\n") # Truncate long pages
|
| 71 |
+
except Exception as e:
|
| 72 |
+
detailed_results.append(f"\nError accessing {url}: {str(e)}\n")
|
| 73 |
+
|
| 74 |
+
return "\n".join(detailed_results)
|
| 75 |
|
| 76 |
|
| 77 |
@tool
|