# Source: Hugging Face Spaces file view (1,492 bytes, commit f9e5853)
from crewai import Agent, Task
import requests
class WebCrawlerAgent(Agent):
    """CrewAI agent that answers tasks by querying the Serper.dev search API.

    The task's ``description`` is used verbatim as the search query; the
    titles of the organic results are returned as a newline-joined string.
    """

    def __init__(self, llm, role, backstory, goal, serper_api_key):
        """Initialize the agent and store the Serper API key.

        Args:
            llm: Language model passed through to the base ``Agent``.
            role: Agent role description.
            backstory: Agent backstory.
            goal: Agent goal.
            serper_api_key: API key for https://serper.dev.
        """
        super().__init__(llm=llm, role=role, backstory=backstory, goal=goal)
        self._serper_api_key = serper_api_key

    def web_crawl(self, query, timeout=10):
        """Fetch search results from the Serper.dev API.

        Args:
            query: Search query string.
            timeout: Seconds to wait for the HTTP request (new, defaulted
                parameter — the original call had no timeout and could
                hang indefinitely on a stalled connection).

        Returns:
            list: The ``organic`` results from the API response (possibly empty).

        Raises:
            ValueError: If the response body is not usable JSON.
            Exception: If the API returns a non-200 status code.
        """
        url = "https://google.serper.dev/search"
        headers = {"X-API-KEY": self._serper_api_key}
        payload = {"q": query}
        # timeout= prevents an indefinite hang on network stalls.
        response = requests.post(url, json=payload, headers=headers, timeout=timeout)
        if response.status_code == 200:
            try:
                results = response.json()
                return results.get("organic", [])
            except Exception as e:
                # Covers both JSON decode failures and a non-dict payload.
                raise ValueError(f"Failed to parse JSON: {e}")
        else:
            raise Exception(f"Serper API error: {response.status_code}, {response.text}")

    def execute_task(self, task: Task, context: dict = None, tools: list = None):
        """Execute the task by performing a web search and returning results as a string.

        Args:
            task: Task whose ``description`` is the search query.
            context: Unused; kept for interface compatibility with ``Agent``.
            tools: Unused; kept for interface compatibility with ``Agent``.

        Returns:
            str: Newline-joined titles of the organic search results
            ('No Title' substituted when a result lacks a title).

        Raises:
            ValueError: If the task has an empty description.
        """
        query = task.description
        if not query:
            raise ValueError("Task description must include a 'query' field.")
        search_results = self.web_crawl(query)
        # join over a generator: no intermediate list needed.
        return "\n".join(
            result.get('title', 'No Title') for result in search_results
        )