# slack-agi — crew/tools.py
# ankush-003: updated app (commit 78459c8)
from typing import List
from crewai_tools import ScrapeWebsiteTool
from pydantic import BaseModel, HttpUrl, Field
from googlesearch import search
from crewai.tools import tool
from langchain_community.tools import DuckDuckGoSearchRun
from datetime import datetime
@tool("get_current_time")
def get_current_time() -> str:
    """Returns the current date and time in a human-readable format."""
    # Example output: "Monday, 01 January 2024 09:30:00 AM" (12-hour clock,
    # local system timezone — naive datetime by design).
    return datetime.now().strftime("%A, %d %B %Y %I:%M:%S %p")
class SearchResponse(BaseModel):
    """Structured result returned by the Google search tool.

    ``content`` holds the pre-formatted, human-readable text block built by
    the tool; ``urls`` lists every page that was scraped for the query
    (empty by default when no results were collected).
    """
    # URLs of the pages scraped for this query; defaults to an empty list.
    urls: List[str] = Field(default_factory=list, description="List of URLs scraped")
    # Required: the formatted, human-readable search/scrape output.
    content: str = Field(..., description="Formatted search results")
@tool("Google Search Tool")
def google_website_search_tool(query: str) -> SearchResponse:
    """Search the web for information on a given topic"""
    # Materialize the top-3 Google hits up front, then scrape each one.
    hits = list(search(query, num_results=3))

    lines = [f"Google Search Results for: '{query}'", "=" * 50]
    scraped_urls = []

    for idx, hit_url in enumerate(hits, 1):
        lines.append(f"\n{idx}. URL: {hit_url}")
        scraped_urls.append(str(hit_url))  # plain strings for the simple model
        try:
            page_text = ScrapeWebsiteTool(website_url=hit_url).run()
            # Cap very long pages so the formatted output stays readable.
            if len(page_text) > 1000:
                page_text = page_text[:1000] + "... [Content truncated]"
            lines.append(f"Content: {page_text}")
        except Exception as scrape_error:
            # A failed scrape is recorded inline; the loop continues.
            lines.append(f"Error scraping content: {str(scrape_error)}")
        # Separator after every entry, scraped or failed.
        lines.append("-" * 30)

    return SearchResponse(urls=scraped_urls, content="\n".join(lines))
@tool("Website Search Tool")
def website_search_tool(query: str) -> str:
    """Search the web for information on a given topic"""
    # Top-2 Google hits for the query; each is scraped and formatted below.
    # NOTE(fix): the original also accumulated a `urls` list that was never
    # read, and carried commented-out dead code referencing an undefined
    # `question` variable — both removed.
    search_results = list(search(query, num_results=2))

    results = [f"Google Search Results for: '{query}'", "=" * 50]
    for i, url in enumerate(search_results, 1):
        results.append(f"\n{i}. URL: {url}")
        try:
            # Use CrewAI's ScrapeWebsiteTool to fetch the page content.
            content = ScrapeWebsiteTool(website_url=url).run()
            # Limit content length for readability.
            if len(content) > 1000:
                content = content[:1000] + "... [Content truncated]"
            results.append(f"Content: {content}")
            results.append("-" * 30)
        except Exception as scrape_error:
            # Best-effort: record the scrape failure inline and keep going.
            results.append(f"Error scraping content: {str(scrape_error)}")
            results.append("-" * 30)
    return "\n".join(results)