# NOTE: exported from a Hugging Face Space (viewer page chrome removed).
import os
from phi.agent import Agent
from phi.tools.firecrawl import FirecrawlTools
from phi.tools.duckduckgo import DuckDuckGo
from phi.model.openai import OpenAIChat
from pydantic import BaseModel, Field
from fastapi import UploadFile
from rich.live import Live
from rich.console import Console
from rich.errors import LiveError
# Load environment variables (API keys, etc.)
from dotenv import load_dotenv
load_dotenv()
#####################################################################################
# PHASE 1 #
#####################################################################################
##############################
# 1️⃣ Company Search Agent #
##############################
# Agent that looks up company details via DuckDuckGo web search,
# instructed to always cite its sources. Tool calls are shown and
# output is rendered as markdown.
company_search_agent = Agent(
    name="Company Search Agent",
    model=OpenAIChat(id="gpt-4o"),
    tools=[DuckDuckGo()],
    description="Finds company details based on name using web search.",
    instructions=["Always include sources in search results."],
    show_tool_calls=True,
    markdown=True,
)
def search_company(company_name: str) -> dict:
    """Look up detailed information about a company via web search.

    Args:
        company_name: Name of the company to research.

    Returns:
        Dict with the original ``company_name`` and the agent's
        response text under ``details``.
    """
    prompt = (
        f"Find details for {company_name}, including its official website, "
        "mission, services, about us Include sources."
    )
    result = company_search_agent.run(prompt)
    return {"company_name": company_name, "details": result.content}
##############################
# 2️⃣ Website Scraper Agent #
##############################
# Agent that scrapes (but does not crawl) company websites via Firecrawl.
# NOTE(review): unlike the other agents in this file, no model= is set here,
# so phi's default model is used — confirm that is intentional.
firecrawl_agent = Agent(
    name="Website Scraper Agent",
    tools=[FirecrawlTools(scrape=True, crawl=False)],
    description="Extracts content from company websites.",
    show_tool_calls=True,
    markdown=True,
)
def scrape_website(url):
    """Extract business details (mission, services, about us) from a website.

    Runs the Firecrawl-backed agent, wrapping the call in a rich ``Live``
    display for interactive progress feedback. If another ``Live`` display is
    already active (``LiveError``), the agent is run without one.

    Args:
        url: The website URL to scrape.

    Returns:
        The agent's response content (markdown text).
    """
    # Build the prompt once so the try/except fallback paths cannot drift
    # out of sync (the original duplicated this string in both branches).
    prompt = (
        f"Extract business details from {url}. "
        "Include information about mission, services, and about us if available."
    )
    try:
        console = Console()
        with Live(console=console, refresh_per_second=10):
            response = firecrawl_agent.run(prompt)
    except LiveError:
        # A Live display is already running elsewhere — fall back to a
        # plain (display-less) agent invocation.
        response = firecrawl_agent.run(prompt)
    return response.content
##############################
# 3️⃣ Text Processing Agent #
##############################
class CompanySummary(BaseModel):
    """Structured response model holding a single summarized-details string."""
    # Free-text summary produced by the text-processing agent.
    summary: str = Field(..., description="Summarized company details.")
# Agent that condenses free-form company descriptions; its output is
# parsed into the CompanySummary response model defined above.
text_processing_agent = Agent(
    model=OpenAIChat(id="gpt-4o"),
    description="Summarizes user-written company descriptions.",
    response_model=CompanySummary,
)
def process_company_description(text: str) -> dict:
    """Summarize a user-provided company description.

    Args:
        text: Raw company description written by the user.

    Returns:
        Dict with the original ``user_description`` and the agent's
        ``summary`` content.
    """
    prompt = (
        f"Summarize the following description: {text}. "
        "Focus on mission, key services, industry, and AI potential."
    )
    reply = text_processing_agent.run(prompt)
    return {"user_description": text, "summary": reply.content}
##############################
# 4️⃣ Document Processing Agent #
##############################
def process_uploaded_document(file: UploadFile) -> dict:
    """Persist an uploaded document under ``tmp/`` and return its text content.

    Args:
        file: The uploaded file; its payload must be UTF-8 decodable text.

    Returns:
        Dict with ``document_name`` (the original filename) and the
        extracted ``content``.
    """
    # Ensure the target directory exists — open() does not create parents,
    # so a fresh deployment without tmp/ would raise FileNotFoundError.
    os.makedirs("tmp", exist_ok=True)
    file_path = os.path.join("tmp", file.filename)
    with open(file_path, "wb") as buffer:
        buffer.write(file.file.read())
    # Read back as UTF-8 text; non-text uploads will raise UnicodeDecodeError.
    with open(file_path, "r", encoding="utf-8") as f:
        document_text = f.read()
    return {"document_name": file.filename, "content": document_text}