GiantAnalytics committed on
Commit
6dbe477
·
verified ·
1 Parent(s): e5af540

Create phase1_agents.py

Browse files
Files changed (1) hide show
  1. phase1_agents.py +84 -0
phase1_agents.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

# phidata agent framework: Agent orchestration, tool wrappers, and model client.
from phi.agent import Agent
from phi.tools.firecrawl import FirecrawlTools
from phi.tools.duckduckgo import DuckDuckGo
from phi.model.openai import OpenAIChat
from pydantic import BaseModel, Field
from fastapi import UploadFile

# Load environment variables (API keys, etc.)
# NOTE(review): load_dotenv() runs at import time — the .env file must be
# present in the working directory (or a parent) before this module is imported.
from dotenv import load_dotenv
load_dotenv()
12
+
13
+ #####################################################################################
14
+ # PHASE 1 #
15
+ #####################################################################################
16
+
17
+ ##############################
18
+ # 1️⃣ Company Search Agent #
19
+ ##############################
20
# Agent 1: resolves a company name to web-sourced details using DuckDuckGo
# search, backed by GPT-4o. Tool calls are echoed and output is Markdown.
company_search_agent = Agent(
    name="Company Search Agent",
    description="Finds company details based on name using web search.",
    instructions=["Always include sources in search results."],
    model=OpenAIChat(id="gpt-4o"),
    tools=[DuckDuckGo()],
    markdown=True,
    show_tool_calls=True,
)
29
+
30
def search_company(company_name: str) -> dict:
    """Search the web for detailed information about a company.

    Args:
        company_name: Name of the company to look up.

    Returns:
        dict with keys ``company_name`` (echoed input) and ``details``
        (the agent's textual findings, sources included).
    """
    query = (
        f"Find details for {company_name}, including its official website, "
        "mission, services, and AI-related initiatives. Include sources."
    )
    # BUG FIX: Agent.print_response() prints to stdout and returns None,
    # so this function previously always returned {"details": None}.
    # Agent.run() returns a RunResponse whose .content is the model output.
    response = company_search_agent.run(query)
    return {"company_name": company_name, "details": response.content}
35
+
36
+
37
+ ##############################
38
+ # 2️⃣ Website Scraper Agent #
39
+ ##############################
40
# Agent 2: pulls page content from a company website via Firecrawl
# (single-page scrape only; crawling is disabled).
# NOTE: no model is specified, so the phi default model is used.
firecrawl_agent = Agent(
    name="Website Scraper Agent",
    description="Extracts content from company websites.",
    tools=[FirecrawlTools(scrape=True, crawl=False)],
    markdown=True,
    show_tool_calls=True,
)
47
+
48
def scrape_website(url: str) -> dict:
    """Scrape relevant company information from the given website.

    Args:
        url: The company website to scrape.

    Returns:
        dict with keys ``website`` (echoed input) and ``scraped_data``
        (the agent's extracted business details).
    """
    # BUG FIX: Agent.print_response() returns None (it only prints), so
    # "scraped_data" was always None. Use Agent.run() and its .content.
    response = firecrawl_agent.run(
        f"Extract business details from {url}, including mission, services, and AI-related information."
    )
    return {"website": url, "scraped_data": response.content}
52
+
53
+
54
+ ##############################
55
+ # 3️⃣ Text Processing Agent #
56
+ ##############################
57
# Structured output schema for the text-processing agent: the agent's reply
# is parsed into this model via response_model=CompanySummary.
# (# comments used instead of a docstring so the pydantic JSON schema is unchanged.)
class CompanySummary(BaseModel):
    # Condensed version of the company description (required field).
    summary: str = Field(..., description="Summarized company details.")
59
+
60
# Agent 3: condenses a free-text company description into a structured
# CompanySummary (enforced through response_model). No tools needed.
text_processing_agent = Agent(
    description="Summarizes user-written company descriptions.",
    response_model=CompanySummary,
    model=OpenAIChat(id="gpt-4o"),
)
65
+
66
def process_company_description(text: str) -> dict:
    """Summarize a user-provided company description.

    Args:
        text: Free-form description written by the user.

    Returns:
        dict with keys ``user_description`` (echoed input) and ``summary``
        (a CompanySummary instance, per the agent's response_model).
    """
    # BUG FIX: Agent.print_response() returns None (it only prints), so
    # "summary" was always None. Agent.run() returns a RunResponse whose
    # .content is the parsed CompanySummary.
    response = text_processing_agent.run(
        f"Summarize the following description: {text}. Focus on mission, key services, industry, and AI potential."
    )
    return {"user_description": text, "summary": response.content}
70
+
71
+
72
+ ##############################
73
+ # 4️⃣ Document Processing Agent #
74
+ ##############################
75
def process_uploaded_document(file: "UploadFile") -> dict:
    """Persist an uploaded document under tmp/ and return its text content.

    Args:
        file: FastAPI UploadFile (needs ``.filename`` and a readable ``.file``).

    Returns:
        dict with keys ``document_name`` (original filename) and ``content``
        (the document decoded as UTF-8; undecodable bytes are replaced).
    """
    # BUG FIX: the tmp/ directory was never created, so the first upload
    # raised FileNotFoundError.
    os.makedirs("tmp", exist_ok=True)
    # BUG FIX: use only the basename — a filename like "../x" could
    # previously escape the tmp/ directory (path traversal).
    safe_name = os.path.basename(file.filename)
    file_path = os.path.join("tmp", safe_name)

    with open(file_path, "wb") as buffer:
        buffer.write(file.file.read())

    # errors="replace" keeps non-UTF-8 uploads from crashing the endpoint;
    # previously any binary/latin-1 file raised UnicodeDecodeError.
    with open(file_path, "r", encoding="utf-8", errors="replace") as f:
        document_text = f.read()

    return {"document_name": file.filename, "content": document_text}