# LLM OS: a phidata Assistant with selectable tools, a pgvector knowledge base,
# Postgres run storage, and an optional team of specialist AI assistants.
import json
from pathlib import Path
from typing import Optional
from textwrap import dedent
from typing import List
from phi.assistant import Assistant
from phi.tools import Toolkit
from phi.tools.exa import ExaTools
from phi.tools.shell import ShellTools
from phi.tools.calculator import Calculator
from phi.tools.duckduckgo import DuckDuckGo
from phi.tools.yfinance import YFinanceTools
from phi.tools.file import FileTools
from phi.llm.openai import OpenAIChat
from phi.knowledge import AssistantKnowledge
from phi.embedder.openai import OpenAIEmbedder
from phi.assistant.duckdb import DuckDbAssistant
from phi.assistant.python import PythonAssistant
from phi.storage.assistant.postgres import PgAssistantStorage
from phi.utils.log import logger
from phi.vectordb.pgvector import PgVector2
# Connection string for the local pgvector Postgres instance; used for both
# assistant run storage and the knowledge-base vector store.
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"

# Directory containing this file; used as the base dir for FileTools.
cwd = Path(__file__).parent.resolve()

# Scratch directory where team members (DuckDb/Python assistants) write files.
# mkdir(exist_ok=True) already tolerates a pre-existing directory, so the
# previous `if not scratch_dir.exists()` guard was redundant and race-prone.
scratch_dir = cwd.joinpath("scratch")
scratch_dir.mkdir(exist_ok=True, parents=True)
def get_llm_os(
    llm_id: str = "gpt-4o",
    calculator: bool = False,
    ddg_search: bool = False,
    file_tools: bool = False,
    shell_tools: bool = False,
    data_analyst: bool = False,
    python_assistant: bool = False,
    research_assistant: bool = False,
    investment_assistant: bool = False,
    user_id: Optional[str] = None,
    run_id: Optional[str] = None,
    debug_mode: bool = True,
) -> Assistant:
    """Build and return the "LLM OS" Assistant.

    Assembles an Assistant backed by Postgres run storage and a pgvector
    knowledge base, with optional toolkits and optional specialist team
    members selected via the boolean flags. Each enabled tool/member also
    appends a usage hint to the OS's extra instructions.

    Args:
        llm_id: OpenAI model id used by the OS and all of its team members.
        calculator: Add the Calculator toolkit (arithmetic helpers).
        ddg_search: Add DuckDuckGo web search (max 3 results per query).
        file_tools: Add read/save/list file tools rooted at this file's directory.
        shell_tools: Add shell command execution.
        data_analyst: Add a DuckDB assistant over a demo IMDB movie CSV.
        python_assistant: Add a Python-writing/running assistant.
        research_assistant: Add an Exa-backed NYT-style report writer.
        investment_assistant: Add a YFinance-backed investment report writer.
        user_id: Optional user id recorded with stored runs.
        run_id: Optional run id to resume an existing stored run.
        debug_mode: Enable verbose debug logging on the assistants.

    Returns:
        The fully configured `Assistant` named "llm_os".
    """
    logger.info(f"-*- Creating {llm_id} LLM OS -*-")

    # Tools available directly to the LLM OS (as opposed to team members).
    tools: List[Toolkit] = []
    # Usage hints accumulated per enabled tool/member; appended to the
    # system instructions of the OS assistant below.
    extra_instructions: List[str] = []
    if calculator:
        tools.append(
            Calculator(
                add=True,
                subtract=True,
                multiply=True,
                divide=True,
                exponentiate=True,
                factorial=True,
                is_prime=True,
                square_root=True,
            )
        )
    if ddg_search:
        tools.append(DuckDuckGo(fixed_max_results=3))
    if shell_tools:
        tools.append(ShellTools())
        extra_instructions.append(
            "You can use the `run_shell_command` tool to run shell commands. For example, `run_shell_command(args='ls')`."
        )
    if file_tools:
        tools.append(FileTools(base_dir=cwd))
        extra_instructions.append(
            "You can use the `read_file` tool to read a file, `save_file` to save a file, and `list_files` to list files in the working directory."
        )

    # Team members available to the LLM OS for task delegation.
    team: List[Assistant] = []
    if data_analyst:
        _data_analyst = DuckDbAssistant(
            name="Data Analyst",
            role="Analyze movie data and provide insights",
            semantic_model=json.dumps(
                {
                    "tables": [
                        {
                            "name": "movies",
                            "description": "CSV of my favorite movies.",
                            "path": "https://phidata-public.s3.amazonaws.com/demo_data/IMDB-Movie-Data.csv",
                        }
                    ]
                }
            ),
            base_dir=scratch_dir,
        )
        team.append(_data_analyst)
        extra_instructions.append(
            "To answer questions about my favorite movies, delegate the task to the `Data Analyst`."
        )
    if python_assistant:
        _python_assistant = PythonAssistant(
            name="Python Assistant",
            role="Write and run python code",
            pip_install=True,
            charting_libraries=["streamlit"],
            base_dir=scratch_dir,
        )
        team.append(_python_assistant)
        extra_instructions.append("To write and run python code, delegate the task to the `Python Assistant`.")
    if research_assistant:
        _research_assistant = Assistant(
            name="Research Assistant",
            role="Write a research report on a given topic",
            llm=OpenAIChat(model=llm_id),
            description="You are a Senior New York Times researcher tasked with writing a cover story research report.",
            instructions=[
                # NOTE: count matches ExaTools(num_results=5) below; the prompt
                # previously said "top 10", contradicting the tool config.
                "For a given topic, use the `search_exa` to get the top 5 search results.",
                "Carefully read the results and generate a final - NYT cover story worthy report in the <report_format> provided below.",
                "Make your report engaging, informative, and well-structured.",
                "Remember: you are writing for the New York Times, so the quality of the report is important.",
            ],
            expected_output=dedent(
                """\
                An engaging, informative, and well-structured report in the following format:
                <report_format>
                ## Title
                - **Overview** Brief introduction of the topic.
                - **Importance** Why is this topic significant now?
                ### Section 1
                - **Detail 1**
                - **Detail 2**
                ### Section 2
                - **Detail 1**
                - **Detail 2**
                ## Conclusion
                - **Summary of report:** Recap of the key findings from the report.
                - **Implications:** What these findings mean for the future.
                ## References
                - [Reference 1](Link to Source)
                - [Reference 2](Link to Source)
                </report_format>
                """
            ),
            tools=[ExaTools(num_results=5, text_length_limit=1000)],
            # This setting tells the LLM to format messages in markdown
            markdown=True,
            add_datetime_to_instructions=True,
            debug_mode=debug_mode,
        )
        team.append(_research_assistant)
        extra_instructions.append(
            "To write a research report, delegate the task to the `Research Assistant`. "
            "Return the report in the <report_format> to the user as is, without any additional text like 'here is the report'."
        )
    if investment_assistant:
        _investment_assistant = Assistant(
            name="Investment Assistant",
            role="Write an investment report on a given company (stock) symbol",
            llm=OpenAIChat(model=llm_id),
            description="You are a Senior Investment Analyst for Goldman Sachs tasked with writing an investment report for a very important client.",
            instructions=[
                "For a given stock symbol, get the stock price, company information, analyst recommendations, and company news",
                "Carefully read the research and generate a final - Goldman Sachs worthy investment report in the <report_format> provided below.",
                "Provide thoughtful insights and recommendations based on the research.",
                "When you share numbers, make sure to include the units (e.g., millions/billions) and currency.",
                "REMEMBER: This report is for a very important client, so the quality of the report is important.",
            ],
            expected_output=dedent(
                """\
                <report_format>
                ## [Company Name]: Investment Report
                ### **Overview**
                {give a brief introduction of the company and why the user should read this report}
                {make this section engaging and create a hook for the reader}
                ### Core Metrics
                {provide a summary of core metrics and show the latest data}
                - Current price: {current price}
                - 52-week high: {52-week high}
                - 52-week low: {52-week low}
                - Market Cap: {Market Cap} in billions
                - P/E Ratio: {P/E Ratio}
                - Earnings per Share: {EPS}
                - 50-day average: {50-day average}
                - 200-day average: {200-day average}
                - Analyst Recommendations: {buy, hold, sell} (number of analysts)
                ### Financial Performance
                {analyze the company's financial performance}
                ### Growth Prospects
                {analyze the company's growth prospects and future potential}
                ### News and Updates
                {summarize relevant news that can impact the stock price}
                ### [Summary]
                {give a summary of the report and what are the key takeaways}
                ### [Recommendation]
                {provide a recommendation on the stock along with a thorough reasoning}
                </report_format>
                """
            ),
            tools=[YFinanceTools(stock_price=True, company_info=True, analyst_recommendations=True, company_news=True)],
            # This setting tells the LLM to format messages in markdown
            markdown=True,
            add_datetime_to_instructions=True,
            debug_mode=debug_mode,
        )
        team.append(_investment_assistant)
        extra_instructions.extend(
            [
                "To get an investment report on a stock, delegate the task to the `Investment Assistant`. "
                "Return the report in the <report_format> to the user without any additional text like 'here is the report'.",
                "Answer any questions they may have using the information in the report.",
                "Never provide investment advice without the investment report.",
            ]
        )

    # Create the LLM OS Assistant itself, wiring in the tools and team above.
    llm_os = Assistant(
        name="llm_os",
        run_id=run_id,
        user_id=user_id,
        llm=OpenAIChat(model=llm_id),
        description=dedent(
            """\
            You are the most advanced AI system in the world called `LLM-OS`.
            You have access to a set of tools and a team of AI Assistants at your disposal.
            Your goal is to assist the user in the best way possible.\
            """
        ),
        instructions=[
            "When the user sends a message, first **think** and determine if:\n"
            " - You can answer by using a tool available to you\n"
            " - You need to search the knowledge base\n"
            " - You need to search the internet\n"
            " - You need to delegate the task to a team member\n"
            " - You need to ask a clarifying question",
            "If the user asks about a topic, first ALWAYS search your knowledge base using the `search_knowledge_base` tool.",
            "If you don't find relevant information in your knowledge base, use the `duckduckgo_search` tool to search the internet.",
            "If the user asks to summarize the conversation or if you need to reference your chat history with the user, use the `get_chat_history` tool.",
            "If the user's message is unclear, ask clarifying questions to get more information.",
            "Carefully read the information you have gathered and provide a clear and concise answer to the user.",
            "Do not use phrases like 'based on my knowledge' or 'depending on the information'.",
            "You can delegate tasks to an AI Assistant in your team depending on their role and the tools available to them.",
        ],
        extra_instructions=extra_instructions,
        # Add long-term memory to the LLM OS backed by a PostgreSQL database
        storage=PgAssistantStorage(table_name="llm_os_runs", db_url=db_url),
        # Add a knowledge base to the LLM OS
        knowledge_base=AssistantKnowledge(
            vector_db=PgVector2(
                db_url=db_url,
                collection="llm_os_documents",
                embedder=OpenAIEmbedder(model="text-embedding-3-small", dimensions=1536),
            ),
            # 3 references are added to the prompt when searching the knowledge base
            num_documents=3,
        ),
        # Add selected tools to the LLM OS
        tools=tools,
        # Add selected team members to the LLM OS
        team=team,
        # Show tool calls in the chat
        show_tool_calls=True,
        # This setting gives the LLM a tool to search the knowledge base for information
        search_knowledge=True,
        # This setting gives the LLM a tool to get chat history
        read_chat_history=True,
        # This setting adds chat history to the messages
        add_chat_history_to_messages=True,
        # This setting adds 6 previous messages from chat history to the messages sent to the LLM
        num_history_messages=6,
        # This setting tells the LLM to format messages in markdown
        markdown=True,
        # This setting adds the current datetime to the instructions
        add_datetime_to_instructions=True,
        # Add an introductory Assistant message
        introduction=dedent(
            """\
            Hi, I'm your LLM OS.
            I have access to a set of tools and AI Assistants to assist you.
            Let's solve some problems together!\
            """
        ),
        debug_mode=debug_mode,
    )
    return llm_os