|
|
import contextlib
import io
import os
import traceback

from langchain.tools import tool
|
|
|
|
|
@tool
def execute_python_code(code: str) -> str:
    """
    Executes the given Python code and returns stdout or any error.

    Returns the captured stdout on success (or a fixed success message when
    the code prints nothing), and the full traceback string on failure.
    """
    # SECURITY: exec() runs arbitrary code with this process's privileges.
    # Only expose this tool to trusted agents / sandboxed environments.
    buffer = io.StringIO()
    try:
        # Capture everything the snippet prints so it can be returned.
        with contextlib.redirect_stdout(buffer):
            exec(code, {})  # fresh empty globals: isolate from this module
        return buffer.getvalue() or "✅ Code executed successfully with no output."
    except Exception:
        # Return the full traceback so the agent can diagnose the failure.
        return "❌ Execution Error:\n" + traceback.format_exc()
|
|
@tool
def web_search(query: str) -> str:
    """Perform a web search using the Tavily API and return the top result's content."""
    # Imported lazily so the module still loads when tavily isn't installed
    # and this tool is never invoked.
    from tavily import TavilyClient

    # NOTE(review): env var name is lowercase ("tavily_api_key") -- confirm
    # this matches the deployment environment's variable naming.
    tavily = TavilyClient(api_key=os.environ["tavily_api_key"])
    try:
        # Only the content of the highest-ranked result is returned.
        return tavily.search(query=query)['results'][0]['content']
    except Exception as e:
        # Covers network failures, empty result lists, and API errors;
        # surfaced as a string so the agent can react instead of crashing.
        return f"Error from Tavily: {e}"
|
|
|
|
|
@tool
def deep_think(prompt: str) -> str:
    """Use an LLM to generate a deeply reasoned response to a prompt."""
    # Imported lazily (matching web_search's style). The original referenced
    # ChatTogether without any import, which raised NameError at call time.
    from langchain_together import ChatTogether

    llm = ChatTogether(
        model="meta-llama/Meta-Llama-3-8B-Instruct",
        # NOTE(review): lowercase env var name -- confirm against deployment.
        together_api_key=os.environ["together_api_key"]
    )

    system_prompt = (
        "You are a thoughtful and highly analytical AI assistant trained in critical thinking, planning, and strategy. "
        "Your task is to analyze the user's input carefully and reason through the problem step-by-step. "
        "Start by outlining the problem clearly, identify what is being asked, then break down your reasoning into logical steps. "
        "Also, outline what actions or steps the agent should take based on the prompt — as if you're planning for a human assistant to follow."
    )

    # Chat-style message list: the system prompt steers the reasoning style,
    # the caller's prompt is passed through unchanged.
    response = llm.invoke([
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": prompt}
    ])
    return response.content