Anshini committed on
Commit
c0fd6d9
·
verified ·
1 Parent(s): bd2db9c

Create tools.py

Browse files
Files changed (1) hide show
  1. tools.py +47 -0
tools.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import contextlib
import io
import os
import traceback

from langchain.tools import tool
@tool
def execute_python_code(code: str) -> str:
    """
    Execute the given Python code string and return its stdout output.

    Args:
        code: Python source code to run.

    Returns:
        Captured stdout, a success message when there is no output, or a
        formatted traceback string when execution raises.

    WARNING: ``exec`` runs arbitrary code with full interpreter privileges —
    only feed it trusted or sandboxed input.
    """
    buffer = io.StringIO()
    try:
        # Capture everything the snippet prints so it can be returned as a string.
        with contextlib.redirect_stdout(buffer):
            exec(code, {})  # empty globals dict isolates the snippet from this module
        return buffer.getvalue() or "✅ Code executed successfully with no output."
    except Exception:
        # Return the full traceback (not just the message) so the agent can debug.
        return "❌ Execution Error:\n" + traceback.format_exc()
@tool
def web_search(query: str) -> str:
    """
    Perform a web search using the Tavily API and return the top result's content.

    Args:
        query: The search query string.

    Returns:
        The content of the first search result, or an error message string
        when the search fails or returns no results.
    """
    # Imported lazily so the module still loads when tavily is not installed.
    from tavily import TavilyClient

    # NOTE(review): env var name is lowercase — confirm it matches deployment config.
    tavily = TavilyClient(api_key=os.environ["tavily_api_key"])
    try:
        return tavily.search(query=query)['results'][0]['content']
    except Exception as e:
        # Covers network failures, API errors, and an empty 'results' list.
        return f"Error from Tavily: {e}"
27
+
28
+ @tool
29
+ def deep_think(prompt: str) -> str:
30
+ """Use an LLM to generate a deeply reasoned response to a prompt."""
31
+ llm = ChatTogether(
32
+ model="meta-llama/Meta-Llama-3-8B-Instruct",
33
+ together_api_key=os.environ["together_api_key"]
34
+ )
35
+ # system_prompt = "You are a thoughtful reasoning assistant. Think deeply and provide insightful reasoning for the input given by the user. And also what are the steps you need to do as per the user input."
36
+ system_prompt = (
37
+ "You are a thoughtful and highly analytical AI assistant trained in critical thinking, planning, and strategy. "
38
+ "Your task is to analyze the user's input carefully and reason through the problem step-by-step. "
39
+ "Start by outlining the problem clearly, identify what is being asked, then break down your reasoning into logical steps. "
40
+ "Also, outline what actions or steps the agent should take based on the prompt — as if you're planning for a human assistant to follow."
41
+ )
42
+
43
+ response = llm.invoke([
44
+ {"role": "system", "content": system_prompt},
45
+ {"role": "user", "content": prompt}
46
+ ])
47
+ return response.content