Anshini committed on
Commit
0a307b0
·
verified ·
1 Parent(s): 122377f

Update tools.py

Browse files
Files changed (1) hide show
  1. tools.py +20 -9
tools.py CHANGED
@@ -153,13 +153,24 @@ def web_search(query: str) -> str:
153
  except Exception as e:
154
  return f"Error from Tavily: {e}"
155
 
156
- # --- Execute Code ---
157
  @tool
158
- def execute_code(code: str) -> str:
159
- """ Execute a code snippet and return the resulting local variable or ant error."""
160
- exec_locals = {}
161
- try:
162
- exec(code, {}, exec_locals)
163
- return str(exec_locals)
164
- except Exception as e:
165
- return str(e)
 
 
 
 
 
 
 
 
 
 
 
 
 
153
  except Exception as e:
154
  return f"Error from Tavily: {e}"
155
 
 
156
  @tool
157
+ def deep_think(prompt: str) -> str:
158
+ """Use an LLM to generate a deeply reasoned response to a prompt."""
159
+ llm = ChatTogether(
160
+ model="meta-llama/Meta-Llama-3-8B-Instruct",
161
+ together_api_key=os.environ["together_api_key"]
162
+ )
163
+ # system_prompt = "You are a thoughtful reasoning assistant. Think deeply and provide insightful reasoning for the input given by the user. And also what are the steps you need to do as per the user input."
164
+ system_prompt = (
165
+ "You are a thoughtful and highly analytical AI assistant trained in critical thinking, planning, and strategy. "
166
+ "Your task is to analyze the user's input carefully and reason through the problem step-by-step. "
167
+ "Start by outlining the problem clearly, identify what is being asked, then break down your reasoning into logical steps. "
168
+ "Also, outline what actions or steps the agent should take based on the prompt — as if you're planning for a human assistant to follow."
169
+ )
170
+
171
+ response = llm.invoke([
172
+ {"role": "system", "content": system_prompt},
173
+ {"role": "user", "content": prompt}
174
+ ])
175
+ return response.content
176
+