Anshini committed on
Commit
c76e3a9
·
verified ·
1 Parent(s): 6d82766

Upload tools.py

Browse files
Files changed (1) hide show
  1. tools.py +96 -0
tools.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_core.tools import tool
2
+ from langchain_together import ChatTogether
3
+ import os
4
+
5
+ # --- Execute Code ---
6
+ @tool
7
+ def execute_code(code: str) -> str:
8
+ """ Execute a code snippet and return the resulting local variable or ant error."""
9
+ exec_locals = {}
10
+ try:
11
+ exec(code, {}, exec_locals)
12
+ return str(exec_locals)
13
+ except Exception as e:
14
+ return str(e)
15
+
16
+ # --- Explain Code ---
17
+ @tool
18
+ def explain_code(code: str) -> str:
19
+ """Explain what a given code snippet does using an llm"""
20
+ llm = ChatTogether(
21
+ model="meta-llama/Meta-Llama-3-8B-Instruct",
22
+ together_api_key=os.environ["together_api_key"]
23
+ )
24
+ system_prompt = "You are a code explainer. Explain clearly and concisely what the code does."
25
+ response = llm.invoke([
26
+ {"role": "system", "content": system_prompt},
27
+ {"role": "user", "content": f"Explain this code:\n{code}"}
28
+ ])
29
+ return response.content
30
+
31
+ # --- Web Search ---
32
+ @tool
33
+ def web_search(query: str) -> str:
34
+ """Perform a web search using the Tavily API and return the top results's content."""
35
+ from tavily import TavilyClient
36
+ tavily = TavilyClient(api_key=os.environ["tavily_api_key"])
37
+ try:
38
+ return tavily.search(query=query)['results'][0]['content']
39
+ except Exception as e:
40
+ return f"Error from Tavily: {e}"
41
+
42
+ # --- Deep Think ---
43
+ @tool
44
+ def deep_think(prompt: str) -> str:
45
+ """Use an LLM to generate a deeply reasoned response to a prompt."""
46
+ llm = ChatTogether(
47
+ model="meta-llama/Meta-Llama-3-8B-Instruct",
48
+ together_api_key=os.environ["together_api_key"]
49
+ )
50
+ system_prompt = "You are a thoughtful reasoning assistant. Think deeply and provide insightful reasoning for the input given by the user. And also what are the steps you need to do as per the user input."
51
+ response = llm.invoke([
52
+ {"role": "system", "content": system_prompt},
53
+ {"role": "user", "content": prompt}
54
+ ])
55
+ return response.content
56
+
57
+ # --- Analyze Code ---
58
+ @tool
59
+ def analyze_code(code: str) -> str:
60
+ """Analyze the structure and type of code"""
61
+ llm = ChatTogether(
62
+ model="meta-llama/Meta-Llama-3-8B-Instruct",
63
+ together_api_key=os.environ["together_api_key"]
64
+ )
65
+ system_prompt = "You are a code structure analyzer. Determine what kind of code this is and its structure."
66
+ response = llm.invoke([
67
+ {"role": "system", "content": system_prompt},
68
+ {"role": "user", "content": f"Analyze this code:\n{code}"}
69
+ ])
70
+ return response.content
71
+
72
+ # --- Code Generator ---
73
+ @tool
74
+ def code_generator(prompt: str) -> str:
75
+ """ Generate code based on the natural language description using an LLM"""
76
+ llm = ChatTogether(
77
+ model="meta-llama/Meta-Llama-3-8B-Instruct",
78
+ together_api_key=os.environ["together_api_key"]
79
+ )
80
+ system_prompt = "You are a Python code generator. Generate efficient and clean Python code."
81
+ response = llm.invoke([
82
+ {"role": "system", "content": system_prompt},
83
+ {"role": "user", "content": f"Write Python code based on this requirement:\n{prompt}"}
84
+ ])
85
+ return response.content
86
+
87
+ # --- Return All Tools ---
88
+ def get_all_tools():
89
+ return [
90
+ execute_code,
91
+ explain_code,
92
+ web_search,
93
+ deep_think,
94
+ analyze_code,
95
+ code_generator,
96
+ ]