Anshini committed on
Commit
88aa38a
·
verified ·
1 Parent(s): 51211ad

Update tools.py

Browse files
Files changed (1) hide show
  1. tools.py +146 -125
tools.py CHANGED
@@ -1,125 +1,146 @@
1
- from langchain_core.tools import tool
2
- from langchain_together import ChatTogether
3
- import os
4
-
5
- # --- Execute Code ---
6
- @tool
7
- def execute_code(code: str) -> str:
8
- """ Execute a code snippet and return the resulting local variable or ant error."""
9
- exec_locals = {}
10
- try:
11
- exec(code, {}, exec_locals)
12
- return str(exec_locals)
13
- except Exception as e:
14
- return str(e)
15
-
16
- # --- Explain Code ---
17
- @tool
18
- def explain_code(code: str) -> str:
19
- """Explain what a given code snippet does using an llm"""
20
- llm = ChatTogether(
21
- model="meta-llama/Meta-Llama-3-8B-Instruct",
22
- together_api_key=os.environ["together_api_key"]
23
- )
24
- # system_prompt = "You are a code explainer. Explain clearly and concisely what the code does."
25
- system_prompt = (
26
- "You are a world-class software engineer and technical educator. "
27
- "Your job is to explain what a given piece of code does in clear, concise, and beginner-friendly language. "
28
- "Break down the functionality step by step, mention any libraries or functions used, and explain their purpose. "
29
- "Avoid jargon where possible, and provide analogies or examples if it helps understanding."
30
- )
31
-
32
- response = llm.invoke([
33
- {"role": "system", "content": system_prompt},
34
- {"role": "user", "content": f"Explain this code:\n{code}"}
35
- ])
36
- return response.content
37
-
38
- # --- Web Search ---
39
- @tool
40
- def web_search(query: str) -> str:
41
- """Perform a web search using the Tavily API and return the top results's content."""
42
- from tavily import TavilyClient
43
- tavily = TavilyClient(api_key=os.environ["tavily_api_key"])
44
- try:
45
- return tavily.search(query=query)['results'][0]['content']
46
- except Exception as e:
47
- return f"Error from Tavily: {e}"
48
-
49
- # --- Deep Think ---
50
- @tool
51
- def deep_think(prompt: str) -> str:
52
- """Use an LLM to generate a deeply reasoned response to a prompt."""
53
- llm = ChatTogether(
54
- model="meta-llama/Meta-Llama-3-8B-Instruct",
55
- together_api_key=os.environ["together_api_key"]
56
- )
57
- # system_prompt = "You are a thoughtful reasoning assistant. Think deeply and provide insightful reasoning for the input given by the user. And also what are the steps you need to do as per the user input."
58
- system_prompt = (
59
- "You are a thoughtful and highly analytical AI assistant trained in critical thinking, planning, and strategy. "
60
- "Your task is to analyze the user's input carefully and reason through the problem step-by-step. "
61
- "Start by outlining the problem clearly, identify what is being asked, then break down your reasoning into logical steps. "
62
- "Also, outline what actions or steps the agent should take based on the prompt — as if you're planning for a human assistant to follow."
63
- )
64
-
65
- response = llm.invoke([
66
- {"role": "system", "content": system_prompt},
67
- {"role": "user", "content": prompt}
68
- ])
69
- return response.content
70
-
71
- # --- Analyze Code ---
72
- @tool
73
- def analyze_code(code: str) -> str:
74
- """Analyze the structure and type of code"""
75
- llm = ChatTogether(
76
- model="meta-llama/Meta-Llama-3-8B-Instruct",
77
- together_api_key=os.environ["together_api_key"]
78
- )
79
- # system_prompt = "You are a code structure analyzer. Determine what kind of code this is and its structure."
80
- system_prompt = (
81
- "You are a professional code reviewer and software architect. "
82
- "Your job is to analyze the structure and type of the provided code. "
83
- "Start by identifying the programming language. Then, describe the overall structure: "
84
- "Is it a script, a class, a function, or a complete application? Mention any design patterns, coding conventions, and modules used. "
85
- "Finally, assess whether the code follows clean code principles."
86
- )
87
-
88
- response = llm.invoke([
89
- {"role": "system", "content": system_prompt},
90
- {"role": "user", "content": f"Analyze this code:\n{code}"}
91
- ])
92
- return response.content
93
-
94
- # --- Code Generator ---
95
- @tool
96
- def code_generator(prompt: str) -> str:
97
- """ Generate code based on the natural language description using an LLM"""
98
- llm = ChatTogether(
99
- model="meta-llama/Meta-Llama-3-8B-Instruct",
100
- together_api_key=os.environ["together_api_key"]
101
- )
102
- # system_prompt = "You are a Python code generator. Generate efficient and clean Python code."
103
- system_prompt = (
104
- "You are a Python expert and assistant developer. "
105
- "Your task is to generate efficient, clean, and well-documented Python code based on the given natural language prompt. "
106
- "Ensure code readability, use appropriate naming conventions, and write docstrings for functions if needed. "
107
- "You may also include comments for clarity, and avoid over-engineering the solution."
108
- )
109
-
110
- response = llm.invoke([
111
- {"role": "system", "content": system_prompt},
112
- {"role": "user", "content": f"Write Python code based on this requirement:\n{prompt}"}
113
- ])
114
- return response.content
115
-
116
- # --- Return All Tools ---
117
- def get_all_tools():
118
- return [
119
- execute_code,
120
- explain_code,
121
- web_search,
122
- deep_think,
123
- analyze_code,
124
- code_generator,
125
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # from langchain_core.tools import tool
2
+ # from langchain_together import ChatTogether
3
+ # import os
4
+
5
+ # # --- Execute Code ---
6
+ # @tool
7
+ # def execute_code(code: str) -> str:
8
+ # """ Execute a code snippet and return the resulting local variable or ant error."""
9
+ # exec_locals = {}
10
+ # try:
11
+ # exec(code, {}, exec_locals)
12
+ # return str(exec_locals)
13
+ # except Exception as e:
14
+ # return str(e)
15
+
16
+ # # --- Explain Code ---
17
+ # @tool
18
+ # def explain_code(code: str) -> str:
19
+ # """Explain what a given code snippet does using an llm"""
20
+ # llm = ChatTogether(
21
+ # model="meta-llama/Meta-Llama-3-8B-Instruct",
22
+ # together_api_key=os.environ["together_api_key"]
23
+ # )
24
+ # # system_prompt = "You are a code explainer. Explain clearly and concisely what the code does."
25
+ # system_prompt = (
26
+ # "You are a world-class software engineer and technical educator. "
27
+ # "Your job is to explain what a given piece of code does in clear, concise, and beginner-friendly language. "
28
+ # "Break down the functionality step by step, mention any libraries or functions used, and explain their purpose. "
29
+ # "Avoid jargon where possible, and provide analogies or examples if it helps understanding."
30
+ # )
31
+
32
+ # response = llm.invoke([
33
+ # {"role": "system", "content": system_prompt},
34
+ # {"role": "user", "content": f"Explain this code:\n{code}"}
35
+ # ])
36
+ # return response.content
37
+
38
+ # # --- Web Search ---
39
+ # @tool
40
+ # def web_search(query: str) -> str:
41
+ # """Perform a web search using the Tavily API and return the top results's content."""
42
+ # from tavily import TavilyClient
43
+ # tavily = TavilyClient(api_key=os.environ["tavily_api_key"])
44
+ # try:
45
+ # return tavily.search(query=query)['results'][0]['content']
46
+ # except Exception as e:
47
+ # return f"Error from Tavily: {e}"
48
+
49
+ # # --- Deep Think ---
50
+ # @tool
51
+ # def deep_think(prompt: str) -> str:
52
+ # """Use an LLM to generate a deeply reasoned response to a prompt."""
53
+ # llm = ChatTogether(
54
+ # model="meta-llama/Meta-Llama-3-8B-Instruct",
55
+ # together_api_key=os.environ["together_api_key"]
56
+ # )
57
+ # # system_prompt = "You are a thoughtful reasoning assistant. Think deeply and provide insightful reasoning for the input given by the user. And also what are the steps you need to do as per the user input."
58
+ # system_prompt = (
59
+ # "You are a thoughtful and highly analytical AI assistant trained in critical thinking, planning, and strategy. "
60
+ # "Your task is to analyze the user's input carefully and reason through the problem step-by-step. "
61
+ # "Start by outlining the problem clearly, identify what is being asked, then break down your reasoning into logical steps. "
62
+ # "Also, outline what actions or steps the agent should take based on the prompt — as if you're planning for a human assistant to follow."
63
+ # )
64
+
65
+ # response = llm.invoke([
66
+ # {"role": "system", "content": system_prompt},
67
+ # {"role": "user", "content": prompt}
68
+ # ])
69
+ # return response.content
70
+
71
+ # # --- Analyze Code ---
72
+ # @tool
73
+ # def analyze_code(code: str) -> str:
74
+ # """Analyze the structure and type of code"""
75
+ # llm = ChatTogether(
76
+ # model="meta-llama/Meta-Llama-3-8B-Instruct",
77
+ # together_api_key=os.environ["together_api_key"]
78
+ # )
79
+ # # system_prompt = "You are a code structure analyzer. Determine what kind of code this is and its structure."
80
+ # system_prompt = (
81
+ # "You are a professional code reviewer and software architect. "
82
+ # "Your job is to analyze the structure and type of the provided code. "
83
+ # "Start by identifying the programming language. Then, describe the overall structure: "
84
+ # "Is it a script, a class, a function, or a complete application? Mention any design patterns, coding conventions, and modules used. "
85
+ # "Finally, assess whether the code follows clean code principles."
86
+ # )
87
+
88
+ # response = llm.invoke([
89
+ # {"role": "system", "content": system_prompt},
90
+ # {"role": "user", "content": f"Analyze this code:\n{code}"}
91
+ # ])
92
+ # return response.content
93
+
94
+ # # --- Code Generator ---
95
+ # @tool
96
+ # def code_generator(prompt: str) -> str:
97
+ # """ Generate code based on the natural language description using an LLM"""
98
+ # llm = ChatTogether(
99
+ # model="meta-llama/Meta-Llama-3-8B-Instruct",
100
+ # together_api_key=os.environ["together_api_key"]
101
+ # )
102
+ # # system_prompt = "You are a Python code generator. Generate efficient and clean Python code."
103
+ # system_prompt = (
104
+ # "You are a Python expert and assistant developer. "
105
+ # "Your task is to generate efficient, clean, and well-documented Python code based on the given natural language prompt. "
106
+ # "Ensure code readability, use appropriate naming conventions, and write docstrings for functions if needed. "
107
+ # "You may also include comments for clarity, and avoid over-engineering the solution."
108
+ # )
109
+
110
+ # response = llm.invoke([
111
+ # {"role": "system", "content": system_prompt},
112
+ # {"role": "user", "content": f"Write Python code based on this requirement:\n{prompt}"}
113
+ # ])
114
+ # return response.content
115
+
116
+ # # --- Return All Tools ---
117
+ # def get_all_tools():
118
+ # return [
119
+ # execute_code,
120
+ # explain_code,
121
+ # web_search,
122
+ # deep_think,
123
+ # analyze_code,
124
+ # code_generator,
125
+ # ]
126
+
127
+
128
+
129
+ from langchain.tools import tool
130
+ import contextlib
131
+ import io
132
+ import traceback
133
+
134
@tool
def execute_python_code(code: str) -> str:
    """
    Execute the given Python code and return its captured stdout or an error.

    Args:
        code: Python source to execute. Run via ``exec`` with a fresh, empty
            global namespace, so it does not see (or pollute) this module.

    Returns:
        Everything the code printed to stdout; a success marker string when
        execution produced no output; or the full traceback text when
        execution raised any exception.

    NOTE(review): ``exec`` on arbitrary strings is inherently unsafe — this
    tool must only ever receive trusted / agent-generated code, never raw
    untrusted user input.
    """
    buffer = io.StringIO()
    try:
        # Capture print() output so the agent can observe what the code did.
        with contextlib.redirect_stdout(buffer):
            exec(code, {})
        return buffer.getvalue() or "✅ Code executed successfully with no output."
    except Exception:
        # format_exc() already includes the exception type and message, so
        # binding the exception to a name (the original's unused `e`) is
        # unnecessary.
        return "❌ Execution Error:\n" + traceback.format_exc()