Spaces:
Sleeping
Sleeping
File size: 7,412 Bytes
88aa38a 5901ddd 122377f 0a307b0 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 |
# from langchain_core.tools import tool
# from langchain_together import ChatTogether
# import os
# # --- Execute Code ---
# @tool
# def execute_code(code: str) -> str:
# """ Execute a code snippet and return the resulting local variable or ant error."""
# exec_locals = {}
# try:
# exec(code, {}, exec_locals)
# return str(exec_locals)
# except Exception as e:
# return str(e)
# # --- Explain Code ---
# @tool
# def explain_code(code: str) -> str:
# """Explain what a given code snippet does using an llm"""
# llm = ChatTogether(
# model="meta-llama/Meta-Llama-3-8B-Instruct",
# together_api_key=os.environ["together_api_key"]
# )
# # system_prompt = "You are a code explainer. Explain clearly and concisely what the code does."
# system_prompt = (
# "You are a world-class software engineer and technical educator. "
# "Your job is to explain what a given piece of code does in clear, concise, and beginner-friendly language. "
# "Break down the functionality step by step, mention any libraries or functions used, and explain their purpose. "
# "Avoid jargon where possible, and provide analogies or examples if it helps understanding."
# )
# response = llm.invoke([
# {"role": "system", "content": system_prompt},
# {"role": "user", "content": f"Explain this code:\n{code}"}
# ])
# return response.content
# # --- Web Search ---
# @tool
# def web_search(query: str) -> str:
# """Perform a web search using the Tavily API and return the top results's content."""
# from tavily import TavilyClient
# tavily = TavilyClient(api_key=os.environ["tavily_api_key"])
# try:
# return tavily.search(query=query)['results'][0]['content']
# except Exception as e:
# return f"Error from Tavily: {e}"
# # --- Deep Think ---
# @tool
# def deep_think(prompt: str) -> str:
# """Use an LLM to generate a deeply reasoned response to a prompt."""
# llm = ChatTogether(
# model="meta-llama/Meta-Llama-3-8B-Instruct",
# together_api_key=os.environ["together_api_key"]
# )
# # system_prompt = "You are a thoughtful reasoning assistant. Think deeply and provide insightful reasoning for the input given by the user. And also what are the steps you need to do as per the user input."
# system_prompt = (
# "You are a thoughtful and highly analytical AI assistant trained in critical thinking, planning, and strategy. "
# "Your task is to analyze the user's input carefully and reason through the problem step-by-step. "
# "Start by outlining the problem clearly, identify what is being asked, then break down your reasoning into logical steps. "
# "Also, outline what actions or steps the agent should take based on the prompt — as if you're planning for a human assistant to follow."
# )
# response = llm.invoke([
# {"role": "system", "content": system_prompt},
# {"role": "user", "content": prompt}
# ])
# return response.content
# # --- Analyze Code ---
# @tool
# def analyze_code(code: str) -> str:
# """Analyze the structure and type of code"""
# llm = ChatTogether(
# model="meta-llama/Meta-Llama-3-8B-Instruct",
# together_api_key=os.environ["together_api_key"]
# )
# # system_prompt = "You are a code structure analyzer. Determine what kind of code this is and its structure."
# system_prompt = (
# "You are a professional code reviewer and software architect. "
# "Your job is to analyze the structure and type of the provided code. "
# "Start by identifying the programming language. Then, describe the overall structure: "
# "Is it a script, a class, a function, or a complete application? Mention any design patterns, coding conventions, and modules used. "
# "Finally, assess whether the code follows clean code principles."
# )
# response = llm.invoke([
# {"role": "system", "content": system_prompt},
# {"role": "user", "content": f"Analyze this code:\n{code}"}
# ])
# return response.content
# # --- Code Generator ---
# @tool
# def code_generator(prompt: str) -> str:
# """ Generate code based on the natural language description using an LLM"""
# llm = ChatTogether(
# model="meta-llama/Meta-Llama-3-8B-Instruct",
# together_api_key=os.environ["together_api_key"]
# )
# # system_prompt = "You are a Python code generator. Generate efficient and clean Python code."
# system_prompt = (
# "You are a Python expert and assistant developer. "
# "Your task is to generate efficient, clean, and well-documented Python code based on the given natural language prompt. "
# "Ensure code readability, use appropriate naming conventions, and write docstrings for functions if needed. "
# "You may also include comments for clarity, and avoid over-engineering the solution."
# )
# response = llm.invoke([
# {"role": "system", "content": system_prompt},
# {"role": "user", "content": f"Write Python code based on this requirement:\n{prompt}"}
# ])
# return response.content
# # --- Return All Tools ---
# def get_all_tools():
# return [
# execute_code,
# explain_code,
# web_search,
# deep_think,
# analyze_code,
# code_generator,
# ]
import contextlib
import io
import os
import traceback

from langchain.tools import tool
@tool
def execute_python_code(code: str) -> str:
    """
    Execute the given Python code and return captured stdout or the error traceback.

    Args:
        code: Python source to run. It is executed with ``exec`` in a fresh,
            empty global namespace, so it does not see this module's names.

    Returns:
        Everything the snippet printed to stdout; a success message when it
        printed nothing; or the full traceback prefixed with an error marker
        when execution raised.
    """
    # SECURITY: exec() runs arbitrary code with full interpreter privileges.
    # Only feed this tool trusted input (e.g. from a sandboxed agent session).
    buffer = io.StringIO()
    try:
        # Capture anything the snippet prints so it can be returned to the agent.
        with contextlib.redirect_stdout(buffer):
            exec(code, {})
        return buffer.getvalue() or "✅ Code executed successfully with no output."
    except Exception:
        # Return the full traceback (not just str(e)) so the caller can see
        # exactly where the snippet failed.
        return "❌ Execution Error:\n" + traceback.format_exc()
@tool
def web_search(query: str) -> str:
    """Perform a web search using the Tavily API and return the top result's content.

    Args:
        query: The search query string.

    Returns:
        The ``content`` field of the first Tavily search result, or an error
        message string if the request fails or returns no results.
    """
    # Imported locally (matching the file's existing style for tavily): `os`
    # is not imported at module level in the active code, so without this the
    # tool raised NameError on every call.
    import os

    from tavily import TavilyClient

    # NOTE(review): env var name is lowercase "tavily_api_key" — convention is
    # TAVILY_API_KEY; confirm against the deployment environment before changing.
    tavily = TavilyClient(api_key=os.environ["tavily_api_key"])
    try:
        # An empty results list raises IndexError, which is caught below and
        # reported as an error string rather than crashing the agent loop.
        return tavily.search(query=query)['results'][0]['content']
    except Exception as e:
        return f"Error from Tavily: {e}"
@tool
def deep_think(prompt: str) -> str:
    """Use an LLM to generate a deeply reasoned, step-by-step response to a prompt.

    Args:
        prompt: The user's question or problem statement.

    Returns:
        The model's reasoning as plain text (``response.content``).
    """
    # Imported locally: neither `os` nor `ChatTogether` is imported at module
    # level in the active code (their imports exist only in the commented-out
    # block above), so without these the tool raised NameError on every call.
    import os

    from langchain_together import ChatTogether

    llm = ChatTogether(
        model="meta-llama/Meta-Llama-3-8B-Instruct",
        # NOTE(review): lowercase env var name is unusual (convention is
        # TOGETHER_API_KEY); confirm against the deployment environment.
        together_api_key=os.environ["together_api_key"]
    )
    # Prompt the model to plan explicitly: restate the problem, then reason in
    # numbered steps, then list concrete actions for the agent to take.
    system_prompt = (
        "You are a thoughtful and highly analytical AI assistant trained in critical thinking, planning, and strategy. "
        "Your task is to analyze the user's input carefully and reason through the problem step-by-step. "
        "Start by outlining the problem clearly, identify what is being asked, then break down your reasoning into logical steps. "
        "Also, outline what actions or steps the agent should take based on the prompt — as if you're planning for a human assistant to follow."
    )
    response = llm.invoke([
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": prompt}
    ])
    return response.content