Spaces:
Sleeping
Sleeping
| # from langchain_core.tools import tool | |
| # from langchain_together import ChatTogether | |
| # import os | |
| # # --- Execute Code --- | |
| # @tool | |
| # def execute_code(code: str) -> str: | |
| # """ Execute a code snippet and return the resulting local variable or ant error.""" | |
| # exec_locals = {} | |
| # try: | |
| # exec(code, {}, exec_locals) | |
| # return str(exec_locals) | |
| # except Exception as e: | |
| # return str(e) | |
| # # --- Explain Code --- | |
| # @tool | |
| # def explain_code(code: str) -> str: | |
| # """Explain what a given code snippet does using an llm""" | |
| # llm = ChatTogether( | |
| # model="meta-llama/Meta-Llama-3-8B-Instruct", | |
| # together_api_key=os.environ["together_api_key"] | |
| # ) | |
| # # system_prompt = "You are a code explainer. Explain clearly and concisely what the code does." | |
| # system_prompt = ( | |
| # "You are a world-class software engineer and technical educator. " | |
| # "Your job is to explain what a given piece of code does in clear, concise, and beginner-friendly language. " | |
| # "Break down the functionality step by step, mention any libraries or functions used, and explain their purpose. " | |
| # "Avoid jargon where possible, and provide analogies or examples if it helps understanding." | |
| # ) | |
| # response = llm.invoke([ | |
| # {"role": "system", "content": system_prompt}, | |
| # {"role": "user", "content": f"Explain this code:\n{code}"} | |
| # ]) | |
| # return response.content | |
| # # --- Web Search --- | |
| # @tool | |
| # def web_search(query: str) -> str: | |
| # """Perform a web search using the Tavily API and return the top results's content.""" | |
| # from tavily import TavilyClient | |
| # tavily = TavilyClient(api_key=os.environ["tavily_api_key"]) | |
| # try: | |
| # return tavily.search(query=query)['results'][0]['content'] | |
| # except Exception as e: | |
| # return f"Error from Tavily: {e}" | |
| # # --- Deep Think --- | |
| # @tool | |
| # def deep_think(prompt: str) -> str: | |
| # """Use an LLM to generate a deeply reasoned response to a prompt.""" | |
| # llm = ChatTogether( | |
| # model="meta-llama/Meta-Llama-3-8B-Instruct", | |
| # together_api_key=os.environ["together_api_key"] | |
| # ) | |
| # # system_prompt = "You are a thoughtful reasoning assistant. Think deeply and provide insightful reasoning for the input given by the user. And also what are the steps you need to do as per the user input." | |
| # system_prompt = ( | |
| # "You are a thoughtful and highly analytical AI assistant trained in critical thinking, planning, and strategy. " | |
| # "Your task is to analyze the user's input carefully and reason through the problem step-by-step. " | |
| # "Start by outlining the problem clearly, identify what is being asked, then break down your reasoning into logical steps. " | |
| # "Also, outline what actions or steps the agent should take based on the prompt β as if you're planning for a human assistant to follow." | |
| # ) | |
| # response = llm.invoke([ | |
| # {"role": "system", "content": system_prompt}, | |
| # {"role": "user", "content": prompt} | |
| # ]) | |
| # return response.content | |
| # # --- Analyze Code --- | |
| # @tool | |
| # def analyze_code(code: str) -> str: | |
| # """Analyze the structure and type of code""" | |
| # llm = ChatTogether( | |
| # model="meta-llama/Meta-Llama-3-8B-Instruct", | |
| # together_api_key=os.environ["together_api_key"] | |
| # ) | |
| # # system_prompt = "You are a code structure analyzer. Determine what kind of code this is and its structure." | |
| # system_prompt = ( | |
| # "You are a professional code reviewer and software architect. " | |
| # "Your job is to analyze the structure and type of the provided code. " | |
| # "Start by identifying the programming language. Then, describe the overall structure: " | |
| # "Is it a script, a class, a function, or a complete application? Mention any design patterns, coding conventions, and modules used. " | |
| # "Finally, assess whether the code follows clean code principles." | |
| # ) | |
| # response = llm.invoke([ | |
| # {"role": "system", "content": system_prompt}, | |
| # {"role": "user", "content": f"Analyze this code:\n{code}"} | |
| # ]) | |
| # return response.content | |
| # # --- Code Generator --- | |
| # @tool | |
| # def code_generator(prompt: str) -> str: | |
| # """ Generate code based on the natural language description using an LLM""" | |
| # llm = ChatTogether( | |
| # model="meta-llama/Meta-Llama-3-8B-Instruct", | |
| # together_api_key=os.environ["together_api_key"] | |
| # ) | |
| # # system_prompt = "You are a Python code generator. Generate efficient and clean Python code." | |
| # system_prompt = ( | |
| # "You are a Python expert and assistant developer. " | |
| # "Your task is to generate efficient, clean, and well-documented Python code based on the given natural language prompt. " | |
| # "Ensure code readability, use appropriate naming conventions, and write docstrings for functions if needed. " | |
| # "You may also include comments for clarity, and avoid over-engineering the solution." | |
| # ) | |
| # response = llm.invoke([ | |
| # {"role": "system", "content": system_prompt}, | |
| # {"role": "user", "content": f"Write Python code based on this requirement:\n{prompt}"} | |
| # ]) | |
| # return response.content | |
| # # --- Return All Tools --- | |
| # def get_all_tools(): | |
| # return [ | |
| # execute_code, | |
| # explain_code, | |
| # web_search, | |
| # deep_think, | |
| # analyze_code, | |
| # code_generator, | |
| # ] | |
import contextlib
import io
import os
import traceback

from langchain.tools import tool
def execute_python_code(code: str) -> str:
    """
    Execute a Python code snippet and return its captured stdout.

    Args:
        code: Python source text to run with ``exec``.

    Returns:
        Everything the snippet printed to stdout, a success notice when it
        printed nothing, or the formatted traceback when execution raised.

    WARNING: ``exec`` runs arbitrary code with full interpreter privileges —
    never feed it untrusted input.
    """
    buffer = io.StringIO()
    try:
        # Redirect stdout so print() output from the snippet is captured.
        with contextlib.redirect_stdout(buffer):
            exec(code, {})  # empty globals: snippet runs in a fresh namespace
    except Exception:
        # Return the full traceback text so the caller can see what failed.
        # (No need to bind the exception; format_exc() reads the active one.)
        return "β Execution Error:\n" + traceback.format_exc()
    return buffer.getvalue() or "β Code executed successfully with no output."
def web_search(query: str) -> str:
    """
    Perform a web search via the Tavily API and return the top result's content.

    Reads the API key from the ``tavily_api_key`` environment variable
    (raises KeyError if it is unset, as in the original).

    Args:
        query: Search query string.

    Returns:
        The ``content`` field of the first search result, or an error
        message string when the API call fails or returns no results.
    """
    # Local imports: `os` was never imported at module level in the live code
    # (NameError before this fix), and the Tavily client follows the file's
    # existing local-import style.
    import os

    from tavily import TavilyClient

    tavily = TavilyClient(api_key=os.environ["tavily_api_key"])
    try:
        return tavily.search(query=query)['results'][0]['content']
    except Exception as e:
        # Best-effort tool: surface the failure as text instead of raising.
        return f"Error from Tavily: {e}"
def deep_think(prompt: str) -> str:
    """
    Use an LLM to generate a deeply reasoned, step-by-step response.

    Reads the Together API key from the ``together_api_key`` environment
    variable (raises KeyError if it is unset, as in the original).

    Args:
        prompt: The user's question or task description.

    Returns:
        The model's reasoned answer text.
    """
    # Local imports: neither `os` nor `ChatTogether` was imported in the live
    # code (both imports exist only in the commented-out block above), so this
    # function raised NameError on first call. Function-scope imports mirror
    # web_search's local Tavily import.
    import os

    from langchain_together import ChatTogether

    llm = ChatTogether(
        model="meta-llama/Meta-Llama-3-8B-Instruct",
        together_api_key=os.environ["together_api_key"],
    )
    system_prompt = (
        "You are a thoughtful and highly analytical AI assistant trained in critical thinking, planning, and strategy. "
        "Your task is to analyze the user's input carefully and reason through the problem step-by-step. "
        "Start by outlining the problem clearly, identify what is being asked, then break down your reasoning into logical steps. "
        "Also, outline what actions or steps the agent should take based on the prompt β as if you're planning for a human assistant to follow."
    )
    response = llm.invoke([
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": prompt},
    ])
    return response.content