import base64
import json
import os

import assemblyai as aai
import pandas as pd
from dotenv import load_dotenv
from langchain_community.document_loaders import WikipediaLoader
from langchain_community.tools import DuckDuckGoSearchResults
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.tools import Tool, tool
from langchain_experimental.utilities import PythonREPL
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_groq import ChatGroq
from langchain_tavily import TavilyExtract, TavilySearch
from langgraph.graph import START, StateGraph, MessagesState
from langgraph.prebuilt import ToolNode, tools_condition

load_dotenv()
aai.settings.api_key = os.getenv("ASSEMBLY_AI_KEY")

# Generic Python shell: lets the agent execute arbitrary python snippets.
repl_tool = Tool(
    name="python_repl",
    description=(
        "A Python shell. Use this to execute python commands. Input should be "
        "a valid python command. If you want to see the output of a value, you "
        "should print it out with `print(...)`."
    ),
    func=PythonREPL().run,
)

# Tavily web search (advanced depth, up to 5 hits per query).
tavily_search_tool = TavilySearch(
    max_results=5,
    topic="general",
    search_depth="advanced",
)

# Tavily page-content extraction tool.
tavily_extract_tool = TavilyExtract()


@tool
def describe_image(file_name: str) -> str:
    """Describe the image.

    Args:
        file_name: name of image file
    """
    # Inline the image as a base64 data URL so it can travel inside the message.
    with open(file_name, "rb") as image_file:
        encoded_image = base64.b64encode(image_file.read()).decode("utf-8")

    message_local = HumanMessage(
        content=[
            {"type": "text", "text": "Describe the local image."},
            {
                "type": "image_url",
                "image_url": f"data:image/png;base64,{encoded_image}",
            },
        ]
    )
    # A multimodal model is needed here; the main agent LLM is text-only.
    llm = ChatGoogleGenerativeAI(
        model="gemini-2.0-flash",
        temperature=0.1,
        max_tokens=None,
        timeout=None,
        max_retries=2,
    )
    result_local = llm.invoke([message_local])
    # BUG FIX: the original string lacked the f-prefix, so the literal text
    # "{result_local.content}" was returned instead of the model's description.
    return f"Response for local image: {result_local.content}"


@tool
def read_excel_file(file_name: str) -> str:
    """Read the content of excel file.

    Args:
        file_name: name of excel file
    """
    try:
        # sheet_name=None loads every sheet into a {name: DataFrame} dict.
        df = pd.read_excel(file_name, sheet_name=None)
        # Convert each sheet to a list of row dicts, then serialize as JSON.
        json_output = {
            sheet_name: sheet_data.to_dict(orient="records")
            for sheet_name, sheet_data in df.items()
        }
        return json.dumps(json_output, indent=4)
    except Exception as e:
        # Surface the failure to the LLM as text rather than crashing the graph.
        return str(e)


@tool
def transcribe_audio(file_name: str) -> str:
    """Transcribe the audio file into text.

    Args:
        file_name: name of audio file
    """
    config = aai.TranscriptionConfig(speech_model=aai.SpeechModel.best)
    transcript = aai.Transcriber(config=config).transcribe(file_name)
    if transcript.status == "error":
        raise RuntimeError(f"Transcription failed: {transcript.error}")
    return f"Here is the transcript: {transcript.text}"


@tool
def wiki_search(query: str) -> str:
    """Search Wikipedia for a query and return maximum 2 results.

    Args:
        query: The search query."""
    search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
    formatted_search_docs = "\n\n---\n\n".join(
        f"\n{doc.page_content}\n" for doc in search_docs
    )
    # BUG FIX: return the formatted string itself; the original returned a
    # dict, contradicting the declared -> str return type (every other tool
    # here returns plain text).
    return formatted_search_docs


@tool
def solve_math_problem(problem: str) -> str:
    """Solve logic or math problem.

    Args:
        problem: The problem statement."""
    # Delegate to Gemini rather than the main Groq model (debug print removed).
    llm = ChatGoogleGenerativeAI(
        model="gemini-2.0-flash",
        temperature=0.1,
        max_tokens=None,
        timeout=None,
        max_retries=2,
    )
    response = llm.invoke(problem)
    return response.content
# Answer-formatting contract the model must follow; get_answer() strips the
# "FINAL ANSWER:" marker from the reply.
system_prompt = """
You are a helpful assistant tasked with answering questions using a set of tools.
If the question is related to math or logic or a puzzle, ALWAYS USE a tool and NOT trying to answer by yourself.
Now, I will ask you a question. Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER].
YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings.
If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise.
If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise.
If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.
Your answer should only start with "FINAL ANSWER: ", then follows with the answer.
"""

sys_msg = SystemMessage(content=system_prompt)

# All tools exposed to the agent.
tools = [
    solve_math_problem,
    wiki_search,
    describe_image,
    tavily_search_tool,
    tavily_extract_tool,
    repl_tool,
    read_excel_file,
    transcribe_audio,
]

# Main agent LLM (Groq-hosted) with the tool schemas bound for function calling.
llm = ChatGroq(model="qwen-qwq-32b", temperature=0.1)
llm_with_tools = llm.bind_tools(tools)


def assistant(state: MessagesState):
    """Assistant node: run the tool-enabled LLM over the conversation so far."""
    return {"messages": [llm_with_tools.invoke(state["messages"])]}


def final_answer(answer: str) -> str:
    """Remove the 'FINAL ANSWER:' marker from the model's reply.

    BUG FIX: the original left the leading space (and any surrounding
    whitespace) in place, which produced a double space when __main__
    re-prefixes the result with "FINAL ANSWER: ". Strip it.
    """
    return answer.replace("FINAL ANSWER:", "").strip()


# ReAct-style loop: assistant -> (tools -> assistant)* until no tool call.
builder = StateGraph(MessagesState)
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))
builder.add_edge(START, "assistant")
builder.add_conditional_edges(
    "assistant",
    tools_condition,
)
builder.add_edge("tools", "assistant")
graph = builder.compile()


def get_answer(query: str) -> str:
    """Run the agent graph on a question and return the cleaned final answer."""
    messages = [sys_msg, HumanMessage(content=query)]
    results = graph.invoke({"messages": messages})
    return final_answer(results["messages"][-1].content)


if __name__ == "__main__":
    # Sample questions kept for manual testing; only the last active
    # assignment is used (the dead intermediate ones are commented out).
    # question = "In the video https://www.youtube.com/watch?v=L1vXCYZAYYM, what is the highest number of bird species to be on camera simultaneously?"
    # question = "Hi, I was out sick from my classes on Friday, so I'm trying to figure out what I need to study for my Calculus mid-term next week. My friend from class sent me an audio recording of Professor Willowbrook giving out the recommended reading for the test, but my headphones are broken :(\n\nCould you please listen to the recording for me and tell me the page numbers I'm supposed to go over? I've attached a file called Homework.mp3 that has the recording. Please provide just the page numbers as a comma-delimited list. And please provide the list in ascending order."
    # question = "What is the first name of the only Malko Competition recipient from the 20th Century (after 1977) whose nationality on record is a country that no longer exists?"
    # question = "Where were the Vietnamese specimens described by Kuznetzov in Nedoshivina's 2010 paper eventually deposited? Just give me the city name without abbreviations."
    # question = "How many studio albums were published by Mercedes Sosa between 2000 and 2009 (included)? You can use the latest 2022 version of english wikipedia."
    question = "Given this table defining * on the set S = {a, b, c, d, e}\n\n|*|a|b|c|d|e|\n|---|---|---|---|---|---|\n|a|a|b|c|b|d|\n|b|b|c|a|e|c|\n|c|c|a|b|b|a|\n|d|b|e|b|e|d|\n|e|d|b|a|d|c|\n\nprovide the subset of S involved in any possible counter-examples that prove * is not commutative. Provide your answer as a comma separated list of the elements in the set in alphabetical order."
    print(f"FINAL ANSWER: {get_answer(question)}")