import os
import time
from typing import TypedDict, List, Dict, Any, Optional

import requests
import pandas as pd
from pypdf import PdfReader

from langchain.agents import create_tool_calling_agent, AgentExecutor, initialize_agent
from langchain_core.messages import HumanMessage
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.tools import tool
from langchain_community.document_loaders import (
    AssemblyAIAudioTranscriptLoader,
    ImageCaptionLoader,
    UnstructuredExcelLoader,
    YoutubeLoader,
)
from langchain_community.tools import DuckDuckGoSearchResults, WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_experimental.tools import PythonREPLTool
from langchain_google_genai import ChatGoogleGenerativeAI


# 1. Web browsing tools

@tool
def web_search(query: str) -> str:
    """Search the web through DuckDuckGo.

    Args:
        query: what you want to search
    """
    search = DuckDuckGoSearchResults()
    results = search.invoke(query)
    # DuckDuckGoSearchResults.invoke returns a single string; the original
    # code joined it char-by-char. Only join when a list comes back.
    if isinstance(results, list):
        return "\n".join(str(r) for r in results)
    return results


@tool
def visit_webpage(url: str) -> str:
    """Fetch the raw HTML content of a web page (truncated to 5000 chars).

    Args:
        url: the webpage url
    """
    try:
        response = requests.get(url, timeout=5)
        return response.text[:5000]
    except Exception as e:
        # Best-effort: report the failure to the agent instead of raising.
        return f"[ERROR fetching {url}]: {str(e)}"


@tool
def wiki_search(query: str) -> str:
    """Search Wikipedia and return a short summary of the top result.

    Args:
        query: what you want to wiki
    """
    api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)
    wikipediatool = WikipediaQueryRun(api_wrapper=api_wrapper)
    return wikipediatool.run({"query": query})


@tool
def youtube_transcript(video_url: str) -> str:
    """Fetch the transcript of a YouTube video.

    Args:
        video_url: YouTube video url
    """
    try:
        loader = YoutubeLoader.from_youtube_url(video_url)
        # loader.load() returns a list of Documents; flatten to text so the
        # declared -> str return type holds.
        docs = loader.load()
        return "\n".join(doc.page_content for doc in docs)
    except Exception as e:
        return f"Error fetching transcript: {str(e)}"


# 4. File-reading tools

@tool
def read_file(dir: str) -> str:
    """Read the content of a file (PDF or plain text).

    Args:
        dir: the filepath
    """
    # Fix: the original used dir.split['.'] (subscript) which raises TypeError.
    extension = dir.split('.')[-1].lower()
    if extension == 'pdf':
        reader = PdfReader(dir)
        contents = [p.extract_text() for p in reader.pages]
        return "\n".join(contents)
    with open(dir, encoding="utf-8") as f:
        return f.read()


@tool
def excel_read(dir: str) -> str:
    """Read the content of an Excel file.

    Args:
        dir: the filepath
    """
    loader = UnstructuredExcelLoader(dir, mode="elements")
    docs = loader.load()
    return "\n".join(doc.page_content for doc in docs)


@tool
def mp3_listen(dir: str) -> str:
    """Transcribe the provided mp3 file via AssemblyAI.

    Args:
        dir: the filepath
    """
    loader = AssemblyAIAudioTranscriptLoader(file_path=dir)
    docs = loader.load()
    return "\n".join(doc.page_content for doc in docs)


# 5. Image understanding

@tool
def image_caption(dir: str) -> str:
    """Describe the visual content of the provided image.

    Args:
        dir: the image url link
    """
    loader = ImageCaptionLoader(images=[dir])
    metadata = loader.load()
    return metadata[0].page_content


# 2. Coding
python_tool = PythonREPLTool()


# 3. Arithmetic tools

@tool
def multiply(a: float, b: float) -> float:
    """Multiply two numbers.

    Args:
        a: first float
        b: second float
    """
    return a * b


@tool
def add(a: float, b: float) -> float:
    """Add two numbers.

    Args:
        a: first float
        b: second float
    """
    return a + b


@tool
def subtract(a: float, b: float) -> float:
    """Subtract two numbers (a - b).

    Args:
        a: first float
        b: second float
    """
    return a - b


@tool
def divide(a: float, b: float) -> float:
    """Divide two numbers (a / b).

    Args:
        a: first float
        b: second float

    Raises:
        ValueError: if b is zero.
    """
    if b == 0:
        raise ValueError("Cannot divide by zero.")
    return a / b


class BasicAgent:
    """ReAct-style tool-calling agent backed by Gemini 2.0 Flash."""

    def __init__(self):
        # SECURITY: the API key was previously hard-coded in source; it must
        # come from the environment. Revoke any key that was committed.
        self.model = ChatGoogleGenerativeAI(
            model="gemini-2.0-flash",
            temperature=0,
            max_tokens=128,
            timeout=None,
            max_retries=2,
            google_api_key=os.environ.get("GOOGLE_API_KEY", ""),
        )
        # System prompt (fixed: the original opened with four quotes, which
        # injected a stray '"' character at the start of the prompt).
        self.sys_prompt = """
You are a general AI assistant. I will ask you a question. Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER]. YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (eg. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to put in the list is a number or a string. 
You have access to the following tools: - web_search: web search the content of the query by passing the query as input - visit_webpage: visit the given webpage url by passing the url as input - wiki_search: wiki search the content of the query by passing the query as input if the question asks for wiki search it - youtube_transcript: fetch the transcript of the Youtube video by passing the video url as input if the question asks for watching a Youtube video - read_file: read the content of the attached file by passing the file directory as input - excel_read: read the content of the attached excel file by passing the file directory as input - mp3_listen: listen to the content of the attached mp3 file by passing the file directory as input - image_caption: understand the visual content of the attached image by passing the image directory as input - python_tool: run the python code """
        self.tools = [
            web_search,
            visit_webpage,
            wiki_search,
            youtube_transcript,
            read_file,
            excel_read,
            mp3_listen,
            image_caption,
            python_tool,
        ]
        self.prompt = ChatPromptTemplate.from_messages([
            ("system", self.sys_prompt),
            ("human", "{input}"),
        ])
        # NOTE(review): initialize_agent is deprecated in recent LangChain and
        # "zero-shot-react-description" builds its own prompt; confirm that
        # the system_prompt kwarg is actually honored by this agent type.
        self.agent = initialize_agent(
            tools=self.tools,
            llm=self.model,
            agent="zero-shot-react-description",  # ReAct agent type
            verbose=True,
            system_prompt=self.prompt,
        )
        print("BasicAgent initialized.")

    def __call__(self, question: str) -> str:
        """Run the agent on a question and return its final answer string."""
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        # Crude rate limiting to stay under the Gemini free-tier quota.
        time.sleep(15)
        fixed_answer = self.agent.run(question)
        print(f"Agent returning fixed answer: {fixed_answer}")
        return fixed_answer