| import os |
| from typing import TypedDict, List, Dict, Any, Optional, Union |
| from langchain_core import tools |
| from langgraph.graph import StateGraph, START, END |
| from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFacePipeline |
| from langchain_core.messages import HumanMessage, AIMessage, SystemMessage |
| from langchain_core.tools import tool |
| from ddgs import DDGS |
| from dotenv import load_dotenv |
|
|
|
|
# Load environment variables (e.g. HUGGINGFACEHUB_API_TOKEN) from a local .env file.
load_dotenv()


# Raw text-generation backend: a remote Hugging Face Inference endpoint.
# temperature=0.0 keeps generations (near-)deterministic for reproducible answers.
base_llm = HuggingFaceEndpoint(
    repo_id="deepseek-ai/DeepSeek-R1-0528",

    task="text-generation",
    temperature=0.0,
    huggingfacehub_api_token=os.getenv("HUGGINGFACEHUB_API_TOKEN"),
)


# Chat-style wrapper so the endpoint accepts message objects and supports tool binding.
model = ChatHuggingFace(llm=base_llm)
|
|
@tool
def web_search(keywords: str, max_results: int = 5) -> str:
    """
    Search the web via DuckDuckGo and return formatted text results.

    Use cases:
        - Identify personal information
        - Information search
        - Finding organisation information
        - Obtain the latest news

    Args:
        keywords: keywords used to search the web
        max_results: number of results to show after searching the web, defaults to 5

    Returns:
        Search result (Header + body + url)
    """
    with DDGS() as ddgs:
        hits = ddgs.text(keywords, max_results=max_results)
        # One formatted entry per hit: title, snippet body, then the source URL.
        chunks = [
            f"Results: {hit['title']}\n{hit['body']}\n{hit['href']}\n\n"
            for hit in hits
        ]
    return "".join(chunks)
|
|
@tool
def get_image_file(task_id: str) -> str:
    """
    Get the image file from the question.

    Use cases:
        - Extract Image from the question

    Args:
        task_id: the task_id of the question (presumably a string identifier —
            TODO confirm against the caller)

    Returns:
        Image file result. Currently a stub: always returns an empty string.
    """
    # NOTE(review): not implemented yet. The original body had a redundant
    # `pass` before the return; removed as dead code. Keep the empty-string
    # return so callers always receive a str rather than None.
    return ''
|
|
|
|
class AgentState(TypedDict):
    """Shared LangGraph state: the running conversation history."""

    # NOTE(review): the declared Union is narrower than actual usage —
    # answer_message appends raw tool-result strings to the working list and
    # returns a plain `str` (the post-</think> text) inside this list.
    # Confirm and either widen the annotation or fix the producers.
    messages: List[Union[HumanMessage, AIMessage]]
|
|
|
|
def read_message(state: AgentState) -> AgentState:
    """Log the most recent question and pass the state through unchanged."""
    history = state["messages"]
    latest = history[-1].content if history else ''
    print(f"Processing question: {latest}")
    return {"messages": history}
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
# Registry of callable tools, a name->tool lookup for dispatching tool calls,
# and a model instance that can emit tool calls for them.
tools = [web_search, get_image_file]
tools_by_name = {t.name: t for t in tools}
model_with_tools = model.bind_tools(tools)
|
|
def answer_message(state: AgentState) -> AgentState:
    """Answer the question with the tool-enabled model.

    Invokes the model once, executes any requested tool calls, then asks the
    model for a final answer and strips any leading <think> block from it.

    Args:
        state: graph state carrying the conversation history.

    Returns:
        State dict whose "messages" list holds the final answer text (str).
    """
    messages = state["messages"]
    prompt = [SystemMessage("""
    You are a GAIA question answering expert.
    Your task is to provide an answer to a question.
    Think carefully before answering the question.
    Do not include any thought process before answering the question, and only response exactly what was being asked of you.
    If you are not able to provide an answer, use tools or state the limitation that you're facing instead.

    Example question: How many hours are there in a day?
    Response: 24
    """)]
    messages = prompt + messages
    ai_msg = model_with_tools.invoke(messages)
    messages.append(ai_msg)

    # Execute every tool call the model requested and append the raw results
    # so the follow-up invocation can see them.
    for tool_call in ai_msg.tool_calls:
        name = tool_call['name']
        args = tool_call['args']
        tool_fn = tool_call and tools_by_name[name]
        tool_result = tool_fn.invoke(args)
        messages.append(tool_result)

    final_instruction = HumanMessage(
        content=(
            "Using the tool results above, provide the FINAL answer now. "
            "Do not call any tools. Respond with only the answer."
        )
    )
    messages.append(final_instruction)

    final_response = model_with_tools.invoke(messages)

    print(f"Final response: {final_response}")
    # BUG FIX: Python str has no .trim() (that's JavaScript) — the original
    # raised AttributeError here; use .strip() instead. Also guard the split:
    # indexing [1] raises IndexError when the model emits no </think> tag.
    content = final_response.content
    if '</think>' in content:
        content = content.split('</think>')[1]
    answer = content.strip()

    return {"messages": [answer]}
|
|
|
|
|
|
def build_graph():
    """Assemble and compile the two-node agent graph.

    Flow: START -> read_message -> answer_message -> END.

    Returns:
        The compiled LangGraph application.
    """
    graph = StateGraph(AgentState)

    # Register the two processing nodes.
    graph.add_node("read_message", read_message)
    graph.add_node("answer_message", answer_message)

    # Wire the linear pipeline from entry to exit.
    graph.add_edge(START, "read_message")
    graph.add_edge("read_message", "answer_message")
    graph.add_edge("answer_message", END)

    return graph.compile()
|
|