| | """LangGraph Agent""" |
| | import os |
| | from dotenv import load_dotenv |
| | from langgraph.graph import START, StateGraph, MessagesState |
| | from langgraph.prebuilt import tools_condition |
| | from langgraph.prebuilt import ToolNode |
| | from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace, HuggingFaceEmbeddings |
| | from langchain_core.messages import SystemMessage, HumanMessage |
| | from langchain_core.globals import set_debug |
| | from langchain_groq import ChatGroq |
| | from search_tools import web_search, arvix_search, wiki_search |
| | from math_tools import multiply, add, subtract, divide |
| | |
| | |
| | |
| | import json |
| | from langchain_google_genai import ChatGoogleGenerativeAI |
| | from langchain_openai import ChatOpenAI |
| | from langchain_core.messages import HumanMessage |
| |
|
| | |
load_dotenv()  # pull API keys and other config from a local .env file

# Tool belt exposed to the agent: arithmetic helpers plus search backends.
_math_tools = [multiply, add, subtract, divide]
_search_tools = [web_search, wiki_search, arvix_search]
tools = _math_tools + _search_tools
| |
|
def build_graph():
    """Build and compile the ReAct-style LangGraph agent.

    The graph alternates between an LLM "assistant" node (which may emit
    tool calls) and a ToolNode that executes them, looping until the model
    returns a final answer with no tool calls.

    Returns:
        A compiled LangGraph runnable that accepts ``{"messages": [...]}``.
    """
    dashscope_key = os.getenv("CODING_PLAN_API_KEY")

    # DashScope exposes an OpenAI-compatible endpoint, so ChatOpenAI works
    # against it with a custom base_url.
    llm = ChatOpenAI(
        api_key=dashscope_key,
        base_url="https://coding.dashscope.aliyuncs.com/v1",
        model="qwen3.5-plus",
        temperature=0.1,
        verbose=True,
    )

    chat_with_tools = llm.bind_tools(tools)

    # BUG FIX: the original fragments were concatenated without separating
    # spaces ("constraints.Pay attention", "response.Your final"), running
    # the sentences together in the prompt.
    sys_prompt = (
        "You are a helpful assistant with access to tools. Understand user "
        "requests accurately. Use your tools when needed to answer "
        "effectively. Strictly follow all user instructions and constraints. "
        "Pay attention: your output needs to contain only the final answer "
        "without any reasoning since it will be strictly evaluated against a "
        "dataset which contains only the specific response. "
        "Your final output needs to be just the string or integer containing "
        "the answer, not an array or technical stuff."
    )

    def assistant(state: MessagesState):
        """LLM node: prepend the system prompt and call the tool-bound model."""
        # BUG FIX: wrap the prompt in SystemMessage (imported but previously
        # unused); a bare str in the message list is coerced to a *human*
        # message, losing its system-role authority.
        return {
            "messages": [
                chat_with_tools.invoke(
                    [SystemMessage(content=sys_prompt)] + state["messages"]
                )
            ],
        }

    builder = StateGraph(MessagesState)
    builder.add_node("assistant", assistant)
    builder.add_node("tools", ToolNode(tools))

    builder.add_edge(START, "assistant")
    # Route to "tools" when the last AI message carries tool calls,
    # otherwise finish.
    builder.add_conditional_edges("assistant", tools_condition)
    builder.add_edge("tools", "assistant")
    return builder.compile()
| |
|
| | |
if __name__ == "__main__":
    graph = build_graph()

    # A .jsonl file holds one JSON object per line.
    with open('sample.jsonl', 'r') as jsonl_file:
        # ROBUSTNESS: skip blank lines (e.g. a trailing newline) so
        # json.loads never receives an empty string and raises.
        json_list = [line for line in jsonl_file if line.strip()]

    # Evaluate a single record; change `start` to pick a different one.
    start = 10
    end = start + 1
    for json_str in json_list[start:end]:
        json_data = json.loads(json_str)
        print(f"Question::::::::: {json_data['Question']}")
        print(f"Final answer::::: {json_data['Final answer']}")

        question = json_data['Question']
        # Keep the input list and the graph's result in separate names
        # (the original shadowed `messages` with the invoke result).
        input_messages = [HumanMessage(content=question)]
        result = graph.invoke({"messages": input_messages})
        for m in result["messages"]:
            m.pretty_print()