from dotenv import load_dotenv
from langgraph.graph import START, StateGraph, MessagesState
from langgraph.prebuilt import tools_condition
from langgraph.prebuilt import ToolNode
from langchain_core.messages import SystemMessage
from tools.searchtools import wiki_search, web_search, arxiv_search, get_youtube_transcript
from tools.mathtools import multiply, add, subtract, divide, modulus, power, square_root
from tools.codetools import execute_code_multilang
from tools.documenttools import create_file_with_content, read_file_content, download_file_from_url, extract_text_from_image, analyze_csv_file, analyze_excel_file
from tools.imagetools import analyze_image, transform_image, draw_on_image, generate_simple_image, combine_images
from tools.audiotools import transcribe_audio
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
load_dotenv()
# load the system prompt from the file
with open("system_prompt.txt", "r", encoding="utf-8") as f:
system_prompt = f.read()
# System message
sys_msg = SystemMessage(content=system_prompt)
tools = [
web_search,
wiki_search,
arxiv_search,
get_youtube_transcript,
multiply,
add,
subtract,
divide,
modulus,
power,
square_root,
create_file_with_content,
read_file_content,
download_file_from_url,
extract_text_from_image,
analyze_csv_file,
analyze_excel_file,
execute_code_multilang,
analyze_image,
transform_image,
draw_on_image,
generate_simple_image,
combine_images,
transcribe_audio,
]
# Build graph function
def build_graph():
"""Build the graph"""
# Load environment variables from .env file
llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
# Bind tools to LLM
llm_with_tools = llm.bind_tools(tools)
# Node
def assistant(state: MessagesState):
"""Assistant node"""
# Prepend system message to the current messages
# Ensure sys_msg is only added if not already present or if it's the first turn
current_messages = state["messages"]
if not current_messages or current_messages[0].type != "system":
# Or, if you want to ensure it's always the first message for each LLM call in this node:
# updated_messages = [sys_msg] + [m for m in current_messages if m.type != "system"]
# For simplicity, let's assume we add it if it's not the very first message overall.
# A more robust check might be needed depending on multi-turn conversation flow.
updated_messages = [sys_msg] + current_messages
else:
updated_messages = current_messages
return {"messages": [llm_with_tools.invoke(updated_messages)]}
builder = StateGraph(MessagesState)
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))
builder.add_edge(START, "assistant")
builder.add_conditional_edges(
"assistant",
tools_condition,
)
builder.add_edge("tools", "assistant")
# Compile graph
return builder.compile() |