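"""LangGraph question-answering agent.

Wraps a tool-calling chat model (Ollama, Google Gemini, or Groq) in a
ReAct-style graph: the assistant node calls the model, tool calls are
routed to a ToolNode, and the loop repeats until the model stops calling
tools. The final reply is expected to contain a "FINAL ANSWER:" marker.
"""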
import os

from dotenv import load_dotenv
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_groq import ChatGroq
from langchain_ollama import ChatOllama
from langgraph.graph import START, StateGraph, MessagesState
from langgraph.prebuilt import tools_condition, ToolNode

from tools import get_tools
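# Read provider credentials (e.g. GEMINI_API_KEY, GROQ_API_KEY) from a local .env file.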
load_dotenv()
class Agent:
    def __init__(self):
        self.tools = get_tools()
        with open('system_prompt.txt', 'r') as f:
            system_prompt = f.read()
        self.sys_msg = SystemMessage(content=system_prompt)
        print("Agent initialized.")
    def build_graph(self, provider: str = 'google', model: str | None = None):
        # Select the chat model; each provider has a sensible default.
        if provider == 'ollama':
            if not model:
                model = "llama3.1:8b"
            llm = ChatOllama(model=model, temperature=0)
        elif provider == 'google':
            if not model:
                model = "gemini-2.5-flash"
            gemini_api_key = os.getenv("GEMINI_API_KEY")
            llm = ChatGoogleGenerativeAI(model=model, temperature=0, google_api_key=gemini_api_key)
        elif provider == 'groq':
            if not model:
                model = "meta-llama/llama-4-scout-17b-16e-instruct"
                # model = "meta-llama/llama-4-maverick-17b-128e-instruct"
            groq_api_key = os.getenv("GROQ_API_KEY")
            llm = ChatGroq(model=model, temperature=0, groq_api_key=groq_api_key)
        else:
            raise ValueError(f"Provider '{provider}' is not supported")
        llm_with_tools = llm.bind_tools(self.tools)

        def assistant(state: MessagesState):
            """Assistant node: invoke the tool-enabled model with the system prompt prepended."""
            new_message = llm_with_tools.invoke([self.sys_msg] + state["messages"])
            return {"messages": [new_message]}

        builder = StateGraph(MessagesState)
        builder.add_node("assistant", assistant)
        builder.add_node("tools", ToolNode(self.tools))
        builder.add_edge(START, "assistant")
        # tools_condition routes to "tools" when the last message contains
        # tool calls, and to END otherwise.
        builder.add_conditional_edges("assistant", tools_condition)
        builder.add_edge("tools", "assistant")
        # Compile graph
        return builder.compile()
    def __call__(self, question: str, attachment: str | None = None) -> str:
        print("\n\n#######################################################################\n")
        print("#################### AGENT RECEIVED A NEW QUESTION ####################\n")
        print("#######################################################################\n\n")
        # Wrap the question (and the attachment path, if any) in a HumanMessage
        if attachment:
            attachment_prompt = f"\n\nThe path for the file required to answer this question is: {attachment}\n"
        else:
            attachment_prompt = ""
        messages = [HumanMessage(content=question + attachment_prompt)]
        graph = self.build_graph('google')
        messages = graph.invoke({"messages": messages})
        answer = extract_final_answer(messages['messages'][-1].content)
        for m in messages["messages"]:
            m.pretty_print()
        return answer
def extract_final_answer(response: str) -> str:
    """Extract the final answer from the model response."""
    if "FINAL ANSWER:" in response:
        return response.split("FINAL ANSWER:")[-1].strip()
    # Fallback: return the full response if no FINAL ANSWER marker is found
    return response.strip()
if __name__ == "__main__":
    agent = Agent()
    question = (
        "Who did the actor who played Ray in the Polish-language version of "
        "Everybody Loves Raymond play in Magda M.? Give only the first name."
    )
    model = "gemini-2.5-flash"
    graph = agent.build_graph('google', model)
    messages = graph.invoke({"messages": [HumanMessage(content=question)]})
    answer = messages['messages'][-1].content
    for m in messages["messages"]:
        m.pretty_print()
    print(f"\n\n\nModel response: {extract_final_answer(answer)}")
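
# Usage sketch (hypothetical file path): the callable interface is what an
# evaluation harness would invoke, e.g.
#   agent = Agent()
#   answer = agent("What does the attached file contain?", attachment="files/task.xlsx")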