File size: 1,535 Bytes
304d2b8
d879ec7
304d2b8
a61cb76
d879ec7
304d2b8
 
 
 
 
d879ec7
 
 
 
 
 
 
a61cb76
d879ec7
 
 
 
 
304d2b8
 
 
 
 
01c1257
304d2b8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
01c1257
a3491c6
d879ec7
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
from src.config.env import OPENAI_API_KEY, GOOGLE_API_KEY, SERPER_API_KEY
from src.interface.ui import create_interface
from src.state import build_graph_with_callable
from src.chains.qa_chain_openai import build_openai_rag_chain_and_llm
from src.chains.qa_chain_gemini import build_gemini_rag_chain
from src.agents.prompts import CHARACTER_PROMPT
from src.agents.tools import get_tools

from langchain.agents import create_react_agent, AgentExecutor
from langchain.memory import ConversationBufferWindowMemory

# === Choose your backend model ===
MODEL = "openai"  # Change to "gemini" to use Google Gemini
PDF_PATH = "resources/22_studenthandbook-22-23_f2.pdf"

# === Build chain based on chosen model ===
# The agent construction below requires a bound `llm`, which only the
# OpenAI builder returns (it yields a (chain, llm) pair).
if MODEL == "openai":
    rag_chain, llm = build_openai_rag_chain_and_llm(PDF_PATH)
elif MODEL == "gemini":
    rag_chain = build_gemini_rag_chain(PDF_PATH)
    # BUG FIX: build_gemini_rag_chain returns only the chain, so `llm` was
    # never bound on this path and create_react_agent(llm=llm, ...) further
    # down crashed with an opaque NameError. Fail fast with a clear message
    # instead.
    raise NotImplementedError(
        "The 'gemini' backend builds a RAG chain but does not expose an LLM "
        "for the ReAct agent; extend build_gemini_rag_chain to return one."
    )
else:
    raise ValueError("Unsupported MODEL type. Choose 'openai' or 'gemini'.")

# === Set up tools ===
# Wrap the RAG chain as the agent's tool set; the exact tools exposed are
# defined by src.agents.tools.get_tools (presumably including a retrieval
# tool over the handbook PDF — confirm there).
tools = get_tools(rag_chain)

# === Build the agent ===
# ReAct-style agent driven by the character prompt and the RAG-backed tools.
agent = create_react_agent(llm=llm, tools=tools, prompt=CHARACTER_PROMPT)

# Sliding-window conversation memory: keep only the last 5 exchanges, stored
# as message objects, reading the agent's final answer from the 'output' key.
memory = ConversationBufferWindowMemory(
    memory_key='chat_history',
    k=5,
    return_messages=True,
    output_key='output',
)

# Executor wraps the agent loop: caps reasoning at 5 iterations, recovers
# from malformed LLM output instead of raising, and logs each step.
agent_chain = AgentExecutor(
    agent=agent,
    tools=tools,
    memory=memory,
    max_iterations=5,
    handle_parsing_errors=True,
    verbose=True,
)


# === Build the state graph with agent===
app = build_graph_with_callable(agent_chain.invoke)

# === Start the UI ===
create_interface(app.invoke)