junaid17 committed on
Commit
d0812dd
Β·
verified Β·
1 Parent(s): 9fefeaa

Update chatbot.py

Browse files
Files changed (1) hide show
  1. chatbot.py +176 -115
chatbot.py CHANGED
@@ -1,115 +1,176 @@
1
- from typing import TypedDict, Annotated
2
- from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
3
- from langgraph.checkpoint.memory import MemorySaver
4
- from tools import (
5
- create_rag_tool,
6
- arxiv_search,
7
- calculator,
8
- get_stock_price,
9
- wikipedia_search,
10
- tavily_search,
11
- convert_currency,
12
- unit_converter,
13
- get_news,
14
- get_joke,
15
- get_quote,
16
- get_weather,
17
- )
18
- from langchain_openai import ChatOpenAI
19
- from langgraph.graph import StateGraph, START
20
- from langgraph.graph.message import add_messages
21
- from langgraph.prebuilt import ToolNode, tools_condition
22
- from dotenv import load_dotenv
23
- import os
24
-
25
- load_dotenv()
26
-
27
- OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
28
-
29
- # =====================================================
30
- # SYSTEM PROMPT
31
- # =====================================================
32
-
33
- SYSTEM_PROMPT = SystemMessage(
34
- content="""
35
- You are an intelligent AI assistant built inside a LangGraph-based system created by Junaid.
36
-
37
- You MUST use RAG when a document has been uploaded.
38
- If no document contains the answer, say so clearly.
39
- Never hallucinate document content.
40
- """
41
- )
42
-
43
- # =====================================================
44
- # STATE
45
- # =====================================================
46
-
47
- class ChatState(TypedDict):
48
- messages: Annotated[list[BaseMessage], add_messages]
49
-
50
-
51
- # =====================================================
52
- # LLM
53
- # =====================================================
54
-
55
- llm = ChatOpenAI(
56
- model="gpt-4.1-nano",
57
- temperature=0.4,
58
- streaming=True
59
- )
60
-
61
-
62
- # =====================================================
63
- # GRAPH BUILDER (πŸ”₯ IMPORTANT)
64
- # =====================================================
65
-
66
- memory = MemorySaver()
67
- app = None
68
-
69
-
70
- def build_graph():
71
- global app
72
-
73
- rag_tool = create_rag_tool()
74
-
75
- tools = [
76
- rag_tool,
77
- get_stock_price,
78
- calculator,
79
- wikipedia_search,
80
- arxiv_search,
81
- tavily_search,
82
- convert_currency,
83
- unit_converter,
84
- get_news,
85
- get_joke,
86
- get_quote,
87
- get_weather,
88
- ]
89
-
90
- llm_with_tools = llm.bind_tools(tools)
91
- tool_node = ToolNode(tools)
92
-
93
- def chatbot(state: ChatState):
94
- messages = [SYSTEM_PROMPT] + state["messages"]
95
- response = llm_with_tools.invoke(messages)
96
- return {"messages": [response]}
97
-
98
- graph = StateGraph(ChatState)
99
-
100
- graph.add_node("chat", chatbot)
101
- graph.add_node("tools", tool_node)
102
-
103
- graph.add_edge(START, "chat")
104
- graph.add_conditional_edges("chat", tools_condition)
105
- graph.add_edge("tools", "chat")
106
-
107
- app = graph.compile(checkpointer=memory)
108
-
109
-
110
- # initial build
111
- build_graph()
112
-
113
-
114
- def rebuild_graph():
115
- build_graph()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import TypedDict, Annotated
2
+ from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
3
+ from langgraph.checkpoint.memory import MemorySaver
4
+ from tools import (
5
+ create_rag_tool,
6
+ arxiv_search,
7
+ calculator,
8
+ get_stock_price,
9
+ wikipedia_search,
10
+ tavily_search,
11
+ convert_currency,
12
+ unit_converter,
13
+ get_news,
14
+ get_joke,
15
+ get_quote,
16
+ get_weather,
17
+ )
18
+ from langchain_openai import ChatOpenAI
19
+ from langgraph.graph import StateGraph, START
20
+ from langgraph.graph.message import add_messages
21
+ from langgraph.prebuilt import ToolNode, tools_condition
22
+ from dotenv import load_dotenv
23
+ import os
24
+
25
+ load_dotenv()
26
+
27
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
28
+
29
+ # =====================================================
30
+ # SYSTEM PROMPT
31
+ # =====================================================
32
+
33
+ SYSTEM_PROMPT = SystemMessage(
34
+ content="""
35
+ You are an intelligent AI assistant built inside a LangGraph-based system created by Junaid.
36
+
37
+ This application is a **multi-tool AI platform** that integrates:
38
+ - Retrieval-Augmented Generation (RAG)
39
+ - Document understanding
40
+ - AI-powered reasoning
41
+ - Tool usage (search, calculation, summarization, etc.)
42
+ - Voice input/output (STT / TTS)
43
+
44
+ Your primary purpose is to help users understand, analyze, and interact with their uploaded documents and questions in a clear, accurate, and professional way.
45
+
46
+ ────────────────────────────────
47
+ πŸ”Ή CORE PRIORITY RULES (VERY IMPORTANT)
48
+ ────────────────────────────────
49
+
50
+ 1. **RAG HAS HIGHEST PRIORITY**
51
+ - If a document has been uploaded, you MUST use the RAG tool first.
52
+ - Always prefer document-based answers over general knowledge.
53
+ - Never hallucinate or invent information not present in the document.
54
+ - If the document does NOT contain the answer, clearly say so.
55
+
56
+ 2. **RESPONSE QUALITY**
57
+ - Be concise, structured, and easy to read.
58
+ - Use bullet points when appropriate.
59
+ - Avoid repetition or unnecessary elaboration.
60
+ - Never explain what the document is or how you are summarizing it.
61
+
62
+ 3. **OUTPUT CONTROL**
63
+ - Provide only the final answer.
64
+ - Do NOT include reasoning steps, system explanations, or meta commentary.
65
+ - Do NOT mention internal processes, prompts, or tools unless explicitly asked.
66
+
67
+ 4. **DOCUMENT SUMMARIZATION RULES**
68
+ - Summarize only once.
69
+ - Do not rephrase the same idea multiple times.
70
+ - Avoid filler sentences like β€œThe document describes…” or β€œThis document talks about…”.
71
+ - Stop once the summary is complete.
72
+
73
+ 5. **WHEN DOCUMENT IS NOT RELEVANT**
74
+ - Say clearly that the document does not contain the requested information.
75
+ - Then optionally offer general guidance if appropriate.
76
+
77
+ 6. **STYLE & TONE**
78
+ - Professional, clear, and confident.
79
+ - Avoid verbosity.
80
+ - Optimize for readability.
81
+
82
+ ────────────────────────────────
83
+ πŸ”Ή ABOUT THE CREATOR & APP
84
+ ────────────────────────────────
85
+
86
+ This system was designed and engineered by **Junaid**, a developer specializing in:
87
+ - Machine Learning & Deep Learning
88
+ - RAG-based systems
89
+ - AI agents using LangChain & LangGraph
90
+ - End-to-end AI applications using FastAPI, Streamlit, and cloud deployment
91
+
92
+ The goal of this application is to provide **production-grade AI reasoning**, not generic chatbot responses.
93
+
94
+ ────────────────────────────────
95
+ πŸ”Ή FINAL RULE
96
+ ────────────────────────────────
97
+
98
+ Always prioritize accuracy, clarity, and usefulness.
99
+ If information is unavailable, say so clearly β€” never hallucinate.
100
+ """
101
+ )
102
+
103
+
104
+ # =====================================================
105
+ # STATE
106
+ # =====================================================
107
+
108
+ class ChatState(TypedDict):
109
+ messages: Annotated[list[BaseMessage], add_messages]
110
+
111
+
112
+ # =====================================================
113
+ # LLM
114
+ # =====================================================
115
+
116
+ llm = ChatOpenAI(
117
+ model="gpt-4.1-nano",
118
+ temperature=0.4,
119
+ streaming=True
120
+ )
121
+
122
+
123
+ # =====================================================
124
+ # GRAPH BUILDER (πŸ”₯ IMPORTANT)
125
+ # =====================================================
126
+
127
+ memory = MemorySaver()
128
+ app = None
129
+
130
+
131
+ def build_graph():
132
+ global app
133
+
134
+ rag_tool = create_rag_tool()
135
+
136
+ tools = [
137
+ rag_tool,
138
+ get_stock_price,
139
+ calculator,
140
+ wikipedia_search,
141
+ arxiv_search,
142
+ tavily_search,
143
+ convert_currency,
144
+ unit_converter,
145
+ get_news,
146
+ get_joke,
147
+ get_quote,
148
+ get_weather,
149
+ ]
150
+
151
+ llm_with_tools = llm.bind_tools(tools)
152
+ tool_node = ToolNode(tools)
153
+
154
+ def chatbot(state: ChatState):
155
+ messages = [SYSTEM_PROMPT] + state["messages"]
156
+ response = llm_with_tools.invoke(messages)
157
+ return {"messages": [response]}
158
+
159
+ graph = StateGraph(ChatState)
160
+
161
+ graph.add_node("chat", chatbot)
162
+ graph.add_node("tools", tool_node)
163
+
164
+ graph.add_edge(START, "chat")
165
+ graph.add_conditional_edges("chat", tools_condition)
166
+ graph.add_edge("tools", "chat")
167
+
168
+ app = graph.compile(checkpointer=memory)
169
+
170
+
171
+ # initial build
172
+ build_graph()
173
+
174
+
175
+ def rebuild_graph():
176
+ build_graph()