yashAI007 committed on
Commit
45f00f2
·
1 Parent(s): 2bd81f8

add long-term and short-term memory

Browse files
Files changed (6) hide show
  1. .gitignore +3 -0
  2. app.py +3 -119
  3. memory_chatbot.py +217 -0
  4. requirements.txt +0 -0
  5. test.ipynb +113 -0
  6. tool.py +108 -0
.gitignore ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ venv
2
+ __pycache__/
3
+
app.py CHANGED
@@ -1,124 +1,8 @@
1
- # import gradio as gr
2
- # from chabot import chatbot
3
- # from langchain_core.messages import HumanMessage
4
-
5
- # config = {"configurable": {"thread_id": "1"}}
6
-
7
- # # ---- Gradio Chat Function ----
8
- # def chat_fn(message, history):
9
- # """
10
- # message: current user message (string)
11
- # history: list of previous messages [(user, bot), ...]
12
- # """
13
-
14
- # # Send message to LangGraph chatbot
15
- # res = chatbot.invoke(
16
- # {"messages": [HumanMessage(content=message)]},
17
- # config=config
18
- # )
19
-
20
- # # Get AI response
21
- # ai_message = res["messages"][-1].content
22
-
23
- # return ai_message
24
-
25
-
26
- # # ---- Create UI ----
27
- # demo = gr.ChatInterface(
28
- # fn=chat_fn,
29
- # title="LangGraph + Groq Chatbot",
30
- # description="Chatbot powered by LangGraph, Groq (Qwen3-32B), and memory",
31
- # )
32
-
33
- # if __name__ == "__main__":
34
- # demo.launch()
35
-
36
-
37
-
38
- # import gradio as gr
39
- # import uuid
40
- # from langchain_core.messages import HumanMessage
41
- # from chabot import chatbot
42
-
43
- # # -----------------------
44
- # # Generate Unique Thread ID
45
- # # -----------------------
46
- # def new_thread():
47
- # config = {"configurable": {"thread_id": str(uuid.uuid4())[:8] }}
48
- # return config
49
-
50
-
51
- # # -----------------------
52
- # # Chat Function
53
- # # -----------------------
54
- # def chat_fn(message, history, thread_id):
55
- # config = {"configurable": {"thread_id": thread_id}}
56
-
57
- # res = chatbot.invoke(
58
- # {"messages": [HumanMessage(content=message)]},
59
- # config=config
60
- # )
61
-
62
- # ai_message = res["messages"][-1].content
63
- # return ai_message
64
-
65
-
66
- # # -----------------------
67
- # # Start With Default Thread
68
- # # -----------------------
69
- # default_thread = new_thread()
70
- # default_thread = default_thread["configurable"]["thread_id"]
71
-
72
- # # -----------------------
73
- # # Gradio UI
74
- # # -----------------------
75
- # with gr.Blocks() as demo:
76
-
77
- # # Store thread id in hidden state
78
- # thread_state = gr.State(default_thread)
79
-
80
- # with gr.Row():
81
-
82
- # # -------- Left Sidebar --------
83
- # with gr.Column(scale=1):
84
- # gr.Markdown("## 🧵 Current Thread ID")
85
- # thread_display = gr.Textbox(
86
- # value=default_thread,
87
- # interactive=False,
88
- # label="Thread ID"
89
- # )
90
-
91
- # new_chat_btn = gr.Button("➕ New Chat")
92
-
93
- # # -------- Main Chat Area --------
94
- # with gr.Column(scale=4):
95
- # chatbot_ui = gr.ChatInterface(
96
- # fn=lambda msg, hist: chat_fn(msg, hist, thread_state.value),
97
- # title="LangGraph + Groq Chatbot",
98
- # description="Memory enabled chatbot with unique threads",
99
- # )
100
-
101
- # # -----------------------
102
- # # New Chat Button Logic
103
- # # -----------------------
104
- # def reset_chat():
105
- # new_id = new_thread()
106
- # return new_id, new_id, []
107
-
108
- # new_chat_btn.click(
109
- # reset_chat,
110
- # outputs=[thread_state, thread_display, chatbot_ui.chatbot]
111
- # )
112
-
113
-
114
- # demo.launch()
115
-
116
-
117
-
118
  import gradio as gr
119
  import uuid
120
  from langchain_core.messages import HumanMessage
121
- from chatbot import chatbot
 
122
 
123
 
124
  # -----------------------
@@ -139,7 +23,7 @@ first_thread = new_thread_id()
139
  # -----------------------
140
  def chat_fn(message, chat_history, thread_id, all_threads):
141
 
142
- config = {"configurable": {"thread_id": thread_id}}
143
 
144
  res = chatbot.invoke(
145
  {"messages": [HumanMessage(content=message)]},
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
  import uuid
3
  from langchain_core.messages import HumanMessage
4
+ # from chatbot import chatbot
5
+ from memory_chatbot import chatbot
6
 
7
 
8
  # -----------------------
 
23
  # -----------------------
24
  def chat_fn(message, chat_history, thread_id, all_threads):
25
 
26
+ config = {"configurable": {"thread_id": thread_id, "user_id": "user_id"}}
27
 
28
  res = chatbot.invoke(
29
  {"messages": [HumanMessage(content=message)]},
memory_chatbot.py ADDED
@@ -0,0 +1,217 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import json
from typing import TypedDict, Annotated

from dotenv import load_dotenv
from langchain_core.messages import BaseMessage, HumanMessage
from langchain_groq import ChatGroq

from langgraph.graph import START, END, StateGraph
from langgraph.graph.message import add_messages

# Short-term memory: per-thread conversation checkpoints.
from langgraph.checkpoint.memory import InMemorySaver

# Long-term memory: cross-thread key/value store.
from langgraph.store.memory import InMemoryStore

# Agent
from langchain.agents import create_agent
from tool import tool_tavily,time_date,calculator,python_exec,get_weather,wikipedia_search,scrape_website,read_file,format_json,generate_sql,system_info,save_user_preference


# Load API keys (GROQ_API_KEY, TAVILY_API_KEY, ...) from a local .env file.
load_dotenv()

# Every tool the agent may call; all are defined in tool.py.
tools = [tool_tavily,time_date,calculator,python_exec,get_weather,wikipedia_search,scrape_website,read_file,format_json,generate_sql,system_info,save_user_preference]

# -----------------------------
# Memory Systems
# -----------------------------

# NOTE(review): both are in-process and lost on restart — fine for
# development, but swap for persistent backends in production.
checkpointer = InMemorySaver()  # short-term: conversation state per thread_id
store = InMemoryStore()         # long-term: user facts keyed by (user_id, category)

# -----------------------------
# LLM
# -----------------------------

llm = ChatGroq(
    model="qwen/qwen3-32b",
    temperature=0,              # deterministic responses
    reasoning_format="parsed",  # keep model reasoning separate from the answer
)
43
+ # -----------------------------
44
+ # State
45
+ # -----------------------------
46
+
47
+ class ChatState(TypedDict):
48
+ messages: Annotated[list[BaseMessage], add_messages]
49
+
50
+
51
# -----------------------------
# Memory Analyzer Node
# -----------------------------

def memory_analyzer(state: ChatState, config, store):
    """Inspect the latest user message and persist long-term memory.

    Asks the LLM to classify the message; when it contains a durable
    fact (preference, profile detail, interest, or project info) the
    fact is written to the store under the (user_id, category) namespace.

    Best-effort by design: parse/store failures are logged and ignored
    so the chat flow is never interrupted.
    """
    user_id = config["configurable"]["user_id"]

    last_message = state["messages"][-1].content

    prompt = f"""
Analyze the message and decide if it contains useful long-term memory.

Possible memory categories:
- preferences
- profile
- interests
- project

Return JSON format:

{{
"store_memory": true/false,
"category": "preferences | profile | interests | project",
"key": "memory_key",
"value": {{}}
}}

Message:
{last_message}
"""

    result = llm.invoke([HumanMessage(content=prompt)])

    try:
        # LLMs frequently wrap JSON in prose or ``` fences; extract the
        # outermost {...} span before parsing instead of failing outright.
        raw = result.content
        start, end = raw.find("{"), raw.rfind("}")
        if start == -1 or end == -1:
            return {}
        data = json.loads(raw[start:end + 1])

        if data.get("store_memory"):
            namespace = (user_id, data["category"])
            store.put(namespace, data["key"], data["value"])
            print("Stored memory:", data)

    except (json.JSONDecodeError, KeyError, TypeError) as e:
        # Narrowed from a blanket `except Exception: pass`; log so
        # malformed analyzer output is at least visible during dev.
        print("memory_analyzer: skipping malformed memory:", e)

    # This node contributes nothing to the conversation itself.
    return {}
104
+
105
+
106
# -----------------------------
# Memory Retrieval Node
# -----------------------------

def memory_retrieval(state: ChatState, config, store):
    """Fetch long-term memories relevant to the latest user message.

    Searches every category namespace for this user and, when anything
    matches, adds a single context message summarising the hits.

    Returns only the messages to ADD — `add_messages` merges them into
    state. (The original mutated state["messages"] in place and returned
    the full list, relying on the reducer's id-dedup to avoid duplicates.)
    """
    user_id = config["configurable"]["user_id"]
    query = state["messages"][-1].content

    categories = [
        "preferences",
        "profile",
        "interests",
        "project"
    ]

    all_memories = []
    for category in categories:
        namespace = (user_id, category)
        results = store.search(namespace, query=query, limit=3)
        all_memories.extend(results)

    memory_text = "\n".join(
        f"{m.namespace[1]}: {m.value}" for m in all_memories
    )

    if not memory_text:
        # Nothing relevant found; add nothing to the conversation.
        return {}

    return {
        "messages": [
            HumanMessage(content=f"Relevant user memories:\n{memory_text}")
        ]
    }
149
+
150
# -----------------------------
# Chat Node
# -----------------------------

# Built lazily on first use and reused across turns — the original
# rebuilt the agent on every single chat_node invocation.
_agent = None


def _get_agent():
    """Return the shared tool-calling agent, creating it on first call."""
    global _agent
    if _agent is None:
        _agent = create_agent(llm, tools=tools)
    return _agent


def chat_node(state: ChatState):
    """Run the tool-calling agent over the conversation history.

    Returns only the messages the agent produced; the input history is
    sliced off so `add_messages` does not re-merge what is already there.
    """
    messages = state["messages"]

    response = _get_agent().invoke({"messages": messages})

    # The agent echoes the input messages first; keep only its additions.
    new_messages = response["messages"][len(messages):]
    return {"messages": new_messages}
164
+
165
# -----------------------------
# Graph
# -----------------------------

graph = StateGraph(ChatState)

graph.add_node("memory_analyzer", memory_analyzer)
graph.add_node("memory_retrieval", memory_retrieval)
graph.add_node("chat_node", chat_node)

# Linear pipeline: store new facts -> inject relevant facts -> answer.
graph.add_edge(START, "memory_analyzer")
graph.add_edge("memory_analyzer", "memory_retrieval")
graph.add_edge("memory_retrieval", "chat_node")
graph.add_edge("chat_node", END)

# checkpointer = short-term (per-thread) memory,
# store = long-term (per-user) memory.
chatbot = graph.compile(
    checkpointer=checkpointer,
    store=store
)
184
+
185
+ # -----------------------------
186
+ # Chat Loop
187
+ # -----------------------------
188
+
189
+ # config = {
190
+ # "configurable": {
191
+ # "thread_id": "thread_1",
192
+ # "user_id": "user_123"
193
+ # }
194
+ # }
195
+
196
+ # messages = []
197
+
198
+ # while True:
199
+
200
+ # user_input = input("\nUser: ")
201
+
202
+ # if user_input.lower() in ["quit", "exit"]:
203
+ # break
204
+
205
+ # messages.append(HumanMessage(content=user_input))
206
+
207
+ # result = chatbot.invoke(
208
+ # {"messages": messages},
209
+ # config
210
+ # )
211
+
212
+ # response = result["messages"][-1].content
213
+
214
+ # print("\nAssistant:", response)
215
+
216
+ # messages = result["messages"]
217
+
requirements.txt CHANGED
Binary files a/requirements.txt and b/requirements.txt differ
 
test.ipynb ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 42,
6
+ "id": "2946f394",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "from langchain.agents import create_agent\n",
11
+ "from dotenv import load_dotenv\n",
12
+ "from langchain_groq import ChatGroq\n",
13
+ "from langchain_core.messages import BaseMessage, HumanMessage\n",
14
+ "\n",
15
+ "load_dotenv()\n",
16
+ "\n",
17
+ "llm = ChatGroq(\n",
18
+ " model=\"qwen/qwen3-32b\",\n",
19
+ " temperature=0,\n",
20
+ " reasoning_format=\"parsed\",\n",
21
+ " streaming=True\n",
22
+ ")\n",
23
+ "\n",
24
+ "\n",
25
+ "def get_weather(city: str) -> str:\n",
26
+ " \"\"\"Get weather for a given city.\"\"\"\n",
27
+ "\n",
28
+ " return f\"It's always sunny in {city}!\"\n",
29
+ "\n",
30
+ "tools = [get_weather]\n",
31
+ "agent = create_agent(llm, tools=tools)\n",
32
+ "\n"
33
+ ]
34
+ },
35
+ {
36
+ "cell_type": "code",
37
+ "execution_count": 43,
38
+ "id": "7ddafbda",
39
+ "metadata": {},
40
+ "outputs": [],
41
+ "source": [
42
+ "# for chunk in llm.stream(\n",
43
+ "# {\"messages\": [{\"role\": \"user\", \"content\": \"write 500 words essay on AI\"}]}\n",
44
+ "# ):\n",
45
+ "# if chunk.get(\"model\"):\n",
46
+ "# print(chunk.get(\"model\").get(\"messages\")[0].content)"
47
+ ]
48
+ },
49
+ {
50
+ "cell_type": "code",
51
+ "execution_count": 45,
52
+ "id": "acd47304",
53
+ "metadata": {},
54
+ "outputs": [
55
+ {
56
+ "name": "stdout",
57
+ "output_type": "stream",
58
+ "text": [
59
+ "**The Rise of Artificial Intelligence: Promise and Peril** \n",
60
+ "\n",
61
+ "Artificial Intelligence (AI), the simulation of human intelligence by machines, has emerged as one of the most transformative technologies of the 21st century. From streamlining industries to redefining daily life, AI’s influence is pervasive. However, its rapid advancement also raises critical questions about ethics, employment, and societal impact. As we navigate this dual-edged revolution, understanding AI’s potential and pitfalls is essential for shaping a future that balances innovation with responsibility. \n",
62
+ "\n",
63
+ "AI’s applications span nearly every sector. In healthcare, machine learning algorithms analyze medical data to detect diseases like cancer with unprecedented accuracy, while robotic surgery systems enhance precision in operations. In education, AI-powered platforms personalize learning, adapting to individual student needs and bridging gaps in traditional teaching methods. The financial industry leverages AI for fraud detection, risk management, and algorithmic trading, optimizing decisions in milliseconds. Even creative fields, such as music and art, now incorporate AI tools that generate original content, challenging notions of human creativity. These advancements underscore AI’s capacity to augment human capabilities, driving efficiency and innovation. \n",
64
+ "\n",
65
+ "Yet, the rise of AI is not without challenges. One of the most pressing concerns is job displacement. Automation threatens industries reliant on repetitive tasks, from manufacturing to customer service, potentially exacerbating economic inequality. While new roles in AI development and maintenance are emerging, the transition may leave many workers behind, necessitating robust reskilling initiatives. Additionally, biases embedded in AI systems—often reflecting historical prejudices in training data—can perpetuate discrimination in areas like hiring, lending, and law enforcement. Addressing these biases requires diverse, inclusive datasets and transparent algorithms. \n",
66
+ "\n",
67
+ "Ethical dilemmas further complicate AI’s trajectory. Autonomous weapons, for instance, raise moral questions about delegating life-and-death decisions to machines. Privacy concerns intensify as AI analyzes vast amounts of personal data, often without explicit consent. The 2023 EU AI Act, which classifies AI systems by risk level and imposes strict regulations on high-risk applications, reflects growing global efforts to establish ethical frameworks. However, enforcement remains inconsistent, highlighting the need for international collaboration to prevent misuse. \n",
68
+ "\n",
69
+ "The future of AI hinges on striking a balance between innovation and accountability. Governments, corporations, and academia must collaborate to develop policies that prioritize human welfare. Investments in AI literacy and education will empower individuals to adapt to technological shifts, while ethical AI design—centered on fairness, transparency, and privacy—can mitigate risks. Public dialogue is equally vital, ensuring that diverse perspectives shape AI’s evolution. \n",
70
+ "\n",
71
+ "In conclusion, AI represents both a remarkable opportunity and a profound responsibility. Its ability to solve complex problems and enhance human potential is undeniable, but unchecked, it risks amplifying societal divides and ethical crises. By fostering inclusive, ethical development, we can harness AI as a tool for collective progress rather than a source of division. The choices we make today will determine whether AI becomes a force for good—or a harbinger of unintended consequences."
72
+ ]
73
+ }
74
+ ],
75
+ "source": [
76
+ "for chunk in llm.stream(\n",
77
+ " [{\"role\": \"user\", \"content\": \"write 500 words essay on AI\"}]\n",
78
+ "):\n",
79
+ " if chunk.content:\n",
80
+ " print(chunk.content, end=\"\", flush=True)"
81
+ ]
82
+ },
83
+ {
84
+ "cell_type": "code",
85
+ "execution_count": null,
86
+ "id": "bf450c3d",
87
+ "metadata": {},
88
+ "outputs": [],
89
+ "source": []
90
+ }
91
+ ],
92
+ "metadata": {
93
+ "kernelspec": {
94
+ "display_name": "venv",
95
+ "language": "python",
96
+ "name": "python3"
97
+ },
98
+ "language_info": {
99
+ "codemirror_mode": {
100
+ "name": "ipython",
101
+ "version": 3
102
+ },
103
+ "file_extension": ".py",
104
+ "mimetype": "text/x-python",
105
+ "name": "python",
106
+ "nbconvert_exporter": "python",
107
+ "pygments_lexer": "ipython3",
108
+ "version": "3.10.12"
109
+ }
110
+ },
111
+ "nbformat": 4,
112
+ "nbformat_minor": 5
113
+ }
tool.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from langchain_tavily import TavilySearch
from langchain.tools import tool
from datetime import date

import platform
import json
import requests
import wikipedia
from bs4 import BeautifulSoup

from dotenv import load_dotenv

# Load TAVILY_API_KEY (and any other keys) from a local .env file.
load_dotenv()

# Web search tool backed by the Tavily API.
tool_tavily = TavilySearch(
    max_results=5,
    topic="general"
)
19
+
20
@tool
def time_date() -> str:
    """Return today's date in ISO format (yyyy-mm-dd)."""
    # isoformat() guarantees the yyyy-mm-dd shape the docstring promises;
    # the original returned a `date` object and relied on implicit str().
    return date.today().isoformat()
25
+
26
+
27
@tool
def calculator(expression: str) -> str:
    """Safely evaluate an arithmetic expression (e.g. "2 * (3 + 4)").

    Supports +, -, *, /, //, %, ** and unary +/- over numeric literals.
    Returns the result as a string, or the error message on failure.
    """
    # eval() on LLM-supplied text is arbitrary code execution; walk the
    # AST instead and permit only arithmetic nodes.
    import ast
    import operator

    ops = {
        ast.Add: operator.add, ast.Sub: operator.sub,
        ast.Mult: operator.mul, ast.Div: operator.truediv,
        ast.FloorDiv: operator.floordiv, ast.Mod: operator.mod,
        ast.Pow: operator.pow,
        ast.UAdd: operator.pos, ast.USub: operator.neg,
    }

    def _eval(node):
        # Recursively reduce the expression tree, rejecting anything
        # that is not pure arithmetic.
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in ops:
            return ops[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in ops:
            return ops[type(node.op)](_eval(node.operand))
        raise ValueError(f"unsupported expression: {expression!r}")

    try:
        return str(_eval(ast.parse(expression, mode="eval")))
    except Exception as e:
        return str(e)
34
+
35
@tool
def python_exec(code: str) -> str:
    """Execute Python code and return the locals it created, as a string.

    SECURITY NOTE(review): exec() on model-generated code is arbitrary
    code execution on the host — run this in a sandbox/container before
    exposing it to untrusted users.
    """
    try:
        local_vars = {}
        # Empty globals, captured locals: the snippet's assignments land
        # in local_vars, which becomes the tool's return value.
        exec(code, {}, local_vars)
        return str(local_vars)
    except Exception as e:
        return str(e)
44
+
45
+
46
+
47
@tool
def get_weather(city: str) -> str:
    """Get current weather for a city via the wttr.in one-line format."""
    url = f"https://wttr.in/{city}?format=3"
    try:
        # Timeout so a hung request can't stall the whole agent turn;
        # failures come back as text, consistent with the other tools.
        res = requests.get(url, timeout=10)
        res.raise_for_status()
        return res.text
    except Exception as e:
        return str(e)
52
+
53
+
54
+
55
@tool
def wikipedia_search(query: str) -> str:
    """Return a three-sentence Wikipedia summary for the query."""
    try:
        return wikipedia.summary(query, sentences=3)
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # SystemExit/KeyboardInterrupt); any lookup failure maps to a
        # friendly string the agent can relay.
        return "No result found"
62
+
63
+
64
+
65
+
66
@tool
def scrape_website(url: str) -> str:
    """Extract up to 2000 characters of visible text from a webpage."""
    try:
        # Timeout so a dead host can't hang the agent; errors are
        # returned as text rather than raised, like the other tools.
        res = requests.get(url, timeout=10)
        res.raise_for_status()
    except Exception as e:
        return str(e)
    soup = BeautifulSoup(res.text, "html.parser")
    # Truncate to keep the tool output within a sane prompt budget.
    return soup.get_text()[:2000]
72
+
73
@tool
def read_file(path: str) -> str:
    """Read local file content"""
    # Return the file text, or the error message if the read fails.
    try:
        with open(path, "r") as handle:
            contents = handle.read()
        return contents
    except Exception as err:
        return str(err)
81
+
82
+
83
+
84
@tool
def format_json(data: str) -> str:
    """Format JSON string"""
    # Pretty-print valid JSON; surface the parse error text otherwise.
    try:
        return json.dumps(json.loads(data), indent=2)
    except Exception as err:
        return str(err)
92
+
93
@tool
def generate_sql(query: str) -> str:
    """Convert natural language to SQL"""
    # Placeholder implementation: echoes the request as a SQL comment.
    return "-- SQL Query for: " + query
97
+
98
+
99
@tool
def system_info() -> str:
    """Get system information"""
    # Single OS/architecture summary string from the stdlib.
    summary = platform.platform()
    return summary
103
+
104
@tool
def save_user_preference(text: str) -> str:
    """Save user preference or important info"""
    # TODO: wire this into store.put() for real persistence.
    return "Saved memory: " + text