Selcan Yukcu committed on
Commit
8ada35f
·
1 Parent(s): 64235d1

feat: langchain buffermemory try

Browse files
Files changed (2) hide show
  1. langchain_mcp_client.py +24 -44
  2. postgre_mcp_server.py +21 -19
langchain_mcp_client.py CHANGED
@@ -12,12 +12,16 @@ from utils import parse_mcp_output, classify_intent
12
  import logging
13
  from dotenv import load_dotenv
14
  from langgraph.checkpoint.memory import MemorySaver
 
 
 
 
15
 
16
 
17
 
18
  logger = logging.getLogger(__name__)
19
  load_dotenv()
20
- async def lc_mcp_exec(request: str, history) -> tuple[Any, Any]:
21
  """
22
  Execute the full PostgreSQL MCP pipeline: load summary, connect session,
23
  load memory and tools, build prompt, run agent, update memory.
@@ -34,9 +38,14 @@ async def lc_mcp_exec(request: str, history) -> tuple[Any, Any]:
34
  server_params = get_server_params()
35
 
36
  api_key = os.getenv("API_KEY")
37
- llm = init_chat_model(model="gemini-2.0-flash", model_provider="google_genai",
38
- api_key=api_key)
39
 
 
 
 
 
 
 
 
40
  async with stdio_client(server_params) as (read, write):
41
  async with ClientSession(read, write) as session:
42
  await session.initialize()
@@ -45,45 +54,19 @@ async def lc_mcp_exec(request: str, history) -> tuple[Any, Any]:
45
 
46
  intent = classify_intent(request)
47
 
48
- messages = []
49
- memory = MemorySaver()
50
- agent = create_react_agent(llm, tools, checkpointer=memory)
51
-
52
- messages.append(HumanMessage(content=request))
53
- if history:
54
- # Clear existing messages and rebuild from history
55
- messages = []
56
-
57
- # Process Gradio chat history format
58
- for msg in history:
59
- # Gradio format: {'role': 'user/assistant', 'metadata': None, 'content': 'message', 'options': None}
60
- role = msg.get('role', '')
61
- content = msg.get('content', '')
62
-
63
- if role == 'user' and content:
64
- messages.append(HumanMessage(content=content))
65
- elif role == 'assistant' and content:
66
- messages.append(AIMessage(content=content))
67
-
68
- # Add the current query
69
- messages.append(HumanMessage(content=request))
70
 
71
- prompt = await build_prompt(session, intent, request, tools, table_summary, messages)
 
72
  config = {"configurable": {"thread_id": "conversation_123"}}
73
- agent_response = await agent.ainvoke(
74
  {"messages": prompt},
75
  config
76
  )
 
77
 
78
- if "messages" in agent_response:
79
- response = agent_response["messages"][-1].content
80
- else:
81
- response = "No response generated"
82
-
83
- messages.append(AIMessage(content=response))
84
-
85
-
86
- return response, messages
87
 
88
 
89
  # ---------------- Helper Functions ---------------- #
@@ -103,7 +86,7 @@ async def load_and_enrich_tools(session: ClientSession):
103
  tools = await load_mcp_tools(session)
104
  return tools
105
 
106
- async def build_prompt(session, intent, request, tools, summary, messages):
107
  superset_prompt = await session.read_resource("resource://last_prompt")
108
  conversation_prompt = await session.read_resource("resource://base_prompt")
109
  # TODO: add uri's from config
@@ -114,11 +97,8 @@ async def build_prompt(session, intent, request, tools, summary, messages):
114
  )
115
  else:
116
  template = conversation_prompt.contents[0].text
117
- tools_str = "\n".join([f"- {tool.name}: {tool.description}" for tool in tools])
118
- return template.format(
119
- new_request=request,
120
- tools=tools_str,
121
- descriptions=summary,
122
- chat_history = messages
123
- )
124
 
 
12
  import logging
13
  from dotenv import load_dotenv
14
  from langgraph.checkpoint.memory import MemorySaver
15
+ from langchain.memory import ConversationBufferMemory
16
+ from langchain.agents import AgentExecutor
17
+ from langchain_google_genai import ChatGoogleGenerativeAI
18
+ from langchain_core.prompts import PromptTemplate
19
 
20
 
21
 
22
  logger = logging.getLogger(__name__)
23
  load_dotenv()
24
+ async def lc_mcp_exec(request: str, history) -> str:
25
  """
26
  Execute the full PostgreSQL MCP pipeline: load summary, connect session,
27
  load memory and tools, build prompt, run agent, update memory.
 
38
  server_params = get_server_params()
39
 
40
  api_key = os.getenv("API_KEY")
 
 
41
 
42
+ llm = init_chat_model(
43
+ model="gemini-2.0-flash",
44
+ model_provider="google_genai",
45
+ api_key=api_key,
46
+ temperature=0.5,
47
+ )
48
+ print(type(llm))
49
  async with stdio_client(server_params) as (read, write):
50
  async with ClientSession(read, write) as session:
51
  await session.initialize()
 
54
 
55
  intent = classify_intent(request)
56
 
57
+ memory = ConversationBufferMemory(return_messages=True)
58
+ agent = create_react_agent(llm, tools)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
59
 
60
+ prompt = await build_prompt(session, intent, request, tools, table_summary)
61
+ agent_executor = AgentExecutor(agent=agent, tools=tools, memory=memory, verbose=True)
62
  config = {"configurable": {"thread_id": "conversation_123"}}
63
+ result = await agent_executor.ainvoke(
64
  {"messages": prompt},
65
  config
66
  )
67
+ response = result.get("output", "No response generated")
68
 
69
+ return response
 
 
 
 
 
 
 
 
70
 
71
 
72
  # ---------------- Helper Functions ---------------- #
 
86
  tools = await load_mcp_tools(session)
87
  return tools
88
 
89
+ async def build_prompt(session, intent, request, tools, summary):
90
  superset_prompt = await session.read_resource("resource://last_prompt")
91
  conversation_prompt = await session.read_resource("resource://base_prompt")
92
  # TODO: add uri's from config
 
97
  )
98
  else:
99
  template = conversation_prompt.contents[0].text
100
+ #tools_str = "\n".join([f"- {tool.name}: {tool.description}" for tool in tools])
101
+ #return template.format( new_request=request, tools=tools_str, descriptions=summary )
102
+ prompt = PromptTemplate.from_template(template)
103
+ return prompt
 
 
 
104
 
postgre_mcp_server.py CHANGED
@@ -134,6 +134,7 @@ async def base_prompt_query() -> str:
134
  You can use the following FastMCP tools. These allow you to create **read-only** queries, such as `SELECT`, `COUNT`, or queries with `GROUP BY`, `ORDER BY`, and similar clauses. You may chain tools together to gather the necessary information before generating your SQL query.
135
 
136
  {tools}
 
137
 
138
  ---
139
 
@@ -149,37 +150,38 @@ async def base_prompt_query() -> str:
149
  > SELECT * FROM customers WHERE country = 'Germany';
150
  > ```
151
 
152
- ==========================
153
- # Output Format
154
- ==========================
155
-
156
- Present your final answer using the following structure **exactly**. When necessary, bold the important parts of your answer or use `` inline code blocks.:
157
-
158
- ```markdown
159
- # Result
160
- {{Take the result from the execute_query tool and format it nicely using Markdown. Use a Markdown table for tabular data (rows and columns) including headers. Use bullet points or items in markdown for answers that include lists of names or descriptions. Use plain text for single values or simple messages. Ensure data alignment and clarity.}}
161
-
162
- # Explanation
163
- {{Provide a concise explanation or interpretation of the results in 1-3 sentences. Explain what the data in the 'Result' section represents in the context of the user's request.}}
164
-
165
- # Query
166
- ```sql
167
- {{Display the exact SQL query you generated and executed here to answer the user's request.}}
168
 
169
  **Reminder:**
170
  **Every time you generate a SQL query, call **execute_query** right after and include the result in your final response.**
171
  **If you do not execute the generated SQL query, this will be the violation of the instructions**
172
  **Your final answer cannot be only a SQL query, you will have to call **execute_query** and give the result of the call with the SQL query.**
173
  ---
174
- {chat_history}
175
- ---
176
  =========================
177
  # New User Request
178
  =========================
179
 
180
  Please fulfill the following request based on the above context:
181
 
182
- {new_request}
 
 
183
  """
184
 
185
  return base_prompt
 
134
  You can use the following FastMCP tools. These allow you to create **read-only** queries, such as `SELECT`, `COUNT`, or queries with `GROUP BY`, `ORDER BY`, and similar clauses. You may chain tools together to gather the necessary information before generating your SQL query.
135
 
136
  {tools}
137
+ {tool_names}
138
 
139
  ---
140
 
 
150
  > SELECT * FROM customers WHERE country = 'Germany';
151
  > ```
152
 
153
+ ==========================
154
+ # Output Format
155
+ ==========================
156
+
157
+ Present your final answer using the following structure **exactly**. When necessary, bold the important parts of your answer or use `` inline code blocks.:
158
+
159
+ ```markdown
160
+ # Result
161
+ {{Take the result from the execute_query tool and format it nicely using Markdown. Use a Markdown table for tabular data (rows and columns) including headers. Use bullet points or items in markdown for answers that include lists of names or descriptions. Use plain text for single values or simple messages. Ensure data alignment and clarity.}}
162
+
163
+ # Explanation
164
+ {{Provide a concise explanation or interpretation of the results in 1-3 sentences. Explain what the data in the 'Result' section represents in the context of the user's request.}}
165
+
166
+ # Query
167
+ ```sql
168
+ {{Display the exact SQL query you generated and executed here to answer the user's request.}}
169
 
170
  **Reminder:**
171
  **Every time you generate a SQL query, call **execute_query** right after and include the result in your final response.**
172
  **If you do not execute the generated SQL query, this will be the violation of the instructions**
173
  **Your final answer cannot be only a SQL query, you will have to call **execute_query** and give the result of the call with the SQL query.**
174
  ---
175
+
 
176
  =========================
177
  # New User Request
178
  =========================
179
 
180
  Please fulfill the following request based on the above context:
181
 
182
+ {input}
183
+
184
+ Thought:{agent_scratchpad}
185
  """
186
 
187
  return base_prompt