Commit a0d91e2
Parent: ae4fa43

fix: memory and system messages

Files changed:
- .gitignore: +4 -4
- chat_history.json: +0 -1
- gradio_app.py: +14 -0
- langchain_mcp_client.py: +52 -92
- postgre_mcp_server.py: +4 -10
- utils.py: +0 -19
.gitignore
CHANGED

@@ -1,8 +1,8 @@
-
+*.idea
.env
-
-
-.
+*.vscode
+*.iml
+misc.xml

# Byte-compiled / optimized / DLL files
__pycache__/
chat_history.json
DELETED

@@ -1 +0,0 @@
[{"type": "human", "data": {"content": "list all tables", "additional_kwargs": {}, "response_metadata": {}, "type": "human", "name": null, "id": null, "example": false}}, {"type": "ai", "data": {"content": "# Result\nThe tables in the database are:\n* dim\\_agreement\n* dim\\_customer\n* dim\\_product\n* dim\\_product\\_order\\_item\n\n# Explanation\nThe `list_tables` tool was called to retrieve a list of all available tables in the database schema. The result shows the names of these tables.\n\n# Query\n```sql\nN/A\n```", "additional_kwargs": {}, "response_metadata": {}, "type": "ai", "name": null, "id": null, "example": false, "tool_calls": [], "invalid_tool_calls": [], "usage_metadata": null}}, {"type": "human", "data": {"content": "what was my previous request ?", "additional_kwargs": {}, "response_metadata": {}, "type": "human", "name": null, "id": null, "example": false}}, {"type": "ai", "data": {"content": "# Result\nlist all tables\n# Explanation\nThe user's previous request was to list all the tables in the database. This was determined by reviewing the previous turn in the conversation history.\n# Query\n```sql\nN/A\n```", "additional_kwargs": {}, "response_metadata": {}, "type": "ai", "name": null, "id": null, "example": false, "tool_calls": [], "invalid_tool_calls": [], "usage_metadata": null}}, {"type": "human", "data": {"content": "count tables", "additional_kwargs": {}, "response_metadata": {}, "type": "human", "name": null, "id": null, "example": false}}, {"type": "ai", "data": {"content": "```markdown\n# Result\n4\n# Explanation\nThe user requested the number of tables. The previous response listed the tables, and counting them gives the answer.\n# Query\n```sql\nN/A\n```", "additional_kwargs": {}, "response_metadata": {}, "type": "ai", "name": null, "id": null, "example": false, "tool_calls": [], "invalid_tool_calls": [], "usage_metadata": null}}, {"type": "human", "data": {"content": "what was my last message ?", "additional_kwargs": {}, "response_metadata": {}, "type": "human", "name": null, "id": null, "example": false}}, {"type": "ai", "data": {"content": "```markdown\n# Result\ncount tables\n# Explanation\nThe user's last message was \"count tables\". This was determined by reviewing the previous turn in the conversation history.\n# Query\n```sql\nN/A\n```", "additional_kwargs": {}, "response_metadata": {}, "type": "ai", "name": null, "id": null, "example": false, "tool_calls": [], "invalid_tool_calls": [], "usage_metadata": null}}, {"type": "human", "data": {"content": "what was my very first message in this conversation?", "additional_kwargs": {}, "response_metadata": {}, "type": "human", "name": null, "id": null, "example": false}}, {"type": "ai", "data": {"content": "```markdown\n# Result\nlist all tables\n# Explanation\nThe user's very first message in this conversation was \"list all tables\". 
This was determined by reviewing the conversation history.\n# Query\n```sql\nN/A\n```", "additional_kwargs": {}, "response_metadata": {}, "type": "ai", "name": null, "id": null, "example": false, "tool_calls": [], "invalid_tool_calls": [], "usage_metadata": null}}, {"type": "human", "data": {"content": "total number of sales in 2024", "additional_kwargs": {}, "response_metadata": {}, "type": "human", "name": null, "id": null, "example": false}}, {"type": "human", "data": {"content": "list all tables", "additional_kwargs": {}, "response_metadata": {}, "type": "human", "name": null, "id": null, "example": false}}, {"type": "human", "data": {"content": "list all tables", "additional_kwargs": {}, "response_metadata": {}, "type": "human", "name": null, "id": null, "example": false}}, {"type": "human", "data": {"content": "list all tables", "additional_kwargs": {}, "response_metadata": {}, "type": "human", "name": null, "id": null, "example": false}}, {"type": "ai", "data": {"content": "```markdown\n# Result\nThe tables in the database are:\n* dim\\_agreement\n* dim\\_customer\n* dim\\_product\n* dim\\_product\\_order\\_item\n\n# Explanation\nThe `list_tables` tool was called to retrieve a list of all available tables in the database schema. The result shows the names of these tables.\n\n# Query\n```sql\nN/A\n```", "additional_kwargs": {}, "response_metadata": {}, "type": "ai", "name": null, "id": null, "example": false, "tool_calls": [], "invalid_tool_calls": [], "usage_metadata": null}}, {"type": "human", "data": {"content": "list all tables", "additional_kwargs": {}, "response_metadata": {}, "type": "human", "name": null, "id": null, "example": false}}, {"type": "ai", "data": {"content": "```markdown\n# Result\nThe tables in the database are:\n* dim_agreement\n* dim_customer\n* dim_product\n* dim_product_order_item\n\n# Explanation\nThe `list_tables` tool was called to retrieve a list of all available tables in the database schema. The result shows the names of these tables.\n\n# Query\n```sql\nN/A\n```", "additional_kwargs": {}, "response_metadata": {}, "type": "ai", "name": null, "id": null, "example": false, "tool_calls": [], "invalid_tool_calls": [], "usage_metadata": null}}, {"type": "human", "data": {"content": "hi there. show me the largest table name", "additional_kwargs": {}, "response_metadata": {}, "type": "human", "name": null, "id": null, "example": false}}, {"type": "ai", "data": {"content": "I am sorry, I cannot directly determine the \"largest table name\" based on the available tools. I can list the tables and show their schemas, but I do not have a tool to determine table sizes. I would need a tool to get table sizes to fulfill this request. However, I can list the tables for you.\n\n```markdown\n# Result\nThe tables in the database are:\n* dim\\_agreement\n* dim\\_customer\n* dim\\_product\n* dim\\_product\\_order\\_item\n\n# Explanation\nThe `list_tables` tool was called to retrieve a list of all available tables in the database schema. 
The result shows the names of these tables.\n\n# Query\n```sql\nN/A\n```", "additional_kwargs": {}, "response_metadata": {}, "type": "ai", "name": null, "id": null, "example": false, "tool_calls": [], "invalid_tool_calls": [], "usage_metadata": null}}, {"type": "human", "data": {"content": "you are free to execute any sql query and get the size of each table and return me the count of largest one", "additional_kwargs": {}, "response_metadata": {}, "type": "human", "name": null, "id": null, "example": false}}, {"type": "ai", "data": {"content": "I am sorry, I cannot directly determine the \"largest table\" based on the available tools. I can list the tables and show their schemas, but I do not have a tool to determine table sizes. I would need a tool to get table sizes to fulfill this request. I can list the tables for you, but I cannot provide the count of the largest one.", "additional_kwargs": {}, "response_metadata": {}, "type": "ai", "name": null, "id": null, "example": false, "tool_calls": [], "invalid_tool_calls": [], "usage_metadata": null}}, {"type": "human", "data": {"content": "can you find the foreign keys and relationships ?", "additional_kwargs": {}, "response_metadata": {}, "type": "human", "name": null, "id": null, "example": false}}, {"type": "ai", "data": {"content": "I am unable to retrieve the foreign keys using the `get_foreign_keys` tool. It seems there's an issue with the tool's execution. However, based on the table schemas provided, I can infer the relationships.\n\n# Result\n\n* **dim\\_agreement**: Related to `dim_customer` via `customer_id`.\n* **dim\\_customer**: Likely related to `dim_product`, `dim_product_order_item`, and potentially other tables through their respective `customer_id` columns.\n* **dim\\_product**: Related to `dim_customer` via `customer_id`, `dim_agreement` via `agreement_id`, and likely other tables through foreign key relationships.\n* **dim\\_product\\_order\\_item**: Related to `dim_customer` via `customer_id`, `dim_agreement` via `agreement_id`, and likely related to a product order table via a `product_order_id`.\n\n# Explanation\n\nThe `get_foreign_keys` tool did not return the foreign key relationships. Therefore, I am relying on the schema descriptions to determine the relationships between the tables. The tables are connected through foreign keys, as described in the table descriptions.\n\n# Query\n\nN/A", "additional_kwargs": {}, "response_metadata": {}, "type": "ai", "name": null, "id": null, "example": false, "tool_calls": [], "invalid_tool_calls": [], "usage_metadata": null}}, {"type": "human", "data": {"content": "get all the foreign keys and summarize them", "additional_kwargs": {}, "response_metadata": {}, "type": "human", "name": null, "id": null, "example": false}}, {"type": "ai", "data": {"content": "I am sorry, I cannot directly retrieve and summarize all foreign keys using the available tools. The `get_foreign_keys` tool did not work in a previous turn. 
However, I can describe the relationships between the tables based on the schema descriptions.\n\n# Result\n\n* **dim\\_agreement**: Related to `dim_customer` via `customer_id`.\n* **dim\\_customer**: Likely related to `dim_product` and `dim_product_order_item` through their respective `customer_id` columns.\n* **dim\\_product**: Related to `dim_customer` via `customer_id`, `dim_agreement` via `agreement_id`, and likely other tables through foreign key relationships.\n* **dim\\_product\\_order\\_item**: Related to `dim_customer` via `customer_id`, `dim_agreement` via `agreement_id`, and likely related to a product order table via a `product_order_id`.\n\n# Explanation\n\nThe `get_foreign_keys` tool did not return the foreign key relationships. Therefore, I am relying on the schema descriptions to determine the relationships between the tables. The tables are connected through foreign keys, as described in the table descriptions.\n\n# Query\n```sql\nN/A\n```", "additional_kwargs": {}, "response_metadata": {}, "type": "ai", "name": null, "id": null, "example": false, "tool_calls": [], "invalid_tool_calls": [], "usage_metadata": null}}]
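Note: the file deleted above is exactly what `FileChatMessageHistory` writes to disk: a JSON array of `{"type": ..., "data": {...}}` entries. A minimal sketch of that round-trip using the serialization helpers from `langchain_core` (the message contents here are illustrative):

```python
# Sketch of the serialization behind the deleted chat_history.json:
# messages are stored as [{"type": "human" | "ai", "data": {...}}, ...]
from langchain_core.messages import (
    AIMessage,
    HumanMessage,
    messages_from_dict,
    messages_to_dict,
)

msgs = [
    HumanMessage(content="list all tables"),
    AIMessage(content="# Result\nThe tables in the database are: ..."),
]

as_dicts = messages_to_dict(msgs)        # same shape as the deleted file
restored = messages_from_dict(as_dicts)  # round-trips back to message objects
assert restored[0].content == "list all tables"
```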
gradio_app.py
CHANGED

@@ -56,6 +56,20 @@ demo = gr.ChatInterface(
#     flagging_mode="never"
# )

+async def build_prompt(session, request, tools, table_summary):
+    conversation_prompt = await session.read_resource("resource://base_prompt")
+
+    template = conversation_prompt.contents[0].text
+    tools_str = "\n".join([f"- {tool.name}: {tool.description}" for tool in tools])
+
+    return template.format(
+        new_request=request,
+        tools=tools_str,
+        descriptions=table_summary,
+        system_instructions="""You are a PostgreSQL database expert assistant.
+Use the conversation history when available to maintain context.
+For new conversations, focus on understanding the initial request."""
+    )

if __name__ == "__main__":
    demo.launch()
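Reviewer note: this helper duplicates `build_prompt` from `langchain_mcp_client.py`, still with the old `new_request`/`system_instructions` signature. A sketch of how it might be wired into a chat callback; `open_mcp_session` is a placeholder for however the app obtains an initialized MCP `ClientSession`, not something in this commit:

```python
# Hypothetical wiring of build_prompt into a Gradio handler.
from langchain_mcp_adapters.tools import load_mcp_tools

async def respond(message: str, history: list) -> str:
    async with open_mcp_session() as session:  # placeholder helper, see note above
        tools = await load_mcp_tools(session)
        prompt = await build_prompt(session, message, tools, table_summary="...")
        return prompt  # in the real app the prompt feeds the agent, not the UI
```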
langchain_mcp_client.py
CHANGED

@@ -5,57 +5,35 @@ from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from langchain_mcp_adapters.tools import load_mcp_tools
from langgraph.prebuilt import create_react_agent
-from langchain_core.
-from langchain_core.messages import AIMessage, HumanMessage
-from langchain.memory import ChatMessageHistory
+from langchain_core.messages import AIMessage, HumanMessage, ToolMessage, SystemMessage
from langchain_community.chat_message_histories import FileChatMessageHistory
from langchain.chat_models import init_chat_model
-from utils import parse_mcp_output, classify_intent
import logging
from dotenv import load_dotenv

-
# Set up logging
logger = logging.getLogger(__name__)

-# Load environment variables
+# Load environment variables
load_dotenv()

-async def lc_mcp_exec(request: str, history) -> tuple[Any, Any]:
+async def lc_mcp_exec(request: str, history=None) -> Tuple[str, list]:
    """
-    Execute the
+    Execute the PostgreSQL MCP pipeline with persistent memory in a JSON file.
+    Returns the response and the updated message history.
    """
    try:
+        # Define the path for the chat history JSON file
        history_file = os.path.join(os.path.dirname(__file__), "chat_history.json")

-        # Initialize
-        if not os.path.exists(history_file) or os.path.getsize(history_file) == 0:
-            with open(history_file, 'w') as f:
-                json.dump({"messages": []}, f)
-
+        # Initialize FileChatMessageHistory for persistent storage
        message_history = FileChatMessageHistory(file_path=history_file)

-
-        # Load existing messages or handle bootstrap scenario
-        existing_messages = message_history.messages
-    except json.JSONDecodeError:
-        # If JSON is corrupted, reinitialize the file
-        logger.warning("Chat history file corrupted, reinitializing...")
-        with open(history_file, 'w') as f:
-            json.dump({"messages": []}, f)
-        existing_messages = []
-
-    # Format existing messages properly
-    formatted_history = []
-    for msg in existing_messages:
-        if isinstance(msg, HumanMessage):
-            formatted_history.append(HumanMessage(content=msg.content))
-        elif isinstance(msg, AIMessage):
-            formatted_history.append(AIMessage(content=msg.content))
-
+        # Load table summary and server parameters
        table_summary = load_table_summary(os.environ["TABLE_SUMMARY_PATH"])
        server_params = get_server_params()

+        # Initialize the LLM
        llm = init_chat_model(
            model=os.getenv("GEMINI_MODEL"),
            model_provider=os.getenv("GEMINI_MODEL_PROVIDER"),
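The hunk above replaces manual file bootstrapping and `json.JSONDecodeError` recovery with `FileChatMessageHistory` managing its own file. A minimal sketch of the persistence model this switches to (path is illustrative):

```python
# FileChatMessageHistory creates the JSON file on first write and reloads it
# on construction, so no manual bootstrap or corruption check is needed here.
from langchain_community.chat_message_histories import FileChatMessageHistory

history = FileChatMessageHistory(file_path="chat_history.json")
history.add_user_message("list all tables")
history.add_ai_message("dim_agreement, dim_customer, dim_product, ...")

# A second instance pointed at the same file sees the persisted messages.
reloaded = FileChatMessageHistory(file_path="chat_history.json")
print([m.type for m in reloaded.messages])  # ['human', 'ai']
```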
@@ -67,60 +45,62 @@ async def lc_mcp_exec(request: str, history) -> tuple[Any, Any]:
        async with ClientSession(read, write) as session:
            await session.initialize()

+            # Load tools
            tools = await load_and_enrich_tools(session)

-            #
+            # Create the ReAct agent with tools
+            agent = create_react_agent(llm, tools)
+
+            # Add the new user message to history as HumanMessage
            message_history.add_user_message(request)

-            #
-
+            # Prepare the system prompt for the agent
+            system_prompt = await build_prompt(session, tools, table_summary)

-            #
-
-
-            messages = [base_message]
-
-            # Add history if exists
-            if formatted_history:
-                messages.extend(formatted_history)
-
-            # Add current request
-            messages.append(HumanMessage(content=request))
-
-            # Build prompt with conversation context
-            prompt = await build_prompt(
-                session=session,
-                request=request,
-                tools=tools,
-                table_summary=table_summary,
-                chat_history=formatted_history
+            # Init the system message
+            system_message = SystemMessage(
+                content=system_prompt
            )

-            #
+            # Add the system message to the history
+            input_messages = [system_message] + message_history.messages
+
+            # Invoke the agent with the message list
            agent_response = await agent.ainvoke(
-                {
-                    "messages": prompt,
-                    "chat_history": [msg.content for msg in formatted_history]
-                },
+                {"messages": input_messages},
                config={"configurable": {"thread_id": "conversation_123"}}
            )

-
-
-
-
+            # Extract the latest response and save all new messages
+            response_content = "No response generated"
+            if "messages" in agent_response and agent_response["messages"]:
+                # Identify new messages (those not in input_messages)
+                new_messages = agent_response["messages"][len(input_messages):]
+
+                # Save all new messages to history
+                for msg in new_messages:
+                    if isinstance(msg, AIMessage):
+                        # Save AIMessage, including tool_calls if present
+                        message_history.add_message(msg)
+                    elif isinstance(msg, ToolMessage):
+                        # Save ToolMessage with content and tool_call_id
+                        message_history.add_message(msg)
+                    else:
+                        # Log unexpected message types for debugging
+                        logger.debug(f"Skipping unexpected message type: {type(msg)}")
+
+                # Use the last message's content as the response
+                response_content = agent_response["messages"][-1].content
+
            else:
-
-                message_history.add_ai_message(response)
+                message_history.add_ai_message(response_content)

-            # Return
-            return
+            # Return the response and the updated history
+            return response_content, message_history.messages

        except Exception as e:
-            logger.error(f"Error in
-
-            return f"Error in conversation: {str(e)}", []
-
+            logger.error(f"Error in execution: {str(e)}", exc_info=True)
+            return f"Error: {str(e)}", message_history.messages if 'message_history' in locals() else []

# ---------------- Helper Functions ---------------- #
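This is the core of the fix: rather than splicing history text into the prompt string, the client now sends the agent a real message list (system message first, stored history after) and persists every new `AIMessage`/`ToolMessage` the agent produces. A condensed sketch of that flow, assuming `llm` and `tools` are set up as in the surrounding code:

```python
from langchain_core.messages import HumanMessage, SystemMessage
from langgraph.prebuilt import create_react_agent

async def run_turn(llm, tools, system_prompt: str, history: list, request: str):
    agent = create_react_agent(llm, tools)
    input_messages = (
        [SystemMessage(content=system_prompt)] + history + [HumanMessage(content=request)]
    )

    result = await agent.ainvoke({"messages": input_messages})

    # The agent state contains the whole transcript; everything past the input
    # is new: AIMessage tool calls, ToolMessage results, the final answer.
    new_messages = result["messages"][len(input_messages):]
    return result["messages"][-1].content, new_messages
```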
@@ -128,10 +108,7 @@ def load_table_summary(path: str) -> str:
    with open(path, 'r') as file:
        return file.read()

-
def get_server_params() -> StdioServerParameters:
-    # TODO: give server params from config
-    load_dotenv()
    return StdioServerParameters(
        command="python",
        args=[os.environ["MCP_SERVER_PATH"]],
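For reference, `get_server_params()` now only constructs the `StdioServerParameters`; the redundant `load_dotenv()` call and TODO are gone. A sketch of how those parameters are consumed, mirroring the `async with` blocks in `lc_mcp_exec` (the script path is the repo's server file, hard-coded here for illustration):

```python
# Spawns the MCP server as a subprocess over stdio and lists its tools.
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client

async def list_server_tools() -> list[str]:
    server_params = StdioServerParameters(command="python", args=["postgre_mcp_server.py"])
    async with stdio_client(server_params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            result = await session.list_tools()
            return [tool.name for tool in result.tools]
```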
@@ -143,30 +120,13 @@ async def load_and_enrich_tools(session: ClientSession):
    return tools


-async def build_prompt(session,
+async def build_prompt(session, tools, table_summary):
    conversation_prompt = await session.read_resource("resource://base_prompt")

    template = conversation_prompt.contents[0].text
    tools_str = "\n".join([f"- {tool.name}: {tool.description}" for tool in tools])

-    # Handle history formatting with proper message access
-    history_str = ""
-    if chat_history:
-        history_sections = []
-        for msg in chat_history:
-            if isinstance(msg, HumanMessage):
-                history_sections.append(f"Previous Human Question:\n{msg.content}\n")
-            elif isinstance(msg, AIMessage):
-                history_sections.append(f"Previous Assistant Response:\n{msg.content}\n")
-        history_str = "\n".join(history_sections)
-
    return template.format(
-        new_request=request,
        tools=tools_str,
        descriptions=table_summary,
-
-        system_instructions="""You are a PostgreSQL database expert assistant.
-Use the conversation history when available to maintain context.
-For new conversations, focus on understanding the initial request."""
-    )
-
+    )
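One detail that ties the slimmed-down `build_prompt` to the server template below: `str.format` only substitutes single-brace fields, which is why the template writes `{{...}}` around literal examples. A toy demonstration (the template string here is a stand-in, not the real `resource://base_prompt`):

```python
# Doubled braces survive str.format as literal braces, so {{...}} spans in the
# server prompt are left for the model to fill in, not for format() to consume.
template = "Tools:\n{tools}\n\nTables:\n{descriptions}\n\nQuery:\n{{your SQL here}}"
print(template.format(tools="- list_tables: list all tables", descriptions="dim_customer, ..."))
# The last line prints as: Query:
#                          {your SQL here}
```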
postgre_mcp_server.py
CHANGED

@@ -155,7 +155,7 @@ You can use the following FastMCP tools. These allow you to create **read-only**
# Output Format
==========================

-Present your final answer using the following structure **exactly
+Present your final answer using the following structure **exactly** in markdown language. When necessary, bold the important parts of your answer or use `` for inline code blocks:

```markdown
# Result

@@ -167,21 +167,15 @@ Present your final answer using the following structure **exactly**. When necess
# Query
```sql
{{Display the exact SQL query you generated and executed here to answer the user's request.}}
+```
+
+```

**Reminder:**
**Every time you generate a SQL query, call **execute_query** right after and include the result in your final response.**
**If you do not execute the generated SQL query, this will be the violation of the instructions**
**Your final answer cannot be only a SQL query, you will have to call **execute_query** and give the result of the call with the SQL query.**
----
-{chat_history}
----
-=========================
-# New User Request
-=========================
-
-Please fulfill the following request based on the above context:

-{new_request}
"""

    return base_prompt
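The template edited above reaches the client as `resource://base_prompt` via `session.read_resource`. Assuming the server registers it with FastMCP's resource decorator (an inference from the prompt's mention of FastMCP tools, not something visible in this diff), the shape would be roughly:

```python
# Assumed registration shape for the prompt resource; the URI matches the
# client's read_resource call, but the decorator usage is an assumption.
from mcp.server.fastmcp import FastMCP

mcp = FastMCP("postgres-mcp")

@mcp.resource("resource://base_prompt")
def get_base_prompt() -> str:
    # Would return the template above, with {tools}/{descriptions} placeholders
    # and doubled-brace escapes intact.
    return base_prompt  # hypothetical module-level string holding the template
```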
utils.py
CHANGED

@@ -102,22 +102,3 @@ def parse_mcp_output(output_dict):
    return result, final_answer, last_tool_answer, query_store


-
-
-
-def classify_intent(user_input: str) -> str:
-    user_input = user_input.lower().strip()
-
-    superset_keywords = [
-        "send to superset", "chart", "visualize", "visualise",
-        "plot", "graph", "send this", "send that", "create a chart",
-        "push to superset", "make a chart", "show chart", "dashboard", "send"
-    ]
-
-    # Check for superset intent
-    if any(kw in user_input for kw in superset_keywords):
-        return "superset_request"
-
-    # Fallback
-    return "sql_request"
-