Selcan Yukcu committed on
Commit
734767d
·
1 Parent(s): 7afc0de

feat: gradio and streamlit app options, formatting output

Browse files
gradio_app.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import yaml
2
+ from pathlib import Path
3
+ import gradio as gr
4
+ import asyncio
5
+ from postgre_mcp_client import pg_mcp_exec
6
+
7
def load_db_configs():
    """Load database configurations from configs.yaml.

    Returns:
        The value of the top-level ``db_configs`` key.

    Raises:
        FileNotFoundError: if configs.yaml is not present in the working directory.
    """
    configs_path = Path("configs.yaml")

    # Fail fast with a clear message rather than letting open() raise later.
    if not configs_path.exists():
        raise FileNotFoundError("configs.yaml not found")

    with configs_path.open() as f:
        loaded = yaml.safe_load(f)

    return loaded["db_configs"]
18
+
19
# Async-compatible wrapper
async def run_agent(request):
    """Run the MCP agent on a natural-language request.

    Args:
        request: The user's natural-language question for the database.

    Returns:
        A single display string combining the agent's final answer and the
        last tool output.
    """
    # pg_mcp_exec returns (final_answer, last_tool_answer), but the Gradio
    # interface declares a single output Textbox — returning two values to
    # one output component is an error, so merge them into one string.
    final_answer, last_tool_answer = await pg_mcp_exec(request)
    return f"{final_answer}\n\n--- Last tool output ---\n{last_tool_answer}"
24
+
25
# Gradio UI: one text input for the natural-language request, one text
# output for the agent's result.
request_box = gr.Textbox(
    label="Natural Language Request",
    placeholder="e.g., Show me the table of join posts and users tables."
)

demo = gr.Interface(
    fn=run_agent,
    inputs=request_box,
    outputs=gr.Textbox(label="SQL Query / Result"),
    title="PostgreSQL Query Agent",
    description="Ask your database in natural language and get results using the smolagent executor."
)

if __name__ == "__main__":
    demo.launch()
postgre_mcp_client.py CHANGED
@@ -44,12 +44,12 @@ async def pg_mcp_exec(request: str) -> str:
44
  agent = create_react_agent(llm, tools)
45
  agent_response = await agent.ainvoke({"messages": prompt})
46
 
47
- parsed_steps, _ = parse_mcp_output(agent_response)
48
  memory.update_from_parsed(parsed_steps, request)
49
 
50
  await handle_memory_save_or_reset(memory, request)
51
 
52
- return agent_response
53
 
54
 
55
  # ---------------- Helper Functions ---------------- #
 
44
  agent = create_react_agent(llm, tools)
45
  agent_response = await agent.ainvoke({"messages": prompt})
46
 
47
+ parsed_steps, final_answer, last_tool_answer, _ = parse_mcp_output(agent_response)
48
  memory.update_from_parsed(parsed_steps, request)
49
 
50
  await handle_memory_save_or_reset(memory, request)
51
 
52
+ return final_answer, last_tool_answer
53
 
54
 
55
  # ---------------- Helper Functions ---------------- #
postgre_smolagent_clinet.py CHANGED
@@ -46,10 +46,10 @@ async def pg_mcp_smolagent_exec(request: str) -> str:
46
  agent_response = agent.run(task=prompt, stream=False)
47
 
48
 
49
- parsed_steps, _ = parse_mcp_output(agent_response)
50
- memory.update_from_parsed(parsed_steps, request)
51
 
52
- await handle_memory_save_or_reset(memory, request)
53
 
54
  return agent_response
55
 
 
46
  agent_response = agent.run(task=prompt, stream=False)
47
 
48
 
49
+ #parsed_steps, _ = parse_mcp_output(agent_response)
50
+ #memory.update_from_parsed(parsed_steps, request)
51
 
52
+ #await handle_memory_save_or_reset(memory, request)
53
 
54
  return agent_response
55
 
requirements.txt ADDED
Binary file (264 Bytes). View file
 
streamlit_app.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import yaml
2
+ from pathlib import Path
3
+ import streamlit as st
4
+ from postgre_mcp_client import pg_mcp_exec
5
+ import asyncio
6
+ import nest_asyncio
7
+ nest_asyncio.apply()
8
+
9
def load_db_configs():
    """Load database configurations from configs.yaml.

    Returns:
        The ``db_configs`` mapping ({} if the key is absent), or ``None``
        if configs.yaml does not exist (an error is shown in the UI).
    """
    configs_path = Path("configs.yaml")

    # Surface the missing file in the Streamlit UI instead of raising.
    if not configs_path.exists():
        st.error("configs.yaml not found")
        return None

    loaded = yaml.safe_load(configs_path.read_text())
    return loaded.get("db_configs", {})
21
+
22
def run_agent(message):
    """Execute the async MCP agent synchronously and wrap the reply.

    Args:
        message: The user's natural-language request.

    Returns:
        A chat-style message dict: {"role": "assistant", "content": <str>}.
    """
    # pg_mcp_exec returns (final_answer, last_tool_answer); stuffing the raw
    # tuple into "content" renders as an unreadable tuple repr, so unpack it
    # and format a readable string for display.
    final_answer, last_tool_answer = asyncio.run(pg_mcp_exec(message))
    content = f"{final_answer}\n\n--- Last tool output ---\n{last_tool_answer}"
    # Return in message format
    return {"role": "assistant", "content": content}
26
+
27
# Streamlit UI: title, prompt box, and a button that triggers the agent run.
st.title("PostgreSQL Query Agent")
st.write("Ask your database in natural language and get results using the smolagent executor.")

user_input = st.text_input("Natural Language Request", placeholder="e.g., Show me the table of join posts and users tables.")

if st.button("Run Query"):
    # Guard clause: reject blank/whitespace-only input before calling the agent.
    if not user_input.strip():
        st.warning("Please enter a natural language request.")
    else:
        result = run_agent(user_input)
        st.text_area("SQL Query / Result", value=str(result), height=300)
utils.py CHANGED
@@ -8,7 +8,8 @@ def parse_mcp_output(output_dict):
8
  result = []
9
  messages = output_dict.get("messages", [])
10
  query_store = []
11
-
 
12
  for msg in messages:
13
  role_name = msg.__class__.__name__ # Example: HumanMessage, AIMessage, ToolMessage
14
  content = getattr(msg, "content", "")
@@ -33,9 +34,11 @@ def parse_mcp_output(output_dict):
33
  # Check for presence of "query" key
34
  if "query" in arguments_dict:
35
  #print("query detected!!!")
36
- print(f"=============== AI Reasoning Step ===============")
37
- print(content[0])
38
- print()
 
 
39
  print("=============== AI used the following tools ===============")
40
  print(tool_name)
41
  print()
@@ -73,6 +76,7 @@ def parse_mcp_output(output_dict):
73
  })
74
 
75
  else:
 
76
  #print(f"ai final answer:{content}")
77
  logger.info(f"ai final answer:{content}")
78
  print("=============== AI's final answer ===============")
@@ -87,6 +91,7 @@ def parse_mcp_output(output_dict):
87
  tool_name = getattr(msg, "name", None)
88
  print("=============== The tool returned the following response ===============")
89
  print(content)
 
90
  logger.info(f"tool response:{content}")
91
  result.append({
92
  "type": "tool_response",
@@ -94,7 +99,7 @@ def parse_mcp_output(output_dict):
94
  "response": content
95
  })
96
 
97
- return result, query_store
98
 
99
 
100
 
 
8
  result = []
9
  messages = output_dict.get("messages", [])
10
  query_store = []
11
+ last_tool_answer = ""
12
+ last_answer = ""
13
  for msg in messages:
14
  role_name = msg.__class__.__name__ # Example: HumanMessage, AIMessage, ToolMessage
15
  content = getattr(msg, "content", "")
 
34
  # Check for presence of "query" key
35
  if "query" in arguments_dict:
36
  #print("query detected!!!")
37
+
38
+ if content:
39
+ print(f"=============== AI Reasoning Step ===============")
40
+ print(content[0])
41
+ print()
42
  print("=============== AI used the following tools ===============")
43
  print(tool_name)
44
  print()
 
76
  })
77
 
78
  else:
79
+ final_answer = content
80
  #print(f"ai final answer:{content}")
81
  logger.info(f"ai final answer:{content}")
82
  print("=============== AI's final answer ===============")
 
91
  tool_name = getattr(msg, "name", None)
92
  print("=============== The tool returned the following response ===============")
93
  print(content)
94
+ last_tool_answer = content
95
  logger.info(f"tool response:{content}")
96
  result.append({
97
  "type": "tool_response",
 
99
  "response": content
100
  })
101
 
102
+ return result, final_answer, last_tool_answer, query_store
103
 
104
 
105