Gustav2811 committed on
Commit
9227492
·
1 Parent(s): 9a09ed2
Files changed (3) hide show
  1. .chainlit/config.toml +1 -1
  2. app.py +194 -36
  3. requirements.txt +2 -1
.chainlit/config.toml CHANGED
@@ -62,7 +62,7 @@ edit_message = true
62
  # Only the executables in the allow list can be used for MCP stdio server.
63
  # Only need the base name of the executable, e.g. "npx", not "/usr/bin/npx".
64
  # Please don't comment this line for now, we need it to parse the executable name.
65
- allowed_executables = [ "npx", "uvx" ]
66
 
67
  [UI]
68
  # Name of the assistant.
 
62
  # Only the executables in the allow list can be used for MCP stdio server.
63
  # Only need the base name of the executable, e.g. "npx", not "/usr/bin/npx".
64
  # Please don't comment this line for now, we need it to parse the executable name.
65
+ allowed_executables = [ "npx", "python", "node" ]
66
 
67
  [UI]
68
  # Name of the assistant.
app.py CHANGED
@@ -1,9 +1,11 @@
1
  import os
2
- from typing import Optional, Dict
3
  import asyncio
 
4
  import chainlit as cl
5
  import google.generativeai as genai
6
  from openai import AsyncOpenAI
 
7
 
8
 
9
  # --- Logging setup ---
@@ -39,51 +41,207 @@ def oauth_callback(
39
  return None
40
 
41
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
  @cl.on_chat_start
43
  async def start():
44
  log_info("Chat session started.")
45
- await cl.Message(content="# Welcome!").send()
46
  cl.user_session.set("message_history", [])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
 
48
 
49
  @cl.on_message
50
  async def main(message: cl.Message):
51
- log_info("=" * 20)
52
- log_info(f"on_message triggered for: '{message.content}'")
53
-
54
- model = "gpt-4o" # Hardcoding for simplicity during debug
55
- temperature = 0.7
56
- message_history = cl.user_session.get("message_history", [])
57
- message_history.append({"role": "user", "content": message.content})
58
-
59
- try:
60
- # Create an empty message for streaming
61
- msg = cl.Message(content="", author="Assistant")
62
-
63
- log_info(f"Calling OpenAI model '{model}' with streaming...")
64
- if not openai_client:
65
- raise ValueError("OpenAI API key not configured")
66
-
67
- # Stream the response from OpenAI
68
- stream = await openai_client.chat.completions.create(
69
- model=model, messages=message_history, temperature=temperature, stream=True
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
70
  )
71
-
72
- log_info("Starting to stream response...")
73
- async for part in stream:
74
  if token := part.choices[0].delta.content or "":
75
- await msg.stream_token(token)
76
 
77
- log_info(f"Streaming complete. Final content: '{msg.content}'")
 
78
 
79
- # Update the message to finalize it
80
- await msg.update()
81
 
82
- # Update message history with the complete response
83
- message_history.append({"role": "assistant", "content": msg.content})
84
- cl.user_session.set("message_history", message_history)
85
- log_info("on_message finished successfully with streaming.")
86
-
87
- except Exception as e:
88
- log_error(f"An exception occurred: {str(e)}")
89
- await cl.Message(content=f"An error occurred: {str(e)}").send()
 
1
  import os
2
+ from typing import Optional, Dict, List, Any
3
  import asyncio
4
+ import json
5
  import chainlit as cl
6
  import google.generativeai as genai
7
  from openai import AsyncOpenAI
8
+ from mcp import ClientSession
9
 
10
 
11
  # --- Logging setup ---
 
41
  return None
42
 
43
 
44
@cl.on_mcp_connect
async def on_mcp_connect(connection, session: ClientSession):
    """Discover and register tools when a new MCP connection is established.

    Lists the tools exposed by the connected MCP server, converts them into
    the OpenAI function-calling schema, and stores them in the user session
    under "mcp_tools", keyed by connection name.
    """
    # Use content= consistently with the other cl.Message calls in this file.
    await cl.Message(
        content=f"Establishing connection with MCP server: `{connection.name}`..."
    ).send()

    try:
        result = await session.list_tools()
        # Convert MCP tool metadata into OpenAI "tools" schema entries.
        tools_for_llm = [
            {
                "type": "function",
                "function": {
                    "name": t.name,
                    # Some servers omit descriptions; the OpenAI schema
                    # expects a string, so fall back to "".
                    "description": t.description or "",
                    "parameters": t.inputSchema,
                },
            }
            for t in result.tools
        ]

        all_mcp_tools = cl.user_session.get("mcp_tools", {})
        all_mcp_tools[connection.name] = tools_for_llm
        cl.user_session.set("mcp_tools", all_mcp_tools)

        tool_names = [t.name for t in result.tools]
        await cl.Message(
            content=f"✅ Connection to `{connection.name}` successful. Found tools:\n`{', '.join(tool_names)}`"
        ).send()
    except Exception as e:
        await cl.ErrorMessage(
            content=f"Error discovering tools for `{connection.name}`: {e}"
        ).send()
77
+
78
+
79
@cl.on_mcp_disconnect
async def on_mcp_disconnect(name: str, session: ClientSession):
    """Announce the disconnect and drop the connection's registered tools."""
    await cl.Message(f"MCP connection `{name}` has been disconnected.").send()
    registered = cl.user_session.get("mcp_tools", {})
    # pop with a default replaces the explicit membership check + del.
    registered.pop(name, None)
    cl.user_session.set("mcp_tools", registered)
87
+
88
+
89
@cl.on_chat_start
async def start():
    """Initialize a fresh chat session: greet the user and reset state."""
    log_info("Chat session started.")
    await cl.Message(content="# Welcome to Naked Insurance! How can I help?").send()
    # Fresh per-session state: empty LLM history, no MCP tools registered yet.
    for key, empty in (("message_history", []), ("mcp_tools", {})):
        cl.user_session.set(key, empty)
95
+
96
+
97
async def execute_tool_call(tool_call: Any):
    """Find the MCP session that owns a tool and execute the requested call.

    Appends a {"role": "tool", ...} result (or error) message to the session
    message history so a follow-up LLM call can see the outcome.

    Args:
        tool_call: An object in the OpenAI tool-call shape, providing
            .id, .function.name and .function.arguments (a JSON string).
    """
    tool_name = tool_call.function.name
    tool_args_str = tool_call.function.arguments

    # Default to [] so a missing history key cannot crash the appends below.
    history = cl.user_session.get("message_history") or []

    # Locate which active MCP connection advertises this tool.
    mcp_connection_name = None
    all_mcp_tools = cl.user_session.get("mcp_tools", {})
    for conn_name, tools in all_mcp_tools.items():
        if any(t["function"]["name"] == tool_name for t in tools):
            mcp_connection_name = conn_name
            break

    if not mcp_connection_name:
        error_msg = f"Error: Tool `{tool_name}` not found in any active MCP connection."
        history.append(
            {
                "tool_call_id": tool_call.id,
                "role": "tool",
                "name": tool_name,
                "content": error_msg,
            }
        )
        # Persist even on the early-return path so the error is not lost.
        cl.user_session.set("message_history", history)
        return

    async with cl.Step(type="tool", name=tool_name, input=tool_args_str) as step:
        try:
            # .get() returns None if the connection vanished since lookup;
            # fail with a clear message instead of an unpack TypeError.
            session_entry = cl.context.session.mcp_sessions.get(mcp_connection_name)
            if session_entry is None:
                raise RuntimeError(
                    f"MCP session `{mcp_connection_name}` is no longer available"
                )
            mcp_session, _ = session_entry
            tool_args = json.loads(tool_args_str)
            tool_result = await mcp_session.call_tool(tool_name, tool_args)
            step.output = tool_result.content
            history.append(
                {
                    "tool_call_id": tool_call.id,
                    "role": "tool",
                    "name": tool_name,
                    # OpenAI tool messages require string content; MCP returns
                    # a list of content parts, so stringify it for the API.
                    "content": str(tool_result.content),
                }
            )
        except Exception as e:
            error_msg = f"Error executing tool `{tool_name}`: {e}"
            step.error = error_msg
            history.append(
                {
                    "tool_call_id": tool_call.id,
                    "role": "tool",
                    "name": tool_name,
                    "content": error_msg,
                }
            )

    cl.user_session.set("message_history", history)
150
 
151
 
152
@cl.on_message
async def main(message: cl.Message):
    """Handle a user message: stream an LLM reply, executing MCP tools if asked.

    Flow:
      1. Append the user message to history and aggregate the tools from
         every active MCP connection.
      2. Stream a first completion; buffer any tool-call chunks.
      3. If tools were requested, execute them via execute_tool_call() and
         stream a second completion carrying the tool results.
    """
    # Default to [] so a missing history key cannot crash the append.
    history = cl.user_session.get("message_history") or []
    history.append({"role": "user", "content": message.content})
    # Sync now so execute_tool_call() mutates the same list object.
    cl.user_session.set("message_history", history)

    # Aggregate tools from all connections.
    all_mcp_tools = cl.user_session.get("mcp_tools", {})
    aggregated_tools = [
        tool for conn_tools in all_mcp_tools.values() for tool in conn_tools
    ]

    try:
        # First API call to get an assistant response and/or tool calls.
        stream = await openai_client.chat.completions.create(
            model="gpt-4o",
            messages=history,
            tools=aggregated_tools if aggregated_tools else None,
            stream=True,
        )

        output_message = cl.Message(content="", author="Assistant")
        tool_calls_buffer = {}  # index -> tool call reassembled from chunks

        async for part in stream:
            delta = part.choices[0].delta
            if delta.content:
                # Stream content tokens as they arrive.
                await output_message.stream_token(delta.content)

            if delta.tool_calls:
                for chunk in delta.tool_calls:
                    index = chunk.index
                    if index not in tool_calls_buffer:
                        # First chunk for this tool call: initialize it.
                        tool_calls_buffer[index] = {
                            "id": chunk.id or "",
                            "type": "function",
                            "function": {
                                "name": chunk.function.name or "",
                                "arguments": "",
                            },
                        }
                    # Arguments arrive incrementally; concatenate the pieces.
                    if chunk.function.arguments:
                        tool_calls_buffer[index]["function"][
                            "arguments"
                        ] += chunk.function.arguments

        # Finalize the streamed message only if it carried any text.
        if output_message.content:
            await output_message.update()

        if tool_calls_buffer:
            # Record the assistant's decision to call tools, in chunk order.
            assistant_message = {
                "role": "assistant",
                "content": None,
                "tool_calls": [
                    tool_calls_buffer[i] for i in sorted(tool_calls_buffer)
                ],
            }
            history.append(assistant_message)

            # execute_tool_call() only reads attributes (.id, .function.name,
            # .function.arguments), so a stdlib SimpleNamespace stands in for
            # the OpenAI model type — no per-iteration pydantic classes.
            from types import SimpleNamespace

            for tc in assistant_message["tool_calls"]:
                tool_call_obj = SimpleNamespace(
                    id=tc["id"],
                    type=tc["type"],
                    function=SimpleNamespace(**tc["function"]),
                )
                await execute_tool_call(tool_call_obj)

            # Second API call with tool results for the final reply.
            final_stream = await openai_client.chat.completions.create(
                model="gpt-4o", messages=history, stream=True
            )
            final_output_message = cl.Message(content="", author="Assistant")
            async for part in final_stream:
                if token := part.choices[0].delta.content or "":
                    await final_output_message.stream_token(token)

            await final_output_message.update()
            history.append(
                {"role": "assistant", "content": final_output_message.content}
            )
        else:  # No tool calls, just a simple response
            history.append({"role": "assistant", "content": output_message.content})
    except Exception as e:
        # Restore the user-facing error reporting the pre-refactor handler had.
        await cl.Message(content=f"An error occurred: {str(e)}").send()

    cl.user_session.set("message_history", history)
 
 
 
 
 
 
 
requirements.txt CHANGED
@@ -3,4 +3,5 @@ openai
3
  google-generativeai
4
  python-dotenv
5
  requests
6
- websockets
 
 
3
  google-generativeai
4
  python-dotenv
5
  requests
6
+ websockets
7
+ mcp