Update app.py
app.py CHANGED
The change spans three hunks (@@ -10,20 +10,15 @@, @@ -31,54 +26,52 @@, @@ -86,102 +79,101 @@). On the removed side: the previous `async def generate(input_text, history)` coroutine, whose docstring spelled out the flow (1. connect to the MCP server via SSE; 2. fetch the tool definitions; 3. send the user input plus the tools to Gemini; 4. execute tool calls, if Gemini requests them; 5. return the answer), and, in the UI block, a clear button wired as `clear.click(lambda: None, None, chatbot, queue=False)`.
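The first hunk starts at line 10, so the file's opening imports are not part of the diff; the hunk context confirms `from mcp import ClientSession, StdioServerParameters`, and the code below references `os`, `gr`, `genai`, and `types`. A plausible preamble for reading the listing (a sketch only; the actual lines 1-9 are not shown in the diff):

import os

import gradio as gr
from google import genai
from google.genai import types
from mcp import ClientSession, StdioServerParameters

With that in place, the updated app.py reads: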
from mcp.client.sse import sse_client
from mcp.types import CallToolResult

# --- Configuration ---
MCP_SERVER_URL = "https://mgokg-db-timetable-api.hf.space/gradio_api/mcp/"
# Make sure the model name is valid (e.g. gemini-2.0-flash-exp or gemini-1.5-flash)
MODEL_ID = "gemini-2.0-flash-exp"

async def generate_response(user_input, history):
    """
    Core logic: wires together MCP, Google Search, and Gemini.
    """
    # 1. Initialize the Gemini client
    try:
        client = genai.Client(api_key=os.environ.get("GEMINI_API_KEY"))
    except Exception as e:
        yield f"Error initializing Gemini client: {e}. Make sure GEMINI_API_KEY is set."
        return

    # 2. Connect to the MCP server (SSE); sse_client yields a (read, write) stream pair
    try:
        async with sse_client(url=MCP_SERVER_URL) as (read_stream, write_stream):
            async with ClientSession(read_stream, write_stream) as session:
                await session.initialize()

                # Fetch the tool definitions from the MCP server
                mcp_list = await session.list_tools()

                # Convert the MCP tools into Gemini function declarations
                gemini_tools_declarations = []
                for tool in mcp_list.tools:
                    desc = tool.description or "Tool to query DB (Deutsche Bahn) train timetables."
                    gemini_tools_declarations.append(
                        types.FunctionDeclaration(
                            name=tool.name,
                            description=desc,
                            parameters=tool.inputSchema
                        )
                    )

                # Assemble the tools (Google Search + MCP)
                tools_config = [
                    types.Tool(google_search=types.GoogleSearch()),
                    types.Tool(function_declarations=gemini_tools_declarations)
                ]

                # Generation config
                generate_content_config = types.GenerateContentConfig(
                    temperature=0.4,
                    tools=tools_config,
                    system_instruction="You are a helpful assistant. Use 'db_timetable_api_ui_wrapper' for German train connections. Use Google Search for other current info.",
                    response_mime_type="text/plain",
                )

                # Build the conversation for Gemini.
                # For simplicity, only the current user input is sent here;
                # real multi-turn conversations would have to parse 'history'.
                contents = [
                    types.Content(
                        role="user",
                        parts=[types.Part.from_text(text=user_input)],
                    ),
                ]

                # --- Tool-call loop ---
                while True:
                    # client.aio exposes the async API needed for await/async-for streaming
                    response_stream = await client.aio.models.generate_content_stream(
                        model=MODEL_ID,
                        contents=contents,
                        config=generate_content_config,
                    )

                    full_text = ""
                    function_calls = []

                    async for chunk in response_stream:
                        if chunk.text:
                            full_text += chunk.text
                            yield full_text  # live update of the answer text

                        if chunk.function_calls:
                            for fc in chunk.function_calls:
                                function_calls.append(fc)

                    if not function_calls:
                        break  # no more tool calls -> done

                    # Process the tool calls:
                    # 1. Append the model turn to the history (so the model knows it asked)
                    contents.append(types.Content(
                        role="model",
                        parts=[types.Part.from_function_call(name=fc.name, args=fc.args) for fc in function_calls]
                    ))

                    # 2. Execute the tools
                    for fc in function_calls:
                        yield f"\n\n*Rufe MCP Tool auf: {fc.name}...*\n"

                        try:
                            result = await session.call_tool(name=fc.name, arguments=fc.args)

                            # Extract the text parts of the result
                            tool_output = ""
                            if result.content:
                                for item in result.content:
                                    if item.type == "text":
                                        tool_output += item.text

                            # Append the result to the history
                            contents.append(types.Content(
                                role="tool",
                                parts=[types.Part.from_function_response(
                                    name=fc.name,
                                    response={"result": tool_output}
                                )]
                            ))
                        except Exception as e:
                            contents.append(types.Content(
                                role="tool",
                                parts=[types.Part.from_function_response(
                                    name=fc.name,
                                    response={"error": str(e)}
                                )]
                            ))

                    # The loop continues -> Gemini receives the tool results and answers again

    except Exception as e:
        yield f"Ein Fehler ist aufgetreten: {e}"


if __name__ == '__main__':
    with gr.Blocks() as demo:
        gr.Markdown("# Gemini 2.0 Flash + Websearch + MCP (DB)")

        # Chat window
        chatbot = gr.Chatbot(height=500, type="messages")

        # Input area
        with gr.Row():
            msg = gr.Textbox(
                scale=4,
                show_label=False,
                placeholder="Nachricht eingeben (z.B. Zug von Berlin nach München)...",
                container=False
            )
            submit_btn = gr.Button("Send", scale=1, variant="primary")

        # Helper functions for the Gradio event handling
        async def user_turn(user_message, history):
            return "", history + [{"role": "user", "content": user_message}]

        async def bot_turn(history):
            user_message = history[-1]["content"]
            history.append({"role": "assistant", "content": ""})

            async for chunk in generate_response(user_message, history):
                history[-1]["content"] = chunk
                yield history

        # Events: both Enter (msg.submit) and click (submit_btn.click) trigger the same flow
        msg.submit(user_turn, [msg, chatbot], [msg, chatbot], queue=False).then(
            bot_turn, [chatbot], [chatbot]
        )

        submit_btn.click(user_turn, [msg, chatbot], [msg, chatbot], queue=False).then(
            bot_turn, [chatbot], [chatbot]
        )

    demo.launch(show_error=True)
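Because `generate_response` is an async generator, the MCP round trip can be smoke-tested without the Gradio UI. A minimal sketch, assuming GEMINI_API_KEY is exported and the Space at MCP_SERVER_URL is reachable:

import asyncio

async def smoke_test():
    # Print each partial answer exactly as the chatbot would render it
    async for partial in generate_response("Zug von Berlin nach München", history=[]):
        print(partial)

asyncio.run(smoke_test())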