mgokg commited on
Commit
298a5b1
·
verified ·
1 Parent(s): 6b9609d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +52 -53
app.py CHANGED
@@ -1,66 +1,65 @@
1
  import os
2
  import asyncio
 
3
  from google import genai
4
  from google.genai import types
5
  from mcp import ClientSession
6
  from mcp.client.sse import sse_client
7
 
8
# MCP server endpoint (DB timetable Space on Hugging Face).
MCP_URL = "https://mgokg-db-timetable-api.hf.space/gradio_api/mcp/"

async def fetch_train_connections(prompt: str):
    """Ask Gemini for train connections, letting it call MCP tools.

    Opens an SSE connection to the MCP server, advertises the server's
    tools to Gemini, and — if the model requests a tool call — executes it
    on the server and feeds the result back for a final grounded answer.

    Args:
        prompt: The user's natural-language query.

    Returns:
        The model's answer text: the final answer after tool use, or the
        direct answer when no tool call was requested.
    """
    client = genai.Client(api_key=os.environ.get("GEMINI_API_KEY"))
    model_id = "gemini-flash-latest"

    # 1. Connect to the MCP server over SSE.
    async with sse_client(url=MCP_URL) as (read_stream, write_stream):
        async with ClientSession(read_stream, write_stream) as session:
            # The MCP handshake is mandatory before any other call.
            await session.initialize()

            # 2. Fetch the MCP tools and convert them into Gemini
            # function declarations.
            mcp_tools = await session.list_tools()
            tools = types.Tool(function_declarations=[
                {
                    "name": tool.name,
                    "description": tool.description,
                    "parameters": tool.inputSchema,
                }
                for tool in mcp_tools.tools
            ])

            # 3. Send the request to the model.
            contents = [types.Content(role="user", parts=[types.Part(text=prompt)])]
            response = await client.aio.models.generate_content(
                model=model_id,
                contents=contents,
                config=types.GenerateContentConfig(
                    tools=[tools],
                    temperature=0.4,
                ),
            )

            # 4. Tool-calling loop. BUG FIX: `candidates` and `parts` are
            # lists — the original accessed attributes directly on the
            # lists (`response.candidates.content.parts.function_call`),
            # which raises AttributeError. Index the first element.
            first_part = response.candidates[0].content.parts[0]
            if first_part.function_call:
                fc = first_part.function_call

                # Execute the requested tool on the MCP server.
                tool_result = await session.call_tool(fc.name, fc.args)

                # Hand the tool result back to the model. BUG FIX:
                # `call_tool(...).content` is a list of content items, so
                # take the first item's text (the original read `.text`
                # on the list itself).
                tool_response_part = types.Part.from_function_response(
                    name=fc.name,
                    response={"result": tool_result.content[0].text},
                )

                contents.append(response.candidates[0].content)
                contents.append(types.Content(role="user", parts=[tool_response_part]))

                final_response = await client.aio.models.generate_content(
                    model=model_id,
                    contents=contents,
                    config=types.GenerateContentConfig(tools=[tools]),
                )
                return final_response.text

            return response.text
 
 
1
  import os
2
  import asyncio
3
+ import gradio as gr
4
  from google import genai
5
  from google.genai import types
6
  from mcp import ClientSession
7
  from mcp.client.sse import sse_client
8
 
9
async def generate_response(input_text):
    """Generate a streamed Gemini answer grounded in DB timetable data.

    Opens an SSE connection to the MCP server, passes the live MCP session
    (plus Google Search) to Gemini as tools, and streams the response text.

    Args:
        input_text: The user's natural-language query.

    Returns:
        A 2-tuple ``(answer, "")`` — the empty second element clears the
        input textbox in the Gradio UI. On failure the first element is a
        human-readable error message instead of an answer.
    """
    mcp_url = "https://mgokg-db-timetable-api.hf.space/gradio_api/mcp/"
    try:
        client = genai.Client(api_key=os.environ.get("GEMINI_API_KEY"))

        # Connect to the MCP server via SSE (Server-Sent Events).
        async with sse_client(url=mcp_url) as (read_stream, write_stream):
            async with ClientSession(read_stream, write_stream) as mcp_session:
                # BUG FIX: the original line read
                # `await mcp_session.initialize() [4]` — the stray `[4]`
                # (a leaked citation marker) subscripts the awaited result
                # and raises TypeError at runtime.
                await mcp_session.initialize()

                # Tool configuration: Google Search plus the MCP session,
                # which the SDK expands into the server's tool declarations.
                generate_content_config = types.GenerateContentConfig(
                    temperature=0.4,
                    tools=[
                        types.Tool(google_search=types.GoogleSearch()),
                        mcp_session,  # forwards the MCP tools to Gemini
                    ],
                )

                response_text = ""
                # Stream the answer chunk by chunk and accumulate the text.
                async for chunk in client.aio.models.generate_content_stream(
                    model="gemini-2.0-flash",
                    contents=input_text,
                    config=generate_content_config,
                ):
                    if chunk.text:
                        response_text += chunk.text

                return response_text, ""  # (Markdown output, textbox reset)

    except Exception as e:
        # Top-level UI boundary: surface the failure as a message in the
        # output area rather than crashing the Gradio handler.
        return f"Verbindung zum DB-Fahrplan fehlgeschlagen: {str(e)}", ""
 
 
 
 
 
 
 
 
42
 
43
def gradio_wrapper(input_text):
    """Synchronous bridge for Gradio: run the async generator to completion."""
    result = asyncio.run(generate_response(input_text))
    return result
 
 
 
 
 
 
 
46
 
47
# Build the Gradio UI (Blocks layout, Soft theme).
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🚆 Gemini Flash + DB Timetable")

    with gr.Row():
        # Query input; the placeholder shows an example timetable question.
        input_tx = gr.Textbox(
            label="Anfrage",
            placeholder="Wann fährt der nächste Zug von Berlin nach Hamburg?",
            lines=3
        )

    btn = gr.Button("Senden", variant="primary")
    output_md = gr.Markdown()  # area where the model's answer is rendered

    # Event wiring: a click runs the wrapper; the second output clears the
    # input textbox (the handler returns a (markdown, "") tuple).
    btn.click(fn=gradio_wrapper, inputs=input_tx, outputs=[output_md, input_tx])

if __name__ == '__main__':
    demo.launch()