mgokg committed on
Commit
3e1cf6c
·
verified ·
1 Parent(s): e04f166

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +50 -164
app.py CHANGED
@@ -1,179 +1,65 @@
 
1
  import gradio as gr
2
  import os
3
- import asyncio
4
  import json
5
  from google import genai
6
  from google.genai import types
 
7
 
8
- # MCP Imports
9
- from mcp import ClientSession, StdioServerParameters
10
- from mcp.client.sse import sse_client
11
- from mcp.types import CallToolResult
12
 
13
- # --- Konfiguration ---
14
- MCP_SERVER_URL = "https://mgokg-db-timetable-api.hf.space/gradio_api/mcp/"
15
- # Stelle sicher, dass du einen validen Model-Namen hast (z.B. gemini-2.0-flash-exp oder gemini-1.5-flash)
16
- MODEL_ID = "gemini-2.0-flash-exp"
17
-
18
- async def generate_response(user_input, history):
19
- """
20
- Core Logic: Verbindet MCP, Google Search und Gemini.
21
- """
22
- # 1. Gemini Client initialisieren
23
  try:
24
- client = genai.Client(api_key=os.environ.get("GEMINI_API_KEY"))
 
 
25
  except Exception as e:
26
- yield f"Error initializing Gemini client: {e}. Make sure GEMINI_API_KEY is set."
27
- return
28
-
29
- # 2. Verbindung zum MCP Server aufbauen (SSE)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
  try:
31
- async with sse_client(url=MCP_SERVER_URL) as streams:
32
- async with ClientSession(streams.read, streams.write) as session:
33
- await session.initialize()
34
-
35
- # Tools vom MCP Server abrufen
36
- mcp_list = await session.list_tools()
37
-
38
- # Tools für Gemini konvertieren
39
- gemini_tools_declarations = []
40
- for tool in mcp_list.tools:
41
- desc = tool.description or "Tool to query DB (Deutsche Bahn) train timetables."
42
- gemini_tools_declarations.append(
43
- types.FunctionDeclaration(
44
- name=tool.name,
45
- description=desc,
46
- parameters=tool.inputSchema
47
- )
48
- )
49
-
50
- # Tools zusammenstellen (Google Search + MCP)
51
- tools_config = [
52
- types.Tool(google_search=types.GoogleSearch()),
53
- types.Tool(function_declarations=gemini_tools_declarations)
54
- ]
55
-
56
- # Config
57
- generate_content_config = types.GenerateContentConfig(
58
- temperature=0.4,
59
- tools=tools_config,
60
- system_instruction="You are a helpful assistant. Use 'db_timetable_api_ui_wrapper' for German train connections. Use Google Search for other current info.",
61
- response_mime_type="text/plain",
62
- )
63
-
64
- # History für Gemini aufbereiten
65
- # Wir nehmen hier vereinfacht nur den aktuellen User-Input,
66
- # für echte Multi-Turn-Gespräche müsste man 'history' parsen.
67
- contents = [
68
- types.Content(
69
- role="user",
70
- parts=[types.Part.from_text(text=user_input)],
71
- ),
72
- ]
73
-
74
- # --- Loop für Tool Calls ---
75
- while True:
76
- response_stream = await client.models.generate_content_stream(
77
- model=MODEL_ID,
78
- contents=contents,
79
- config=generate_content_config,
80
- )
81
-
82
- full_text = ""
83
- function_calls = []
84
-
85
- async for chunk in response_stream:
86
- if chunk.text:
87
- full_text += chunk.text
88
- yield full_text # Live-Update des Textes
89
-
90
- if chunk.function_calls:
91
- for fc in chunk.function_calls:
92
- function_calls.append(fc)
93
-
94
- if not function_calls:
95
- break # Keine Tools mehr, wir sind fertig
96
-
97
- # Tool Calls verarbeiten
98
- # 1. Modell-Antwort zur History hinzufügen (damit es weiß, dass es gefragt hat)
99
- contents.append(types.Content(
100
- role="model",
101
- parts=[types.Part.from_function_call(name=fc.name, args=fc.args) for fc in function_calls]
102
- ))
103
-
104
- # 2. Tools ausführen
105
- for fc in function_calls:
106
- yield f"\n\n*Rufe MCP Tool auf: {fc.name}...*\n"
107
-
108
- try:
109
- result = await session.call_tool(name=fc.name, arguments=fc.args)
110
-
111
- # Ergebnis extrahieren
112
- tool_output = ""
113
- if result.content:
114
- for item in result.content:
115
- if item.type == "text":
116
- tool_output += item.text
117
-
118
- # Ergebnis zur History
119
- contents.append(types.Content(
120
- role="tool",
121
- parts=[types.Part.from_function_response(
122
- name=fc.name,
123
- response={"result": tool_output}
124
- )]
125
- ))
126
- except Exception as e:
127
- contents.append(types.Content(
128
- role="tool",
129
- parts=[types.Part.from_function_response(
130
- name=fc.name,
131
- response={"error": str(e)}
132
- )]
133
- ))
134
-
135
- # Schleife läuft weiter -> Gemini bekommt Tool-Ergebnisse und antwortet neu
136
-
137
  except Exception as e:
138
- yield f"Ein Fehler ist aufgetreten: {e}"
139
-
 
 
 
 
140
 
141
  if __name__ == '__main__':
142
- with gr.Blocks() as demo:
143
- gr.Markdown("# Gemini 2.0 Flash + Websearch + MCP (DB)")
144
-
145
- # Chat-Fenster
146
- chatbot = gr.Chatbot(height=500, type="messages")
147
-
148
- # Eingabebereich
149
- with gr.Row():
150
- msg = gr.Textbox(
151
- scale=4,
152
- show_label=False,
153
- placeholder="Nachricht eingeben (z.B. Zug von Berlin nach München)...",
154
- container=False
155
- )
156
- submit_btn = gr.Button("Send", scale=1, variant="primary")
157
-
158
- # Hilfsfunktionen für Gradio Event-Handling
159
- async def user_turn(user_message, history):
160
- return "", history + [{"role": "user", "content": user_message}]
161
 
162
- async def bot_turn(history):
163
- user_message = history[-1]["content"]
164
- history.append({"role": "assistant", "content": ""})
165
-
166
- async for chunk in generate_response(user_message, history):
167
- history[-1]["content"] = chunk
168
- yield history
169
-
170
- # Events: Sowohl Enter (msg.submit) als auch Klick (submit_btn.click) lösen das Gleiche aus
171
- msg.submit(user_turn, [msg, chatbot], [msg, chatbot], queue=False).then(
172
- bot_turn, [chatbot], [chatbot]
173
- )
174
-
175
- submit_btn.click(user_turn, [msg, chatbot], [msg, chatbot], queue=False).then(
176
- bot_turn, [chatbot], [chatbot]
177
- )
178
 
179
- demo.launch(show_error=True)
 
1
+ import base64
2
  import gradio as gr
3
  import os
 
4
  import json
5
  from google import genai
6
  from google.genai import types
7
+ from gradio_client import Client
8
 
 
 
 
 
9
 
10
+ def generate(input_text):
 
 
 
 
 
 
 
 
 
11
  try:
12
+ client = genai.Client(
13
+ api_key=os.environ.get("GEMINI_API_KEY"),
14
+ )
15
  except Exception as e:
16
+ return f"Error initializing client: {e}. Make sure GEMINI_API_KEY is set."
17
+
18
+ model = "gemini-flash-latest"
19
+ contents = [
20
+ types.Content(
21
+ role="user",
22
+ parts=[
23
+ types.Part.from_text(text=f"{input_text}"),
24
+ ],
25
+ ),
26
+ ]
27
+ tools = [
28
+ types.Tool(google_search=types.GoogleSearch()),
29
+ ]
30
+ generate_content_config = types.GenerateContentConfig(
31
+ temperature=0.4,
32
+ thinking_config = types.ThinkingConfig(
33
+ thinking_budget=0,
34
+ ),
35
+ tools=tools,
36
+ response_mime_type="text/plain",
37
+ )
38
+
39
+
40
+ response_text = ""
41
  try:
42
+ for chunk in client.models.generate_content_stream(
43
+ model=model,
44
+ contents=contents,
45
+ config=generate_content_config,
46
+ ):
47
+ response_text += chunk.text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
48
  except Exception as e:
49
+ return f"Error during generation: {e}"
50
+ data = response_text
51
+ #data = clean_json_string(response_text)
52
+ data = data[:-1]
53
+ return response_text, ""
54
+
55
 
56
if __name__ == '__main__':
    # Minimal front-end: one input box, one Markdown pane for the answer.
    with gr.Blocks() as demo:
        heading = gr.Markdown("# Gemini 2.0 Flash + Websearch")
        answer_pane = gr.Markdown()
        message_box = gr.Textbox(lines=3, label="", placeholder="Enter message here...")
        send_button = gr.Button("send")
        # generate() returns (answer, input-box value): first output fills the
        # Markdown pane, second rewrites the textbox.
        send_button.click(
            fn=generate,
            inputs=message_box,
            outputs=[answer_pane, message_box],
        )
        demo.launch(show_error=True)
 
 
 
 
 
 
 
 
 
65