mgokg commited on
Commit
70844b7
·
verified ·
1 Parent(s): ad75765

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -206
app.py CHANGED
@@ -1,220 +1,43 @@
1
- import gradio as gr
2
  import os
3
- import asyncio
4
- from typing import List
5
- from google import genai
6
- from google.genai import types
7
- from mcp import ClientSession, StdioServerParameters
8
- from mcp.client.stdio import stdio_client
9
-
10
- # MCP-Server Konfiguration (ohne ungültigen Transport-Parameter)
11
- server_params = StdioServerParameters(
12
- command="npx",
13
- args=[
14
- "mcp-remote",
15
- "https://mgokg-db-timetable-api.hf.space/gradio_api/mcp/"
16
- ]
17
- )
18
-
19
- async def generate(input_text):
20
- try:
21
- client = genai.Client(api_key=os.environ.get("GEMINI_API_KEY"))
22
- except Exception as e:
23
- return f"Fehler bei der Initialisierung: {e}", ""
24
-
25
- model = "gemini-flash-latest"
26
-
27
- try:
28
- # MCP-Session aufbauen
29
- async with stdio_client(server_params) as (read, write):
30
- async with ClientSession(read, write) as session:
31
- await session.initialize()
32
-
33
- # MCP-Tools abrufen
34
- mcp_tools_data = await session.list_tools()
35
-
36
- # Tools in Gemini-kompatibles Format konvertieren
37
- mcp_declarations = []
38
- for tool in mcp_tools_data.tools:
39
- # Sicherstellen, dass inputSchema korrekt formatiert ist
40
- schema = tool.inputSchema if tool.inputSchema else {
41
- "type": "object",
42
- "properties": {},
43
- "required": []
44
- }
45
-
46
- mcp_declarations.append(
47
- types.FunctionDeclaration(
48
- name=tool.name,
49
- description=tool.description or "MCP Tool",
50
- parameters=schema
51
- )
52
- )
53
-
54
- # Tools kombinieren: Google Search + MCP Tools
55
- tools = [types.Tool(function_declarations=mcp_declarations)]
56
-
57
-
58
- contents = [types.Content(
59
- role="user",
60
- parts=[types.Part.from_text(text=input_text)]
61
- )]
62
-
63
- # Erster API-Aufruf
64
- response = await client.aio.models.generate_content(
65
- model=model,
66
- contents=contents,
67
- config=types.GenerateContentConfig(
68
- tools=tools,
69
- temperature=0.4
70
- )
71
- )
72
-
73
- # Agentic Loop für Tool-Calls
74
- turn_count = 0
75
- max_turns = 5
76
-
77
- while hasattr(response.candidates[0].content, 'parts') and turn_count < max_turns:
78
- function_calls = [
79
- part for part in response.candidates[0].content.parts
80
- if hasattr(part, 'function_call') and part.function_call
81
- ]
82
-
83
- if not function_calls:
84
- break
85
-
86
- turn_count += 1
87
- contents.append(response.candidates[0].content)
88
- tool_responses = []
89
-
90
- for part in function_calls:
91
- fc = part.function_call
92
- try:
93
- # MCP-Tool ausführen
94
- tool_result = await session.call_tool(fc.name, dict(fc.args))
95
-
96
- # Ergebnis formatieren
97
- if tool_result.isError:
98
- result_text = f"Error: {tool_result.content[0].text if tool_result.content else 'Unknown error'}"
99
- else:
100
- result_text = tool_result.content[0].text if tool_result.content else "No result"
101
-
102
- tool_responses.append(
103
- types.Part.from_function_response(
104
- name=fc.name,
105
- response={"result": result_text}
106
- )
107
- )
108
- except Exception as e:
109
- tool_responses.append(
110
- types.Part.from_function_response(
111
- name=fc.name,
112
- response={"error": str(e)}
113
- )
114
- )
115
-
116
- contents.append(types.Content(role="function", parts=tool_responses))
117
-
118
- # Nächster API-Aufruf
119
- response = await client.aio.models.generate_content(
120
- model=model,
121
- contents=contents,
122
- config=types.GenerateContentConfig(tools=tools, temperature=0.4)
123
- )
124
-
125
- return response.text, ""
126
-
127
- except Exception as e:
128
- return f"Fehler während der Verarbeitung: {str(e)}", ""
129
-
130
- # Gradio UI Wrapper
131
- def ui_wrapper(input_text):
132
- try:
133
- return asyncio.run(generate(input_text))
134
- except Exception as e:
135
- return f"UI Fehler: {str(e)}", ""
136
-
137
- if __name__ == '__main__':
138
- with gr.Blocks() as demo:
139
- gr.Markdown("# Gemini 2.0 Flash + Google Search + DB Timetable (MCP)")
140
- output_textbox = gr.Markdown()
141
- input_textbox = gr.Textbox(
142
- lines=3,
143
- label="Anfrage",
144
- placeholder="z.B. Wie komme ich von Berlin nach Hamburg?"
145
- )
146
- submit_button = gr.Button("Senden")
147
-
148
- submit_button.click(
149
- fn=ui_wrapper,
150
- inputs=input_textbox,
151
- outputs=[output_textbox, input_textbox]
152
- )
153
-
154
- demo.launch(show_error=True)
155
-
156
-
157
- import base64
158
  import gradio as gr
159
- import os
160
- import json
161
  from google import genai
162
  from google.genai import types
163
- from gradio_client import Client
164
-
165
 
166
- def generate(input_text):
167
- try:
168
- client = genai.Client(
169
- api_key=os.environ.get("GEMINI_API_KEY"),
170
- )
171
- except Exception as e:
172
- return f"Error initializing client: {e}. Make sure GEMINI_API_KEY is set."
173
 
174
- model = "gemini-2.5-flash-lite"
 
175
  contents = [
176
  types.Content(
177
  role="user",
178
- parts=[
179
- types.Part.from_text(text=f"{input_text}"),
180
- ],
181
  ),
182
  ]
183
- tools = [
184
- types.Tool(google_search=types.GoogleSearch()),
185
- ]
186
- generate_content_config = types.GenerateContentConfig(
187
- temperature=0.4,
188
- thinking_config = types.ThinkingConfig(
189
- thinking_budget=0,
190
- ),
191
- tools=tools,
192
- response_mime_type="text/plain",
193
- )
194
-
195
 
196
  response_text = ""
197
- try:
198
- for chunk in client.models.generate_content_stream(
199
- model=model,
200
- contents=contents,
201
- config=generate_content_config,
202
- ):
203
- response_text += chunk.text
204
- except Exception as e:
205
- return f"Error during generation: {e}"
206
- data = response_text
207
- #data = clean_json_string(response_text)
208
- data = data[:-1]
209
- return response_text, ""
210
-
211
-
212
- if __name__ == '__main__':
 
 
213
 
214
- with gr.Blocks() as demo:
215
- title=gr.Markdown("# Gemini 2.0 Flash + Websearch")
216
- output_textbox = gr.Markdown()
217
- input_textbox = gr.Textbox(lines=3, label="", placeholder="Enter message here...")
218
- submit_button = gr.Button("send")
219
- submit_button.click(fn=generate,inputs=input_textbox,outputs=[output_textbox, input_textbox])
220
- demo.launch(show_error=True)
 
 
1
  import os
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  import gradio as gr
 
 
3
  from google import genai
4
  from google.genai import types
 
 
5
 
6
# Create the Gemini API client once at import time; the API key is read
# from the GEMINI_API_KEY environment variable.
client = genai.Client(api_key=os.environ.get("GEMINI_API_KEY"))
 
 
 
 
 
8
 
9
def gemini_chat(prompt, history):
    """Stream a Gemini answer for *prompt*, grounded with Google Search.

    Fix over the previous version: *history* (supplied by
    ``gr.ChatInterface(type="messages")``) was ignored, so every turn lost
    all conversational context. The full conversation is now replayed to
    the model before the new prompt.

    Parameters
    ----------
    prompt : str
        The latest user message.
    history : list[dict] | None
        Prior turns in Gradio "messages" format, i.e. dicts with
        ``role`` ("user"/"assistant") and ``content`` keys.
        # NOTE(review): assumes text-only content — confirm no file parts.

    Yields
    ------
    str
        The accumulated response text after each streamed chunk, so the
        Gradio UI updates live.
    """
    # Rebuild the conversation for the model. Gradio labels model turns
    # "assistant", while the Gemini API expects the role "model".
    contents = []
    for turn in history or []:
        role = "model" if turn.get("role") == "assistant" else "user"
        text = turn.get("content")
        if isinstance(text, str) and text:
            contents.append(
                types.Content(role=role, parts=[types.Part.from_text(text=text)])
            )
    contents.append(
        types.Content(role="user", parts=[types.Part.from_text(text=prompt)])
    )

    # Ground responses with the built-in Google Search tool.
    tools = [types.Tool(google_search=types.GoogleSearch())]
    config = types.GenerateContentConfig(tools=tools)

    response_text = ""
    for chunk in client.models.generate_content_stream(
        model="gemini-2.0-flash-001",
        contents=contents,
        config=config,
    ):
        # Some chunks (e.g. tool/metadata frames) carry no text; skip them
        # so None is never concatenated.
        if chunk.text:
            response_text += chunk.text
            yield response_text
33
+
34
# Chat UI: gemini_chat is a generator, so Gradio renders the response as
# it streams in. "messages" mode passes history as role/content dicts.
demo = gr.ChatInterface(
    fn=gemini_chat,
    title="Gemini Flash mit Google Search",
    description="Gib eine Frage ein, und Gemini nutzt die Google Suche für aktuelle Antworten.",
    type="messages",
)
41
 
42
# Start the Gradio server only when executed directly (not on import).
if __name__ == "__main__":
    demo.launch()