import gradio as gr
import os
import asyncio
from typing import List
from google import genai
from google.genai import types
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client

# MCP server configuration: spawn `npx mcp-remote` as a stdio child process
# that bridges stdin/stdout to the remote HTTP MCP endpoint below
# (no invalid transport parameter is passed to StdioServerParameters).
server_params = StdioServerParameters(
    command="npx",
    args=[
        "mcp-remote",
        "https://mgokg-db-timetable-api.hf.space/gradio_api/mcp/"
    ]
)

async def generate(input_text):
    """Answer *input_text* with Gemini, letting the model call MCP tools.

    Opens a stdio MCP session (via ``npx mcp-remote``, see ``server_params``),
    advertises the server's tools to Gemini as function declarations, then runs
    an agentic loop (capped at 5 turns) that executes requested tool calls and
    feeds the results back to the model.

    Returns:
        A ``(answer_text, "")`` tuple — the empty second element clears the
        Gradio input textbox. Errors are reported in the first element.
    """
    api_key = os.environ.get("GEMINI_API_KEY")
    if not api_key:
        # Fail fast with a clear message instead of an opaque API error later.
        return "Fehler bei der Initialisierung: GEMINI_API_KEY ist nicht gesetzt", ""
    try:
        client = genai.Client(api_key=api_key)
    except Exception as e:
        return f"Fehler bei der Initialisierung: {e}", ""

    model = "gemini-2.5-flash"

    try:
        # Open the MCP session over stdio.
        async with stdio_client(server_params) as (read, write):
            async with ClientSession(read, write) as session:
                await session.initialize()

                # Fetch the MCP tool list.
                mcp_tools_data = await session.list_tools()

                # Convert tools into Gemini-compatible function declarations.
                mcp_declarations = []
                for tool in mcp_tools_data.tools:
                    # Fall back to an empty object schema when a tool
                    # declares no input schema.
                    schema = tool.inputSchema if tool.inputSchema else {
                        "type": "object",
                        "properties": {},
                        "required": []
                    }

                    mcp_declarations.append(
                        types.FunctionDeclaration(
                            name=tool.name,
                            description=tool.description or "MCP Tool",
                            parameters=schema
                        )
                    )

                # NOTE(review): only the MCP tools are exposed here — Google
                # Search is NOT combined in, despite what the UI title of an
                # earlier version suggested.
                tools = [types.Tool(function_declarations=mcp_declarations)]

                contents = [types.Content(
                    role="user",
                    parts=[types.Part.from_text(text=input_text)]
                )]

                # First model call.
                response = await client.aio.models.generate_content(
                    model=model,
                    contents=contents,
                    config=types.GenerateContentConfig(
                        tools=tools,
                        temperature=0.4
                    )
                )

                # Agentic loop: execute requested tool calls, feed results back.
                turn_count = 0
                max_turns = 5  # hard cap to avoid infinite tool-call loops

                # Guard candidates/parts explicitly: an empty candidate list or
                # a None parts attribute previously raised inside the loop.
                while (response.candidates
                       and hasattr(response.candidates[0].content, 'parts')
                       and turn_count < max_turns):
                    parts = response.candidates[0].content.parts or []
                    function_calls = [
                        part for part in parts
                        if hasattr(part, 'function_call') and part.function_call
                    ]

                    if not function_calls:
                        break

                    turn_count += 1
                    contents.append(response.candidates[0].content)
                    tool_responses = []

                    for part in function_calls:
                        fc = part.function_call
                        try:
                            # Execute the MCP tool.
                            tool_result = await session.call_tool(fc.name, dict(fc.args))

                            # Flatten the MCP result into plain text.
                            if tool_result.isError:
                                result_text = f"Error: {tool_result.content[0].text if tool_result.content else 'Unknown error'}"
                            else:
                                result_text = tool_result.content[0].text if tool_result.content else "No result"

                            tool_responses.append(
                                types.Part.from_function_response(
                                    name=fc.name,
                                    response={"result": result_text}
                                )
                            )
                        except Exception as e:
                            # Report tool failures back to the model instead of
                            # aborting the whole request.
                            tool_responses.append(
                                types.Part.from_function_response(
                                    name=fc.name,
                                    response={"error": str(e)}
                                )
                            )

                    contents.append(types.Content(role="function", parts=tool_responses))

                    # Next model call with the tool results appended.
                    response = await client.aio.models.generate_content(
                        model=model,
                        contents=contents,
                        config=types.GenerateContentConfig(tools=tools, temperature=0.4)
                    )

                # response.text is None when the final candidate carries no
                # text parts; normalise to "" for the Markdown output.
                return response.text or "", ""

    except Exception as e:
        return f"Fehler während der Verarbeitung: {str(e)}", ""

# Gradio UI wrapper: bridges the async generate() coroutine to Gradio's
# synchronous callback interface.
def ui_wrapper(input_text):
    """Run generate() to completion and return its (output, "") tuple."""
    try:
        result = asyncio.run(generate(input_text))
    except Exception as e:
        return f"UI Fehler: {str(e)}", ""
    return result

if __name__ == '__main__':
    with gr.Blocks() as demo:
        # Title fixed to match reality: generate() uses gemini-2.5-flash and
        # only the DB-timetable MCP tools are wired in (no Google Search).
        gr.Markdown("# Gemini 2.5 Flash + DB Timetable (MCP)")
        output_textbox = gr.Markdown()
        input_textbox = gr.Textbox(
            lines=3,
            label="Anfrage",
            placeholder="z.B. Wie komme ich von Berlin nach Hamburg?"
        )
        submit_button = gr.Button("Senden")

        # Second output clears the input textbox after each request.
        submit_button.click(
            fn=ui_wrapper,
            inputs=input_textbox,
            outputs=[output_textbox, input_textbox]
        )

    demo.launch(show_error=True)

"""
import base64
import gradio as gr
import os
import json
from google import genai
from google.genai import types
from gradio_client import Client


def generate(input_text):
    try:
        client = genai.Client(
            api_key=os.environ.get("GEMINI_API_KEY"),
        )
    except Exception as e:
        return f"Error initializing client: {e}.  Make sure GEMINI_API_KEY is set."

    model = "gemini-2.5-flash-lite"
    contents = [
        types.Content(
            role="user",
            parts=[
                types.Part.from_text(text=f"{input_text}"),
            ],
        ),
    ]
    tools = [
        types.Tool(google_search=types.GoogleSearch()),
    ]
    generate_content_config = types.GenerateContentConfig(
        temperature=0.4,
        thinking_config = types.ThinkingConfig(
            thinking_budget=0,
        ),
        tools=tools,
        response_mime_type="text/plain",
    )


    response_text = ""
    try:
      for chunk in client.models.generate_content_stream(
          model=model,
          contents=contents,
          config=generate_content_config,
      ):
          response_text += chunk.text
    except Exception as e:
        return f"Error during generation: {e}"
    data = response_text  
    #data = clean_json_string(response_text)
    data = data[:-1]
    return response_text, ""
   

if __name__ == '__main__':

    with gr.Blocks() as demo:
        title=gr.Markdown("# Gemini 2.0 Flash + Websearch")
        output_textbox = gr.Markdown()
        input_textbox = gr.Textbox(lines=3, label="", placeholder="Enter message here...")
        submit_button = gr.Button("send")
        submit_button.click(fn=generate,inputs=input_textbox,outputs=[output_textbox, input_textbox])
    demo.launch(show_error=True)
"""