mgokg committed on
Commit
6f9f02e
·
verified ·
1 Parent(s): cbc7caf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -35
app.py CHANGED
@@ -29,43 +29,38 @@ def clean_json_string(json_str):
29
  return cleaned_str # Return extracted part anyway
30
 
31
def generate(input_text):
    """Stream a grounded Gemini answer for *input_text*.

    Generator: yields ``(partial_answer, "")`` tuples as chunks arrive —
    two values because the Gradio UI wires two outputs to this callback.
    On any failure it yields a single ``(error_message, input_text)`` tuple.
    Reads the API key from the ``GEMINI_API_KEY`` environment variable.
    """
    try:
        client = genai.Client(
            api_key=os.environ.get("GEMINI_API_KEY"),
        )

        model = "gemini-2.0-flash"  # Note: gemini-3 does not exist yet
        contents = [
            types.Content(
                role="user",
                parts=[
                    types.Part.from_text(text=input_text),
                ],
            ),
        ]
        # Enable Google Search grounding for the request.
        tools = [
            types.Tool(google_search=types.GoogleSearch()),
        ]
        generate_content_config = types.GenerateContentConfig(
            temperature=0.35,
            # thinking_config is currently only available for special models
            # thinking_config=types.ThinkingConfig(thinking_level="HIGH"),
            tools=tools,
        )

        full_response = ""
        for chunk in client.models.generate_content_stream(
            model=model,
            contents=contents,
            config=generate_content_config,
        ):
            if chunk.text:
                full_response += chunk.text
                # We return two values because Gradio expects two outputs
                yield full_response, ""

    except Exception as e:
        # Surface the error in the first output; echo the input in the second.
        yield f"Error: {e}", input_text
69
 
70
 
71
  def generate1(input_text):
 
29
  return cleaned_str # Return extracted part anyway
30
 
31
def generate(input_text):
    """Stream a grounded Gemini answer for *input_text*.

    Generator: yields ``(partial_answer, "")`` tuples as streaming chunks
    arrive — two values because the Gradio UI wires two outputs to this
    callback. On failure it yields ``(error_message, input_text)`` instead
    of raising, so the UI shows the error rather than hanging.
    Reads the API key from the ``GEMINI_API_KEY`` environment variable.
    """
    try:
        client = genai.Client(
            api_key=os.environ.get("GEMINI_API_KEY"),
        )

        model = "gemini-3-flash-preview"
        contents = [
            types.Content(
                role="user",
                parts=[
                    # f-string coerces non-str inputs to str for from_text.
                    types.Part.from_text(text=f"{input_text}"),
                ],
            ),
        ]
        # Enable Google Search grounding. The Python SDK keyword is
        # snake_case `google_search` (camelCase `googleSearch` is the
        # JSON/REST field name).
        tools = [
            types.Tool(google_search=types.GoogleSearch()),
        ]
        generate_content_config = types.GenerateContentConfig(
            temperature=0.35,
            # thinking_level is a Gemini-3-era knob — assumes the installed
            # google-genai version supports it; TODO confirm.
            thinking_config=types.ThinkingConfig(
                thinking_level="HIGH",
            ),
            tools=tools,
        )

        # BUG FIX: the previous revision printed chunks to stdout and
        # returned None, so the Gradio outputs never received anything.
        # Restore the accumulate-and-yield streaming contract.
        full_response = ""
        for chunk in client.models.generate_content_stream(
            model=model,
            contents=contents,
            config=generate_content_config,
        ):
            if chunk.text:
                full_response += chunk.text
                yield full_response, ""

    except Exception as e:
        yield f"Error: {e}", input_text
 
 
 
 
63
 
 
 
64
 
65
 
66
  def generate1(input_text):