dtometzki committed on
Commit
3ffbe14
·
verified ·
1 Parent(s): 5809146

Update app_1.py

Browse files
Files changed (1) hide show
  1. app_1.py +33 -19
app_1.py CHANGED
@@ -1,5 +1,6 @@
1
  import os
2
  import gradio as gr
 
3
  from google import genai
4
  from google.genai import types
5
 
@@ -8,6 +9,8 @@ client = genai.Client(
8
  api_key=os.environ.get("GEMINI_API_KEY"),
9
  )
10
 
 
 
11
  def model_chat(message, history):
12
  try:
13
  contents = []
@@ -36,34 +39,45 @@ def model_chat(message, history):
36
  if last_role == "user":
37
  contents.append(types.Content(role="model", parts=[types.Part.from_text(text="[Keine Antwort erhalten]")]))
38
 
39
- # 2. Aktuelle Nachricht & Dateien (Multimodal)
40
  current_parts = []
41
 
42
- # Text-Teil hinzufügen
43
  if message["text"]:
44
  current_parts.append(types.Part.from_text(text=message["text"]))
45
 
46
- # Datei-Teile hinzufügen
47
  for file_path in message["files"]:
48
- # Da wir im Lite-Modell sind, ist eine einfache MIME-Bestimmung ratsam
49
- mime_type = "image/jpeg"
50
- if file_path.lower().endswith(".pdf"):
51
- mime_type = "application/pdf"
52
- elif file_path.lower().endswith((".png", ".webp")):
53
- mime_type = f"image/{file_path.split('.')[-1]}"
54
 
55
- with open(file_path, "rb") as f:
56
- current_parts.append(
57
- types.Part.from_bytes(data=f.read(), mime_type=mime_type)
58
- )
 
 
 
 
 
 
 
 
 
 
 
 
59
 
60
- # Falls nur eine Datei ohne Text gesendet wurde
61
  if not current_parts:
62
- current_parts.append(types.Part.from_text(text="Analysiere diese Datei."))
 
63
 
64
  contents.append(types.Content(role="user", parts=current_parts))
65
 
66
- # 3. Konfiguration (Unverändert wie gewünscht)
67
  model_id = "gemini-3.1-flash-lite-preview"
68
 
69
  tools = [types.Tool(googleSearch=types.GoogleSearch())]
@@ -73,7 +87,7 @@ def model_chat(message, history):
73
  tools=tools,
74
  )
75
 
76
- # 4. Stream
77
  response_text = ""
78
  for chunk in client.models.generate_content_stream(
79
  model=model_id,
@@ -87,11 +101,11 @@ def model_chat(message, history):
87
  except Exception as e:
88
  yield f"❌ Fehler: {str(e)}"
89
 
90
- # Gradio Interface mit multimodal=True
91
  demo = gr.ChatInterface(
92
  fn=model_chat,
93
  title="Gemini Thinking AI",
94
- description="KI mit Suchfunktion und Datei-Upload.",
95
  multimodal=True,
96
  )
97
 
 
1
  import os
2
  import gradio as gr
3
+ import mimetypes
4
  from google import genai
5
  from google.genai import types
6
 
 
9
  api_key=os.environ.get("GEMINI_API_KEY"),
10
  )
11
 
12
+ MAX_FILE_SIZE_MB = 2
13
+
14
  def model_chat(message, history):
15
  try:
16
  contents = []
 
39
  if last_role == "user":
40
  contents.append(types.Content(role="model", parts=[types.Part.from_text(text="[Keine Antwort erhalten]")]))
41
 
42
+ # 2. Aktuelle Nachricht & Datei-Upload (Universell mit 2MB Limit)
43
  current_parts = []
44
 
45
+ # Text hinzufügen
46
  if message["text"]:
47
  current_parts.append(types.Part.from_text(text=message["text"]))
48
 
49
+ # Dateien verarbeiten
50
  for file_path in message["files"]:
51
+ file_size = os.path.getsize(file_path) / (1024 * 1024) # In MB
52
+
53
+ if file_size > MAX_FILE_SIZE_MB:
54
+ yield f"⚠️ Datei '{os.path.basename(file_path)}' überspringt das 2 MB Limit ({file_size:.2f} MB)."
55
+ continue
 
56
 
57
+ mime_type, _ = mimetypes.guess_type(file_path)
58
+ mime_type = mime_type or "application/octet-stream"
59
+
60
+ # Unterscheidung: Text vs. Binär (Bild, PDF, etc.)
61
+ if mime_type.startswith("text/"):
62
+ try:
63
+ with open(file_path, "r", encoding="utf-8", errors="replace") as f:
64
+ content_str = f.read()
65
+ current_parts.append(types.Part.from_text(text=f"Dateiinhalt ({os.path.basename(file_path)}):\n\n{content_str}"))
66
+ except Exception:
67
+ # Fallback auf Bytes, falls Text-Lesen scheitert
68
+ with open(file_path, "rb") as f:
69
+ current_parts.append(types.Part.from_bytes(data=f.read(), mime_type=mime_type))
70
+ else:
71
+ with open(file_path, "rb") as f:
72
+ current_parts.append(types.Part.from_bytes(data=f.read(), mime_type=mime_type))
73
 
 
74
  if not current_parts:
75
+ yield "Bitte gib eine Nachricht ein oder lade eine passende Datei hoch."
76
+ return
77
 
78
  contents.append(types.Content(role="user", parts=current_parts))
79
 
80
+ # 3. Konfiguration (Unverändert: gemini-3.1-flash-lite-preview)
81
  model_id = "gemini-3.1-flash-lite-preview"
82
 
83
  tools = [types.Tool(googleSearch=types.GoogleSearch())]
 
87
  tools=tools,
88
  )
89
 
90
+ # 4. Stream starten
91
  response_text = ""
92
  for chunk in client.models.generate_content_stream(
93
  model=model_id,
 
101
  except Exception as e:
102
  yield f"❌ Fehler: {str(e)}"
103
 
104
+ # Gradio Interface
105
  demo = gr.ChatInterface(
106
  fn=model_chat,
107
  title="Gemini Thinking AI",
108
+ description="KI mit Suche und universellem Datei-Upload (max. 2 MB).",
109
  multimodal=True,
110
  )
111