Alibrown committed on
Commit
7ecd1b4
·
verified ·
1 Parent(s): dab596d

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +237 -0
app.py ADDED
@@ -0,0 +1,237 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import streamlit as st
import tempfile
import io
import pandas as pd
import zipfile
import PyPDF2

# Imports for the Gemini SDK.
import google.generativeai as genai
# FIX: `google.generativeai` has no `errors` submodule (that path belongs to the
# newer, separate `google-genai` SDK). The legacy SDK surfaces API failures as
# google.api_core exceptions; aliasing keeps the `APIError` name used below.
from google.api_core.exceptions import GoogleAPIError as APIError
from PIL import Image  # Kept: the SDK accepts PIL Image objects directly.

# ----------------------------------------------------
# Workaround for restrictive environments: force Streamlit's home/config dir
# into a writable temp location so the app starts when $HOME is read-only.
# ----------------------------------------------------
TEMP_STREAMLIT_HOME = os.path.join(tempfile.gettempdir(), "st_config_workaround")
os.makedirs(TEMP_STREAMLIT_HOME, exist_ok=True)
os.environ["STREAMLIT_HOME"] = TEMP_STREAMLIT_HOME
os.environ["STREAMLIT_GATHER_USAGE_STATS"] = "false"
CONFIG_PATH = os.path.join(TEMP_STREAMLIT_HOME, "config.toml")
CONFIG_CONTENT = """
[browser]
gatherUsageStats = false
"""
if not os.path.exists(CONFIG_PATH):
    try:
        with open(CONFIG_PATH, "w") as f:
            f.write(CONFIG_CONTENT)
    except OSError:
        # FIX: narrowed the bare `except:` — only ignore filesystem failures
        # (e.g. /tmp not writable); anything else should surface.
        pass

# ----------------------------------------------------
# END OF WORKAROUNDS
# ----------------------------------------------------
# --- Page configuration ---
st.set_page_config(page_title="Gemini AI Chat", layout="wide", initial_sidebar_state="expanded")
st.title("🤖 Gemini AI Chat Interface")
st.markdown("""
**Welcome to the Gemini AI Chat Interface!**
Chat seamlessly with Google's advanced Gemini AI models, supporting multiple input types.
""")

# Session state management: chat history and the currently attached file.
if "messages" not in st.session_state:
    # FIX: was `st.session_session.messages` — a typo that raises
    # AttributeError on first run; must be `st.session_state`.
    st.session_state.messages = []
if "uploaded_content" not in st.session_state:
    st.session_state.uploaded_content = None
# --- File-processing helpers ---

# NOTE: encode_image was removed — the SDK accepts PIL Image objects directly.
def process_file(uploaded_file):
    """Extract the content of an uploaded file for use as a chat attachment.

    Args:
        uploaded_file: A binary file-like object with a ``name`` attribute
            (Streamlit's ``UploadedFile`` or anything compatible).

    Returns:
        dict: One of
            ``{"type": "image", "content": PIL.Image.Image}`` for images,
            ``{"type": "text", "content": str}`` for readable text content,
            ``{"type": "error", "content": str}`` on failure or unsupported input.
    """
    file_type = uploaded_file.name.split('.')[-1].lower()
    # Extensions that can safely be decoded as plain UTF-8 text.
    # FIX: one canonical, dot-free set instead of rebuilding it from a
    # dotted tuple with exclusions (the old code duplicated "txt").
    plain_text_types = {"txt", "py", "html", "js", "css", "json", "xml", "sql"}
    # Extensions extracted as text from inside ZIP archives.
    # FIX: ".xlsx" removed — it is a binary (zipped XML) format and decoding
    # it with errors='ignore' produced garbage in the preview.
    zip_text_extensions = ('.txt', '.csv', '.py', '.html', '.js', '.css', '.json', '.xml', '.sql')

    if file_type in ("jpg", "jpeg", "png"):
        # Store the PIL Image object directly; the SDK accepts it as a part.
        return {"type": "image", "content": Image.open(uploaded_file).convert('RGB')}

    if file_type in plain_text_types:
        return {"type": "text", "content": uploaded_file.read().decode("utf-8", errors='ignore')}

    if file_type in ("csv", "xlsx"):
        try:
            df = pd.read_csv(uploaded_file) if file_type == "csv" else pd.read_excel(uploaded_file)
            return {"type": "text", "content": df.to_string()}
        except Exception as e:
            return {"type": "error", "content": f"Failed to read tabular data: {e}"}

    if file_type == "pdf":
        try:
            reader = PyPDF2.PdfReader(uploaded_file)
            return {"type": "text", "content": "".join(page.extract_text() for page in reader.pages if page.extract_text())}
        except Exception as e:
            return {"type": "error", "content": f"Failed to read PDF: {e}"}

    if file_type == "zip":
        try:
            with zipfile.ZipFile(uploaded_file) as z:
                newline = "\n"
                content = f"ZIP Contents (Processing text files only):{newline}"
                for file_info in z.infolist():
                    if not file_info.is_dir() and file_info.filename.lower().endswith(zip_text_extensions):
                        with z.open(file_info.filename) as file:
                            file_content = file.read().decode('utf-8', errors='ignore')
                        content += f"{newline}📄 {file_info.filename}:{newline}{file_content}{newline}"
                    elif not file_info.is_dir():
                        content += f"{newline}⚠️ Binärdatei/Unbekannte Datei ignoriert: {file_info.filename}{newline}"
            return {"type": "text", "content": content}
        except Exception as e:
            return {"type": "error", "content": f"Failed to process ZIP: {e}"}

    return {"type": "error", "content": "Unsupported file format"}
# --- Sidebar: API settings ---
with st.sidebar:
    st.header("⚙️ API Settings")

    # API key management (never persisted; entered per session).
    api_key = st.text_input("Google AI API Key", type="password")

    # Curated model list; all entries are multimodal (vision-capable).
    model_list = [
        "gemini-2.5-flash",
        "gemini-2.5-pro",
        "gemini-1.5-flash",
        "gemini-1.5-pro",
    ]

    model = st.selectbox("Model", model_list)

    st.caption("❗ Alle **2.5er** und **1.5er** Modelle sind **Vision-fähig** (Bilder, Dateien).")

    temperature = st.slider("Temperature", 0.0, 1.0, 0.7)
    max_tokens = st.slider("Max Tokens", 1, 100000, 1000)

    if st.button("🔄 Chat Reset (Full)"):
        st.session_state.messages = []
        st.session_state.uploaded_content = None
        # FIX: st.experimental_rerun() was removed from Streamlit; st.rerun()
        # is the supported replacement.
        st.rerun()
# --- File upload & preview ---
uploaded_file = st.file_uploader("Upload File (Image/Text/PDF/ZIP)",
                                 type=["jpg", "jpeg", "png", "txt", "pdf", "zip", "csv", "xlsx", "html", "css", "js", "py"])

# Process a fresh upload only once; the result is cached in session state
# until the user explicitly clears it.
if uploaded_file and st.session_state.uploaded_content is None:
    st.session_state.uploaded_content = process_file(uploaded_file)

if st.session_state.uploaded_content:
    processed = st.session_state.uploaded_content
    st.subheader("Current File Attachment:")

    if processed["type"] == "image":
        st.image(processed["content"], caption="Attached Image", width=300)
    elif processed["type"] == "text":
        st.text_area("File Preview", processed["content"], height=150)
    elif processed["type"] == "error":
        st.error(f"Error processing file: {processed['content']}")

    if st.button("❌ Clear Uploaded File Attachment"):
        st.session_state.uploaded_content = None
        # FIX: st.experimental_rerun() was removed from Streamlit; st.rerun()
        # is the supported replacement.
        st.rerun()
# --- Render the chat history ---
for message in st.session_state.messages:
    # Only plain text is stored in the Streamlit history, so only text is shown.
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# --- Handle a new chat input ---
if prompt := st.chat_input("Your message..."):
    if not api_key:
        st.warning("API Key benötigt!")
        st.stop()

    # 1. Configure the API client for this request.
    genai.configure(api_key=api_key)
    model_instance = genai.GenerativeModel(model)

    # 2. Convert the Streamlit history into the Gemini request format
    #    (role: user/model, parts: [{text: ...}]).
    role_map = {"user": "user", "assistant": "model"}  # hoisted: loop-invariant
    contents = []
    for msg in st.session_state.messages:
        contents.append({"role": role_map.get(msg["role"]), "parts": [{"text": msg["content"]}]})

    # 3. Start the new user turn with the typed prompt.
    current_parts = [{"text": prompt}]

    # 4. Attach file content, if a file is currently loaded.
    if st.session_state.uploaded_content:
        content_data = st.session_state.uploaded_content

        if content_data["type"] == "image":
            # The SDK accepts PIL Image objects directly as message parts.
            current_parts.append(content_data["content"])
        elif content_data["type"] == "text":
            # Fold extracted file text into the prompt text itself.
            current_parts[0]["text"] += f"\n\n[Attached File Content]\n{content_data['content']}"

    # Append the complete user turn to the request payload.
    contents.append({"role": "user", "parts": current_parts})

    # 5. Store and display only the plain text prompt in the Streamlit history
    #    to keep rendering simple.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # 6. Generate the response.
    with st.spinner("Gemini is thinking..."):
        try:
            response = model_instance.generate_content(
                contents,  # full history including the new turn
                # FIX: the google-generativeai SDK's config class is
                # GenerationConfig; `GenerateContentConfig` belongs to the
                # separate newer `google-genai` SDK and raised AttributeError.
                generation_config=genai.types.GenerationConfig(
                    temperature=temperature,
                    max_output_tokens=max_tokens
                )
            )

            response_text = response.text
            with st.chat_message("assistant"):
                st.markdown(response_text)
            st.session_state.messages.append({"role": "assistant", "content": response_text})

        except APIError as e:
            st.error(f"Gemini API Error: {str(e)}. Bitte prüfen Sie den API Key und die Modell-Wahl.")
        except Exception as e:
            st.error(f"General Error: {str(e)}")
# Usage instructions rendered at the bottom of the sidebar.
_INSTRUCTIONS_MD = """
---
## 📝 Instructions:
1. Enter your **Google AI API Key**
2. Select a **Gemini 2.5/1.5** model (all are multimodal)
3. Adjust parameters (Temperature/Tokens)
4. Upload a file (optional: **Image, Text, PDF, ZIP, CSV/XLSX**)
5. Type your message and press Enter

### About
🔗 [GitHub Profile](https://github.com/volkansah) | 📂 [Project Repository](https://github.com/volkansah/gemini-ai-chat)
"""

with st.sidebar:
    st.markdown(_INSTRUCTIONS_MD)