File size: 10,420 Bytes
322738b
c20e16c
 
 
 
 
 
 
 
 
 
 
 
322738b
c20e16c
 
 
 
 
322738b
 
c20e16c
322738b
 
c20e16c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
322738b
c20e16c
322738b
c20e16c
 
9d0c975
c20e16c
9d0c975
 
 
 
 
 
 
 
 
 
 
322738b
9d0c975
c20e16c
9d0c975
 
 
c20e16c
322738b
c20e16c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
82a7a9e
c20e16c
82a7a9e
c20e16c
 
 
 
 
 
 
 
 
 
 
 
 
 
82a7a9e
c20e16c
 
 
 
82a7a9e
c20e16c
 
322738b
c20e16c
322738b
c20e16c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
import streamlit as st
import google.generativeai as genai
from PIL import Image
import io
import base64
import pandas as pd
import zipfile
import PyPDF2

# Page configuration
st.set_page_config(page_title="Gemini AI Chat", layout="wide")

st.title("🤖 Gemini AI Chat Interface")
st.markdown("""
**Welcome to the Gemini AI Chat Interface!**
Chat seamlessly with Google's advanced Gemini AI models, supporting multiple input types.
🔗 [GitHub Profile](https://github.com/volkansah) | 
📂 [Project Repository](https://github.com/volkansah/gemini-ai-chat) | 
💬 [Soon](https://aicodecraft.io)
""")

# Initialize per-session state: the chat transcript and the processed file
# attachment (if any) must survive Streamlit's script reruns.
for _state_key, _state_default in (("messages", []), ("uploaded_content", None)):
    if _state_key not in st.session_state:
        st.session_state[_state_key] = _state_default

# File processing helpers
def encode_image(image):
    """Serialize an image object to a base64-encoded JPEG string.

    *image* must expose a PIL-style ``save(buffer, format=...)`` method.
    """
    jpeg_buffer = io.BytesIO()
    image.save(jpeg_buffer, format="JPEG")
    raw_bytes = jpeg_buffer.getvalue()
    return base64.b64encode(raw_bytes).decode('utf-8')

def process_file(uploaded_file):
    """Extract the content of an uploaded file for use in a chat prompt.

    Parameters:
        uploaded_file: a Streamlit UploadedFile-like object exposing
            ``.name`` and ``.read()`` (file-like/seekable for ZIP, CSV, XLSX).

    Returns:
        dict with keys:
            "type"    -- "image", "text" or "error"
            "content" -- a PIL image for images, extracted text for
                         text-like files, or an error message string.
    """
    file_type = uploaded_file.name.split('.')[-1].lower()

    # Extensions that can safely be decoded as UTF-8 text; also used to pick
    # decodable members out of ZIP archives. '.csv' is plain text and may be
    # decoded; '.xlsx' is a binary (zipped XML) format and is deliberately
    # NOT listed here -- previously it was, which produced garbage when an
    # .xlsx member inside a ZIP was decoded as UTF-8. Direct .csv/.xlsx
    # uploads are handled below via pandas instead.
    text_extensions = ('.txt', '.csv', '.py', '.html', '.js', '.css',
                       '.php', '.json', '.xml', '.c', '.cpp', '.java',
                       '.cs', '.rb', '.go', '.ts', '.swift', '.kt',
                       '.rs', '.sh', '.sql')

    if file_type in ("jpg", "jpeg", "png"):
        return {"type": "image", "content": Image.open(uploaded_file).convert('RGB')}

    # Direct upload of a plain-text file (CSV goes through pandas below).
    if file_type != "csv" and f".{file_type}" in text_extensions:
        return {"type": "text", "content": uploaded_file.read().decode("utf-8", errors='ignore')}

    if file_type in ("csv", "xlsx"):
        try:
            # Read the file as CSV or Excel and render it as plain text.
            if file_type == "csv":
                df = pd.read_csv(uploaded_file)
            else:  # xlsx
                df = pd.read_excel(uploaded_file)
            return {"type": "text", "content": df.to_string()}
        except Exception as e:
            return {"type": "error", "content": f"Failed to read tabular data: {e}"}

    if file_type == "pdf":
        try:
            reader = PyPDF2.PdfReader(uploaded_file)
            # Extract each page exactly once (the old code called
            # extract_text() twice per page); skip pages with no text.
            page_texts = (page.extract_text() for page in reader.pages)
            return {"type": "text", "content": "".join(t for t in page_texts if t)}
        except Exception as e:
            return {"type": "error", "content": f"Failed to read PDF: {e}"}

    if file_type == "zip":
        try:
            with zipfile.ZipFile(uploaded_file) as z:
                # Collect pieces and join once instead of string +=.
                parts = ["ZIP Contents (Processing text files only):\n"]

                for file_info in z.infolist():
                    if file_info.is_dir():
                        continue
                    try:
                        # Only decode members with a known text extension;
                        # everything else (including .xlsx) is reported as
                        # an ignored binary file.
                        if file_info.filename.lower().endswith(text_extensions):
                            with z.open(file_info.filename) as member:
                                member_text = member.read().decode('utf-8', errors='ignore')
                            parts.append(f"\n📄 {file_info.filename}:\n{member_text}\n")
                        else:
                            parts.append(f"\n⚠️ Binärdatei/Unbekannte Datei ignoriert: {file_info.filename}\n")
                    except Exception as e:
                        parts.append(f"\n❌ Fehler beim Lesen von {file_info.filename}: {str(e)}\n")

                return {"type": "text", "content": "".join(parts)}
        except Exception as e:
            return {"type": "error", "content": f"Failed to process ZIP: {e}"}

    return {"type": "error", "content": "Unsupported file format"}

# Sidebar: API key, model selection and generation settings.
with st.sidebar:
    # User-supplied Google AI API key (password-masked input widget).
    api_key = st.text_input("Google AI API Key", type="password")
    
    # Model list cleaned up and focused on the latest 2.5 models.
    model_list = [
        # --- Current flagships (standard & pro) ---
        "gemini-2.5-flash",       # Standard, fast, multimodal (vision-capable)
        "gemini-2.5-pro",         # Flagship, best reasoning, multimodal (vision-capable)
        
        # --- Previous generation (as fallback/alternative) ---
        "gemini-1.5-flash",       
        "gemini-1.5-pro",         
        
        # --- Legacy models (text-only or older endpoints) ---
        "gemini-2.0-flash",       
        "gemini-1.0-pro",         # Older stable endpoint
    ]
    
    model = st.selectbox("Model", model_list)
    
    # Reminder (German UI text): all 2.5 models are vision-capable by default.
    st.caption("❗ Alle **2.5er** Modelle sind **Vision-fähig** (Bilder, Dateien).")
    
    # Generation parameters passed to GenerationConfig further below.
    temperature = st.slider("Temperature", 0.0, 1.0, 0.7)
    max_tokens = st.slider("Max Tokens", 1, 100000, 1000)
# File upload control.
uploaded_file = st.file_uploader("Upload File (Image/Text/PDF/ZIP)", 
                                 type=["jpg", "jpeg", "png", "txt", "pdf", "zip", 
                                       "csv", "xlsx", "html", "css", "php", "js", "py"])

# File processing and preview logic.
if uploaded_file and st.session_state.uploaded_content is None:
    # Only process when a new file is uploaded and no content is in state.
    processed = process_file(uploaded_file)
    st.session_state.uploaded_content = processed

# Show a preview when processed content is present.
if st.session_state.uploaded_content:
    processed = st.session_state.uploaded_content
    
    st.subheader("Current File Attachment:")
    
    if processed["type"] == "image":
        st.image(processed["content"], caption="Attached Image", use_container_width=False, width=300)
    elif processed["type"] == "text":
        st.text_area("File Preview", processed["content"], height=150)
    elif processed["type"] == "error":
         st.error(f"Error processing file: {processed['content']}")
    
    # Clear button for the stored attachment.
    if st.button("❌ Clear Uploaded File Attachment"):
        st.session_state.uploaded_content = None
        # st.file_uploader itself cannot easily be reset programmatically,
        # so we only clear the stored state and inform the user.
        # NOTE(review): the still-set uploader widget will re-populate
        # uploaded_content on the next rerun until the page is reloaded.
        st.info("Attachment cleared! Reload the page to reset the upload field completely.")


# Render the conversation so far, one chat bubble per stored message.
for past_message in st.session_state.messages:
    role, text = past_message["role"], past_message["content"]
    with st.chat_message(role):
        st.markdown(text)

# Handle chat input.
if prompt := st.chat_input("Your message..."):
    if not api_key:
        st.warning("API Key benötigt!")
        st.stop()
    
    # Show a spinner while the request is in flight.
    with st.spinner("Gemini is thinking..."):
        try:
            # Configure the SDK with the user-supplied key.
            genai.configure(api_key=api_key)
            
            # Instantiate the selected model.
            model_instance = genai.GenerativeModel(model)
            
            # Build the request content: prompt text first, attachment after.
            content = [{"text": prompt}]
            
            # Attach uploaded file content, if any.
            if st.session_state.uploaded_content:
                if st.session_state.uploaded_content["type"] == "image":
                    # Only legacy gemini-1.0 endpoints are text-only; all
                    # 1.5/2.x models are multimodal (see sidebar caption).
                    # The previous check ("vision"/"pro" substring match)
                    # wrongly rejected vision-capable flash models such as
                    # gemini-2.5-flash while letting gemini-1.0-pro through.
                    if model.lower().startswith("gemini-1.0"):
                        st.error("Bitte ein multimodales Modell (1.5 oder neuer) für Bilder auswählen!")
                        st.stop()
                    
                    content.append({
                        "inline_data": {
                            "mime_type": "image/jpeg",
                            "data": encode_image(st.session_state.uploaded_content["content"])
                        }
                    })
                elif st.session_state.uploaded_content["type"] == "text":
                    # Inline the extracted file text into the prompt.
                    content[0]["text"] += f"\n\n[Attached File Content]\n{st.session_state.uploaded_content['content']}"
            
            # Record and display the user's message.
            st.session_state.messages.append({"role": "user", "content": prompt})
            with st.chat_message("user"):
                st.markdown(prompt)
            
            # Generate a response.
            # NOTE(review): earlier chat history is NOT sent to the model,
            # so each turn is answered without conversational memory.
            response = model_instance.generate_content(
                content,
                generation_config=genai.types.GenerationConfig(
                    temperature=temperature,
                    max_output_tokens=max_tokens
                )
            )
            
            # Guard against empty/blocked responses before reading .text.
            if not response.candidates:
                st.error("API Error: Keine gültige Antwort erhalten. Überprüfe die Eingabe oder das Modell.")
            else:
                # Display the answer and append it to the history.
                response_text = response.text
                with st.chat_message("assistant"):
                    st.markdown(response_text)
                st.session_state.messages.append({"role": "assistant", "content": response_text})
            
        except Exception as e:
            st.error(f"API Error: {str(e)}")
            # Extra hint when an image was attached to a text-only model
            # (kept consistent with the gate above).
            if (st.session_state.uploaded_content
                    and st.session_state.uploaded_content["type"] == "image"
                    and model.lower().startswith("gemini-1.0")):
                st.error("Detail-Fehler: Für Bilder MUSS ein multimodales Modell (z.B. 1.5 Pro) ausgewählt werden.")

# Usage instructions rendered in the sidebar.
with st.sidebar:
    st.markdown("""
    ---
    ## 📝 Instructions:
    1. Enter your Google AI API key
    2. Select a model (use **Pro/Vision** models for image analysis)
    3. Adjust parameters (Temperature/Tokens)
    4. Upload a file (optional, supports **Image, Text, PDF, ZIP, CSV/XLSX**)
    5. Type your message and press Enter
    
    ### About
    🔗 [GitHub Profile](https://github.com/volkansah) | 
    📂 [Project Repository](https://github.com/volkansah/gemini-ai-chat) | 
    💬 [Soon](https://aicodecraft.io)
    """)