Spaces:
Sleeping
Sleeping
| import streamlit as st | |
| import google.generativeai as genai | |
| from PIL import Image | |
| import io | |
| import base64 | |
| import pandas as pd | |
| import zipfile | |
| import PyPDF2 | |
# --- Page configuration ---
st.set_page_config(page_title="Gemini AI Chat", layout="wide")
st.title("🤖 Gemini AI Chat Interface")
st.markdown("""
**Welcome to the Gemini AI Chat Interface!**
Chat seamlessly with Google's advanced Gemini AI models, supporting multiple input types.
🔗 [GitHub Profile](https://github.com/volkansah) |
📂 [Project Repository](https://github.com/volkansah/gemini-ai-chat) |
💬 [Soon](https://aicodecraft.io)
""")

# --- Session state management ---
# "messages": full chat history as {"role": ..., "content": ...} dicts.
# "uploaded_content": result dict from process_file() for the current
# attachment, or None when nothing is attached.
st.session_state.setdefault("messages", [])
st.session_state.setdefault("uploaded_content", None)
| # Funktionen zur Dateiverarbeitung | |
def encode_image(image):
    """Return the given PIL image re-encoded as JPEG, base64, UTF-8 text.

    The image must already be in a JPEG-compatible mode (callers convert
    uploads to RGB before storing them).
    """
    jpeg_buffer = io.BytesIO()
    image.save(jpeg_buffer, format="JPEG")
    raw = jpeg_buffer.getvalue()
    return base64.b64encode(raw).decode("utf-8")
def process_file(uploaded_file):
    """Extract the content of an uploaded file into a tagged result dict.

    Supported inputs: images (returned as RGB PIL images), plain-text/code
    files (decoded as UTF-8), CSV/XLSX tables (rendered via pandas), PDFs
    (text extraction via PyPDF2) and ZIP archives (text members are
    concatenated; binary/unknown members are skipped with a note).

    Args:
        uploaded_file: File-like object exposing ``name`` and ``read()``
            (e.g. a Streamlit ``UploadedFile``).

    Returns:
        dict: ``{"type": "image"|"text"|"error", "content": ...}``.
    """
    file_type = uploaded_file.name.split(".")[-1].lower()

    # Extensions treated as text when found inside a ZIP archive.
    # BUGFIX: ".xlsx" was previously listed here, but xlsx is a binary
    # (zipped) format — decoding it as UTF-8 only produced garbage.
    zip_text_extensions = (
        ".txt", ".csv", ".py", ".html", ".js", ".css",
        ".php", ".json", ".xml", ".c", ".cpp", ".java",
        ".cs", ".rb", ".go", ".ts", ".swift", ".kt", ".rs", ".sh", ".sql",
    )
    # Standalone uploads of these extensions are read directly as UTF-8
    # text; CSV is excluded because it goes through pandas below.
    text_types = {ext.lstrip(".") for ext in zip_text_extensions} - {"csv"}

    if file_type in ("jpg", "jpeg", "png"):
        # Convert to RGB so the image can later be re-encoded as JPEG.
        return {"type": "image", "content": Image.open(uploaded_file).convert("RGB")}

    if file_type in text_types:
        return {"type": "text", "content": uploaded_file.read().decode("utf-8", errors="ignore")}

    if file_type in ("csv", "xlsx"):
        try:
            # Render the table as plain text so it can be sent as a prompt.
            df = pd.read_csv(uploaded_file) if file_type == "csv" else pd.read_excel(uploaded_file)
            return {"type": "text", "content": df.to_string()}
        except Exception as e:
            return {"type": "error", "content": f"Failed to read tabular data: {e}"}

    if file_type == "pdf":
        try:
            reader = PyPDF2.PdfReader(uploaded_file)
            # Skip pages whose extraction yields nothing (scans, images).
            return {"type": "text", "content": "".join(
                page.extract_text() for page in reader.pages if page.extract_text()
            )}
        except Exception as e:
            return {"type": "error", "content": f"Failed to read PDF: {e}"}

    if file_type == "zip":
        try:
            with zipfile.ZipFile(uploaded_file) as z:
                # Collect parts and join once instead of repeated +=.
                parts = ["ZIP Contents (Processing text files only):\n"]
                for file_info in z.infolist():
                    if file_info.is_dir():
                        continue
                    try:
                        if file_info.filename.lower().endswith(zip_text_extensions):
                            with z.open(file_info.filename) as member:
                                # Decode with 'ignore' in case of bad bytes.
                                member_text = member.read().decode("utf-8", errors="ignore")
                            parts.append(f"\n📄 {file_info.filename}:\n{member_text}\n")
                        else:
                            parts.append(f"\n⚠️ Binärdatei/Unbekannte Datei ignoriert: {file_info.filename}\n")
                    except Exception as e:
                        parts.append(f"\n❌ Fehler beim Lesen von {file_info.filename}: {str(e)}\n")
                return {"type": "text", "content": "".join(parts)}
        except Exception as e:
            return {"type": "error", "content": f"Failed to process ZIP: {e}"}

    return {"type": "error", "content": "Unsupported file format"}
# --- Sidebar: settings, model choice and file upload ---
with st.sidebar:
    api_key = st.text_input("Google AI API Key", type="password")

    # Model list focused on the current 2.5 models, with older fallbacks.
    model_list = [
        # --- Current flagships (standard & pro) ---
        "gemini-2.5-flash",  # standard, fast, multimodal (vision-capable)
        "gemini-2.5-pro",    # flagship, best reasoning, multimodal (vision-capable)
        # --- Previous generation (fallback/alternative) ---
        "gemini-1.5-flash",
        "gemini-1.5-pro",
        # --- Legacy models (text-only or older endpoints) ---
        "gemini-2.0-flash",
        "gemini-1.0-pro",    # older stable endpoint
    ]
    model = st.selectbox("Model", model_list)
    # Important: 2.5 models are vision-capable by default.
    st.caption("❗ Alle **2.5er** Modelle sind **Vision-fähig** (Bilder, Dateien).")

    temperature = st.slider("Temperature", 0.0, 1.0, 0.7)
    max_tokens = st.slider("Max Tokens", 1, 100000, 1000)

    uploaded_file = st.file_uploader(
        "Upload File (Image/Text/PDF/ZIP)",
        type=["jpg", "jpeg", "png", "txt", "pdf", "zip",
              "csv", "xlsx", "html", "css", "php", "js", "py"],
    )

    # BUGFIX: previously a new upload was silently ignored as long as an
    # old attachment was still in session state (the gate required
    # uploaded_content to be None). Re-process whenever the uploaded
    # file's name differs from the one processed last.
    if uploaded_file is not None and st.session_state.get("processed_file_name") != uploaded_file.name:
        st.session_state.uploaded_content = process_file(uploaded_file)
        st.session_state.processed_file_name = uploaded_file.name

    # Preview the current attachment, if any.
    if st.session_state.uploaded_content:
        processed = st.session_state.uploaded_content
        st.subheader("Current File Attachment:")
        if processed["type"] == "image":
            st.image(processed["content"], caption="Attached Image", use_container_width=False, width=300)
        elif processed["type"] == "text":
            st.text_area("File Preview", processed["content"], height=150)
        elif processed["type"] == "error":
            st.error(f"Error processing file: {processed['content']}")

        # Clear button: drops the processed content. st.file_uploader
        # itself cannot easily be reset, so we tell the user.
        if st.button("❌ Clear Uploaded File Attachment"):
            st.session_state.uploaded_content = None
            st.session_state.processed_file_name = None
            st.info("Attachment cleared! Reload the page to reset the upload field completely.")
# Replay the stored chat history so it survives Streamlit reruns.
for past_message in st.session_state.messages:
    role, text = past_message["role"], past_message["content"]
    with st.chat_message(role):
        st.markdown(text)
# --- Handle a new chat message ---
if prompt := st.chat_input("Your message..."):
    if not api_key:
        st.warning("API Key benötigt!")
        st.stop()

    with st.spinner("Gemini is thinking..."):
        try:
            # Configure the API and instantiate the selected model.
            genai.configure(api_key=api_key)
            model_instance = genai.GenerativeModel(model)

            # The request always starts with the user's text prompt.
            content = [{"text": prompt}]

            # Attach the current upload, if any.
            attachment = st.session_state.uploaded_content
            if attachment:
                if attachment["type"] == "image":
                    # BUGFIX: the old gate ("vision"/"pro" in the model
                    # name) wrongly rejected vision-capable models such as
                    # gemini-2.5-flash, contradicting the sidebar caption.
                    # Only the 1.0 generation in the list is text-only.
                    if "1.0" in model:
                        st.error("Bitte ein Vision- oder Pro-Modell für Bilder auswählen!")
                        st.stop()
                    content.append({
                        "inline_data": {
                            "mime_type": "image/jpeg",
                            "data": encode_image(attachment["content"]),
                        }
                    })
                elif attachment["type"] == "text":
                    # Fold the file content into the prompt text.
                    content[0]["text"] += f"\n\n[Attached File Content]\n{attachment['content']}"

            # Record and display the user's message.
            st.session_state.messages.append({"role": "user", "content": prompt})
            with st.chat_message("user"):
                st.markdown(prompt)

            # Generate the model response.
            response = model_instance.generate_content(
                content,
                generation_config=genai.types.GenerationConfig(
                    temperature=temperature,
                    max_output_tokens=max_tokens,
                ),
            )

            # Guard against empty/blocked responses before reading .text.
            if not response.candidates:
                st.error("API Error: Keine gültige Antwort erhalten. Überprüfe die Eingabe oder das Modell.")
            else:
                response_text = response.text
                with st.chat_message("assistant"):
                    st.markdown(response_text)
                st.session_state.messages.append({"role": "assistant", "content": response_text})
        except Exception as e:
            st.error(f"API Error: {str(e)}")
            # Extra hint when the failure was likely caused by sending an
            # image to a text-only (1.0 generation) model. Read from
            # session state: it is always bound, even if the exception
            # occurred before the local attachment variable was set.
            current = st.session_state.uploaded_content
            if current and current["type"] == "image" and "1.0" in model:
                st.error("Detail-Fehler: Für Bilder MUSS ein Vision-fähiger Modell (z.B. 1.5 Pro) ausgewählt werden.")
# Sidebar footer: usage instructions and project links.
_INSTRUCTIONS_MD = """
---
## 📝 Instructions:
1. Enter your Google AI API key
2. Select a model (use **Pro/Vision** models for image analysis)
3. Adjust parameters (Temperature/Tokens)
4. Upload a file (optional, supports **Image, Text, PDF, ZIP, CSV/XLSX**)
5. Type your message and press Enter
### About
🔗 [GitHub Profile](https://github.com/volkansah) |
📂 [Project Repository](https://github.com/volkansah/gemini-ai-chat) |
💬 [Soon](https://aicodecraft.io)
"""

with st.sidebar:
    st.markdown(_INSTRUCTIONS_MD)