| import streamlit as st |
| import google.generativeai as genai |
| from PIL import Image |
| import io |
| import base64 |
| import pandas as pd |
| import zipfile |
| import PyPDF2 |
|
|
| |
# Basic page chrome: wide layout gives the chat column the full viewport width.
st.set_page_config(page_title="Gemini AI Chat", layout="wide")


st.title("🤖 Gemini AI Chat Interface")
# Intro banner with project links (rendered as Markdown).
st.markdown("""
**Welcome to the Gemini AI Chat Interface!**
Chat seamlessly with Google's advanced Gemini AI models, supporting multiple input types.
🔗 [GitHub Profile](https://github.com/volkansah) |
📂 [Project Repository](https://github.com/volkansah/gemini-ai-chat) |
💬 [Soon](https://aicodecraft.io)
""")
|
|
| |
# First-run initialisation of per-session storage: the chat transcript and
# the currently attached (already pre-processed) upload. setdefault only
# writes when the key is absent, so reruns keep existing state.
st.session_state.setdefault("messages", [])
st.session_state.setdefault("uploaded_content", None)
|
|
| |
def encode_image(image):
    """Serialise a PIL image to a base64 string of its JPEG bytes."""
    jpeg_buffer = io.BytesIO()
    image.save(jpeg_buffer, format="JPEG")
    raw_bytes = jpeg_buffer.getvalue()
    return base64.b64encode(raw_bytes).decode('utf-8')
|
|
def process_file(uploaded_file):
    """Extract content from an uploaded file for use in the chat prompt.

    Args:
        uploaded_file: Streamlit UploadedFile (file-like object with ``.name``).

    Returns:
        dict with keys ``"type"`` ("image" | "text" | "error") and
        ``"content"`` (PIL RGB image, extracted text, or an error message).
    """
    file_type = uploaded_file.name.split('.')[-1].lower()

    # Extensions whose content is safe to decode as UTF-8 text.
    # NOTE: .xlsx is a binary (zip) container and is deliberately NOT listed
    # here — it is rendered via pandas below and skipped as binary when it
    # appears inside a ZIP archive (previously it was decoded to garbage).
    text_extensions = ('.txt', '.csv', '.py', '.html', '.js', '.css',
                       '.php', '.json', '.xml', '.c', '.cpp', '.java',
                       '.cs', '.rb', '.go', '.ts', '.swift', '.kt',
                       '.rs', '.sh', '.sql')

    if file_type in ("jpg", "jpeg", "png"):
        # Normalise to RGB so the image can later be re-encoded as JPEG.
        return {"type": "image", "content": Image.open(uploaded_file).convert('RGB')}

    if file_type in ("csv", "xlsx"):
        # Tabular data is rendered as a plain-text table for the prompt.
        try:
            reader = pd.read_csv if file_type == "csv" else pd.read_excel
            return {"type": "text", "content": reader(uploaded_file).to_string()}
        except Exception as e:
            return {"type": "error", "content": f"Failed to read tabular data: {e}"}

    # Plain-text formats (csv never reaches here — handled above).
    if file_type in {ext.lstrip('.') for ext in text_extensions}:
        return {"type": "text", "content": uploaded_file.read().decode("utf-8", errors='ignore')}

    if file_type == "pdf":
        try:
            reader = PyPDF2.PdfReader(uploaded_file)
            # extract_text() is expensive — call it once per page, then drop
            # pages that yielded no text.
            page_texts = (page.extract_text() for page in reader.pages)
            return {"type": "text", "content": "".join(t for t in page_texts if t)}
        except Exception as e:
            return {"type": "error", "content": f"Failed to read PDF: {e}"}

    if file_type == "zip":
        try:
            with zipfile.ZipFile(uploaded_file) as z:
                newline = "\n"
                parts = [f"ZIP Contents (Processing text files only):{newline}"]
                for file_info in z.infolist():
                    if file_info.is_dir():
                        continue
                    try:
                        if file_info.filename.lower().endswith(text_extensions):
                            # Open by ZipInfo (not name) so duplicate member
                            # names inside the archive cannot be confused.
                            with z.open(file_info) as member:
                                member_text = member.read().decode('utf-8', errors='ignore')
                            parts.append(f"{newline}📄 {file_info.filename}:{newline}{member_text}{newline}")
                        else:
                            parts.append(f"{newline}⚠️ Binärdatei/Unbekannte Datei ignoriert: {file_info.filename}{newline}")
                    except Exception as e:
                        # Per-member failures are reported inline and do not
                        # abort processing of the rest of the archive.
                        parts.append(f"{newline}❌ Fehler beim Lesen von {file_info.filename}: {str(e)}{newline}")
                return {"type": "text", "content": "".join(parts)}
        except Exception as e:
            return {"type": "error", "content": f"Failed to process ZIP: {e}"}

    return {"type": "error", "content": "Unsupported file format"}
|
|
| |
# --- Sidebar: connection and generation settings -------------------------
with st.sidebar:
    # API key lives only in this session; it is never persisted.
    api_key = st.text_input("Google AI API Key", type="password")

    # Selectable Gemini model identifiers, newest generation first.
    model_list = [
        "gemini-2.5-flash",
        "gemini-2.5-pro",

        "gemini-1.5-flash",
        "gemini-1.5-pro",

        "gemini-2.0-flash",
        "gemini-1.0-pro",
    ]

    model = st.selectbox("Model", model_list)

    st.caption("❗ Alle **2.5er** Modelle sind **Vision-fähig** (Bilder, Dateien).")

    # Sampling temperature (0 = deterministic) and response token budget.
    temperature = st.slider("Temperature", 0.0, 1.0, 0.7)
    max_tokens = st.slider("Max Tokens", 1, 100000, 1000)

    # NOTE(review): this whitelist is narrower than what process_file() can
    # handle (e.g. .json/.xml/.pdf-in-zip siblings are missing) — confirm intended.
    uploaded_file = st.file_uploader("Upload File (Image/Text/PDF/ZIP)",
                                     type=["jpg", "jpeg", "png", "txt", "pdf", "zip",
                                           "csv", "xlsx", "html", "css", "php", "js", "py"])
|
|
| |
# --- File attachment ingest + preview ------------------------------------
# Track the name of the last file we ingested. Without this, "Clear" is
# immediately undone: the uploader widget still holds the file after the
# button's rerun, so a plain `uploaded_content is None` guard would simply
# re-process the same upload again. Tracking the name also means selecting
# a different file replaces the old attachment instead of being ignored.
if "processed_file_name" not in st.session_state:
    st.session_state.processed_file_name = None

if uploaded_file is not None and uploaded_file.name != st.session_state.processed_file_name:
    st.session_state.uploaded_content = process_file(uploaded_file)
    st.session_state.processed_file_name = uploaded_file.name


if st.session_state.uploaded_content:
    processed = st.session_state.uploaded_content

    st.subheader("Current File Attachment:")

    if processed["type"] == "image":
        st.image(processed["content"], caption="Attached Image", use_container_width=False, width=300)
    elif processed["type"] == "text":
        st.text_area("File Preview", processed["content"], height=150)
    elif processed["type"] == "error":
        st.error(f"Error processing file: {processed['content']}")

    if st.button("❌ Clear Uploaded File Attachment"):
        st.session_state.uploaded_content = None
        st.info("Attachment cleared! Reload the page to reset the upload field completely.")
|
|
|
|
| |
# Replay the stored transcript so the conversation survives Streamlit's
# rerun-on-every-interaction execution model.
for entry in st.session_state.messages:
    role, text = entry["role"], entry["content"]
    with st.chat_message(role):
        st.markdown(text)
|
|
| |
# --- Chat input and Gemini round-trip ------------------------------------
if prompt := st.chat_input("Your message..."):
    if not api_key:
        st.warning("API Key benötigt!")
        st.stop()

    with st.spinner("Gemini is thinking..."):
        try:
            genai.configure(api_key=api_key)
            model_instance = genai.GenerativeModel(model)

            # The request always starts with the user's text; an attachment
            # (if any) is appended below.
            content = [{"text": prompt}]

            attachment = st.session_state.uploaded_content
            if attachment:
                if attachment["type"] == "image":
                    # Only the legacy 1.0 models lack vision support; every
                    # 1.5/2.x model accepts inline images. (The previous
                    # check for "vision"/"pro" in the name wrongly blocked
                    # the flash models, contradicting the sidebar caption.)
                    if model.startswith("gemini-1.0"):
                        st.error("Bitte ein Vision- oder Pro-Modell für Bilder auswählen!")
                        st.stop()

                    content.append({
                        "inline_data": {
                            "mime_type": "image/jpeg",
                            "data": encode_image(attachment["content"])
                        }
                    })
                elif attachment["type"] == "text":
                    # Inline text attachments directly into the prompt.
                    content[0]["text"] += f"\n\n[Attached File Content]\n{attachment['content']}"

            st.session_state.messages.append({"role": "user", "content": prompt})
            with st.chat_message("user"):
                st.markdown(prompt)

            # NOTE(review): only the current turn is sent — prior messages in
            # st.session_state.messages are displayed but never forwarded to
            # the model, so the "chat" has no memory. Confirm if intended.
            response = model_instance.generate_content(
                content,
                generation_config=genai.types.GenerationConfig(
                    temperature=temperature,
                    max_output_tokens=max_tokens
                )
            )

            if not response.candidates:
                # Empty candidate list usually means the input was blocked.
                st.error("API Error: Keine gültige Antwort erhalten. Überprüfe die Eingabe oder das Modell.")
            else:
                response_text = response.text
                with st.chat_message("assistant"):
                    st.markdown(response_text)
                st.session_state.messages.append({"role": "assistant", "content": response_text})

        except Exception as e:
            st.error(f"API Error: {str(e)}")
            # Extra hint when the failure coincides with an image attachment
            # on a non-vision (legacy 1.0) model — kept consistent with the
            # gating condition above.
            if (st.session_state.uploaded_content
                    and st.session_state.uploaded_content["type"] == "image"
                    and model.startswith("gemini-1.0")):
                st.error("Detail-Fehler: Für Bilder MUSS ein Vision-fähiger Modell (z.B. 1.5 Pro) ausgewählt werden.")
|
|
| |
# --- Sidebar footer: usage instructions and project links ----------------
# Rendered after the controls above, so it appears at the bottom of the sidebar.
with st.sidebar:
    st.markdown("""
---
## 📝 Instructions:
1. Enter your Google AI API key
2. Select a model (use **Pro/Vision** models for image analysis)
3. Adjust parameters (Temperature/Tokens)
4. Upload a file (optional, supports **Image, Text, PDF, ZIP, CSV/XLSX**)
5. Type your message and press Enter

### About
🔗 [GitHub Profile](https://github.com/volkansah) |
📂 [Project Repository](https://github.com/volkansah/gemini-ai-chat) |
💬 [Soon](https://aicodecraft.io)
""")