| | import os |
| | import streamlit as st |
| | import tempfile |
| | import io |
| | import pandas as pd |
| | import zipfile |
| | import PyPDF2 |
| |
|
| | |
| | import google.generativeai as genai |
| | from google.generativeai.errors import APIError |
| | from PIL import Image |
| |
|
| | |
| | |
| | |
| | |
# --- Streamlit home-directory workaround -------------------------------------
# Point STREAMLIT_HOME at a writable temp directory and disable usage-stats
# gathering, both via environment variables and a config.toml in that dir.
TEMP_STREAMLIT_HOME = os.path.join(tempfile.gettempdir(), "st_config_workaround")
os.makedirs(TEMP_STREAMLIT_HOME, exist_ok=True)
os.environ["STREAMLIT_HOME"] = TEMP_STREAMLIT_HOME
os.environ["STREAMLIT_GATHER_USAGE_STATS"] = "false"

CONFIG_PATH = os.path.join(TEMP_STREAMLIT_HOME, "config.toml")
CONFIG_CONTENT = """
[browser]
gatherUsageStats = false
"""

if not os.path.exists(CONFIG_PATH):
    # Best effort: an unwritable temp dir must not crash the app, but only
    # filesystem errors are swallowed — a bare `except:` would also hide
    # KeyboardInterrupt and genuine bugs.
    try:
        with open(CONFIG_PATH, "w") as f:
            f.write(CONFIG_CONTENT)
    except OSError:
        pass
| |
|
| | |
| | |
| | |
| |
|
| |
|
| | |
# --- Page chrome -------------------------------------------------------------
# Wide layout with the sidebar expanded so the API settings are visible on load.
st.set_page_config(page_title="Gemini AI Chat", layout="wide", initial_sidebar_state="expanded")
st.title("🤖 Gemini AI Chat Interface")
st.markdown("""
**Welcome to the Gemini AI Chat Interface!**
Chat seamlessly with Google's advanced Gemini AI models, supporting multiple input types.
""")
| |
|
| | |
# --- Session state -----------------------------------------------------------
# `messages` holds the chat transcript as {"role": ..., "content": ...} dicts;
# `uploaded_content` caches the processed attachment dict (or None).
if "messages" not in st.session_state:
    # BUG FIX: was `st.session_session.messages` — an AttributeError on first run.
    st.session_state.messages = []
if "uploaded_content" not in st.session_state:
    st.session_state.uploaded_content = None
| |
|
| | |
| |
|
| | |
| |
|
def process_file(uploaded_file):
    """Process an uploaded file and extract its content.

    Args:
        uploaded_file: A file-like object with a ``name`` attribute and a
            ``read()`` method (e.g. a Streamlit UploadedFile).

    Returns:
        dict with keys:
            type    -- "image", "text" or "error"
            content -- a PIL RGB image, the extracted text, or an error message
    """
    file_type = uploaded_file.name.split('.')[-1].lower()
    # Extensions (with dot) treated as text; also applied to members inside ZIPs.
    text_extensions = ('.txt', '.csv', '.py', '.html', '.js', '.css', '.json', '.xml', '.sql', '.xlsx')
    # Plain-text formats, minus the tabular ones that get a dedicated pandas reader.
    plain_text_types = {ext.lstrip('.') for ext in text_extensions} - {'csv', 'xlsx'}

    if file_type in ("jpg", "jpeg", "png"):
        # Normalize to RGB so downstream consumers need not handle alpha/palette modes.
        return {"type": "image", "content": Image.open(uploaded_file).convert('RGB')}

    if file_type in plain_text_types:
        return {"type": "text", "content": uploaded_file.read().decode("utf-8", errors='ignore')}

    if file_type in ("csv", "xlsx"):
        try:
            df = pd.read_csv(uploaded_file) if file_type == "csv" else pd.read_excel(uploaded_file)
            return {"type": "text", "content": df.to_string()}
        except Exception as e:
            return {"type": "error", "content": f"Failed to read tabular data: {e}"}

    if file_type == "pdf":
        try:
            reader = PyPDF2.PdfReader(uploaded_file)
            # Call extract_text() once per page (was called twice) and join pages
            # with newlines so text at page boundaries does not run together.
            page_texts = (page.extract_text() for page in reader.pages)
            return {"type": "text", "content": "\n".join(text for text in page_texts if text)}
        except Exception as e:
            return {"type": "error", "content": f"Failed to read PDF: {e}"}

    if file_type == "zip":
        try:
            with zipfile.ZipFile(uploaded_file) as z:
                newline = "\n"
                content = f"ZIP Contents (Processing text files only):{newline}"
                for file_info in z.infolist():
                    if file_info.is_dir():
                        continue
                    if file_info.filename.lower().endswith(text_extensions):
                        with z.open(file_info.filename) as member:
                            file_content = member.read().decode('utf-8', errors='ignore')
                        content += f"{newline}📄 {file_info.filename}:{newline}{file_content}{newline}"
                    else:
                        content += f"{newline}⚠️ Binärdatei/Unbekannte Datei ignoriert: {file_info.filename}{newline}"
            return {"type": "text", "content": content}
        except Exception as e:
            return {"type": "error", "content": f"Failed to process ZIP: {e}"}

    return {"type": "error", "content": "Unsupported file format"}
| |
|
| |
|
| | |
# --- Sidebar: API key, model choice and generation parameters ----------------
with st.sidebar:
    st.header("⚙️ API Settings")

    api_key = st.text_input("Google AI API Key", type="password")

    # All listed models accept multimodal input (text + images/files).
    model_list = [
        "gemini-2.5-flash",
        "gemini-2.5-pro",
        "gemini-1.5-flash",
        "gemini-1.5-pro",
    ]
    model = st.selectbox("Model", model_list)

    st.caption("❗ Alle **2.5er** und **1.5er** Modelle sind **Vision-fähig** (Bilder, Dateien).")

    temperature = st.slider("Temperature", 0.0, 1.0, 0.7)
    max_tokens = st.slider("Max Tokens", 1, 100000, 1000)

    if st.button("🔄 Chat Reset (Full)"):
        st.session_state.messages = []
        st.session_state.uploaded_content = None
        # BUG FIX: st.experimental_rerun() was removed from current Streamlit
        # releases; st.rerun() is the supported replacement.
        st.rerun()
| |
|
| |
|
| | |
# --- File upload & attachment preview ----------------------------------------
uploaded_file = st.file_uploader("Upload File (Image/Text/PDF/ZIP)",
                                 type=["jpg", "jpeg", "png", "txt", "pdf", "zip", "csv", "xlsx", "html", "css", "js", "py"])

# Process only once per attachment; the result is cached in session state until
# cleared. NOTE(review): uploading a different file without clearing first will
# NOT re-process it — confirm this is the intended behavior.
if uploaded_file and st.session_state.uploaded_content is None:
    st.session_state.uploaded_content = process_file(uploaded_file)

if st.session_state.uploaded_content:
    processed = st.session_state.uploaded_content
    st.subheader("Current File Attachment:")

    if processed["type"] == "image":
        st.image(processed["content"], caption="Attached Image", width=300)
    elif processed["type"] == "text":
        st.text_area("File Preview", processed["content"], height=150)
    elif processed["type"] == "error":
        st.error(f"Error processing file: {processed['content']}")

    if st.button("❌ Clear Uploaded File Attachment"):
        st.session_state.uploaded_content = None
        # BUG FIX: st.experimental_rerun() no longer exists in current Streamlit.
        st.rerun()
| |
|
| |
|
| | |
# Replay the stored transcript so the conversation survives Streamlit reruns.
for entry in st.session_state.messages:
    with st.chat_message(entry["role"]):
        st.markdown(entry["content"])
| |
|
| | |
# --- Chat input & model call --------------------------------------------------
if prompt := st.chat_input("Your message..."):
    if not api_key:
        st.warning("API Key benötigt!")
        st.stop()

    genai.configure(api_key=api_key)
    model_instance = genai.GenerativeModel(model)

    # Rebuild the conversation history in the Gemini content format:
    # roles are "user"/"model", each turn is a list of parts.
    role_map = {"user": "user", "assistant": "model"}  # hoisted out of the loop
    contents = [
        {"role": role_map.get(msg["role"]), "parts": [{"text": msg["content"]}]}
        for msg in st.session_state.messages
    ]

    # Current turn: the prompt text plus any cached attachment.
    current_parts = [{"text": prompt}]
    if st.session_state.uploaded_content:
        content_data = st.session_state.uploaded_content
        if content_data["type"] == "image":
            # A PIL image can be passed directly as a content part.
            current_parts.append(content_data["content"])
        elif content_data["type"] == "text":
            # Fold extracted file text into the prompt's text part.
            current_parts[0]["text"] += f"\n\n[Attached File Content]\n{content_data['content']}"

    contents.append({"role": "user", "parts": current_parts})

    # Echo the user turn immediately so it appears before the model answers.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.spinner("Gemini is thinking..."):
        try:
            response = model_instance.generate_content(
                contents,
                # BUG FIX: the google.generativeai SDK expects GenerationConfig
                # here; GenerateContentConfig belongs to the newer google-genai
                # SDK and does not exist under genai.types in this package.
                generation_config=genai.types.GenerationConfig(
                    temperature=temperature,
                    max_output_tokens=max_tokens
                )
            )

            response_text = response.text
            with st.chat_message("assistant"):
                st.markdown(response_text)
            st.session_state.messages.append({"role": "assistant", "content": response_text})

        except APIError as e:
            st.error(f"Gemini API Error: {str(e)}. Bitte prüfen Sie den API Key und die Modell-Wahl.")
        except Exception as e:
            st.error(f"General Error: {str(e)}")
| |
|
| |
|
| | |
# --- Sidebar footer: usage instructions & project credits ---------------------
with st.sidebar:
    st.markdown("""
---
## 📝 Instructions:
1. Enter your **Google AI API Key**
2. Select a **Gemini 2.5/1.5** model (all are multimodal)
3. Adjust parameters (Temperature/Tokens)
4. Upload a file (optional: **Image, Text, PDF, ZIP, CSV/XLSX**)
5. Type your message and press Enter

### About
🔗 [GitHub Profile](https://github.com/volkansah) | 📂 [Project Repository](https://github.com/volkansah/gemini-ai-chat)
""")