Update app.py
app.py CHANGED
@@ -1,136 +1,119 @@
+import os
 import streamlit as st
+import tempfile
 import requests
 import json
-
-
-
-
-
-
-
+# Removed imports: PIL.Image, io, base64, pandas, zipfile, PyPDF2,
+# since the file manager is no longer needed.
+
+# ----------------------------------------------------
+# 🚨 CRITICAL FIXES FOR THE PERMISSION ERROR
+# Forces Streamlit to write its temporary/configuration
+# files into the writable /tmp area.
+# ----------------------------------------------------
+
+# 1. Create a temporary, writable path
+TEMP_STREAMLIT_HOME = os.path.join(tempfile.gettempdir(), "st_config_workaround")
+os.makedirs(TEMP_STREAMLIT_HOME, exist_ok=True)
+
+# 2. Set environment variables
+os.environ["STREAMLIT_HOME"] = TEMP_STREAMLIT_HOME
+os.environ["STREAMLIT_GATHER_USAGE_STATS"] = "false"
+
+# 3. Create a minimal config.toml to suppress further write attempts
+CONFIG_PATH = os.path.join(TEMP_STREAMLIT_HOME, "config.toml")
+CONFIG_CONTENT = """
+[browser]
+gatherUsageStats = false
+"""
+
+if not os.path.exists(CONFIG_PATH):
+    try:
+        with open(CONFIG_PATH, "w") as f:
+            f.write(CONFIG_CONTENT)
+    except Exception as e:
+        print(f"WARNING: Could not write config.toml: {e}")
+
+# ----------------------------------------------------
+# End of the workarounds
+# ----------------------------------------------------

 # --- Configuration ---
-
+# Note: the 'wide' layout setting is kept
+st.set_page_config(page_title="OpenRouter Minimal Chat UI", layout="wide")
 OPENROUTER_API_BASE = "https://openrouter.ai/api/v1"

 # --- Page Title ---
-st.title("💸 OpenRouter
+st.title("💸 OpenRouter Minimal Chat Interface")
 st.markdown("""
-**Willkommen im
-Chatte mit **kostenlosen (Free-Tier)** Modellen über die OpenRouter API.
-Alle Modelle unterliegen den OpenRouter-Ratenbegrenzungen.
+**Willkommen im OpenRouter Minimal Chat Interface!**
+Chatte mit **kostenlosen (Free-Tier)** Modellen über die OpenRouter API. Nur Text-Chat.
 """)

 # --- Session State Management ---
-# Use the unified key 'messages'
 if "messages" not in st.session_state:
     st.session_state.messages = []
-
-    st.session_state.uploaded_content = None
+    # st.session_state.uploaded_content has been removed.

+# --- Context-length fetch (kept, since the slider depends on it) ---
+def fetch_model_contexts(api_key):
+    """Loads all models plus their context_length."""
+    if not api_key:
+        return {}
+    headers = {"Authorization": f"Bearer {api_key}"}
+    try:
+        res = requests.get(f"{OPENROUTER_API_BASE}/models", headers=headers, timeout=10)
+        contexts = {}
+        if res.status_code == 200:
+            for m in res.json().get("data", []):
+                mid = m.get("id")
+                ctx = m.get("context_length", 4096)
+                contexts[mid] = ctx
+        return contexts
+    except Exception as e:
+        return {}

-# --- File processing (multimodal, based on the Gemini UI) ---
-def encode_image(image):
-    """Encodes a PIL Image object into a Base64 string."""
-    buf = io.BytesIO()
-    image.save(buf, format="JPEG")
-    return base64.b64encode(buf.getvalue()).decode("utf-8")
-
-def process_file(uploaded_file):
-    """Processes the uploaded file (text, image, PDF, ZIP) and extracts its content."""
-    file_type = uploaded_file.name.split('.')[-1].lower()
-    text_exts = ('.txt', '.csv', '.py', '.html', '.js', '.css', '.json', '.xml', '.sql', '.xlsx')
-
-    if file_type in ["jpg", "jpeg", "png"]:
-        return {"type": "image", "content": Image.open(uploaded_file).convert('RGB')}
-
-    if file_type in ["txt"] + [ext.strip('.') for ext in text_exts if ext not in ('.csv', '.xlsx')]:
-        return {"type": "text", "content": uploaded_file.read().decode("utf-8", errors="ignore")}
-
-    if file_type in ["csv", "xlsx"]:
-        try:
-            if file_type == "csv":
-                df = pd.read_csv(uploaded_file)
-            else:  # xlsx
-                df = pd.read_excel(uploaded_file)
-            return {"type": "text", "content": df.to_string()}
-        except Exception as e:
-            return {"type": "error", "content": f"Fehler beim Lesen der Tabelle: {e}"}
-
-    if file_type == "pdf":
-        try:
-            reader = PyPDF2.PdfReader(uploaded_file)
-            # Extract the text and join all pages
-            return {"type": "text", "content": "".join(page.extract_text() or "" for page in reader.pages)}
-        except Exception as e:
-            return {"type": "error", "content": f"PDF Fehler: {e}"}
-
-    if file_type == "zip":
-        try:
-            with zipfile.ZipFile(uploaded_file) as z:
-                content = "ZIP Contents:\n"
-                for f in z.infolist():
-                    if not f.is_dir() and f.filename.lower().endswith(text_exts):
-                        content += f"\n📄 {f.filename}:\n"
-                        # Make sure reading and decoding stay robust
-                        content += z.read(f.filename).decode("utf-8", errors="ignore")
-            return {"type": "text", "content": content or "ZIP enthält keine lesbaren Textdateien."}
-        except Exception as e:
-            return {"type": "error", "content": f"ZIP Fehler: {e}"}
-
-    return {"type": "error", "content": "Nicht unterstütztes Dateiformat."}

 # --- Sidebar ---
 with st.sidebar:
     st.header("⚙️ API Settings")
     api_key = st.text_input("OpenRouter API Key", type="password")

-    # --- Manual models
+    # --- Manual models ---
     FREE_MODEL_LIST = [
         "cognitivecomputations/dolphin-mistral-24b-venice-edition:free",
         "deepseek/deepseek-chat-v3",
         "google/gemma-2-9b-it",
         "mistralai/mistral-7b-instruct-v0.2",
         "qwen/qwen2-72b-instruct",
-        "nousresearch/nous-hermes-2-mixtral-8x7b-dpo",
+        "nousresearch/nous-hermes-2-mixtral-8x7b-dpo",
     ]
     model = st.selectbox("Wähle ein Modell", FREE_MODEL_LIST, index=0)
+
+    # Fetch the context length (with fallback)
+    model_contexts = fetch_model_contexts(api_key)
+    default_ctx = model_contexts.get(model, 4096)
+
     temperature = st.slider("Temperature", 0.0, 1.0, 0.7)
-    max_tokens = st.slider(
+    max_tokens = st.slider(
+        f"Max Output Tokens (max {default_ctx})",
+        1,
+        min(default_ctx, 32000),
+        min(512, default_ctx)
+    )
+    st.caption(f"🔢 Model Context Length (Fallback 4096): {default_ctx}")

     if st.button("🔄 Chat Reset"):
         st.session_state.messages = []
-        st.
-        st.
+        st.success("Chat-Verlauf gelöscht.")
+        st.experimental_rerun()

     st.markdown("""
     ---
-
-    Dein API-Key wird nur **lokal** verwendet.
+    **Minimal UI:** Nur Text-Chat.
     """)

-    # --- File upload ---
-    uploaded_file = st.file_uploader("Upload File (optional)",
-        type=["jpg", "jpeg", "png", "txt", "pdf", "zip", "csv", "xlsx", "html", "css", "js", "py"])
-
-    # Logic for file processing and preview
-    if uploaded_file and st.session_state.uploaded_content is None:
-        st.session_state.uploaded_content = process_file(uploaded_file)
-
-    if st.session_state.uploaded_content:
-        processed = st.session_state.uploaded_content
-        st.subheader("📎 Current Attachment:")
-
-        if processed["type"] == "image":
-            st.image(processed["content"], caption="Attached Image", width=300)
-        elif processed["type"] == "text":
-            st.text_area("File Preview", processed["content"], height=150)
-        elif processed["type"] == "error":
-            st.error(processed["content"])
-
-        if st.button("❌ Remove Attachment"):
-            st.session_state.uploaded_content = None
-            st.experimental_rerun()  # Needed to visually reset the file_uploader
+    # --- File upload section removed entirely ---


 # --- Display chat history ---
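The reset handler added above calls st.experimental_rerun(). That API was deprecated in Streamlit 1.27 in favor of st.rerun(), and newer releases remove it entirely, so on an up-to-date Space the reset button may raise an AttributeError. A minimal compatibility shim, illustrative only and not part of this commit:

# Sketch: prefer st.rerun() where it exists, fall back on older Streamlit.
rerun = getattr(st, "rerun", None) or st.experimental_rerun
rerun()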
@@ -139,15 +122,13 @@ for msg in st.session_state.messages:
         st.markdown(msg["content"])


-# --- API request function ---
+# --- API request function (unchanged) ---
 def call_openrouter(model, messages, temp, max_tok, key):
-    """Performs the API call to OpenRouter (OpenAI schema)."""
     headers = {
         "Authorization": f"Bearer {key}",
         "Content-Type": "application/json",
-
-        "
-        "X-Title": "OpenRouter-Free-Interface",
+        "Referer": "https://aicodecraft.io",
+        "X-Title": "OpenRouter-Minimal-Interface",
     }
     payload = {
         "model": model,
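This hunk sets a plain Referer header for attribution. OpenRouter's documentation names its optional attribution headers HTTP-Referer and X-Title; a sketch of the documented form, reusing the commit's values:

# Header names as documented by OpenRouter; values taken from the commit.
headers = {
    "Authorization": f"Bearer {key}",
    "Content-Type": "application/json",
    "HTTP-Referer": "https://aicodecraft.io",
    "X-Title": "OpenRouter-Minimal-Interface",
}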
@@ -160,14 +141,12 @@ def call_openrouter(model, messages, temp, max_tok, key):

     if res.status_code == 200:
         try:
-            # FIXED: stable extraction of the response
             return res.json()["choices"][0]["message"]["content"]
         except (KeyError, IndexError):
             raise Exception("Fehlerhafte API-Antwort: Konnte Antworttext nicht extrahieren.")
     else:
         try:
             err = res.json()
-            # Try to find the specific error message
             msg = err.get("error", {}).get("message", res.text)
         except:
             msg = res.text
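The request body and the POST itself sit in the unchanged span between the previous hunk and this one, so the diff does not show them. Assuming the OpenAI-style chat-completions schema that OpenRouter exposes, the elided middle of call_openrouter presumably looks roughly like this; every field except "model" is inferred rather than read from the diff:

# Sketch of the elided, unchanged middle of call_openrouter (inferred).
payload = {
    "model": model,
    "messages": messages,
    "temperature": temp,
    "max_tokens": max_tok,
}
res = requests.post(
    f"{OPENROUTER_API_BASE}/chat/completions",
    headers=headers,
    json=payload,
    timeout=120,
)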
@@ -188,24 +167,6 @@ if prompt := st.chat_input("Deine Nachricht..."):
     # Prepare the API messages (for the chat history)
     messages = [{"role": m["role"], "content": m["content"]} for m in st.session_state.messages]

-    # Attach a file if present (multimodality handling)
-    if st.session_state.uploaded_content:
-        content = st.session_state.uploaded_content
-
-        # OpenRouter/OpenAI multimodality: images as 'url' with Base64
-        if content["type"] == "image":
-            base64_img = encode_image(content["content"])
-
-            # Build the multimodal content for the API
-            messages[-1]["content"] = [
-                {"type": "text", "text": prompt},
-                {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_img}"}}
-            ]
-
-        # Simply append text files to the last prompt
-        elif content["type"] == "text":
-            messages[-1]["content"] += f"\n\n[Attached File Content]\n{content['content']}"
-
     # Generate the response
     with st.chat_message("assistant"):
         with st.spinner(f"Fragend {model}..."):
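As a quick sanity check outside Streamlit, the chat endpoint the app calls can be exercised from a plain script. Note that only the first entry in FREE_MODEL_LIST carries the :free suffix; whether the other IDs resolve to free-tier variants depends on OpenRouter's current catalog. The OPENROUTER_API_KEY environment variable and the chosen model ID are assumptions for this sketch:

# Standalone smoke test; illustrative, not part of the commit.
import os
import requests

API_BASE = "https://openrouter.ai/api/v1"
key = os.environ["OPENROUTER_API_KEY"]  # assumed to be set for this sketch

res = requests.post(
    f"{API_BASE}/chat/completions",
    headers={"Authorization": f"Bearer {key}"},
    json={
        "model": "mistralai/mistral-7b-instruct-v0.2",  # one entry from FREE_MODEL_LIST
        "messages": [{"role": "user", "content": "Say hi."}],
        "max_tokens": 32,
    },
    timeout=60,
)
res.raise_for_status()
print(res.json()["choices"][0]["message"]["content"])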