Alibrown committed on
Commit
9f5b735
·
verified ·
1 Parent(s): 409fb45

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +54 -85
app.py CHANGED
@@ -1,6 +1,4 @@
1
- import os
2
  import streamlit as st
3
- import tempfile
4
  import requests
5
  import json
6
  from PIL import Image
@@ -9,44 +7,10 @@ import base64
9
  import pandas as pd
10
  import zipfile
11
  import PyPDF2
12
-
13
- # ----------------------------------------------------
14
- # 🚨 KRITISCHE FIXES FÜR DEN PERMISSION ERROR
15
- # Zwingt Streamlit, seine temporären/Konfigurationsdateien
16
- # in den beschreibbaren /tmp-Bereich zu schreiben.
17
- # ----------------------------------------------------
18
-
19
- # 1. Temporären, beschreibbaren Pfad erstellen
20
- TEMP_STREAMLIT_HOME = os.path.join(tempfile.gettempdir(), "st_config_workaround")
21
- os.makedirs(TEMP_STREAMLIT_HOME, exist_ok=True)
22
-
23
- # 2. Umgebungsvariable STREAMLIT_HOME setzen
24
- os.environ["STREAMLIT_HOME"] = TEMP_STREAMLIT_HOME
25
- # Auch die Metriken vorsichtshalber deaktivieren
26
- os.environ["STREAMLIT_GATHER_USAGE_STATS"] = "false"
27
-
28
- # 3. Minimale config.toml erstellen, um Schreibversuche zu unterbinden
29
- CONFIG_PATH = os.path.join(TEMP_STREAMLIT_HOME, "config.toml")
30
- CONFIG_CONTENT = """
31
- [browser]
32
- gatherUsageStats = false
33
- """
34
-
35
- if not os.path.exists(CONFIG_PATH):
36
- try:
37
- with open(CONFIG_PATH, "w") as f:
38
- f.write(CONFIG_CONTENT)
39
- print(f"INFO: Streamlit config.toml erfolgreich nach {CONFIG_PATH} geschrieben.")
40
- except Exception as e:
41
- print(f"WARNUNG: Konnte config.toml nicht schreiben: {e}")
42
-
43
- # ----------------------------------------------------
44
- # Ende der Workarounds
45
- # ----------------------------------------------------
46
-
47
 
48
  # --- Konfiguration ---
49
- st.set_page_config(page_title="OpenRouter Free Interface", layout="wide", initial_sidebar_state="expanded")
50
  OPENROUTER_API_BASE = "https://openrouter.ai/api/v1"
51
 
52
  # --- Page Title ---
@@ -54,40 +18,53 @@ st.title("💸 OpenRouter Free-Tier Interface")
54
  st.markdown("""
55
  **Willkommen im All-OpenRouter-Free-Interface Deluxe!**
56
  Chatte mit **kostenlosen (Free-Tier)** Modellen über die OpenRouter API.
 
57
  """)
58
 
59
  # --- Session State Management ---
 
60
  if "messages" not in st.session_state:
61
  st.session_state.messages = []
62
  if "uploaded_content" not in st.session_state:
63
  st.session_state.uploaded_content = None
64
 
65
 
66
- # --- Datei-Verarbeitung ---
67
  def encode_image(image):
 
68
  buf = io.BytesIO()
69
  image.save(buf, format="JPEG")
70
  return base64.b64encode(buf.getvalue()).decode("utf-8")
71
 
72
  def process_file(uploaded_file):
 
73
  file_type = uploaded_file.name.split('.')[-1].lower()
74
  text_exts = ('.txt', '.csv', '.py', '.html', '.js', '.css', '.json', '.xml', '.sql', '.xlsx')
 
75
  if file_type in ["jpg", "jpeg", "png"]:
76
  return {"type": "image", "content": Image.open(uploaded_file).convert('RGB')}
 
77
  if file_type in ["txt"] + [ext.strip('.') for ext in text_exts if ext not in ('.csv', '.xlsx')]:
78
  return {"type": "text", "content": uploaded_file.read().decode("utf-8", errors="ignore")}
 
79
  if file_type in ["csv", "xlsx"]:
80
  try:
81
- df = pd.read_csv(uploaded_file) if file_type == "csv" else pd.read_excel(uploaded_file)
 
 
 
82
  return {"type": "text", "content": df.to_string()}
83
  except Exception as e:
84
  return {"type": "error", "content": f"Fehler beim Lesen der Tabelle: {e}"}
 
85
  if file_type == "pdf":
86
  try:
87
  reader = PyPDF2.PdfReader(uploaded_file)
 
88
  return {"type": "text", "content": "".join(page.extract_text() or "" for page in reader.pages)}
89
  except Exception as e:
90
  return {"type": "error", "content": f"PDF Fehler: {e}"}
 
91
  if file_type == "zip":
92
  try:
93
  with zipfile.ZipFile(uploaded_file) as z:
@@ -95,82 +72,55 @@ def process_file(uploaded_file):
95
  for f in z.infolist():
96
  if not f.is_dir() and f.filename.lower().endswith(text_exts):
97
  content += f"\n📄 {f.filename}:\n"
 
98
  content += z.read(f.filename).decode("utf-8", errors="ignore")
99
  return {"type": "text", "content": content or "ZIP enthält keine lesbaren Textdateien."}
100
  except Exception as e:
101
  return {"type": "error", "content": f"ZIP Fehler: {e}"}
 
102
  return {"type": "error", "content": "Nicht unterstütztes Dateiformat."}
103
 
104
-
105
- # --- Context-Length Fetch ---
106
- def fetch_model_contexts(api_key):
107
- if not api_key:
108
- return {}
109
- headers = {"Authorization": f"Bearer {api_key}"}
110
- try:
111
- res = requests.get(f"{OPENROUTER_API_BASE}/models", headers=headers, timeout=10)
112
- contexts = {}
113
- if res.status_code == 200:
114
- for m in res.json().get("data", []):
115
- mid = m.get("id")
116
- ctx = m.get("context_length", 4096)
117
- contexts[mid] = ctx
118
- return contexts
119
- except Exception as e:
120
- return {}
121
-
122
-
123
  # --- Sidebar ---
124
  with st.sidebar:
125
  st.header("⚙️ API Settings")
126
  api_key = st.text_input("OpenRouter API Key", type="password")
127
 
 
128
  FREE_MODEL_LIST = [
129
  "cognitivecomputations/dolphin-mistral-24b-venice-edition:free",
130
  "deepseek/deepseek-chat-v3",
131
  "google/gemma-2-9b-it",
132
  "mistralai/mistral-7b-instruct-v0.2",
133
  "qwen/qwen2-72b-instruct",
134
- "nousresearch/nous-hermes-2-mixtral-8x7b-dpo",
135
  ]
136
-
137
  model = st.selectbox("Wähle ein Modell", FREE_MODEL_LIST, index=0)
138
-
139
- model_contexts = fetch_model_contexts(api_key)
140
- default_ctx = model_contexts.get(model, 4096)
141
-
142
  temperature = st.slider("Temperature", 0.0, 1.0, 0.7)
143
- max_tokens = st.slider(
144
- f"Max Output Tokens (max {default_ctx})",
145
- 1,
146
- min(default_ctx, 32000),
147
- min(512, default_ctx)
148
- )
149
- st.caption(f"🔢 Model Context Length (Fallback 4096): {default_ctx}")
150
-
151
- st.markdown("---")
152
 
153
- if st.button("🔄 Chat Reset (Full)"):
154
  st.session_state.messages = []
155
- st.session_state.uploaded_content = None
156
- st.experimental_rerun()
157
 
158
  st.markdown("""
159
  ---
160
- 🧠 **Hinweis:** Dies umgeht den Schreibfehler in Umgebungen wie Hugging Face Spaces.
 
161
  """)
162
 
163
-
164
- # --- Datei Upload & Preview Logik ---
165
- uploaded_file = st.file_uploader("Upload File (optional)",
166
  type=["jpg", "jpeg", "png", "txt", "pdf", "zip", "csv", "xlsx", "html", "css", "js", "py"])
167
 
 
168
  if uploaded_file and st.session_state.uploaded_content is None:
169
  st.session_state.uploaded_content = process_file(uploaded_file)
170
 
171
  if st.session_state.uploaded_content:
172
  processed = st.session_state.uploaded_content
173
  st.subheader("📎 Current Attachment:")
 
174
  if processed["type"] == "image":
175
  st.image(processed["content"], caption="Attached Image", width=300)
176
  elif processed["type"] == "text":
@@ -180,19 +130,23 @@ if st.session_state.uploaded_content:
180
 
181
  if st.button("❌ Remove Attachment"):
182
  st.session_state.uploaded_content = None
183
- st.experimental_rerun()
184
 
185
 
186
- # --- Chat Verlauf anzeigen & API Call ---
187
  for msg in st.session_state.messages:
188
  with st.chat_message(msg["role"]):
189
  st.markdown(msg["content"])
190
 
 
 
191
  def call_openrouter(model, messages, temp, max_tok, key):
 
192
  headers = {
193
  "Authorization": f"Bearer {key}",
194
  "Content-Type": "application/json",
195
- "Referer": "https://aicodecraft.io",
 
196
  "X-Title": "OpenRouter-Free-Interface",
197
  }
198
  payload = {
@@ -201,43 +155,58 @@ def call_openrouter(model, messages, temp, max_tok, key):
201
  "temperature": temp,
202
  "max_tokens": max_tok,
203
  }
204
-
205
  res = requests.post(f"{OPENROUTER_API_BASE}/chat/completions", headers=headers, data=json.dumps(payload))
 
206
  if res.status_code == 200:
207
  try:
 
208
  return res.json()["choices"][0]["message"]["content"]
209
  except (KeyError, IndexError):
210
  raise Exception("Fehlerhafte API-Antwort: Konnte Antworttext nicht extrahieren.")
211
  else:
212
  try:
213
  err = res.json()
 
214
  msg = err.get("error", {}).get("message", res.text)
215
  except:
216
  msg = res.text
217
  raise Exception(f"API Error {res.status_code}: {msg}")
218
 
 
 
219
  if prompt := st.chat_input("Deine Nachricht..."):
220
  if not api_key:
221
  st.warning("Bitte trage deinen OpenRouter API Key in der Sidebar ein.")
222
  st.stop()
223
 
 
224
  st.session_state.messages.append({"role": "user", "content": prompt})
225
  with st.chat_message("user"):
226
  st.markdown(prompt)
227
 
 
228
  messages = [{"role": m["role"], "content": m["content"]} for m in st.session_state.messages]
229
 
 
230
  if st.session_state.uploaded_content:
231
  content = st.session_state.uploaded_content
 
 
232
  if content["type"] == "image":
233
  base64_img = encode_image(content["content"])
 
 
234
  messages[-1]["content"] = [
235
  {"type": "text", "text": prompt},
236
  {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_img}"}}
237
  ]
 
 
238
  elif content["type"] == "text":
239
  messages[-1]["content"] += f"\n\n[Attached File Content]\n{content['content']}"
240
 
 
241
  with st.chat_message("assistant"):
242
  with st.spinner(f"Fragend {model}..."):
243
  try:
 
 
1
  import streamlit as st
 
2
  import requests
3
  import json
4
  from PIL import Image
 
7
  import pandas as pd
8
  import zipfile
9
  import PyPDF2
10
+ import os # Wird für die zukünftige Umgebungsvariablen-Nutzung bereitgehalten
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
# --- Configuration ---
st.set_page_config(page_title="OpenRouter Free Interface", layout="wide")
OPENROUTER_API_BASE = "https://openrouter.ai/api/v1"

# --- Page Title ---
st.title("💸 OpenRouter Free-Tier Interface")
st.markdown("""
**Willkommen im All-OpenRouter-Free-Interface Deluxe!**
Chatte mit **kostenlosen (Free-Tier)** Modellen über die OpenRouter API.
Alle Modelle unterliegen den OpenRouter-Ratenbegrenzungen.
""")

# --- Session State Management ---
# Streamlit re-runs the whole script on every interaction, so the chat
# history ('messages') and the current attachment ('uploaded_content')
# must live in session_state to survive reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []
if "uploaded_content" not in st.session_state:
    st.session_state.uploaded_content = None
 
31
 
32
+ # --- Datei-Verarbeitung (Multimodal, basierend auf Gemini-UI) ---
33
def encode_image(image):
    """Serialize a PIL image to JPEG and return it as a base64 UTF-8 string."""
    with io.BytesIO() as stream:
        image.save(stream, format="JPEG")
        raw = stream.getvalue()
    return base64.b64encode(raw).decode("utf-8")
38
 
39
def process_file(uploaded_file):
    """Extract the content of an uploaded file for use in the chat context.

    Supports images (returned as PIL images for multimodal prompts),
    plain text/code files, CSV/XLSX tables, PDFs, and ZIP archives
    containing text files.

    Args:
        uploaded_file: Streamlit UploadedFile-like object (provides
            ``.name`` and ``.read()``).

    Returns:
        dict with keys ``"type"`` ("image" | "text" | "error") and
        ``"content"`` (PIL image, extracted text, or an error message).
    """
    file_type = uploaded_file.name.split('.')[-1].lower()
    text_exts = ('.txt', '.csv', '.py', '.html', '.js', '.css', '.json', '.xml', '.sql', '.xlsx')
    # Extensions handled as raw text; tables get their own pandas branch below.
    plain_text_types = {ext.lstrip('.') for ext in text_exts} - {"csv", "xlsx"}

    if file_type in ("jpg", "jpeg", "png"):
        # Normalize to RGB so re-encoding as JPEG for the API never fails on RGBA/P images.
        return {"type": "image", "content": Image.open(uploaded_file).convert('RGB')}

    if file_type in plain_text_types:
        return {"type": "text", "content": uploaded_file.read().decode("utf-8", errors="ignore")}

    if file_type in ("csv", "xlsx"):
        try:
            df = pd.read_csv(uploaded_file) if file_type == "csv" else pd.read_excel(uploaded_file)
            return {"type": "text", "content": df.to_string()}
        except Exception as e:
            return {"type": "error", "content": f"Fehler beim Lesen der Tabelle: {e}"}

    if file_type == "pdf":
        try:
            reader = PyPDF2.PdfReader(uploaded_file)
            # extract_text() may return None for image-only pages; treat those as "".
            return {"type": "text", "content": "".join(page.extract_text() or "" for page in reader.pages)}
        except Exception as e:
            return {"type": "error", "content": f"PDF Fehler: {e}"}

    if file_type == "zip":
        try:
            with zipfile.ZipFile(uploaded_file) as z:
                content = ""
                for f in z.infolist():
                    if not f.is_dir() and f.filename.lower().endswith(text_exts):
                        content += f"\n📄 {f.filename}:\n"
                        content += z.read(f.filename).decode("utf-8", errors="ignore")
            return {"type": "text", "content": content or "ZIP enthält keine lesbaren Textdateien."}
        except Exception as e:
            return {"type": "error", "content": f"ZIP Fehler: {e}"}

    return {"type": "error", "content": "Nicht unterstütztes Dateiformat."}
82
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83
# --- Sidebar ---
with st.sidebar:
    st.header("⚙️ API Settings")
    api_key = st.text_input("OpenRouter API Key", type="password")

    # --- Manually curated model list (could be swapped for auto-discovery) ---
    FREE_MODEL_LIST = [
        "cognitivecomputations/dolphin-mistral-24b-venice-edition:free",
        "deepseek/deepseek-chat-v3",
        "google/gemma-2-9b-it",
        "mistralai/mistral-7b-instruct-v0.2",
        "qwen/qwen2-72b-instruct",
        "nousresearch/nous-hermes-2-mixtral-8x7b-dpo",  # example of a large free-tier model
    ]
    model = st.selectbox("Wähle ein Modell", FREE_MODEL_LIST, index=0)
    temperature = st.slider("Temperature", 0.0, 1.0, 0.7)
    max_tokens = st.slider("Max Tokens", 1, 4096, 512)

    if st.button("🔄 Chat Reset"):
        st.session_state.messages = []
        st.session_state.uploaded_content = None  # drop the attachment as well
        st.success("Chat-Verlauf und Anhang gelöscht.")

    st.markdown("""
---
🧠 **Hinweis:** Diese Modelle sind **kostenlos**, aber ggf. durch Rate-Limits beschränkt.
Dein API-Key wird nur **lokal** verwendet.
""")
111
 
112
# --- File Upload ---
uploaded_file = st.file_uploader("Upload File (optional)",
                                 type=["jpg", "jpeg", "png", "txt", "pdf", "zip", "csv", "xlsx", "html", "css", "js", "py"])

# Process the upload only once per attachment: uploaded_content doubles as a
# guard so Streamlit reruns don't re-read and re-parse the same file.
if uploaded_file and st.session_state.uploaded_content is None:
    st.session_state.uploaded_content = process_file(uploaded_file)
 
120
  if st.session_state.uploaded_content:
121
  processed = st.session_state.uploaded_content
122
  st.subheader("📎 Current Attachment:")
123
+
124
  if processed["type"] == "image":
125
  st.image(processed["content"], caption="Attached Image", width=300)
126
  elif processed["type"] == "text":
 
130
 
131
  if st.button("❌ Remove Attachment"):
132
  st.session_state.uploaded_content = None
133
+ st.experimental_rerun() # Nötig, um den file_uploader optisch zu resetten
134
 
135
 
136
# --- Render chat history ---
# Streamlit re-executes the script top-to-bottom on every interaction, so the
# full conversation must be redrawn from session_state each time.
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])
140
 
141
+
142
+ # --- API Request Funktion ---
143
def call_openrouter(model, messages, temp, max_tok, key):
    """Call the OpenRouter chat-completions endpoint (OpenAI-compatible schema).

    Args:
        model: OpenRouter model id, e.g. "google/gemma-2-9b-it".
        messages: list of {"role": ..., "content": ...} chat messages.
        temp: sampling temperature.
        max_tok: maximum number of output tokens.
        key: OpenRouter API key.

    Returns:
        The assistant's reply text.

    Raises:
        Exception: on a non-200 response, a malformed response body,
            or (via requests) a request timeout.
    """
    headers = {
        "Authorization": f"Bearer {key}",
        "Content-Type": "application/json",
        # Attribution headers; NOTE(review): the OpenRouter docs spell this
        # one "HTTP-Referer" — confirm which spelling the API expects.
        "Referer": "https://aicodecraft.io",
        "X-Title": "OpenRouter-Free-Interface",
    }
    payload = {
        "model": model,
        "messages": messages,
        "temperature": temp,
        "max_tokens": max_tok,
    }
    # FIX: an explicit timeout keeps the Streamlit script from hanging
    # forever on a dead connection (requests defaults to no timeout).
    res = requests.post(f"{OPENROUTER_API_BASE}/chat/completions",
                        headers=headers, data=json.dumps(payload), timeout=120)

    if res.status_code == 200:
        try:
            # Standard OpenAI-schema location of the reply text.
            return res.json()["choices"][0]["message"]["content"]
        except (KeyError, IndexError):
            raise Exception("Fehlerhafte API-Antwort: Konnte Antworttext nicht extrahieren.")
    else:
        try:
            err = res.json()
            # Prefer the API's specific error message when present.
            msg = err.get("error", {}).get("message", res.text)
        except ValueError:  # FIX: narrowed from bare except — body was not JSON
            msg = res.text
        raise Exception(f"API Error {res.status_code}: {msg}")
175
 
176
+
177
+ # --- Chat Input ---
178
  if prompt := st.chat_input("Deine Nachricht..."):
179
  if not api_key:
180
  st.warning("Bitte trage deinen OpenRouter API Key in der Sidebar ein.")
181
  st.stop()
182
 
183
+ # Nachricht hinzufügen und anzeigen
184
  st.session_state.messages.append({"role": "user", "content": prompt})
185
  with st.chat_message("user"):
186
  st.markdown(prompt)
187
 
188
+ # API Nachrichten vorbereiten (für Chatverlauf)
189
  messages = [{"role": m["role"], "content": m["content"]} for m in st.session_state.messages]
190
 
191
+ # Datei anhängen, falls vorhanden (Multimodalitäts-Handling)
192
  if st.session_state.uploaded_content:
193
  content = st.session_state.uploaded_content
194
+
195
+ # OpenRouter/OpenAI Multimodalität: Bilder als 'url' mit Base64
196
  if content["type"] == "image":
197
  base64_img = encode_image(content["content"])
198
+
199
+ # Aufbau des Multimodal-Contents für die API
200
  messages[-1]["content"] = [
201
  {"type": "text", "text": prompt},
202
  {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_img}"}}
203
  ]
204
+
205
+ # Text-Dateien einfach dem letzten Prompt anhängen
206
  elif content["type"] == "text":
207
  messages[-1]["content"] += f"\n\n[Attached File Content]\n{content['content']}"
208
 
209
+ # Antwort generieren
210
  with st.chat_message("assistant"):
211
  with st.spinner(f"Fragend {model}..."):
212
  try: