Alibrown committed on
Commit
6705130
·
verified ·
1 Parent(s): dab1382

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +86 -53
app.py CHANGED
@@ -7,33 +7,35 @@ import base64
7
  import pandas as pd
8
  import zipfile
9
  import PyPDF2
 
10
 
11
- # --- Page Config ---
12
  st.set_page_config(page_title="OpenRouter + Gemini AI Chat", layout="wide", initial_sidebar_state="expanded")
13
 
14
  OPENROUTER_API_BASE = "https://openrouter.ai/api/v1"
15
 
16
  # --- Title ---
17
- st.title("🤖 OpenRouter + Gemini AI Chat Interface")
18
  st.markdown("""
19
- **Chat with Free-Tier OpenRouter models and optionally upload files (Text, PDF, ZIP, Images) for context.**
20
- 💡 Responses are copyable with a single click.
21
  """)
22
 
23
- # --- Session State ---
24
  if "messages" not in st.session_state:
25
  st.session_state.messages = []
26
  if "uploaded_content" not in st.session_state:
27
  st.session_state.uploaded_content = None
28
 
29
  # --- Utilities ---
 
30
def encode_image(image):
    """Serialize an image object to JPEG and return it as a UTF-8 base64 string."""
    buffer = io.BytesIO()
    image.save(buffer, format="JPEG")
    raw_jpeg = buffer.getvalue()
    return base64.b64encode(raw_jpeg).decode("utf-8")
34
 
35
  def process_file(uploaded_file):
36
- """Process uploaded file (text, image, PDF, ZIP)"""
37
  file_type = uploaded_file.name.split('.')[-1].lower()
38
  text_exts = ('.txt', '.csv', '.py', '.html', '.js', '.css', '.json', '.xml', '.sql', '.xlsx')
39
 
@@ -48,14 +50,14 @@ def process_file(uploaded_file):
48
  df = pd.read_csv(uploaded_file) if file_type == "csv" else pd.read_excel(uploaded_file)
49
  return {"type": "text", "content": df.to_string()}
50
  except Exception as e:
51
- return {"type": "error", "content": f"Failed reading table: {e}"}
52
 
53
  if file_type == "pdf":
54
  try:
55
  reader = PyPDF2.PdfReader(uploaded_file)
56
  return {"type": "text", "content": "".join(page.extract_text() or "" for page in reader.pages)}
57
  except Exception as e:
58
- return {"type": "error", "content": f"PDF Error: {e}"}
59
 
60
  if file_type == "zip":
61
  try:
@@ -65,29 +67,38 @@ def process_file(uploaded_file):
65
  if not f.is_dir() and f.filename.lower().endswith(text_exts):
66
  content += f"\n📄 {f.filename}:\n"
67
  content += z.read(f.filename).decode("utf-8", errors="ignore")
68
- return {"type": "text", "content": content or "ZIP contains no readable text files."}
69
  except Exception as e:
70
- return {"type": "error", "content": f"ZIP Error: {e}"}
71
 
72
- return {"type": "error", "content": "Unsupported file type."}
73
 
 
74
def fetch_model_contexts(api_key):
    """Return a mapping of model id -> context length from OpenRouter's /models endpoint.

    Returns an empty dict when no API key is supplied, the request fails,
    or the endpoint answers with a non-200 status.
    """
    if not api_key:
        return {}
    auth_headers = {"Authorization": f"Bearer {api_key}"}
    try:
        response = requests.get(f"{OPENROUTER_API_BASE}/models", headers=auth_headers, timeout=10)
        if response.status_code != 200:
            return {}
        # Models missing an explicit context_length fall back to 4096.
        return {
            entry.get("id"): entry.get("context_length", 4096)
            for entry in response.json().get("data", [])
        }
    except Exception as e:  # best-effort: degrade to an empty mapping with a UI warning
        st.warning(f"⚠️ Failed to fetch model info: {e}")
        return {}
89
 
90
  def call_openrouter(model, messages, temp, max_tok, key):
 
91
  headers = {
92
  "Authorization": f"Bearer {key}",
93
  "Content-Type": "application/json",
@@ -102,115 +113,137 @@ def call_openrouter(model, messages, temp, max_tok, key):
102
  }
103
 
104
  res = requests.post(f"{OPENROUTER_API_BASE}/chat/completions", headers=headers, data=json.dumps(payload))
 
105
  if res.status_code == 200:
106
  try:
107
  return res.json()["choices"][0]["message"]["content"]
108
  except (KeyError, IndexError):
109
- raise Exception("Invalid API response")
110
  else:
111
  try:
112
  err = res.json()
113
  msg = err.get("error", {}).get("message", res.text)
114
  except:
115
  msg = res.text
116
- raise Exception(f"API Error {res.status_code}: {msg}")
 
117
 
118
  # --- Sidebar ---
119
  with st.sidebar:
120
  st.header("⚙️ API Settings")
121
  api_key = st.text_input("OpenRouter API Key", type="password")
122
 
123
- FREE_MODEL_LIST = [
124
- "cognitivecomputations/dolphin-mistral-24b-venice-edition:free",
 
 
 
 
 
125
  "deepseek/deepseek-chat-v3",
126
  "google/gemma-2-9b-it",
127
  "mistralai/mistral-7b-instruct-v0.2",
128
- "qwen/qwen2-72b-instruct",
129
- "nousresearch/nous-hermes-2-mixtral-8x7b-dpo",
130
- ]
131
-
132
- model = st.selectbox("Select a model", FREE_MODEL_LIST, index=0)
133
 
134
- # Context-length slider
135
- model_contexts = fetch_model_contexts(api_key)
136
  default_ctx = model_contexts.get(model, 4096)
137
- max_tokens = st.slider(f"Max Tokens (max {default_ctx})", 1, min(default_ctx, 32000), min(512, default_ctx))
 
 
 
 
 
 
 
138
 
139
  temperature = st.slider("Temperature", 0.0, 1.0, 0.7)
140
 
141
  if st.button("🔄 Reset Chat"):
142
  st.session_state.messages = []
143
  st.session_state.uploaded_content = None
144
- st.success("Chat and attachment cleared!")
 
 
 
 
 
 
 
145
 
146
- # --- File Upload ---
147
  uploaded_file = st.file_uploader("Upload File (optional)",
148
- type=["jpg","jpeg","png","txt","pdf","zip","csv","xlsx","html","css","js","py"])
149
  if uploaded_file and st.session_state.uploaded_content is None:
150
  st.session_state.uploaded_content = process_file(uploaded_file)
151
 
152
  if st.session_state.uploaded_content:
153
  processed = st.session_state.uploaded_content
154
- st.subheader("📎 Current Attachment:")
155
  if processed["type"] == "image":
156
- st.image(processed["content"], width=300)
157
  elif processed["type"] == "text":
158
  st.text_area("File Preview", processed["content"], height=150)
159
  elif processed["type"] == "error":
160
  st.error(processed["content"])
161
- if st.button("❌ Remove Attachment"):
162
  st.session_state.uploaded_content = None
163
  st.experimental_rerun()
164
 
 
165
  # --- Chat History ---
166
  for msg in st.session_state.messages:
167
  with st.chat_message(msg["role"]):
168
  st.markdown(msg["content"])
169
 
170
- # --- Chat Input ---
171
- if prompt := st.chat_input("Your message..."):
 
172
  if not api_key:
173
- st.warning("Please enter your OpenRouter API Key in the sidebar.")
174
  st.stop()
175
 
 
176
  st.session_state.messages.append({"role": "user", "content": prompt})
177
  with st.chat_message("user"):
178
  st.markdown(prompt)
179
 
 
180
  messages = [{"role": m["role"], "content": m["content"]} for m in st.session_state.messages]
181
 
182
- # Attach file if exists
183
  if st.session_state.uploaded_content:
184
  content = st.session_state.uploaded_content
 
185
  if content["type"] == "image":
186
  base64_img = encode_image(content["content"])
 
187
  messages[-1]["content"] = [
188
  {"type": "text", "text": prompt},
189
  {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_img}"}}
190
  ]
191
  elif content["type"] == "text":
 
192
  messages[-1]["content"] += f"\n\n[Attached File Content]\n{content['content']}"
193
- ###### x
194
- # Generate response
195
  with st.chat_message("assistant"):
196
- with st.spinner(f"Asking {model}..."):
197
  try:
198
  reply = call_openrouter(model, messages, temperature, max_tokens, api_key)
199
 
200
- # Preprocess reply for JS clipboard (safe escaping)
201
- safe_reply = reply.replace("`", "\\`").replace("\n", "\\n").replace('"', '\\"')
202
-
203
- # Clipboard-ready response with JS
204
- st.markdown(f"""
205
- <div style="position: relative;">
206
- <button onclick="navigator.clipboard.writeText(`{safe_reply}`)"
207
- style="position:absolute; right:0; top:0;">📋 Copy</button>
208
- <div style="padding-right:50px;">{reply}</div>
209
- </div>
210
- """, unsafe_allow_html=True)
211
 
 
212
  st.session_state.messages.append({"role": "assistant", "content": reply})
 
 
 
 
213
  except Exception as e:
214
  st.error(str(e))
215
- st.session_state.messages.append({"role": "assistant", "content": f"❌ {str(e)}"})
216
-
 
7
  import pandas as pd
8
  import zipfile
9
  import PyPDF2
10
+ import os
11
 
12
# --- Konfiguration ---
st.set_page_config(
    page_title="OpenRouter + Gemini AI Chat",
    layout="wide",
    initial_sidebar_state="expanded",
)

# Base URL for all OpenRouter REST calls.
OPENROUTER_API_BASE = "https://openrouter.ai/api/v1"

# --- Title ---
st.title("🤖 OpenRouter Free-Tier Hub (Deluxe)")
st.markdown("""
**Chatte mit kostenlosen OpenRouter Modellen.** Du kannst **Dateien** (Text, PDF, ZIP, Bilder) anhängen, um Kontext zu liefern.
""")

# --- Session State Management ---
# Seed per-session keys on first run so later code can read them unconditionally.
for _key, _default in (("messages", []), ("uploaded_content", None)):
    if _key not in st.session_state:
        st.session_state[_key] = _default
28
 
29
  # --- Utilities ---
30
+
31
def encode_image(image):
    """Encode a PIL Image object as a base64 JPEG string (UTF-8 text)."""
    jpeg_buffer = io.BytesIO()
    image.save(jpeg_buffer, format="JPEG")
    encoded = base64.b64encode(jpeg_buffer.getvalue())
    return encoded.decode("utf-8")
36
 
37
  def process_file(uploaded_file):
38
+ """Verarbeitet die hochgeladene Datei (Text, Bild, PDF, ZIP, Tabellen) und extrahiert den Inhalt."""
39
  file_type = uploaded_file.name.split('.')[-1].lower()
40
  text_exts = ('.txt', '.csv', '.py', '.html', '.js', '.css', '.json', '.xml', '.sql', '.xlsx')
41
 
 
50
  df = pd.read_csv(uploaded_file) if file_type == "csv" else pd.read_excel(uploaded_file)
51
  return {"type": "text", "content": df.to_string()}
52
  except Exception as e:
53
+ return {"type": "error", "content": f"Fehler beim Lesen der Tabelle: {e}"}
54
 
55
  if file_type == "pdf":
56
  try:
57
  reader = PyPDF2.PdfReader(uploaded_file)
58
  return {"type": "text", "content": "".join(page.extract_text() or "" for page in reader.pages)}
59
  except Exception as e:
60
+ return {"type": "error", "content": f"PDF Fehler: {e}"}
61
 
62
  if file_type == "zip":
63
  try:
 
67
  if not f.is_dir() and f.filename.lower().endswith(text_exts):
68
  content += f"\n📄 {f.filename}:\n"
69
  content += z.read(f.filename).decode("utf-8", errors="ignore")
70
+ return {"type": "text", "content": content or "ZIP enthält keine lesbaren Textdateien."}
71
  except Exception as e:
72
+ return {"type": "error", "content": f"ZIP Fehler: {e}"}
73
 
74
+ return {"type": "error", "content": "Nicht unterstütztes Dateiformat."}
75
 
76
@st.cache_data(show_spinner=False, ttl=3600)  # cache for 1h to spare OpenRouter rate limits
def _fetch_free_model_contexts(api_key):
    """Fetch {model_id: context_length} for all free models (prompt price == 0).

    Raises on network / JSON errors; the public wrapper handles them.
    """
    headers = {"Authorization": f"Bearer {api_key}"}
    res = requests.get(f"{OPENROUTER_API_BASE}/models", headers=headers, timeout=5)
    contexts = {}
    if res.status_code == 200:
        for m in res.json().get("data", []):
            # BUG FIX: the OpenRouter API reports prices as strings (e.g. "0"),
            # so the old `... == 0` comparison never matched and the free-model
            # filter always came back empty. Compare numerically instead.
            try:
                prompt_price = float(m.get("pricing", {}).get("prompt", 1))
            except (TypeError, ValueError):
                continue  # malformed/missing price: treat as not free
            if prompt_price == 0:
                contexts[m.get("id")] = m.get("context_length", 4096)
    return contexts


def fetch_model_contexts(api_key):
    """Return context lengths for free OpenRouter models.

    Side effect: records the free-model ids in st.session_state.free_models
    (only when at least one model was found, so the hard-coded sidebar
    fallback list still applies otherwise). Returns {} when no API key is
    supplied or the request fails (with a UI warning).
    """
    if not api_key:
        return {}
    try:
        contexts = _fetch_free_model_contexts(api_key)
        # BUG FIX: the session-state write lives OUTSIDE the cached helper.
        # @st.cache_data skips the function body on cache hits, so a side
        # effect inside it was silently dropped for every warm-cache session.
        if contexts:
            st.session_state.free_models = list(contexts.keys())
        return contexts
    except Exception as e:
        st.warning(f"⚠️ Fehler beim Abrufen der Modellinformationen (API-Key, Limit?): {e}")
        return {}
99
 
100
  def call_openrouter(model, messages, temp, max_tok, key):
101
+ """Führt den API-Aufruf an OpenRouter durch (OpenAI-Chat-Schema)."""
102
  headers = {
103
  "Authorization": f"Bearer {key}",
104
  "Content-Type": "application/json",
 
113
  }
114
 
115
  res = requests.post(f"{OPENROUTER_API_BASE}/chat/completions", headers=headers, data=json.dumps(payload))
116
+
117
  if res.status_code == 200:
118
  try:
119
  return res.json()["choices"][0]["message"]["content"]
120
  except (KeyError, IndexError):
121
+ raise Exception("Ungültige API-Antwort: Konnte Antworttext nicht extrahieren.")
122
  else:
123
  try:
124
  err = res.json()
125
  msg = err.get("error", {}).get("message", res.text)
126
  except:
127
  msg = res.text
128
+ raise Exception(f"API Fehler {res.status_code}: {msg}")
129
+
130
 
131
# --- Sidebar ---
with st.sidebar:
    st.header("⚙️ API Settings")
    api_key = st.text_input("OpenRouter API Key", type="password")

    # 1. Fetch context lengths (also refreshes st.session_state.free_models on success).
    model_contexts = fetch_model_contexts(api_key)

    # 2. Free-model list, with a hard-coded fallback for when the API key is
    #    missing or the /models request failed.
    FREE_MODEL_LIST = st.session_state.get("free_models", [
        "cognitivecomputations/dolphin-mistral-24b-venice-edition",  # ":free" suffix removed
        "deepseek/deepseek-chat-v3",
        "google/gemma-2-9b-it",
        "mistralai/mistral-7b-instruct-v0.2",
    ])

    st.subheader("Modell-Konfiguration")

    model = st.selectbox("Wähle ein Modell", FREE_MODEL_LIST, index=0)

    # 3. Max-token slider, capped by the selected model's context window.
    default_ctx = model_contexts.get(model, 4096)

    max_tokens = st.slider(
        f"Max Output Tokens (Total Context: {default_ctx})",
        min_value=1,
        max_value=min(default_ctx, 32768),  # hard upper cap
        value=min(1024, default_ctx),       # realistic default output size
        step=256,
    )

    temperature = st.slider("Temperature", 0.0, 1.0, 0.7)

    if st.button("🔄 Reset Chat"):
        st.session_state.messages = []
        st.session_state.uploaded_content = None
        # BUG FIX: st.experimental_rerun() aborts the current script run, so the
        # old st.success(...) placed after it was unreachable dead code — show
        # the confirmation BEFORE triggering the rerun.
        st.success("Chat und Anhang gelöscht!")
        st.experimental_rerun()  # refresh so the file_uploader widget resets

    st.markdown("""
    ---
    💡 **Hinweis:** Der Context Window (`Total Context`) beinhaltet sowohl Ihre Eingabe (Chat-Verlauf + Anhang) als auch die Ausgabe (`Max Output Tokens`).
    """)
175
+
176
 
177
# --- File Upload & Preview ---
uploaded_file = st.file_uploader(
    "Upload File (optional)",
    type=["jpg", "jpeg", "png", "txt", "pdf", "zip", "csv", "xlsx", "html", "css", "js", "py"],
)
# Process the upload exactly once; the result lives in session state until removed.
if uploaded_file and st.session_state.uploaded_content is None:
    st.session_state.uploaded_content = process_file(uploaded_file)

attachment = st.session_state.uploaded_content
if attachment:
    st.subheader("📎 Aktueller Anhang:")
    kind = attachment["type"]
    if kind == "image":
        st.image(attachment["content"], caption="Attached Image", width=300)
    elif kind == "text":
        st.text_area("File Preview", attachment["content"], height=150)
    elif kind == "error":
        st.error(attachment["content"])
    # NOTE(review): while the file is still held by the uploader widget, the
    # processing branch above re-attaches it on the very next rerun, so removal
    # may not stick — confirm whether the uploader needs a keyed reset.
    if st.button("❌ Anhang entfernen"):
        st.session_state.uploaded_content = None
        st.experimental_rerun()
195
 
196
+
197
# --- Chat History ---
# Re-render the whole conversation on every run (Streamlit reruns top-to-bottom).
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
201
 
202
+
203
# --- Chat Input & Logic ---
if prompt := st.chat_input("Deine Nachricht..."):
    # Guard: without an API key no request can be made.
    if not api_key:
        st.warning("Bitte trage deinen OpenRouter API Key in der Sidebar ein.")
        st.stop()

    # 1. Record and render the user's message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # 2. Build the API payload from the full chat history (OpenAI chat schema).
    messages = [{"role": m["role"], "content": m["content"]} for m in st.session_state.messages]

    # 3. Attach the uploaded file to the latest user turn, if present.
    if st.session_state.uploaded_content:
        content = st.session_state.uploaded_content

        if content["type"] == "image":
            base64_img = encode_image(content["content"])
            # Multimodal message: the image travels as a data-URL 'image_url' part.
            messages[-1]["content"] = [
                {"type": "text", "text": prompt},
                {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_img}"}}
            ]
        elif content["type"] == "text":
            # Text attachments are appended inline to the prompt itself.
            messages[-1]["content"] += f"\n\n[Attached File Content]\n{content['content']}"

    # 4. Generate and render the assistant's reply.
    with st.chat_message("assistant"):
        # BUG FIX: spinner label was the ungrammatical "Fragend {model}...".
        with st.spinner(f"Frage {model}..."):
            try:
                reply = call_openrouter(model, messages, temperature, max_tokens, api_key)

                st.markdown(reply)
                st.session_state.messages.append({"role": "assistant", "content": reply})
                # BUG FIX: removed the "📋 Copy Response" st.button — despite its help
                # text it had no on_click handler and no clipboard JS, so pressing it
                # only triggered a rerun without copying anything. A real clipboard
                # control requires a custom Streamlit component.
            except Exception as e:
                st.error(str(e))
                st.session_state.messages.append({"role": "assistant", "content": f"❌ {str(e)}"})