Alibrown committed on
Commit
1266d19
·
verified ·
1 Parent(s): 85e23b1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -35
app.py CHANGED
@@ -10,6 +10,8 @@ import PyPDF2
10
  import os
11
 
12
  # --- Konfiguration ---
 
 
13
  st.set_page_config(page_title="OpenRouter Free Interface", layout="wide", initial_sidebar_state="expanded")
14
  OPENROUTER_API_BASE = "https://openrouter.ai/api/v1"
15
 
@@ -18,7 +20,6 @@ st.title("💸 OpenRouter Free-Tier Interface")
18
  st.markdown("""
19
  **Willkommen im All-OpenRouter-Free-Interface Deluxe!**
20
  Chatte mit **kostenlosen (Free-Tier)** Modellen über die OpenRouter API.
21
- Alle Modelle unterliegen den OpenRouter-Ratenbegrenzungen.
22
  """)
23
 
24
  # --- Session State Management ---
@@ -27,39 +28,32 @@ if "messages" not in st.session_state:
27
  if "uploaded_content" not in st.session_state:
28
  st.session_state.uploaded_content = None
29
 
30
-
31
- # --- Datei-Verarbeitung ---
32
def encode_image(image):
    """Serialize a PIL image to a base64-encoded JPEG string (UTF-8 text)."""
    with io.BytesIO() as buffer:
        image.save(buffer, format="JPEG")
        raw = buffer.getvalue()
    return base64.b64encode(raw).decode("utf-8")
36
 
37
-
38
  def process_file(uploaded_file):
39
- # Ihr robuster File Manager ist perfekt
40
  file_type = uploaded_file.name.split('.')[-1].lower()
41
  text_exts = ('.txt', '.csv', '.py', '.html', '.js', '.css', '.json', '.xml', '.sql', '.xlsx')
42
-
43
  if file_type in ["jpg", "jpeg", "png"]:
44
  return {"type": "image", "content": Image.open(uploaded_file).convert('RGB')}
45
-
46
  if file_type in ["txt"] + [ext.strip('.') for ext in text_exts if ext not in ('.csv', '.xlsx')]:
47
  return {"type": "text", "content": uploaded_file.read().decode("utf-8", errors="ignore")}
48
-
49
  if file_type in ["csv", "xlsx"]:
50
  try:
51
  df = pd.read_csv(uploaded_file) if file_type == "csv" else pd.read_excel(uploaded_file)
52
  return {"type": "text", "content": df.to_string()}
53
  except Exception as e:
54
  return {"type": "error", "content": f"Fehler beim Lesen der Tabelle: {e}"}
55
-
56
  if file_type == "pdf":
57
  try:
58
  reader = PyPDF2.PdfReader(uploaded_file)
59
  return {"type": "text", "content": "".join(page.extract_text() or "" for page in reader.pages)}
60
  except Exception as e:
61
  return {"type": "error", "content": f"PDF Fehler: {e}"}
62
-
63
  if file_type == "zip":
64
  try:
65
  with zipfile.ZipFile(uploaded_file) as z:
@@ -71,14 +65,11 @@ def process_file(uploaded_file):
71
  return {"type": "text", "content": content or "ZIP enthält keine lesbaren Textdateien."}
72
  except Exception as e:
73
  return {"type": "error", "content": f"ZIP Fehler: {e}"}
74
-
75
  return {"type": "error", "content": "Nicht unterstütztes Dateiformat."}
76
 
77
-
78
- # --- Context-Length Fetch (MIT CACHING) ---
79
- @st.cache_data(ttl=3600) # Caching für 1 Stunde, um API-Calls zu sparen
80
  def fetch_model_contexts(api_key):
81
- """Lädt alle Modelle + deren context_length."""
82
  if not api_key:
83
  return {}
84
  headers = {"Authorization": f"Bearer {api_key}"}
@@ -92,6 +83,7 @@ def fetch_model_contexts(api_key):
92
  contexts[mid] = ctx
93
  return contexts
94
  except Exception as e:
 
95
  return {}
96
 
97
 
@@ -100,7 +92,6 @@ with st.sidebar:
100
  st.header("⚙️ API Settings")
101
  api_key = st.text_input("OpenRouter API Key", type="password")
102
 
103
- # Free Modelle (Fallback)
104
  FREE_MODEL_LIST = [
105
  "cognitivecomputations/dolphin-mistral-24b-venice-edition:free",
106
  "deepseek/deepseek-chat-v3",
@@ -112,9 +103,11 @@ with st.sidebar:
112
 
113
  model = st.selectbox("Wähle ein Modell", FREE_MODEL_LIST, index=0)
114
 
115
- # Context automatisch anpassen (durch Caching nur bei Bedarf neu geladen)
 
116
  model_contexts = fetch_model_contexts(api_key)
117
- default_ctx = model_contexts.get(model, 4096)
 
118
 
119
  temperature = st.slider("Temperature", 0.0, 1.0, 0.7)
120
  max_tokens = st.slider(
@@ -123,32 +116,29 @@ with st.sidebar:
123
  min(default_ctx, 32000),
124
  min(512, default_ctx)
125
  )
126
- st.caption(f"🔢 Model Context Length: {default_ctx}")
127
 
128
  st.markdown("---")
129
 
130
- # Verbesserter Reset-Button
131
  if st.button("🔄 Chat Reset (Full)"):
132
  st.session_state.messages = []
133
  st.session_state.uploaded_content = None
134
- # Rerun, um st.file_uploader zu leeren und die App in den Startzustand zu versetzen
135
  st.experimental_rerun()
136
 
137
  st.markdown("""
138
  ---
139
- 🧠 **Hinweis:** Dein API-Key wird nur **lokal** verwendet, um Anfragen an OpenRouter zu authentifizieren.
140
  """)
141
 
142
 
143
- # --- Datei Upload & Preview Logik ---
144
  uploaded_file = st.file_uploader("Upload File (optional)",
145
  type=["jpg", "jpeg", "png", "txt", "pdf", "zip", "csv", "xlsx", "html", "css", "js", "py"])
146
 
147
- # NEUE/KORRIGIERTE LOGIK: Nur verarbeiten, wenn eine neue Datei hochgeladen wird und KEIN Inhalt im State ist
148
  if uploaded_file and st.session_state.uploaded_content is None:
149
  st.session_state.uploaded_content = process_file(uploaded_file)
150
- # KEIN st.experimental_rerun() hier! Streamlit rendert die Vorschau beim nächsten regulären Durchlauf.
151
- # Siehe Gemini Code: Dort fehlt dieser Rerun auch.
152
 
153
  if st.session_state.uploaded_content:
154
  processed = st.session_state.uploaded_content
@@ -160,20 +150,18 @@ if st.session_state.uploaded_content:
160
  elif processed["type"] == "error":
161
  st.error(processed["content"])
162
 
163
- # Anhang einzeln entfernen
164
  if st.button("❌ Remove Attachment"):
165
  st.session_state.uploaded_content = None
166
- st.experimental_rerun() # Hier ist der Rerun notwendig, um den uploader zu leeren
167
 
168
 
169
- # --- Chat Verlauf anzeigen ---
170
  for msg in st.session_state.messages:
171
  with st.chat_message(msg["role"]):
172
  st.markdown(msg["content"])
173
 
174
-
175
- # --- API Call Funktion ---
176
  def call_openrouter(model, messages, temp, max_tok, key):
 
177
  headers = {
178
  "Authorization": f"Bearer {key}",
179
  "Content-Type": "application/json",
@@ -201,8 +189,6 @@ def call_openrouter(model, messages, temp, max_tok, key):
201
  msg = res.text
202
  raise Exception(f"API Error {res.status_code}: {msg}")
203
 
204
-
205
- # --- Chat Input ---
206
  if prompt := st.chat_input("Deine Nachricht..."):
207
  if not api_key:
208
  st.warning("Bitte trage deinen OpenRouter API Key in der Sidebar ein.")
@@ -218,13 +204,11 @@ if prompt := st.chat_input("Deine Nachricht..."):
218
  content = st.session_state.uploaded_content
219
  if content["type"] == "image":
220
  base64_img = encode_image(content["content"])
221
- # OpenRouter Multimodalität (OpenAI-Schema)
222
  messages[-1]["content"] = [
223
  {"type": "text", "text": prompt},
224
  {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_img}"}}
225
  ]
226
  elif content["type"] == "text":
227
- # Text-Dateien einfach dem letzten Prompt anhängen
228
  messages[-1]["content"] += f"\n\n[Attached File Content]\n{content['content']}"
229
 
230
  with st.chat_message("assistant"):
 
10
  import os
11
 
12
  # --- Konfiguration ---
13
+ # ACHTUNG: Der 'PermissionError' tritt bei Streamlit-Initialisierung auf.
14
+ # Ein Code-Fix kann das Problem nur umgehen, nicht beheben.
15
  st.set_page_config(page_title="OpenRouter Free Interface", layout="wide", initial_sidebar_state="expanded")
16
  OPENROUTER_API_BASE = "https://openrouter.ai/api/v1"
17
 
 
20
  st.markdown("""
21
  **Willkommen im All-OpenRouter-Free-Interface Deluxe!**
22
  Chatte mit **kostenlosen (Free-Tier)** Modellen über die OpenRouter API.
 
23
  """)
24
 
25
  # --- Session State Management ---
 
28
  if "uploaded_content" not in st.session_state:
29
  st.session_state.uploaded_content = None
30
 
31
+ # --- Datei-Verarbeitung (Unverändert, da perfekt) ---
 
32
def encode_image(image):
    """Encode a PIL image as a base64 JPEG string.

    JPEG has no alpha channel, so Pillow raises OSError when saving
    RGBA- or P-mode images as JPEG. Convert such images to RGB first
    so the function works even when the caller forgot to convert.
    (Grayscale "L" is accepted by the JPEG encoder and left as-is.)
    """
    # Defensive: only convert when the mode is known to be incompatible.
    if getattr(image, "mode", "RGB") not in ("RGB", "L"):
        image = image.convert("RGB")
    buf = io.BytesIO()
    image.save(buf, format="JPEG")
    return base64.b64encode(buf.getvalue()).decode("utf-8")
36
 
 
37
  def process_file(uploaded_file):
 
38
  file_type = uploaded_file.name.split('.')[-1].lower()
39
  text_exts = ('.txt', '.csv', '.py', '.html', '.js', '.css', '.json', '.xml', '.sql', '.xlsx')
40
+ # ... (Rest der process_file Funktion, da sie korrekt ist)
41
  if file_type in ["jpg", "jpeg", "png"]:
42
  return {"type": "image", "content": Image.open(uploaded_file).convert('RGB')}
 
43
  if file_type in ["txt"] + [ext.strip('.') for ext in text_exts if ext not in ('.csv', '.xlsx')]:
44
  return {"type": "text", "content": uploaded_file.read().decode("utf-8", errors="ignore")}
 
45
  if file_type in ["csv", "xlsx"]:
46
  try:
47
  df = pd.read_csv(uploaded_file) if file_type == "csv" else pd.read_excel(uploaded_file)
48
  return {"type": "text", "content": df.to_string()}
49
  except Exception as e:
50
  return {"type": "error", "content": f"Fehler beim Lesen der Tabelle: {e}"}
 
51
  if file_type == "pdf":
52
  try:
53
  reader = PyPDF2.PdfReader(uploaded_file)
54
  return {"type": "text", "content": "".join(page.extract_text() or "" for page in reader.pages)}
55
  except Exception as e:
56
  return {"type": "error", "content": f"PDF Fehler: {e}"}
 
57
  if file_type == "zip":
58
  try:
59
  with zipfile.ZipFile(uploaded_file) as z:
 
65
  return {"type": "text", "content": content or "ZIP enthält keine lesbaren Textdateien."}
66
  except Exception as e:
67
  return {"type": "error", "content": f"ZIP Fehler: {e}"}
 
68
  return {"type": "error", "content": "Nicht unterstütztes Dateiformat."}
69
 
70
+ # --- Context-Length Fetch (OHNE CACHING, um Dateizugriffsfehler zu vermeiden) ---
 
 
71
  def fetch_model_contexts(api_key):
72
+ """Lädt alle Modelle + deren context_length. Nur aufrufen, wenn API-Key gesetzt ist."""
73
  if not api_key:
74
  return {}
75
  headers = {"Authorization": f"Bearer {api_key}"}
 
83
  contexts[mid] = ctx
84
  return contexts
85
  except Exception as e:
86
+ # st.warning weggelassen, um Render-Loop-Probleme zu vermeiden
87
  return {}
88
 
89
 
 
92
  st.header("⚙️ API Settings")
93
  api_key = st.text_input("OpenRouter API Key", type="password")
94
 
 
95
  FREE_MODEL_LIST = [
96
  "cognitivecomputations/dolphin-mistral-24b-venice-edition:free",
97
  "deepseek/deepseek-chat-v3",
 
103
 
104
  model = st.selectbox("Wähle ein Modell", FREE_MODEL_LIST, index=0)
105
 
106
+ # 🔑 WICHTIG: Context-Länge holen ABER mit Fallback
107
+ # Der Aufruf erfolgt jetzt nur einmal, wenn die Sidebar geladen wird
108
  model_contexts = fetch_model_contexts(api_key)
109
+ # Verwende 4096 als stabilen Fallback, wenn der API-Key fehlt oder der Fetch fehlschlägt
110
+ default_ctx = model_contexts.get(model, 4096)
111
 
112
  temperature = st.slider("Temperature", 0.0, 1.0, 0.7)
113
  max_tokens = st.slider(
 
116
  min(default_ctx, 32000),
117
  min(512, default_ctx)
118
  )
119
+ st.caption(f"🔢 Model Context Length (Fallback 4096): {default_ctx}")
120
 
121
  st.markdown("---")
122
 
 
123
  if st.button("🔄 Chat Reset (Full)"):
124
  st.session_state.messages = []
125
  st.session_state.uploaded_content = None
 
126
  st.experimental_rerun()
127
 
128
  st.markdown("""
129
  ---
130
+ 🧠 **Hinweis:** Das Laden der Context-Länge hängt vom API-Key ab. Falls Fehler auftreten, liegt es an Berechtigungen oder Rate-Limits.
131
  """)
132
 
133
 
134
+ # --- Datei Upload & Preview Logik (KORRIGIERT) ---
135
  uploaded_file = st.file_uploader("Upload File (optional)",
136
  type=["jpg", "jpeg", "png", "txt", "pdf", "zip", "csv", "xlsx", "html", "css", "js", "py"])
137
 
138
+ # Logik wie in der Gemini UI: Nur verarbeiten, wenn eine neue Datei hochgeladen wird und KEIN Inhalt im State ist
139
  if uploaded_file and st.session_state.uploaded_content is None:
140
  st.session_state.uploaded_content = process_file(uploaded_file)
141
+ # KEIN st.experimental_rerun() hier.
 
142
 
143
  if st.session_state.uploaded_content:
144
  processed = st.session_state.uploaded_content
 
150
  elif processed["type"] == "error":
151
  st.error(processed["content"])
152
 
 
153
  if st.button("❌ Remove Attachment"):
154
  st.session_state.uploaded_content = None
155
+ st.experimental_rerun()
156
 
157
 
158
+ # --- Chat Verlauf anzeigen & API Call (Unverändert, da Logik korrekt) ---
159
  for msg in st.session_state.messages:
160
  with st.chat_message(msg["role"]):
161
  st.markdown(msg["content"])
162
 
 
 
163
  def call_openrouter(model, messages, temp, max_tok, key):
164
+ # ... (API Call Funktion)
165
  headers = {
166
  "Authorization": f"Bearer {key}",
167
  "Content-Type": "application/json",
 
189
  msg = res.text
190
  raise Exception(f"API Error {res.status_code}: {msg}")
191
 
 
 
192
  if prompt := st.chat_input("Deine Nachricht..."):
193
  if not api_key:
194
  st.warning("Bitte trage deinen OpenRouter API Key in der Sidebar ein.")
 
204
  content = st.session_state.uploaded_content
205
  if content["type"] == "image":
206
  base64_img = encode_image(content["content"])
 
207
  messages[-1]["content"] = [
208
  {"type": "text", "text": prompt},
209
  {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_img}"}}
210
  ]
211
  elif content["type"] == "text":
 
212
  messages[-1]["content"] += f"\n\n[Attached File Content]\n{content['content']}"
213
 
214
  with st.chat_message("assistant"):