Abobasnik committed on
Commit
c7d6c6c
·
verified ·
1 Parent(s): 8b44d62

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +91 -154
src/streamlit_app.py CHANGED
@@ -11,12 +11,11 @@ import json
11
  import base64
12
  from streamlit_cookies_manager import EncryptedCookieManager
13
 
14
- # --- ИНИЦИАЛИЗАЦИЯ КУКИ ---
15
  cookies = EncryptedCookieManager(password="HiperDoubleSecretKey123_Unique")
16
- if not cookies.ready():
17
- st.stop()
18
 
19
- # --- АВТОРИЗАЦИЯ GOOGLE ---
20
  CLIENT_ID = os.environ.get("GOOGLE_CLIENT_ID")
21
  CLIENT_SECRET = os.environ.get("GOOGLE_CLIENT_SECRET")
22
  host = st.context.headers.get("Host", "")
@@ -49,7 +48,9 @@ def load_chats():
49
  db = f"chats_db_{get_user_id()}.json"
50
  if os.path.exists(db):
51
  try:
52
- with open(db, "r", encoding="utf-8") as f: return json.load(f) or {"Чат 1": []}
 
 
53
  except: return {"Чат 1": []}
54
  return {"Чат 1": []}
55
 
@@ -59,31 +60,16 @@ def save_chats(chats):
59
  def encode_image(image_file):
60
  return base64.b64encode(image_file.read()).decode('utf-8')
61
 
62
- # --- TITAN ARCHITECTURE (12 СЛОЕВ) ---
63
- class RoPEMultiHeadAttention(nn.Module):
64
- def __init__(self, n_head, head_size):
65
- super().__init__()
66
- self.n_head, self.head_size = n_head, head_size
67
- self.key = nn.Linear(512, n_head * head_size, bias=False)
68
- self.query = nn.Linear(512, n_head * head_size, bias=False)
69
- self.value = nn.Linear(512, n_head * head_size, bias=False)
70
- self.proj = nn.Linear(n_head * head_size, 512)
71
- def forward(self, x):
72
- B, T, C = x.shape
73
- k = self.key(x).view(B, T, self.n_head, self.head_size).transpose(1, 2)
74
- q = self.query(x).view(B, T, self.n_head, self.head_size).transpose(1, 2)
75
- v = self.value(x).view(B, T, self.n_head, self.head_size).transpose(1, 2)
76
- y = F.scaled_dot_product_attention(q, k, v, is_causal=True)
77
- return self.proj(y.transpose(1, 2).contiguous().view(B, T, C))
78
-
79
  class Block(nn.Module):
80
  def __init__(self):
81
  super().__init__()
82
- self.sa = RoPEMultiHeadAttention(8, 64)
83
- self.ffwd = nn.Sequential(nn.Linear(512, 4 * 512), nn.GELU(), nn.Linear(4 * 512, 512))
84
  self.ln1, self.ln2 = nn.LayerNorm(512), nn.LayerNorm(512)
85
  def forward(self, x):
86
- x = x + self.sa(self.ln1(x))
 
87
  return x + self.ffwd(self.ln2(x))
88
 
89
  class HiperAiV21(nn.Module):
@@ -93,122 +79,85 @@ class HiperAiV21(nn.Module):
93
  self.blocks = nn.Sequential(*[Block() for _ in range(12)])
94
  self.ln_f = nn.LayerNorm(512)
95
  self.lm_head = nn.Linear(512, 50257)
96
- def forward(self, idx, targets=None):
97
  x = self.token_embedding_table(idx)
98
  x = self.blocks(x)
99
- return self.lm_head(self.ln_f(x)), None
100
 
101
  @st.cache_resource
102
  def get_titan_model():
103
  model = HiperAiV21()
104
- if os.path.exists("src/hiper_ai_v21_titan_89M.pth"):
105
- model.load_state_dict(torch.load("src/hiper_ai_v21_titan_89M.pth", map_location="cpu"), strict=False)
106
- model.eval()
107
- return model
108
 
109
  def generate_titan(prompt):
110
- model = get_titan_model(); enc = tiktoken.get_encoding("gpt2")
111
- idx = torch.tensor([enc.encode(f"[ST] User: {prompt}\n[EN] AI:")], dtype=torch.long)
112
- with torch.no_grad():
113
- for _ in range(80):
114
- logits, _ = model(idx[:, -256:]); logits = logits[:, -1, :] / 0.5
115
- probs = F.softmax(logits, dim=-1); next_token = torch.multinomial(probs, num_samples=1)
116
- token_id = next_token.item(); idx = torch.cat((idx, next_token), dim=1)
117
- if "\n" in enc.decode([token_id]): break
118
- res = enc.decode(idx[0].tolist())
119
- try: return res.split("[EN] AI:")[1].strip()
120
- except: return "Я задумался..."
121
 
122
  # --- МОДЕЛИ ---
123
  MODELS_CONFIG = {
124
- "🌌 HiperAi v2.1 (Grew up)": {"engine": "groq", "key_name": "GROQ_API_KEY3", "model": "llama-3.3-70b-versatile", "identity": "You are HiperAI v2.1 Grew up."},
125
- "🧠 HiperAI v2.3 (CORTEX)": {"engine": "groq", "key_name": "GROQ_API_KEY", "model": "llama-3.3-70b-versatile", "identity": "You are HiperAI v2.3 Cortex center."},
126
- "🔥 HiperAI v2.1 (ADULT)": {"engine": "groq", "key_name": "GROQ_API_KEY2", "model": "llama-3.1-8b-instant", "identity": "You are HiperAI v2.1 Adult."},
127
- "🌐 HiperAI v2.2 (NETWORK)": {"engine": "groq", "key_name": "GROQ_API_KEY", "model": "llama-3.1-8b-instant", "identity": "You are HiperAI v2.2 Network."},
128
- "👶 HiperAI v2.1 (BABY)": {"engine": "titan", "model": "titan-89m", "identity": "You are HiperAI v2.1 Baby."},
129
- "🚀 HiperAI v2.0 (Test 1)": {"engine": "groq", "key_name": "GROQ_API_KEY", "model": "llama-3.3-70b-versatile", "identity": "You are HiperAI v2.0 Test."},
130
- "✨ HiperAI v1.1.3 (Stable)": {"engine": "openai", "model": "gpt-4o-mini", "identity": "You are HiperAI v1.1.3."},
131
  }
132
 
133
- # --- ИНТЕРФЕЙС И CSS ---
134
  st.set_page_config(page_title="HiperDouble AI", page_icon="🧬", layout="wide")
135
 
136
  if "chats" not in st.session_state: st.session_state.chats = load_chats()
137
- if "current_chat" not in st.session_state: st.session_state.current_chat = list(st.session_state.chats.keys())[0] if st.session_state.chats else "Чат 1"
138
- if "edit_mode" not in st.session_state: st.session_state.edit_mode = None
 
139
 
140
  st.markdown("""
141
  <style>
142
- @keyframes move-stars { from { background-position: 0 0; } to { background-position: 1000px 1000px; } }
143
-
144
  html, body, [data-testid="stAppViewContainer"] {
145
- background: #020205;
146
- background-image:
147
- radial-gradient(1.5px 1.5px at 20px 30px, #fff, rgba(0,0,0,0)),
148
- radial-gradient(2px 2px at 150px 150px, #fff, rgba(0,0,0,0)),
149
- linear-gradient(rgba(0,0,0,0.65), rgba(0,0,0,0.65)),
150
- url('https://w.wallhaven.cc/full/85/wallhaven-85m76y.png');
151
- background-repeat: repeat;
152
- background-size: 800px 800px, 1200px 1200px, cover, cover;
153
- animation: move-stars 180s linear infinite;
154
- color: #f0f0f5;
155
  }
156
 
 
157
  [data-testid="stBottom"] {
158
  position: fixed !important;
159
  bottom: 0px !important;
160
- left: 0 !important;
161
- right: 0 !important;
162
- background: linear-gradient(to top, rgba(2,2,5,1) 85%, transparent) !important;
163
  padding: 10px 5% 30px 5% !important;
164
- z-index: 999999 !important;
165
  }
166
 
 
167
  .stFileUploader {
168
  position: fixed !important;
169
  bottom: 85px !important;
170
  left: 5% !important;
171
  width: 90% !important;
172
- z-index: 1000000 !important;
173
- }
174
-
175
- .stFileUploader section {
176
- padding: 0 !important;
177
- min-height: 45px !important;
178
- background: rgba(255,255,255,0.05) !important;
179
- border: 1px dashed rgba(255,255,255,0.2) !important;
180
  }
 
181
  .stFileUploader label { display: none !important; }
182
 
183
- .main .block-container {
184
- padding-bottom: 260px !important;
185
- }
186
-
187
  [data-testid="stSidebar"] {
188
- background: rgba(10, 10, 30, 0.92) !important;
189
- backdrop-filter: blur(25px);
190
  }
191
 
 
 
192
  .chat-bubble {
193
- padding: 20px;
194
- border-radius: 22px;
195
- margin-bottom: 15px;
196
- background: rgba(255,255,255,0.08);
197
- border: 1px solid rgba(255,255,255,0.12);
198
- backdrop-filter: blur(10px);
199
  }
200
- .user-bubble { border-left: 5px solid #ff00cc; background: rgba(255, 0, 204, 0.05); }
201
-
202
- .main-title {
203
- font-size: clamp(2.5rem, 8vw, 4rem);
204
- font-weight: 900;
205
- text-align: center;
206
- background: linear-gradient(90deg, #00f2fe, #7367f0, #ff00cc);
207
- -webkit-background-clip: text;
208
- -webkit-text-fill-color: transparent;
209
- }
210
-
211
- footer {visibility: hidden;}
212
  </style>
213
  """, unsafe_allow_html=True)
214
 
@@ -216,81 +165,69 @@ st.markdown("""
216
  with st.sidebar:
217
  st.markdown("### 🧬 HiperDouble")
218
  if not st.session_state.user_email:
219
- st.markdown(f'<a href="{get_google_auth_url()}" target="_self" style="background-color: #4285F4; color: white; padding: 12px; text-decoration: none; border-radius: 8px; display: block; text-align: center; font-weight: bold;">Войти</a>', unsafe_allow_html=True); st.stop()
220
 
221
- st.markdown(f'<div style="padding:12px; background:rgba(255,255,255,0.05); border-radius:12px; margin-bottom:20px;">👤 <b>{st.session_state.user_name}</b></div>', unsafe_allow_html=True)
222
  lang = st.radio("🌐", ["RU", "EN"], horizontal=True)
223
- T = {"RU":{"new_chat":"➕ Новый чат","clear":"🗑️ Очистить чат","model":"🤖 Модель:","placeholder":"Введите запрос...","upload":"+"},"EN":{"new_chat":"➕ New Chat","clear":"🗑️ Clear Chat","model":"🤖 Model:","placeholder":"Enter request...","upload":"+"}}[lang]
224
 
225
- selected_name = st.selectbox(T['model'], list(MODELS_CONFIG.keys())); cfg = MODELS_CONFIG[selected_name]
226
 
227
- if st.button(T['new_chat'], use_container_width=True):
228
- n = f"Чат {len(st.session_state.chats) + 1}"; st.session_state.chats[n] = []; save_chats(st.session_state.chats); st.session_state.current_chat = n; st.rerun()
229
-
230
- # КНОПКА ОЧИСТКИ ТЕКУЩЕГО ЧАТА
231
- if st.button(T['clear'], use_container_width=True, type="secondary"):
 
 
 
232
  st.session_state.chats[st.session_state.current_chat] = []
233
  save_chats(st.session_state.chats); st.rerun()
234
-
235
  st.markdown("---")
236
  for chat_name in list(st.session_state.chats.keys()):
237
- col_c, col_e, col_d = st.columns([0.6, 0.2, 0.2])
238
- if col_c.button(f"💬 {chat_name[:10]}", key=f"sel_{chat_name}"): st.session_state.current_chat = chat_name; st.rerun()
239
- if col_e.button("✏️", key=f"edit_{chat_name}"): st.session_state.edit_mode = chat_name
240
- if col_d.button("🗑️", key=f"del_{chat_name}"):
241
- if len(st.session_state.chats) > 1: del st.session_state.chats[chat_name]; save_chats(st.session_state.chats); st.rerun()
 
 
 
 
 
242
 
243
  # --- MAIN ---
244
  st.markdown('<p class="main-title">HiperDouble AI</p>', unsafe_allow_html=True)
245
 
246
- for m in st.session_state.chats[st.session_state.current_chat]:
247
- r_name, r_class = ("Вы", "user-bubble") if m['role'] == 'user' else ("HiperAi", "")
248
- st.markdown(f"<div class='chat-bubble {r_class}'><b>{r_name}:</b><br>{m['content']}</div>", unsafe_allow_html=True)
 
 
249
 
250
- # ПОЛЕ ВВОДА И КНОПКА (ФИК��ИРОВАНЫ)
251
- up_file = st.file_uploader(T['upload'], type=["jpg","png","jpeg"], key="fix_uploader_v6")
252
- u_input = st.chat_input(T['placeholder'])
253
 
254
  if u_input:
255
  st.session_state.chats[st.session_state.current_chat].append({"role": "user", "content": u_input})
256
- full_sys = f"IMPORTANT: Response in {lang}. {cfg['identity']}"
257
  res_text = ""
258
  try:
259
  if up_file:
260
  b64 = encode_image(up_file)
261
- photo_key = st.secrets.get("GROQ_PHOTO_API_KEY") or os.environ.get("GROQ_PHOTO_API_KEY", "").strip()
262
- try:
263
- resp = requests.post("https://api.groq.com/openai/v1/chat/completions",
264
- json={"model": "llama-3.2-11b-vision-preview", "messages": [{"role": "user", "content": [{"type": "text", "text": u_input}, {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{b64}"}}]}]},
265
- headers={"Authorization": f"Bearer {photo_key}"}, timeout=15)
266
- res_text = resp.json()['choices'][0]['message']['content']
267
- except:
268
- client = OpenAI(api_key=st.secrets.get("OPENAI_API_KEY") or os.environ.get("OPENAI_API_KEY", ""))
269
- r = client.chat.completions.create(model="gpt-4o-mini", messages=[{"role": "user", "content": [{"type": "text", "text": u_input}, {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{b64}"}}]}])
270
- res_text = r.choices[0].message.content
271
  elif cfg["engine"] == "titan": res_text = generate_titan(u_input)
272
  else:
273
- try:
274
- if cfg["engine"] == "openai":
275
- client = OpenAI(api_key=st.secrets.get("OPENAI_API_KEY") or os.environ.get("OPENAI_API_KEY", ""))
276
- r = client.chat.completions.create(model=cfg["model"], messages=[{"role": "system", "content": full_sys}, {"role": "user", "content": u_input}])
277
- res_text = r.choices[0].message.content
278
- else:
279
- key = st.secrets.get(cfg["key_name"]) or os.environ.get(cfg["key_name"], "").strip()
280
- resp = requests.post("https://api.groq.com/openai/v1/chat/completions",
281
- json={"model": cfg["model"], "messages": [{"role": "system", "content": full_sys}] + st.session_state.chats[st.session_state.current_chat][-5:]},
282
- headers={"Authorization": f"Bearer {key}"}, timeout=25)
283
- res_text = resp.json()['choices'][0]['message']['content']
284
- except Exception as e:
285
- if "429" in str(e):
286
- st.warning("Переключаюсь на резерв...")
287
- key = st.secrets.get("GROQ_API_KEY") or os.environ.get("GROQ_API_KEY", "").strip()
288
- resp = requests.post("https://api.groq.com/openai/v1/chat/completions",
289
- json={"model": "llama-3.3-70b-versatile", "messages": [{"role": "system", "content": full_sys}, {"role": "user", "content": u_input}]},
290
- headers={"Authorization": f"Bearer {key}"}, timeout=20)
291
- res_text = resp.json()['choices'][0]['message']['content']
292
- else: raise e
293
-
294
- except Exception as e: res_text = f"❌ Ошибка: {str(e)}"
295
  st.session_state.chats[st.session_state.current_chat].append({"role": "assistant", "content": res_text})
296
  save_chats(st.session_state.chats); st.rerun()
 
 
11
  import base64
12
  from streamlit_cookies_manager import EncryptedCookieManager
13
 
14
# --- COOKIES ---
# NOTE(review): the encryption password was hard-coded. Read it from the
# environment when available (same literal kept as the fallback so existing
# deployments keep decrypting their cookies) so it can be rotated without a
# code change.
cookies = EncryptedCookieManager(password=os.environ.get("COOKIES_PASSWORD", "HiperDoubleSecretKey123_Unique"))
if not cookies.ready(): st.stop()
 
17
 
18
+ # --- AUTH ---
19
  CLIENT_ID = os.environ.get("GOOGLE_CLIENT_ID")
20
  CLIENT_SECRET = os.environ.get("GOOGLE_CLIENT_SECRET")
21
  host = st.context.headers.get("Host", "")
 
48
  db = f"chats_db_{get_user_id()}.json"
49
  if os.path.exists(db):
50
  try:
51
+ with open(db, "r", encoding="utf-8") as f:
52
+ data = json.load(f)
53
+ return data if data else {"Чат 1": []}
54
  except: return {"Чат 1": []}
55
  return {"Чат 1": []}
56
 
 
60
  def encode_image(image_file):
61
  return base64.b64encode(image_file.read()).decode('utf-8')
62
 
63
+ # --- TITAN ARCHITECTURE ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
class Block(nn.Module):
    """One pre-norm transformer block (d_model=512): causal self-attention + GELU MLP."""

    def __init__(self):
        super().__init__()
        # 8 heads over a 512-dim model; batch_first=True so inputs are (B, T, C).
        self.sa = nn.MultiheadAttention(512, 8, batch_first=True)
        self.ffwd = nn.Sequential(nn.Linear(512, 2048), nn.GELU(), nn.Linear(2048, 512))
        self.ln1, self.ln2 = nn.LayerNorm(512), nn.LayerNorm(512)

    def forward(self, x):
        # Hoist the pre-norm: the original evaluated self.ln1(x) three times
        # for the same Q/K/V tensor; compute it once.
        h = self.ln1(x)
        # NOTE(review): is_causal=True with attn_mask=None relies on the fast
        # path enabled by need_weights=False — confirm on the deployed torch version.
        attn_out, _ = self.sa(h, h, h, need_weights=False, is_causal=True)
        x = x + attn_out                     # residual around attention
        return x + self.ffwd(self.ln2(x))    # residual around the MLP
74
 
75
  class HiperAiV21(nn.Module):
 
79
  self.blocks = nn.Sequential(*[Block() for _ in range(12)])
80
  self.ln_f = nn.LayerNorm(512)
81
  self.lm_head = nn.Linear(512, 50257)
82
    def forward(self, idx):
        """Map token ids (B, T) to vocabulary logits (B, T, 50257).

        NOTE(review): no positional encoding is added anywhere visible here —
        attention would be permutation-blind unless the (not shown) embedding
        layer handles position. Confirm intent.
        """
        x = self.token_embedding_table(idx)  # token ids -> 512-dim embeddings
        x = self.blocks(x)                   # 12 stacked transformer Blocks
        return self.lm_head(self.ln_f(x))    # final LayerNorm, then vocab projection
86
 
87
  @st.cache_resource
88
  def get_titan_model():
89
  model = HiperAiV21()
90
+ return model.eval()
 
 
 
91
 
92
  def generate_titan(prompt):
93
+ return "Модель Titan активирована (демо-режим)."
 
 
 
 
 
 
 
 
 
 
94
 
95
# --- MODELS ---
# Maps the UI display name to its backend config:
#   engine   - "groq" (HTTP chat-completions API), "titan" (local demo model) or "openai"
#   key_name - secret / env var holding the API key (groq engines only)
#   model    - provider-side model identifier
#   identity - system-prompt persona string
MODELS_CONFIG = {
    "🌌 HiperAi v2.1 (Grew up)": {"engine": "groq", "key_name": "GROQ_API_KEY3", "model": "llama-3.3-70b-versatile", "identity": "HiperAI v2.1 Grew up."},
    "🧠 HiperAI v2.3 (CORTEX)": {"engine": "groq", "key_name": "GROQ_API_KEY", "model": "llama-3.3-70b-versatile", "identity": "HiperAI v2.3 Cortex."},
    "🔥 HiperAI v2.1 (ADULT)": {"engine": "groq", "key_name": "GROQ_API_KEY2", "model": "llama-3.1-8b-instant", "identity": "HiperAI v2.1 Adult."},
    "🌐 HiperAI v2.2 (NETWORK)": {"engine": "groq", "key_name": "GROQ_API_KEY", "model": "llama-3.1-8b-instant", "identity": "HiperAI v2.2 Network."},
    "👶 HiperAI v2.1 (BABY)": {"engine": "titan", "model": "titan-89m", "identity": "HiperAI v2.1 Baby."},
    # NOTE(review): this key begins with a bare space — the previous revision
    # had a ✨ emoji here; it looks lost to an encoding step. Confirm intent.
    " HiperAI v1.1.3 (Stable)": {"engine": "openai", "model": "gpt-4o-mini", "identity": "HiperAI v1.1.3."},
}
104
 
105
# --- UI AND OPTIMIZED CSS ---
st.set_page_config(page_title="HiperDouble AI", page_icon="🧬", layout="wide")

# Lazily load persisted chats into the session on first run.
if "chats" not in st.session_state: st.session_state.chats = load_chats()
# Guard against KeyError: if the current chat no longer exists (e.g. it was
# just deleted), fall back to the first available chat.
if "current_chat" not in st.session_state or st.session_state.current_chat not in st.session_state.chats:
    st.session_state.current_chat = list(st.session_state.chats.keys())[0]
112
 
113
# Inject global CSS overrides: static background (replacing the previous
# animated one), input bar and uploader pinned to the bottom, sidebar stacking
# order, and chat-bubble styling. Raw HTML is allowed via unsafe_allow_html.
st.markdown("""
<style>
    /* Легкий статический фон вместо тяжелой анимации */
    html, body, [data-testid="stAppViewContainer"] {
        background: #05050a;
        background-image: linear-gradient(rgba(0,0,0,0.6), rgba(0,0,0,0.6)), url('https://w.wallhaven.cc/full/85/wallhaven-85m76y.png');
        background-size: cover;
        background-attachment: fixed;
    }

    /* ФИКСАЦИЯ ВВОДА БЕЗ BLUR (для скорости) */
    [data-testid="stBottom"] {
        position: fixed !important;
        bottom: 0px !important;
        background: #0a0a10 !important;
        z-index: 100 !important; /* Ниже чем Sidebar */
        padding: 10px 5% 30px 5% !important;
        border-top: 1px solid #222;
    }

    /* ФИКСАЦИЯ КНОПКИ ЗАГРУЗКИ */
    .stFileUploader {
        position: fixed !important;
        bottom: 85px !important;
        left: 5% !important;
        width: 90% !important;
        z-index: 101 !important;
    }
    .stFileUploader section { padding: 0 !important; min-height: 40px !important; background: #111 !important; }
    .stFileUploader label { display: none !important; }

    /* ПРАВИЛЬНЫЙ SIDEBAR (Поверх всего) */
    [data-testid="stSidebar"] {
        z-index: 999999 !important;
        background-color: #0a0a15 !important;
    }

    .main .block-container { padding-bottom: 250px !important; }

    .chat-bubble {
        padding: 15px;
        border-radius: 15px;
        margin-bottom: 10px;
        background: rgba(255,255,255,0.05);
        border: 1px solid #222;
    }
    .user-bubble { border-left: 5px solid #ff00cc; }
    .main-title { font-size: 2.5rem; font-weight: 900; text-align: center; color: #fff; }
</style>
""", unsafe_allow_html=True)
163
 
 
165
with st.sidebar:
    st.markdown("### 🧬 HiperDouble")
    # Unauthenticated users only get the Google sign-in link; st.stop() halts
    # the rest of the script for them.
    if not st.session_state.user_email:
        st.markdown(f'<a href="{get_google_auth_url()}" target="_self" style="background-color: #4285F4; color: white; padding: 12px; text-decoration: none; border-radius: 8px; display: block; text-align: center;">Войти</a>', unsafe_allow_html=True); st.stop()

    lang = st.radio("🌐", ["RU", "EN"], horizontal=True)
    # UI string table keyed by the selected language.
    T = {"RU":{"new":"➕ Новый","clear":"🗑️ Очистить","mod":"🤖 Модель:","in":"Введите...","up":"+"},"EN":{"new":"➕ New","clear":"🗑️ Clear","mod":"🤖 Model:","in":"Type...","up":"+"}}[lang]

    selected_name = st.selectbox(T['mod'], list(MODELS_CONFIG.keys())); cfg = MODELS_CONFIG[selected_name]

    if st.button(T['new'], use_container_width=True):
        # BUGFIX: after deletions, f"Чат {len+1}" can collide with an existing
        # chat name and silently wipe its history — pick the first unused index.
        i = len(st.session_state.chats) + 1
        while f"Чат {i}" in st.session_state.chats:
            i += 1
        n = f"Чат {i}"
        st.session_state.chats[n] = []
        save_chats(st.session_state.chats)
        st.session_state.current_chat = n
        st.rerun()

    if st.button(T['clear'], use_container_width=True):
        # Clear only the active chat's history, keeping the chat itself.
        st.session_state.chats[st.session_state.current_chat] = []
        save_chats(st.session_state.chats); st.rerun()

    st.markdown("---")
    # Chat list: select or delete. Iterate a copy of the keys because deletion
    # mutates the dict; deletion keeps at least one chat alive.
    for chat_name in list(st.session_state.chats.keys()):
        col_c, col_d = st.columns([0.8, 0.2])
        if col_c.button(f"💬 {chat_name[:10]}", key=f"s_{chat_name}"):
            st.session_state.current_chat = chat_name
            st.rerun()
        if col_d.button("🗑️", key=f"d_{chat_name}"):
            if len(st.session_state.chats) > 1:
                del st.session_state.chats[chat_name]
                save_chats(st.session_state.chats)
                st.session_state.current_chat = list(st.session_state.chats.keys())[0]
                st.rerun()
198
 
199
# --- MAIN ---
st.markdown('<p class="main-title">HiperDouble AI</p>', unsafe_allow_html=True)

# Render history defensively (avoids KeyError if the chat was just deleted
# and the rerun has not refreshed current_chat yet).
# NOTE(review): m['content'] is injected as raw HTML without escaping — user
# input can break the markup; consider html.escape. Confirm intent.
if st.session_state.current_chat in st.session_state.chats:
    for m in st.session_state.chats[st.session_state.current_chat]:
        r_name, r_class = ("Вы", "user-bubble") if m['role'] == 'user' else ("HiperAi", "")
        st.markdown(f"<div class='chat-bubble {r_class}'><b>{r_name}:</b><br>{m['content']}</div>", unsafe_allow_html=True)

# Input widgets (visually pinned to the bottom by the injected CSS).
up_file = st.file_uploader(T['up'], type=["jpg","png","jpeg"], key="v7")
u_input = st.chat_input(T['in'])
211
 
212
# Handle a submitted message: route to the vision, titan or groq backend,
# append the reply to the active chat, persist, and rerun to redraw.
if u_input:
    st.session_state.chats[st.session_state.current_chat].append({"role": "user", "content": u_input})
    res_text = ""
    try:
        if up_file:
            # Vision request: prompt + image as a base64 data URL via OpenAI.
            b64 = encode_image(up_file)
            client = OpenAI(api_key=st.secrets.get("OPENAI_API_KEY") or os.environ.get("OPENAI_API_KEY", ""))
            r = client.chat.completions.create(model="gpt-4o-mini", messages=[{"role": "user", "content": [{"type": "text", "text": u_input}, {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{b64}"}}]}])
            res_text = r.choices[0].message.content
        elif cfg["engine"] == "titan": res_text = generate_titan(u_input)
        else:
            # Groq chat completion with the persona prompt plus the last
            # 5 messages of history (includes the user turn appended above).
            key = st.secrets.get(cfg["key_name"]) or os.environ.get(cfg["key_name"], "").strip()
            resp = requests.post("https://api.groq.com/openai/v1/chat/completions",
                json={"model": cfg["model"], "messages": [{"role": "system", "content": cfg['identity']}] + st.session_state.chats[st.session_state.current_chat][-5:]},
                headers={"Authorization": f"Bearer {key}"}, timeout=20)
            # BUGFIX: surface HTTP errors (401/429/...) explicitly instead of a
            # confusing KeyError from resp.json()['choices'] on an error payload;
            # the except below still converts it to a friendly message.
            resp.raise_for_status()
            res_text = resp.json()['choices'][0]['message']['content']
    except Exception as e:
        # Best-effort UX: any backend failure becomes a visible chat message.
        res_text = f"⚠️ Ошибка. Попробуйте другую модель. ({str(e)})"
    st.session_state.chats[st.session_state.current_chat].append({"role": "assistant", "content": res_text})
    save_chats(st.session_state.chats); st.rerun()
233
+