Gems234 committed on
Commit
a48aff7
·
verified ·
1 Parent(s): 51a0e10

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -273
app.py CHANGED
@@ -7,7 +7,7 @@ from llama_cpp import Llama
7
 
8
  # -------------------------
9
  # TÉLÉCHARGEMENT DU MODÈLE HUGGING FACE
10
- # ------------------------
11
  MODEL_REPO = "mradermacher/Alisia-7B-it-GGUF"
12
  MODEL_NAME = "Alisia-7B-it.Q4_K_M.gguf"
13
  MODEL_PATH = f"/tmp/{MODEL_NAME}"
@@ -41,7 +41,7 @@ print("⚡ Chargement du modèle Alisia-7B-it depuis Hugging Face...")
41
  llm = Llama(
42
  model_path=MODEL_PATH,
43
  n_ctx=2048,
44
- n_gpu_layers=0, # Désactivé pour CPU
45
  n_threads=8,
46
  verbose=False
47
  )
@@ -66,13 +66,8 @@ def get_conv_names():
66
  return list(conversations.keys())
67
 
68
  def build_conversation_prompt(history, new_message):
69
- """
70
- Construit le prompt complet avec l'historique de conversation
71
- System prompt UNIQUEMENT au début, ensuite seulement les Q/R
72
- """
73
  prompt = ""
74
 
75
- # System prompt UNIQUEMENT si c'est le tout premier message de toute conversation
76
  if not any(any(conv) for conv in conversations.values()):
77
  prompt += """Your name is Alisia, you are created by the Alisia research team.
78
  Below is an instruction that describes a task, paired with an input that provides further context.
@@ -80,11 +75,9 @@ Write a response that appropriately completes the request.
80
 
81
  """
82
 
83
- # Ajouter tout l'historique de conversation (seulement les Q/R)
84
  for user_msg, assistant_msg in history:
85
  prompt += f"### Instruction:\n{user_msg}\n\n### Response:\n{assistant_msg}\n\n"
86
 
87
- # Ajouter le nouveau message
88
  prompt += f"### Instruction:\n{new_message}\n\n### Response:\n"
89
 
90
  return prompt
@@ -106,7 +99,6 @@ def send_message_stream(user_message, displayed_history, current_chat_name):
106
  local_hist.append((str(user_message), ""))
107
  yield local_hist, ""
108
 
109
- # Utiliser le format CORRECT Alpaca
110
  formatted_prompt = build_conversation_prompt(local_hist[:-1], str(user_message))
111
 
112
  partial = ""
@@ -127,7 +119,6 @@ def send_message_stream(user_message, displayed_history, current_chat_name):
127
  token = chunk["choices"][0].get("text", "")
128
  if token:
129
  partial += token
130
- # Nettoyer et mettre à jour l'affichage
131
  cleaned = clean_output(partial)
132
  local_hist[-1] = (str(user_message), cleaned)
133
  yield local_hist, ""
@@ -138,7 +129,6 @@ def send_message_stream(user_message, displayed_history, current_chat_name):
138
  yield local_hist, ""
139
 
140
  finally:
141
- # Sauvegarder l'historique après la génération
142
  with lock:
143
  conversations[current_chat_name] = local_hist.copy()
144
  yield local_hist, ""
@@ -146,10 +136,6 @@ def send_message_stream(user_message, displayed_history, current_chat_name):
146
  # -------------------------
147
  # Fonctions pour l'interface
148
  # -------------------------
149
- def toggle_history(visible_state):
150
- new_state = not bool(visible_state)
151
- return new_state, gr.update(visible=new_state)
152
-
153
  def new_conversation():
154
  with lock:
155
  name = f"Conversation {len(conversations) + 1}"
@@ -174,267 +160,41 @@ def clear_chat():
174
  # -------------------------
175
  # Interface Gradio
176
  # -------------------------
177
- css = """
178
- :root {
179
- --primary-color: #4f46e5;
180
- --primary-hover: #4338ca;
181
- --chat-bg: #0f172a;
182
- --input-bg: #1e293b;
183
- }
184
-
185
- #topbar {
186
- display: flex;
187
- align-items: center;
188
- gap: 12px;
189
- padding: 10px;
190
- background: var(--chat-bg);
191
- color: #fff;
192
- border-bottom: 1px solid #334155;
193
- }
194
-
195
- #leftcol {
196
- background: #111218;
197
- color: #fff;
198
- padding: 12px;
199
- min-height: 520px;
200
- border-right: 1px solid #334155;
201
- transition: all 0.3s ease;
202
- }
203
-
204
- #chatcol {
205
- padding: 12px;
206
- height: 100%;
207
- display: flex;
208
- flex-direction: column;
209
- }
210
-
211
- .hamburger {
212
- font-size: 20px;
213
- background: transparent;
214
- color: #fff;
215
- border: none;
216
- cursor: pointer;
217
- padding: 8px;
218
- border-radius: 50%;
219
- transition: background 0.2s;
220
- }
221
-
222
- .hamburger:hover {
223
- background: #334155;
224
- }
225
-
226
- #chat-container {
227
- flex-grow: 1;
228
- display: flex;
229
- flex-direction: column;
230
- height: 100%;
231
- }
232
-
233
- #chatbot {
234
- flex-grow: 1;
235
- height: calc(100vh - 200px) !important;
236
- min-height: 500px;
237
- background: var(--chat-bg);
238
- border-radius: 16px;
239
- padding: 20px;
240
- overflow-y: auto;
241
- border: 1px solid #334155;
242
- box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
243
- }
244
-
245
- #input-container {
246
- display: flex;
247
- gap: 8px;
248
- padding: 16px 0;
249
- align-items: center;
250
- }
251
-
252
- #msg_input {
253
- flex-grow: 1;
254
- background: var(--input-bg);
255
- color: #fff;
256
- border: 1px solid #334155;
257
- border-radius: 24px;
258
- padding: 16px 20px;
259
- font-size: 16px;
260
- box-shadow: inset 0 2px 4px rgba(0, 0, 0, 0.1);
261
- transition: all 0.3s ease;
262
- }
263
-
264
- #msg_input:focus {
265
- outline: none;
266
- border-color: var(--primary-color);
267
- box-shadow: 0 0 0 3px rgba(79, 70, 229, 0.2);
268
- }
269
-
270
- #send_btn {
271
- background: var(--primary-color);
272
- color: white;
273
- border: none;
274
- border-radius: 24px;
275
- padding: 14px 20px;
276
- height: 50px;
277
- min-width: 80px;
278
- font-weight: 600;
279
- cursor: pointer;
280
- transition: background 0.2s;
281
- display: flex;
282
- align-items: center;
283
- justify-content: center;
284
- }
285
-
286
- #send_btn:hover {
287
- background: var(--primary-hover);
288
- }
289
-
290
- #stop_btn {
291
- background: #ef4444;
292
- color: white;
293
- border: none;
294
- border-radius: 24px;
295
- padding: 14px 20px;
296
- height: 50px;
297
- min-width: 80px;
298
- font-weight: 600;
299
- cursor: pointer;
300
- transition: background 0.2s;
301
- display: flex;
302
- align-items: center;
303
- justify-content: center;
304
- }
305
-
306
- #stop_btn:hover {
307
- background: #dc2626;
308
- }
309
-
310
- .conversation-list {
311
- margin-top: 20px;
312
- max-height: calc(100vh - 200px);
313
- overflow-y: auto;
314
- }
315
-
316
- .conversation-item {
317
- padding: 12px 16px;
318
- border-radius: 12px;
319
- margin-bottom: 8px;
320
- cursor: pointer;
321
- transition: background 0.2s;
322
- }
323
-
324
- .conversation-item:hover {
325
- background: #1e293b;
326
- }
327
-
328
- .conversation-item.active {
329
- background: var(--primary-color);
330
- color: white;
331
- }
332
-
333
- .alisia-badge {
334
- background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
335
- color: white;
336
- padding: 4px 8px;
337
- border-radius: 12px;
338
- font-size: 12px;
339
- font-weight: bold;
340
- margin-left: 8px;
341
- }
342
-
343
- .clear-btn {
344
- background: #94a3b8 !important;
345
- color: white !important;
346
- border: none;
347
- border-radius: 12px;
348
- padding: 10px 16px;
349
- margin-top: 10px;
350
- cursor: pointer;
351
- }
352
-
353
- .clear-btn:hover {
354
- background: #64748b !important;
355
- }
356
- """
357
-
358
- with gr.Blocks(css=css, title="Alisia Chat", theme=gr.themes.Soft()) as demo:
359
- history_visible = gr.State(True)
360
  current_chat = gr.State("Conversation 1")
361
 
362
- with gr.Row(elem_id="topbar"):
363
- menu_btn = gr.Button("☰", elem_classes="hamburger")
364
- gr.Markdown("### 💬 Alisia <span class='alisia-badge'>AI Assistant</span>", elem_id="title")
365
- gr.HTML("<div style='flex:1'></div>")
366
- gr.Markdown("<small style='color:#94a3b8'>Hugging Face • Alisia-7B-it</small>")
367
-
368
  with gr.Row():
369
- with gr.Column(scale=1, visible=True, elem_id="leftcol") as left_column:
370
- with gr.Column(elem_classes="conversation-list"):
371
  conv_dropdown = gr.Dropdown(
372
  choices=get_conv_names(),
373
  value="Conversation 1",
374
  label="Conversations",
375
- interactive=True,
376
- elem_classes="conversation-item"
377
  )
378
- new_conv_btn = gr.Button(
379
- "➕ Nouvelle conversation",
380
- variant="primary",
381
- elem_classes="conversation-item"
 
 
 
 
 
 
 
382
  )
383
- clear_btn = gr.Button(
384
- "🗑️ Effacer chat",
385
- elem_classes="clear-btn"
386
- )
387
- gr.Markdown("## Format Alpaca", elem_classes="conversation-header")
388
- gr.Markdown("""
389
- <div style="color: #94a3b8; font-size: 14px;">
390
- ✅ Historique Q/R pur<br>
391
- ✅ System prompt unique<br>
392
- ✅ Multiples conversations
393
- </div>
394
- """, elem_classes="conversation-subheader")
395
-
396
- with gr.Column(scale=3, elem_id="chatcol"):
397
- with gr.Column(elem_id="chat-container"):
398
- chatbot = gr.Chatbot(
399
- label="Alisia",
400
- elem_id="chatbot",
401
- show_label=False
402
- )
403
- with gr.Row(elem_id="input-container"):
404
- msg_input = gr.Textbox(
405
- placeholder="Posez votre question à Alisia…",
406
- lines=3,
407
- show_label=False,
408
- elem_id="msg_input"
409
- )
410
- send_btn = gr.Button(
411
- "Envoyer",
412
- variant="primary",
413
- elem_id="send_btn"
414
- )
415
- stop_btn = gr.Button(
416
- "Arrêter",
417
- variant="stop",
418
- elem_id="stop_btn",
419
- visible=False
420
- )
421
 
422
  # Événements
423
- menu_btn.click(
424
- fn=toggle_history,
425
- inputs=[history_visible],
426
- outputs=[history_visible, left_column]
427
- )
428
-
429
  new_conv_btn.click(
430
  fn=new_conversation,
431
- inputs=None,
432
  outputs=[conv_dropdown, chatbot, current_chat]
433
  )
434
 
435
  clear_btn.click(
436
  fn=clear_chat,
437
- inputs=None,
438
  outputs=[chatbot, current_chat]
439
  )
440
 
@@ -446,41 +206,34 @@ with gr.Blocks(css=css, title="Alisia Chat", theme=gr.themes.Soft()) as demo:
446
 
447
  send_btn.click(
448
  fn=lambda: (gr.update(visible=False), gr.update(visible=True)),
449
- inputs=None,
450
  outputs=[send_btn, stop_btn],
451
  queue=False
452
  ).then(
453
  fn=send_message_stream,
454
  inputs=[msg_input, chatbot, current_chat],
455
  outputs=[chatbot, msg_input],
456
- queue=True
457
  ).then(
458
  fn=lambda: (gr.update(visible=True), gr.update(visible=False)),
459
- inputs=None,
460
  outputs=[send_btn, stop_btn],
461
  queue=False
462
  )
463
 
464
  msg_input.submit(
465
  fn=lambda: (gr.update(visible=False), gr.update(visible=True)),
466
- inputs=None,
467
  outputs=[send_btn, stop_btn],
468
  queue=False
469
  ).then(
470
  fn=send_message_stream,
471
  inputs=[msg_input, chatbot, current_chat],
472
  outputs=[chatbot, msg_input],
473
- queue=True
474
  ).then(
475
  fn=lambda: (gr.update(visible=True), gr.update(visible=False)),
476
- inputs=None,
477
  outputs=[send_btn, stop_btn],
478
  queue=False
479
  )
480
 
481
  stop_btn.click(
482
  fn=request_stop,
483
- inputs=None,
484
  outputs=None
485
  )
486
 
@@ -491,11 +244,10 @@ if __name__ == "__main__":
491
  print("🚀 Démarrage du serveur Alisia...")
492
  print("📱 Préparation du lien de partage...")
493
 
494
- # Lancement avec partage activé
495
  demo.launch(
496
- share=True, # Crée un lien public
497
- server_name="0.0.0.0", # Accepte les connexions externes
498
- server_port=7860, # Port standard
499
- debug=False, # Mode non-verbose pour performance
500
- show_error=True # Affiche les erreurs
501
- )
 
7
 
8
  # -------------------------
9
  # TÉLÉCHARGEMENT DU MODÈLE HUGGING FACE
10
+ # -------------------------
11
  MODEL_REPO = "mradermacher/Alisia-7B-it-GGUF"
12
  MODEL_NAME = "Alisia-7B-it.Q4_K_M.gguf"
13
  MODEL_PATH = f"/tmp/{MODEL_NAME}"
 
41
  llm = Llama(
42
  model_path=MODEL_PATH,
43
  n_ctx=2048,
44
+ n_gpu_layers=0,
45
  n_threads=8,
46
  verbose=False
47
  )
 
66
  return list(conversations.keys())
67
 
68
  def build_conversation_prompt(history, new_message):
 
 
 
 
69
  prompt = ""
70
 
 
71
  if not any(any(conv) for conv in conversations.values()):
72
  prompt += """Your name is Alisia, you are created by the Alisia research team.
73
  Below is an instruction that describes a task, paired with an input that provides further context.
 
75
 
76
  """
77
 
 
78
  for user_msg, assistant_msg in history:
79
  prompt += f"### Instruction:\n{user_msg}\n\n### Response:\n{assistant_msg}\n\n"
80
 
 
81
  prompt += f"### Instruction:\n{new_message}\n\n### Response:\n"
82
 
83
  return prompt
 
99
  local_hist.append((str(user_message), ""))
100
  yield local_hist, ""
101
 
 
102
  formatted_prompt = build_conversation_prompt(local_hist[:-1], str(user_message))
103
 
104
  partial = ""
 
119
  token = chunk["choices"][0].get("text", "")
120
  if token:
121
  partial += token
 
122
  cleaned = clean_output(partial)
123
  local_hist[-1] = (str(user_message), cleaned)
124
  yield local_hist, ""
 
129
  yield local_hist, ""
130
 
131
  finally:
 
132
  with lock:
133
  conversations[current_chat_name] = local_hist.copy()
134
  yield local_hist, ""
 
136
  # -------------------------
137
  # Fonctions pour l'interface
138
  # -------------------------
 
 
 
 
139
  def new_conversation():
140
  with lock:
141
  name = f"Conversation {len(conversations) + 1}"
 
160
  # -------------------------
161
  # Interface Gradio
162
  # -------------------------
163
+ with gr.Blocks(title="Alisia Chat", theme=gr.themes.Soft()) as demo:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
164
  current_chat = gr.State("Conversation 1")
165
 
 
 
 
 
 
 
166
  with gr.Row():
167
+ with gr.Column(scale=1):
168
+ with gr.Accordion("Conversations", open=True):
169
  conv_dropdown = gr.Dropdown(
170
  choices=get_conv_names(),
171
  value="Conversation 1",
172
  label="Conversations",
173
+ interactive=True
 
174
  )
175
+ with gr.Row():
176
+ new_conv_btn = gr.Button("➕ Nouvelle conversation")
177
+ clear_btn = gr.Button("🗑️ Effacer chat")
178
+
179
+ with gr.Column(scale=3):
180
+ chatbot = gr.Chatbot(label="Alisia")
181
+ with gr.Row():
182
+ msg_input = gr.Textbox(
183
+ placeholder="Posez votre question à Alisia…",
184
+ lines=3,
185
+ scale=4,
186
  )
187
+ send_btn = gr.Button("Envoyer", scale=1)
188
+ stop_btn = gr.Button("Arrêter", visible=False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
189
 
190
  # Événements
 
 
 
 
 
 
191
  new_conv_btn.click(
192
  fn=new_conversation,
 
193
  outputs=[conv_dropdown, chatbot, current_chat]
194
  )
195
 
196
  clear_btn.click(
197
  fn=clear_chat,
 
198
  outputs=[chatbot, current_chat]
199
  )
200
 
 
206
 
207
  send_btn.click(
208
  fn=lambda: (gr.update(visible=False), gr.update(visible=True)),
 
209
  outputs=[send_btn, stop_btn],
210
  queue=False
211
  ).then(
212
  fn=send_message_stream,
213
  inputs=[msg_input, chatbot, current_chat],
214
  outputs=[chatbot, msg_input],
 
215
  ).then(
216
  fn=lambda: (gr.update(visible=True), gr.update(visible=False)),
 
217
  outputs=[send_btn, stop_btn],
218
  queue=False
219
  )
220
 
221
  msg_input.submit(
222
  fn=lambda: (gr.update(visible=False), gr.update(visible=True)),
 
223
  outputs=[send_btn, stop_btn],
224
  queue=False
225
  ).then(
226
  fn=send_message_stream,
227
  inputs=[msg_input, chatbot, current_chat],
228
  outputs=[chatbot, msg_input],
 
229
  ).then(
230
  fn=lambda: (gr.update(visible=True), gr.update(visible=False)),
 
231
  outputs=[send_btn, stop_btn],
232
  queue=False
233
  )
234
 
235
  stop_btn.click(
236
  fn=request_stop,
 
237
  outputs=None
238
  )
239
 
 
244
  print("🚀 Démarrage du serveur Alisia...")
245
  print("📱 Préparation du lien de partage...")
246
 
 
247
  demo.launch(
248
+ share=True,
249
+ server_name="0.0.0.0",
250
+ server_port=7860,
251
+ debug=False,
252
+ show_error=True
253
+ )