Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -325,7 +325,6 @@ Grounded in auditory neuroscience (Norman-Haignere 2025, Vollan 2025). Zero embe
|
|
| 325 |
label="",
|
| 326 |
height=480,
|
| 327 |
show_label=False,
|
| 328 |
-
type="tuples",
|
| 329 |
)
|
| 330 |
|
| 331 |
retrieval_info = gr.Markdown("", elem_id="retrieval-info")
|
|
@@ -443,14 +442,16 @@ Commands: type `:coverage` to see sweep focus • `:summary` for index stats •
|
|
| 443 |
return
|
| 444 |
|
| 445 |
if state.retriever is None:
|
| 446 |
-
chat_history.append((msg, "⚠️ Please load a document first."))
|
|
|
|
| 447 |
yield chat_history, ""
|
| 448 |
return
|
| 449 |
|
| 450 |
# Handle commands
|
| 451 |
cmd_response, is_cmd = handle_command(msg)
|
| 452 |
if is_cmd:
|
| 453 |
-
chat_history.append((msg, cmd_response))
|
|
|
|
| 454 |
yield chat_history, ""
|
| 455 |
return
|
| 456 |
|
|
@@ -481,10 +482,14 @@ Commands: type `:coverage` to see sweep focus • `:summary` for index stats •
|
|
| 481 |
# Stream response
|
| 482 |
client = get_client(hf_token)
|
| 483 |
partial = ""
|
| 484 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 485 |
for token in stream_response(client, model, messages):
|
| 486 |
partial += token
|
| 487 |
-
chat_history[-1] = (msg, partial)
|
| 488 |
yield chat_history, retrieval_display
|
| 489 |
|
| 490 |
# Save to history
|
|
|
|
| 325 |
label="",
|
| 326 |
height=480,
|
| 327 |
show_label=False,
|
|
|
|
| 328 |
)
|
| 329 |
|
| 330 |
retrieval_info = gr.Markdown("", elem_id="retrieval-info")
|
|
|
|
| 442 |
return
|
| 443 |
|
| 444 |
if state.retriever is None:
|
| 445 |
+
chat_history.append({"role": "user", "content": msg})
|
| 446 |
+
chat_history.append({"role": "assistant", "content": "⚠️ Please load a document first."})
|
| 447 |
yield chat_history, ""
|
| 448 |
return
|
| 449 |
|
| 450 |
# Handle commands
|
| 451 |
cmd_response, is_cmd = handle_command(msg)
|
| 452 |
if is_cmd:
|
| 453 |
+
chat_history.append({"role": "user", "content": msg})
|
| 454 |
+
chat_history.append({"role": "assistant", "content": cmd_response})
|
| 455 |
yield chat_history, ""
|
| 456 |
return
|
| 457 |
|
|
|
|
| 482 |
# Stream response
|
| 483 |
client = get_client(hf_token)
|
| 484 |
partial = ""
|
| 485 |
+
|
| 486 |
+
# Gradio 6 messages format
|
| 487 |
+
chat_history.append({"role": "user", "content": msg})
|
| 488 |
+
chat_history.append({"role": "assistant", "content": ""})
|
| 489 |
+
|
| 490 |
for token in stream_response(client, model, messages):
|
| 491 |
partial += token
|
| 492 |
+
chat_history[-1]["content"] = partial
|
| 493 |
yield chat_history, retrieval_display
|
| 494 |
|
| 495 |
# Save to history
|