Lzy01241010 committed on
Commit
0c72122
·
1 Parent(s): b4bf7c4

Settings: condenser first in radio + help; flatten Model wrapper so its label aligns with the rest

Browse files
Files changed (1) hide show
  1. app.py +11 -3
app.py CHANGED
@@ -667,7 +667,14 @@ gradio-app > div {
667
  #quest-temperature .gr-form,
668
  #quest-temperature .container,
669
  #quest-temperature .wrap-inner,
670
- #quest-temperature .head {
 
 
 
 
 
 
 
671
  background: transparent !important;
672
  border: 0 !important;
673
  outline: 0 !important;
@@ -1455,7 +1462,7 @@ def _trace_to_json(state: "AgentState", used_model: str) -> str:
1455
  )
1456
 
1457
 
1458
- MEMORY_STRATEGIES = ("vanilla", "condenser", "discard_all", "hide_tool_result")
1459
 
1460
 
1461
  def _normalize_memory_strategy(strategy: str) -> str:
@@ -1899,6 +1906,7 @@ with gr.Blocks(
1899
  label="Model",
1900
  value=QUEST_MODEL_ID,
1901
  interactive=False,
 
1902
  )
1903
  memory_strategy = gr.Radio(
1904
  label="Memory Strategy",
@@ -1908,8 +1916,8 @@ with gr.Blocks(
1908
  )
1909
  gr.HTML(
1910
  '<div class="memory-help">'
1911
- '<b>vanilla</b> — memory management disabled; the full conversation history is kept.<br>'
1912
  '<b>condenser</b> (default) — when context grows large, a State Summarizer LLM compresses earlier turns into a structured JSON of trusted/untrusted/uncertain claims, visited sources, and prior search queries; the agent continues with that compact state.<br>'
 
1913
  '<b>discard_all</b> — when context grows large, the entire message history is reset, restarting the agent from the original question with no accumulated context.<br>'
1914
  '<b>hide_tool_result</b> — when context grows large, older tool responses are pruned; only the most recent tool result is kept.'
1915
  '</div>'
 
667
  #quest-temperature .gr-form,
668
  #quest-temperature .container,
669
  #quest-temperature .wrap-inner,
670
+ #quest-temperature .head,
671
+ #quest-model,
672
+ #quest-model > div,
673
+ #quest-model .form,
674
+ #quest-model .gr-form,
675
+ #quest-model .container,
676
+ #quest-model .wrap-inner,
677
+ #quest-model .head {
678
  background: transparent !important;
679
  border: 0 !important;
680
  outline: 0 !important;
 
1462
  )
1463
 
1464
 
1465
+ MEMORY_STRATEGIES = ("condenser", "vanilla", "discard_all", "hide_tool_result")
1466
 
1467
 
1468
  def _normalize_memory_strategy(strategy: str) -> str:
 
1906
  label="Model",
1907
  value=QUEST_MODEL_ID,
1908
  interactive=False,
1909
+ elem_id="quest-model",
1910
  )
1911
  memory_strategy = gr.Radio(
1912
  label="Memory Strategy",
 
1916
  )
1917
  gr.HTML(
1918
  '<div class="memory-help">'
 
1919
  '<b>condenser</b> (default) — when context grows large, a State Summarizer LLM compresses earlier turns into a structured JSON of trusted/untrusted/uncertain claims, visited sources, and prior search queries; the agent continues with that compact state.<br>'
1920
+ '<b>vanilla</b> — memory management disabled; the full conversation history is kept.<br>'
1921
  '<b>discard_all</b> — when context grows large, the entire message history is reset, restarting the agent from the original question with no accumulated context.<br>'
1922
  '<b>hide_tool_result</b> — when context grows large, older tool responses are pruned; only the most recent tool result is kept.'
1923
  '</div>'