cstr committed on
Commit
bec3dbc
Β·
verified Β·
1 Parent(s): 2560199

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -4
app.py CHANGED
@@ -209,6 +209,7 @@ def run_transplant(
209
  model=model_val,
210
  api_key=llm_api_key.strip() or None,
211
  )
 
212
  llm_cfg.para_batch_size = int(llm_batch_size)
213
  llm_cfg.blueprint_context_chars = int(llm_context_chars)
214
 
@@ -491,7 +492,7 @@ Leave **Provider** at `(none)` to skip the LLM pass entirely (fast, structural-o
491
  maximum=50,
492
  step=1,
493
  value=15,
494
- info="Smaller = more calls, larger = may hit context limits.",
495
  )
496
  llm_context_chars = gr.Slider(
497
  label="Blueprint context (chars sent for style guide generation)",
@@ -515,14 +516,20 @@ Leave **Provider** at `(none)` to skip the LLM pass entirely (fast, structural-o
515
  file_count="multiple",
516
  )
517
 
518
- # Auto-fill default model when provider changes
519
  def _on_provider_change(provider):
520
- return _default_model_for_provider(provider)
 
 
 
 
 
 
521
 
522
  llm_provider.change(
523
  fn=_on_provider_change,
524
  inputs=[llm_provider],
525
- outputs=[llm_model],
526
  )
527
 
528
  # ── System status ──────────────────────────────────────────────
 
209
  model=model_val,
210
  api_key=llm_api_key.strip() or None,
211
  )
212
+ # Use the actual slider value
213
  llm_cfg.para_batch_size = int(llm_batch_size)
214
  llm_cfg.blueprint_context_chars = int(llm_context_chars)
215
 
 
492
  maximum=50,
493
  step=1,
494
  value=15,
495
+ info="Groq: 5 recommended. Others: 15-20. Smaller = more calls, but safer against rate limits.",
496
  )
497
  llm_context_chars = gr.Slider(
498
  label="Blueprint context (chars sent for style guide generation)",
 
516
  file_count="multiple",
517
  )
518
 
519
+ # Auto-fill default model and batch size when provider changes
520
  def _on_provider_change(provider):
521
+ if provider == "(none)":
522
+ return gr.update(value="auto"), gr.update(value=15)
523
+
524
+ defaults = PROVIDER_DEFAULTS.get(provider, {})
525
+ model = defaults.get("model", "auto")
526
+ batch = defaults.get("batch_size", 15)
527
+ return gr.update(value=model), gr.update(value=batch)
528
 
529
  llm_provider.change(
530
  fn=_on_provider_change,
531
  inputs=[llm_provider],
532
+ outputs=[llm_model, llm_batch_size],
533
  )
534
 
535
  # ── System status ──────────────────────────────────────────────