Luigi committed on
Commit
4465a4a
Β·
1 Parent(s): fc7c6c6

refactor: reorganize UI layout for better UX

Browse files

Major UI restructuring:
- Add Tabs for model selection (Preset Models / Custom GGUF)
- Move Language + File Upload to top as 'Input' section
- Split Advanced Settings into Hardware Config and Inference Params
- Move Debug panel inside Advanced Settings accordion
- Move 'Copy Thinking' button to Thinking group (was in Summary)
- Simplify header/instructions by removing unnecessary Row/Column wrappers
- Move State components outside visual Groups for cleaner code
- Hide download_output until file is generated
- Add separate custom_info_output for Custom GGUF tab model info

Removes toggle_custom_model_ui handler (replaced by Tabs)

Files changed (1) hide show
  1. app.py +188 -179
app.py CHANGED
@@ -1517,94 +1517,115 @@ def create_interface():
1517
  css=custom_css
1518
  ) as demo:
1519
 
1520
- # Header section
1521
- with gr.Row():
1522
- with gr.Column():
1523
- gr.HTML("""
1524
- <div class="app-header">
1525
- <h1>πŸ“„ Tiny Scribe</h1>
1526
- <p>AI-Powered Transcript Summarization with Real-Time Streaming</p>
1527
- <div class="model-badge">
1528
- <span>Select a model below to get started</span>
1529
- </div>
1530
- </div>
1531
- """)
1532
 
1533
- # Instructions
1534
- with gr.Row():
1535
- with gr.Column():
1536
- gr.HTML("""
1537
- <div class="instructions">
1538
- <strong>πŸ“‹ How to use:</strong>
1539
- <ul>
1540
- <li>Upload a .txt file containing your transcript, notes, or document</li>
1541
- <li>Click "Generate Summary" to start AI processing</li>
1542
- <li>Watch the <strong>Thinking Process</strong> (left) - see how the AI reasons</li>
1543
- <li>Read the <strong>Final Summary</strong> (right) - the polished result</li>
1544
- <li>Both outputs stream in real-time as the AI generates content</li>
1545
- </ul>
1546
- </div>
1547
- """)
1548
 
1549
  # Main content area
1550
  with gr.Row():
1551
- # Left column - Input
1552
  with gr.Column(scale=1):
 
 
 
 
1553
  with gr.Group():
1554
- gr.HTML('<div class="section-header"><span class="section-icon">🌐</span> Output Language</div>')
1555
 
1556
  language_selector = gr.Dropdown(
1557
  choices=[("English", "en"), ("Traditional Chinese (zh-TW)", "zh-TW")],
1558
  value="en",
1559
- label="Select Language",
1560
  info="Choose the target language for your summary"
1561
  )
1562
 
1563
- # Model Selection - Moved to top level for easy access
1564
- gr.HTML('<div class="section-header" style="margin-top: 20px;"><span class="section-icon">πŸ€–</span> Model</div>')
1565
-
1566
- model_dropdown = gr.Dropdown(
1567
- choices=[(info["name"] + (" ⚑" if info.get("supports_reasoning", False) and not info.get("supports_toggle", False) else ""), key) for key, info in AVAILABLE_MODELS.items()],
1568
- value=DEFAULT_MODEL_KEY,
1569
- label="Select Model",
1570
- info="Models ordered by size (0.6B to 30B). Smaller = faster. Large files need bigger context. ⚑ = Always-reasoning models."
1571
  )
 
 
 
 
 
1572
 
1573
- enable_reasoning = gr.Checkbox(
1574
- value=True,
1575
- label="Enable Reasoning Mode",
1576
- info="Uses /think for deeper analysis (slower) or /no_think for direct output (faster). Only available for Qwen3 models.",
1577
- interactive=True,
1578
- visible=AVAILABLE_MODELS[DEFAULT_MODEL_KEY].get("supports_toggle", False)
1579
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1580
 
1581
- # Custom Model UI (hidden by default, shown when custom_hf selected)
1582
- with gr.Group(visible=False) as custom_model_group:
1583
- gr.HTML('<div class="section-header" style="margin-top: 20px;"><span class="section-icon">πŸ”§</span> Load Custom GGUF Model</div>')
1584
 
1585
- # NEW: Native HF Hub Search Component
1586
  model_search_input = HuggingfaceHubSearch(
1587
  label="πŸ” Search HuggingFace Models",
1588
- placeholder="Type model name to search (e.g., 'llama', 'qwen', 'phi')",
1589
  search_type="model",
1590
  )
1591
 
1592
- # Hidden fields to store discovered file data
1593
- custom_repo_files = gr.State([])
1594
-
1595
  # File dropdown (populated after repo discovery)
1596
  custom_file_dropdown = gr.Dropdown(
1597
- label="πŸ“¦ Select GGUF File (Precision)",
1598
  choices=[],
1599
  value=None,
1600
- info="Available GGUF files will appear after selecting a model above",
1601
  interactive=True,
1602
- visible=True,
1603
  )
1604
 
1605
- # Action buttons
1606
- with gr.Row():
1607
- load_btn = gr.Button("⬇️ Load Selected Model", variant="primary", size="sm")
1608
 
1609
  # Status message
1610
  custom_status = gr.Textbox(
@@ -1615,102 +1636,110 @@ def create_interface():
1615
  )
1616
 
1617
  retry_btn = gr.Button("πŸ”„ Retry", variant="secondary", visible=False)
 
 
 
 
 
 
 
 
 
 
 
 
1618
 
1619
- gr.HTML('<div class="section-header" style="margin-top: 20px;"><span class="section-icon">πŸ“€</span> Upload File</div>')
 
1620
 
1621
- file_input = gr.File(
1622
- label="Drag & drop or click to upload",
1623
- file_types=[".txt"],
1624
- type="filepath",
1625
- elem_classes=["file-upload-area"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1626
  )
1627
 
1628
- with gr.Accordion("βš™οΈ Advanced Settings", open=False):
1629
- with gr.Group(elem_classes=["advanced-settings"]):
1630
- gr.HTML('<div class="section-header" style="margin-top: 10px;"><span class="section-icon">πŸ–₯️</span> Hardware Configuration</div>')
1631
-
1632
- thread_config_dropdown = gr.Dropdown(
1633
- choices=[
1634
- ("HF Spaces Free Tier (2 vCPUs)", "free"),
1635
- ("HF Spaces CPU Upgrade (8 vCPUs)", "upgrade"),
1636
- ("Custom (manual)", "custom"),
1637
- ],
1638
- value=DEFAULT_THREAD_PRESET,
1639
- label="CPU Thread Preset",
1640
- info="Select hardware tier or specify custom thread count"
1641
- )
1642
-
1643
- custom_threads_slider = gr.Slider(
1644
- minimum=1,
1645
- maximum=32,
1646
- value=DEFAULT_CUSTOM_THREADS if DEFAULT_CUSTOM_THREADS > 0 else 4,
1647
- step=1,
1648
- label="Custom Thread Count",
1649
- info="Number of CPU threads for model inference (1-32)",
1650
- visible=DEFAULT_THREAD_PRESET == "custom"
1651
- )
1652
-
1653
- temperature_slider = gr.Slider(
1654
- minimum=0.0,
1655
- maximum=2.0,
1656
- value=0.6,
1657
- step=0.1,
1658
- label="Temperature",
1659
- info="Lower = more focused, Higher = more creative"
1660
- )
1661
- max_tokens = gr.Slider(
1662
- minimum=256,
1663
- maximum=4096,
1664
- value=2048,
1665
- step=256,
1666
- label="Max Output Tokens",
1667
- info="Higher = more detailed summary"
1668
- )
1669
- top_p = gr.Slider(
1670
- minimum=0.0,
1671
- maximum=1.0,
1672
- value=0.95,
1673
- step=0.05,
1674
- label="Top P (Nucleus Sampling)",
1675
- info="Lower = more focused, Higher = more diverse"
1676
- )
1677
- top_k = gr.Slider(
1678
- minimum=0,
1679
- maximum=100,
1680
- value=20,
1681
- step=5,
1682
- label="Top K",
1683
- info="Limits token selection to top K tokens (0 = disabled)"
1684
- )
1685
 
1686
- submit_btn = gr.Button(
1687
- "✨ Generate Summary",
1688
- variant="primary",
1689
- elem_classes=["submit-btn"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1690
  )
1691
-
1692
- # Hidden state to store generation metrics
1693
- metrics_state = gr.State(value={})
1694
-
1695
- # Hidden state to store loaded custom model
1696
- custom_model_state = gr.State(value=None)
1697
 
1698
- # Hidden state to store custom model metadata (repo_id, filename, size)
1699
- custom_model_metadata = gr.State(value={
1700
- "repo_id": None,
1701
- "filename": None,
1702
- "size_mb": 0,
1703
- })
 
 
 
 
1704
 
1705
- # Model info section (dynamic)
1706
- with gr.Group():
1707
- gr.HTML('<div class="section-header"><span class="section-icon">πŸ“Š</span> Model Information</div>')
1708
- # Get default thread count for initial display
1709
- _default_threads = DEFAULT_CUSTOM_THREADS if DEFAULT_CUSTOM_THREADS > 0 else 2
1710
- info_output = gr.Markdown(
1711
- value=get_model_info(DEFAULT_MODEL_KEY, n_threads=_default_threads)[0],
1712
- elem_classes=["stats-grid"]
1713
- )
 
 
 
 
 
 
 
 
 
 
 
1714
 
1715
  # Right column - Outputs
1716
  with gr.Column(scale=2):
@@ -1725,6 +1754,8 @@ def create_interface():
1725
  placeholder="The AI's reasoning process will appear here in real-time...",
1726
  elem_classes=["thinking-box"]
1727
  )
 
 
1728
 
1729
  # Summary Output
1730
  with gr.Group():
@@ -1734,25 +1765,13 @@ def create_interface():
1734
  elem_classes=["summary-box"]
1735
  )
1736
 
1737
- # Action buttons for outputs
1738
  with gr.Row():
1739
  copy_summary_btn = gr.Button("πŸ“‹ Copy Summary", size="sm")
1740
- copy_thinking_btn = gr.Button("πŸ“‹ Copy Thinking", size="sm")
1741
  download_btn = gr.Button("⬇️ Download (JSON)", size="sm")
1742
 
1743
- # File output component for download
1744
- download_output = gr.File(label="Download JSON", visible=True)
1745
-
1746
- # Debug: System Prompt display
1747
- with gr.Accordion("πŸ› Debug: System Prompt", open=False):
1748
- system_prompt_debug = gr.Textbox(
1749
- label="System Prompt (Read-Only)",
1750
- lines=5,
1751
- max_lines=10,
1752
- interactive=False,
1753
- value="Select a model and click 'Generate Summary' to see the system prompt.",
1754
- info="This shows the exact system prompt sent to the LLM"
1755
- )
1756
 
1757
  # Function to update settings when model changes
1758
  def update_settings_on_model_change(model_key, thread_config, custom_threads, custom_metadata=None):
@@ -1823,17 +1842,7 @@ def create_interface():
1823
  # NEW: Custom Model Loader Event Handlers
1824
  # ==========================================
1825
 
1826
- # Show/hide custom model UI based on model selection
1827
- def toggle_custom_model_ui(model_key):
1828
- """Show or hide custom model UI based on selection."""
1829
- is_custom = model_key == "custom_hf"
1830
- return gr.update(visible=is_custom)
1831
-
1832
- model_dropdown.change(
1833
- fn=toggle_custom_model_ui,
1834
- inputs=[model_dropdown],
1835
- outputs=[custom_model_group],
1836
- )
1837
 
1838
  # Update system prompt debug when model or reasoning changes
1839
  def update_system_prompt_debug(model_key, enable_reasoning, language):
@@ -2014,9 +2023,9 @@ def create_interface():
2014
  inputs=[model_search_input, custom_file_dropdown, custom_repo_files],
2015
  outputs=[custom_status, retry_btn, custom_model_state, custom_model_metadata],
2016
  ).then(
2017
- fn=update_settings_on_model_change,
2018
- inputs=[model_dropdown, thread_config_dropdown, custom_threads_slider, custom_model_metadata],
2019
- outputs=[temperature_slider, top_p, top_k, info_output],
2020
  )
2021
 
2022
  # Retry button - same as load
@@ -2025,9 +2034,9 @@ def create_interface():
2025
  inputs=[model_search_input, custom_file_dropdown, custom_repo_files],
2026
  outputs=[custom_status, retry_btn, custom_model_state, custom_model_metadata],
2027
  ).then(
2028
- fn=update_settings_on_model_change,
2029
- inputs=[model_dropdown, thread_config_dropdown, custom_threads_slider, custom_model_metadata],
2030
- outputs=[temperature_slider, top_p, top_k, info_output],
2031
  )
2032
 
2033
  # Also update submit button to use custom model state
 
1517
  css=custom_css
1518
  ) as demo:
1519
 
1520
+ # Header section (simplified - no Row/Column wrapper needed for full-width)
1521
+ gr.HTML("""
1522
+ <div class="app-header">
1523
+ <h1>πŸ“„ Tiny Scribe</h1>
1524
+ <p>AI-Powered Transcript Summarization with Real-Time Streaming</p>
1525
+ <div class="model-badge">
1526
+ <span>Select a model below to get started</span>
1527
+ </div>
1528
+ </div>
1529
+ """)
 
 
1530
 
1531
+ # Instructions (simplified)
1532
+ gr.HTML("""
1533
+ <div class="instructions">
1534
+ <strong>πŸ“‹ How to use:</strong>
1535
+ <ul>
1536
+ <li>Upload a .txt file containing your transcript, notes, or document</li>
1537
+ <li>Click "Generate Summary" to start AI processing</li>
1538
+ <li>Watch the <strong>Thinking Process</strong> (left) - see how the AI reasons</li>
1539
+ <li>Read the <strong>Final Summary</strong> (right) - the polished result</li>
1540
+ <li>Both outputs stream in real-time as the AI generates content</li>
1541
+ </ul>
1542
+ </div>
1543
+ """)
 
 
1544
 
1545
  # Main content area
1546
  with gr.Row():
1547
+ # Left column - Configuration
1548
  with gr.Column(scale=1):
1549
+
1550
+ # ==========================================
1551
+ # Section 1: Input Configuration (Language + File)
1552
+ # ==========================================
1553
  with gr.Group():
1554
+ gr.HTML('<div class="section-header"><span class="section-icon">πŸ“€</span> Input</div>')
1555
 
1556
  language_selector = gr.Dropdown(
1557
  choices=[("English", "en"), ("Traditional Chinese (zh-TW)", "zh-TW")],
1558
  value="en",
1559
+ label="🌐 Output Language",
1560
  info="Choose the target language for your summary"
1561
  )
1562
 
1563
+ file_input = gr.File(
1564
+ label="πŸ“„ Upload Transcript (.txt)",
1565
+ file_types=[".txt"],
1566
+ type="filepath",
1567
+ elem_classes=["file-upload-area"]
 
 
 
1568
  )
1569
+
1570
+ # ==========================================
1571
+ # Section 2: Model Selection (Tabs)
1572
+ # ==========================================
1573
+ with gr.Tabs() as model_tabs:
1574
 
1575
+ # --- Tab 1: Preset Models ---
1576
+ with gr.TabItem("πŸ€– Preset Models"):
1577
+ # Filter out custom_hf from preset choices
1578
+ preset_choices = [
1579
+ (info["name"] + (" ⚑" if info.get("supports_reasoning", False) and not info.get("supports_toggle", False) else ""), key)
1580
+ for key, info in AVAILABLE_MODELS.items()
1581
+ if key != "custom_hf"
1582
+ ]
1583
+
1584
+ model_dropdown = gr.Dropdown(
1585
+ choices=preset_choices,
1586
+ value=DEFAULT_MODEL_KEY,
1587
+ label="Select Model",
1588
+ info="Smaller = faster. ⚑ = Always-reasoning models."
1589
+ )
1590
+
1591
+ enable_reasoning = gr.Checkbox(
1592
+ value=True,
1593
+ label="Enable Reasoning Mode",
1594
+ info="Uses /think for deeper analysis (slower) or /no_think for direct output (faster).",
1595
+ interactive=True,
1596
+ visible=AVAILABLE_MODELS[DEFAULT_MODEL_KEY].get("supports_toggle", False)
1597
+ )
1598
+
1599
+ # Model info for preset models
1600
+ gr.HTML('<div class="section-header" style="margin-top: 12px;"><span class="section-icon">πŸ“Š</span> Model Information</div>')
1601
+ _default_threads = DEFAULT_CUSTOM_THREADS if DEFAULT_CUSTOM_THREADS > 0 else 2
1602
+ info_output = gr.Markdown(
1603
+ value=get_model_info(DEFAULT_MODEL_KEY, n_threads=_default_threads)[0],
1604
+ elem_classes=["stats-grid"]
1605
+ )
1606
 
1607
+ # --- Tab 2: Custom GGUF ---
1608
+ with gr.TabItem("πŸ”§ Custom GGUF"):
1609
+ gr.HTML('<div style="font-size: 0.85em; color: #64748b; margin-bottom: 10px;">Load any GGUF model from HuggingFace Hub</div>')
1610
 
1611
+ # HF Hub Search Component
1612
  model_search_input = HuggingfaceHubSearch(
1613
  label="πŸ” Search HuggingFace Models",
1614
+ placeholder="Type model name (e.g., 'qwen', 'phi', 'llama')",
1615
  search_type="model",
1616
  )
1617
 
 
 
 
1618
  # File dropdown (populated after repo discovery)
1619
  custom_file_dropdown = gr.Dropdown(
1620
+ label="πŸ“¦ Select GGUF File",
1621
  choices=[],
1622
  value=None,
1623
+ info="GGUF files appear after selecting a model above",
1624
  interactive=True,
 
1625
  )
1626
 
1627
+ # Load button
1628
+ load_btn = gr.Button("⬇️ Load Selected Model", variant="primary", size="sm")
 
1629
 
1630
  # Status message
1631
  custom_status = gr.Textbox(
 
1636
  )
1637
 
1638
  retry_btn = gr.Button("πŸ”„ Retry", variant="secondary", visible=False)
1639
+
1640
+ # Model info for custom models (shows after loading)
1641
+ gr.HTML('<div class="section-header" style="margin-top: 12px;"><span class="section-icon">πŸ“Š</span> Custom Model Info</div>')
1642
+ custom_info_output = gr.Markdown(
1643
+ value="*Load a model to see its specifications...*",
1644
+ elem_classes=["stats-grid"]
1645
+ )
1646
+
1647
+ # ==========================================
1648
+ # Section 3: Advanced Settings
1649
+ # ==========================================
1650
+ with gr.Accordion("βš™οΈ Advanced Settings", open=False):
1651
 
1652
+ # Hardware Configuration
1653
+ gr.HTML('<div class="section-header"><span class="section-icon">πŸ–₯️</span> Hardware Configuration</div>')
1654
 
1655
+ thread_config_dropdown = gr.Dropdown(
1656
+ choices=[
1657
+ ("HF Spaces Free Tier (2 vCPUs)", "free"),
1658
+ ("HF Spaces CPU Upgrade (8 vCPUs)", "upgrade"),
1659
+ ("Custom (manual)", "custom"),
1660
+ ],
1661
+ value=DEFAULT_THREAD_PRESET,
1662
+ label="CPU Thread Preset",
1663
+ info="Select hardware tier or specify custom thread count"
1664
+ )
1665
+
1666
+ custom_threads_slider = gr.Slider(
1667
+ minimum=1,
1668
+ maximum=32,
1669
+ value=DEFAULT_CUSTOM_THREADS if DEFAULT_CUSTOM_THREADS > 0 else 4,
1670
+ step=1,
1671
+ label="Custom Thread Count",
1672
+ info="Number of CPU threads for model inference (1-32)",
1673
+ visible=DEFAULT_THREAD_PRESET == "custom"
1674
  )
1675
 
1676
+ # Inference Parameters
1677
+ gr.HTML('<div class="section-header" style="margin-top: 16px;"><span class="section-icon">πŸŽ›οΈ</span> Inference Parameters</div>')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1678
 
1679
+ temperature_slider = gr.Slider(
1680
+ minimum=0.0,
1681
+ maximum=2.0,
1682
+ value=0.6,
1683
+ step=0.1,
1684
+ label="Temperature",
1685
+ info="Lower = more focused, Higher = more creative"
1686
+ )
1687
+ max_tokens = gr.Slider(
1688
+ minimum=256,
1689
+ maximum=4096,
1690
+ value=2048,
1691
+ step=256,
1692
+ label="Max Output Tokens",
1693
+ info="Higher = more detailed summary"
1694
+ )
1695
+ top_p = gr.Slider(
1696
+ minimum=0.0,
1697
+ maximum=1.0,
1698
+ value=0.95,
1699
+ step=0.05,
1700
+ label="Top P (Nucleus Sampling)",
1701
+ info="Lower = more focused, Higher = more diverse"
1702
+ )
1703
+ top_k = gr.Slider(
1704
+ minimum=0,
1705
+ maximum=100,
1706
+ value=20,
1707
+ step=5,
1708
+ label="Top K",
1709
+ info="Limits token selection to top K tokens (0 = disabled)"
1710
  )
 
 
 
 
 
 
1711
 
1712
+ # Debug Tools (nested accordion)
1713
+ with gr.Accordion("πŸ› Debug Tools", open=False):
1714
+ system_prompt_debug = gr.Textbox(
1715
+ label="System Prompt (Read-Only)",
1716
+ lines=5,
1717
+ max_lines=10,
1718
+ interactive=False,
1719
+ value="Select a model and click 'Generate Summary' to see the system prompt.",
1720
+ info="This shows the exact system prompt sent to the LLM"
1721
+ )
1722
 
1723
+ # ==========================================
1724
+ # Submit Button
1725
+ # ==========================================
1726
+ submit_btn = gr.Button(
1727
+ "✨ Generate Summary",
1728
+ variant="primary",
1729
+ elem_classes=["submit-btn"]
1730
+ )
1731
+
1732
+ # ==========================================
1733
+ # State Components (invisible, outside visual groups)
1734
+ # ==========================================
1735
+ metrics_state = gr.State(value={})
1736
+ custom_model_state = gr.State(value=None)
1737
+ custom_model_metadata = gr.State(value={
1738
+ "repo_id": None,
1739
+ "filename": None,
1740
+ "size_mb": 0,
1741
+ })
1742
+ custom_repo_files = gr.State([])
1743
 
1744
  # Right column - Outputs
1745
  with gr.Column(scale=2):
 
1754
  placeholder="The AI's reasoning process will appear here in real-time...",
1755
  elem_classes=["thinking-box"]
1756
  )
1757
+ # Copy Thinking button - now in the correct group
1758
+ copy_thinking_btn = gr.Button("πŸ“‹ Copy Thinking", size="sm")
1759
 
1760
  # Summary Output
1761
  with gr.Group():
 
1765
  elem_classes=["summary-box"]
1766
  )
1767
 
1768
+ # Action buttons for summary
1769
  with gr.Row():
1770
  copy_summary_btn = gr.Button("πŸ“‹ Copy Summary", size="sm")
 
1771
  download_btn = gr.Button("⬇️ Download (JSON)", size="sm")
1772
 
1773
+ # File output component for download (hidden until generated)
1774
+ download_output = gr.File(label="Download JSON", visible=False)
 
 
 
 
 
 
 
 
 
 
 
1775
 
1776
  # Function to update settings when model changes
1777
  def update_settings_on_model_change(model_key, thread_config, custom_threads, custom_metadata=None):
 
1842
  # NEW: Custom Model Loader Event Handlers
1843
  # ==========================================
1844
 
1845
+ # Note: toggle_custom_model_ui removed - now using Tabs instead of hidden Group
 
 
 
 
 
 
 
 
 
 
1846
 
1847
  # Update system prompt debug when model or reasoning changes
1848
  def update_system_prompt_debug(model_key, enable_reasoning, language):
 
2023
  inputs=[model_search_input, custom_file_dropdown, custom_repo_files],
2024
  outputs=[custom_status, retry_btn, custom_model_state, custom_model_metadata],
2025
  ).then(
2026
+ fn=lambda metadata, thread_config, custom_threads: get_model_info("custom_hf", n_threads=get_thread_count(thread_config, custom_threads), custom_metadata=metadata)[0],
2027
+ inputs=[custom_model_metadata, thread_config_dropdown, custom_threads_slider],
2028
+ outputs=[custom_info_output],
2029
  )
2030
 
2031
  # Retry button - same as load
 
2034
  inputs=[model_search_input, custom_file_dropdown, custom_repo_files],
2035
  outputs=[custom_status, retry_btn, custom_model_state, custom_model_metadata],
2036
  ).then(
2037
+ fn=lambda metadata, thread_config, custom_threads: get_model_info("custom_hf", n_threads=get_thread_count(thread_config, custom_threads), custom_metadata=metadata)[0],
2038
+ inputs=[custom_model_metadata, thread_config_dropdown, custom_threads_slider],
2039
+ outputs=[custom_info_output],
2040
  )
2041
 
2042
  # Also update submit button to use custom model state