# Source: Heavy / src / multi_web_chat.py
# Uploaded by justinhew — commit ea81a05 ("Deploy to HF Spaces")
"""Gradio web interface with CHAT MODE for Heavy multi-model system."""
import asyncio
import gradio as gr
from .multi_web import (
process_chat_message,
AVAILABLE_MODELS,
load_config
)
# Create Chat-focused Gradio interface.
# All components defined inside this `with` block belong to `demo` and are
# wired together by the event handlers defined further below.
with gr.Blocks(
    title="Heavy Multi-Model - Chat Mode",
    theme=gr.themes.Soft()
) as demo:
    # Page header banner.
    gr.Markdown(
        """
# πŸ’¬ Heavy Multi-Model 2.0 - Chat Mode with Context
**Have multi-turn conversations!** The AI remembers your conversation history and can reference previous exchanges.
**Available Models:** GPT-5, GPT-5.1, Gemini 3 Pro Preview, Gemini 2.5 Pro, Claude 4.5 Sonnet, GPT-4.1 Mini, Gemini 2.0 Flash, Llama 3.1 70B
"""
    )
    # State for conversation history: a per-session list of message dicts
    # ({"role": ..., "content": ...}) passed into and out of handle_message.
    chat_state = gr.State([])
    with gr.Row():
        with gr.Column(scale=3):
            # API Keys
            with gr.Group():
                api_key_input = gr.Textbox(
                    label="πŸ”‘ OpenRouter API Key",
                    placeholder="Enter your OpenRouter API key (sk-or-v1-...)",
                    type="password",
                    info="Get your key from https://openrouter.ai/keys"
                )
                # Tavily Web Search
                use_tavily_checkbox = gr.Checkbox(
                    label="πŸ” Enable Web Search (Tavily)",
                    value=False,
                    info="Give agents access to real-time web information"
                )
                # Hidden until the checkbox above is ticked
                # (revealed by the toggle_tavily_key handler).
                tavily_api_key_input = gr.Textbox(
                    label="πŸ”‘ Tavily API Key (Optional)",
                    placeholder="Enter your Tavily API key (tvly-...)",
                    type="password",
                    info="Get your key from https://tavily.com",
                    visible=False
                )
            # Model Configuration
            with gr.Accordion("🎯 Model Configuration", open=True):
                mode_radio = gr.Radio(
                    choices=[
                        "Single Model (all roles use same model)",
                        "Multi-Model (assign different models to each role)",
                        "Use make-it-heavy (original repo)"
                    ],
                    value="Single Model (all roles use same model)",
                    label="Mode"
                )
                # Single model selector (shown for the "Single Model" mode).
                with gr.Group(visible=True) as single_model_group:
                    single_model_dropdown = gr.Dropdown(
                        choices=AVAILABLE_MODELS,
                        value="claude-4.5-sonnet",
                        label="Model for All Roles"
                    )
                # Multi-model selectors (shown for the "Multi-Model" mode).
                with gr.Group(visible=False) as multi_model_group:
                    orchestrator_dropdown = gr.Dropdown(
                        choices=AVAILABLE_MODELS,
                        value="claude-4.5-sonnet",
                        label="Orchestrator Model"
                    )
                    agent_dropdown = gr.Dropdown(
                        choices=AVAILABLE_MODELS,
                        value="gpt-5.1",
                        label="Agent Model"
                    )
                    synthesizer_dropdown = gr.Dropdown(
                        choices=AVAILABLE_MODELS,
                        value="gemini-3-pro-preview",
                        label="Synthesizer Model"
                    )
            # Analysis Settings
            with gr.Accordion("βš™οΈ Analysis Settings", open=False):
                num_agents_slider = gr.Slider(
                    minimum=2,
                    maximum=8,
                    value=4,
                    step=1,
                    label="Number of Agents"
                )
                show_thoughts_checkbox = gr.Checkbox(
                    label="Show Agent Thoughts",
                    value=False,
                    info="Display detailed agent analyses"
                )
            # Chat Interface
            gr.Markdown("### πŸ’¬ Conversation")
            # type="messages" means the chatbot expects a list of
            # {"role": ..., "content": ...} dicts, as built in handle_message.
            chatbot = gr.Chatbot(
                value=[],
                label="Chat History",
                height=400,
                type="messages"
            )
            with gr.Row():
                msg_input = gr.Textbox(
                    label="Your Message",
                    placeholder="Ask anything... The AI will remember our conversation!",
                    lines=2,
                    scale=4
                )
                send_btn = gr.Button("Send πŸš€", variant="primary", scale=1)
            clear_btn = gr.Button("πŸ—‘οΈ Clear Conversation", variant="secondary")
        # Sidebar with usage tips.
        with gr.Column(scale=1):
            gr.Markdown(
                """
### πŸ’¬ Chat Mode Features
**Conversation Memory:**
- AI remembers previous messages
- Can reference earlier topics
- Natural multi-turn dialogue
**How It Works:**
1. **Orchestrator**: Breaks your query into questions (with context)
2. **Agents**: Analyze in parallel (aware of conversation)
3. **Synthesizer**: Creates contextual response
### Tips
- Ask follow-up questions
- Request clarifications
- Build on previous answers
- Clear chat to start fresh
### Model Recommendations
- **Claude 4.5**: Best reasoning
- **GPT-5**: Creative responses
- **GPT-5.1**: Frontier reasoning + creativity
- **Gemini 3 Pro Preview**: Multimodal depth
- **Gemini 2.5 Pro**: Great synthesis
- **Enable web search** for current info!
"""
            )
    # Analysis Details (expandable) — populated after each chat turn.
    with gr.Accordion("πŸ“Š Latest Analysis Details", open=False):
        model_info_output = gr.Markdown(label="Model Configuration")
        questions_output = gr.Textbox(label="Generated Questions", lines=4, interactive=False)
        agents_output = gr.Markdown(label="Agent Analyses")
    # Event handlers
def toggle_model_selection(mode):
    """Toggle visibility of the single- vs multi-model selector groups.

    Returns a pair of ``gr.update`` objects for
    (single_model_group, multi_model_group); at most one is made visible.
    Any mode other than the two model-selection choices hides both groups.
    """
    show_single = mode == "Single Model (all roles use same model)"
    show_multi = mode == "Multi-Model (assign different models to each role)"
    return gr.update(visible=show_single), gr.update(visible=show_multi)
def toggle_tavily_key(use_tavily):
    """Show the Tavily API key field only while web search is enabled."""
    key_field_update = gr.update(visible=use_tavily)
    return key_field_update
def clear_chat():
    """Reset both the visible chat window and the stored history state."""
    # Two distinct lists: one for the Chatbot display, one for chat_state.
    return list(), list()
def handle_message(
    message, history, num_agents, show_thoughts, mode,
    single_model, orch_model, agent_model, synth_model,
    api_key, use_tavily, tavily_key
):
    """Run one chat turn and map the results onto the UI components.

    Delegates the multi-model work to ``process_chat_message``, then
    rebuilds the Chatbot display list from the updated history and clears
    the message textbox.
    """
    updated_history, model_info, questions, agents, _ = process_chat_message(
        message, history, num_agents, show_thoughts, mode,
        single_model, orch_model, agent_model, synth_model,
        api_key, use_tavily, tavily_key
    )
    # Any non-"user" role is rendered as the assistant in the Chatbot widget.
    chat_display = [
        {
            "role": "user" if entry["role"] == "user" else "assistant",
            "content": entry["content"],
        }
        for entry in updated_history
    ]
    # The empty string clears the message textbox after sending.
    return chat_display, updated_history, "", model_info, questions, agents
# Wire up events.
mode_radio.change(
    fn=toggle_model_selection,
    inputs=[mode_radio],
    outputs=[single_model_group, multi_model_group]
)
use_tavily_checkbox.change(
    fn=toggle_tavily_key,
    inputs=[use_tavily_checkbox],
    outputs=[tavily_api_key_input]
)
# The button click and the Enter-to-submit paths must feed handle_message
# identically; share one input/output list so they cannot drift apart.
message_event_inputs = [
    msg_input, chat_state, num_agents_slider, show_thoughts_checkbox,
    mode_radio, single_model_dropdown,
    orchestrator_dropdown, agent_dropdown, synthesizer_dropdown,
    api_key_input, use_tavily_checkbox, tavily_api_key_input
]
message_event_outputs = [
    chatbot, chat_state, msg_input, model_info_output, questions_output, agents_output
]
send_btn.click(
    fn=handle_message,
    inputs=message_event_inputs,
    outputs=message_event_outputs
)
msg_input.submit(
    fn=handle_message,
    inputs=message_event_inputs,
    outputs=message_event_outputs
)
clear_btn.click(
    fn=clear_chat,
    outputs=[chatbot, chat_state]
)
# Footer with usage instructions.
gr.Markdown(
    """
---
**How to Use:**
1. Enter your OpenRouter API key (required)
2. (Optional) Enable web search and add Tavily key
3. Choose your model configuration
4. Start chatting! The AI remembers your conversation.
**Note:** Your API keys are only used for this session and never stored.
"""
)
def launch(share=True, server_port=7862):
    """Launch the chat interface.

    Args:
        share: Create a public Gradio share link when True.
        server_port: Port for the local server.
    """
    launch_options = {
        "share": share,
        "server_port": server_port,
        "server_name": "0.0.0.0",  # listen on all interfaces
        "show_error": True,
        "inbrowser": True,
    }
    demo.launch(**launch_options)
# Allow running this module directly to start the web app.
if __name__ == "__main__":
    launch()