# trading-tools/web/components/provider_config.py
# Deployed to HuggingFace Spaces by Deploy Bot (commit a1bf219).
"""Provider configuration UI components for LLM provider and routing policy selection."""
from typing import Optional, Tuple
import gradio as gr
def create_provider_selector() -> gr.Dropdown:
    """Build the dropdown used to choose the LLM backend.

    Returns:
        A ``gr.Dropdown`` listing the supported providers, with
        HuggingFace preselected as the default.
    """
    supported_providers = ["openai", "anthropic", "huggingface", "qwen"]
    return gr.Dropdown(
        choices=supported_providers,
        value="huggingface",
        label="LLM Provider",
        info="AI model provider for analysis (HuggingFace = Inference Providers with routing)",
        elem_id="llm_provider_selector",
    )
def create_routing_policy_selector() -> gr.Dropdown:
    """Build the routing-policy dropdown for HuggingFace Inference Providers.

    The choices mix a default model, routing strategies (``:fastest``,
    ``:cheapest``, ``auto``) and explicit provider names; the ``None``-valued
    entries act as visual section separators.

    Returns:
        A ``gr.Dropdown`` defaulting to the Llama 3.3 70B Instruct model.
    """
    # (label shown to the user, value passed to the backend)
    policy_choices = [
        ("Default Model (Llama 3.3 70B)", "meta-llama/Llama-3.3-70B-Instruct"),
        ("--- Routing Strategies ---", None),
        ("πŸš€ Fastest Response", ":fastest"),
        ("πŸ’° Cheapest Cost", ":cheapest"),
        ("πŸ”„ Auto (Balanced)", "auto"),
        ("--- Explicit Providers ---", None),
        ("Groq (Ultra-fast)", "groq"),
        ("Together AI", "together"),
        ("Replicate", "replicate"),
        ("Cerebras (High Performance)", "cerebras"),
        ("Fireworks AI", "fireworks"),
        ("DeepInfra", "deepinfra"),
    ]
    return gr.Dropdown(
        choices=policy_choices,
        value="meta-llama/Llama-3.3-70B-Instruct",
        label="HuggingFace Routing Policy",
        info="Select routing strategy or specific provider (only applies when HuggingFace is selected)",
        visible=True,
        elem_id="routing_policy_selector",
    )
def create_api_key_input() -> gr.Textbox:
    """Build the password-masked textbox for the HuggingFace token.

    Returns:
        A ``gr.Textbox`` configured for HF_TOKEN entry.
    """
    return gr.Textbox(
        type="password",
        label="HuggingFace Token (HF_TOKEN)",
        placeholder="hf_...",
        info="Get your free token from https://huggingface.co/settings/tokens",
        elem_id="hf_token_input",
    )
def update_routing_policy_visibility(provider: str) -> dict:
    """Show the routing-policy selector only for the HuggingFace provider.

    Args:
        provider: Selected LLM provider (openai, anthropic, huggingface, qwen).

    Returns:
        A Gradio update dict toggling the routing policy selector's visibility.
    """
    show_selector = provider == "huggingface"
    return gr.update(visible=show_selector)
def format_provider_status(
    provider: str, routing_policy: Optional[str] = None, model: Optional[str] = None
) -> str:
    """Render a one-line status string for the current provider configuration.

    Args:
        provider: Selected LLM provider.
        routing_policy: Routing policy (only meaningful for HuggingFace); a
            strategy like ``:fastest`` is upper-cased, a model path keeps only
            its final segment, anything else is title-cased.
        model: Explicit model name shown for non-HuggingFace providers.

    Returns:
        A checkmark-prefixed status string.
    """
    if provider != "huggingface":
        friendly = {
            "openai": "OpenAI",
            "anthropic": "Anthropic (Claude)",
            "qwen": "Qwen (DashScope)",
        }.get(provider, provider.title())
        model_suffix = f" | Model: {model}" if model else ""
        return f"βœ“ Provider: {friendly}{model_suffix}"

    # HuggingFace branch: summarize the routing policy (or lack thereof).
    if not routing_policy:
        return "βœ“ Provider: HuggingFace | Routing: Default"
    if routing_policy.startswith(":"):
        shown = routing_policy.upper()
    elif "/" in routing_policy:
        shown = routing_policy.rsplit("/", 1)[-1]
    else:
        shown = routing_policy.title()
    return f"βœ“ Provider: HuggingFace | Routing: {shown}"
def create_provider_preset_buttons() -> Tuple[gr.Button, gr.Button, gr.Button]:
    """Build a row of preset buttons for common provider configurations.

    Returns:
        Tuple of (cost_optimized_btn, performance_optimized_btn, balanced_btn).
    """
    labels = ("πŸ’° Cost Optimized", "πŸš€ Performance Optimized", "βš–οΈ Balanced")
    with gr.Row():
        buttons = tuple(
            gr.Button(text, variant="secondary", size="sm") for text in labels
        )
    return buttons
def apply_cost_optimized_preset() -> Tuple[str, str]:
    """Return the (provider, routing_policy) pair for the cost-saving preset.

    Returns:
        ``("huggingface", ":cheapest")``.
    """
    return ("huggingface", ":cheapest")
def apply_performance_optimized_preset() -> Tuple[str, str]:
    """Return the (provider, routing_policy) pair for the speed-first preset.

    Returns:
        ``("huggingface", ":fastest")``.
    """
    return ("huggingface", ":fastest")
def apply_balanced_preset() -> Tuple[str, str]:
    """Return the (provider, routing_policy) pair for the balanced preset.

    Returns:
        ``("huggingface", "auto")``.
    """
    return ("huggingface", "auto")
def create_provider_info_panel() -> gr.Markdown:
    """Build the (initially hidden) markdown panel describing provider options.

    Returns:
        A ``gr.Markdown`` component with ``visible=False``; toggled via
        ``toggle_provider_info``.
    """
    panel_body = """
### πŸ€– Provider Options
**HuggingFace Inference Providers** (Recommended for cost savings)
- Access to 22+ providers through unified router
- Routing strategies: `:fastest` (speed), `:cheapest` (cost)
- Many providers offer generous free tiers
- Estimated savings: 90%+ vs OpenAI GPT-4
**OpenAI**
- GPT-4o, GPT-4o-mini
- Reliable and consistent
- Higher cost but proven performance
**Anthropic**
- Claude Sonnet 4.5, Claude Opus 4.5
- Excellent reasoning capabilities
- Premium pricing for premium quality
**Qwen**
- Qwen 2.5 models via DashScope
- Cost-effective option
- Good for Chinese language support
### πŸ’‘ Routing Strategies (HuggingFace only)
- **:cheapest** - Prioritize lowest cost providers (often free tier)
- **:fastest** - Prioritize fastest response times
- **auto** - Balanced selection based on availability
- **Explicit** - Route to specific provider (groq, together, etc.)
"""
    return gr.Markdown(panel_body, visible=False)
def toggle_provider_info(visible: bool) -> dict:
    """Flip the visibility of the provider information panel.

    Args:
        visible: The panel's current visibility state.

    Returns:
        A Gradio update dict with the inverted visibility.
    """
    flipped = not visible
    return gr.update(visible=flipped)