|
|
"Gradio UI for DeepBoner agent with MCP server support." |
|
|
|
|
|
import os |
|
|
from collections.abc import AsyncGenerator |
|
|
from typing import Any, Literal |
|
|
|
|
|
import gradio as gr |
|
|
from pydantic_ai.models.anthropic import AnthropicModel |
|
|
from pydantic_ai.models.openai import OpenAIChatModel |
|
|
from pydantic_ai.providers.anthropic import AnthropicProvider |
|
|
from pydantic_ai.providers.openai import OpenAIProvider |
|
|
|
|
|
from src.agent_factory.judges import HFInferenceJudgeHandler, JudgeHandler, MockJudgeHandler |
|
|
from src.config.domain import ResearchDomain |
|
|
from src.orchestrators import create_orchestrator |
|
|
from src.tools.clinicaltrials import ClinicalTrialsTool |
|
|
from src.tools.europepmc import EuropePMCTool |
|
|
from src.tools.openalex import OpenAlexTool |
|
|
from src.tools.pubmed import PubMedTool |
|
|
from src.tools.search_handler import SearchHandler |
|
|
from src.utils.config import settings |
|
|
from src.utils.exceptions import ConfigurationError |
|
|
from src.utils.models import OrchestratorConfig |
|
|
from src.utils.service_loader import warmup_services |
|
|
|
|
|
# Accepted orchestration strategies; unrecognized values are coerced to
# "simple" by _validate_inputs before reaching configure_orchestrator.
OrchestratorMode = Literal["simple", "magentic", "advanced", "hierarchical"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Styling for the BYOK password textbox (targeted via
# elem_classes=["api-key-input"] in create_demo): keep it dark with white
# text even when focused, and neutralize browser autofill colors using the
# -webkit inset box-shadow trick.
CUSTOM_CSS = """
.api-key-input input {
    background-color: #1f2937 !important;
    color: white !important;
    border-color: #374151 !important;
}
.api-key-input input:focus,
.api-key-input input:focus-visible {
    background-color: #1f2937 !important;
    color: white !important;
    border-color: #e879f9 !important;
    outline: none !important;
}
/* Override aggressive browser autofill styling */
.api-key-input input:-webkit-autofill,
.api-key-input input:-webkit-autofill:hover,
.api-key-input input:-webkit-autofill:focus {
    -webkit-box-shadow: 0 0 0px 1000px #1f2937 inset !important;
    -webkit-text-fill-color: white !important;
    caret-color: white !important;
    transition: background-color 5000s ease-in-out 0s;
}
"""
|
|
|
|
|
|
|
|
def _build_judge_handler(
    use_mock: bool,
    user_api_key: str | None,
    domain: str | ResearchDomain | None,
) -> tuple[JudgeHandler | MockJudgeHandler | HFInferenceJudgeHandler, str]:
    """Select the judge backend for the orchestrator.

    Resolution order: explicit mock flag, user-supplied (BYOK) key,
    environment-configured keys, then the free HuggingFace inference tier.

    Args:
        use_mock: If True, use MockJudgeHandler (no API key needed)
        user_api_key: Optional user-provided API key; provider is detected
            from the key prefix
        domain: Research domain forwarded to the handler

    Returns:
        Tuple of (judge handler, human-readable backend name)

    Raises:
        ConfigurationError: If a user-supplied key has an unrecognized prefix
    """
    if use_mock:
        return MockJudgeHandler(domain=domain), "Mock (Testing)"

    if user_api_key and user_api_key.strip():
        model: AnthropicModel | OpenAIChatModel
        # Check the Anthropic prefix first: "sk-ant-" would also match the
        # generic OpenAI "sk-" prefix in the next branch.
        if user_api_key.startswith("sk-ant-"):
            model = AnthropicModel(
                settings.anthropic_model,
                provider=AnthropicProvider(api_key=user_api_key),
            )
            backend = "Paid API (Anthropic)"
        elif user_api_key.startswith("sk-"):
            model = OpenAIChatModel(
                settings.openai_model,
                provider=OpenAIProvider(api_key=user_api_key),
            )
            backend = "Paid API (OpenAI)"
        else:
            raise ConfigurationError(
                "Invalid API key format. Expected sk-... (OpenAI) or sk-ant-... (Anthropic)"
            )
        return JudgeHandler(model=model, domain=domain), backend

    # No BYOK key: model=None lets JudgeHandler use the env-configured key.
    if settings.has_openai_key:
        return JudgeHandler(model=None, domain=domain), "Paid API (OpenAI from env)"
    if settings.has_anthropic_key:
        return JudgeHandler(model=None, domain=domain), "Paid API (Anthropic from env)"
    return HFInferenceJudgeHandler(domain=domain), "Free Tier (Llama 3.1 / Mistral)"


def configure_orchestrator(
    use_mock: bool = False,
    mode: OrchestratorMode = "simple",
    user_api_key: str | None = None,
    domain: str | ResearchDomain | None = None,
) -> tuple[Any, str]:
    """
    Create an orchestrator instance.

    Args:
        use_mock: If True, use MockJudgeHandler (no API key needed)
        mode: Orchestrator mode ("simple", "magentic", "advanced", or
            "hierarchical")
        user_api_key: Optional user-provided API key (BYOK) - auto-detects provider
        domain: Research domain; passed through unchanged (None lets the
            downstream handlers apply their own default)

    Returns:
        Tuple of (Orchestrator instance, backend_name)

    Raises:
        ConfigurationError: If the user-supplied key has an invalid format
    """
    config = OrchestratorConfig(
        max_iterations=10,
        max_results_per_tool=10,
    )

    # Fan queries out to all four literature/trial sources under one timeout.
    search_handler = SearchHandler(
        tools=[PubMedTool(), ClinicalTrialsTool(), EuropePMCTool(), OpenAlexTool()],
        timeout=config.search_timeout,
    )

    judge_handler, backend_info = _build_judge_handler(use_mock, user_api_key, domain)

    orchestrator = create_orchestrator(
        search_handler=search_handler,
        judge_handler=judge_handler,
        config=config,
        mode=mode,
        api_key=user_api_key,
        domain=domain,
    )

    return orchestrator, backend_info
|
|
|
|
|
|
|
|
def _validate_inputs(
    mode: str,
    api_key: str | None,
    api_key_state: str | None,
) -> tuple[OrchestratorMode, str | None, bool]:
    """Validate inputs and determine mode/key status.

    Returns:
        Tuple of (validated_mode, effective_user_key, has_paid_key)
    """
    # Unknown mode strings silently fall back to "simple".
    checked_mode: OrchestratorMode = (
        mode if mode in {"simple", "magentic", "advanced", "hierarchical"} else "simple"
    )

    # The live textbox value wins over the persisted state; blank means no key.
    effective_key = (api_key or api_key_state or "").strip() or None

    env_openai = settings.has_openai_key
    env_anthropic = settings.has_anthropic_key
    user_key_is_openai = bool(
        effective_key
        and effective_key.startswith("sk-")
        and not effective_key.startswith("sk-ant-")
    )
    paid = env_openai or env_anthropic or bool(effective_key)

    # Advanced mode needs an OpenAI key; downgrade when none is available.
    if checked_mode == "advanced" and not (env_openai or user_key_is_openai):
        checked_mode = "simple"

    return checked_mode, effective_key, paid
|
|
|
|
|
|
|
|
async def research_agent(
    message: str,
    history: list[dict[str, Any]],
    mode: str = "simple",
    domain: str = "sexual_health",
    api_key: str = "",
    api_key_state: str = "",
    progress: gr.Progress = gr.Progress(),  # Gradio-injected tracker; the default-arg pattern is Gradio's convention
) -> AsyncGenerator[str, None]:
    """
    Gradio chat function that runs the research agent.

    Streams markdown updates as orchestrator events arrive: streaming chunks
    are accumulated into a buffer and re-yielded live, then flushed into the
    response once a non-streaming event ends the run of chunks.

    Args:
        message: User's research question
        history: Chat history (Gradio format); currently unused but required
            by the ChatInterface signature
        mode: Orchestrator mode ("simple" or "advanced")
        domain: Research domain
        api_key: Optional user-provided API key (BYOK - auto-detects provider)
        api_key_state: Persistent API key state (survives example clicks)
        progress: Gradio progress tracker

    Yields:
        Markdown-formatted responses for streaming
    """
    if not message.strip():
        yield "Please enter a research question."
        return

    domain_str = domain or "sexual_health"

    mode_validated, user_api_key, has_paid_key = _validate_inputs(mode, api_key, api_key_state)

    # _validate_inputs downgrades "advanced" to "simple" when no OpenAI key
    # is available; surface that to the user instead of failing silently.
    if mode == "advanced" and mode_validated == "simple":
        yield (
            "β οΈ **Warning**: Advanced mode currently requires OpenAI API key. "
            "Anthropic keys only work in Simple mode. Falling back to Simple.\n\n"
        )

    if not has_paid_key:
        yield (
            "π€ **Free Tier**: Using HuggingFace Inference (Llama 3.1 / Mistral) for AI analysis.\n"
            "For premium models, enter an OpenAI or Anthropic API key below.\n\n"
        )

    # Completed markdown sections, re-joined and re-yielded on every update.
    response_parts: list[str] = []
    # Accumulates consecutive "streaming" event chunks until a non-streaming
    # event arrives (or the run ends), at which point it is flushed.
    streaming_buffer = ""

    try:
        orchestrator, backend_name = configure_orchestrator(
            use_mock=False,
            mode=mode_validated,
            user_api_key=user_api_key,
            domain=domain_str,
        )

        domain_display = domain_str.replace("_", " ").title()
        yield (
            f"π§ **Backend**: {backend_name} | **Domain**: {domain_display}\n\n"
            "β³ **Processing...** Searching PubMed, ClinicalTrials.gov, Europe PMC, OpenAlex...\n"
        )

        async for event in orchestrator.run(message):
            # Progress-bar bookkeeping only; response text is handled below.
            if event.type == "started":
                progress(0, desc="Starting research...")
            elif event.type == "thinking":
                progress(0.1, desc="Multi-agent reasoning...")
            elif event.type == "progress":
                p = 0.15
                # Orchestrator implementations differ: some expose
                # _max_rounds, others carry config.max_iterations; fall
                # back to 10 when neither is present.
                max_iters = getattr(orchestrator, "_max_rounds", None) or getattr(
                    getattr(orchestrator, "config", None), "max_iterations", 10
                )
                if event.iteration:
                    # Map iteration progress into the 0.2-0.9 band.
                    p = 0.2 + (0.7 * (min(event.iteration, max_iters) / max_iters))
                progress(p, desc=event.message)

            if event.type == "streaming":
                # Show the partial text live without committing it yet.
                streaming_buffer += event.message
                current_parts = [*response_parts, f"π‘ **STREAMING**: {streaming_buffer}"]
                yield "\n\n".join(current_parts)
                continue

            # A non-streaming event ends the streaming run: commit the buffer.
            if streaming_buffer:
                response_parts.append(f"π‘ **STREAMING**: {streaming_buffer}")
                streaming_buffer = ""

            if event.type == "complete":
                response_parts.append(event.message)
                yield "\n\n".join(response_parts)
            else:
                event_md = event.to_markdown()
                response_parts.append(event_md)
                yield "\n\n".join(response_parts)

        # Flush any trailing streamed text if the run ended mid-stream.
        if streaming_buffer:
            response_parts.append(f"π‘ **STREAMING**: {streaming_buffer}")
            yield "\n\n".join(response_parts)

    except Exception as e:
        # Broad catch is deliberate: surface any failure in the chat UI
        # rather than crashing the Gradio worker.
        yield f"β **Error**: {e!s}"
|
|
|
|
|
|
|
|
def create_demo() -> tuple[gr.ChatInterface, gr.Accordion]:
    """
    Create the Gradio demo interface with MCP support.

    Returns:
        Tuple of (configured ChatInterface, the accordion wrapping the
        additional inputs)
    """
    # Accordion grouping the mode/domain/key widgets beneath the chat box.
    additional_inputs_accordion = gr.Accordion(
        label="βοΈ Mode & API Key (Free tier works!)", open=False
    )

    # Persists the BYOK key across example clicks; wired in as the last
    # additional input so research_agent receives it as api_key_state.
    api_key_state = gr.State("")

    description = (
        "<div style='text-align: center;'>"
        "<em>AI-Powered Research Agent β searches PubMed, "
        "ClinicalTrials.gov, Europe PMC & OpenAlex</em><br><br>"
        "Deep research for sexual wellness, ED treatments, hormone therapy, "
        "libido, and reproductive health - for all genders."
        "</div>"
        "<hr style='margin: 1em auto; width: 80%; border: none; "
        "border-top: 1px solid #374151;'>"
        "<div style='text-align: center;'>"
        "<em>Research tool only β not for medical advice.</em><br>"
        "<strong>MCP Server Active</strong>: Connect Claude Desktop to "
        "<code>/gradio_api/mcp/</code>"
        "</div>"
    )

    demo = gr.ChatInterface(
        fn=research_agent,
        title="π DeepBoner",
        description=description,
        # Each example row is [message, mode, domain, api_key, api_key_state],
        # matching the order of additional_inputs below.
        examples=[
            [
                "What drugs improve female libido post-menopause?",
                "simple",
                "sexual_health",
                None,
                None,
            ],
            [
                "Testosterone therapy for hypoactive sexual desire disorder?",
                "simple",
                "sexual_health",
                None,
                None,
            ],
            [
                "Clinical trials for PDE5 inhibitors alternatives?",
                "advanced",
                "sexual_health",
                None,
                None,
            ],
        ],
        additional_inputs_accordion=additional_inputs_accordion,
        additional_inputs=[
            gr.Radio(
                choices=["simple", "advanced"],
                value="simple",
                label="Orchestrator Mode",
                info="β‘ Simple: Free/Any | π¬ Advanced: OpenAI (Deep Research)",
            ),
            # Hidden for now: the UI is pinned to sexual_health, but the
            # dropdown stays wired so the agent receives an explicit domain.
            gr.Dropdown(
                choices=[d.value for d in ResearchDomain],
                value="sexual_health",
                label="Research Domain",
                info="DeepBoner specializes in sexual health research",
                visible=False,
            ),
            # Styled by CUSTOM_CSS via the api-key-input class.
            gr.Textbox(
                label="π API Key (Optional)",
                placeholder="sk-... (OpenAI) or sk-ant-... (Anthropic)",
                type="password",
                info="Leave empty for free tier. Auto-detects provider from key prefix.",
                elem_classes=["api-key-input"],
            ),
            api_key_state,
        ],
    )

    return demo, additional_inputs_accordion
|
|
|
|
|
|
|
|
def main() -> None:
    """Warm up backing services, build the UI, and launch it with the MCP
    server enabled."""
    # Pre-initialize heavyweight services before the first request arrives.
    warmup_services()

    demo, _accordion = create_demo()

    launch_options: dict[str, Any] = {
        "server_name": os.getenv("GRADIO_SERVER_NAME", "0.0.0.0"),
        "server_port": 7860,
        "share": False,
        "mcp_server": True,
        "ssr_mode": False,
        "css": CUSTOM_CSS,
    }
    demo.launch(**launch_options)
|
|
|
|
|
|
|
|
# Script entry point: `python <module>` (local dev / HF Spaces) runs main().
if __name__ == "__main__":
    main()
|
|
|