"""
Agent creation and configuration.
Unified agent factory using subagent architecture for all modes.
"""

from langchain_openai import ChatOpenAI

from .config import AgentConfig
from .mcp_clients import MCPClientManager


class AgentFactory:
    """Factory for creating agents using the unified subagent architecture."""

    @staticmethod
    async def create_subagent_orchestrator(
model: str,
api_key: str,
provider: str,
mode: str = "Single Agent (All Tools)"
):
"""
        Create an agent using the subagent architecture (all modes run on the subagent system).
Args:
model: LLM model name
api_key: API key for the provider
            provider: LLM provider ("openai", "anthropic", or "huggingface")
mode: Agent mode (e.g., "Single Agent (All Tools)", "Specialized Subagents (3 Specialists)")
Returns:
Configured agent (single subagent or router workflow)
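
        Example (illustrative; the model name is a placeholder):
            agent = await AgentFactory.create_subagent_orchestrator(
                model="gpt-4o-mini",
                api_key=api_key,
                provider="openai",
            )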
"""
from .subagent_config import SubAgentConfig
from .subagent_supervisor import create_supervisor_workflow
from .subagent_factory import SubAgentFactory
        from langchain_anthropic import ChatAnthropic

# Get mode configuration
mode_config = SubAgentConfig.get_mode_config(mode)
print(f"[AGENT]: Creating agent in '{mode}' mode")
# Create LLM based on provider
if provider == "huggingface":
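            # Hugging Face's router exposes an OpenAI-compatible endpoint,
            # so ChatOpenAI works here with a custom base_url.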
llm = ChatOpenAI(
base_url="https://router.huggingface.co/v1",
api_key=api_key,
model=model,
temperature=AgentConfig.HF_TEMPERATURE,
streaming=True
)
elif provider == "anthropic":
llm = ChatAnthropic(
model=model,
api_key=api_key,
temperature=AgentConfig.ANTHROPIC_TEMPERATURE,
streaming=True
)
else: # openai
llm = ChatOpenAI(
model=model,
api_key=api_key,
temperature=AgentConfig.OPENAI_TEMPERATURE,
streaming=True
)
# Get all MCP tools
client = await MCPClientManager.create_multi_server_client()
tools = await MCPClientManager.get_tools(client)
# Create agent based on mode
if mode_config["use_router"]:
# Multi-agent mode: create router with specialists
print(f"[AGENT]: Creating supervisor with subagents: {mode_config['subagents']}")
workflow = await create_supervisor_workflow(tools, llm, provider=provider)
return workflow
else:
# Single agent mode: create one subagent directly
subagent_name = mode_config["subagents"][0]
print(f"[AGENT]: Creating single subagent: {subagent_name}")
# Create agent with memory for streaming support
from langchain.agents import create_agent
from langgraph.checkpoint.memory import InMemorySaver
# create_agent auto-compiles, so pass checkpointer and name directly
# Filter tools based on subagent configuration
# Pass provider to get provider-specific prompts
            subagent_defs = SubAgentConfig.get_subagent_definitions(provider=provider)
            subagent_def = subagent_defs[subagent_name]
            filtered_tools = [tool for tool in tools if tool.name in subagent_def["tools"]]
            print(f"[AGENT]: Filtered {len(filtered_tools)} tools for {subagent_name}: {[t.name for t in filtered_tools]}")
            agent = create_agent(
                model=llm,
                tools=filtered_tools,
                system_prompt=subagent_def["prompt"],
                checkpointer=InMemorySaver(),
                name=subagent_name
            )
return agent
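

# Example usage (illustrative sketch; assumes an async entrypoint and an
# OPENAI_API_KEY environment variable -- neither is part of this module):
#
#     import asyncio, os
#
#     async def main():
#         agent = await AgentFactory.create_subagent_orchestrator(
#             model="gpt-4o-mini",
#             api_key=os.environ["OPENAI_API_KEY"],
#             provider="openai",
#         )
#         # Agents compiled with a checkpointer need a thread_id per conversation.
#         result = await agent.ainvoke(
#             {"messages": [{"role": "user", "content": "Hello"}]},
#             config={"configurable": {"thread_id": "demo"}},
#         )
#
#     asyncio.run(main())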