File size: 8,142 Bytes
6a911c8 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 |
"""
LLM utility functions for DeepCode project.
This module provides common LLM-related utilities to avoid circular imports
and reduce code duplication across the project.
"""
import os
from typing import Any, Dict, Optional, Tuple, Type

import yaml

# Import LLM classes
from mcp_agent.workflows.llm.augmented_llm_anthropic import AnthropicAugmentedLLM
from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM
def get_preferred_llm_class(config_path: str = "mcp_agent.secrets.yaml") -> Type[Any]:
    """
    Automatically select the LLM class based on API key availability in configuration.

    Reads the YAML config file and returns AnthropicAugmentedLLM if
    ``anthropic.api_key`` is present and non-blank, otherwise OpenAIAugmentedLLM.
    Any read/parse error falls back to OpenAIAugmentedLLM.

    Args:
        config_path: Path to the YAML configuration file

    Returns:
        class: The preferred LLM class
    """
    try:
        if os.path.exists(config_path):
            with open(config_path, "r", encoding="utf-8") as f:
                # safe_load returns None for an empty file; normalize to {}
                config = yaml.safe_load(f) or {}
            # A null "anthropic:" section yields None — same guard as get_default_models
            anthropic_config = config.get("anthropic") or {}
            anthropic_key = anthropic_config.get("api_key") or ""
            # Whitespace-only keys count as "not configured"
            if anthropic_key.strip():
                return AnthropicAugmentedLLM
            return OpenAIAugmentedLLM
        print(f"๐ค Config file {config_path} not found, using OpenAIAugmentedLLM")
        return OpenAIAugmentedLLM
    except Exception as e:
        print(f"๐ค Error reading config file {config_path}: {e}")
        print("๐ค Falling back to OpenAIAugmentedLLM")
        return OpenAIAugmentedLLM
def get_default_models(config_path: str = "mcp_agent.config.yaml") -> Dict[str, str]:
    """
    Get default models from configuration file.

    Args:
        config_path: Path to the configuration file

    Returns:
        dict: Dictionary with 'anthropic' and 'openai' default model names.
        Falls back to hard-coded defaults when the file is missing or unreadable.
    """
    fallback = {"anthropic": "claude-sonnet-4-20250514", "openai": "o3-mini"}
    try:
        if os.path.exists(config_path):
            with open(config_path, "r", encoding="utf-8") as f:
                # safe_load returns None for an empty file; normalize to {}
                config = yaml.safe_load(f) or {}
            # Handle null values in config sections (e.g. "anthropic:" with no body)
            anthropic_config = config.get("anthropic") or {}
            openai_config = config.get("openai") or {}
            return {
                "anthropic": anthropic_config.get(
                    "default_model", "claude-sonnet-4-20250514"
                ),
                "openai": openai_config.get("default_model", "o3-mini"),
            }
        print(f"Config file {config_path} not found, using default models")
        return fallback
    except Exception as e:
        print(f"โError reading config file {config_path}: {e}")
        return fallback
def get_document_segmentation_config(
    config_path: str = "mcp_agent.config.yaml",
) -> Dict[str, Any]:
    """
    Get document segmentation configuration from config file.

    Args:
        config_path: Path to the main configuration file

    Returns:
        Dict containing segmentation configuration; defaults
        ({"enabled": True, "size_threshold_chars": 50000}) are used when the
        file is missing, unreadable, or lacks the section.
    """
    defaults = {"enabled": True, "size_threshold_chars": 50000}
    try:
        if os.path.exists(config_path):
            with open(config_path, "r", encoding="utf-8") as f:
                # safe_load returns None for an empty file; normalize to {}
                config = yaml.safe_load(f) or {}
            # "or {}" also covers an explicit null "document_segmentation:" section
            seg_config = config.get("document_segmentation") or {}
            return {
                "enabled": seg_config.get("enabled", True),
                "size_threshold_chars": seg_config.get("size_threshold_chars", 50000),
            }
        print(
            f"๐ Config file {config_path} not found, using default segmentation settings"
        )
        return defaults
    except Exception as e:
        print(f"๐ Error reading segmentation config from {config_path}: {e}")
        print("๐ Using default segmentation settings")
        return defaults
def should_use_document_segmentation(
    document_content: str, config_path: str = "mcp_agent.config.yaml"
) -> Tuple[bool, str]:
    """
    Decide whether document segmentation should be applied.

    Combines the configuration (enabled flag, size threshold) with the actual
    document size to produce a decision plus a human-readable justification.

    Args:
        document_content: The content of the document to analyze
        config_path: Path to the configuration file

    Returns:
        Tuple of (should_segment, reason) where:
            - should_segment: Boolean indicating whether to use segmentation
            - reason: String explaining the decision
    """
    settings = get_document_segmentation_config(config_path)
    if not settings["enabled"]:
        return False, "Document segmentation disabled in configuration"

    size = len(document_content)
    limit = settings["size_threshold_chars"]
    exceeds = size > limit
    relation = "exceeds" if exceeds else "below"
    reason = f"Document size ({size:,} chars) {relation} threshold ({limit:,} chars)"
    return exceeds, reason
def get_adaptive_agent_config(
    use_segmentation: bool, search_server_names: Optional[list] = None
) -> Dict[str, list]:
    """
    Get adaptive agent configuration based on whether to use document segmentation.

    Args:
        use_segmentation: Whether to include the document-segmentation server
        search_server_names: Base search server names (from get_search_server_names);
            None is treated as an empty list

    Returns:
        Dict mapping each agent name ("concept_analysis", "algorithm_analysis",
        "code_planner") to its list of MCP server names
    """
    if search_server_names is None:
        search_server_names = []

    # Both modes wire the same extra server into every agent; only its name differs.
    extra_server = "document-segmentation" if use_segmentation else "filesystem"

    config = {
        "concept_analysis": [extra_server],
        "algorithm_analysis": search_server_names.copy(),
        "code_planner": search_server_names.copy(),
    }
    for agent in ("algorithm_analysis", "code_planner"):
        if extra_server not in config[agent]:
            config[agent].append(extra_server)
    return config
def get_adaptive_prompts(use_segmentation: bool) -> Dict[str, str]:
    """
    Get appropriate prompt versions based on segmentation usage.

    Args:
        use_segmentation: Whether to use segmented-reading prompts

    Returns:
        Dict containing prompt configurations keyed by
        "concept_analysis", "algorithm_analysis", and "code_planning"
    """
    # Imported lazily to avoid circular imports
    from prompts.code_prompts import (
        CODE_PLANNING_PROMPT,
        CODE_PLANNING_PROMPT_TRADITIONAL,
        PAPER_ALGORITHM_ANALYSIS_PROMPT,
        PAPER_ALGORITHM_ANALYSIS_PROMPT_TRADITIONAL,
        PAPER_CONCEPT_ANALYSIS_PROMPT,
        PAPER_CONCEPT_ANALYSIS_PROMPT_TRADITIONAL,
    )

    if use_segmentation:
        concept = PAPER_CONCEPT_ANALYSIS_PROMPT
        algorithm = PAPER_ALGORITHM_ANALYSIS_PROMPT
        planning = CODE_PLANNING_PROMPT
    else:
        concept = PAPER_CONCEPT_ANALYSIS_PROMPT_TRADITIONAL
        algorithm = PAPER_ALGORITHM_ANALYSIS_PROMPT_TRADITIONAL
        planning = CODE_PLANNING_PROMPT_TRADITIONAL

    return {
        "concept_analysis": concept,
        "algorithm_analysis": algorithm,
        "code_planning": planning,
    }
|