# water3/agent/core/level2_config.py
"""
Level 2 Configuration - Single model configuration via environment variables
"""
import os
from dataclasses import dataclass
from typing import Optional
@dataclass
class LLMConfig:
    """Configuration for a single LLM model, sourced from the environment."""
    model: str
    api_key: str
    base_url: Optional[str] = None
    temperature: float = 0.7
    max_tokens: int = 4096

    @classmethod
    def from_env(cls) -> 'LLMConfig':
        """Build an LLMConfig from the LLM_* environment variables.

        Unset variables fall back to the documented defaults; LLM_API_KEY
        falls back to an empty string and LLM_BASE_URL to None.
        """
        raw_temperature = os.getenv("LLM_TEMPERATURE", "0.7")
        raw_max_tokens = os.getenv("LLM_MAX_TOKENS", "4096")
        return cls(
            model=os.getenv("LLM_MODEL", "gpt-4-turbo-preview"),
            api_key=os.getenv("LLM_API_KEY", ""),
            base_url=os.getenv("LLM_BASE_URL"),
            # Numeric values arrive as strings and are converted here; a
            # malformed value raises ValueError, same as the original.
            temperature=float(raw_temperature),
            max_tokens=int(raw_max_tokens),
        )
@dataclass
class Level2Config:
    """Level 2 advanced configuration"""
    # Reasoning
    enable_multi_pass_reasoning: bool = True
    max_reasoning_depth: int = 5
    complexity_threshold_high: int = 70
    complexity_threshold_low: int = 30
    # Caching
    enable_semantic_cache: bool = True
    cache_similarity_threshold: float = 0.92
    cache_ttl_seconds: int = 604800  # 7 days
    # Optimization
    enable_parallel_execution: bool = True
    enable_auto_retry: bool = True
    max_tool_retries: int = 3
    tool_timeout_base: float = 30.0
    # Observability
    enable_metrics: bool = True
    enable_anomaly_detection: bool = True
    anomaly_threshold_multiplier: float = 2.5
    # Memory
    enable_contextual_memory: bool = True
    max_context_tokens: int = 2000

    @classmethod
    def from_env(cls) -> 'Level2Config':
        """Build a Level2Config from environment variables.

        Unset variables fall back to the field defaults shown above.
        """
        def flag(name: str) -> bool:
            # Feature toggles default to on; only the literal "true"
            # (case-insensitive) counts as enabled — any other value,
            # including "1" or "yes", disables the feature.
            return os.getenv(name, "true").lower() == "true"

        return cls(
            enable_multi_pass_reasoning=flag("ENABLE_MULTI_PASS"),
            max_reasoning_depth=int(os.getenv("MAX_REASONING_DEPTH", "5")),
            complexity_threshold_high=int(os.getenv("COMPLEXITY_HIGH", "70")),
            complexity_threshold_low=int(os.getenv("COMPLEXITY_LOW", "30")),
            enable_semantic_cache=flag("ENABLE_SEMANTIC_CACHE"),
            cache_similarity_threshold=float(os.getenv("CACHE_SIMILARITY", "0.92")),
            cache_ttl_seconds=int(os.getenv("CACHE_TTL", "604800")),
            enable_parallel_execution=flag("ENABLE_PARALLEL"),
            enable_auto_retry=flag("ENABLE_AUTO_RETRY"),
            max_tool_retries=int(os.getenv("MAX_RETRIES", "3")),
            tool_timeout_base=float(os.getenv("TOOL_TIMEOUT", "30.0")),
            enable_metrics=flag("ENABLE_METRICS"),
            enable_anomaly_detection=flag("ENABLE_ANOMALY"),
            anomaly_threshold_multiplier=float(os.getenv("ANOMALY_THRESHOLD", "2.5")),
            enable_contextual_memory=flag("ENABLE_MEMORY"),
            max_context_tokens=int(os.getenv("MAX_CONTEXT_TOKENS", "2000")),
        )
# Global configurations.
# NOTE: both objects are built once at module import time from the current
# process environment; later changes to os.environ are not picked up.
llm_config = LLMConfig.from_env()
level2_config = Level2Config.from_env()