"""
Configuration Loader
Loads settings from config.yaml for agent personas and prompts
"""
import yaml
import os
from typing import Dict, Any
class Config:
    """
    Configuration manager for the Review Intelligence System.

    Loads settings (model choices per stage, agent personas, prompt
    templates, classification rules, and UI/processing options) from a YAML
    file and exposes dict-based accessors over the result. Falls back to a
    built-in default configuration when the file is missing, unreadable, or
    does not contain a mapping.
    """

    def __init__(self, config_file: str = "config.yaml"):
        # Path to the YAML config; resolved relative to the working directory.
        self.config_file = config_file
        # Parsed configuration mapping (always a dict after loading).
        self.config = self._load_config()

    def _load_config(self) -> Dict[str, Any]:
        """Load configuration from the YAML file, degrading to defaults.

        Returns the parsed mapping. Falls back to ``_default_config()`` when
        the file is absent, cannot be parsed, or does not parse to a dict.
        """
        if not os.path.exists(self.config_file):
            print(f"⚠️ Config file not found: {self.config_file}")
            print("   Using default configuration")
            return self._default_config()
        try:
            # Explicit encoding so parsing behaves identically on all platforms.
            with open(self.config_file, 'r', encoding='utf-8') as f:
                config = yaml.safe_load(f)
            # safe_load returns None for an empty file and may return a
            # scalar/list for malformed content; either would break the
            # `.get()` accessors below, so fall back to defaults here.
            if not isinstance(config, dict):
                print(f"⚠️ Config file is empty or not a mapping: {self.config_file}")
                print("   Using default configuration")
                return self._default_config()
            print(f"✅ Configuration loaded from {self.config_file}")
            return config
        except Exception as e:
            # Best-effort loader: any I/O or parse error degrades to the
            # built-in defaults rather than crashing at startup.
            print(f"⚠️ Error loading config: {e}")
            print("   Using default configuration")
            return self._default_config()

    def _default_config(self) -> Dict[str, Any]:
        """Return the built-in default configuration (used when YAML is unavailable)."""
        return {
            'models': {
                'stage1': {
                    'llm1': {'name': 'Qwen/Qwen2.5-72B-Instruct', 'temperature': 0.1},
                    'llm2': {'name': 'mistralai/Mistral-7B-Instruct-v0.3', 'temperature': 0.1},
                    'manager': {'name': 'meta-llama/Llama-3.1-8B-Instruct', 'temperature': 0.1}
                },
                'stage2': {
                    'best_model': {'name': 'cardiffnlp/twitter-roberta-base-sentiment-latest'},
                    'alternate_model': {'name': 'finiteautomata/bertweet-base-sentiment-analysis'}
                },
                'stage3': {
                    'llm3': {'name': 'meta-llama/Llama-3.1-70B-Instruct', 'temperature': 0.1}
                }
            }
        }

    def get_model(self, stage: str, model_key: str) -> Dict[str, Any]:
        """Return the model config for ``stage``/``model_key`` ({} if absent)."""
        return self.config.get('models', {}).get(stage, {}).get(model_key, {})

    def get_persona(self, agent: str) -> Dict[str, Any]:
        """Return the persona configuration for ``agent`` ({} if absent)."""
        return self.config.get('personas', {}).get(agent, {})

    def get_prompt_template(self, template_name: str) -> str:
        """Return the prompt template text for ``template_name`` ('' if absent)."""
        return self.config.get('prompt_templates', {}).get(template_name, '')

    def get_classification_rules(self) -> Dict[str, Any]:
        """Return the classification rules section ({} if absent)."""
        return self.config.get('classification_rules', {})

    def get_sentiment_settings(self) -> Dict[str, Any]:
        """Return the sentiment-analysis settings section ({} if absent)."""
        return self.config.get('sentiment', {})

    def get_batch_settings(self) -> Dict[str, Any]:
        """Return the batch-analysis settings section ({} if absent)."""
        return self.config.get('batch_analysis', {})

    def get_processing_settings(self) -> Dict[str, Any]:
        """Return the processing settings section ({} if absent)."""
        return self.config.get('processing', {})

    def get_dashboard_settings(self) -> Dict[str, Any]:
        """Return the dashboard settings section ({} if absent)."""
        return self.config.get('dashboard', {})
# Lazily-created module-level singleton holding the shared Config object.
_config_instance = None


def get_config(config_file: str = "config.yaml") -> Config:
    """Return the shared :class:`Config`, building it on first use.

    Note that ``config_file`` is only honored by the call that creates the
    singleton; subsequent calls return the existing instance unchanged.
    """
    global _config_instance
    # A Config instance is always truthy, so `or` only triggers on None.
    _config_instance = _config_instance or Config(config_file)
    return _config_instance
if __name__ == "__main__":
    # Smoke-test the config loader: exercise the main accessors and report.
    banner = "=" * 60
    print("\n" + banner)
    print("🧪 TESTING CONFIG LOADER")
    print(banner + "\n")

    cfg = get_config()

    # Model lookup for stage 1, agent LLM1.
    model_cfg = cfg.get_model('stage1', 'llm1')
    print(f"LLM1 Model: {model_cfg.get('name', 'Not found')}")

    # Persona lookup for the same agent.
    persona = cfg.get_persona('llm1')
    print(f"LLM1 Persona: {persona.get('name', 'Not found')}")

    # Prompt template lookup (length only; content may be large).
    template = cfg.get_prompt_template('stage1_llm1')
    print(f"Prompt template loaded: {len(template)} characters")

    print("\n✅ Config loader test complete!")