# Author: Debashis
# Initial commit: Security Incident Analyzer with LLM integration
# Commit: 0355450
"""Configuration management for the analyzer."""
import os
from enum import Enum
from typing import Optional
class LLMProvider(str, Enum):
    """Enumeration of the supported LLM backends.

    Inherits from ``str`` so members compare equal to their plain string
    values (e.g. ``LLMProvider.MOCK == "mock"``) and serialize cleanly.
    """

    OPENAI = "openai"  # hosted OpenAI API
    LOCAL = "local"    # locally hosted model
    MOCK = "mock"      # deterministic stub for tests/development
class Config:
    """Application configuration read from environment variables.

    Recognized variables:
        LLM_PROVIDER    one of "openai", "local", "mock"
                        (case-insensitive; default "mock")
        OPENAI_API_KEY  API key; required when LLM_PROVIDER=openai
        LLM_MODEL       explicit model name; overrides the provider default
        DEBUG           "true" (case-insensitive) enables debug mode
    """

    def __init__(self) -> None:
        """Read all settings from the process environment.

        Raises:
            ValueError: if LLM_PROVIDER is set to an unrecognized value.
        """
        raw_provider = os.getenv("LLM_PROVIDER", "mock").lower()
        try:
            self.llm_provider = LLMProvider(raw_provider)
        except ValueError:
            # Re-raise with an actionable message listing the valid options
            # instead of the generic "'x' is not a valid LLMProvider".
            valid = ", ".join(p.value for p in LLMProvider)
            raise ValueError(
                f"Invalid LLM_PROVIDER {raw_provider!r}; "
                f"expected one of: {valid}"
            ) from None
        self.openai_api_key: Optional[str] = os.getenv("OPENAI_API_KEY")
        self.llm_model: Optional[str] = os.getenv("LLM_MODEL")
        # Only the exact (case-insensitive) string "true" enables debug mode.
        self.debug: bool = os.getenv("DEBUG", "false").lower() == "true"

    def validate(self) -> None:
        """Validate configuration based on selected provider.

        Raises:
            ValueError: if LLM_PROVIDER=openai but OPENAI_API_KEY is unset.
        """
        if self.llm_provider == LLMProvider.OPENAI and not self.openai_api_key:
            raise ValueError(
                "OPENAI_API_KEY environment variable required "
                "when LLM_PROVIDER=openai"
            )

    @property
    def model_name(self) -> str:
        """Get the model name for the selected provider.

        An explicit LLM_MODEL always wins; otherwise a per-provider
        default is used, falling back to the mock model name.
        """
        if self.llm_model:
            return self.llm_model
        defaults = {
            LLMProvider.OPENAI: "gpt-4-turbo",
            LLMProvider.LOCAL: "mistral:7b",
            LLMProvider.MOCK: "mock-analyzer-v1",
        }
        return defaults.get(self.llm_provider, "mock-analyzer-v1")
# Global config instance — module-level singleton built at import time;
# import this rather than constructing Config directly.
config = Config()