# NOTE(review): the following header is a Hugging Face Spaces page-scrape
# artifact ("Spaces: Running on Zero" hardware badge), not part of the module.
"""
AI Accident Analysis — Application Configuration

All settings are loaded from environment variables / .env file.
Reuses the same pydantic-settings pattern as PhotoSearchApp.
"""
| import os | |
| from pathlib import Path | |
| from typing import List | |
| from pydantic_settings import BaseSettings | |
| from pydantic import Field | |
class Settings(BaseSettings):
    """Central configuration — all values come from .env or environment.

    Field defaults below are used when neither the environment nor the
    ``.env`` file provides a value (standard pydantic-settings behavior).
    """

    # --- Model ---
    model_id: str = Field(default="LiquidAI/LFM2-VL-3B", description="HuggingFace model ID for vision")
    chat_model_id: str = Field(default="LiquidAI/LFM2.5-1.2B-Instruct", description="HuggingFace model ID for text chatbot")
    model_torch_dtype: str = Field(default="bfloat16", description="Torch dtype: bfloat16, float16, float32")
    model_max_new_tokens: int = Field(default=1024, description="Max tokens — higher for detailed accident analysis")
    model_repetition_penalty: float = Field(default=1.2, description="Penalty for repeating tokens")
    model_temperature: float = Field(default=0.3, description="Low temperature for deterministic analysis")
    model_trust_remote_code: bool = Field(default=True, description="Trust remote code in model repo")

    # --- Device ---
    device: str = Field(default="auto", description="Device: auto, mps, cuda, cpu")

    # --- Server ---
    host: str = Field(default="0.0.0.0")
    port: int = Field(default=8001)
    debug: bool = Field(default=True)
    workers: int = Field(default=1)

    # --- Upload ---
    max_file_size_mb: int = Field(default=15, description="Max single file size in MB")
    max_photos_per_case: int = Field(default=20, description="Max photos per accident case")
    allowed_extensions: str = Field(default="jpg,jpeg,png,webp", description="Comma-separated allowed extensions")

    # --- Analysis ---
    confidence_threshold: float = Field(default=0.6, description="Min confidence for rule violation match")
    fault_min_violations: int = Field(default=1, description="Min violations needed to assign fault")

    # --- Security ---
    cors_origins: str = Field(default="http://localhost:8001,http://127.0.0.1:8001")

    # --- Logging ---
    log_level: str = Field(default="INFO")
    log_file_max_bytes: int = Field(default=10485760)  # 10 MB
    log_file_backup_count: int = Field(default=5)

    # --- Paths ---
    upload_dir: str = Field(default="uploads")
    data_dir: str = Field(default="data")
    log_dir: str = Field(default="logs")
    rules_dir: str = Field(default="backend/app/rules")

    class Config:
        env_file = ".env"
        env_file_encoding = "utf-8"
        case_sensitive = False
        extra = "allow"

    # --- Derived properties ---
    # BUGFIX: these were plain methods but were consumed as attributes
    # (e.g. ``self.base_path / self.upload_dir`` and
    # ``self.data_path / "accident_analysis.db"``), which raised
    # ``TypeError: unsupported operand type(s)`` at runtime (method / str).
    # They are now real ``@property`` accessors, matching the section comment.

    @property
    def base_path(self) -> Path:
        """Project root directory (three levels above this config module)."""
        return Path(__file__).resolve().parent.parent.parent

    def _ensure_dir(self, relative: str) -> Path:
        """Return ``base_path / relative``, creating the directory if missing."""
        p = self.base_path / relative
        p.mkdir(parents=True, exist_ok=True)
        return p

    @property
    def upload_path(self) -> Path:
        """Directory for uploaded photos; created on first access."""
        return self._ensure_dir(self.upload_dir)

    @property
    def data_path(self) -> Path:
        """Directory for persistent data; created on first access."""
        return self._ensure_dir(self.data_dir)

    @property
    def log_path(self) -> Path:
        """Directory for log files; created on first access."""
        return self._ensure_dir(self.log_dir)

    @property
    def db_path(self) -> Path:
        """Path to the SQLite database file inside ``data_path``."""
        return self.data_path / "accident_analysis.db"

    @property
    def rules_path(self) -> Path:
        """Directory holding the accident-rule definitions."""
        return self.base_path / self.rules_dir

    @property
    def allowed_extensions_list(self) -> List[str]:
        """``allowed_extensions`` split into a normalized lowercase list."""
        return [ext.strip().lower() for ext in self.allowed_extensions.split(",")]

    @property
    def cors_origins_list(self) -> List[str]:
        """``cors_origins`` split into a list of origin strings."""
        return [origin.strip() for origin in self.cors_origins.split(",")]

    @property
    def max_file_size_bytes(self) -> int:
        """Upload size limit converted from MB to bytes."""
        return self.max_file_size_mb * 1024 * 1024

    def resolve_device(self) -> str:
        """Resolve 'auto' device to the best available: MPS (M3) > CUDA > CPU."""
        # Local import: torch is heavy; defer loading until a device is needed.
        import torch
        if self.device != "auto":
            return self.device
        if torch.backends.mps.is_available():
            return "mps"
        if torch.cuda.is_available():
            return "cuda"
        return "cpu"

    def resolve_torch_dtype(self):
        """Convert the configured dtype string to a torch dtype object.

        Unrecognized values fall back to ``torch.bfloat16``.
        """
        import torch
        dtype_map = {
            "bfloat16": torch.bfloat16,
            "float16": torch.float16,
            "float32": torch.float32,
        }
        return dtype_map.get(self.model_torch_dtype, torch.bfloat16)
# Singleton settings instance — import this object everywhere else;
# reads .env/environment once at module import time.
settings = Settings()