"""
Configuration module for DeepVision Core Engine.

Manages all configuration settings including paths, model settings,
processing parameters, and resource limits.
"""
|
|
import os
from pathlib import Path
from typing import Dict, List, Optional, Tuple

from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict
|
|
|
|
class CoreConfig(BaseSettings):
    """Core configuration settings.

    Values are resolved in order of precedence: constructor kwargs,
    environment variables, then the ``.env`` file. Environment-variable
    names must match the field names exactly (``case_sensitive=True``).

    Instantiating the class also creates the upload/cache/model
    directories (see ``__init__``).
    """

    # pydantic v2 settings configuration. The original code used the v1
    # idioms ``class Config`` and ``Field(env=...)``; in pydantic v2 the
    # ``env`` kwarg is silently ignored, so env overrides only worked
    # because the field names happened to match the env-var names.
    # SettingsConfigDict makes the binding explicit and supported.
    model_config = SettingsConfigDict(
        env_file=".env",
        env_file_encoding="utf-8",
        case_sensitive=True,
    )

    # --- Application identity ---
    APP_NAME: str = "DeepVision Prompt Builder"
    APP_VERSION: str = "0.1.0"
    DEBUG: bool = False  # overridable via the DEBUG env var

    # --- Filesystem layout ---
    BASE_DIR: Path = Path(__file__).parent.parent  # project root (two levels up)
    UPLOAD_DIR: Path = Path("/var/uploads")
    CACHE_DIR: Path = Path("/var/cache")
    MODEL_DIR: Path = Path("models")

    # --- Upload limits and accepted file formats ---
    MAX_IMAGE_SIZE: int = 50 * 1024 * 1024   # bytes (50 MiB)
    MAX_VIDEO_SIZE: int = 200 * 1024 * 1024  # bytes (200 MiB)
    ALLOWED_IMAGE_FORMATS: List[str] = [".jpg", ".jpeg", ".png", ".gif", ".webp"]
    ALLOWED_VIDEO_FORMATS: List[str] = [".mp4", ".mov", ".avi"]

    # --- Image processing ---
    IMAGE_MAX_DIMENSION: int = 2048          # longest side, pixels
    IMAGE_QUALITY: int = 85                  # JPEG/WebP quality (0-100)
    DEFAULT_IMAGE_SIZE: Tuple[int, int] = (512, 512)

    # --- Video processing ---
    VIDEO_FPS_EXTRACTION: int = 1            # frames sampled per second
    MAX_FRAMES_PER_VIDEO: int = 100

    # --- Model execution ---
    DEVICE: str = "cpu"                      # overridable via the DEVICE env var
    MODEL_BATCH_SIZE: int = 4
    MODEL_CACHE_SIZE: int = 3                # models kept resident at once

    # --- Concurrency / caching ---
    MAX_WORKERS: int = 4
    ENABLE_CACHING: bool = True
    CACHE_TTL: int = 3600                    # seconds

    # --- Output ---
    OUTPUT_FORMAT: str = "json"
    PRETTY_JSON: bool = True
    INCLUDE_METADATA: bool = True

    def __init__(self, **kwargs):
        """Load settings, then ensure the writable directories exist.

        Creating the directories eagerly surfaces permission problems at
        startup rather than on first use. NOTE(review): the defaults
        point at /var/*, which typically requires elevated rights;
        mkdir failures propagate so misconfiguration is visible
        immediately.
        """
        super().__init__(**kwargs)
        for directory in (self.UPLOAD_DIR, self.CACHE_DIR, self.MODEL_DIR):
            directory.mkdir(parents=True, exist_ok=True)
|
|
|
|
| |
| config = CoreConfig() |
|
|
|
|
| |
# Registry of the foundation models the engine can load. Each short key
# maps to the model's Hugging Face hub name, the task it serves, and the
# device it is placed on (taken once from the global config at import).
MODEL_CONFIGS: Dict[str, Dict] = {
    key: {"name": hub_name, "task": task, "device": config.DEVICE}
    for key, hub_name, task in (
        ("clip", "openai/clip-vit-base-patch32", "feature_extraction"),
        ("blip2", "Salesforce/blip2-opt-2.7b", "image_captioning"),
        ("sam", "facebook/sam-vit-base", "segmentation"),
    )
}
|
|
|
|
| |
# Per-plugin settings. ``enabled`` gates whether the pipeline loads the
# plugin; the remaining keys are plugin-specific parameters.
PLUGIN_CONFIGS: Dict[str, Dict] = dict(
    object_detector={
        "enabled": True,
        "model": "clip",
        "confidence_threshold": 0.5,
    },
    caption_generator={
        "enabled": True,
        "model": "blip2",
        "max_length": 50,
    },
    color_analyzer={
        "enabled": True,
        "num_colors": 5,
    },
    text_extractor={
        "enabled": False,
        "model": "easyocr",
    },
    emotion_reader={
        "enabled": False,
        "model": "deepface",
    },
)
|
|
|
|
def get_plugin_config(plugin_name: str) -> Optional[Dict]:
    """Look up the settings dict for *plugin_name*.

    Returns None when the plugin is not registered in PLUGIN_CONFIGS.
    """
    try:
        return PLUGIN_CONFIGS[plugin_name]
    except KeyError:
        return None
|
|
|
|
def is_plugin_enabled(plugin_name: str) -> bool:
    """Return True when *plugin_name* is registered and flagged enabled."""
    plugin_config = get_plugin_config(plugin_name)
    # Unknown plugin (or an empty config dict) counts as disabled.
    if not plugin_config:
        return False
    return plugin_config.get("enabled", False)
|
|