# NOTE: The lines "Spaces: / Sleeping / Sleeping" were a Hugging Face Space
# status banner captured during page extraction — not part of the source module.
import os
from pathlib import Path
from typing import ClassVar

from pydantic_settings import BaseSettings, SettingsConfigDict

# Resolve the project root once at import time. Inside the container the app
# lives under /app; in a local checkout the root is three levels above this
# file (config.py -> core -> app -> project root).
current_file = Path(__file__).resolve()
CARDSERVER_BASE_DIR = (
    Path('/app')
    if str(current_file).startswith('/app/')
    else current_file.parents[2]
)
class Settings(BaseSettings):
    """Application configuration loaded from the environment and a `.env` file.

    All filesystem defaults are anchored at the module-level
    ``CARDSERVER_BASE_DIR`` (computed at import time), so the same defaults
    work both inside the ``/app`` container and in a local checkout.
    """

    # Base directories used only to build the path defaults below
    # (ClassVar, so pydantic does not treat them as settings fields).
    _CARDSERVER_DIR_CLS: ClassVar[Path] = CARDSERVER_BASE_DIR
    _APP_DEFAULT_STATIC_DIR_CLS: ClassVar[Path] = CARDSERVER_BASE_DIR / "static"

    # --- Model ---
    MODEL_PATH: str = str(_CARDSERVER_DIR_CLS / "models" / "lora-checkpoint")
    DEFAULT_MODEL_ID: str = "teknium/OpenHermes-2.5-Mistral-7B"
    MODEL_LOAD_IN_4BIT: bool = False  # Disabled for macOS compatibility

    # --- Static asset locations ---
    GENERATED_PATH: str = str(_APP_DEFAULT_STATIC_DIR_CLS / "images" / "generated")
    BASE_PATH: str = str(_APP_DEFAULT_STATIC_DIR_CLS / "images" / "base")
    SYMBOLS_PATH: str = str(_APP_DEFAULT_STATIC_DIR_CLS / "images" / "symbols")
    QR_CODE_PATH: str = str(_APP_DEFAULT_STATIC_DIR_CLS / "images" / "qr")
    STATIC_FILES_MOUNT_DIR: str = str(_APP_DEFAULT_STATIC_DIR_CLS)
    FONTS_SUBDIR: str = "fonts"

    # --- API metadata ---
    API_PREFIX: str = "/api/v1"
    PROJECT_NAME: str = "Playcard-Generator"
    APP_VERSION: str = "1.0.0"

    # --- Text-generation parameters ---
    GENERATION_MAX_NEW_TOKENS: int = 250
    GENERATION_TEMPERATURE: float = 0.75
    GENERATION_DO_SAMPLE: bool = True
    GENERATION_TOP_K: int = 50
    GENERATION_TOP_P: float = 0.95
    GENERATION_MAX_RETRIES: int = 3

    # --- QR codes ---
    # Frontend URL embedded in QR codes — set to the production URL in production.
    FRONTEND_BASE_URL: str = os.getenv("FRONTEND_BASE_URL", "https://huggingface.co/spaces/ch404/cardserver")
    QR_CODE_SIZE: int = 200
    QR_CODE_BOX_SIZE: int = 10
    QR_CODE_BORDER: int = 4

    # --- Model and tokenizer settings ---
    # HUGGING_FACE_HUB_TOKEN: str | None = os.getenv("HUGGING_FACE_HUB_TOKEN")  # Handled by HF libs
    LORA_MODEL_REPO_ID: str | None = None  # e.g. "your-username/your-lora-model-repo"
    # LORA_MODEL_REVISION: str = "main"  # e.g. "main" or a specific commit/tag

    def resolved_model_path(self) -> Path:
        """Return ``MODEL_PATH`` as a :class:`Path`."""
        return Path(self.MODEL_PATH)

    def resolved_generated_path(self) -> Path:
        """Return ``GENERATED_PATH`` as a :class:`Path`."""
        return Path(self.GENERATED_PATH)

    def resolved_base_path(self) -> Path:
        """Return ``BASE_PATH`` as a :class:`Path`."""
        return Path(self.BASE_PATH)

    def resolved_symbols_path(self) -> Path:
        """Return ``SYMBOLS_PATH`` as a :class:`Path`."""
        return Path(self.SYMBOLS_PATH)

    def resolved_qr_code_path(self) -> Path:
        """Return ``QR_CODE_PATH`` as a :class:`Path`."""
        return Path(self.QR_CODE_PATH)

    def resolved_static_files_mount_dir(self) -> Path:
        """Return ``STATIC_FILES_MOUNT_DIR`` as a :class:`Path`."""
        return Path(self.STATIC_FILES_MOUNT_DIR)

    def resolved_default_font_path(self) -> Path:
        """Return the path to the default handwriting font under the static dir."""
        return self.resolved_static_files_mount_dir() / self.FONTS_SUBDIR / "hand.ttf"

    # pydantic-settings behaviour: read `.env`, ignore unknown keys,
    # match environment variables case-insensitively.
    model_config = SettingsConfigDict(
        env_file=".env",
        env_file_encoding='utf-8',
        extra='ignore',
        case_sensitive=False
    )
| settings = Settings() | |
def apply_hf_space_optimizations():
    """Tune the process environment for running inside a Hugging Face Space.

    Redirects the model/tokenizer caches to the writable ``/tmp`` area and
    quiets down library logging. Mutates ``os.environ`` in place; existing
    values for these variables are overwritten.
    """
    hf_space_env = {
        # Writable cache locations for HF/torch downloads.
        "TRANSFORMERS_CACHE": "/tmp/transformers_cache",
        "HF_HOME": "/tmp/huggingface",
        "TORCH_HOME": "/tmp/torch",
        # Quieter, leaner runtime behaviour.
        "TOKENIZERS_PARALLELISM": "false",        # avoid tokenizer fork warnings
        "TRANSFORMERS_VERBOSITY": "error",        # reduce logging overhead
        "HF_HUB_DISABLE_PROGRESS_BARS": "1",      # no progress bars in logs
    }
    for key, value in hf_space_env.items():
        os.environ[key] = value