File size: 3,788 Bytes
a38f710
 
 
198c5a7
830be50
 
 
 
 
876e650
 
a38f710
 
830be50
 
 
a38f710
 
 
aced7f3
a38f710
 
 
 
 
 
 
 
 
 
 
b0037bd
a38f710
 
f6e3d73
 
 
 
 
 
 
 
eb803dd
 
e9349ad
 
 
f6e3d73
 
 
 
 
 
a38f710
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
198c5a7
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
from pathlib import Path
from pydantic_settings import BaseSettings, SettingsConfigDict
from typing import ClassVar
import os

# Resolve the project base directory once at import time:
# inside the container image the app lives under /app; during local
# development we walk three levels up (config.py -> core -> app -> root).
current_file = Path(__file__).resolve()
_running_in_container = str(current_file).startswith('/app/')
CARDSERVER_BASE_DIR = Path('/app') if _running_in_container else current_file.parents[2]

class Settings(BaseSettings):
    """Application settings for the Playcard-Generator backend.

    Every field can be overridden through environment variables or a
    ``.env`` file (see ``model_config`` at the bottom).  Path defaults are
    derived from ``CARDSERVER_BASE_DIR``, which is computed at import time
    above (``/app`` in the container, the local checkout otherwise).
    """

    # Use the determined base directory.
    # Leading-underscore ClassVars are excluded from pydantic's field set;
    # they exist only to build the path defaults below.
    _CARDSERVER_DIR_CLS: ClassVar[Path] = CARDSERVER_BASE_DIR
    _APP_DEFAULT_STATIC_DIR_CLS: ClassVar[Path] = CARDSERVER_BASE_DIR / "static"

    # Local path of the LoRA checkpoint; DEFAULT_MODEL_ID is the HF base model.
    MODEL_PATH: str = str(_CARDSERVER_DIR_CLS / "models" / "lora-checkpoint") 
    DEFAULT_MODEL_ID: str = "teknium/OpenHermes-2.5-Mistral-7B" 
    MODEL_LOAD_IN_4BIT: bool = False  # Disabled for macOS compatibility

    # Image directories (kept as str so env overrides stay simple;
    # use the resolved_* properties when a Path object is needed).
    GENERATED_PATH: str = str(_APP_DEFAULT_STATIC_DIR_CLS / "images" / "generated") 
    BASE_PATH: str = str(_APP_DEFAULT_STATIC_DIR_CLS / "images" / "base") 
    SYMBOLS_PATH: str = str(_APP_DEFAULT_STATIC_DIR_CLS / "images" / "symbols") 
    QR_CODE_PATH: str = str(_APP_DEFAULT_STATIC_DIR_CLS / "images" / "qr") 
    
    # Directory served as the static files root.
    STATIC_FILES_MOUNT_DIR: str = str(_APP_DEFAULT_STATIC_DIR_CLS) 
    
    # Sub-directory (below the static mount) holding font files.
    FONTS_SUBDIR: str = "fonts" 

    # API metadata.
    API_PREFIX: str = "/api/v1"
    PROJECT_NAME: str = "Playcard-Generator"
    APP_VERSION: str = "1.0.0"

    # Generation parameters
    GENERATION_MAX_NEW_TOKENS: int = 250
    GENERATION_TEMPERATURE: float = 0.75
    GENERATION_DO_SAMPLE: bool = True
    GENERATION_TOP_K: int = 50
    GENERATION_TOP_P: float = 0.95
    GENERATION_MAX_RETRIES: int = 3

    # Frontend URL for QR codes - should be set to production URL in production
    FRONTEND_BASE_URL: str = os.getenv("FRONTEND_BASE_URL", "https://huggingface.co/spaces/ch404/cardserver")
    QR_CODE_SIZE: int = 200
    QR_CODE_BOX_SIZE: int = 10 
    QR_CODE_BORDER: int = 4 

    # Model and Tokenizer settings
    # HUGGING_FACE_HUB_TOKEN: str | None = os.getenv("HUGGING_FACE_HUB_TOKEN") # Handled by HF libs
    LORA_MODEL_REPO_ID: str | None = None # Example: "your-username/your-lora-model-repo"
    # LORA_MODEL_REVISION: str = "main" # Example: "main" or a specific commit/tag

    @property
    def resolved_model_path(self) -> Path:
        """``MODEL_PATH`` as a ``Path``."""
        return Path(self.MODEL_PATH)

    @property
    def resolved_generated_path(self) -> Path:
        """``GENERATED_PATH`` as a ``Path``."""
        return Path(self.GENERATED_PATH)

    @property
    def resolved_base_path(self) -> Path:
        """``BASE_PATH`` as a ``Path``."""
        return Path(self.BASE_PATH)

    @property
    def resolved_symbols_path(self) -> Path:
        """``SYMBOLS_PATH`` as a ``Path``."""
        return Path(self.SYMBOLS_PATH)

    @property
    def resolved_qr_code_path(self) -> Path:
        """``QR_CODE_PATH`` as a ``Path``."""
        return Path(self.QR_CODE_PATH)
    
    @property
    def resolved_static_files_mount_dir(self) -> Path:
        """``STATIC_FILES_MOUNT_DIR`` as a ``Path``."""
        return Path(self.STATIC_FILES_MOUNT_DIR)

    @property
    def resolved_default_font_path(self) -> Path:
        """Full path to the default handwriting font under the static mount."""
        return self.resolved_static_files_mount_dir / self.FONTS_SUBDIR / "hand.ttf"

    # pydantic-settings configuration: read `.env`, ignore unknown keys,
    # match env var names case-insensitively.
    model_config = SettingsConfigDict(
        env_file=".env", 
        env_file_encoding='utf-8',
        extra='ignore', 
        case_sensitive=False 
    )

# Singleton settings instance shared across the application.
settings = Settings()

def apply_hf_space_optimizations():
    """Apply Hugging Face Space optimizations for better performance"""
    # Cache locations plus logging/parallelism tweaks for HF Spaces,
    # applied to the process environment in one pass.
    space_env = {
        # Redirect caches to the Space's writable /tmp for better performance.
        "TRANSFORMERS_CACHE": "/tmp/transformers_cache",
        "HF_HOME": "/tmp/huggingface",
        "TORCH_HOME": "/tmp/torch",
        # Additional HF Space optimizations.
        "TOKENIZERS_PARALLELISM": "false",  # Avoid tokenizer warnings
        "TRANSFORMERS_VERBOSITY": "error",  # Reduce logging overhead
        "HF_HUB_DISABLE_PROGRESS_BARS": "1",  # Disable progress bars in logs
    }
    os.environ.update(space_env)