# (Hugging Face Spaces page residue from extraction: "Spaces: Sleeping")
# config.py
"""Runtime configuration for the model-serving app.

Reads all settings from environment variables (optionally seeded from a
local ``.env`` file during development) and exposes them as module-level
constants: model names, the API-key allow-list, inference device/dtype,
and the rate-limit string. A summary is printed on import.
"""
import os

import torch

# python-dotenv is only needed for local development; in production
# (e.g. Hugging Face Spaces) variables are injected directly into the
# environment, so a missing package must not be fatal.
try:
    from dotenv import load_dotenv
    load_dotenv()  # Load variables from .env file for local development
except ImportError:
    pass

# Model Configuration
TEXT_MODEL_NAME = os.getenv("TEXT_MODEL_NAME", "google/flan-t5-base")
IMAGE_MODEL_NAME = os.getenv("IMAGE_MODEL_NAME", "runwayml/stable-diffusion-v1-5")
VIDEO_MODEL_NAME = os.getenv("VIDEO_MODEL_NAME", "zeroscope_v2_576w")

# API Security
# ALLOWED_API_KEYS is a comma-separated list. Strip whitespace around each
# entry and drop empties so input like "a, b," cannot produce ' b' or ''
# (an empty-string key would otherwise pass a naive membership check).
ALLOWED_API_KEYS_STR = os.getenv("ALLOWED_API_KEYS", "")
ALLOWED_API_KEYS = {
    key.strip() for key in ALLOWED_API_KEYS_STR.split(",") if key.strip()
}

# Inference Device
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
# For Spaces free tier CPU or basic T4 GPU, FP16 might be beneficial if GPU is available
DTYPE = torch.float16 if DEVICE == "cuda" else torch.float32

# Rate Limiting (Example: 10 requests per minute)
RATE_LIMIT = "10/minute"

print("--- Configuration ---")
print(f"Text Model: {TEXT_MODEL_NAME}")
print(f"Image Model: {IMAGE_MODEL_NAME}")
print(f"Video Model: {VIDEO_MODEL_NAME}")
print(f"Device: {DEVICE}")
print(f"Dtype: {DTYPE}")
print(f"API Keys Loaded: {'Yes' if ALLOWED_API_KEYS else 'No (WARNING: Open Access!)'}")
print(f"Rate Limit: {RATE_LIMIT}")
print("---------------------")