# Source: contentapi/config.py (commit deaed92, "Update config.py" by rajux75)
# config.py
#
# Central runtime configuration. Every value can be overridden through an
# environment variable (optionally supplied via a local .env file); the
# second argument to os.getenv is the fallback default.
import os
from dotenv import load_dotenv
import torch

# Load variables from a .env file into the process environment (no-op if
# the file is absent).
load_dotenv()

# --- Model Configuration ---
# Hugging Face model identifiers.
TEXT_MODEL_NAME = os.getenv("TEXT_MODEL_NAME", "google/flan-t5-base")
IMAGE_MODEL_NAME = os.getenv("IMAGE_MODEL_NAME", "runwayml/stable-diffusion-v1-5")
# >>> CORRECTED VIDEO MODEL NAME <<<
VIDEO_MODEL_NAME = os.getenv("VIDEO_MODEL_NAME", "cerspense/zeroscope_v2_576w")
# Optional: For LCM speedup (requires 'peft' in requirements.txt)
IMAGE_LCM_LORA_NAME = os.getenv("IMAGE_LCM_LORA_NAME", "latent-consistency/lcm-lora-sdv1-5")

# --- API Security ---
# ALLOWED_API_KEYS is a comma-separated list. Whitespace around each key is
# stripped and empty entries (e.g. from a trailing comma or a blank variable)
# are discarded, so "key1, key2," yields {"key1", "key2"}. Previously a key
# written with a space after the comma could never match, and a trailing
# comma admitted the empty string as a valid key.
ALLOWED_API_KEYS_STR = os.getenv("ALLOWED_API_KEYS", "")
ALLOWED_API_KEYS = {
    key.strip() for key in ALLOWED_API_KEYS_STR.split(",") if key.strip()
}

# --- Inference Device ---
# Prefer GPU when available; float16 halves GPU memory use, while CPUs fall
# back to float32.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
DTYPE = torch.float16 if DEVICE == "cuda" else torch.float32

# --- Rate Limiting ---
# Format consumed by the rate limiter (e.g. slowapi-style "count/period").
RATE_LIMIT = "10/minute"
# Echo the resolved configuration at import time so the active settings are
# visible in the startup logs.
print("--- Configuration ---")
print(f"Text Model: {TEXT_MODEL_NAME}")
print(f"Image Model: {IMAGE_MODEL_NAME}")
# LCM LoRA is optional; show a placeholder when it is not configured.
_lcm_display = IMAGE_LCM_LORA_NAME if IMAGE_LCM_LORA_NAME else "Not Configured"
print(f"Image LCM LoRA: {_lcm_display}")
print(f"Video Model: {VIDEO_MODEL_NAME}")
print(f"Device: {DEVICE}")
print(f"Dtype: {DTYPE}")
# Warn loudly when no API keys are configured (open access).
_key_status = "Yes" if ALLOWED_API_KEYS else "No (WARNING: Open Access!)"
print(f"API Keys Loaded: {_key_status}")
print(f"Rate Limit: {RATE_LIMIT}")
print("---------------------")