Hugging Face Spaces — commit "Update config.py" (file: config.py, CHANGED; diff below).
"""Central configuration for the Spaces inference service.

All settings are read from environment variables (optionally loaded from a
local .env file via python-dotenv), each with a sensible default:

- model identifiers for text, image, and video generation,
- an optional LCM LoRA for image-generation speedup,
- API-key security, inference device/dtype selection, and rate limiting.

The chosen configuration is printed at import time for startup verification.
"""

import os

import torch
from dotenv import load_dotenv

# Pull variables from a local .env file, if one exists (no-op otherwise).
load_dotenv()

# Model Configuration
TEXT_MODEL_NAME = os.getenv("TEXT_MODEL_NAME", "google/flan-t5-base")
IMAGE_MODEL_NAME = os.getenv("IMAGE_MODEL_NAME", "runwayml/stable-diffusion-v1-5")
# Corrected video model name: the Hub repo id needs the "cerspense/" org prefix
# (the bare "zeroscope_v2_576w" default would fail to resolve).
VIDEO_MODEL_NAME = os.getenv("VIDEO_MODEL_NAME", "cerspense/zeroscope_v2_576w")
# Optional: LCM LoRA for image-generation speedup (requires 'peft' in requirements.txt).
IMAGE_LCM_LORA_NAME = os.getenv("IMAGE_LCM_LORA_NAME", "latent-consistency/lcm-lora-sdv1-5")

# API Security: comma-separated key list -> set of allowed keys (empty set if unset).
# NOTE(review): the original line was truncated in the page extraction; the
# "else set()" fallback is the natural reading — confirm against the repo.
ALLOWED_API_KEYS_STR = os.getenv("ALLOWED_API_KEYS", "")
ALLOWED_API_KEYS = set(ALLOWED_API_KEYS_STR.split(',')) if ALLOWED_API_KEYS_STR else set()

# Inference Device
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
# For Spaces free tier CPU or basic T4 GPU, FP16 might be beneficial if GPU is available.
DTYPE = torch.float16 if DEVICE == "cuda" else torch.float32

# Rate Limiting
RATE_LIMIT = "10/minute"

# Print config at startup for verification.
print(f"--- Configuration ---")
print(f"Text Model: {TEXT_MODEL_NAME}")
print(f"Image Model: {IMAGE_MODEL_NAME}")
print(f"Image LCM LoRA: {IMAGE_LCM_LORA_NAME if IMAGE_LCM_LORA_NAME else 'Not Configured'}")  # Added LCM print
print(f"Video Model: {VIDEO_MODEL_NAME}")
print(f"Device: {DEVICE}")
print(f"Dtype: {DTYPE}")