Spaces:
Running
on
Zero
Running
on
Zero
Upload folder using huggingface_hub
Browse files
- core/model_manager.py +10 -7
- ui/layout.py +1 -0
core/model_manager.py
CHANGED
|
@@ -23,6 +23,7 @@ class ModelManager:
|
|
| 23 |
if hasattr(self, 'initialized'):
|
| 24 |
return
|
| 25 |
self.loaded_models: Dict[str, Any] = {}
|
|
|
|
| 26 |
self.initialized = True
|
| 27 |
print("✅ ModelManager initialized.")
|
| 28 |
|
|
@@ -89,15 +90,16 @@ class ModelManager:
|
|
| 89 |
required_set = set(required_models)
|
| 90 |
current_set = set(self.loaded_models.keys())
|
| 91 |
|
| 92 |
-
loras_changed =
|
| 93 |
-
|
| 94 |
models_to_unload = current_set - required_set
|
| 95 |
-
|
|
|
|
|
|
|
|
|
|
| 96 |
if models_to_unload:
|
| 97 |
print(f"--- [ModelManager] Models to unload: {models_to_unload} ---")
|
| 98 |
if loras_changed and not models_to_unload:
|
| 99 |
-
|
| 100 |
-
print(f"--- [ModelManager] LoRA configuration changed. Reloading base model(s): {models_to_unload} ---")
|
| 101 |
|
| 102 |
model_management.unload_all_models()
|
| 103 |
self.loaded_models.clear()
|
|
@@ -105,7 +107,7 @@ class ModelManager:
|
|
| 105 |
torch.cuda.empty_cache()
|
| 106 |
print("--- [ModelManager] All models unloaded to free RAM. ---")
|
| 107 |
|
| 108 |
-
models_to_load = required_set if
|
| 109 |
|
| 110 |
if models_to_load:
|
| 111 |
print(f"--- [ModelManager] Models to load: {models_to_load} ---")
|
|
@@ -136,7 +138,8 @@ class ModelManager:
|
|
| 136 |
raise gr.Error(f"Failed to load model or apply LoRA '{display_name}'. Reason: {e}")
|
| 137 |
else:
|
| 138 |
print(f"--- [ModelManager] All required models are already loaded. ---")
|
| 139 |
-
|
|
|
|
| 140 |
return {name: self.loaded_models[name] for name in required_models}
|
| 141 |
|
| 142 |
model_manager = ModelManager()
|
|
|
|
| 23 |
if hasattr(self, 'initialized'):
|
| 24 |
return
|
| 25 |
self.loaded_models: Dict[str, Any] = {}
|
| 26 |
+
self.last_active_loras: List[Dict[str, Any]] = []
|
| 27 |
self.initialized = True
|
| 28 |
print("✅ ModelManager initialized.")
|
| 29 |
|
|
|
|
| 90 |
required_set = set(required_models)
|
| 91 |
current_set = set(self.loaded_models.keys())
|
| 92 |
|
| 93 |
+
loras_changed = self.last_active_loras != active_loras
|
|
|
|
| 94 |
models_to_unload = current_set - required_set
|
| 95 |
+
|
| 96 |
+
must_reload = bool(models_to_unload) or loras_changed
|
| 97 |
+
|
| 98 |
+
if must_reload:
|
| 99 |
if models_to_unload:
|
| 100 |
print(f"--- [ModelManager] Models to unload: {models_to_unload} ---")
|
| 101 |
if loras_changed and not models_to_unload:
|
| 102 |
+
print(f"--- [ModelManager] LoRA configuration changed. Reloading base model(s): {current_set.intersection(required_set)} ---")
|
|
|
|
| 103 |
|
| 104 |
model_management.unload_all_models()
|
| 105 |
self.loaded_models.clear()
|
|
|
|
| 107 |
torch.cuda.empty_cache()
|
| 108 |
print("--- [ModelManager] All models unloaded to free RAM. ---")
|
| 109 |
|
| 110 |
+
models_to_load = required_set if must_reload else (required_set - current_set)
|
| 111 |
|
| 112 |
if models_to_load:
|
| 113 |
print(f"--- [ModelManager] Models to load: {models_to_load} ---")
|
|
|
|
| 138 |
raise gr.Error(f"Failed to load model or apply LoRA '{display_name}'. Reason: {e}")
|
| 139 |
else:
|
| 140 |
print(f"--- [ModelManager] All required models are already loaded. ---")
|
| 141 |
+
|
| 142 |
+
self.last_active_loras = active_loras
|
| 143 |
return {name: self.loaded_models[name] for name in required_models}
|
| 144 |
|
| 145 |
model_manager = ModelManager()
|
ui/layout.py
CHANGED
|
@@ -27,6 +27,7 @@ def build_ui(event_handler_function):
|
|
| 27 |
"Other versions are also available: "
|
| 28 |
"[Z-Image](https://huggingface.co/spaces/RioShiina/ImageGen-Z-Image), "
|
| 29 |
"[Qwen-Image](https://huggingface.co/spaces/RioShiina/ImageGen-Qwen-Image), "
|
|
|
|
| 30 |
"[Illstrious](https://huggingface.co/spaces/RioShiina/ImageGen-Illstrious), "
|
| 31 |
"[NoobAI](https://huggingface.co/spaces/RioShiina/ImageGen-NoobAI), "
|
| 32 |
"[Pony](https://huggingface.co/spaces/RioShiina/ImageGen-Pony1), "
|
|
|
|
| 27 |
"Other versions are also available: "
|
| 28 |
"[Z-Image](https://huggingface.co/spaces/RioShiina/ImageGen-Z-Image), "
|
| 29 |
"[Qwen-Image](https://huggingface.co/spaces/RioShiina/ImageGen-Qwen-Image), "
|
| 30 |
+
"[NewBie-Image](https://huggingface.co/spaces/RioShiina/ImageGen-NewBie-Image), "
|
| 31 |
"[Illstrious](https://huggingface.co/spaces/RioShiina/ImageGen-Illstrious), "
|
| 32 |
"[NoobAI](https://huggingface.co/spaces/RioShiina/ImageGen-NoobAI), "
|
| 33 |
"[Pony](https://huggingface.co/spaces/RioShiina/ImageGen-Pony1), "
|