Update models/model_manager.py

models/model_manager.py (+76 −9)
In the old version, `cleanup()` deleted the loaded models outright; it now clears only the CUDA cache, so the models stay resident and never have to be reloaded:

```diff
@@ -191,18 +191,85 @@ class ModelManager:
         return Image.new('RGB', (width, height), color=color)
 
     def cleanup(self):
-        """Clean up models and free GPU memory"""
-        logger.info("Cleaning up GPU memory...")
+        """Clear only the CUDA cache; keep the models to avoid reloading them"""
+        logger.info("Clearing CUDA cache...")
         try:
-
-            del self.clip_model
-            del self.sd_pipeline
-            del self.controlnet
-            del self.controlnet_pipeline
-
+            # Clear the cache only; do not delete the models
             if torch.cuda.is_available():
                 torch.cuda.empty_cache()
 
-            logger.info("GPU memory cleanup complete")
+            logger.info("CUDA cache cleared")
+
         except Exception as e:
             logger.error(f"Failed to clear GPU memory: {e}")
```
The hunk then adds three helper methods. The first moves every loaded model to the CPU to free GPU memory:

```diff
+    def move_models_to_cpu(self):
+        """Move the models to the CPU to free GPU memory"""
+        try:
+            logger.info("Moving models to CPU...")
+            if self.caption_model is not None:
+                self.caption_model = self.caption_model.to('cpu')
+            if self.clip_model is not None:
+                self.clip_model = self.clip_model.to('cpu')
+            if self.sd_pipeline is not None:
+                self.sd_pipeline = self.sd_pipeline.to('cpu')
+            if self.controlnet_pipeline is not None:
+                self.controlnet_pipeline = self.controlnet_pipeline.to('cpu')
+
+            if torch.cuda.is_available():
+                torch.cuda.empty_cache()
+
+            logger.info("Models moved to CPU")
+        except Exception as e:
+            logger.error(f"Failed to move models to CPU: {e}")
```
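Calling `.to('cpu')` on a diffusers pipeline moves all of its registered submodules (UNet, VAE, text encoder) in one call, so the attribute-by-attribute checks above mainly guard against models that were never loaded. If the goal is a small steady-state GPU footprint rather than manual round-trips, diffusers pipelines also ship built-in offloading; a hedged alternative sketch (the checkpoint id is illustrative, and the feature requires the `accelerate` package):

```python
from diffusers import StableDiffusionPipeline

# Each submodule is moved to the GPU only while it executes, then back to
# the CPU, trading some speed for a much smaller resident GPU footprint.
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe.enable_model_cpu_offload()  # used instead of pipe.to("cuda")
```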
The second method is the inverse, moving everything back to `self.device` when the GPU is needed again:

```diff
+    def move_models_to_gpu(self):
+        """Move the models back to the GPU"""
+        try:
+            logger.info("Moving models to GPU...")
+            if self.caption_model is not None:
+                self.caption_model = self.caption_model.to(self.device)
+            if self.clip_model is not None:
+                self.clip_model = self.clip_model.to(self.device)
+            if self.sd_pipeline is not None:
+                self.sd_pipeline = self.sd_pipeline.to(self.device)
+            if self.controlnet_pipeline is not None:
+                self.controlnet_pipeline = self.controlnet_pipeline.to(self.device)
+
+            logger.info("Models moved back to GPU")
+        except Exception as e:
+            logger.error(f"Failed to move models to GPU: {e}")
```
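Because the two methods are symmetric, a natural calling pattern is to keep the models on the GPU only while a generation is running. A sketch of a hypothetical wrapper (`models_on_gpu` and the prompt are illustrative, not part of the commit):

```python
from contextlib import contextmanager

@contextmanager
def models_on_gpu(manager):
    """Hold the manager's models on the GPU only for the duration of the block."""
    manager.move_models_to_gpu()
    try:
        yield manager
    finally:
        manager.move_models_to_cpu()  # also clears the CUDA cache

# Usage sketch, assuming `manager` is an initialized ModelManager:
# with models_on_gpu(manager):
#     image = manager.sd_pipeline("a watercolor fox").images[0]
```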
The third method drops every model and processor, clears the cache, and reloads from scratch via `load_all_models()`:

```diff
+    def force_reload_all_models(self):
+        """Force-reload all models"""
+        logger.info("Force-reloading all models...")
+        try:
+            # Clean up first
+            if hasattr(self, 'caption_model') and self.caption_model is not None:
+                del self.caption_model
+                del self.caption_processor
+                self.caption_model = None
+                self.caption_processor = None
+            if hasattr(self, 'clip_model') and self.clip_model is not None:
+                del self.clip_model
+                del self.clip_processor
+                self.clip_model = None
+                self.clip_processor = None
+            if hasattr(self, 'sd_pipeline') and self.sd_pipeline is not None:
+                del self.sd_pipeline
+                self.sd_pipeline = None
+            if hasattr(self, 'controlnet_pipeline') and self.controlnet_pipeline is not None:
+                del self.controlnet
+                del self.controlnet_pipeline
+                self.controlnet = None
+                self.controlnet_pipeline = None
+
+            if torch.cuda.is_available():
+                torch.cuda.empty_cache()
+
+            # Reload
+            self.load_all_models()
+            logger.info("All models reloaded")
+
+        except Exception as e:
+            logger.error(f"Failed to force-reload models: {e}")
```
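One caveat with the delete-then-reload pattern: `del` only removes this object's reference, and the CUDA memory is not actually released until the Python garbage collector destroys the model objects (and nothing else still references them). A common hardening, shown here as a hedged sketch rather than as part of this commit, is to force a collection before clearing the cache and reloading:

```python
import gc

import torch

# After dropping the last references to the old models, run the garbage
# collector so their CUDA allocations are truly freed, then return the
# now-unreferenced cached blocks to the driver before reloading.
gc.collect()
if torch.cuda.is_available():
    torch.cuda.empty_cache()
```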