Update src/pipeline.py
Browse files- src/pipeline.py +16 -10
src/pipeline.py
CHANGED
|
@@ -75,17 +75,23 @@ def load_pipeline() -> Pipeline:
|
|
vae.encoder = E(16)
vae.decoder = D(16)

# Checkpoints for the freshly constructed encoder/decoder. Set either to
# None to skip loading that half.
encoder_path = "encoder.pth"
decoder_path = "decoder.pth"

def _load_filtered_weights(module, checkpoint_path, prefix):
    """Load into `module` only checkpoint entries that match by name and shape.

    Keys in the checkpoint are namespaced (e.g. 'encoder.conv_in.weight');
    `prefix` is removed before matching against `module.state_dict()`.
    Entries with no matching name or a mismatched tensor size are skipped,
    and loading is non-strict so partial checkpoints are tolerated.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu", weights_only=True)
    # Hoisted: state_dict() builds a fresh dict each call — the original
    # comprehension rebuilt it up to three times per checkpoint key.
    target = module.state_dict()
    filtered = {}
    for key, tensor in checkpoint.items():
        # BUG FIX: the original used key.strip(prefix), but str.strip removes
        # any of the *characters* in the argument from both ends — e.g.
        # 'encoder.conv_in.weight'.strip('encoder.') -> 'v_in.weight' — which
        # corrupted every key and, with strict=False, silently loaded nothing.
        # removeprefix strips the exact leading substring only.
        name = key.removeprefix(prefix)
        if name in target and tensor.size() == target[name].size():
            filtered[name] = tensor
    module.load_state_dict(filtered, strict=False)

if encoder_path is not None:
    _load_filtered_weights(vae.encoder, encoder_path, "encoder.")
    # NOTE(review): `dtype` comes from the enclosing load_pipeline scope —
    # presumably the pipeline's compute dtype; confirm against caller.
    vae.encoder.to(dtype=dtype)

if decoder_path is not None:
    _load_filtered_weights(vae.decoder, decoder_path, "decoder.")
    vae.decoder.to(dtype=dtype)

# Freeze both halves: the VAE is used for inference only here.
vae.decoder.requires_grad_(False)
vae.encoder.requires_grad_(False)

# quantize_(vae, int8_weight_only())
quantizer = ModelQuantization(vae)