manbeast3b committed on
Commit
46808ec
·
verified ·
1 Parent(s): b9bf46b

Update src/pipeline.py

Browse files
Files changed (1) hide show
  1. src/pipeline.py +16 -10
src/pipeline.py CHANGED
@@ -75,17 +75,23 @@ def load_pipeline() -> Pipeline:
75
  vae.encoder = E(16)
76
  vae.decoder = D(16)
77
 
78
- def lsd(p, mod, pfx):
79
- sd = torch.load(p, map_location="cpu", weights_only=True)
80
- f_sd = {k.strip(pfx): v for k, v in sd.items() if k.strip(pfx) in mod.state_dict() and v.size() == mod.state_dict()[k.strip(pfx)].size()}
81
- print(f"num keys: {len(f_sd)} of {len(mod.state_dict())} from {len(sd.items())}")
82
- mod.load_state_dict(f_sd, strict=False)
83
- mod.to(dtype=torch.bfloat16)
84
-
85
- lsd("encoder.pth", vae.encoder, "encoder.")
86
- lsd("decoder.pth", vae.decoder, "decoder.")
87
- vae.encoder.requires_grad_(False)
 
 
 
 
 
88
  vae.decoder.requires_grad_(False)
 
89
 
90
  # quantize_(vae, int8_weight_only())
91
  quantizer = ModelQuantization(vae)
 
75
  vae.encoder = E(16)
76
  vae.decoder = D(16)
77
 
78
+ encoder_path = "encoder.pth"
79
+ decoder_path = "decoder.pth"
80
+
81
+ if encoder_path is not None:
82
+ encoder_state_dict = torch.load(encoder_path, map_location="cpu", weights_only=True)
83
+ filtered_state_dict = {k.strip('encoder.'): v for k, v in encoder_state_dict.items() if k.strip('encoder.') in vae.encoder.state_dict() and v.size() == vae.encoder.state_dict()[k.strip('encoder.')].size()}
84
+ vae.encoder.load_state_dict(filtered_state_dict, strict=False)
85
+ vae.encoder.to(dtype=dtype)
86
+
87
+ if decoder_path is not None:
88
+ decoder_state_dict = torch.load(decoder_path, map_location="cpu", weights_only=True)
89
+ filtered_state_dict = {k.strip('decoder.'): v for k, v in decoder_state_dict.items() if k.strip('decoder.') in vae.decoder.state_dict() and v.size() == vae.decoder.state_dict()[k.strip('decoder.')].size()}
90
+ vae.decoder.load_state_dict(filtered_state_dict, strict=False)
91
+ vae.decoder.to(dtype=dtype)
92
+
93
  vae.decoder.requires_grad_(False)
94
+ vae.encoder.requires_grad_(False)
95
 
96
  # quantize_(vae, int8_weight_only())
97
  quantizer = ModelQuantization(vae)