cyd0806 committed
Commit af37370 · verified · 1 Parent(s): 1d7ce38

Upload src/hook.py with huggingface_hub

Files changed (1)
  1. src/hook.py +63 -0
src/hook.py ADDED
@@ -0,0 +1,63 @@
import logging
import os.path

from peft import set_peft_model_state_dict, get_peft_model_state_dict
from diffusers import FluxPipeline
from diffusers.training_utils import cast_training_params

logger = logging.getLogger(__name__)


def save_model_hook(models, weights, output_dir, wanted_model, accelerator, adapter_names):
    if accelerator.is_main_process:
        transformer_lora_layers_to_save = None
        for model in models:
            if isinstance(model, type(accelerator.unwrap_model(wanted_model))):
                # one LoRA state dict per adapter name
                transformer_lora_layers_to_save = {
                    adapter_name: get_peft_model_state_dict(model, adapter_name=adapter_name)
                    for adapter_name in adapter_names
                }
            else:
                raise ValueError(f"unexpected save model: {model.__class__}")

            # make sure to pop weight so that corresponding model is not saved again
            if weights:
                weights.pop()

        # write each adapter to its own subdirectory of output_dir
        for adapter_name, lora in transformer_lora_layers_to_save.items():
            FluxPipeline.save_lora_weights(
                os.path.join(output_dir, adapter_name),
                transformer_lora_layers=lora,
            )


def load_model_hook(models, input_dir, wanted_model, accelerator, adapter_names):
    transformer_ = None
    while len(models) > 0:
        model = models.pop()
        if isinstance(model, type(accelerator.unwrap_model(wanted_model))):
            transformer_ = model
        else:
            raise ValueError(f"unexpected load model: {model.__class__}")

    # read one LoRA checkpoint per adapter from its subdirectory of input_dir
    lora_state_dict_list = []
    for adapter_name in adapter_names:
        lora_path = os.path.join(input_dir, adapter_name)
        lora_state_dict_list.append(FluxPipeline.lora_state_dict(lora_path))

    # keep only the transformer LoRA keys and strip the "transformer." prefix
    transformer_lora_state_dict_list = []
    for lora_state_dict in lora_state_dict_list:
        transformer_lora_state_dict_list.append({
            k.replace("transformer.", ""): v
            for k, v in lora_state_dict.items()
            if k.startswith("transformer.") and "lora" in k
        })

    incompatible_keys_list = [
        set_peft_model_state_dict(transformer_, transformer_lora_state_dict_list[i], adapter_name=adapter_name)
        for i, adapter_name in enumerate(adapter_names)
    ]
    for incompatible_keys in incompatible_keys_list:
        if incompatible_keys is not None:
            # check only for unexpected keys
            unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None)
            if unexpected_keys:
                logger.warning(
                    "Loading adapter weights from state_dict led to unexpected keys not found in the model: "
                    f"{unexpected_keys}."
                )

    # Make sure the trainable params are in float32. This is again needed since the base models
    # are in `weight_dtype`. More details:
    # https://github.com/huggingface/diffusers/pull/6514#discussion_r1449796804
    if accelerator.mixed_precision == "fp16":
        models = [transformer_]
        # only upcast trainable parameters (LoRA) into fp32
        cast_training_params(models)
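For context, a minimal sketch of how these hooks could be attached to an Accelerate training loop. The helper name register_lora_hooks and the transformer / adapter_names arguments are illustrative placeholders, not part of this commit; Accelerator.register_save_state_pre_hook and register_load_state_pre_hook pass only (models, weights, output_dir) and (models, input_dir), so the extra arguments are bound with functools.partial.

import functools

from accelerate import Accelerator

from src.hook import save_model_hook, load_model_hook


def register_lora_hooks(accelerator: Accelerator, transformer, adapter_names):
    # Bind the extra arguments so the hooks match the (models, weights, output_dir)
    # and (models, input_dir) signatures used by accelerator.save_state / load_state.
    accelerator.register_save_state_pre_hook(
        functools.partial(
            save_model_hook,
            wanted_model=transformer,
            accelerator=accelerator,
            adapter_names=adapter_names,
        )
    )
    accelerator.register_load_state_pre_hook(
        functools.partial(
            load_model_hook,
            wanted_model=transformer,
            accelerator=accelerator,
            adapter_names=adapter_names,
        )
    )


# Hypothetical usage: after register_lora_hooks(accelerator, transformer, ["style", "subject"]),
# accelerator.save_state(ckpt_dir) writes one LoRA per adapter under ckpt_dir/<adapter_name>,
# and accelerator.load_state(ckpt_dir) restores them through load_model_hook.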