primerz committed · Commit 87a0c60 · verified · 1 parent: 70fc44d

Update lora.py

Files changed (1):
  1. lora.py +58 -58
lora.py CHANGED
@@ -66,7 +66,7 @@ class LoRAModule(torch.nn.Module):
  alpha = alpha.detach().float().numpy() # without casting, bf16 causes error
  alpha = self.lora_dim if alpha is None or alpha == 0 else alpha
  self.scale = alpha / self.lora_dim
- self.register_buffer("alpha", torch.tensor(alpha)) # can be treated as a constant
+ self.register_buffer("alpha", torch.tensor(alpha)) # can be treated as a constant

  # same as microsoft's
  torch.nn.init.kaiming_uniform_(self.lora_down.weight, a=math.sqrt(5))
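For context, the scale computed here enters the forward pass as a fixed multiplier on the low-rank update. A standalone sketch (hypothetical layer sizes, not part of this commit):

import math
import torch

# Sketch of a LoRA-wrapped Linear layer (hypothetical sizes).
in_dim, out_dim, rank, alpha, multiplier = 320, 320, 4, 1.0, 1.0
base = torch.nn.Linear(in_dim, out_dim)
lora_down = torch.nn.Linear(in_dim, rank, bias=False)
lora_up = torch.nn.Linear(rank, out_dim, bias=False)
torch.nn.init.kaiming_uniform_(lora_down.weight, a=math.sqrt(5))  # same init as above
torch.nn.init.zeros_(lora_up.weight)  # the update starts as a no-op
scale = alpha / rank  # corresponds to self.scale = alpha / self.lora_dim

x = torch.randn(2, in_dim)
y = base(x) + multiplier * scale * lora_up(lora_down(x))  # LoRA forward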
@@ -107,7 +107,7 @@ class LoRAModule(torch.nn.Module):
  lx = lx * mask

  # scaling for rank dropout: treat as if the rank is changed
- # this could also be computed from the mask, but rank_dropout is used hoping for an augmentation-like effect
+ # this could also be computed from the mask, but rank_dropout is used hoping for an augmentation-like effect
  scale = self.scale * (1.0 / (1.0 - self.rank_dropout)) # redundant for readability
  else:
  scale = self.scale
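A sketch of what the rank-dropout compensation does: zeroing a fraction of the rank channels shrinks the expected magnitude of the update, so the survivors are scaled back up by 1/(1-rank_dropout). Illustrative only, assuming a (batch, rank) activation:

import torch

rank_dropout = 0.25
lx = torch.randn(8, 4)  # (batch, rank) activations from lora_down

# zero each rank channel independently with probability rank_dropout
mask = torch.rand((lx.size(0), lx.size(1)), device=lx.device) > rank_dropout
lx = lx * mask

# compensate: E[mask] = 1 - rank_dropout, so rescale to keep magnitude roughly constant
lx = lx * (1.0 / (1.0 - rank_dropout))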
@@ -130,7 +130,7 @@ class LoRAInfModule(LoRAModule):
  # no dropout for inference
  super().__init__(lora_name, org_module, multiplier, lora_dim, alpha)

- self.org_module_ref = [org_module] # keep a reference for later use
+ self.org_module_ref = [org_module] # keep a reference for later use
  self.enabled = True

  # check regional or not by lora_name
@@ -154,7 +154,7 @@ class LoRAInfModule(LoRAModule):
  def set_network(self, network):
  self.network = network

- # freeze and merge into the original module
+ # freeze and merge into the original module
  def merge_to(self, sd, dtype, device):
  # get up/down weight
  up_weight = sd["lora_up.weight"].to(torch.float).to(device)
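The merge folds the low-rank product into the base weight: for a Linear layer, W' = W + multiplier * (alpha / rank) * up_weight @ down_weight. A minimal sketch of that idea (hypothetical tensors, not this file's exact code):

import torch

out_dim, in_dim, rank = 320, 320, 4
weight = torch.randn(out_dim, in_dim)    # frozen base weight
up_weight = torch.zeros(out_dim, rank)   # lora_up.weight
down_weight = torch.randn(rank, in_dim)  # lora_down.weight
multiplier, scale = 1.0, 1.0 / rank      # scale = alpha / rank with alpha = 1

# fold the low-rank update into the base weight
merged = weight + multiplier * scale * (up_weight @ down_weight)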
@@ -186,7 +186,7 @@ class LoRAInfModule(LoRAModule):
  org_sd["weight"] = weight.to(dtype)
  self.org_module.load_state_dict(org_sd)

- # return this module's weight so that the merge can be undone later
+ # return this module's weight so that the merge can be undone later
  def get_weight(self, multiplier=None):
  if multiplier is None:
  multiplier = self.multiplier
@@ -357,7 +357,7 @@ class LoRAInfModule(LoRAModule):
  mask = torch.cat(masks)
  mask_sum = torch.sum(mask, dim=0) + 1e-4
  for i in range(self.network.batch_size):
- # process one image at a time
+ # process one image at a time
  lx1 = lx[i * self.network.num_sub_prompts : (i + 1) * self.network.num_sub_prompts]
  lx1 = lx1 * mask
  lx1 = torch.sum(lx1, dim=0)
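What the per-image loop computes: the num_sub_prompts regional outputs are blended by their masks and normalized by the mask sum. A small sketch under assumed shapes:

import torch

num_sub_prompts, seq_len, dim = 2, 64, 320
lx = torch.randn(num_sub_prompts, seq_len, dim)  # one image's sub-prompt outputs
masks = [torch.rand(1, seq_len, 1) for _ in range(num_sub_prompts)]

mask = torch.cat(masks)                   # (num_sub_prompts, seq_len, 1)
mask_sum = torch.sum(mask, dim=0) + 1e-4  # avoid division by zero

# weight each sub-prompt's output by its region mask, then normalize
blended = torch.sum(lx * mask, dim=0) / mask_sum  # (seq_len, dim)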
@@ -380,7 +380,7 @@ def parse_block_lr_kwargs(nw_kwargs):
  mid_lr_weight = nw_kwargs.get("mid_lr_weight", None)
  up_lr_weight = nw_kwargs.get("up_lr_weight", None)

- # if none of the above is set, treat block lr as disabled and return None
+ # if none of the above is set, treat block lr as disabled and return None
  if down_lr_weight is None and mid_lr_weight is None and up_lr_weight is None:
  return None, None, None

@@ -433,7 +433,7 @@ def create_network(
  block_dims = kwargs.get("block_dims", None)
  down_lr_weight, mid_lr_weight, up_lr_weight = parse_block_lr_kwargs(kwargs)

- # if any of the above is specified, enable per-block dim (rank)
+ # if any of the above is specified, enable per-block dim (rank)
  if block_dims is not None or down_lr_weight is not None or mid_lr_weight is not None or up_lr_weight is not None:
  block_alphas = kwargs.get("block_alphas", None)
  conv_block_dims = kwargs.get("conv_block_dims", None)
@@ -461,7 +461,7 @@ def create_network(
  if module_dropout is not None:
  module_dropout = float(module_dropout)

- # that's a lot of arguments ( ^ω^)...
+ # that's a lot of arguments ( ^ω^)...
  network = LoRANetwork(
  text_encoder,
  unet,
@@ -486,10 +486,10 @@ def create_network(
  return network


- # note that this method may be called from outside
- # network_dim and network_alpha hold default values.
- # block_dims and block_alphas are either both None or both set
- # conv_dim and conv_alpha are either both None or both set
+ # note that this method may be called from outside
+ # network_dim and network_alpha hold default values.
+ # block_dims and block_alphas are either both None or both set
+ # conv_dim and conv_alpha are either both None or both set
  def get_block_dims_and_alphas(
  block_dims, block_alphas, network_dim, network_alpha, conv_block_dims, conv_block_alphas, conv_dim, conv_alpha
  ):
@@ -501,50 +501,50 @@ def get_block_dims_and_alphas(
  def parse_floats(s):
  return [float(i) for i in s.split(",")]

- # parse block_dims and block_alphas; both always end up with values
+ # parse block_dims and block_alphas; both always end up with values
  if block_dims is not None:
  block_dims = parse_ints(block_dims)
  assert (
  len(block_dims) == num_total_blocks
- ), f"block_dims must have {num_total_blocks} elements"
+ ), f"block_dims must have {num_total_blocks} elements"
  else:
- print(f"block_dims is not specified. all dims are set to {network_dim}")
+ print(f"block_dims is not specified. all dims are set to {network_dim}")
  block_dims = [network_dim] * num_total_blocks

  if block_alphas is not None:
  block_alphas = parse_floats(block_alphas)
  assert (
  len(block_alphas) == num_total_blocks
- ), f"block_alphas must have {num_total_blocks} elements"
+ ), f"block_alphas must have {num_total_blocks} elements"
  else:
  print(
- f"block_alphas is not specified. all alphas are set to {network_alpha}"
+ f"block_alphas is not specified. all alphas are set to {network_alpha}"
  )
  block_alphas = [network_alpha] * num_total_blocks

- # parse conv_block_dims and conv_block_alphas only if specified; otherwise fall back to conv_dim and conv_alpha
+ # parse conv_block_dims and conv_block_alphas only if specified; otherwise fall back to conv_dim and conv_alpha
  if conv_block_dims is not None:
  conv_block_dims = parse_ints(conv_block_dims)
  assert (
  len(conv_block_dims) == num_total_blocks
- ), f"conv_block_dims must have {num_total_blocks} elements"
+ ), f"conv_block_dims must have {num_total_blocks} elements"

  if conv_block_alphas is not None:
  conv_block_alphas = parse_floats(conv_block_alphas)
  assert (
  len(conv_block_alphas) == num_total_blocks
- ), f"conv_block_alphas must have {num_total_blocks} elements"
+ ), f"conv_block_alphas must have {num_total_blocks} elements"
  else:
  if conv_alpha is None:
  conv_alpha = 1.0
  print(
- f"conv_block_alphas is not specified. all alphas are set to {conv_alpha}"
+ f"conv_block_alphas is not specified. all alphas are set to {conv_alpha}"
  )
  conv_block_alphas = [conv_alpha] * num_total_blocks
  else:
  if conv_dim is not None:
  print(
- f"conv_dim/alpha for all blocks are set to {conv_dim} and {conv_alpha}"
+ f"conv_dim/alpha for all blocks are set to {conv_dim} and {conv_alpha}"
  )
  conv_block_dims = [conv_dim] * num_total_blocks
  conv_block_alphas = [conv_alpha] * num_total_blocks
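To illustrate the expected inputs: block_dims and block_alphas arrive as comma-separated strings with one entry per block. A hypothetical call, assuming num_total_blocks = 25 (12 down + 1 mid + 12 up):

# Hypothetical invocation with per-block ranks and every alpha defaulted.
block_dims, block_alphas, conv_block_dims, conv_block_alphas = get_block_dims_and_alphas(
    block_dims="4,4,4,4,4,4,4,4,4,4,4,4,8,4,4,4,4,4,4,4,4,4,4,4,4",
    block_alphas=None,  # falls back to network_alpha for every block
    network_dim=4,
    network_alpha=1.0,
    conv_block_dims=None,
    conv_block_alphas=None,
    conv_dim=None,      # Conv2d 3x3 layers stay disabled
    conv_alpha=None,
)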
@@ -555,15 +555,15 @@ def get_block_dims_and_alphas(
  return block_dims, block_alphas, conv_block_dims, conv_block_alphas


- # define per-block multipliers on the learning rate for layer-wise learning rates; may be called from outside
+ # define per-block multipliers on the learning rate for layer-wise learning rates; may be called from outside
  def get_block_lr_weight(
  down_lr_weight, mid_lr_weight, up_lr_weight, zero_threshold
  ) -> Tuple[List[float], List[float], List[float]]:
- # if no parameter is specified, do nothing and keep the previous behavior
+ # if no parameter is specified, do nothing and keep the previous behavior
  if up_lr_weight is None and mid_lr_weight is None and down_lr_weight is None:
  return None, None, None

- max_len = LoRANetwork.NUM_OF_BLOCKS # number of up/down blocks in the full model
+ max_len = LoRANetwork.NUM_OF_BLOCKS # number of up/down blocks in the full model

  def get_list(name_with_suffix) -> List[float]:
  import math
@@ -584,7 +584,7 @@ def get_block_lr_weight(
  return [0.0 + base_lr] * max_len
  else:
  print(
- "Unknown lr_weight argument %s is used. Valid arguments:\n\tcosine, sine, linear, reverse_linear, zeros"
+ "Unknown lr_weight argument %s is used. Valid arguments:\n\tcosine, sine, linear, reverse_linear, zeros"
  % (name)
  )
  return None
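For reference, the named presets build a list of max_len per-block multipliers from a curve plus an optional base_lr offset. A plausible reconstruction from the names and the surrounding code (not this file's exact formulas):

import math

def preset_weights(name: str, base_lr: float, max_len: int):
    # Illustrative reconstruction of the named presets.
    if name == "cosine":
        return [math.sin(math.pi * (i / (max_len - 1)) / 2) + base_lr for i in reversed(range(max_len))]
    if name == "sine":
        return [math.sin(math.pi * (i / (max_len - 1)) / 2) + base_lr for i in range(max_len)]
    if name == "linear":
        return [i / (max_len - 1) + base_lr for i in range(max_len)]
    if name == "reverse_linear":
        return [i / (max_len - 1) + base_lr for i in reversed(range(max_len))]
    if name == "zeros":
        return [0.0 + base_lr] * max_len
    return None

print(preset_weights("linear", 0.0, 12))  # ramps from 0.0 to 1.0 across 12 blocks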
@@ -596,13 +596,13 @@ def get_block_lr_weight(

  if (up_lr_weight != None and len(up_lr_weight) > max_len) or (down_lr_weight != None and len(down_lr_weight) > max_len):
  print("down_weight or up_weight is too long. Parameters after %d-th are ignored." % max_len)
- print("down_weight or up_weight is too long. Parameters after the %d-th are ignored." % max_len)
+ print("down_weight or up_weight is too long. Parameters after the %d-th are ignored." % max_len)
  up_lr_weight = up_lr_weight[:max_len]
  down_lr_weight = down_lr_weight[:max_len]

  if (up_lr_weight != None and len(up_lr_weight) < max_len) or (down_lr_weight != None and len(down_lr_weight) < max_len):
  print("down_weight or up_weight is too short. Parameters after %d-th are filled with 1." % max_len)
- print("down_weight or up_weight is too short. Missing parameters up to the %d-th are filled with 1." % max_len)
+ print("down_weight or up_weight is too short. Missing parameters up to the %d-th are filled with 1." % max_len)

  if down_lr_weight != None and len(down_lr_weight) < max_len:
  down_lr_weight = down_lr_weight + [1.0] * (max_len - len(down_lr_weight))
@@ -610,12 +610,12 @@ def get_block_lr_weight(
  up_lr_weight = up_lr_weight + [1.0] * (max_len - len(up_lr_weight))

  if (up_lr_weight != None) or (mid_lr_weight != None) or (down_lr_weight != None):
- print("apply block learning rate")
+ print("apply block learning rate")
  if down_lr_weight != None:
  down_lr_weight = [w if w > zero_threshold else 0 for w in down_lr_weight]
- print("down_lr_weight (shallower -> deeper):", down_lr_weight)
+ print("down_lr_weight (shallower -> deeper):", down_lr_weight)
  else:
- print("down_lr_weight: all 1.0")
+ print("down_lr_weight: all 1.0")

  if mid_lr_weight != None:
  mid_lr_weight = mid_lr_weight if mid_lr_weight > zero_threshold else 0
@@ -625,14 +625,14 @@ def get_block_lr_weight(

  if up_lr_weight != None:
  up_lr_weight = [w if w > zero_threshold else 0 for w in up_lr_weight]
- print("up_lr_weight (deeper -> shallower):", up_lr_weight)
+ print("up_lr_weight (deeper -> shallower):", up_lr_weight)
  else:
- print("up_lr_weight: all 1.0")
+ print("up_lr_weight: all 1.0")

  return down_lr_weight, mid_lr_weight, up_lr_weight


- # exclude blocks whose lr_weight is 0 from block_dims; may be called from outside
+ # exclude blocks whose lr_weight is 0 from block_dims; may be called from outside
  def remove_block_dims_and_alphas(
  block_dims, block_alphas, conv_block_dims, conv_block_alphas, down_lr_weight, mid_lr_weight, up_lr_weight
  ):
@@ -658,7 +658,7 @@ def remove_block_dims_and_alphas(
  return block_dims, block_alphas, conv_block_dims, conv_block_alphas


- # note that this may be called from outside
+ # note that this may be called from outside
  def get_block_index(lora_name: str) -> int:
  block_idx = -1 # invalid lora name

@@ -675,7 +675,7 @@ def get_block_index(lora_name: str) -> int:
  idx = 3 * i + 2

  if g[0] == "down":
- block_idx = 1 + idx # no LoRA corresponds to index 0
+ block_idx = 1 + idx # no LoRA corresponds to index 0
  elif g[0] == "up":
  block_idx = LoRANetwork.NUM_OF_BLOCKS + 1 + idx

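The resulting index layout, assuming the NUM_OF_BLOCKS = 12 convention defined below: down blocks map to small indices starting at 1, the mid block to NUM_OF_BLOCKS, and up blocks above that, with index 0 unused. A sanity-check sketch (hypothetical module names following the lora_unet_* scheme):

# Hypothetical check of the index layout.
names = [
    "lora_unet_down_blocks_0_attentions_0_proj_in",  # down block -> small index >= 1
    "lora_unet_mid_block_attentions_0_proj_in",      # mid block  -> NUM_OF_BLOCKS
    "lora_unet_up_blocks_3_attentions_2_proj_out",   # up block   -> index > NUM_OF_BLOCKS
]
for name in names:
    print(name, "->", get_block_index(name))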
@@ -730,7 +730,7 @@ def create_network_from_weights(multiplier, file, vae, text_encoder, unet, weigh


  class LoRANetwork(torch.nn.Module):
- NUM_OF_BLOCKS = 12 # number of up/down blocks in the full model
+ NUM_OF_BLOCKS = 12 # number of up/down blocks in the full model

  UNET_TARGET_REPLACE_MODULE = ["Transformer2DModel"]
  UNET_TARGET_REPLACE_MODULE_CONV2D_3X3 = ["ResnetBlock2D", "Downsample2D", "Upsample2D"]
@@ -764,12 +764,12 @@ class LoRANetwork(torch.nn.Module):
  varbose: Optional[bool] = False,
  ) -> None:
  """
- LoRA network: there are a lot of arguments, but the usage patterns are:
- 1. specify lora_dim and alpha
- 2. specify lora_dim, alpha, conv_lora_dim and conv_alpha
- 3. specify block_dims and block_alphas : not applied to Conv2d3x3
- 4. specify block_dims, block_alphas, conv_block_dims and conv_block_alphas : also applied to Conv2d3x3
- 5. specify modules_dim and modules_alpha (for inference)
+ LoRA network: there are a lot of arguments, but the usage patterns are:
+ 1. specify lora_dim and alpha
+ 2. specify lora_dim, alpha, conv_lora_dim and conv_alpha
+ 3. specify block_dims and block_alphas : not applied to Conv2d3x3
+ 4. specify block_dims, block_alphas, conv_block_dims and conv_block_alphas : also applied to Conv2d3x3
+ 5. specify modules_dim and modules_alpha (for inference)
  """
  super().__init__()
  self.multiplier = multiplier
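Of the patterns above, pattern 2 is the common training setup. A hypothetical instantiation (text_encoder and unet assumed in scope, keyword names taken from the docstring, values illustrative):

# Hypothetical instantiation following pattern 2.
network = LoRANetwork(
    text_encoder,
    unet,
    multiplier=1.0,
    lora_dim=4,
    alpha=1.0,
    conv_lora_dim=4,  # also create LoRA for Conv2d 3x3 layers
    conv_alpha=1.0,
)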
@@ -831,12 +831,12 @@ class LoRANetwork(torch.nn.Module):
  alpha = None

  if modules_dim is not None:
- # modules are specified explicitly
+ # modules are specified explicitly
  if lora_name in modules_dim:
  dim = modules_dim[lora_name]
  alpha = modules_alpha[lora_name]
  elif is_unet and block_dims is not None:
- # U-Net with block_dims specified
+ # U-Net with block_dims specified
  block_idx = get_block_index(lora_name)
  if is_linear or is_conv2d_1x1:
  dim = block_dims[block_idx]
@@ -845,7 +845,7 @@ class LoRANetwork(torch.nn.Module):
  dim = conv_block_dims[block_idx]
  alpha = conv_block_alphas[block_idx]
  else:
- # the normal case: target all modules
+ # the normal case: target all modules
  if is_linear or is_conv2d_1x1:
  dim = self.lora_dim
  alpha = self.alpha
@@ -854,7 +854,7 @@ class LoRANetwork(torch.nn.Module):
  alpha = self.conv_alpha

  if dim is None or dim == 0:
- # record modules that were skipped
+ # record modules that were skipped
  if is_linear or is_conv2d_1x1 or (self.conv_lora_dim is not None or conv_block_dims is not None):
  skipped.append(lora_name)
  continue
@@ -875,7 +875,7 @@ class LoRANetwork(torch.nn.Module):
  text_encoders = text_encoder if type(text_encoder) == list else [text_encoder]
  print(text_encoders)
  # create LoRA for text encoder
- # creating all modules every time is wasteful; worth reconsidering
+ # creating all modules every time is wasteful; worth reconsidering
  self.text_encoder_loras = []
  skipped_te = []
  for i, text_encoder in enumerate(text_encoders):
@@ -903,7 +903,7 @@ class LoRANetwork(torch.nn.Module):
  skipped = skipped_te + skipped_un
  if varbose and len(skipped) > 0:
  print(
- f"because block_lr_weight is 0 or dim (rank) is 0, {len(skipped)} LoRA modules are skipped:"
+ f"because block_lr_weight is 0 or dim (rank) is 0, {len(skipped)} LoRA modules are skipped:"
  )
  for name in skipped:
  print(f"\t{name}")
@@ -949,7 +949,7 @@ class LoRANetwork(torch.nn.Module):
  lora.apply_to()
  self.add_module(lora.lora_name, lora)

- # return whether merging is possible
+ # return whether merging is possible
  def is_mergeable(self):
  return True

@@ -981,7 +981,7 @@ class LoRANetwork(torch.nn.Module):

  print(f"weights are merged")

- # define per-block multipliers on the learning rate for layer-wise learning rates; the argument order is reversed, but don't worry about it for now
+ # define per-block multipliers on the learning rate for layer-wise learning rates; the argument order is reversed, but don't worry about it for now
  def set_block_lr_weight(
  self,
  up_lr_weight: List[float] = None,
@@ -1011,7 +1011,7 @@ class LoRANetwork(torch.nn.Module):

  return lr_weight

- # it might be good to allow separate learning rates for the two Text Encoders
+ # it might be good to allow separate learning rates for the two Text Encoders
  def prepare_optimizer_params(self, text_encoder_lr, unet_lr, default_lr):
  self.requires_grad_(True)
  all_params = []
@@ -1030,7 +1030,7 @@ class LoRANetwork(torch.nn.Module):

  if self.unet_loras:
  if self.block_lr:
- # we want per-block learning-rate tracking, so group the loras by block
+ # we want per-block learning-rate tracking, so group the loras by block
  block_idx_to_lora = {}
  for lora in self.unet_loras:
  idx = get_block_index(lora.lora_name)
@@ -1038,7 +1038,7 @@ class LoRANetwork(torch.nn.Module):
  block_idx_to_lora[idx] = []
  block_idx_to_lora[idx].append(lora)

- # set the parameters per block
+ # set the parameters per block
  for idx, block_loras in block_idx_to_lora.items():
  param_data = {"params": enumerate_params(block_loras)}
@@ -1142,7 +1142,7 @@ class LoRANetwork(torch.nn.Module):
  self.mask_dic = mask_dic

  def backup_weights(self):
- # back up the weights
+ # back up the weights
  loras: List[LoRAInfModule] = self.text_encoder_loras + self.unet_loras
  for lora in loras:
  org_module = lora.org_module_ref[0]
@@ -1152,7 +1152,7 @@ class LoRANetwork(torch.nn.Module):
  org_module._lora_restored = True

  def restore_weights(self):
- # restore the weights
+ # restore the weights
  loras: List[LoRAInfModule] = self.text_encoder_loras + self.unet_loras
  for lora in loras:
  org_module = lora.org_module_ref[0]
@@ -1163,7 +1163,7 @@ class LoRANetwork(torch.nn.Module):
  org_module._lora_restored = True

  def pre_calculation(self):
- # perform the pre-calculation
+ # perform the pre-calculation
  loras: List[LoRAInfModule] = self.text_encoder_loras + self.unet_loras
  for lora in loras:
  org_module = lora.org_module_ref[0]
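Taken together, backup_weights / restore_weights / pre_calculation implement a reversible merge for inference: back up the original weight once, overwrite it with the pre-merged weight, and restore on demand. A condensed, illustrative sketch of that pattern (using the same _lora_org_weight / _lora_restored attributes):

# Condensed sketch of the backup/restore pattern (illustrative, not the exact code).
def backup(org_module):
    if not hasattr(org_module, "_lora_org_weight"):
        org_module._lora_org_weight = org_module.weight.detach().clone()
    org_module._lora_restored = True  # current weight equals the original

def restore(org_module):
    if not getattr(org_module, "_lora_restored", True):
        org_module.weight.data.copy_(org_module._lora_org_weight)
        org_module._lora_restored = True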
 