Upload 13 files
- app.py +2 -2
- generator.py +33 -20
- lora.py +58 -58
- pipeline_stable_diffusion_xl_instantid_img2img.py +1 -1
app.py
CHANGED
@@ -173,7 +173,7 @@ with gr.Blocks(title="Pixagram - AI Pixel Art Generator", theme=gr.themes.Soft()
         scheduler_info = f"""
         **[CONFIG] Advanced Configuration:**
         - Pipeline: **InstantID Img2Img** (native face preservation)
-        - Face System: **InstantID + InsightFace** (512D embeddings
+        - Face System: **InstantID + InsightFace** (512D embeddings → 16×2048D)
         - **[INSTANTID] Built-in Resampler:** 4 layers, 20 heads (official architecture)
         - **[INSTANTID] IP-Adapter:** Native attention processors
         - **[INSTANTID] Dual ControlNets:** Face keypoints + Depth
@@ -350,7 +350,7 @@ with gr.Blocks(title="Pixagram - AI Pixel Art Generator", theme=gr.themes.Soft()
         **[ADAPTIVE] Automatic Adjustments:**
         - Small faces (< 50K px): Boosts identity preservation to 1.8
         - Low confidence (< 80%): Increases identity control to 0.9
-        - Profile views (> 20
+        - Profile views (> 20° yaw): Enhances preservation to 1.7
         - Good quality faces: Uses your selected parameters

         **[PARAMETERS] Parameter Relationships:**
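The adaptive adjustments listed in that UI text are threshold checks on the InsightFace detection result. A minimal sketch of that logic, assuming a detection object with InsightFace's `bbox`, `det_score`, and `pose` fields; the helper name and exact cutoffs are illustrative (the real values live in `ADAPTIVE_THRESHOLDS` / `ADAPTIVE_PARAMS` in config.py), not the app's actual code:

    # Hypothetical sketch of the adaptive logic described in the UI text above.
    def adapt_face_params(face, identity_preservation, identity_control_scale):
        x1, y1, x2, y2 = face.bbox
        face_area = (x2 - x1) * (y2 - y1)
        yaw = abs(face.pose[1]) if face.pose is not None else 0.0  # degrees

        if face_area < 50_000:        # small face: boost identity preservation
            identity_preservation = max(identity_preservation, 1.8)
        if face.det_score < 0.80:     # low confidence: stronger identity ControlNet
            identity_control_scale = max(identity_control_scale, 0.9)
        if yaw > 20.0:                # profile view: enhance preservation
            identity_preservation = max(identity_preservation, 1.7)
        # good-quality frontal faces keep the user's selected parameters
        return identity_preservation, identity_control_scale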
generator.py
CHANGED
@@ -8,6 +8,9 @@ import cv2
 from PIL import Image
 import gc
 
+# CRITICAL: Import lora module for LORA fusion (missing from original)
+import lora
+
 from config import (
     device, dtype, TRIGGER_WORD,
     ADAPTIVE_THRESHOLDS, ADAPTIVE_PARAMS, CAPTION_CONFIG
@@ -326,9 +329,9 @@ class RetroArtConverter:
         # Generate depth map
         depth_image = self.get_depth_map(resized_image)
 
-        #
+        # ════════════════════════════════════════════════════════════
         # FACE DETECTION
-        #
+        # ════════════════════════════════════════════════════════════
         has_detected_faces = False
         face_kps_image = None
         face_embeddings = None
@@ -381,9 +384,9 @@ class RetroArtConverter:
         except Exception as e:
             print(f"[LORA] Could not fuse: {e}")
 
-        #
+        # ════════════════════════════════════════════════════════════
         # PIPELINE CONFIGURATION
-        #
+        # ════════════════════════════════════════════════════════════
         pipe_kwargs = {
             "image": resized_image,
             "strength": strength,
@@ -423,14 +426,24 @@ class RetroArtConverter:
         pipe_kwargs["prompt"] = prompt
         pipe_kwargs["negative_prompt"] = negative_prompt
 
+        # LORA FUSION (CRITICAL - following examplewithface.py)
+        # Must fuse LORA with scale before generation
+        if self.models_loaded['lora']:
+            try:
+                from models import fuse_lora_with_scale
+                print(f"[LORA] Fusing LORA with scale: {lora_scale}")
+                fuse_lora_with_scale(self.pipe, lora_scale)
+            except Exception as e:
+                print(f"[WARNING] LORA fusion failed: {e}")
+
-        #
+        # ════════════════════════════════════════════════════════════
         # CONTROLNET + IP-ADAPTER CONFIGURATION
-        #
+        # ════════════════════════════════════════════════════════════
 
         if has_detected_faces and face_kps_image is not None and face_embeddings is not None:
-            print("
+            print("═" * 60)
             print("MODE: InstantID (Face Keypoints + Depth + IP-Adapter)")
-            print("
+            print("═" * 60)
 
             # Set IP-Adapter scale
             self.pipe.set_ip_adapter_scale(identity_preservation)
@@ -455,13 +468,13 @@ class RetroArtConverter:
             print(f" [CONTROLNET] Identity scale: {identity_control_scale}")
             print(f" [CONTROLNET] Depth scale: {depth_control_scale}")
             print(f" [EMBEDDINGS] Shape: {face_embeddings.shape} (raw)")
-            print(" [INFO] Pipeline will handle: Resampler
-            print("
+            print(" [INFO] Pipeline will handle: Resampler → Concatenation → Attention")
+            print("═" * 60)
 
         elif has_detected_faces and face_kps_image is not None:
-            print("
+            print("═" * 60)
             print("MODE: InstantID Keypoints Only (no embeddings)")
-            print("
+            print("═" * 60)
 
             # Disable IP-Adapter
             self.pipe.set_ip_adapter_scale(0.0)
@@ -481,12 +494,12 @@ class RetroArtConverter:
             pipe_kwargs["image_embeds"] = zero_embeddings
 
             print(" [INFO] Using keypoints for structure only (zero embeddings)")
-            print("
+            print("═" * 60)
 
         else:
-            print("
+            print("═" * 60)
             print("MODE: Depth Only (no face detection)")
-            print("
+            print("═" * 60)
 
             # Disable IP-Adapter
             self.pipe.set_ip_adapter_scale(0.0)
@@ -504,20 +517,20 @@ class RetroArtConverter:
 
             print(f" [CONTROLNET] Depth scale: {depth_control_scale}")
             print(" [INFO] Generating without face preservation (zero embeddings)")
-            print("
+            print("═" * 60)
 
-        #
+        # ════════════════════════════════════════════════════════════
         # GENERATION
-        #
+        # ════════════════════════════════════════════════════════════
         print(f"\nGenerating: Steps={num_inference_steps}, CFG={guidance_scale}, Strength={strength}")
 
         result = self.pipe(**pipe_kwargs)
 
         generated_image = result.images[0]
 
-        #
+        # ════════════════════════════════════════════════════════════
         # POST-PROCESSING
-        #
+        # ════════════════════════════════════════════════════════════
         if enable_color_matching and has_detected_faces:
             print("Applying enhanced face-aware color matching...")
             try:
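`fuse_lora_with_scale` is imported from models.py, which is not part of this diff. On a diffusers SDXL pipeline the step it names is typically a thin wrapper over the built-in LoRA fusion API; a minimal sketch under that assumption, not the Space's actual models.py:

    # Assumed shape of models.fuse_lora_with_scale; diffusers pipelines with a
    # LoRA loaded expose fuse_lora()/unfuse_lora() for exactly this purpose.
    def fuse_lora_with_scale(pipe, lora_scale: float):
        # Bake the loaded LoRA into the UNet/text-encoder weights at this scale
        pipe.fuse_lora(lora_scale=lora_scale)

    def unfuse_lora(pipe):
        # Undo the fusion so a different scale can be applied on the next request
        pipe.unfuse_lora()

Fusing again without unfusing first compounds the scales, which is worth guarding against when this runs once per generated image.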
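In the keypoints-only and depth-only branches above, the pipeline still expects an `image_embeds` kwarg, so the generator passes zeros alongside an IP-Adapter scale of 0. A sketch of that pattern; the 512 dimension follows the InsightFace embedding size quoted in the app's config text, and `pipe`, `pipe_kwargs`, `device`, and `dtype` stand in for the surrounding method's state:

    import torch

    # Zeroed face embedding: with scale 0 the IP-Adapter path contributes nothing,
    # but the InstantID pipeline signature still requires image_embeds.
    zero_embeddings = torch.zeros(1, 512, device=device, dtype=dtype)
    pipe.set_ip_adapter_scale(0.0)
    pipe_kwargs["image_embeds"] = zero_embeddings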
lora.py
CHANGED
@@ -66,7 +66,7 @@ class LoRAModule(torch.nn.Module):
         alpha = alpha.detach().float().numpy()  # without casting, bf16 causes error
         alpha = self.lora_dim if alpha is None or alpha == 0 else alpha
         self.scale = alpha / self.lora_dim
-        self.register_buffer("alpha", torch.tensor(alpha))  #
+        self.register_buffer("alpha", torch.tensor(alpha))  # so it can be treated as a constant
 
         # same as microsoft's
         torch.nn.init.kaiming_uniform_(self.lora_down.weight, a=math.sqrt(5))
@@ -107,7 +107,7 @@ class LoRAModule(torch.nn.Module):
                 lx = lx * mask
 
                 # scaling for rank dropout: treat as if the rank is changed
-                # mask
+                # could also be computed from the mask, but rank_dropout is used for its augmentation-like effect
                 scale = self.scale * (1.0 / (1.0 - self.rank_dropout))  # redundant for readability
             else:
                 scale = self.scale
@@ -130,7 +130,7 @@ class LoRAInfModule(LoRAModule):
         # no dropout for inference
         super().__init__(lora_name, org_module, multiplier, lora_dim, alpha)
 
-        self.org_module_ref = [org_module]  #
+        self.org_module_ref = [org_module]  # keep a reference so it can be accessed later
         self.enabled = True
 
         # check regional or not by lora_name
@@ -154,7 +154,7 @@ class LoRAInfModule(LoRAModule):
     def set_network(self, network):
         self.network = network
 
-    # freeze
+    # freeze the weights and merge them into the original module
     def merge_to(self, sd, dtype, device):
         # get up/down weight
         up_weight = sd["lora_up.weight"].to(torch.float).to(device)
@@ -186,7 +186,7 @@ class LoRAInfModule(LoRAModule):
         org_sd["weight"] = weight.to(dtype)
         self.org_module.load_state_dict(org_sd)
 
-    #
+    # return this module's weight so the merge can be restored later
     def get_weight(self, multiplier=None):
         if multiplier is None:
             multiplier = self.multiplier
@@ -357,7 +357,7 @@ class LoRAInfModule(LoRAModule):
         mask = torch.cat(masks)
         mask_sum = torch.sum(mask, dim=0) + 1e-4
         for i in range(self.network.batch_size):
-            # 1
+            # process one image at a time
             lx1 = lx[i * self.network.num_sub_prompts : (i + 1) * self.network.num_sub_prompts]
             lx1 = lx1 * mask
             lx1 = torch.sum(lx1, dim=0)
@@ -380,7 +380,7 @@ def parse_block_lr_kwargs(nw_kwargs):
     mid_lr_weight = nw_kwargs.get("mid_lr_weight", None)
     up_lr_weight = nw_kwargs.get("up_lr_weight", None)
 
-    #
+    # if none of these are set, treat block LR as disabled and return None
     if down_lr_weight is None and mid_lr_weight is None and up_lr_weight is None:
         return None, None, None
 
@@ -433,7 +433,7 @@ def create_network(
     block_dims = kwargs.get("block_dims", None)
     down_lr_weight, mid_lr_weight, up_lr_weight = parse_block_lr_kwargs(kwargs)
 
-    #
+    # if any of these are specified, enable per-block dims (ranks)
     if block_dims is not None or down_lr_weight is not None or mid_lr_weight is not None or up_lr_weight is not None:
         block_alphas = kwargs.get("block_alphas", None)
         conv_block_dims = kwargs.get("conv_block_dims", None)
@@ -461,7 +461,7 @@ def create_network(
     if module_dropout is not None:
         module_dropout = float(module_dropout)
 
-    #
+    # that is a lot of arguments ( ^ω^)...
     network = LoRANetwork(
         text_encoder,
         unet,
@@ -486,10 +486,10 @@ def create_network(
     return network
 
 
-#
-# network_dim, network_alpha
-# block_dims, block_alphas
-# conv_dim, conv_alpha
+# keep in mind this function may be called from outside
+# network_dim and network_alpha always hold default values
+# block_dims and block_alphas are either both None or both set
+# conv_dim and conv_alpha are either both None or both set
 def get_block_dims_and_alphas(
     block_dims, block_alphas, network_dim, network_alpha, conv_block_dims, conv_block_alphas, conv_dim, conv_alpha
 ):
@@ -501,50 +501,50 @@ def get_block_dims_and_alphas(
     def parse_floats(s):
         return [float(i) for i in s.split(",")]
 
-    # block_dims
+    # parse block_dims and block_alphas; both always end up populated
     if block_dims is not None:
         block_dims = parse_ints(block_dims)
         assert (
             len(block_dims) == num_total_blocks
-        ), f"block_dims must have {num_total_blocks} elements / block_dims
+        ), f"block_dims must have {num_total_blocks} elements"
     else:
-        print(f"block_dims is not specified. all dims are set to {network_dim} / block_dims
+        print(f"block_dims is not specified. all dims are set to {network_dim}")
         block_dims = [network_dim] * num_total_blocks
 
     if block_alphas is not None:
         block_alphas = parse_floats(block_alphas)
         assert (
             len(block_alphas) == num_total_blocks
-        ), f"block_alphas must have {num_total_blocks} elements / block_alphas
+        ), f"block_alphas must have {num_total_blocks} elements"
     else:
         print(
-            f"block_alphas is not specified. all alphas are set to {network_alpha} / block_alphas
+            f"block_alphas is not specified. all alphas are set to {network_alpha}"
         )
         block_alphas = [network_alpha] * num_total_blocks
 
-    # conv_block_dims
+    # parse conv_block_dims and conv_block_alphas only when given; otherwise fall back to conv_dim and conv_alpha
     if conv_block_dims is not None:
         conv_block_dims = parse_ints(conv_block_dims)
         assert (
             len(conv_block_dims) == num_total_blocks
-        ), f"conv_block_dims must have {num_total_blocks} elements / conv_block_dims
+        ), f"conv_block_dims must have {num_total_blocks} elements"
 
         if conv_block_alphas is not None:
             conv_block_alphas = parse_floats(conv_block_alphas)
             assert (
                 len(conv_block_alphas) == num_total_blocks
-            ), f"conv_block_alphas must have {num_total_blocks} elements / conv_block_alphas
+            ), f"conv_block_alphas must have {num_total_blocks} elements"
         else:
             if conv_alpha is None:
                 conv_alpha = 1.0
             print(
-                f"conv_block_alphas is not specified. all alphas are set to {conv_alpha} / conv_block_alphas
+                f"conv_block_alphas is not specified. all alphas are set to {conv_alpha}"
            )
            conv_block_alphas = [conv_alpha] * num_total_blocks
     else:
         if conv_dim is not None:
             print(
-                f"conv_dim/alpha for all blocks are set to {conv_dim} and {conv_alpha} /
+                f"conv_dim/alpha for all blocks are set to {conv_dim} and {conv_alpha}"
             )
             conv_block_dims = [conv_dim] * num_total_blocks
             conv_block_alphas = [conv_alpha] * num_total_blocks
@@ -555,15 +555,15 @@ def get_block_dims_and_alphas(
     return block_dims, block_alphas, conv_block_dims, conv_block_alphas
 
 
-#
+# define per-block multipliers for layer-wise learning rates; may be called from outside
 def get_block_lr_weight(
     down_lr_weight, mid_lr_weight, up_lr_weight, zero_threshold
 ) -> Tuple[List[float], List[float], List[float]]:
-    #
+    # if no parameter is specified, do nothing and keep the previous behavior
     if up_lr_weight is None and mid_lr_weight is None and down_lr_weight is None:
         return None, None, None
 
-    max_len = LoRANetwork.NUM_OF_BLOCKS  #
+    max_len = LoRANetwork.NUM_OF_BLOCKS  # number of up/down blocks in the full model
 
     def get_list(name_with_suffix) -> List[float]:
         import math
@@ -584,7 +584,7 @@ def get_block_lr_weight(
             return [0.0 + base_lr] * max_len
         else:
             print(
-                "Unknown lr_weight argument %s is used. Valid arguments: /
+                "Unknown lr_weight argument %s is used. Valid arguments:\n\tcosine, sine, linear, reverse_linear, zeros"
                 % (name)
             )
             return None
@@ -596,13 +596,13 @@ def get_block_lr_weight(
 
     if (up_lr_weight != None and len(up_lr_weight) > max_len) or (down_lr_weight != None and len(down_lr_weight) > max_len):
         print("down_weight or up_weight is too long. Parameters after %d-th are ignored." % max_len)
-        print("down_weight
+        print("down_weight or up_weight is too long; parameters beyond the %d-th are ignored." % max_len)
         up_lr_weight = up_lr_weight[:max_len]
         down_lr_weight = down_lr_weight[:max_len]
 
     if (up_lr_weight != None and len(up_lr_weight) < max_len) or (down_lr_weight != None and len(down_lr_weight) < max_len):
         print("down_weight or up_weight is too short. Parameters after %d-th are filled with 1." % max_len)
-        print("down_weight
+        print("down_weight or up_weight is too short; missing parameters up to the %d-th are filled with 1." % max_len)
 
     if down_lr_weight != None and len(down_lr_weight) < max_len:
         down_lr_weight = down_lr_weight + [1.0] * (max_len - len(down_lr_weight))
@@ -610,12 +610,12 @@ def get_block_lr_weight(
         up_lr_weight = up_lr_weight + [1.0] * (max_len - len(up_lr_weight))
 
     if (up_lr_weight != None) or (mid_lr_weight != None) or (down_lr_weight != None):
-        print("apply block learning rate /
+        print("apply block learning rate")
         if down_lr_weight != None:
             down_lr_weight = [w if w > zero_threshold else 0 for w in down_lr_weight]
-            print("down_lr_weight (shallower -> deeper,
+            print("down_lr_weight (shallower -> deeper):", down_lr_weight)
         else:
-            print("down_lr_weight: all 1.0,
+            print("down_lr_weight: all 1.0")
 
         if mid_lr_weight != None:
             mid_lr_weight = mid_lr_weight if mid_lr_weight > zero_threshold else 0
@@ -625,14 +625,14 @@ def get_block_lr_weight(
 
         if up_lr_weight != None:
             up_lr_weight = [w if w > zero_threshold else 0 for w in up_lr_weight]
-            print("up_lr_weight (deeper -> shallower,
+            print("up_lr_weight (deeper -> shallower):", up_lr_weight)
         else:
-            print("up_lr_weight: all 1.0,
+            print("up_lr_weight: all 1.0")
 
     return down_lr_weight, mid_lr_weight, up_lr_weight
 
 
-# lr_weight
+# drop blocks whose lr_weight is 0 from block_dims; may be called from outside
 def remove_block_dims_and_alphas(
     block_dims, block_alphas, conv_block_dims, conv_block_alphas, down_lr_weight, mid_lr_weight, up_lr_weight
 ):
@@ -658,7 +658,7 @@ def remove_block_dims_and_alphas(
     return block_dims, block_alphas, conv_block_dims, conv_block_alphas
 
 
-#
+# may be called from outside
 def get_block_index(lora_name: str) -> int:
     block_idx = -1  # invalid lora name
 
@@ -675,7 +675,7 @@ def get_block_index(lora_name: str) -> int:
             idx = 3 * i + 2
 
         if g[0] == "down":
-            block_idx = 1 + idx  # 0
+            block_idx = 1 + idx  # no LoRA corresponds to index 0
         elif g[0] == "up":
             block_idx = LoRANetwork.NUM_OF_BLOCKS + 1 + idx
 
@@ -730,7 +730,7 @@ def create_network_from_weights(multiplier, file, vae, text_encoder, unet, weigh
 
 
 class LoRANetwork(torch.nn.Module):
-    NUM_OF_BLOCKS = 12  #
+    NUM_OF_BLOCKS = 12  # number of up/down blocks in the full model
 
     UNET_TARGET_REPLACE_MODULE = ["Transformer2DModel"]
     UNET_TARGET_REPLACE_MODULE_CONV2D_3X3 = ["ResnetBlock2D", "Downsample2D", "Upsample2D"]
@@ -764,12 +764,12 @@ class LoRANetwork(torch.nn.Module):
         varbose: Optional[bool] = False,
     ) -> None:
         """
-        LoRA network:
-        1. lora_dim
-        2. lora_dim
-        3. block_dims
-        4. block_dims
-        5. modules_dim
+        LoRA network: there are many arguments, but they follow one of these patterns:
+        1. specify lora_dim and alpha
+        2. specify lora_dim, alpha, conv_lora_dim and conv_alpha
+        3. specify block_dims and block_alphas: not applied to Conv2d3x3
+        4. specify block_dims, block_alphas, conv_block_dims and conv_block_alphas: also applied to Conv2d3x3
+        5. specify modules_dim and modules_alpha (for inference)
         """
         super().__init__()
         self.multiplier = multiplier
@@ -831,12 +831,12 @@ class LoRANetwork(torch.nn.Module):
                     alpha = None
 
                     if modules_dim is not None:
-                        #
+                        # per-module specification given
                         if lora_name in modules_dim:
                             dim = modules_dim[lora_name]
                             alpha = modules_alpha[lora_name]
                     elif is_unet and block_dims is not None:
-                        # U-Net
+                        # U-Net with block_dims specified
                         block_idx = get_block_index(lora_name)
                         if is_linear or is_conv2d_1x1:
                             dim = block_dims[block_idx]
@@ -845,7 +845,7 @@ class LoRANetwork(torch.nn.Module):
                             dim = conv_block_dims[block_idx]
                             alpha = conv_block_alphas[block_idx]
                     else:
-                        #
+                        # the usual case: target everything
                         if is_linear or is_conv2d_1x1:
                             dim = self.lora_dim
                             alpha = self.alpha
@@ -854,7 +854,7 @@ class LoRANetwork(torch.nn.Module):
                             alpha = self.conv_alpha
 
                     if dim is None or dim == 0:
-                        # skip
+                        # record the skipped modules so they can be reported
                         if is_linear or is_conv2d_1x1 or (self.conv_lora_dim is not None or conv_block_dims is not None):
                             skipped.append(lora_name)
                             continue
@@ -875,7 +875,7 @@ class LoRANetwork(torch.nn.Module):
         text_encoders = text_encoder if type(text_encoder) == list else [text_encoder]
         print(text_encoders)
         # create LoRA for text encoder
-        #
+        # creating every module on each call is wasteful; worth revisiting
         self.text_encoder_loras = []
         skipped_te = []
         for i, text_encoder in enumerate(text_encoders):
@@ -903,7 +903,7 @@ class LoRANetwork(torch.nn.Module):
         skipped = skipped_te + skipped_un
         if varbose and len(skipped) > 0:
             print(
-                f"because block_lr_weight is 0 or dim (rank) is 0, {len(skipped)} LoRA modules are skipped / block_lr_weight
+                f"because block_lr_weight is 0 or dim (rank) is 0, {len(skipped)} LoRA modules are skipped:"
             )
             for name in skipped:
                 print(f"\t{name}")
@@ -949,7 +949,7 @@ class LoRANetwork(torch.nn.Module):
             lora.apply_to()
             self.add_module(lora.lora_name, lora)
 
-    #
+    # return whether the weights can be merged
     def is_mergeable(self):
         return True
 
@@ -981,7 +981,7 @@ class LoRANetwork(torch.nn.Module):
 
         print(f"weights are merged")
 
-    #
+    # define per-block multipliers for layer-wise learning rates; the argument order is reversed, but leave it for now
     def set_block_lr_weight(
         self,
         up_lr_weight: List[float] = None,
@@ -1011,7 +1011,7 @@ class LoRANetwork(torch.nn.Module):
 
         return lr_weight
 
-    #
+    # it might be good to allow separate learning rates for the two Text Encoders
     def prepare_optimizer_params(self, text_encoder_lr, unet_lr, default_lr):
         self.requires_grad_(True)
         all_params = []
@@ -1030,7 +1030,7 @@ class LoRANetwork(torch.nn.Module):
 
         if self.unet_loras:
             if self.block_lr:
-                #
+                # the learning-rate schedule is per block, so classify the LoRAs by block
                 block_idx_to_lora = {}
                 for lora in self.unet_loras:
                     idx = get_block_index(lora.lora_name)
@@ -1038,7 +1038,7 @@ class LoRANetwork(torch.nn.Module):
                         block_idx_to_lora[idx] = []
                     block_idx_to_lora[idx].append(lora)
 
-                # block
+                # set the optimizer parameters per block
                 for idx, block_loras in block_idx_to_lora.items():
                     param_data = {"params": enumerate_params(block_loras)}
 
@@ -1142,7 +1142,7 @@ class LoRANetwork(torch.nn.Module):
         self.mask_dic = mask_dic
 
     def backup_weights(self):
-        #
+        # back up the original weights
        loras: List[LoRAInfModule] = self.text_encoder_loras + self.unet_loras
        for lora in loras:
            org_module = lora.org_module_ref[0]
@@ -1152,7 +1152,7 @@ class LoRANetwork(torch.nn.Module):
             org_module._lora_restored = True
 
     def restore_weights(self):
-        #
+        # restore the original weights
         loras: List[LoRAInfModule] = self.text_encoder_loras + self.unet_loras
         for lora in loras:
             org_module = lora.org_module_ref[0]
@@ -1163,7 +1163,7 @@ class LoRANetwork(torch.nn.Module):
             org_module._lora_restored = True
 
     def pre_calculation(self):
-        #
+        # pre-compute the merged weights
         loras: List[LoRAInfModule] = self.text_encoder_loras + self.unet_loras
         for lora in loras:
             org_module = lora.org_module_ref[0]
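get_block_lr_weight expands the down/mid/up specifications into per-block multipliers over NUM_OF_BLOCKS = 12 entries, truncating, padding with 1.0, and zeroing anything at or below zero_threshold, as the translated messages describe. A toy illustration of just that normalization (preset parsing such as "cosine" is omitted):

    NUM_OF_BLOCKS = 12  # as in LoRANetwork above

    def normalize_block_weights(weights, zero_threshold=0.0):
        weights = weights[:NUM_OF_BLOCKS]                  # too long: extras ignored
        weights += [1.0] * (NUM_OF_BLOCKS - len(weights))  # too short: filled with 1
        # blocks at or below the threshold get weight 0, and their LoRA is skipped
        return [w if w > zero_threshold else 0.0 for w in weights]

    print(normalize_block_weights([0.0, 0.5, 1.0]))
    # -> [0.0, 0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]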
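The merge_to, get_weight, and restore_weights comments translated above all revolve around one identity: each LoRA module stores two low-rank factors and merges them into the original weight as W + multiplier * (alpha / rank) * (up @ down). A self-contained sketch for the Linear case (Conv2d needs reshapes around the matmul):

    import torch

    def merged_weight(org_weight: torch.Tensor,
                      up_weight: torch.Tensor,    # (out_features, rank)
                      down_weight: torch.Tensor,  # (rank, in_features)
                      alpha: float,
                      multiplier: float = 1.0) -> torch.Tensor:
        rank = down_weight.shape[0]
        scale = alpha / rank  # matches self.scale = alpha / self.lora_dim above
        return org_weight + multiplier * scale * (up_weight @ down_weight)

Because the delta is purely additive, backup_weights/restore_weights only need to stash and re-load the original tensors, which is what keeps the merge reversible.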
pipeline_stable_diffusion_xl_instantid_img2img.py
CHANGED
@@ -622,7 +622,7 @@ class StableDiffusionXLInstantIDImg2ImgPipeline(StableDiffusionXLControlNetImg2ImgPipeline):
             num_images_per_prompt (`int`, *optional*, defaults to 1):
                 The number of images to generate per prompt.
             eta (`float`, *optional*, defaults to 0.0):
-                Corresponds to parameter eta (
+                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
                 to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
             generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                 A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
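As the repaired docstring says, eta only takes effect under a DDIM scheduler. A short usage sketch, assuming an already-constructed pipeline `pipe` and input image `init_image`:

    from diffusers import DDIMScheduler

    # eta interpolates from deterministic DDIM sampling (eta=0.0) toward
    # DDPM-like stochastic sampling (eta=1.0); other schedulers ignore it.
    pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
    image = pipe(prompt="pixel art portrait", image=init_image, eta=0.0).images[0]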