Upload convert_lora.py
Browse files- convert_lora.py +78 -0
convert_lora.py
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import re
|
| 3 |
+
from safetensors.torch import load_file, save_file
|
| 4 |
+
|
| 5 |
+
# Compiled once at import time. "layers_0_", "context_refiner_1_" and
# "noise_refiner_2_" all follow the same "<name>_<idx>_" shape, so one
# pattern replaces the three per-call re.sub invocations.
_INDEXED_BLOCK_RE = re.compile(r"(layers|context_refiner|noise_refiner)_(\d+)_")
# "to_out" carries a trailing sub-module index; ComfyUI expects "to_out.0".
_TO_OUT_INDEX_RE = re.compile(r"\.to_out_(\d+)")


def convert_key(key):
    """Translate one exporter-style LoRA key into the dotted form ComfyUI expects.

    Parameters
    ----------
    key : str
        Original tensor key, e.g.
        ``"lora_unet__layers_0_attention_to_k.lora_down.weight"``.

    Returns
    -------
    str
        Converted key, e.g.
        ``"diffusion_model.layers.0.attention.to_k.lora_down.weight"``.
    """
    # 1. Strip the exporter prefix (note the DOUBLE underscore).
    stripped = key.replace("lora_unet__", "")

    # 2. Only the first dotted component encodes the block path; the tail
    #    ("lora_down.weight", "alpha", ...) passes through untouched.
    block, _, tail = stripped.partition(".")

    # "layers_X_" -> "layers.X." (likewise context_refiner / noise_refiner).
    # The trailing underscore must be consumed too, or the next token stays fused.
    block = _INDEXED_BLOCK_RE.sub(r"\1.\2.", block)

    # "attention_to_k" -> "attention.to_k" (likewise to_q / to_v).
    # str.replace is a no-op when the pattern is absent, so no guard is needed.
    for proj in ("to_k", "to_q", "to_v"):
        block = block.replace(f"_{proj}", f".{proj}")

    # "attention_to_out_0" -> "attention.to_out.0" — ComfyUI wants "to_out.0".
    block = block.replace("_to_out", ".to_out")
    block = _TO_OUT_INDEX_RE.sub(r".to_out.\1", block)

    # 3. Reattach the untouched tail under the ComfyUI root prefix.
    return "diffusion_model." + (f"{block}.{tail}" if tail else block)
| 44 |
+
|
| 45 |
+
# --- MAIN EXECUTION ---
print("Looking for .safetensors files...")

# Find the original file; skip artifacts produced by previous conversion runs.
files = [
    f for f in os.listdir('.')
    if f.endswith('.safetensors') and 'converted' not in f and 'fixed' not in f
]

if not files:
    print("Error: No original .safetensors file found.")
    print("Make sure the original 'Z-Image-Fun-Lora-Distill-8-Steps.safetensors' is in this folder.")
    # SystemExit(1) instead of the site-module exit(): always available,
    # and a non-zero code signals failure to the shell.
    raise SystemExit(1)

input_file = files[0]
print(f"Processing: {input_file}")

try:
    tensors = load_file(input_file)
    new_tensors = {}

    print("Converting keys...")
    # Preview the first converted key so the mapping can be sanity-checked.
    # Guarded: an empty checkpoint would otherwise raise here and the
    # generic except below would mask the real cause.
    if tensors:
        first_key = next(iter(tensors))
        print(f"Preview: {first_key} \n -> {convert_key(first_key)}")

    for k, v in tensors.items():
        new_tensors[convert_key(k)] = v

    output_file = input_file.replace(".safetensors", "_v3_fixed.safetensors")
    save_file(new_tensors, output_file)

    print(f"\nSUCCESS! Created: {output_file}")
    print("Move this file to ComfyUI/models/loras/ and DELETE the old converted ones.")

except Exception as e:
    # Report the failure AND exit non-zero — previously the script printed
    # the error but still exited 0, looking like success to callers.
    print(f"An error occurred: {e}")
    raise SystemExit(1)