#!/usr/bin/env python
# coding=utf-8
"""
分布式采样脚本:支持指定 LoRA 权重与 Rectified Noise(SIT) 权重

依据 train_rectified_noise.py 的模型结构,加载并组装 SD3WithRectifiedNoise 进行采样。
"""

import os
import sys
import json
import math
import argparse
from pathlib import Path

import torch
import torch.distributed as dist
from tqdm import tqdm
import numpy as np
from PIL import Image

from accelerate import Accelerator
from diffusers import StableDiffusion3Pipeline
from peft import LoraConfig, get_peft_model_state_dict
from peft.utils import set_peft_model_state_dict


def dynamic_import_training_classes(project_root: str):
    """从 train_rectified_noise.py 动态导入 RectifiedNoiseModule 和 SD3WithRectifiedNoise"""
    sys.path.insert(0, project_root)
    try:
        import train_rectified_noise as trn
        return trn.RectifiedNoiseModule, trn.SD3WithRectifiedNoise
    except Exception as e:
        raise ImportError(f"Failed to import classes from train_rectified_noise.py: {e}")

def create_npz_from_sample_folder(sample_dir, num_samples):
    """
    从样本文件夹构建单个.npz文件,保持与sample_ddp_new相同的格式
    """
    samples = []
    actual_files = []
    
    # Collect all PNG files
    for filename in sorted(os.listdir(sample_dir)):
        if filename.endswith('.png'):
            actual_files.append(filename)
    
    # Process samples up to the requested count
    for i in tqdm(range(min(num_samples, len(actual_files))), desc="Building .npz file from samples"):
        if i < len(actual_files):
            sample_path = os.path.join(sample_dir, actual_files[i])
            sample_pil = Image.open(sample_path)
            sample_np = np.asarray(sample_pil).astype(np.uint8)
            samples.append(sample_np)
        else:
            # Not enough samples: pad with a blank image (unreachable as long as the loop is capped by min() above)
            sample_np = np.zeros((512, 512, 3), dtype=np.uint8)
            samples.append(sample_np)
    
    if samples:
        samples = np.stack(samples)
        npz_path = f"{sample_dir}.npz"
        np.savez(npz_path, arr_0=samples)
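        # The archive can be read back later (e.g. for evaluation) with
        # np.load(npz_path)["arr_0"], yielding a uint8 array of shape [N, H, W, 3].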
        print(f"Saved .npz file to {npz_path} [shape={samples.shape}].")
        return npz_path
    else:
        print("No samples found to create npz file.")
        return None


def get_existing_sample_count(sample_dir):
    """获取已存在的样本数量和最大索引"""
    if not os.path.exists(sample_dir):
        return 0, -1
    
    existing_files = []
    for filename in os.listdir(sample_dir):
        if filename.endswith('.png') and filename[:-4].isdigit():
            try:
                idx = int(filename[:-4])
                existing_files.append(idx)
            except ValueError:
                continue
    
    if not existing_files:
        return 0, -1
    
    existing_files.sort()
    max_index = existing_files[-1]
    count = len(existing_files)
    
    # Check for missing files (indices 0..max_index should be contiguous)
    expected_count = max_index + 1
    if count < expected_count:
        print(f"Warning: Found {count} files but expected {expected_count} (missing some indices)")
    
    return count, max_index



def load_sit_weights(rectified_module, weights_path: str, rank=0):
    """加载 Rectified Noise(SIT) 权重,支持 .safetensors / .bin / .pt
    支持以下目录结构:
    - weights_path/pytorch_sit_weights.safetensors (直接在主目录)
    - weights_path/sit_weights/pytorch_sit_weights.safetensors (在sit_weights子目录)
    """
    if os.path.isdir(weights_path):
        # Search the main directory first, then the sit_weights subdirectory
        search_paths = [
            weights_path,  # main directory
            os.path.join(weights_path, "sit_weights"),  # sit_weights subdirectory
        ]
        
        for search_dir in search_paths:
            if not os.path.exists(search_dir):
                continue
                
            # Prefer safetensors
            st_path = os.path.join(search_dir, "pytorch_sit_weights.safetensors")
            if os.path.exists(st_path):
                try:
                    from safetensors.torch import load_file
                    if rank == 0:
                        print(f"Loading rectified weights from: {st_path}")
                    state = load_file(st_path)
                    missing_keys, unexpected_keys = rectified_module.load_state_dict(state, strict=False)
                    if rank == 0:
                        print(f"  Loaded rectified weights: {len(state)} keys")
                        if missing_keys:
                            print(f"  Missing keys: {len(missing_keys)}")
                        if unexpected_keys:
                            print(f"  Unexpected keys: {len(unexpected_keys)}")
                    return True
                except Exception as e:
                    if rank == 0:
                        print(f"  Failed to load from {st_path}: {e}")
                    continue
            
            # Then look for .bin / .pt files
            for name in ["pytorch_sit_weights.bin", "pytorch_sit_weights.pt", "sit_weights.pt", "sit.pt"]:
                cand = os.path.join(search_dir, name)
                if os.path.exists(cand):
                    try:
                        if rank == 0:
                            print(f"Loading rectified weights from: {cand}")
                        state = torch.load(cand, map_location="cpu")
                        missing_keys, unexpected_keys = rectified_module.load_state_dict(state, strict=False)
                        if rank == 0:
                            print(f"  Loaded rectified weights: {len(state)} keys")
                            if missing_keys:
                                print(f"  Missing keys: {len(missing_keys)}")
                            if unexpected_keys:
                                print(f"  Unexpected keys: {len(unexpected_keys)}")
                        return True
                    except Exception as e:
                        if rank == 0:
                            print(f"  Failed to load from {cand}: {e}")
                        continue
            
            # Fallback: any .pt / .bin file in the directory
            try:
                for fn in os.listdir(search_dir):
                    if fn.endswith((".pt", ".bin")):
                        cand = os.path.join(search_dir, fn)
                        try:
                            if rank == 0:
                                print(f"Loading rectified weights from: {cand}")
                            state = torch.load(cand, map_location="cpu")
                            missing_keys, unexpected_keys = rectified_module.load_state_dict(state, strict=False)
                            if rank == 0:
                                print(f"  Loaded rectified weights: {len(state)} keys")
                            return True
                        except Exception as e:
                            if rank == 0:
                                print(f"  Failed to load from {cand}: {e}")
                            continue
            except Exception:
                pass
        
        if rank == 0:
            print(f"  ❌ No rectified weights found in {weights_path} or {os.path.join(weights_path, 'sit_weights')}")
        return False
    else:
        # Direct file path
        try:
            if rank == 0:
                print(f"Loading rectified weights from file: {weights_path}")
            if weights_path.endswith(".safetensors"):
                from safetensors.torch import load_file
                state = load_file(weights_path)
            else:
                state = torch.load(weights_path, map_location="cpu")
            missing_keys, unexpected_keys = rectified_module.load_state_dict(state, strict=False)
            if rank == 0:
                print(f"  Loaded rectified weights: {len(state)} keys")
                if missing_keys:
                    print(f"  Missing keys: {len(missing_keys)}")
                if unexpected_keys:
                    print(f"  Unexpected keys: {len(unexpected_keys)}")
            return True
        except Exception as e:
            if rank == 0:
                print(f"  ❌ Failed to load rectified weights from {weights_path}: {e}")
            return False


def check_lora_weights_exist(lora_path):
    """检查LoRA权重文件是否存在"""
    if not lora_path:
        return False
    
    if os.path.isdir(lora_path):
        # Check whether the directory contains pytorch_lora_weights.safetensors
        weight_file = os.path.join(lora_path, "pytorch_lora_weights.safetensors")
        if os.path.exists(weight_file):
            return True
        # Check for any other .safetensors file with "lora" in its name
        for file in os.listdir(lora_path):
            if file.endswith(".safetensors") and "lora" in file.lower():
                return True
        return False
    elif os.path.isfile(lora_path):
        return lora_path.endswith(".safetensors")
    
    return False


def load_lora_from_checkpoint(pipeline, checkpoint_path, rank=0, lora_rank=64):
    """
    从accelerator checkpoint目录加载LoRA权重或完整模型权重
    如果checkpoint包含完整的模型权重(合并后的),直接加载
    如果只包含LoRA权重,则按LoRA方式加载
    """
    if rank == 0:
        print(f"Loading weights from accelerator checkpoint: {checkpoint_path}")
    
    try:
        from safetensors.torch import load_file
        model_file = os.path.join(checkpoint_path, "model.safetensors")
        if not os.path.exists(model_file):
            if rank == 0:
                print(f"Model file not found: {model_file}")
            return False
        
        # Load the state dict
        state_dict = load_file(model_file)
        all_keys = list(state_dict.keys())
        
        # Detect the checkpoint type:
        # 1. Contains base_layer keys (PEFT format, needs merging)
        # 2. Contains full model weights (already merged, directly usable)
        # 3. Contains only LoRA weights (needs a LoRA adapter)
        lora_keys = [k for k in all_keys if 'lora' in k.lower() and 'transformer' in k.lower()]
        base_layer_keys = [k for k in all_keys if 'base_layer' in k.lower() and 'transformer' in k.lower()]
        non_lora_transformer_keys = [k for k in all_keys if 'lora' not in k.lower() and 'base_layer' not in k.lower() and 'transformer' in k.lower()]
        
        if rank == 0:
            print(f"Checkpoint analysis:")
            print(f"  Total keys: {len(all_keys)}")
            print(f"  LoRA keys: {len(lora_keys)}")
            print(f"  Base layer keys: {len(base_layer_keys)}")
            print(f"  Direct transformer weight keys (merged): {len(non_lora_transformer_keys)}")
        
        # If base_layer keys are present, this is the PEFT format and base_layer + lora must be merged
        if len(base_layer_keys) > 0:
            if rank == 0:
                print(f"✓ Detected PEFT format (base_layer + LoRA), merging weights...")
            
            # Merge the base_layer and lora weights
            merged_state_dict = {}
            
            # First collect all modules that need merging
            modules_to_merge = {}
            # Record all non-LoRA transformer weight keys (for debugging)
            non_lora_keys_found = []
            
            for key in all_keys:
                # Strip prefixes
                new_key = key
                has_transformer_prefix = False
                
                if key.startswith('base_model.model.transformer.'):
                    new_key = key[len('base_model.model.transformer.'):]
                    has_transformer_prefix = True
                elif key.startswith('model.transformer.'):
                    new_key = key[len('model.transformer.'):]
                    has_transformer_prefix = True
                elif key.startswith('transformer.'):
                    new_key = key[len('transformer.'):]
                    has_transformer_prefix = True
                elif 'transformer' in key.lower():
                    # May lack a prefix but still contain "transformer" (e.g. directly transformer_blocks.0...)
                    has_transformer_prefix = True
                
                if not has_transformer_prefix:
                    continue
                
                # Check whether this is a base_layer or lora weight
                if '.base_layer.weight' in new_key:
                    # Extract the module name (drop the .base_layer.weight part)
                    module_key = new_key.replace('.base_layer.weight', '.weight')
                    if module_key not in modules_to_merge:
                        modules_to_merge[module_key] = {'base_weight': None, 'base_bias': None, 'lora_A': None, 'lora_B': None}
                    modules_to_merge[module_key]['base_weight'] = (key, state_dict[key])
                elif '.base_layer.bias' in new_key:
                    module_key = new_key.replace('.base_layer.bias', '.bias')
                    if module_key not in modules_to_merge:
                        modules_to_merge[module_key] = {'base_weight': None, 'base_bias': None, 'lora_A': None, 'lora_B': None}
                    modules_to_merge[module_key]['base_bias'] = (key, state_dict[key])
                elif '.lora_A.default.weight' in new_key:
                    module_key = new_key.replace('.lora_A.default.weight', '.weight')
                    if module_key not in modules_to_merge:
                        modules_to_merge[module_key] = {'base_weight': None, 'base_bias': None, 'lora_A': None, 'lora_B': None}
                    modules_to_merge[module_key]['lora_A'] = (key, state_dict[key])
                elif '.lora_B.default.weight' in new_key:
                    module_key = new_key.replace('.lora_B.default.weight', '.weight')
                    if module_key not in modules_to_merge:
                        modules_to_merge[module_key] = {'base_weight': None, 'base_bias': None, 'lora_A': None, 'lora_B': None}
                    modules_to_merge[module_key]['lora_B'] = (key, state_dict[key])
                elif 'lora' not in new_key.lower() and 'base_layer' not in new_key.lower():
                    # Other non-LoRA weights (e.g. pos_embed, time_text_embed, context_embedder) are used as-is.
                    # They are outside the LoRA adaptation scope and should be loaded directly from the checkpoint.
                    merged_state_dict[new_key] = state_dict[key]
                    non_lora_keys_found.append(new_key)
            
            if rank == 0:
                print(f"  Found {len(non_lora_keys_found)} non-LoRA transformer keys in checkpoint")
                if non_lora_keys_found:
                    print(f"  Sample non-LoRA keys: {non_lora_keys_found[:10]}")
            
            # Merge weights: weight = base_weight + lora_B @ lora_A * (alpha / rank)
            if rank == 0:
                print(f"  Merging {len(modules_to_merge)} modules...")
            
            for module_key, weights in modules_to_merge.items():
                # Handle the weight (.weight)
                if weights['base_weight'] is not None:
                    base_key, base_weight = weights['base_weight']
                    base_weight = base_weight.clone()
                    
                    if weights['lora_A'] is not None and weights['lora_B'] is not None:
                        lora_A_key, lora_A = weights['lora_A']
                        lora_B_key, lora_B = weights['lora_B']
                        
                        # Detect rank and alpha
                        # lora_A: [rank, in_features], lora_B: [out_features, rank]
                        rank_value = lora_A.shape[0]
                        alpha = rank_value  # typically alpha == rank, so the scaling factor is 1
                        
                        # Merge: weight = base + (lora_B @ lora_A) * (alpha / rank)
                        # lora_B @ lora_A yields [out_features, in_features]
                        lora_delta = torch.matmul(lora_B, lora_A)
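                        # Illustrative example (hypothetical shapes): with rank 32 on a 1536x1536
                        # linear layer, lora_B is [1536, 32] and lora_A is [32, 1536], so
                        # lora_B @ lora_A is [1536, 1536] and matches base_weight's shape.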
                        
                        if lora_delta.shape == base_weight.shape:
                            merged_weight = base_weight + lora_delta * (alpha / rank_value)
                            merged_state_dict[module_key] = merged_weight
                            if rank == 0 and len(modules_to_merge) <= 20:
                                print(f"  ✓ Merged {module_key}: {base_weight.shape}")
                        else:
                            if rank == 0:
                                print(f"  ⚠️ Shape mismatch for {module_key}: base={base_weight.shape}, lora_delta={lora_delta.shape}, using base only")
                            merged_state_dict[module_key] = base_weight
                    else:
                        # Only the base weight, no LoRA
                        merged_state_dict[module_key] = base_weight
                
                # Handle the bias (.bias) - biases normally need no merging; use base_bias directly
                if '.bias' in module_key and weights['base_bias'] is not None:
                    bias_key, base_bias = weights['base_bias']
                    merged_state_dict[module_key] = base_bias.clone()
            
            if rank == 0:
                print(f"  Merged {len(merged_state_dict)} weights")
                print(f"  Sample merged keys: {list(merged_state_dict.keys())[:5]}")
            
            # Load the merged weights
            try:
                missing_keys, unexpected_keys = pipeline.transformer.load_state_dict(merged_state_dict, strict=False)
                
                if rank == 0:
                    print(f"  Loaded merged weights:")
                    print(f"    Missing keys: {len(missing_keys)}")
                    print(f"    Unexpected keys: {len(unexpected_keys)}")
                    if missing_keys:
                        print(f"    Missing keys: {missing_keys}")
                        # Check whether any missing keys are critical
                        critical_keys = ['pos_embed', 'time_text_embed', 'context_embedder', 'norm_out', 'proj_out']
                        has_critical = any(any(ck in mk for ck in critical_keys) for mk in missing_keys)
                        if has_critical:
                            print(f"    ⚠️ WARNING: Missing critical keys! These should be loaded from pretrained model.")
                            print(f"    The missing keys will use values from the pretrained model (not fine-tuned).")
                
                # Warn if there are too many missing keys or critical components among them
                if len(missing_keys) > 0:
                    # Missing keys fall back to the pretrained model's values.
                    # This is expected: LoRA only adapts some layers, the others stay as-is.
                    if rank == 0:
                        print(f"  Note: Missing keys will use pretrained model weights (not fine-tuned)")
                
                if rank == 0:
                    print(f"  ✓ Successfully loaded merged model weights")
                return True
                
            except Exception as e:
                if rank == 0:
                    print(f"  ❌ Error loading merged weights: {e}")
                    import traceback
                    traceback.print_exc()
                return False
        
        # If there are non-LoRA transformer weights (and no base_layer keys), this is a fully merged model
        elif len(non_lora_transformer_keys) > 0:
            if rank == 0:
                print(f"✓ Detected merged model weights (contains full transformer weights)")
                print(f"  Loading full model weights directly...")
            
            # Extract the transformer-related weights (both LoRA and base weights)
            transformer_state_dict = {}
            for key, value in state_dict.items():
                # Strip any accelerator wrapper prefix
                new_key = key
                if key.startswith('base_model.model.transformer.'):
                    new_key = key[len('base_model.model.transformer.'):]
                elif key.startswith('model.transformer.'):
                    new_key = key[len('model.transformer.'):]
                elif key.startswith('transformer.'):
                    new_key = key[len('transformer.'):]
                
                # Keep only transformer-related weights (all transformer submodules)
                # and exclude everything else (text_encoder, etc.)
                if (new_key.startswith('transformer_blocks') or 
                    new_key.startswith('pos_embed') or 
                    new_key.startswith('time_text_embed') or
                    'lora' in new_key.lower()):  # also include LoRA weights, if any
                    transformer_state_dict[new_key] = value
            
            if rank == 0:
                print(f"  Extracted {len(transformer_state_dict)} transformer weight keys")
                print(f"  Sample keys: {list(transformer_state_dict.keys())[:5]}")
            
            # Load directly into the transformer (without a LoRA adapter)
            try:
                missing_keys, unexpected_keys = pipeline.transformer.load_state_dict(transformer_state_dict, strict=False)
                
                if rank == 0:
                    print(f"  Loaded full model weights:")
                    print(f"    Missing keys: {len(missing_keys)}")
                    print(f"    Unexpected keys: {len(unexpected_keys)}")
                    if missing_keys:
                        print(f"    Sample missing keys: {missing_keys[:5]}")
                    if unexpected_keys:
                        print(f"    Sample unexpected keys: {unexpected_keys[:5]}")
                
                # Too many missing keys suggests the weights were not fully loaded
                if len(missing_keys) > len(transformer_state_dict) * 0.5:
                    if rank == 0:
                        print(f"  ⚠️ WARNING: Too many missing keys, weights may not be fully loaded")
                    return False
                
                if rank == 0:
                    print(f"  ✓ Successfully loaded merged model weights")
                return True
                
            except Exception as e:
                if rank == 0:
                    print(f"  ❌ Error loading full model weights: {e}")
                    import traceback
                    traceback.print_exc()
                return False
        
        # If the checkpoint contains only LoRA weights, load them the original way
        if rank == 0:
            print(f"Detected LoRA-only weights, loading as LoRA adapter...")
        
        # First try to detect the actual rank from the checkpoint
        detected_rank = None
        for key, value in state_dict.items():
            if 'lora_A' in key and 'transformer' in key and len(value.shape) == 2:
                # lora_A has shape [rank, hidden_size]
                detected_rank = value.shape[0]
                if rank == 0:
                    print(f"✓ Detected LoRA rank from checkpoint: {detected_rank} (from key: {key})")
                break
        
        # Use the detected rank if available, otherwise the requested rank
        actual_rank = detected_rank if detected_rank is not None else lora_rank
        if detected_rank is not None and detected_rank != lora_rank:
            if rank == 0:
                print(f"⚠️ Warning: Detected rank ({detected_rank}) differs from requested rank ({lora_rank}), using detected rank")
        
        # If an adapter already exists, unload it first.
        # SD3Transformer2DModel has no delete_adapter method, so use unload_lora_weights instead.
        if hasattr(pipeline.transformer, 'peft_config') and pipeline.transformer.peft_config:
            if "default" in pipeline.transformer.peft_config:
                if rank == 0:
                    print("Removing existing 'default' adapter before adding new one...")
                try:
                    # Use the pipeline's unload_lora_weights method
                    pipeline.unload_lora_weights()
                    if rank == 0:
                        print("Successfully unloaded existing LoRA adapter")
                except Exception as e:
                    if rank == 0:
                        print(f"❌ ERROR: Could not unload existing adapter: {e}")
                        print("Cannot proceed without cleaning up adapter")
                    return False
        
        # Configure the LoRA adapter first (it must be configured before loading),
        # using the detected or requested rank.
        transformer_lora_config = LoraConfig(
            r=actual_rank,
            lora_alpha=actual_rank,
            init_lora_weights="gaussian",
            target_modules=["attn.to_k", "attn.to_q", "attn.to_v", "attn.to_out.0"],
        )
        
        # Add the LoRA adapter to the transformer
        pipeline.transformer.add_adapter(transformer_lora_config)
        
        if rank == 0:
            print(f"LoRA adapter configured with rank={actual_rank}")
        
        # Continue with LoRA weight loading (state_dict was already loaded above)
        
        # Extract the LoRA weights from the accelerator-saved format.
        # In the accelerator checkpoint's model.safetensors, key names may look like:
        # - transformer_blocks.X.attn.to_q.lora_A.default.weight (PEFT format, directly usable)
        # - or carry additional prefixes
        lora_state_dict = {}
        for key, value in state_dict.items():
            if 'lora' in key.lower() and 'transformer' in key.lower():
                # Check the key name format
                new_key = key
                
                # Strip any accelerator wrapper prefix.
                # accelerator may save keys as: model.transformer.transformer_blocks...
                # or: base_model.model.transformer.transformer_blocks...
                if key.startswith('base_model.model.transformer.'):
                    new_key = key[len('base_model.model.transformer.'):]
                elif key.startswith('model.transformer.'):
                    new_key = key[len('model.transformer.'):]
                elif key.startswith('transformer.'):
                    # Strip the 'transformer.' prefix; transformer_blocks is a submodule of the
                    # transformer, so the remaining key is already relative to it.
                    new_key = key[len('transformer.'):]
                
                # Keep only transformer-related LoRA weights
                if 'transformer_blocks' in new_key or 'transformer' in new_key:
                    lora_state_dict[new_key] = value
        
        if not lora_state_dict:
            if rank == 0:
                print("No LoRA weights found in checkpoint")
                # Print all key names for debugging
                all_keys = list(state_dict.keys())
                print(f"Total keys: {len(all_keys)}")
                print(f"First 20 keys: {all_keys[:20]}")
                # Look for keys containing 'lora'
                lora_related = [k for k in all_keys if 'lora' in k.lower()]
                if lora_related:
                    print(f"Keys containing 'lora': {lora_related[:10]}")
            return False
        
        if rank == 0:
            print(f"Found {len(lora_state_dict)} LoRA weight keys")
            sample_keys = list(lora_state_dict.keys())[:5]
            print(f"Sample LoRA keys: {sample_keys}")
        
        # Load the LoRA weights into the transformer.
        # Note: key names extracted from the checkpoint are already in PEFT format
        # (e.g. transformer_blocks.0.attn.to_q.lora_A.default.weight),
        # so no convert_unet_state_dict_to_peft conversion is needed.
        try:
            # Check the key name format
            sample_key = list(lora_state_dict.keys())[0] if lora_state_dict else ""
            
            if rank == 0:
                print(f"Original key format: {sample_key}")
            
            # Key detail: the format expected by set_peft_model_state_dict.
            # Based on back/train_dreambooth_lora.py, the .default suffix must be removed:
            # the expected format is transformer_blocks.X.attn.to_q.lora_A.weight (without .default),
            # while accelerator saves transformer_blocks.X.attn.to_q.lora_A.default.weight (with .default).
            has_default_suffix = '.default.weight' in sample_key or '.default.bias' in sample_key
            
            if rank == 0:
                print(f"Sample key: {sample_key}")
                print(f"Has .default suffix: {has_default_suffix}")
            
            # If key names contain .default.weight or .default.bias, the .default part must be removed,
            # because set_peft_model_state_dict expects lora_A.weight rather than lora_A.default.weight.
            converted_dict = {}
            for key, value in lora_state_dict.items():
                # Strip the .default suffix (if present):
                # transformer_blocks.0.attn.to_q.lora_A.default.weight -> transformer_blocks.0.attn.to_q.lora_A.weight
                new_key = key
                if '.default.weight' in new_key:
                    new_key = new_key.replace('.default.weight', '.weight')
                elif '.default.bias' in new_key:
                    new_key = new_key.replace('.default.bias', '.bias')
                elif '.default' in new_key and (new_key.endswith('.weight') or new_key.endswith('.bias')):
                    # Handle other possible .default positions
                    new_key = new_key.replace('.default', '')
                
                converted_dict[new_key] = value
            
            if rank == 0:
                print(f"Converted {len(converted_dict)} keys (removed .default suffix if present)")
                print(f"Sample converted keys: {list(converted_dict.keys())[:5]}")
            
            # Call set_peft_model_state_dict and check its return value
            incompatible_keys = set_peft_model_state_dict(
                pipeline.transformer, 
                converted_dict, 
                adapter_name="default"
            )
            
            # Check the loading result
            if incompatible_keys is not None:
                missing_keys = getattr(incompatible_keys, "missing_keys", [])
                unexpected_keys = getattr(incompatible_keys, "unexpected_keys", [])
                
                if rank == 0:
                    print(f"LoRA loading result:")
                    print(f"  Missing keys: {len(missing_keys)}")
                    print(f"  Unexpected keys: {len(unexpected_keys)}")
                    
                    if len(missing_keys) > 100:
                        print(f"  ⚠️ WARNING: Too many missing keys ({len(missing_keys)}), LoRA may not be fully loaded!")
                        print(f"  Sample missing keys: {missing_keys[:10]}")
                    elif missing_keys:
                        print(f"  Sample missing keys: {missing_keys[:10]}")
                    
                    if unexpected_keys:
                        print(f"  Unexpected keys: {unexpected_keys[:10]}")
                
                # Too many missing keys means the load failed
                if len(missing_keys) > len(converted_dict) * 0.5:  # more than 50% of keys missing
                    if rank == 0:
                        print("❌ ERROR: Too many missing keys, LoRA weights not loaded correctly!")
                    return False
            else:
                if rank == 0:
                    print("✓ LoRA weights loaded (no incompatible keys reported)")
                    
        except RuntimeError as e:
            # Check for a size-mismatch error
            error_str = str(e)
            if "size mismatch" in error_str:
                if rank == 0:
                    print(f"❌ Size mismatch error: The checkpoint rank doesn't match the adapter rank")
                    print(f"   This usually means the checkpoint was trained with a different rank")
                    # Try to extract the expected rank from the error message
                    import re
                    # Error message format: "copying a param with shape torch.Size([32, 1536]) from checkpoint"
                    match = re.search(r'copying a param with shape torch\.Size\(\[(\d+),', error_str)
                    if match:
                        checkpoint_rank = int(match.group(1))
                        if rank == 0:
                            print(f"   Detected checkpoint rank: {checkpoint_rank}")
                            print(f"   Adapter was configured with rank: {actual_rank}")
                            if checkpoint_rank != actual_rank:
                                print(f"   ⚠️ Mismatch! Need to recreate adapter with rank={checkpoint_rank}")
            else:
                if rank == 0:
                    print(f"❌ Error setting LoRA state dict: {e}")
                    import traceback
                    traceback.print_exc()
            # Clean up the adapter so the next attempt starts fresh
            try:
                pipeline.unload_lora_weights()
            except Exception:
                pass
            return False
        except Exception as e:
            if rank == 0:
                print(f"❌ Error setting LoRA state dict: {e}")
                import traceback
                traceback.print_exc()
            # Clean up the adapter so the next attempt starts fresh
            try:
                pipeline.unload_lora_weights()
            except Exception:
                pass
            return False
        
        # Enable the LoRA adapter
        pipeline.transformer.set_adapter("default")
        
        # Verify that LoRA has been loaded and applied
        if hasattr(pipeline.transformer, 'peft_config'):
            adapters = list(pipeline.transformer.peft_config.keys())
            if rank == 0:
                print(f"LoRA adapters configured: {adapters}")
                # Check whether the adapter is enabled
                if hasattr(pipeline.transformer, 'active_adapters'):
                    # active_adapters may be a method and need to be called
                    try:
                        if callable(pipeline.transformer.active_adapters):
                            active = pipeline.transformer.active_adapters()
                        else:
                            active = pipeline.transformer.active_adapters
                        if rank == 0:
                            print(f"Active adapters: {active}")
                    except Exception:
                        if rank == 0:
                            print("Could not get active adapters, but LoRA is configured")
        
        # Verify that the LoRA weights were actually applied
        # by checking that the LoRA layers' weights are non-zero
        lora_layers_found = 0
        nonzero_lora_layers = 0
        total_lora_weight_sum = 0.0
        
        for name, module in pipeline.transformer.named_modules():
            if 'lora_A' in name or 'lora_B' in name:
                lora_layers_found += 1
                if hasattr(module, 'weight') and module.weight is not None:
                    weight_sum = module.weight.abs().sum().item()
                    total_lora_weight_sum += weight_sum
                    if weight_sum > 1e-6:  # non-zero threshold
                        nonzero_lora_layers += 1
                        if rank == 0 and nonzero_lora_layers <= 3:  # only print the first 3
                            print(f"✓ Found non-zero LoRA weight in: {name}, sum={weight_sum:.6f}")
        
        if rank == 0:
            print(f"LoRA verification:")
            print(f"  Total LoRA layers found: {lora_layers_found}")
            print(f"  Non-zero LoRA layers: {nonzero_lora_layers}")
            print(f"  Total LoRA weight sum: {total_lora_weight_sum:.6f}")
            
            if lora_layers_found == 0:
                print("❌ ERROR: No LoRA layers found in transformer!")
                return False
            elif nonzero_lora_layers == 0:
                print("❌ ERROR: All LoRA weights are zero, LoRA not loaded correctly!")
                return False
            elif nonzero_lora_layers < lora_layers_found * 0.5:
                print(f"⚠️ WARNING: Only {nonzero_lora_layers}/{lora_layers_found} LoRA layers have non-zero weights!")
                print("⚠️ LoRA may not be fully applied!")
            else:
                print(f"✓ LoRA weights verified: {nonzero_lora_layers}/{lora_layers_found} layers have non-zero weights")
        
        if nonzero_lora_layers == 0:
            return False
        
        if rank == 0:
            print("✓ Successfully loaded and verified LoRA weights from checkpoint")
        
        return True
        
    except Exception as e:
        if rank == 0:
            print(f"Error loading LoRA from checkpoint: {e}")
            import traceback
            traceback.print_exc()
        return False


def load_captions_from_jsonl(jsonl_path):
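    """Load caption strings from a JSONL file.

    Each line is expected to be a JSON object carrying the caption in one of the
    fields 'caption', 'text', 'prompt' or 'description', e.g.
    {"caption": "a photo of a cat"} (illustrative example).
    Falls back to a single generic caption if no usable line is found.
    """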
    captions = []
    with open(jsonl_path, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            try:
                data = json.loads(line)
                cap = None
                for field in ['caption', 'text', 'prompt', 'description']:
                    if field in data and isinstance(data[field], str):
                        cap = data[field].strip()
                        break
                if cap:
                    captions.append(cap)
            except Exception:
                continue
    return captions if captions else ["a beautiful high quality image"]


def main(args):
    assert torch.cuda.is_available(), "A GPU is required to run this script"
    dist.init_process_group("nccl")
    rank = dist.get_rank()
    world_size = dist.get_world_size()
    device = rank % torch.cuda.device_count()
    torch.cuda.set_device(device)
    seed = args.global_seed * world_size + rank
    torch.manual_seed(seed)

    print(f"[rank{rank}] DDP initialized, device={device}, seed={seed}, world_size={world_size}")

    # Debug: print the received arguments
    if rank == 0:
        print("=" * 80)
        print("参数检查:")
        print(f"  lora_path: {args.lora_path}")
        print(f"  rectified_weights: {args.rectified_weights}")
        print(f"  lora_path is None: {args.lora_path is None}")
        print(f"  lora_path is empty: {args.lora_path == '' if args.lora_path else 'N/A'}")
        print(f"  rectified_weights is None: {args.rectified_weights is None}")
        print(f"  rectified_weights is empty: {args.rectified_weights == '' if args.rectified_weights else 'N/A'}")
        print("=" * 80)

    # Import the classes from the training script
    RectifiedNoiseModule, SD3WithRectifiedNoise = dynamic_import_training_classes(str(Path(__file__).parent))

    # Load the pipeline
    dtype = torch.float16 if args.mixed_precision == "fp16" else (torch.bfloat16 if args.mixed_precision == "bf16" else torch.float32)
    if rank == 0:
        print(f"Loading SD3 pipeline from {args.pretrained_model_name_or_path} (dtype={dtype})")
    pipeline = StableDiffusion3Pipeline.from_pretrained(
        args.pretrained_model_name_or_path,
        revision=args.revision,
        variant=args.variant,
        torch_dtype=dtype,
    ).to(device)

    print(f"[rank{rank}] Pipeline loaded and moved to device {device}")

    # Load LoRA (optional)
    lora_loaded = False
    if args.lora_path:
        if rank == 0:
            print(f"Attempting to load LoRA weights from: {args.lora_path}")
            print(f"LoRA path exists: {os.path.exists(args.lora_path) if args.lora_path else False}")
        
        # First check whether this is a standard LoRA weight file/directory
        if check_lora_weights_exist(args.lora_path):
            if rank == 0:
                print("Found standard LoRA weights, loading...")
            try:
                # Record a transformer parameter before loading (for verification)
                if rank == 0:
                    sample_param_before = next(iter(pipeline.transformer.parameters())).clone()
                    print(f"Sample transformer param before LoRA (first 5 values): {sample_param_before.flatten()[:5]}")
                
                pipeline.load_lora_weights(args.lora_path)
                lora_loaded = True
                
                # Verify that LoRA was actually loaded
                if rank == 0:
                    sample_param_after = next(iter(pipeline.transformer.parameters())).clone()
                    param_diff = (sample_param_after - sample_param_before).abs().max().item()
                    print(f"Sample transformer param after LoRA (first 5 values): {sample_param_after.flatten()[:5]}")
                    print(f"Max parameter change after LoRA loading: {param_diff}")
                    if param_diff < 1e-6:
                        print("⚠️ WARNING: LoRA weights may not have been applied (parameter change is very small)")
                    else:
                        print("✓ LoRA weights appear to have been applied")
                    
                    # Check whether a peft_config is present
                    if hasattr(pipeline.transformer, 'peft_config'):
                        print(f"✓ PEFT config found: {list(pipeline.transformer.peft_config.keys())}")
                    else:
                        print("⚠️ WARNING: No peft_config found after loading LoRA")
                
                if rank == 0:
                    print("LoRA loaded successfully from standard format.")
            except Exception as e:
                if rank == 0:
                    print(f"Failed to load LoRA from standard format: {e}")
                    import traceback
                    traceback.print_exc()
        
        # If it is not in the standard format, try loading from an accelerator checkpoint
        if not lora_loaded and os.path.isdir(args.lora_path):
            if rank == 0:
                print("Standard LoRA weights not found, trying accelerator checkpoint format...")
            
            # First try to detect the actual rank from the checkpoint's model.safetensors
            # by inspecting the shapes of the LoRA weights.
            detected_rank = None
            try:
                from safetensors.torch import load_file
                model_file = os.path.join(args.lora_path, "model.safetensors")
                if os.path.exists(model_file):
                    state_dict = load_file(model_file)
                    # Find one LoRA weight to determine the rank
                    for key, value in state_dict.items():
                        if 'lora_A' in key and 'transformer' in key and len(value.shape) == 2:
                            # lora_A has shape [rank, hidden_size]
                            detected_rank = value.shape[0]
                            if rank == 0:
                                print(f"✓ Detected LoRA rank from checkpoint: {detected_rank} (from key: {key})")
                            break
            except Exception as e:
                if rank == 0:
                    print(f"Could not detect rank from checkpoint: {e}")
            
            # Build the list of ranks to try.
            # If a rank was detected, use it and try only once;
            # otherwise try common rank values.
            if detected_rank is not None:
                rank_list = [detected_rank]
                if rank == 0:
                    print(f"Using detected rank: {detected_rank}")
            else:
                # Detection failed: try common rank values (the user-specified rank first)
                rank_list = []
                # If the user specified a rank (args.lora_rank), try it first
                if hasattr(args, 'lora_rank') and args.lora_rank:
                    rank_list.append(args.lora_rank)
                # Add other common rank values
                for r in [32, 64, 16, 128]:
                    if r not in rank_list:
                        rank_list.append(r)
                if rank == 0:
                    print(f"Rank detection failed, will try ranks in order: {rank_list}")
            
            # Try the different rank values
            for lora_rank in rank_list:
                # Clean up any existing adapter before trying a new rank.
                # Important: clean up before every attempt, otherwise the adapter keeps the previous rank configuration.
                if hasattr(pipeline.transformer, 'peft_config') and pipeline.transformer.peft_config:
                    if "default" in pipeline.transformer.peft_config:
                        try:
                            # Use the pipeline's unload_lora_weights method
                            pipeline.unload_lora_weights()
                            if rank == 0:
                                print(f"Cleaned up existing adapter before trying rank={lora_rank}")
                        except Exception as e:
                            if rank == 0:
                                print(f"Warning: Could not unload adapter: {e}")
                                print("⚠️ WARNING: Cannot unload adapter, will recreate pipeline...")
                            # Unloading failed: recreate the pipeline as a last resort.
                            # Note: every rank must recreate it, not only rank 0.
                            try:
                                pipeline = StableDiffusion3Pipeline.from_pretrained(
                                    args.pretrained_model_name_or_path,
                                    revision=args.revision,
                                    variant=args.variant,
                                    torch_dtype=dtype,
                                ).to(device)
                                if rank == 0:
                                    print("Pipeline recreated to clear adapter state")
                            except Exception as e2:
                                if rank == 0:
                                    print(f"Failed to recreate pipeline: {e2}")
                
                if rank == 0:
                    print(f"Trying to load with LoRA rank={lora_rank}...")
                lora_loaded = load_lora_from_checkpoint(pipeline, args.lora_path, rank=rank, lora_rank=lora_rank)
                if lora_loaded:
                    if rank == 0:
                        print(f"✓ Successfully loaded LoRA with rank={lora_rank}")
                    break
                elif rank == 0:
                    print(f"✗ Failed to load with rank={lora_rank}, trying next rank...")
        
        # If loading from the checkpoint directory failed, try standard LoRA weights from the root of the output directory
        if not lora_loaded and os.path.isdir(args.lora_path):
            # Check the root of the output directory (the checkpoint's parent directory)
            output_dir = os.path.dirname(args.lora_path.rstrip('/'))
            if output_dir and os.path.exists(output_dir):
                if rank == 0:
                    print(f"Trying to load standard LoRA weights from output directory: {output_dir}")
                if check_lora_weights_exist(output_dir):
                    try:
                        pipeline.load_lora_weights(output_dir)
                        lora_loaded = True
                        if rank == 0:
                            print("LoRA loaded successfully from output directory.")
                    except Exception as e:
                        if rank == 0:
                            print(f"Failed to load LoRA from output directory: {e}")
        
        if not lora_loaded:
            if rank == 0:
                print(f"⚠️ WARNING: Failed to load LoRA weights from {args.lora_path}, using baseline model")
        else:
            # Final verification that LoRA is actually enabled
            if rank == 0:
                print("=" * 80)
                print("LoRA 加载验证:")
                if hasattr(pipeline.transformer, 'peft_config') and pipeline.transformer.peft_config:
                    print(f"  ✓ PEFT config exists: {list(pipeline.transformer.peft_config.keys())}")
                    # Check the LoRA layers' weights
                    lora_layers_found = 0
                    for name, module in pipeline.transformer.named_modules():
                        if 'lora_A' in name or 'lora_B' in name:
                            lora_layers_found += 1
                            if lora_layers_found <= 3:  # only print the first 3
                                if hasattr(module, 'weight'):
                                    weight_sum = module.weight.abs().sum().item() if module.weight is not None else 0
                                    print(f"  ✓ Found LoRA layer: {name}, weight_sum={weight_sum:.6f}")
                    print(f"  ✓ Total LoRA layers found: {lora_layers_found}")
                    if lora_layers_found == 0:
                        print("  ⚠️ WARNING: No LoRA layers found in transformer!")
                else:
                    print("  ⚠️ WARNING: No PEFT config found - LoRA may not be active!")
                print("=" * 80)

    # Build the RectifiedNoiseModule and load its weights (only when rectified_weights is provided).
    # Safely check whether rectified_weights is valid.
    use_rectified = False
    rectified_weights_path = None
    if args.rectified_weights:
        rectified_weights_str = str(args.rectified_weights).strip()
        if rectified_weights_str:
            use_rectified = True
            rectified_weights_path = rectified_weights_str
    
    if rank == 0:
        print(f"use_rectified: {use_rectified}, rectified_weights_path: {rectified_weights_path}")
    
    if use_rectified:
        if rank == 0:
            print(f"Using Rectified Noise module with weights from: {rectified_weights_path}")
        print(f"[rank{rank}] RectifiedNoiseModule configuration: num_sit_layers={args.num_sit_layers}")
        
        # Infer the required sizes from the transformer config
        tfm = pipeline.transformer
        if hasattr(tfm.config, 'joint_attention_dim') and tfm.config.joint_attention_dim is not None:
            sit_hidden_size = tfm.config.joint_attention_dim
        elif hasattr(tfm.config, 'inner_dim') and tfm.config.inner_dim is not None:
            sit_hidden_size = tfm.config.inner_dim
        elif hasattr(tfm.config, 'hidden_size') and tfm.config.hidden_size is not None:
            sit_hidden_size = tfm.config.hidden_size
        else:
            sit_hidden_size = 4096

        transformer_hidden_size = getattr(tfm.config, 'hidden_size', 1536)
        num_attention_heads = getattr(tfm.config, 'num_attention_heads', 32)
        input_dim = getattr(tfm.config, 'in_channels', 16)

        rectified_module = RectifiedNoiseModule(
            hidden_size=sit_hidden_size,
            num_sit_layers=args.num_sit_layers,
            num_attention_heads=num_attention_heads,
            input_dim=input_dim,
            transformer_hidden_size=transformer_hidden_size,
        )
        # Load the SIT weights
        ok = load_sit_weights(rectified_module, rectified_weights_path, rank=rank)
        if rank == 0:
            if not ok:
                print("⚠️ Warning: Failed to load rectified weights, will use baseline model without rectified noise")
            else:
                print("✓ Successfully loaded rectified noise weights")

        # Assemble SD3WithRectifiedNoise.
        # Key point: SD3WithRectifiedNoise keeps a reference to the transformer,
        # but its __init__ freezes the transformer parameters.
        # This should not affect LoRA, since LoRA is added as an adapter rather than as original parameters.
        # We just need the LoRA adapter loaded and enabled before creating SD3WithRectifiedNoise.
        if lora_loaded and rank == 0:
            print("Creating SD3WithRectifiedNoise with LoRA-enabled transformer...")
        elif rank == 0:
            print("Creating SD3WithRectifiedNoise...")
        
        model = SD3WithRectifiedNoise(pipeline.transformer, rectified_module).to(device)
        
        # Important: SD3WithRectifiedNoise.__init__ freezes the transformer parameters,
        # but the LoRA adapters should still take effect because they are separate modules.
        # Make sure the LoRA adapters remain accessible after wrapping.
        
        # Make sure the LoRA adapters stay enabled after the model is wrapped
        if lora_loaded:
            # Access via model.transformer, since SD3WithRectifiedNoise wraps the transformer
            if hasattr(model.transformer, 'peft_config'):
                try:
                    # Make sure the adapter is enabled; use the registered adapter name instead of
                    # a hard-coded one (PEFT may register it as e.g. "default" or "default_0")
                    adapter_name = next(iter(model.transformer.peft_config.keys()))
                    model.transformer.set_adapter(adapter_name)
                    
                    # Verify that the LoRA weights still exist after wrapping
                    lora_layers_after_wrap = 0
                    nonzero_after_wrap = 0
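                    # Caveat: PEFT zero-initializes lora_B by default, so an all-zero lora_B can
                    # also mean the checkpoint was never applied rather than "broken" LoRA; the
                    # non-zero count below is therefore only a heuristic sanity check.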
                    for name, module in model.transformer.named_modules():
                        if 'lora_A' in name or 'lora_B' in name:
                            lora_layers_after_wrap += 1
                            if hasattr(module, 'weight') and module.weight is not None:
                                if module.weight.abs().sum().item() > 1e-6:
                                    nonzero_after_wrap += 1
                    
                    if rank == 0:
                        print(f"LoRA after SD3WithRectifiedNoise wrapping:")
                        print(f"  LoRA layers: {lora_layers_after_wrap}, Non-zero: {nonzero_after_wrap}")
                        if nonzero_after_wrap == 0:
                            print("  ❌ ERROR: All LoRA weights are zero after wrapping!")
                        elif nonzero_after_wrap < lora_layers_after_wrap * 0.5:
                            print(f"  ⚠️ WARNING: Only {nonzero_after_wrap}/{lora_layers_after_wrap} LoRA layers have weights!")
                        else:
                            print(f"  ✓ LoRA weights preserved after wrapping")
                    
                    # Verify that the adapter is actually enabled
                    if hasattr(model.transformer, 'active_adapters'):
                        try:
                            if callable(model.transformer.active_adapters):
                                active = model.transformer.active_adapters()
                            else:
                                active = model.transformer.active_adapters
                            if rank == 0:
                                print(f"  Active adapters: {active}")
                        except Exception:
                            if rank == 0:
                                print("  LoRA adapter re-enabled after model wrapping")
                    else:
                        if rank == 0:
                            print("  LoRA adapter re-enabled after model wrapping")
                except Exception as e:
                    if rank == 0:
                        print(f"❌ ERROR: Could not re-enable LoRA adapter: {e}")
                        import traceback
                        traceback.print_exc()
            else:
                # The LoRA weights were merged into the transformer's base weights (merged-loading path).
                # Having no peft_config is normal in this case, because the LoRA is already merged.
                if rank == 0:
                    print("LoRA loaded via merged weights (no PEFT adapter needed)")
                    print("  ✓ LoRA weights are already merged into transformer base weights")
                    print("  Note: This is expected when loading from merged checkpoint format")
        
        # Register on the pipeline (pipeline_stable_diffusion_3.py already supports an external model)
        pipeline.model = model
        
        # Make sure the model is in eval mode (LoRA should still work in eval mode)
        model.eval()
        model.transformer.eval()  # make sure the transformer is also in eval mode
    else:
        if rank == 0:
            print("Not using Rectified Noise module, using baseline SD3 pipeline")
        # Do not use SD3WithRectifiedNoise; keep the original pipeline.
        # pipeline.model remains the original transformer.
    
    # Key point: make sure the LoRA adapters are actually used at inference time.
    # A PEFT model in eval mode should enable its LoRA adapters automatically, but verify it explicitly.
    if lora_loaded:
        # Grab the correct transformer reference
        transformer_ref = model.transformer if use_rectified else pipeline.transformer
        
        # Make sure the transformer's LoRA adapter is enabled
        if hasattr(transformer_ref, 'set_adapter') and getattr(transformer_ref, 'peft_config', None):
            try:
                # Use the registered adapter name rather than a hard-coded one
                transformer_ref.set_adapter(next(iter(transformer_ref.peft_config.keys())))
            except Exception:
                pass
        
        # Verify that LoRA will actually be used
        if rank == 0:
            # Check the weights of a single LoRA layer
            lora_found = False
            for name, module in transformer_ref.named_modules():
                if 'lora_A' in name and 'default' in name and hasattr(module, 'weight'):
                    if module.weight is not None:
                        weight_sum = module.weight.abs().sum().item()
                        if weight_sum > 0:
                            print(f"✓ Verified LoRA weight in {name}: sum={weight_sum:.6f}")
                            lora_found = True
                            break
            
            if not lora_found:
                print("⚠ Warning: Could not verify LoRA weights in model")
            else:
                # Extra check: verify that the LoRA layers will actually be invoked
                # by inspecting a single LoRA Linear layer
                for name, module in transformer_ref.named_modules():
                    if 'lora' in module.__class__.__name__.lower():
                        if hasattr(module, 'lora_enabled'):
                            print(f"✓ Found LoRA layer {name}, enabled: {module.lora_enabled}")
                        break
                
            print("Model set to eval mode, LoRA should be active during inference")

    # Enable memory-optimization options
    if args.enable_attention_slicing:
        if rank == 0:
            print("Enabling attention slicing to save memory")
        pipeline.enable_attention_slicing()
    
    if args.enable_vae_slicing:
        if rank == 0:
            print("Enabling VAE slicing to save memory")
        pipeline.enable_vae_slicing()
    
    if args.enable_cpu_offload:
        if rank == 0:
            print("Enabling CPU offload to save memory")
        pipeline.enable_model_cpu_offload()
    
    # Disable the progress bar to reduce log output
    pipeline.set_progress_bar_config(disable=True)

    # Load the captions
    captions = load_captions_from_jsonl(args.captions_jsonl)
    total_images_needed = min(len(captions) * args.images_per_caption, args.max_samples)

    # Output directory
    if rank == 0:
        os.makedirs(args.sample_dir, exist_ok=True)
    dist.barrier()

    # Check how many samples already exist
    existing_count, max_existing_index = get_existing_sample_count(args.sample_dir)
    if rank == 0:
        print(f"Found {existing_count} existing samples, max index: {max_existing_index}")
    
    # Adjust the number of samples that still need to be generated
    remaining_images_needed = max(0, total_images_needed - existing_count)
    if remaining_images_needed == 0:
        if rank == 0:
            print("All required samples already exist. Skipping generation.")
            print(f"Creating npz from existing samples...")
            create_npz_from_sample_folder(args.sample_dir, total_images_needed)
        return

    if rank == 0:
        print(f"Need to generate {remaining_images_needed} more samples (total needed: {total_images_needed})")

    n = args.per_proc_batch_size
    global_batch = n * world_size
    total_samples = int(math.ceil(remaining_images_needed / global_batch) * global_batch)
    assert total_samples % world_size == 0
    samples_per_gpu = total_samples // world_size
    assert samples_per_gpu % n == 0
    iterations = samples_per_gpu // n
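    # Worked example: with world_size=8, per_proc_batch_size=2 and remaining_images_needed=1000,
    # global_batch=16, total_samples=ceil(1000/16)*16=1008, samples_per_gpu=126, iterations=63.
    # The 8 padded samples are filtered out later by the `global_index < total_images_needed` check.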

    if rank == 0:
        print(f"Sampling remaining={remaining_images_needed}, total_samples={total_samples}, per_gpu={samples_per_gpu}, iterations={iterations}")

    pbar = tqdm(range(iterations)) if rank == 0 else range(iterations)
    saved = 0

    autocast_device = "cuda" if torch.cuda.is_available() else "cpu"
    for it in pbar:
        if rank == 0 and it % 10 == 0:
            print(f"[rank{rank}] Sampling iteration {it}/{iterations}")
        batch_prompts = []
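        # Indices are interleaved across ranks: sample idx = it * global_batch + j * world_size + rank,
        # e.g. with world_size=4 and n=2, rank 1 handles indices 1 and 5 in iteration 0.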
        for j in range(n):
            idx = it * global_batch + j * world_size + rank
            if idx < remaining_images_needed:
                cap_idx = idx // args.images_per_caption
                batch_prompts.append(captions[cap_idx])
            else:
                batch_prompts.append("a beautiful high quality image")

        with torch.autocast(autocast_device, dtype=dtype):
            images = []
            for k, prompt in enumerate(batch_prompts):
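                # Per-image seed: unique per (iteration, image-in-batch, rank) as long as
                # per_proc_batch_size <= 10 and world_size <= 1000, so different ranks never
                # reuse the same generator state.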
                image_seed = seed + it * 10000 + k * 1000 + rank
                generator = torch.Generator(device=device).manual_seed(image_seed)
                img = pipeline(
                    prompt=prompt,
                    height=args.height,
                    width=args.width,
                    num_inference_steps=args.num_inference_steps,
                    guidance_scale=args.guidance_scale,
                    generator=generator,
                    num_images_per_prompt=1,
                ).images[0]
                images.append(img)

        # Save the images
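        # File names are offset by `existing_count`, so a resumed run continues the numbering
        # of an earlier, partially completed run instead of overwriting its images.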
        out_dir = Path(args.sample_dir)
        if rank == 0 and it == 0:
            print(f"Saving pngs to: {out_dir}")
        for j, img in enumerate(images):
            global_index = it * global_batch + j * world_size + rank + existing_count  # offset by the number of existing samples
            if global_index < total_images_needed:
                filename = f"{global_index:07d}.png"
                img.save(out_dir / filename)
                saved += 1
        dist.barrier()

    if rank == 0:
        print(f"Done. Saved {saved * world_size} images in total.")
        actual_num_samples = len([name for name in os.listdir(args.sample_dir) if name.endswith(".png")])
        print(f"Actually generated {actual_num_samples} images")
        npz_samples = min(actual_num_samples, total_images_needed)
        print(f"[rank{rank}] Creating npz from sample folder: {args.sample_dir}, npz_samples={npz_samples}")
        create_npz_from_sample_folder(args.sample_dir, npz_samples)
        print("Done creating npz.")
        print("Done.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="SD3 LoRA + RectifiedNoise distributed sampling script")
    # Model
    parser.add_argument("--pretrained_model_name_or_path", type=str, required=True)
    parser.add_argument("--revision", type=str, default=None)
    parser.add_argument("--variant", type=str, default=None)
    # LoRA and Rectified
    parser.add_argument("--lora_path", type=str, default=None, help="LoRA 权重路径(文件或目录)")
    parser.add_argument("--rectified_weights", type=str, default=None, help="Rectified(SIT) 权重路径(文件或目录)")
    parser.add_argument("--num_sit_layers", type=int, default=1, help="与训练一致的 SIT 层数")
    # Sampling
    parser.add_argument("--num_inference_steps", type=int, default=28)
    parser.add_argument("--guidance_scale", type=float, default=7.0)
    parser.add_argument("--height", type=int, default=1024)
    parser.add_argument("--width", type=int, default=1024)
    parser.add_argument("--per_proc_batch_size", type=int, default=1)
    parser.add_argument("--images_per_caption", type=int, default=1)
    parser.add_argument("--max_samples", type=int, default=10000)
    parser.add_argument("--captions_jsonl", type=str, required=True)
    parser.add_argument("--sample_dir", type=str, default="sd3_rectified_samples")
    parser.add_argument("--global_seed", type=int, default=42)
    parser.add_argument("--mixed_precision", type=str, default="fp16", choices=["no", "fp16", "bf16"])
    # Memory-optimization options
    parser.add_argument("--enable_attention_slicing", action="store_true", help="启用 attention slicing 以节省显存")
    parser.add_argument("--enable_vae_slicing", action="store_true", help="启用 VAE slicing 以节省显存")
    parser.add_argument("--enable_cpu_offload", action="store_true", help="启用 CPU offload 以节省显存")

    args = parser.parse_args()
    main(args)