recoilme committed (verified)
Commit 95332d0 · Parent(s): 441aa4c

Upload folder using huggingface_hub

Files changed (1): train_vae_fdl.py (+624, -0)
train_vae_fdl.py ADDED
@@ -0,0 +1,624 @@
# -*- coding: utf-8 -*-
import os
import math
import re
import torch
import numpy as np
import random
import gc
from datetime import datetime
from pathlib import Path

import torchvision.transforms as transforms
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
from torch.optim.lr_scheduler import LambdaLR
from diffusers import AutoencoderKL, AsymmetricAutoencoderKL
# QWEN: import the Qwen-Image VAE class
from diffusers import AutoencoderKLQwenImage
from diffusers import AutoencoderKLWan

from accelerate import Accelerator
from PIL import Image, UnidentifiedImageError
from tqdm import tqdm
import bitsandbytes as bnb
import wandb
import lpips  # pip install lpips
from FDL_pytorch import FDL_loss  # pip install fdl-pytorch
from collections import deque

# --------------------------- Parameters ---------------------------
ds_path = "/workspace/d23"
project = "vae9"
batch_size = 1
base_learning_rate = 6e-6
min_learning_rate = 7e-7
num_epochs = 4
sample_interval_share = 25
use_wandb = True
save_model = True
use_decay = True
optimizer_type = "adam8bit"
dtype = torch.float32

model_resolution = 384  # 288
high_resolution = 768  # 576
limit = 0
save_barrier = 1.3
warmup_percent = 0.005
percentile_clipping = 99
beta2 = 0.997
eps = 1e-8
clip_grad_norm = 1.0
mixed_precision = "no"
gradient_accumulation_steps = 1
generated_folder = "samples"
save_as = "vae10"
num_workers = 0
device = None
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
# Enable Flash Attention 2 / SDPA  (MAX_JOBS=4 pip install flash-attn --no-build-isolation)
torch.backends.cuda.enable_flash_sdp(True)
torch.backends.cuda.enable_mem_efficient_sdp(True)
torch.backends.cuda.enable_math_sdp(False)

# --- Training modes ---
# QWEN: train only the decoder
train_decoder_only = True
train_up_only = False
full_training = False  # if True, train the whole VAE and add the KL term (see below)
kl_ratio = 0.00

# Target shares of each loss term
loss_ratios = {
    "lpips": 0.70,  # 0.50
    "fdl":   0.10,  # 0.25
    "edge":  0.05,
    "mse":   0.10,
    "mae":   0.05,
    "kl":    0.00,  # enabled when full_training=True
}
median_coeff_steps = 250
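# Note: loss_ratios are target shares, not raw weights. MedianLossNormalizer
# (defined below) divides each share by the running median of that loss over
# the last median_coeff_steps steps, so every term contributes roughly its
# share of the total regardless of its natural scale:
#   coeff_k = ratio_k / median(|loss_k|),   total = sum_k coeff_k * loss_k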

resize_long_side = 1280  # resize the long side of the source images

# QWEN: model-loading config
vae_kind = "kl"  # "qwen", "wan", or "kl" (regular)

Path(generated_folder).mkdir(parents=True, exist_ok=True)

accelerator = Accelerator(
    mixed_precision=mixed_precision,
    gradient_accumulation_steps=gradient_accumulation_steps
)
device = accelerator.device

# reproducibility
seed = int(datetime.now().strftime("%Y%m%d")) + 13
torch.manual_seed(seed); np.random.seed(seed); random.seed(seed)
torch.backends.cudnn.benchmark = False

# --------------------------- WandB ---------------------------
if use_wandb and accelerator.is_main_process:
    wandb.init(project=project, config={
        "batch_size": batch_size,
        "base_learning_rate": base_learning_rate,
        "num_epochs": num_epochs,
        "optimizer_type": optimizer_type,
        "model_resolution": model_resolution,
        "high_resolution": high_resolution,
        "gradient_accumulation_steps": gradient_accumulation_steps,
        "train_decoder_only": train_decoder_only,
        "full_training": full_training,
        "kl_ratio": kl_ratio,
        "vae_kind": vae_kind,
    })

# --------------------------- VAE ---------------------------
def get_core_model(model):
    m = model
    # if the model is already wrapped by torch.compile
    if hasattr(m, "_orig_mod"):
        m = m._orig_mod
    return m

def is_video_vae(model) -> bool:
    # WAN/Qwen are video VAEs
    if vae_kind in ("wan", "qwen"):
        return True
    # structural fallback (in case it is ever needed)
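    # (video VAEs use 3D convolutions, so encoder.conv_in.weight is 5-D:
    # [out_ch, in_ch, T, H, W] instead of the 4-D weight of a 2D conv)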
    try:
        core = get_core_model(model)
        enc = getattr(core, "encoder", None)
        conv_in = getattr(enc, "conv_in", None)
        w = getattr(conv_in, "weight", None)
        if isinstance(w, torch.nn.Parameter):
            return w.ndim == 5
    except Exception:
        pass
    return False

# model loading
if vae_kind == "qwen":
    vae = AutoencoderKLQwenImage.from_pretrained("Qwen/Qwen-Image", subfolder="vae")
elif vae_kind == "wan":
    vae = AutoencoderKLWan.from_pretrained(project)
else:
    # legacy behavior (example)
    if model_resolution == high_resolution:
        vae = AutoencoderKL.from_pretrained(project)
    else:
        vae = AsymmetricAutoencoderKL.from_pretrained(project)

vae = vae.to(dtype)

# torch.compile (optional)
if hasattr(torch, "compile"):
    try:
        vae = torch.compile(vae)
    except Exception as e:
        print(f"[WARN] torch.compile failed: {e}")

# --------------------------- Freeze/Unfreeze ---------------------------
core = get_core_model(vae)

for p in core.parameters():
    p.requires_grad = False

unfrozen_param_names = []

if full_training and not train_decoder_only:
    for name, p in core.named_parameters():
        p.requires_grad = True
        unfrozen_param_names.append(name)
    loss_ratios["kl"] = float(kl_ratio)
    trainable_module = core
else:
    # train the decoder (or just its 0th up_block) + post_quant_conv
    if hasattr(core, "decoder"):
        if train_up_only:
            # --- only the 0th up_block ---
            for name, p in core.decoder.up_blocks[0].named_parameters():
                p.requires_grad = True
                unfrozen_param_names.append(f"decoder.up_blocks.0.{name}")
        else:
            print("Decoder: fallback to training the full decoder")
            for name, p in core.decoder.named_parameters():
                p.requires_grad = True
                unfrozen_param_names.append(f"decoder.{name}")
    if hasattr(core, "post_quant_conv"):
        for name, p in core.post_quant_conv.named_parameters():
            p.requires_grad = True
            unfrozen_param_names.append(f"post_quant_conv.{name}")
    trainable_module = core.decoder if hasattr(core, "decoder") else core


print(f"[INFO] Unfrozen parameters: {len(unfrozen_param_names)}. First 200 names:")
for nm in unfrozen_param_names[:200]:
    print(" ", nm)

# --------------------------- Dataset ---------------------------
class PngFolderDataset(Dataset):
    def __init__(self, root_dir, min_exts=('.png',), resolution=1024, limit=0):
        self.root_dir = root_dir
        self.resolution = resolution
        self.paths = []
        for root, _, files in os.walk(root_dir):
            for fname in files:
                if fname.lower().endswith(tuple(ext.lower() for ext in min_exts)):
                    self.paths.append(os.path.join(root, fname))
        if limit:
            self.paths = self.paths[:limit]
        # keep only images PIL can actually open
        valid = []
        for p in self.paths:
            try:
                with Image.open(p) as im:
                    im.verify()
                valid.append(p)
            except (OSError, UnidentifiedImageError):
                continue
        self.paths = valid
        if len(self.paths) == 0:
            raise RuntimeError(f"No valid PNG images found under {root_dir}")
        random.shuffle(self.paths)

    def __len__(self):
        return len(self.paths)

    def __getitem__(self, idx):
        p = self.paths[idx % len(self.paths)]
        with Image.open(p) as img:
            img = img.convert("RGB")
        if not resize_long_side or resize_long_side <= 0:
            return img
        w, h = img.size
        long = max(w, h)
        if long <= resize_long_side:
            return img
        scale = resize_long_side / float(long)
        new_w = int(round(w * scale))
        new_h = int(round(h * scale))
        return img.resize((new_w, new_h), Image.BICUBIC)

def random_crop(img, sz):
    w, h = img.size
    if w < sz or h < sz:
        img = img.resize((max(sz, w), max(sz, h)), Image.BICUBIC)
    # max(0, ...) so a crop of an exactly-sz image stays in bounds
    x = random.randint(0, max(0, img.width - sz))
    y = random.randint(0, max(0, img.height - sz))
    return img.crop((x, y, x + sz, y + sz))

tfm = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])

dataset = PngFolderDataset(ds_path, min_exts=('.png',), resolution=high_resolution, limit=limit)
print("len(dataset)", len(dataset))
if len(dataset) < batch_size:
    raise RuntimeError(f"Not enough valid images ({len(dataset)}) to form a batch of size {batch_size}")

def collate_fn(batch):
    imgs = []
    for img in batch:
        img = random_crop(img, high_resolution)
        imgs.append(tfm(img))
    return torch.stack(imgs)

dataloader = DataLoader(
    dataset,
    batch_size=batch_size,
    shuffle=True,
    collate_fn=collate_fn,
    num_workers=num_workers,
    pin_memory=True,
    drop_last=True
)

# --------------------------- Optimizer ---------------------------
# (an earlier duplicate definition of get_param_groups, shadowed by this one
# and ignoring its `module` argument, was removed)
def get_param_groups(module, weight_decay=0.001):
    # bias and normalization weights get no weight decay
    no_decay_tokens = ("bias", "norm", "rms", "layernorm")
    decay_params, no_decay_params = [], []
    for n, p in module.named_parameters():
        if not p.requires_grad:
            continue
        n_l = n.lower()
        if any(t in n_l for t in no_decay_tokens):
            no_decay_params.append(p)
        else:
            decay_params.append(p)
    return [
        {"params": decay_params, "weight_decay": weight_decay},
        {"params": no_decay_params, "weight_decay": 0.0},
    ]

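# bitsandbytes AdamW8bit keeps optimizer state (momentum/variance) in 8-bit,
# roughly quartering optimizer memory versus fp32 AdamW.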
def create_optimizer(name, param_groups):
    if name == "adam8bit":
        return bnb.optim.AdamW8bit(param_groups, lr=base_learning_rate, betas=(0.9, beta2), eps=eps)
    raise ValueError(name)

param_groups = get_param_groups(get_core_model(vae), weight_decay=0.001)
optimizer = create_optimizer(optimizer_type, param_groups)

# --------------------------- LR schedule ---------------------------
batches_per_epoch = len(dataloader)
steps_per_epoch = int(math.ceil(batches_per_epoch / float(gradient_accumulation_steps)))
total_steps = steps_per_epoch * num_epochs

def lr_lambda(step):
    if not use_decay:
        return 1.0
    x = float(step) / float(max(1, total_steps))
    warmup = float(warmup_percent)
    min_ratio = float(min_learning_rate) / float(base_learning_rate)
    if x < warmup:
        return min_ratio + (1.0 - min_ratio) * (x / warmup)
    decay_ratio = (x - warmup) / (1.0 - warmup)
    return min_ratio + 0.5 * (1.0 - min_ratio) * (1.0 + math.cos(math.pi * decay_ratio))
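# Schedule shape: linear warmup from min_ratio up to 1.0 over the first
# warmup_percent of training, then cosine decay back down to min_ratio,
# where min_ratio = min_learning_rate / base_learning_rate.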

scheduler = LambdaLR(optimizer, lr_lambda)

# prepare with accelerate
dataloader, vae, optimizer, scheduler = accelerator.prepare(dataloader, vae, optimizer, scheduler)
trainable_params = [p for p in vae.parameters() if p.requires_grad]

# FDL
fdl_loss = FDL_loss()
fdl_loss = fdl_loss.to(accelerator.device)

# --------------------------- LPIPS and helpers ---------------------------
_lpips_net = None
def _get_lpips():
    # lazily build a single LPIPS(VGG) instance on first use
    global _lpips_net
    if _lpips_net is None:
        _lpips_net = lpips.LPIPS(net='vgg', verbose=False).eval().to(accelerator.device)
    return _lpips_net

_sobel_kx = torch.tensor([[[[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]]], dtype=torch.float32)
_sobel_ky = torch.tensor([[[[-1, -2, -1], [0, 0, 0], [1, 2, 1]]]], dtype=torch.float32)
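# Per-channel Sobel gradient magnitude; the "edge" loss below is an L1 distance
# between these maps, which pushes the decoder to preserve high-frequency detail.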
def sobel_edges(x: torch.Tensor) -> torch.Tensor:
    C = x.shape[1]
    kx = _sobel_kx.to(x.device, x.dtype).repeat(C, 1, 1, 1)
    ky = _sobel_ky.to(x.device, x.dtype).repeat(C, 1, 1, 1)
    gx = F.conv2d(x, kx, padding=1, groups=C)
    gy = F.conv2d(x, ky, padding=1, groups=C)
    return torch.sqrt(gx * gx + gy * gy + 1e-12)

class MedianLossNormalizer:
    def __init__(self, desired_ratios: dict, window_steps: int):
        s = sum(desired_ratios.values())
        self.ratios = {k: (v / s) if s > 0 else 0.0 for k, v in desired_ratios.items()}
        self.buffers = {k: deque(maxlen=window_steps) for k in self.ratios.keys()}
        self.window = window_steps

    def update_and_total(self, abs_losses: dict):
        for k, v in abs_losses.items():
            if k in self.buffers:
                self.buffers[k].append(float(v.detach().abs().cpu()))
        meds = {k: (np.median(self.buffers[k]) if len(self.buffers[k]) > 0 else 1.0) for k in self.buffers}
        coeffs = {k: (self.ratios[k] / max(meds[k], 1e-12)) for k in self.ratios}
        total = sum(coeffs[k] * abs_losses[k] for k in abs_losses if k in coeffs)
        return total, coeffs, meds

if full_training and not train_decoder_only:
    loss_ratios["kl"] = float(kl_ratio)
normalizer = MedianLossNormalizer(loss_ratios, median_coeff_steps)

# --------------------------- Samples ---------------------------
@torch.no_grad()
def get_fixed_samples(n=3):
    idx = random.sample(range(len(dataset)), min(n, len(dataset)))
    pil_imgs = [dataset[i] for i in idx]
    tensors = []
    for img in pil_imgs:
        img = random_crop(img, high_resolution)
        tensors.append(tfm(img))
    return torch.stack(tensors).to(accelerator.device, dtype)

fixed_samples = get_fixed_samples()

@torch.no_grad()
def _to_pil_uint8(img_tensor: torch.Tensor) -> Image.Image:
    # [-1, 1] float CHW -> [0, 255] uint8 HWC
    arr = ((img_tensor.float().clamp(-1, 1) + 1.0) * 127.5).clamp(0, 255).byte().cpu().numpy().transpose(1, 2, 0)
    return Image.fromarray(arr)

@torch.no_grad()
def generate_and_save_samples(step=None):
    try:
        # unwrap DDP and torch.compile wrappers
        # (instead of: temp_vae = accelerator.unwrap_model(vae).eval())
        if hasattr(vae, "module"):
            unwrapped_vae = vae.module
        else:
            unwrapped_vae = vae
        if hasattr(unwrapped_vae, "_orig_mod"):
            temp_vae = unwrapped_vae._orig_mod
        else:
            temp_vae = unwrapped_vae

        temp_vae = temp_vae.eval()
        lpips_net = _get_lpips()
        orig_high = fixed_samples
        orig_low = F.interpolate(
            orig_high,
            size=(model_resolution, model_resolution),
            mode="bilinear",
            align_corners=False
        )
        model_dtype = next(temp_vae.parameters()).dtype
        orig_low = orig_low.to(dtype=model_dtype)

        # encode/decode, accounting for the video mode
        if is_video_vae(temp_vae):
            x_in = orig_low.unsqueeze(2)                # [B,3,1,H,W]
            enc = temp_vae.encode(x_in)
            latents_mean = enc.latent_dist.mean
            dec = temp_vae.decode(latents_mean).sample  # [B,3,1,H,W]
            rec = dec.squeeze(2)                        # [B,3,H,W]
        else:
            enc = temp_vae.encode(orig_low)
            latents_mean = enc.latent_dist.mean
            rec = temp_vae.decode(latents_mean).sample

        # resize if needed
        # if rec.shape[-2:] != orig_high.shape[-2:]:
        #     rec = F.interpolate(rec, size=orig_high.shape[-2:], mode="bilinear", align_corners=False)

        # save every real/decoded pair
        for i in range(rec.shape[0]):
            real_img = _to_pil_uint8(orig_high[i])
            dec_img = _to_pil_uint8(rec[i])
            real_img.save(f"{generated_folder}/sample_real_{i}.png")
            dec_img.save(f"{generated_folder}/sample_decoded_{i}.png")

        # LPIPS between each full-resolution pair
        lpips_scores = []
        for i in range(rec.shape[0]):
            orig_full = orig_high[i:i+1].to(torch.float32)
            rec_full = rec[i:i+1].to(torch.float32)
            lpips_val = lpips_net(orig_full, rec_full).item()
            lpips_scores.append(lpips_val)
        avg_lpips = float(np.mean(lpips_scores))

        # W&B logging
        if use_wandb and accelerator.is_main_process:
            log_data = {"lpips_mean": avg_lpips}
            for i in range(rec.shape[0]):
                log_data[f"sample/real_{i}"] = wandb.Image(f"{generated_folder}/sample_real_{i}.png", caption=f"real_{i}")
                log_data[f"sample/decoded_{i}"] = wandb.Image(f"{generated_folder}/sample_decoded_{i}.png", caption=f"decoded_{i}")
            wandb.log(log_data, step=step)

    finally:
        gc.collect()
        torch.cuda.empty_cache()


if accelerator.is_main_process and save_model:
    print("Generating samples before training starts...")
    generate_and_save_samples(0)

accelerator.wait_for_everyone()

# --------------------------- Training ---------------------------
progress = tqdm(total=total_steps, disable=not accelerator.is_local_main_process)
global_step = 0
min_loss = float("inf")
sample_interval = max(1, total_steps // max(1, sample_interval_share * num_epochs))
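# Since total_steps = steps_per_epoch * num_epochs, this works out to roughly
# sample_interval_share sample dumps (and save checks) per epoch.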

for epoch in range(num_epochs):
    vae.train()
    batch_losses, batch_grads = [], []
    track_losses = {k: [] for k in loss_ratios.keys()}

    for imgs in dataloader:
        with accelerator.accumulate(vae):
            imgs = imgs.to(accelerator.device)

            if high_resolution != model_resolution:
                imgs_low = F.interpolate(imgs, size=(model_resolution, model_resolution), mode="bilinear", align_corners=False)
            else:
                imgs_low = imgs

            model_dtype = next(vae.parameters()).dtype
            imgs_low_model = imgs_low.to(dtype=model_dtype) if imgs_low.dtype != model_dtype else imgs_low

            # instead of: current_vae = accelerator.unwrap_model(vae)
            unwrapped = vae.module if hasattr(vae, "module") else vae
            current_vae = getattr(unwrapped, "_orig_mod", unwrapped)

            # QWEN: encode/decode with T=1
            if is_video_vae(current_vae):
                x_in = imgs_low_model.unsqueeze(2)        # [B,3,1,H,W]
                enc = current_vae.encode(x_in)
                latents = enc.latent_dist.mean if train_decoder_only else enc.latent_dist.sample()
                dec = current_vae.decode(latents).sample  # [B,3,1,H,W]
                rec = dec.squeeze(2)                      # [B,3,H,W]
            else:
                enc = current_vae.encode(imgs_low_model)
                latents = enc.latent_dist.mean if train_decoder_only else enc.latent_dist.sample()
                rec = current_vae.decode(latents).sample

            # if rec.shape[-2:] != imgs.shape[-2:]:
            #     rec = F.interpolate(rec, size=imgs.shape[-2:], mode="bilinear", align_corners=False)

            rec_f32 = rec.to(torch.float32)
            imgs_f32 = imgs.to(torch.float32)

            abs_losses = {
                "mae": F.l1_loss(rec_f32, imgs_f32),
                "mse": F.mse_loss(rec_f32, imgs_f32),
                "lpips": _get_lpips()(rec_f32, imgs_f32).mean(),
                "fdl": fdl_loss(rec_f32, imgs_f32),
                "edge": F.l1_loss(sobel_edges(rec_f32), sobel_edges(imgs_f32)),
            }

            if full_training and not train_decoder_only:
                mean = enc.latent_dist.mean
                logvar = enc.latent_dist.logvar
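                # closed-form KL divergence between N(mean, exp(logvar)) and N(0, 1)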
                kl = -0.5 * torch.mean(1 + logvar - mean.pow(2) - logvar.exp())
                abs_losses["kl"] = kl
            else:
                abs_losses["kl"] = torch.tensor(0.0, device=accelerator.device, dtype=torch.float32)

            total_loss, coeffs, meds = normalizer.update_and_total(abs_losses)

            if torch.isnan(total_loss) or torch.isinf(total_loss):
                raise RuntimeError("NaN/Inf loss")

            accelerator.backward(total_loss)

            grad_norm = torch.tensor(0.0, device=accelerator.device)
            if accelerator.sync_gradients:
                grad_norm = accelerator.clip_grad_norm_(trainable_params, clip_grad_norm)
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad(set_to_none=True)
            global_step += 1
            progress.update(1)

            if accelerator.is_main_process:
                try:
                    current_lr = optimizer.param_groups[0]["lr"]
                except Exception:
                    current_lr = scheduler.get_last_lr()[0]

                batch_losses.append(total_loss.detach().item())
                batch_grads.append(float(grad_norm.detach().cpu().item()) if isinstance(grad_norm, torch.Tensor) else float(grad_norm))
                for k, v in abs_losses.items():
                    track_losses[k].append(float(v.detach().item()))

                if use_wandb and accelerator.sync_gradients:
                    log_dict = {
                        "total_loss": float(total_loss.detach().item()),
                        "learning_rate": current_lr,
                        "epoch": epoch,
                        "grad_norm": batch_grads[-1],
                    }
                    for k, v in abs_losses.items():
                        log_dict[f"loss_{k}"] = float(v.detach().item())
                    for k in coeffs:
                        log_dict[f"coeff_{k}"] = float(coeffs[k])
                        log_dict[f"median_{k}"] = float(meds[k])
                    wandb.log(log_dict, step=global_step)

            if global_step > 0 and global_step % sample_interval == 0:
                if accelerator.is_main_process:
                    generate_and_save_samples(global_step)
                accelerator.wait_for_everyone()

                # average over the micro-batches since the last sample dump
                # (slicing handles windows shorter than n_micro)
                n_micro = sample_interval * gradient_accumulation_steps
                avg_loss = float(np.mean(batch_losses[-n_micro:])) if batch_losses else float("nan")
                avg_grad = float(np.mean(batch_grads[-n_micro:])) if batch_grads else 0.0

                if accelerator.is_main_process:
                    print(f"Epoch {epoch} step {global_step} loss: {avg_loss:.6f}, grad_norm: {avg_grad:.6f}, lr: {current_lr:.9f}")
                    # save whenever the windowed loss is within save_barrier of the best seen
                    if save_model and avg_loss < min_loss * save_barrier:
                        min_loss = avg_loss
                        unwrapped = vae.module if hasattr(vae, "module") else vae
                        current_vae = getattr(unwrapped, "_orig_mod", unwrapped)
                        current_vae.save_pretrained(save_as)
                    if use_wandb:
                        wandb.log({"interm_loss": avg_loss, "interm_grad": avg_grad}, step=global_step)

    if accelerator.is_main_process:
        epoch_avg = float(np.mean(batch_losses)) if batch_losses else float("nan")
        print(f"Epoch {epoch} done, avg loss {epoch_avg:.6f}")
        if use_wandb:
            wandb.log({"epoch_loss": epoch_avg, "epoch": epoch + 1}, step=global_step)

# --------------------------- Final save ---------------------------
if accelerator.is_main_process:
    print("Training finished – saving final model")
    if save_model:
        unwrapped = vae.module if hasattr(vae, "module") else vae
        current_vae = getattr(unwrapped, "_orig_mod", unwrapped)
        current_vae.save_pretrained(save_as)

accelerator.free_memory()
if torch.distributed.is_initialized():
    torch.distributed.destroy_process_group()
print("Done!")
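
# A minimal sketch of how this script is typically launched (assumes a
# configured `accelerate` environment; the paths and flags above are edited
# in-place rather than passed as CLI arguments):
#   accelerate launch train_vae_fdl.py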