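"""train.py: train StupidAE, a small autoencoder, on 256x256 images streamed
with WebDataset.

Two objectives are optimized jointly: a pixel-space MSE reconstruction loss
and a "semantic" MSE loss that distills frozen SigLIP2 vision features out of
the latent through a separate semantic decoder head.
"""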
import os

# HF_HOME must be set before transformers/huggingface_hub are imported,
# otherwise the default cache path has already been resolved.
os.environ['HF_HOME'] = '/home/muinez/hf_home'

import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision.transforms import v2
from tqdm import tqdm
from stae import StupidAE
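# StupidAE's definition lives in stae.py (not shown here). From its use below,
# the assumed interface is: encode(img) -> latent, decode(latent) -> image in
# [-1, 1], and semantic_decoder(latent) -> a spatial feature map that gets
# matched against SigLIP2 patch features.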
from transformers import AutoModel
# Frozen SigLIP2 vision tower, used as the semantic teacher. The text tower
# is dropped to free memory since only image features are needed.
siglip = AutoModel.from_pretrained("google/siglip2-base-patch32-256", trust_remote_code=True).bfloat16().cuda()
siglip.text_model = None
torch.cuda.empty_cache()
vae = StupidAE().cuda()
params = list(vae.parameters())
from muon import SingleDeviceMuonWithAuxAdam
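# Muon's documented usage pattern: the orthogonalizing Muon update for
# matrix-shaped (ndim >= 2) weights, with a plain AdamW side optimizer for
# 1-D gains and biases.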
hidden_weights = [p for p in params if p.ndim >= 2]
hidden_gains_biases = [p for p in params if p.ndim < 2]
param_groups = [
    dict(params=hidden_weights, use_muon=True,
         lr=1e-4, weight_decay=1e-4),
    dict(params=hidden_gains_biases, use_muon=False,
         lr=3e-4, betas=(0.9, 0.95), weight_decay=1e-4),
]
optimizer = SingleDeviceMuonWithAuxAdam(param_groups)
from snooc import SnooC
optimizer = SnooC(optimizer)
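# SnooC (from the author's snooc module) wraps the base optimizer; the
# optimizer.step()/zero_grad() calls in the training loop go through it.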
from torchvision.io import decode_image
import webdataset as wds
def decode_image_data(key, value):
    # Custom WebDataset decoder: turn raw .jpg/.jpeg/.webp bytes into a CHW
    # uint8 RGB tensor; anything else (or a corrupt image) is skipped.
    if key.endswith((".jpg", ".jpeg", ".webp")):
        try:
            return decode_image(torch.frombuffer(bytearray(value), dtype=torch.uint8), mode="RGB")
        except Exception:
            return None
    return None
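# wds.decode() runs the handler above on every (extension, raw-bytes) pair in
# a sample; returning None means "not handled" and the entry passes through
# unchanged.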
image_transforms = v2.Compose([
    v2.ToDtype(torch.float32, scale=True),
    v2.Resize((256, 256)),
    v2.Normalize([0.5], [0.5]),
    # v2.RandomHorizontalFlip(0.5),
    # v2.RandomVerticalFlip(0.5),
])
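# Normalize([0.5], [0.5]) maps [0, 1] floats to [-1, 1]; the image logging at
# the bottom of the training loop undoes this with `* 0.5 + 0.5`.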
def preprocess(sample):
    # Shards store images as either .jpg or .webp; normalize to the 'jpg' key
    # so the rest of the pipeline only deals with one name.
    image_key = 'jpg' if 'jpg' in sample else 'webp' if 'webp' in sample else None
    if image_key:
        sample['jpg'] = image_transforms(sample.pop(image_key))
    return sample
batch_size = 96
num_workers = 16
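# 1000 .tar shards hosted on the Hugging Face Hub, streamed over HTTP; no
# local copy of the dataset is required.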
urls = [
    f"https://huggingface.co/datasets/Muinez/sankaku-webp-256shortest-edge/resolve/main/{i:04d}.tar"
    for i in range(1000)
]
dataset = (
    wds.WebDataset(urls, handler=wds.warn_and_continue, shardshuffle=100000)
    .shuffle(2000)                # sample-level shuffle buffer
    .decode(decode_image_data)
    .map(preprocess)
    .to_tuple("jpg")              # yield (image,) tuples; the DataLoader does the batching
)
from torch.utils.tensorboard import SummaryWriter
import datetime
logger = SummaryWriter(f'./logs/{datetime.datetime.now().strftime("%Y%m%d-%H%M%S")}')
# Resume from the checkpoint that the loop below periodically saves.
vae.load_state_dict(torch.load('model_2.pt'))
step = 0
dataloader = DataLoader(
    dataset,
    num_workers=num_workers,
    batch_size=batch_size,
    prefetch_factor=16,
    persistent_workers=True,
    drop_last=True,
)

# The WebDataset pipeline reshuffles as it streams, so just loop forever; with
# persistent_workers=True the loader is built once and re-iterated, instead of
# respawning a fresh worker pool on every pass.
while True:
    bar = tqdm(dataloader)
    for (data,) in bar:
        image = data.cuda().bfloat16()

        # Teacher targets: SigLIP2 patch features, normalized by their batch
        # std so the semantic loss sits on a scale comparable to the pixel loss.
        with torch.no_grad(), torch.amp.autocast('cuda', torch.bfloat16):
            last_hidden_state = siglip.vision_model(image, output_hidden_states=True).last_hidden_state
            std = last_hidden_state.std()
            last_hidden_state = last_hidden_state / std

        with torch.amp.autocast('cuda', torch.bfloat16):
            latent = vae.encode(image)
            decoded = vae.decode(latent)
            # The semantic head predicts the teacher features from the latent;
            # flatten the spatial map to (B, N, C) to match SigLIP's layout.
            semantic = vae.semantic_decoder(latent) / std
            semantic = semantic.flatten(2).transpose(1, 2)

        pixel_loss = F.mse_loss(decoded.float(), image.float())
        semantic_loss = F.mse_loss(semantic.float(), last_hidden_state.float())
        loss = pixel_loss + semantic_loss

        loss.backward()
        grad_norm = torch.nn.utils.clip_grad_norm_(vae.parameters(), 1.0)
        optimizer.step()
        optimizer.zero_grad()

        if step % 1000 == 0:
            torch.save(vae.state_dict(), 'model_2.pt')

        bar.set_description(
            f'Step: {step}, Loss: {loss.item():.4f}, Grad norm: {grad_norm.item():.3f}, Std: {latent.std().item():.3f}'
        )
        logger.add_scalar('Pixel loss', pixel_loss.item(), step)
        logger.add_scalar('Semantic loss', semantic_loss.item(), step)

        if step % 50 == 0:
            for i in range(3):
                # bfloat16 tensors can't be converted to numpy for TensorBoard,
                # so cast to float32 before logging.
                logger.add_image(f'Decoded/{i}', decoded[i].float().cpu() * 0.5 + 0.5, step)
                logger.add_image(f'Real/{i}', image[i].float().cpu() * 0.5 + 0.5, step)
            logger.flush()

        step += 1