# sft-v4 / train3.py
import json
import pdb
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, OlmoeForCausalLM, OlmoeModel
import copy
from transformers.modeling_outputs import (
MoeCausalLMOutputWithPast,
MoeModelOutputWithPast,
)
from collections import defaultdict
import numpy as np
import math
import pandas as pd
from transformers.cache_utils import Cache, DynamicCache, StaticCache
from dataclasses import dataclass
# from transformers.models.olmoe.configuration_olmoe import OlmoeConfig
# from transformers.models.olmoe.modeling_olmoe import OlmoeMLP, OlmoeAttention, OlmoeFlashAttention2, OlmoeSdpaAttention, OlmoeRMSNorm, OlmoeSparseMoeBlock, apply_rotary_pos_emb, repeat_kv, OlmoeRotaryEmbedding
import os
import sys
import torch.distributed as dist
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
import transformers
import pickle
from dataset import *
# from utils import flash_attn_forward, flash_attn_prepare_decoder_attention_mask, get_multiround_data
from peft import (get_peft_model, PeftModel)
import random
# from config import *
from datasets import Dataset, DatasetDict, load_dataset
import wandb
import gc
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import functools
from torch.optim.lr_scheduler import StepLR
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
checkpoint_wrapper, CheckpointImpl)
from torch.distributed.fsdp import (
FullyShardedDataParallel as FSDP,
MixedPrecision,
BackwardPrefetch,
ShardingStrategy,
FullStateDictConfig,
StateDictType,
)
from torch.distributed.fsdp.wrap import (
transformer_auto_wrap_policy,
enable_wrap,
wrap,
)
from functools import partial
from pathlib import Path
from typing import Type, List, Optional, Tuple, Union
from modelforseminat_v4 import *
from torch.optim.lr_scheduler import _LRScheduler
class WarmupCosineScheduler(_LRScheduler):
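    """Linear warmup followed by cosine decay.

    Ramps each param group's lr linearly from 0 to its base lr over
    `warmup_steps` scheduler steps, then decays it along a cosine curve to
    `min_lr` by `total_steps`. `warmup_steps` may be an absolute step count
    or a float in (0, 1), interpreted as a fraction of `total_steps`.
    """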
def __init__(self,
optimizer,
warmup_steps,
total_steps,
min_lr=0.0,
last_epoch=-1):
# self.warmup_steps = warmup_steps
self.total_steps = total_steps
self.min_lr = min_lr
if isinstance(warmup_steps, float) and 0 < warmup_steps < 1:
self.warmup_steps = int(warmup_steps * total_steps)
else:
self.warmup_steps = int(warmup_steps)
super().__init__(optimizer, last_epoch)
def get_lr(self):
step = self.last_epoch + 1
lrs = []
for base_lr in self.base_lrs:
            if self.warmup_steps > 0 and step < self.warmup_steps:
# Linear warmup
lr = base_lr * step / self.warmup_steps
else:
# Cosine decay
progress = (step - self.warmup_steps) / max(
1, self.total_steps - self.warmup_steps)
cosine_decay = 0.5 * (1 + math.cos(math.pi * progress))
lr = self.min_lr + (base_lr - self.min_lr) * cosine_decay
lrs.append(lr)
return lrs
################################# FSDP Config #####################################
def setup():
# initialize the process group
local_rank = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
dist.init_process_group(
backend='nccl',
init_method='env://',
)
def cleanup():
gc.collect()
torch.cuda.empty_cache()
dist.destroy_process_group()
def get_fsdp_device():
    # Called by each process after the distributed environment is initialized.
    local_rank = int(os.environ.get("LOCAL_RANK", 0))  # set automatically by torchrun
device = torch.device(f"cuda:{local_rank}")
torch.cuda.set_device(device)
return device
def load_trained_model(model_name):
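    """Load the base OLMo-2 weights, then overlay a SemiNAT fine-tuning checkpoint.

    Note: the paths below are hard-coded and the `model_name` argument is unused.
    `strict=False` means missing/unexpected keys are reported instead of raising.
    """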
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
olmo_path = "/AIRvePFS/ai4science/users/ai4science/users/zyk/seminat_backup/model/OLMo-2-0425-1B"
pt_path = "/AIRvePFS/ai4science/users/ai4science/users/zyk/seminat/ckp/sft-v4-1e3-len2-fc-chunklimit2-jueduipos/sft-v4-1e3-len2-fc-chunklimit2-jueduipos-steps_300.pt"
config_path = "/AIRvePFS/ai4science/users/ai4science/users/zyk/seminat_backup/model/OLMo-2-0425-1B/config.json"
config = AutoConfig.from_pretrained(olmo_path)
model = Olmo2ForCausalLMForSemiNAT.from_pretrained(olmo_path,
config=config,
torch_dtype=torch.bfloat16)
state_dict = torch.load(pt_path, map_location=DEVICE, weights_only=True)
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
print(
f"Loaded with {len(missing_keys)} missing keys and {len(unexpected_keys)} unexpected keys."
)
if missing_keys:
print("Missing keys:", missing_keys)
if unexpected_keys:
print("Unexpected keys:", unexpected_keys)
model = model.to(DEVICE)
tokenizer = AutoTokenizer.from_pretrained(olmo_path)
return model, tokenizer
def setup_model(model_name):
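    """Build the SemiNAT model and its tokenizer from a pretrained checkpoint directory."""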
model = Olmo2ForCausalLMForSemiNAT.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
# config = AutoConfig.from_pretrained(model_name)
    # model = Olmo2ForCausalLMForSemiNAT(config)  # note: builds from config only, without pretrained weights
# tokenizer = AutoTokenizer.from_pretrained(model_name)
return model, tokenizer
def fsdp_main(args):
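    """Per-process entry point for torchrun: builds the dataset and distributed
    dataloader, wraps the model in FSDP with bf16 mixed precision, trains with a
    warmup-cosine schedule, and periodically saves a full (rank-0, CPU-offloaded)
    state dict."""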
local_rank = int(os.environ['LOCAL_RANK'])
rank = int(os.environ['RANK'])
world_size = int(os.environ['WORLD_SIZE'])
if args.use_wandb and rank == 0:
wandb.init(entity="SemiNAT", project="SemiNAT-SFT", name=args.run_name)
model, tokenizer = setup_model(args.model_path)
# model, tokenizer = load_trained_model(args.model_path)
model.config.chunk_size_limit = args.chunk_size_limit
# pdb.set_trace()
if ".pkl" in args.data_path:
train_dataset = pickle.load(open(args.data_path, "rb"))
else:
datasets = pd.read_parquet(args.data_path)
        train_dataset = eval(args.data_type)(  # dataset class brought into scope by `from dataset import *`
tokenizer,
datasets, # your data preprocessing function
args.max_length, # your max input length
args.data_processess_num)
train_sampler = DistributedSampler(train_dataset,
rank=rank,
num_replicas=world_size,
shuffle=True)
train_dataloader = DataLoader(dataset=train_dataset,
sampler=train_sampler,
batch_size=args.batch_size)
print(f"Size of train dataset: {len(train_dataset)}")
setup()
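    # Wrap each transformer block (and the NAT decoder) as its own FSDP unit.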
Olmo2DecoderLayerForSemiNAT_auto_wrap_policy = functools.partial(
transformer_auto_wrap_policy,
transformer_layer_cls={
Olmo2DecoderLayerForSemiNAT,
NATDecoderForSemiNAT,
})
    sharding_strategy: ShardingStrategy = ShardingStrategy.FULL_SHARD  # SHARD_GRAD_OP for ZeRO-2, FULL_SHARD for ZeRO-3
torch.cuda.set_device(local_rank)
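    # Pure bf16 policy: parameters, gradient reduction, and buffers all in bfloat16.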
bfSixteen = MixedPrecision(
param_dtype=torch.bfloat16,
reduce_dtype=torch.bfloat16,
buffer_dtype=torch.bfloat16,
)
# if bf16_ready:
mp_policy = bfSixteen
# else:
# mp_policy = None # defaults to fp32
# if args.use_lora:
# model = get_peft_model(model, lora_config)
# model is on CPU before input to FSDP
model = FSDP(model,
auto_wrap_policy=Olmo2DecoderLayerForSemiNAT_auto_wrap_policy,
mixed_precision=mp_policy,
sharding_strategy=sharding_strategy,
device_id=torch.cuda.current_device(),
use_orig_params=True)
optimizer = optim.AdamW(
model.parameters(),
lr=args.lr,
betas=args.betas,
weight_decay=args.weight_decay,
eps=args.eps,
)
# scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    scheduler = WarmupCosineScheduler(
        optimizer=optimizer,             # wrapped optimizer
        warmup_steps=args.warmup_steps,  # warmup steps (absolute count or fraction)
        total_steps=args.total_steps,    # total number of training steps
        min_lr=args.min_lr               # minimum learning rate
    )
    torch.autograd.set_detect_anomaly(True)  # debugging aid: surfaces NaN sources, slows training
loss1_list = []
loss2_list = []
loss_list = []
global_step = 0
for epoch in range(1, args.epochs + 1):
# t0 = time.time()
model.train()
local_rank = int(os.environ['LOCAL_RANK'])
# fsdp_loss = torch.zeros(2).to(local_rank)
if train_sampler:
train_sampler.set_epoch(epoch)
if rank == 0:
inner_pbar = tqdm(range(len(train_dataloader)),
colour="blue",
desc="r0 Training Epoch")
for batch in train_dataloader:
optimizer.zero_grad()
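            # The SemiNAT model returns two losses: chunk-length prediction (loss1)
            # and the non-autoregressive token loss (loss2).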
loss1, loss2 = model(input_ids=batch[0],
labels=batch[1],
attention_mask=batch[2],
slice_pos=batch[3],
use_cache=False).loss
loss = loss1 + loss2
# loss = loss2
loss1_list.append(loss1.item())
loss2_list.append(loss2.item())
loss_list.append(loss.item())
# pdb.set_trace()
# if torch.isnan(loss):
# print(f"Step {global_step}: loss is NaN, entering pdb …")
# pdb.set_trace()
# print(f"loss1:{loss1},loss2:{loss2}")
loss.backward()
            # Per-module gradient norms (own parameters only, no recursion).
            if args.use_wandb and rank == 0:
                for name, module in model.named_modules():
                    total_norm = 0.0
                    param_count = 0
                    for param in module.parameters(recurse=False):
                        if param.grad is not None:
                            total_norm += param.grad.data.norm(2).item()**2
                            param_count += 1
                    if param_count > 0:
                        total_norm = total_norm**0.5
                        wandb.log({f"grad_norm/{name}": total_norm},
                                  step=global_step)
            # Per-layer alternative (commented out):
            # layer_grads = defaultdict(list)
            # for name, module in model.named_modules():
            #     # Only handle encoder.layers.N style modules (adjust the match as needed)
            #     # pdb.set_trace()
            #     if "model.layers." in name or "model.decoder" in name or "model.encoder" in name:
            #         if "model.layers" in name:
            #             # Extract the layer index (e.g. encoder.layers.0 -> layer0)
            #             parts = name.split(".")
            #             try:
            #                 layer_idx = int(parts[3])
            #             except (IndexError, ValueError):
            #                 pdb.set_trace()
            #                 # continue
            #             layer_key = f"layer{layer_idx}"
            #         else:
            #             layer_key = "decoder" if "model.decoder" in name else "encoder"
            #         # Collect this module's own parameter gradients (no recursion)
            #         for param in module.parameters(recurse=False):
            #             if param.grad is not None:
            #                 layer_grads[layer_key].append(
            #                     param.grad.detach().flatten())
            #         if "lm_head" in name:
            #             layer_key = "lm_head"
            #             layer_grads[layer_key].append(
            #                 param.grad.detach().flatten())
            # # Concatenate each layer's gradients and log the overall L2 norm
            # for layer_key, grads in layer_grads.items():
            #     if grads:
            #         total_grad = torch.cat(grads).norm(2).item()
            #         wandb.log({f"grad_norm/{layer_key}": total_grad},
            #                   step=global_step)
            optimizer.step()
            scheduler.step()  # step-based schedule: advance once per optimizer update
            global_step += 1
if global_step % args.save_steps == 0:
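                # Gather the full (unsharded) state dict, offloaded to CPU and
                # materialized only on rank 0.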
save_policy = FullStateDictConfig(offload_to_cpu=True,
rank0_only=True)
with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT,
save_policy):
cpu_state = model.state_dict()
if rank == 0:
print(f"--> steps: {str(global_step)} saving model ...")
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
save_name = f"{args.save_name}-steps_{str(global_step)}.pt"
print(f"--> saving as model name {save_name}")
save_path = os.path.join(args.save_path, save_name)
torch.save(cpu_state, save_path)
if rank == 0:
inner_pbar.update(1)
if args.use_wandb:
wandb.log({
"length prediction loss":
sum(loss1_list[-20:]) / len(loss1_list[-20:]),
"nat loss":
sum(loss2_list[-20:]) / len(loss2_list[-20:]),
"loss":
sum(loss_list[-20:]) / len(loss_list[-20:])
})
dist.all_reduce(loss, op=dist.ReduceOp.SUM)
if rank == 0:
inner_pbar.close()
# if rank == 0:
# print(f"--> entering save model state")
# save_policy = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
# with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT,
# save_policy):
# cpu_state = model.state_dict()
# if rank == 0:
# print(f"--> epoch: {str(epoch)} saving model ...")
# if not os.path.exists(args.save_path):
# os.makedirs(args.save_path)
# save_name = f"{args.save_name}-epoch_{str(epoch)}.pt"
# print(f"--> saving as model name {save_name}")
# save_path = os.path.join(args.save_path, save_name)
# torch.save(cpu_state, save_path)
dist.barrier()
cleanup()
################################# FSDP Config #####################################
if __name__ == "__main__":
# Training settings
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size',
type=int,
default=4,
metavar='N',
                        help='input batch size for training (default: 4)')
parser.add_argument('--model_path', type=str)
parser.add_argument('--save_path', type=str)
parser.add_argument('--save_name', type=str)
parser.add_argument('--data_path', type=str)
parser.add_argument('--data_type', type=str)
parser.add_argument('--run_name', type=str)
parser.add_argument('--max_length', type=int)
parser.add_argument('--chunk_size_limit', type=int)
parser.add_argument('--save_steps', type=int, default=5000)
parser.add_argument('--data_processess_num', type=int, default=8)
parser.add_argument('--epochs',
type=int,
default=2,
metavar='N',
                        help='number of epochs to train (default: 2)')
parser.add_argument('--lr',
type=float,
default=.002,
metavar='LR',
help='learning rate (default: .002)')
parser.add_argument('--gamma',
type=float,
default=0.7,
metavar='M',
help='Learning rate step gamma (default: 0.7)')
parser.add_argument('--weight_decay', type=float)
parser.add_argument('--eps', type=float)
    parser.add_argument('--decay_norm_and_bias', action='store_true')
parser.add_argument(
"--betas",
type=float,
        nargs=2,  # expects exactly two float values
        default=[0.9, 0.999]  # optional default
)
    parser.add_argument('--decay_embeddings', action='store_true')
parser.add_argument('--warmup_steps', type=float)
parser.add_argument('--total_steps', type=int)
parser.add_argument('--min_lr', type=float)
parser.add_argument('--seed',
type=int,
default=1,
metavar='S',
help='random seed (default: 1)')
parser.add_argument('--use_lora', action='store_true', default=False)
parser.add_argument("--use_wandb",
action="store_true",
help="whether to use wandb")
args = parser.parse_args()
torch.manual_seed(args.seed)
fsdp_main(args)
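# Example launch (illustrative only; paths, the dataset class name, and all
# hyperparameter values below are placeholders, not values from this repo):
#   torchrun --nproc_per_node=8 train3.py \
#       --model_path /path/to/OLMo-2-0425-1B --data_path data.parquet \
#       --data_type SFTDataset --max_length 2048 --chunk_size_limit 2 \
#       --batch-size 4 --lr 1e-3 --weight_decay 0.01 --eps 1e-8 \
#       --warmup_steps 0.03 --total_steps 10000 --min_lr 1e-5 \
#       --save_path ./ckp --save_name sft-v4 --run_name sft-v4 --use_wandb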