# sft-v3/train.py
import json
import pdb
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, OlmoeForCausalLM, OlmoeModel
from transformers.models.auto.modeling_auto import MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES
import copy
from transformers.modeling_outputs import (
MoeCausalLMOutputWithPast,
MoeModelOutputWithPast,
)
import numpy as np
import math
import pandas as pd
from transformers.cache_utils import Cache, DynamicCache, StaticCache
from dataclasses import dataclass
# from transformers.models.olmoe.configuration_olmoe import OlmoeConfig
# from transformers.models.olmoe.modeling_olmoe import OlmoeMLP, OlmoeAttention, OlmoeFlashAttention2, OlmoeSdpaAttention, OlmoeRMSNorm, OlmoeSparseMoeBlock, apply_rotary_pos_emb, repeat_kv, OlmoeRotaryEmbedding
import os
import sys
import torch.distributed as dist
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
import transformers
import pickle
# from transformers.models.olmoe.configuration_olmoe import OlmoeConfig
from dataset import *
# from utils import flash_attn_forward, flash_attn_prepare_decoder_attention_mask, get_multiround_data
from peft import (get_peft_model, PeftModel)
import random
from config import *
from datasets import Dataset, DatasetDict, load_dataset
import wandb
import gc
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import functools
from torch.optim.lr_scheduler import StepLR
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
checkpoint_wrapper, CheckpointImpl)
from torch.distributed.fsdp import (
FullyShardedDataParallel as FSDP,
MixedPrecision,
BackwardPrefetch,
ShardingStrategy,
FullStateDictConfig,
StateDictType,
)
from torch.distributed.fsdp.wrap import (
transformer_auto_wrap_policy,
enable_wrap,
wrap,
)
from functools import partial
from pathlib import Path
from typing import Type, List, Optional, Tuple, Union
from modelforseminat_v3 import *
################################# FSDP Config #####################################
def setup():
# initialize the process group
local_rank = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
dist.init_process_group(
backend='nccl',
init_method='env://',
)
def cleanup():
gc.collect()
torch.cuda.empty_cache()
dist.destroy_process_group()
def get_fsdp_device():
    # called by each process after the distributed environment is initialized
    local_rank = int(os.environ.get("LOCAL_RANK", 0))  # set automatically by torchrun
device = torch.device(f"cuda:{local_rank}")
torch.cuda.set_device(device)
return device
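# Note: the script assumes a torchrun launch, which sets LOCAL_RANK, RANK and
# WORLD_SIZE in the environment for every process. An illustrative invocation
# (paths and flag values below are placeholders, not the authors' settings):
#
#   torchrun --nproc_per_node=8 train.py \
#       --model_path <pretrained-olmo-checkpoint> \
#       --data_path data/sft.parquet --data_type SFTDataset \
#       --max_length 2048 --chunk_size_limit 8 \
#       --save_path checkpoints --save_name sft-v3 --use_wandb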
def setup_model(model_name):
model = OlmoForCausalLMForSemiNAT.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
# pdb.set_trace()
return model, tokenizer
def fsdp_main(args):
local_rank = int(os.environ['LOCAL_RANK'])
rank = int(os.environ['RANK'])
world_size = int(os.environ['WORLD_SIZE'])
if args.use_wandb and rank == 0:
wandb.init(entity="SemiNAT", project="SemiNAT-SFT", name=args.run_name)
model, tokenizer = setup_model(args.model_path)
model.config.chunk_size_limit = args.chunk_size_limit
# pdb.set_trace()
if ".pkl" in args.data_path:
train_dataset = pickle.load(open(args.data_path, "rb"))
else:
datasets = pd.read_parquet(args.data_path)
        # instantiate the dataset class named by --data_type (resolved via eval)
        train_dataset = eval(args.data_type)(
            tokenizer,
            datasets,                     # raw records loaded from the parquet file
            args.max_length,              # maximum input length
            args.data_processess_num)     # number of preprocessing workers
train_sampler = DistributedSampler(train_dataset,
rank=rank,
num_replicas=world_size,
shuffle=True)
train_dataloader = DataLoader(dataset=train_dataset,
sampler=train_sampler,
batch_size=args.batch_size)
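    # DistributedSampler gives each rank a disjoint shard of the dataset; the
    # per-epoch call to train_sampler.set_epoch() below reseeds the shuffle so
    # every epoch sees a different ordering. The effective global batch size is
    # args.batch_size * WORLD_SIZE.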
print(f"Size of train dataset: {len(train_dataset)}")
setup()
OlmoDecoderLayerForSemiNAT_auto_wrap_policy = functools.partial(
transformer_auto_wrap_policy,
transformer_layer_cls={
OlmoDecoderLayerForSemiNAT,
NATDecoderForSemiNAT,
})
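    # transformer_auto_wrap_policy turns every instance of the listed layer
    # classes into its own FSDP unit, so parameters are sharded per layer and
    # only all-gathered around that layer's forward/backward pass.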
    sharding_strategy: ShardingStrategy = ShardingStrategy.FULL_SHARD  # SHARD_GRAD_OP for ZeRO-2, FULL_SHARD for ZeRO-3
torch.cuda.set_device(local_rank)
bfSixteen = MixedPrecision(
param_dtype=torch.bfloat16,
reduce_dtype=torch.bfloat16,
buffer_dtype=torch.bfloat16,
)
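    # Pure-bf16 policy: parameters, gradient reduction and buffers are all kept
    # in bfloat16. This assumes bf16-capable GPUs (Ampere or newer); on older
    # hardware a fp32 fallback (mp_policy = None) would be needed.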
# if bf16_ready:
mp_policy = bfSixteen
# else:
# mp_policy = None # defaults to fp32
# if args.use_lora:
# model = get_peft_model(model, lora_config)
# model is on CPU before input to FSDP
model = FSDP(model,
auto_wrap_policy=OlmoDecoderLayerForSemiNAT_auto_wrap_policy,
mixed_precision=mp_policy,
sharding_strategy=sharding_strategy,
device_id=torch.cuda.current_device(),
use_orig_params=True)
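    # use_orig_params=True exposes the original (unflattened) parameters, which
    # keeps optimizer param groups and named_parameters() stable under FSDP.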
optimizer = optim.AdamW(model.parameters(), lr=args.lr)
scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
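    # StepLR with step_size=1 multiplies the learning rate by args.gamma after
    # every epoch (scheduler.step() is called once per epoch at the end of the
    # training loop below).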
loss1_list = []
loss2_list = []
loss_list = []
global_step = 0
for epoch in range(1, args.epochs + 1):
# t0 = time.time()
model.train()
local_rank = int(os.environ['LOCAL_RANK'])
# fsdp_loss = torch.zeros(2).to(local_rank)
if train_sampler:
train_sampler.set_epoch(epoch)
if rank == 0:
inner_pbar = tqdm(range(len(train_dataloader)),
colour="blue",
desc="r0 Training Epoch")
for batch in train_dataloader:
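            # Each batch is assumed to be a tuple of
            # (input_ids, labels, attention_mask, slice_pos); the SemiNAT model
            # returns a pair of losses (chunk-length prediction and NAT token
            # loss) that are summed for the backward pass.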
optimizer.zero_grad()
loss1, loss2 = model(input_ids=batch[0],
labels=batch[1],
attention_mask=batch[2],
slice_pos=batch[3],
use_cache=False).loss
loss = loss1 + loss2
loss1_list.append(loss1.item())
loss2_list.append(loss2.item())
loss_list.append(loss.item())
# pdb.set_trace()
loss.backward()
optimizer.step()
global_step += 1
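            # Periodic checkpoint: FULL_STATE_DICT with offload_to_cpu=True and
            # rank0_only=True gathers the complete (unsharded) model onto rank
            # 0's CPU so it can be written as a single .pt file via torch.save.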
if global_step % args.save_steps == 0:
save_policy = FullStateDictConfig(offload_to_cpu=True,
rank0_only=True)
with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT,
save_policy):
cpu_state = model.state_dict()
if rank == 0:
print(f"--> steps: {str(global_step)} saving model ...")
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
save_name = f"{args.save_name}-steps_{str(global_step)}.pt"
print(f"--> saving as model name {save_name}")
save_path = os.path.join(args.save_path, save_name)
torch.save(cpu_state, save_path)
if rank == 0:
inner_pbar.update(1)
if args.use_wandb:
wandb.log({
"length prediction loss":
sum(loss1_list[-20:]) / len(loss1_list[-20:]),
"nat loss":
sum(loss2_list[-20:]) / len(loss2_list[-20:]),
"loss":
sum(loss_list[-20:]) / len(loss_list[-20:])
})
dist.all_reduce(loss, op=dist.ReduceOp.SUM)
if rank == 0:
inner_pbar.close()
scheduler.step()
if rank == 0:
print(f"--> entering save model state")
save_policy = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT,
save_policy):
cpu_state = model.state_dict()
if rank == 0:
print(f"--> epoch: {str(epoch)} saving model ...")
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
save_name = f"{args.save_name}-epoch_{str(epoch)}.pt"
print(f"--> saving as model name {save_name}")
save_path = os.path.join(args.save_path, save_name)
torch.save(cpu_state, save_path)
dist.barrier()
cleanup()
################################# FSDP Config #####################################
if __name__ == "__main__":
# Training settings
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size',
type=int,
default=4,
metavar='N',
                        help='input batch size for training (default: 4)')
parser.add_argument('--model_path', type=str)
parser.add_argument('--save_path', type=str)
parser.add_argument('--save_name', type=str)
parser.add_argument('--data_path', type=str)
parser.add_argument('--data_type', type=str)
parser.add_argument('--run_name', type=str)
parser.add_argument('--max_length', type=int)
parser.add_argument('--chunk_size_limit', type=int)
parser.add_argument('--save_steps', type=int, default=5000)
parser.add_argument('--data_processess_num', type=int, default=8)
parser.add_argument('--epochs',
type=int,
default=2,
metavar='N',
                        help='number of epochs to train (default: 2)')
parser.add_argument('--lr',
type=float,
default=.002,
metavar='LR',
help='learning rate (default: .002)')
parser.add_argument('--gamma',
type=float,
default=0.7,
metavar='M',
help='Learning rate step gamma (default: 0.7)')
parser.add_argument('--seed',
type=int,
default=1,
metavar='S',
help='random seed (default: 1)')
parser.add_argument('--use_lora', action='store_true', default=False)
parser.add_argument("--use_wandb",
action="store_true",
help="whether to use wandb")
args = parser.parse_args()
torch.manual_seed(args.seed)
fsdp_main(args)