|
|
import json |
|
|
import pdb |
|
|
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, OlmoeForCausalLM, OlmoeModel |
|
|
from transformers.models.auto.modeling_auto import MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES |
|
|
import copy |
|
|
from transformers.modeling_outputs import ( |
|
|
MoeCausalLMOutputWithPast, |
|
|
MoeModelOutputWithPast, |
|
|
) |
|
|
import numpy as np |
|
|
import math |
|
|
from torch import nn |
|
|
import pandas as pd |
|
|
from transformers.cache_utils import Cache, DynamicCache, StaticCache |
|
|
from dataclasses import dataclass |
|
|
|
|
|
|
|
|
import os |
|
|
import sys |
|
|
import torch.distributed as dist |
|
|
from tqdm import tqdm |
|
|
from torch.utils.data import DataLoader |
|
|
from torch.utils.data.distributed import DistributedSampler |
|
|
import transformers |
|
|
import pickle |
|
|
|
|
|
|
|
|
from dataset import * |
|
|
|
|
|
from peft import (get_peft_model, PeftModel) |
|
|
import random |
|
|
from config import * |
|
|
from datasets import Dataset, DatasetDict, load_dataset |
|
|
import wandb |
|
|
import gc |
|
|
import os |
|
|
import argparse |
|
|
import torch |
|
|
import torch.nn as nn |
|
|
import torch.nn.functional as F |
|
|
import torch.optim as optim |
|
|
import functools |
|
|
from torch.optim.lr_scheduler import StepLR |
|
|
import torch.nn.functional as F |
|
|
import torch.distributed as dist |
|
|
import torch.multiprocessing as mp |
|
|
from torch.nn.parallel import DistributedDataParallel as DDP |
|
|
from torch.utils.data.distributed import DistributedSampler |
|
|
|
|
|
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( |
|
|
checkpoint_wrapper, CheckpointImpl) |
|
|
|
|
|
from torch.distributed.fsdp import ( |
|
|
FullyShardedDataParallel as FSDP, |
|
|
MixedPrecision, |
|
|
BackwardPrefetch, |
|
|
ShardingStrategy, |
|
|
FullStateDictConfig, |
|
|
StateDictType, |
|
|
) |
|
|
from torch.distributed.fsdp.wrap import ( |
|
|
transformer_auto_wrap_policy, |
|
|
enable_wrap, |
|
|
wrap, |
|
|
) |
|
|
from functools import partial |
|
|
from torch.utils.data import DataLoader |
|
|
from pathlib import Path |
|
|
from typing import Type, List, Optional, Tuple, Union |
|
|
from modelforseminat_v3 import * |
|
|
|
|
|
|
|
|
|
|
|
def setup():
    """Pin this process to its local GPU and join the NCCL process group.

    Reads LOCAL_RANK from the environment (torchrun-style launch) and uses
    env:// rendezvous, so MASTER_ADDR/MASTER_PORT must also be set.
    """
    device_index = int(os.environ['LOCAL_RANK'])
    torch.cuda.set_device(device_index)
    dist.init_process_group(backend='nccl', init_method='env://')
|
|
|
|
|
|
|
|
def cleanup():
    """Release host/GPU memory, then tear down the distributed process group.

    Order matters: collect garbage and empty the CUDA cache first so the
    process exits lean, and leave the process group last.
    """
    for finalizer in (gc.collect, torch.cuda.empty_cache, dist.destroy_process_group):
        finalizer()
|
|
|
|
|
|
|
|
def get_fsdp_device():
    """Return this rank's CUDA device and make it the current device.

    Falls back to device 0 when LOCAL_RANK is absent (single-process runs).
    """
    rank_value = os.environ.get("LOCAL_RANK", 0)
    dev = torch.device(f"cuda:{int(rank_value)}")
    torch.cuda.set_device(dev)
    return dev
|
|
|
|
|
|
|
|
def setup_model(model_name):
    """Load the SemiNAT causal-LM checkpoint and its tokenizer.

    Args:
        model_name: HF hub id or local path accepted by ``from_pretrained``.

    Returns:
        ``(model, tokenizer)`` tuple.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = OlmoForCausalLMForSemiNAT.from_pretrained(model_name)
    return model, tokenizer
|
|
|
|
|
|
|
|
def _save_checkpoint(model, rank, args, save_name, header):
    """Gather the full (unsharded) state dict and write it on rank 0.

    All ranks must call this function: ``FSDP.state_dict_type`` /
    ``model.state_dict()`` with FULL_STATE_DICT are collective operations,
    even though ``rank0_only=True`` means only rank 0 receives the tensors.

    Args:
        model: the FSDP-wrapped model.
        rank: global rank of this process.
        args: parsed CLI namespace (uses ``save_path``).
        save_name: file name of the checkpoint.
        header: progress message printed by rank 0 before saving.
    """
    save_policy = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
    with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT,
                              save_policy):
        cpu_state = model.state_dict()

    if rank == 0:
        print(header)
        # exist_ok avoids the check-then-create race the original had.
        os.makedirs(args.save_path, exist_ok=True)
        print(f"--> saving as model name {save_name}")
        torch.save(cpu_state, os.path.join(args.save_path, save_name))


def fsdp_main(args):
    """Run distributed SFT of the SemiNAT model under FSDP.

    Expects a torchrun-style environment (LOCAL_RANK / RANK / WORLD_SIZE).
    Loads the training data (pickled dataset or parquet processed by the
    class named in ``args.data_type``), wraps the model with FSDP using bf16
    mixed precision and full sharding, trains for ``args.epochs`` epochs,
    logs 20-step moving-average losses to wandb on rank 0, and checkpoints
    every ``args.save_steps`` optimizer steps plus at each epoch end.
    """
    local_rank = int(os.environ['LOCAL_RANK'])
    rank = int(os.environ['RANK'])
    world_size = int(os.environ['WORLD_SIZE'])

    if args.use_wandb and rank == 0:
        wandb.init(entity="SemiNAT", project="SemiNAT-SFT", name=args.run_name)

    model, tokenizer = setup_model(args.model_path)
    model.config.chunk_size_limit = args.chunk_size_limit

    # Either a pre-built pickled dataset, or raw parquet plus a dataset class.
    if ".pkl" in args.data_path:
        # Fix: close the file handle (original leaked it via
        # ``pickle.load(open(...))``).
        # NOTE(review): unpickling is only safe for trusted local files.
        with open(args.data_path, "rb") as fh:
            train_dataset = pickle.load(fh)
    else:
        datasets = pd.read_parquet(args.data_path)
        # Fix: resolve the dataset class by name instead of eval() on CLI
        # input. The classes come from ``from dataset import *`` and are
        # therefore visible in this module's globals.
        dataset_cls = globals()[args.data_type]
        train_dataset = dataset_cls(tokenizer,
                                    datasets,
                                    args.max_length,
                                    args.data_processess_num)

    train_sampler = DistributedSampler(train_dataset,
                                       rank=rank,
                                       num_replicas=world_size,
                                       shuffle=True)
    train_dataloader = DataLoader(dataset=train_dataset,
                                  sampler=train_sampler,
                                  batch_size=args.batch_size)

    print(f"Size of train dataset: {len(train_dataset)}")

    setup()

    # Wrap each decoder layer separately so FSDP can shard/free parameters
    # layer by layer instead of holding the whole model resident.
    OlmoDecoderLayerForSemiNAT_auto_wrap_policy = functools.partial(
        transformer_auto_wrap_policy,
        transformer_layer_cls={
            OlmoDecoderLayerForSemiNAT,
            NATDecoderForSemiNAT,
        })

    sharding_strategy: ShardingStrategy = ShardingStrategy.FULL_SHARD
    torch.cuda.set_device(local_rank)

    # bf16 throughout: parameters, gradient reduction, and buffers.
    mp_policy = MixedPrecision(
        param_dtype=torch.bfloat16,
        reduce_dtype=torch.bfloat16,
        buffer_dtype=torch.bfloat16,
    )

    model = FSDP(model,
                 auto_wrap_policy=OlmoDecoderLayerForSemiNAT_auto_wrap_policy,
                 mixed_precision=mp_policy,
                 sharding_strategy=sharding_strategy,
                 device_id=torch.cuda.current_device(),
                 use_orig_params=True)

    optimizer = optim.AdamW(model.parameters(), lr=args.lr)
    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)

    loss1_list = []  # length-prediction loss history (for moving averages)
    loss2_list = []  # NAT loss history
    loss_list = []   # combined loss history

    global_step = 0
    for epoch in range(1, args.epochs + 1):
        model.train()
        # Reshuffle the shard assignment each epoch.
        train_sampler.set_epoch(epoch)

        if rank == 0:
            inner_pbar = tqdm(range(len(train_dataloader)),
                              colour="blue",
                              desc="r0 Training Epoch")

        for batch in train_dataloader:
            optimizer.zero_grad()
            # The model packs a pair (length-prediction loss, NAT loss)
            # into its ``.loss`` field.
            loss1, loss2 = model(input_ids=batch[0],
                                 labels=batch[1],
                                 attention_mask=batch[2],
                                 slice_pos=batch[3],
                                 use_cache=False).loss
            loss = loss1 + loss2
            loss1_list.append(loss1.item())
            loss2_list.append(loss2.item())
            loss_list.append(loss.item())

            loss.backward()
            optimizer.step()

            global_step += 1

            if global_step % args.save_steps == 0:
                _save_checkpoint(
                    model, rank, args,
                    f"{args.save_name}-steps_{str(global_step)}.pt",
                    f"--> steps: {str(global_step)} saving model ...")

            if rank == 0:
                inner_pbar.update(1)
                if args.use_wandb:
                    # 20-step moving averages of each loss component.
                    wandb.log({
                        "length prediction loss":
                        sum(loss1_list[-20:]) / len(loss1_list[-20:]),
                        "nat loss":
                        sum(loss2_list[-20:]) / len(loss2_list[-20:]),
                        "loss":
                        sum(loss_list[-20:]) / len(loss_list[-20:])
                    })

            # NOTE(review): the reduced value is never read back; this call
            # is preserved as a per-step cross-rank synchronization point,
            # matching the original behavior.
            dist.all_reduce(loss, op=dist.ReduceOp.SUM)

        if rank == 0:
            inner_pbar.close()

        scheduler.step()

        if rank == 0:
            print(f"--> entering save model state")

        _save_checkpoint(model, rank, args,
                         f"{args.save_name}-epoch_{str(epoch)}.pt",
                         f"--> epoch: {str(epoch)} saving model ...")

    dist.barrier()
    cleanup()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # CLI for launching FSDP SFT training (run under torchrun so that
    # LOCAL_RANK / RANK / WORLD_SIZE are set for fsdp_main).
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch-size',
                        type=int,
                        default=4,
                        metavar='N',
                        # Fix: help text claimed "default: 64" but the
                        # actual default is 4.
                        help='input batch size for training (default: 4)')
    parser.add_argument('--model_path', type=str)
    parser.add_argument('--save_path', type=str)
    parser.add_argument('--save_name', type=str)
    parser.add_argument('--data_path', type=str)
    parser.add_argument('--data_type', type=str)
    parser.add_argument('--run_name', type=str)
    parser.add_argument('--max_length', type=int)
    parser.add_argument('--chunk_size_limit', type=int)
    parser.add_argument('--save_steps', type=int, default=5000)
    parser.add_argument('--data_processess_num', type=int, default=8)
    parser.add_argument('--epochs',
                        type=int,
                        default=2,
                        metavar='N',
                        # Fix: help text claimed "default: 3" but the
                        # actual default is 2.
                        help='number of epochs to train (default: 2)')
    parser.add_argument('--lr',
                        type=float,
                        default=.002,
                        metavar='LR',
                        help='learning rate (default: .002)')
    parser.add_argument('--gamma',
                        type=float,
                        default=0.7,
                        metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    # NOTE(review): --use_lora is accepted but never read by fsdp_main;
    # kept for CLI compatibility.
    parser.add_argument('--use_lora', action='store_true', default=False)
    parser.add_argument("--use_wandb",
                        action="store_true",
                        help="whether to use wandb")

    args = parser.parse_args()

    # Seed torch's RNG for reproducible initialization/shuffling.
    torch.manual_seed(args.seed)

    fsdp_main(args)
|
|
|