"""FSDP fine-tuning script for the SemiNAT Olmo2 model.

Trains Olmo2ForCausalLMForSemiNAT (defined in modelforseminat_v4) with
PyTorch FullyShardedDataParallel, a warmup + cosine learning-rate schedule,
and optional Weights & Biases logging.
"""

# Standard library
import argparse
import copy
import functools
import gc
import json
import math
import os
import pdb
import pickle
import random
import sys
from collections import defaultdict
from dataclasses import dataclass
from functools import partial
from pathlib import Path
from typing import Type, List, Optional, Tuple, Union

# Third-party
import numpy as np
import pandas as pd
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import transformers
import wandb
from datasets import Dataset, DatasetDict, load_dataset
from peft import get_peft_model, PeftModel
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
    checkpoint_wrapper,
    CheckpointImpl,
)
from torch.distributed.fsdp import (
    FullyShardedDataParallel as FSDP,
    MixedPrecision,
    BackwardPrefetch,
    ShardingStrategy,
    FullStateDictConfig,
    StateDictType,
)
from torch.distributed.fsdp.wrap import (
    transformer_auto_wrap_policy,
    enable_wrap,
    wrap,
)
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim.lr_scheduler import StepLR, _LRScheduler
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm
from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoTokenizer,
    OlmoeForCausalLM,
    OlmoeModel,
)
from transformers.cache_utils import Cache, DynamicCache, StaticCache
from transformers.modeling_outputs import (
    MoeCausalLMOutputWithPast,
    MoeModelOutputWithPast,
)
from transformers.models.auto.modeling_auto import MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES

# Local project modules: dataset classes and the SemiNAT model definitions
# (Olmo2ForCausalLMForSemiNAT, Olmo2DecoderLayerForSemiNAT, NATDecoderForSemiNAT).
from dataset import *
from modelforseminat_v4 import *

class WarmupCosineScheduler(_LRScheduler):
    """Linear warmup followed by cosine decay down to ``min_lr``.

    ``warmup_steps`` may be given either as an absolute step count or as a
    fraction of ``total_steps`` (a float strictly between 0 and 1).
    """

    def __init__(self,
                 optimizer,
                 warmup_steps,
                 total_steps,
                 min_lr=0.0,
                 last_epoch=-1):
        self.total_steps = total_steps
        self.min_lr = min_lr
        if isinstance(warmup_steps, float) and 0 < warmup_steps < 1:
            # Interpret a fractional value as a proportion of total_steps.
            self.warmup_steps = int(warmup_steps * total_steps)
        else:
            self.warmup_steps = int(warmup_steps)
        super().__init__(optimizer, last_epoch)

    def get_lr(self):
        step = self.last_epoch + 1
        lrs = []
        for base_lr in self.base_lrs:
            if step < self.warmup_steps:
                # Linear warmup from 0 to base_lr.
                lr = base_lr * step / self.warmup_steps
            else:
                # Cosine decay from base_lr down to min_lr.
                progress = (step - self.warmup_steps) / max(
                    1, self.total_steps - self.warmup_steps)
                cosine_decay = 0.5 * (1 + math.cos(math.pi * progress))
                lr = self.min_lr + (base_lr - self.min_lr) * cosine_decay
            lrs.append(lr)
        return lrs


def setup():
    """Initialise the NCCL process group and bind this process to its GPU."""
    local_rank = int(os.environ['LOCAL_RANK'])
    torch.cuda.set_device(local_rank)
    dist.init_process_group(
        backend='nccl',
        init_method='env://',
    )


def cleanup():
    """Release memory and tear down the process group."""
    gc.collect()
    torch.cuda.empty_cache()
    dist.destroy_process_group()


def get_fsdp_device():
    """Return the CUDA device assigned to this rank (defaults to cuda:0)."""
    local_rank = int(os.environ.get("LOCAL_RANK", 0))
    device = torch.device(f"cuda:{local_rank}")
    torch.cuda.set_device(device)
    return device

def load_trained_model(model_name):
    """Load a SemiNAT fine-tuned checkpoint on top of the base OLMo-2 weights.

    Note: the hard-coded paths below take precedence over ``model_name``.
    """
    DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

    olmo_path = "/AIRvePFS/ai4science/users/ai4science/users/zyk/seminat_backup/model/OLMo-2-0425-1B"
    pt_path = "/AIRvePFS/ai4science/users/ai4science/users/zyk/seminat/ckp/sft-v4-1e3-len2-fc-chunklimit2-jueduipos/sft-v4-1e3-len2-fc-chunklimit2-jueduipos-steps_300.pt"
    config_path = "/AIRvePFS/ai4science/users/ai4science/users/zyk/seminat_backup/model/OLMo-2-0425-1B/config.json"

    config = AutoConfig.from_pretrained(olmo_path)
    model = Olmo2ForCausalLMForSemiNAT.from_pretrained(olmo_path,
                                                       config=config,
                                                       torch_dtype=torch.bfloat16)
    state_dict = torch.load(pt_path, map_location=DEVICE, weights_only=True)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    print(
        f"Loaded with {len(missing_keys)} missing keys and {len(unexpected_keys)} unexpected keys."
    )
    if missing_keys:
        print("Missing keys:", missing_keys)
    if unexpected_keys:
        print("Unexpected keys:", unexpected_keys)

    model = model.to(DEVICE)

    tokenizer = AutoTokenizer.from_pretrained(olmo_path)

    return model, tokenizer

def setup_model(model_name):
    """Load the SemiNAT model and its tokenizer from a pretrained path."""
    model = Olmo2ForCausalLMForSemiNAT.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    return model, tokenizer

def fsdp_main(args):
    local_rank = int(os.environ['LOCAL_RANK'])
    rank = int(os.environ['RANK'])
    world_size = int(os.environ['WORLD_SIZE'])
    if args.use_wandb and rank == 0:
        wandb.init(entity="SemiNAT", project="SemiNAT-SFT", name=args.run_name)

    model, tokenizer = setup_model(args.model_path)
    model.config.chunk_size_limit = args.chunk_size_limit

    # Either load a pre-tokenised dataset from a pickle, or build one from a
    # parquet file with the dataset class named by --data_type.
    if ".pkl" in args.data_path:
        train_dataset = pickle.load(open(args.data_path, "rb"))
    else:
        datasets = pd.read_parquet(args.data_path)
        train_dataset = eval(f"{args.data_type}")(
            tokenizer,
            datasets,
            args.max_length,
            args.data_processess_num)

    train_sampler = DistributedSampler(train_dataset,
                                       rank=rank,
                                       num_replicas=world_size,
                                       shuffle=True)
    train_dataloader = DataLoader(dataset=train_dataset,
                                  sampler=train_sampler,
                                  batch_size=args.batch_size)

    print(f"Size of train dataset: {len(train_dataset)}")

    setup()

    # Wrap each SemiNAT transformer block as its own FSDP unit.
    Olmo2DecoderLayerForSemiNAT_auto_wrap_policy = functools.partial(
        transformer_auto_wrap_policy,
        transformer_layer_cls={
            Olmo2DecoderLayerForSemiNAT,
            NATDecoderForSemiNAT,
        })

    sharding_strategy: ShardingStrategy = ShardingStrategy.FULL_SHARD
    torch.cuda.set_device(local_rank)

    # bf16 mixed precision for parameters, gradient reduction, and buffers.
    bfSixteen = MixedPrecision(
        param_dtype=torch.bfloat16,
        reduce_dtype=torch.bfloat16,
        buffer_dtype=torch.bfloat16,
    )
    mp_policy = bfSixteen

    model = FSDP(model,
                 auto_wrap_policy=Olmo2DecoderLayerForSemiNAT_auto_wrap_policy,
                 mixed_precision=mp_policy,
                 sharding_strategy=sharding_strategy,
                 device_id=torch.cuda.current_device(),
                 use_orig_params=True)

    optimizer = optim.AdamW(
        model.parameters(),
        lr=args.lr,
        betas=args.betas,
        weight_decay=args.weight_decay,
        eps=args.eps,
    )

    scheduler = WarmupCosineScheduler(
        optimizer=optimizer,
        warmup_steps=args.warmup_steps,
        total_steps=args.total_steps,
        min_lr=args.min_lr
    )

    # Anomaly detection helps localise NaN/Inf gradients but slows training;
    # disable it once the run is stable.
    torch.autograd.set_detect_anomaly(True)

    loss1_list = []
    loss2_list = []
    loss_list = []

    global_step = 0
    for epoch in range(1, args.epochs + 1):
        model.train()
        local_rank = int(os.environ['LOCAL_RANK'])

        if train_sampler:
            train_sampler.set_epoch(epoch)
        if rank == 0:
            inner_pbar = tqdm(range(len(train_dataloader)),
                              colour="blue",
                              desc="r0 Training Epoch")
        for batch in train_dataloader:
            optimizer.zero_grad()
            # The SemiNAT forward returns a pair of losses:
            # loss1 = chunk-length prediction loss, loss2 = NAT token loss.
            loss1, loss2 = model(input_ids=batch[0],
                                 labels=batch[1],
                                 attention_mask=batch[2],
                                 slice_pos=batch[3],
                                 use_cache=False).loss
            loss = loss1 + loss2

            loss1_list.append(loss1.item())
            loss2_list.append(loss2.item())
            loss_list.append(loss.item())

            loss.backward()

            # Log per-module gradient norms. Only rank 0 does this, since
            # wandb is only initialised there (and only when --use_wandb is set).
            if rank == 0 and args.use_wandb:
                for name, module in model.named_modules():
                    total_norm = 0.0
                    param_count = 0
                    for param in module.parameters(recurse=False):
                        if param.grad is not None:
                            total_norm += param.grad.data.norm(2).item()**2
                            param_count += 1
                    if param_count > 0:
                        total_norm = total_norm**0.5
                        wandb.log({f"grad_norm/{name}": total_norm},
                                  step=global_step)

            optimizer.step()

            global_step += 1

            if global_step % args.save_steps == 0:
                # Gather a full (unsharded) state dict on CPU; only rank 0
                # materialises and writes it.
                save_policy = FullStateDictConfig(offload_to_cpu=True,
                                                  rank0_only=True)
                with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT,
                                          save_policy):
                    cpu_state = model.state_dict()

                if rank == 0:
                    print(f"--> steps: {str(global_step)} saving model ...")
                    if not os.path.exists(args.save_path):
                        os.makedirs(args.save_path)
                    save_name = f"{args.save_name}-steps_{str(global_step)}.pt"
                    print(f"--> saving as model name {save_name}")
                    save_path = os.path.join(args.save_path, save_name)
                    torch.save(cpu_state, save_path)

            if rank == 0:
                inner_pbar.update(1)
                if args.use_wandb:
                    # Log losses smoothed over the last 20 steps.
                    wandb.log({
                        "length prediction loss":
                        sum(loss1_list[-20:]) / len(loss1_list[-20:]),
                        "nat loss":
                        sum(loss2_list[-20:]) / len(loss2_list[-20:]),
                        "loss":
                        sum(loss_list[-20:]) / len(loss_list[-20:])
                    })

            # Synchronise ranks; the reduced loss value is not used further.
            dist.all_reduce(loss, op=dist.ReduceOp.SUM)

        if rank == 0:
            inner_pbar.close()

        # The learning-rate schedule is advanced once per epoch.
        scheduler.step()

    dist.barrier()
    cleanup()

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch-size',
                        type=int,
                        default=4,
                        metavar='N',
                        help='input batch size for training (default: 4)')
    parser.add_argument('--model_path', type=str)
    parser.add_argument('--save_path', type=str)
    parser.add_argument('--save_name', type=str)
    parser.add_argument('--data_path', type=str)
    parser.add_argument('--data_type', type=str)
    parser.add_argument('--run_name', type=str)
    parser.add_argument('--max_length', type=int)
    parser.add_argument('--chunk_size_limit', type=int)
    parser.add_argument('--save_steps', type=int, default=5000)
    parser.add_argument('--data_processess_num', type=int, default=8)
    parser.add_argument('--epochs',
                        type=int,
                        default=2,
                        metavar='N',
                        help='number of epochs to train (default: 2)')
    parser.add_argument('--lr',
                        type=float,
                        default=.002,
                        metavar='LR',
                        help='learning rate (default: .002)')
    parser.add_argument('--gamma',
                        type=float,
                        default=0.7,
                        metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--weight_decay', type=float)
    parser.add_argument('--eps', type=float)
    # Note: argparse's type=bool treats any non-empty string as True.
    parser.add_argument('--decay_norm_and_bias', type=bool)
    parser.add_argument(
        "--betas",
        type=float,
        nargs=2,
        default=[0.9, 0.999]
    )
    parser.add_argument('--decay_embeddings', type=bool)
    parser.add_argument('--warmup_steps', type=float)
    parser.add_argument('--total_steps', type=int)
    parser.add_argument('--min_lr', type=float)
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--use_lora', action='store_true', default=False)
    parser.add_argument("--use_wandb",
                        action="store_true",
                        help="whether to use wandb")

    args = parser.parse_args()

    torch.manual_seed(args.seed)

    fsdp_main(args)
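
# Example launch (a sketch, not part of the original script): the script reads
# LOCAL_RANK / RANK / WORLD_SIZE from the environment, so it is expected to be
# started with torchrun. The script name, dataset class name, paths, and
# hyperparameter values below are placeholders; adapt them to your setup.
#
#   torchrun --nproc_per_node=8 train_sft_fsdp.py \
#       --model_path /path/to/OLMo-2-0425-1B \
#       --data_path /path/to/train.parquet \
#       --data_type SFTDataset \
#       --save_path ./ckp --save_name sft-run --run_name sft-run \
#       --max_length 2048 --chunk_size_limit 2 \
#       --lr 1e-3 --weight_decay 0.01 --eps 1e-8 \
#       --warmup_steps 0.03 --total_steps 3000 --min_lr 1e-5 \
#       --use_wandb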