# Benchmark-Single / continue_finetune.py
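# Continues fine-tuning of a LLaMA model from a previously saved PEFT adapter
# checkpoint, using transformers.Trainer.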
import argparse
import os
import sys
from typing import List
import torch
import transformers
from peft import PeftModel
from peft import (
    TaskType,
    LoraConfig,
    get_peft_model,
    get_peft_model_state_dict,
    set_peft_model_state_dict,
)
from transformers import LlamaForCausalLM, LlamaTokenizer, LlamaConfig
from utils import *
from collator import Collator
from rq_llama import *
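# Example launch (illustrative only; the actual flag names and defaults are defined
# by parse_finetune_args in utils, which is not shown here):
#   torchrun --nproc_per_node=4 continue_finetune.py \
#       --base_model <path-to-base-llama> --ckpt_path <path-to-peft-checkpoint> --output_dir <out-dir>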
parser = argparse.ArgumentParser(description = 'rqllama-finetune')
parser = parse_finetune_args(parser)
args = parser.parse_args()
set_seed(args.seed)
ensure_dir(args.output_dir)
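# Detect a distributed (DDP) launch from the environment variables set by torchrun;
# under DDP each process places the model on its own local GPU instead of "auto" sharding.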
device_map = "auto"
world_size = int(os.environ.get("WORLD_SIZE", 1))
ddp = world_size != 1
local_rank = int(os.environ.get("LOCAL_RANK") or 0)
if local_rank == 0:
    print(vars(args))
if ddp:
    device_map = {"": local_rank}
train_data, valid_data = load_finetune_datasets(args)
tokenizer = LlamaTokenizer.from_pretrained(args.ckpt_path)
base_model = LlamaForCausalLM.from_pretrained(args.base_model, torch_dtype=torch.float16, low_cpu_mem_usage = True, device_map = device_map)
base_model.resize_token_embeddings(len(tokenizer))
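# Attach the trained PEFT adapter from the checkpoint on top of the resized base model.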
rqllama = PeftModel.from_pretrained(base_model, args.ckpt_path, torch_dtype = torch.float16, device_map = device_map)
if local_rank == 0:
    print("token num:", len(tokenizer))
    print("data num:", len(train_data))
collator = Collator(args, tokenizer)
rqllama.train()
if local_rank == 0:
    rqllama.print_trainable_parameters()
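# Standard transformers.Trainer setup; all hyperparameters come from the parsed CLI arguments.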
trainer = transformers.Trainer(
    model = rqllama,
    train_dataset = train_data,
    eval_dataset = valid_data,
    args = transformers.TrainingArguments(
        seed = args.seed,
        per_device_train_batch_size = args.per_device_batch_size,
        per_device_eval_batch_size = args.per_device_batch_size,
        gradient_accumulation_steps = args.gradient_accumulation_steps,
        warmup_ratio = args.warmup_ratio,
        num_train_epochs = args.epochs,
        learning_rate = args.learning_rate,
        weight_decay = args.weight_decay,
        lr_scheduler_type = args.lr_scheduler_type,
        fp16 = args.fp16,
        bf16 = args.bf16,
        logging_steps = args.logging_step,
        optim = args.optim,
        gradient_checkpointing = True,
        evaluation_strategy = args.save_and_eval_strategy,
        save_strategy = args.save_and_eval_strategy,
        eval_steps = args.save_and_eval_steps,
        save_steps = args.save_and_eval_steps,
        output_dir = args.output_dir,
        save_total_limit = 5,
        load_best_model_at_end = True,
        deepspeed = args.deepspeed,
        ddp_find_unused_parameters = False if ddp else None,
        report_to = None,
        eval_delay = 1 if args.save_and_eval_strategy == "epoch" else 2000,
        dataloader_num_workers = args.dataloader_num_workers,
        dataloader_prefetch_factor = args.dataloader_prefetch_factor,
        remove_unused_columns = args.remove_unused_columns,
    ),
    tokenizer = tokenizer,
    data_collator = collator,
)
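# The KV cache is only useful for generation and conflicts with gradient checkpointing,
# so disable it for training.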
rqllama.config.use_cache = False
# torch.compile gives a speedup on PyTorch 2.x (not supported on Windows).
if torch.__version__ >= "2" and sys.platform != "win32":
    rqllama = torch.compile(rqllama)
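# Train (optionally resuming from an existing checkpoint), then persist the trainer state and final model.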
trainer.train(resume_from_checkpoint = args.resume_from_checkpoint)
trainer.save_state()
trainer.save_model(output_dir = args.output_dir)
if local_rank == 0:
    print('rqllama fine-tune finished.')