# finetune-model/src/train/train_cls.py
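"""Fine-tune Qwen2-VL / Qwen2.5-VL for sequence classification.

Supports full fine-tuning, selective freezing/unfreezing, LoRA (optionally on
the vision tower), and 4/8-bit quantized training via bitsandbytes. Arguments
are parsed from ModelArguments, DataArguments, and CLSArguments.
"""
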
import ast
import os
import pathlib

import torch
from peft import LoraConfig, get_peft_model
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from transformers import (
    AutoConfig,
    AutoProcessor,
    BitsAndBytesConfig,
    EarlyStoppingCallback,
    HfArgumentParser,
)

from src.dataset import make_classification_data_module
from src.loss import get_loss_function
from src.model import Qwen2VLForSequenceClassification, Qwen2_5_VLForSequenceClassification
from src.params import DataArguments, ModelArguments, CLSArguments
from src.trainer import QwenCLSTrainer
from train.train_utils import (
    get_peft_state_maybe_zero_3,
    get_peft_state_non_lora_maybe_zero_3,
    safe_save_model_for_hf_trainer,
)

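# Evaluation metrics for the HF Trainer: accuracy plus weighted precision/recall/F1.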
def compute_metrics(pred):
preds = pred.predictions.argmax(axis=-1)
labels = pred.label_ids
acc = accuracy_score(labels, preds)
prec, rec, f1, _ = precision_recall_fscore_support(
labels, preds, average="weighted")
return {
"acc": acc,
"precision": prec,
"recall": rec,
"f1": f1,
}
local_rank = None
def rank0_print(*args):
if local_rank == 0 or local_rank == '0' or local_rank is None:
print(*args)
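
# Collect names of Linear/Embedding modules to use as LoRA targets, skipping any
# module whose name contains one of the excluded keywords.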
def find_target_linear_names(model, num_lora_modules=-1, lora_namespan_exclude=[], verbose=True):
linear_cls = torch.nn.modules.Linear
embedding_cls = torch.nn.modules.Embedding
lora_module_names = []
for name, module in model.named_modules():
if any(ex_keyword in name for ex_keyword in lora_namespan_exclude):
continue
if isinstance(module, (linear_cls, embedding_cls)):
lora_module_names.append(name)
if num_lora_modules > 0:
lora_module_names = lora_module_names[-num_lora_modules:]
if verbose:
rank0_print(f"Found {len(lora_module_names)} lora modules: {lora_module_names}")
return lora_module_names
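
# Helpers for freezing / unfreezing parameter groups according to the training arguments.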
def set_requires_grad(parameters, requires_grad):
for p in parameters:
p.requires_grad = requires_grad
def configure_vision_tower(model, training_args, compute_dtype, device):
vision_model_params = model.visual.parameters()
set_requires_grad(vision_model_params, not training_args.freeze_vision_tower)
# Handle merger specifically
merger_params = model.visual.merger.parameters()
set_requires_grad(merger_params, not training_args.freeze_merger)
def configure_llm(model, training_args):
llm_params = model.language_model.parameters()
set_requires_grad(llm_params, not training_args.freeze_llm)
def unfreeze_topk_layers(model, k_llm: int = 0, k_vis: int = 0):
if k_llm and hasattr(model, "language_model") and hasattr(model.language_model, "layers"):
for layer in model.language_model.layers[-k_llm:]:
for p in layer.parameters():
p.requires_grad = True
if k_vis and hasattr(model, "visual") and hasattr(model.visual, "blocks"):
for blk in model.visual.blocks[-k_vis:]:
for p in blk.parameters():
p.requires_grad = True
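
# Entry point: parse arguments, build the (optionally quantized, LoRA-wrapped)
# Qwen-VL classifier, and run training.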
def train():
global local_rank
parser = HfArgumentParser(
(ModelArguments, DataArguments, CLSArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if training_args.lora_enable and not training_args.freeze_llm:
raise ValueError("If `lora_enable` is True, `freeze_llm` must also be True.")
if not training_args.lora_enable:
assert not training_args.vision_lora, \
"Error: training_args.lora_enable is not enabled, but training_args.vision_lora is enabled."
if training_args.vision_lora and not training_args.freeze_vision_tower:
raise ValueError("If `vision_lora` is True, `freeze_vision_tower` must also be True.")
else:
if training_args.lora_namespan_exclude is not None:
training_args.lora_namespan_exclude = ast.literal_eval(training_args.lora_namespan_exclude)
else:
training_args.lora_namespan_exclude = []
if not training_args.vision_lora:
training_args.lora_namespan_exclude += ["visual"]
local_rank = training_args.local_rank
compute_dtype = (torch.float16 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32))
data_args.compute_dtype = compute_dtype
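    # Optional 4-/8-bit quantization via bitsandbytes; the vision tower and the
    # classification head ("score") are kept out of int4/int8.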
bnb_model_from_pretrained_args = {}
if training_args.bits in [4,8]:
bnb_model_from_pretrained_args.update(dict(
device_map={"":training_args.device},
quantization_config = BitsAndBytesConfig(
load_in_4bit=training_args.bits==4,
load_in_8bit=training_args.bits==8,
llm_int8_skip_modules=["visual", "score"],
llm_int8_threshold=6.0,
llm_int8_has_fp16_weight=False,
bnb_4bit_compute_dtype=compute_dtype,
bnb_4bit_use_double_quant=training_args.double_quant,
bnb_4bit_quant_type=training_args.quant_type,
)
))
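    # Instantiate the classification wrapper matching the base model family and
    # attach the MLP-head and label-count settings to its config.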
    cfg = AutoConfig.from_pretrained(model_args.model_id)
    cfg.mlp_head_hidden_dim = training_args.mlp_head_dim
    cfg.mlp_head_dropout = training_args.mlp_head_dropout
    cfg.num_labels = training_args.num_labels

    model_cls = (
        Qwen2_5_VLForSequenceClassification
        if "Qwen2.5" in model_args.model_id
        else Qwen2VLForSequenceClassification
    )
    model = model_cls.from_pretrained(
        model_args.model_id,
        config=cfg,
        torch_dtype=compute_dtype,
        attn_implementation="flash_attention_2" if not training_args.disable_flash_attn2 else "sdpa",
        **bnb_model_from_pretrained_args
    )
model.config.use_cache = False
model.config.num_labels = training_args.num_labels
model_to_configure = model
configure_llm(model_to_configure, training_args)
configure_vision_tower(model_to_configure, training_args, compute_dtype, training_args.device)
unfreeze_topk_layers(
model_to_configure,
k_llm=getattr(training_args, "unfreeze_topk_llm", 0),
k_vis=getattr(training_args, "unfreeze_topk_vision", 0),
)
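    # Prepare the quantized model for k-bit training (layer-norm casting, input grads).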
if training_args.bits in [4,8]:
        # Record the compute dtype on the config (fp16 -> float16, bf16 -> bfloat16).
        model.config.torch_dtype = (torch.float16 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32))
from peft import prepare_model_for_kbit_training
model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=training_args.gradient_checkpointing, gradient_checkpointing_kwargs={"use_reentrant": True})
if training_args.gradient_checkpointing:
        # Ensure embedding outputs require grad so gradient checkpointing works with frozen inputs.
        if hasattr(model, "enable_input_require_grads"):
            model.enable_input_require_grads()
else:
def make_inputs_require_grad(module, input, output):
output.requires_grad_(True)
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
training_args.gradient_checkpointing_kwargs = {"use_reentrant": True}
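    # Wrap the model with LoRA adapters on the discovered Linear/Embedding modules.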
if training_args.lora_enable:
lora_namespan_exclude = training_args.lora_namespan_exclude
peft_config = LoraConfig(
r=training_args.lora_rank,
lora_alpha=training_args.lora_alpha,
target_modules=find_target_linear_names(model, lora_namespan_exclude=lora_namespan_exclude, num_lora_modules=training_args.num_lora_modules),
lora_dropout=training_args.lora_dropout,
bias=training_args.lora_bias,
task_type="CAUSAL_LM",
)
rank0_print("Adding LoRA to the model...")
model = get_peft_model(model, peft_config)
        # get_peft_model freezes the vision tower and merger again.
        # The configure_* helpers could be re-run here, but they do not always take effect,
        # so the relevant parameters are re-enabled manually below. To be revisited.
if not training_args.freeze_vision_tower:
for name, param in model.named_parameters():
if "visual" in name:
param.requires_grad = True
if not training_args.freeze_merger:
for name, param in model.named_parameters():
if "merger" in name:
param.requires_grad = True
processor = AutoProcessor.from_pretrained(model_args.model_id)
# model.config.tokenizer_model_max_length = processor.tokenizer.model_max_length
model.config.pad_token_id = processor.tokenizer.pad_token_id
if training_args.bits in [4, 8]:
from peft.tuners.lora import LoraLayer
for name, module in model.named_modules():
if isinstance(module, LoraLayer):
if training_args.bf16:
module = module.to(torch.bfloat16)
if 'norm' in name:
module = module.to(torch.float32)
if 'score' in name or 'embed_token' in name:
if hasattr(module, 'weight'):
if training_args.bf16 and module.weight.dtype == torch.float32:
module = module.to(torch.bfloat16)
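    # Build the classification dataset/collator; samples_per_class is handed to the
    # loss factory (e.g. for class-balanced or weighted losses).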
data_module = make_classification_data_module(model_id=model_args.model_id,
processor=processor,
data_args=data_args)
samples_per_class = data_module.pop("samples_per_class")
loss_fn = get_loss_function(training_args, samples_per_class=samples_per_class)
model.loss_fn = loss_fn.to(model.dtype if hasattr(model, "dtype") else torch.float32)
callback_list = None
if training_args.early_stopping_patience > 0:
early_stop_cb = EarlyStoppingCallback(
early_stopping_patience=training_args.early_stopping_patience,
early_stopping_threshold=training_args.early_stopping_threshold,
)
callback_list = [early_stop_cb]
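    # Custom trainer for the multimodal classification setup; resumes automatically
    # if a checkpoint already exists in output_dir.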
trainer = QwenCLSTrainer(
model=model,
processing_class=processor,
args=training_args,
compute_metrics=compute_metrics,
callbacks=callback_list,
**data_module,
)
if list(pathlib.Path(training_args.output_dir).glob("checkpoint-*")):
trainer.train(resume_from_checkpoint=True)
else:
trainer.train()
trainer.save_state()
model.config.use_cache = True
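    # With LoRA, save adapter weights and the remaining trainable (non-LoRA) parameters
    # separately; otherwise fall back to the standard safe save.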
if training_args.lora_enable:
state_dict = get_peft_state_maybe_zero_3(
model.named_parameters(), training_args.lora_bias
)
non_lora_state_dict = get_peft_state_non_lora_maybe_zero_3(
model.named_parameters(), require_grad_only=True
)
if local_rank == 0 or local_rank == -1:
model.config.save_pretrained(training_args.output_dir)
model.save_pretrained(training_args.output_dir, state_dict=state_dict)
torch.save(non_lora_state_dict, os.path.join(training_args.output_dir, "non_lora_state_dict.bin"))
else:
safe_save_model_for_hf_trainer(trainer, output_dir=training_args.output_dir)
if __name__ == "__main__":
train()