from unsloth import FastLanguageModel
from unsloth.chat_templates import get_chat_template  # get_chat_template lives in unsloth.chat_templates
import torch

# Load the base model, then merge the LoRA adapter into it
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "mistralai/Mistral-Small-3.1-24B-Base-2503",
    dtype = None,
    max_seq_length = 2048,
    load_in_4bit = True,
)

# The LoRA config below must match the one used to train checkpoint-4000,
# otherwise the adapter weights will not line up with the injected modules.
model = FastLanguageModel.get_peft_model(
    model,
    r = 16, # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
    target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
                      "gate_proj", "up_proj", "down_proj",],
    lora_alpha = 16,
    lora_dropout = 0, # Supports any, but = 0 is optimized
    bias = "none",    # Supports any, but = "none" is optimized
    # [NEW] "unsloth" uses 30% less VRAM, fits 2x larger batch sizes!
    use_gradient_checkpointing = "unsloth", # True or "unsloth" for very long context
    random_state = 3407,
    use_rslora = False,  # We support rank stabilized LoRA
    loftq_config = None, # And LoftQ
)

# Load the fine-tuned adapter weights into the existing "default" adapter
model.load_adapter("/home/outputs/checkpoint-4000", "default")

# Apply the ChatML template the model was trained with; map_eos_token remaps EOS to <|im_end|>
tokenizer = get_chat_template(tokenizer, chat_template = "chatml", map_eos_token = True)
print(type(model))  # sanity check: should be a PEFT-wrapped model

# Save the merged model correctly (using Unsloth's official function)
model.save_pretrained_merged(
    save_directory = "/home/merged_model1",
    tokenizer = tokenizer,
    save_method = "merged_16bit",
)
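
# --- Optional sanity check (a minimal sketch, not part of the original script) ---
# Assuming the merge above succeeded, the 16-bit model in /home/merged_model1 can be
# reloaded with FastLanguageModel and queried through the saved ChatML template.
# The prompt and generation settings below are illustrative only.

merged_model, merged_tokenizer = FastLanguageModel.from_pretrained(
    model_name = "/home/merged_model1",  # directory written by save_pretrained_merged above
    max_seq_length = 2048,
    dtype = None,
    load_in_4bit = False,  # the merged weights are already 16-bit; set True to save VRAM
)
FastLanguageModel.for_inference(merged_model)  # enable Unsloth's fast inference path

messages = [{"role": "user", "content": "Summarize what merging a LoRA adapter does."}]
input_ids = merged_tokenizer.apply_chat_template(
    messages,
    tokenize = True,
    add_generation_prompt = True,
    return_tensors = "pt",
).to(merged_model.device)

outputs = merged_model.generate(input_ids = input_ids, max_new_tokens = 128)
print(merged_tokenizer.decode(outputs[0], skip_special_tokens = True))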