import os

import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer


BASE_MODEL = os.environ.get(
    "BASE_MODEL", "/root/autodl-tmp/meta-llama/Llama-Guard-3-8B"
)
assert (
    BASE_MODEL
), "Please specify a value for BASE_MODEL environment variable, e.g. `export BASE_MODEL=huggyllama/llama-7b`"


# Tokenizer for the base model.
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)


# Load the base model in fp16 on CPU so merging does not require a GPU.
base_model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL,
    torch_dtype=torch.float16,
    device_map={"": "cpu"},
)


# Keep a copy of one base weight so we can verify that loading the adapter
# leaves the base weights untouched, and that merging later changes them.
first_weight = base_model.model.layers[0].self_attn.q_proj.weight
first_weight_old = first_weight.clone()


# Wrap the base model with the trained LoRA adapter (path is a placeholder).
lora_model = PeftModel.from_pretrained(
    base_model,
    "/root/PATH/to/save/PEFT/model",
    device_map={"": "cpu"},
    torch_dtype=torch.float16,
)


# Handle to the adapter-wrapped q_proj weight (kept for manual inspection).
lora_weight = lora_model.base_model.model.model.layers[0].self_attn.q_proj.weight


# Loading the adapter must not modify the base weights: LoRA keeps its
# low-rank deltas in separate tensors until they are explicitly merged.
assert torch.allclose(first_weight_old, first_weight)


# Fold the LoRA deltas into the base weights and strip the PEFT wrappers.
lora_model = lora_model.merge_and_unload()


# Put the model in eval mode before export.
lora_model.train(False)


# After merging, the base weights must have changed in place.
assert not torch.allclose(first_weight_old, first_weight)


# Clean up the state dict: strip any residual PEFT prefix and drop
# LoRA-specific tensors so the keys match a plain LlamaForCausalLM.
lora_model_sd = lora_model.state_dict()
deloreanized_sd = {
    k.replace("base_model.model.", ""): v
    for k, v in lora_model_sd.items()
    if "lora" not in k
}


# Export the merged weights in shards of roughly 5 GB each.
base_model.save_pretrained(
    "./8b", state_dict=deloreanized_sd, max_shard_size="5000MB"
)
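

# --- Optional verification sketch (not part of the original export) ---
# The tokenizer loaded above is never written out, so "./8b" would lack
# tokenizer files; persisting it here is an assumption about the intent.
tokenizer.save_pretrained("./8b")

# Minimal smoke test, assuming enough CPU RAM to hold a second copy: reload
# the merged checkpoint and confirm a forward pass produces logits.
merged = AutoModelForCausalLM.from_pretrained("./8b", torch_dtype=torch.float16)
inputs = tokenizer("Hello", return_tensors="pt")
with torch.no_grad():
    logits = merged(**inputs).logits
print(logits.shape)  # (1, sequence_length, vocab_size)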