Hugging Face Space status: Sleeping
File size: 717 Bytes
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel, LoraConfig
import os
from huggingface_hub import HfApi, ModelCard

# Prepare a fine-tuned LoRA adapter checkpoint for upload to the Hugging Face
# Hub: load the base model, attach the trained adapters, then re-save the
# adapters and tokenizer into ./hf-upload in safetensors format.

base_model = "mistralai/Mistral-7B-v0.1"
adapter_path = "./lora-adapters"  # Your trained LoRA adapters

# Load the base model in half precision; device_map="auto" lets accelerate
# place layers on the available device(s) automatically.
model = AutoModelForCausalLM.from_pretrained(
    base_model,
    torch_dtype=torch.float16,
    device_map="auto",
)

# Attach the trained LoRA adapters on top of the frozen base weights.
model = PeftModel.from_pretrained(model, adapter_path)

# Save only the adapter weights (not the merged full model) for upload;
# safe_serialization=True writes .safetensors instead of pickle.
model.save_pretrained("./hf-upload", safe_serialization=True)

# Save the base model's tokenizer alongside the adapters so the uploaded
# repository is self-contained.
tokenizer = AutoTokenizer.from_pretrained(base_model)
tokenizer.save_pretrained("./hf-upload")