# Hugging Face Space script: export trained LoRA adapters for upload to the Hub.
"""Export fine-tuned LoRA adapters and tokenizer to ./hf-upload.

Loads the Mistral-7B base model in fp16, attaches locally trained LoRA
adapters, and writes the adapter weights (safetensors) plus the base
tokenizer into one directory ready to push to the Hugging Face Hub.
"""
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel, LoraConfig
import os
from huggingface_hub import HfApi, ModelCard

# Base model the adapters were trained against, and where they live locally.
# (Names kept module-level for backward compatibility with the original script.)
base_model = "mistralai/Mistral-7B-v0.1"
adapter_path = "./lora-adapters"  # Your trained LoRA adapters
upload_dir = "./hf-upload"  # Single source of truth for the output location.


def main() -> None:
    """Load base model + adapters and write upload-ready files to ``upload_dir``."""
    # fp16 + device_map="auto" lets a 7B model fit by sharding across
    # available GPU/CPU memory instead of loading everything in fp32 on CPU.
    model = AutoModelForCausalLM.from_pretrained(
        base_model,
        torch_dtype=torch.float16,
        device_map="auto",
    )
    # Attach the trained LoRA adapters on top of the frozen base weights.
    model = PeftModel.from_pretrained(model, adapter_path)
    # Saves only the small adapter weights (safetensors), not the merged
    # base model — uploads stay a few hundred MB instead of ~14 GB.
    model.save_pretrained(upload_dir, safe_serialization=True)
    # Ship the tokenizer alongside so the uploaded repo is usable standalone.
    tokenizer = AutoTokenizer.from_pretrained(base_model)
    tokenizer.save_pretrained(upload_dir)


# Guarded so importing this module no longer triggers a 7B-model download.
if __name__ == "__main__":
    main()