# /// script
# dependencies = [
# "torch",
# "transformers>=5.0.0rc0",
# "peft",
# "accelerate",
# "huggingface_hub",
# "safetensors",
# "mistral-common>=1.8.6",
# ]
# ///
"""
Merge Ministral 14B LoRA adapter with the official Mistral base model.
Run on HuggingFace Jobs with: hf jobs uv run --flavor a10g-large --timeout 2h --secrets HF_TOKEN merge_ministral_official.py
"""
import torch
import os
from peft import PeftModel
from transformers import Mistral3ForConditionalGeneration, AutoTokenizer
from huggingface_hub import HfApi
BASE_MODEL = "mistralai/Ministral-3-14B-Base-2512"
LORA_ADAPTER = "RoleModel/ministral-14b-reasoning-merged"
OUTPUT_REPO = os.environ.get("OUTPUT_REPO", "RoleModel/ministral-14b-merged-official")


def main() -> None:
    """Merge the LoRA adapter into the base model, save, and push to the Hub.

    Side effects: downloads BASE_MODEL and LORA_ADAPTER, writes the merged
    checkpoint plus tokenizer to ./merged-model, and uploads that folder to
    OUTPUT_REPO (created private if missing). Requires a CUDA device and an
    HF_TOKEN environment variable with write access.
    """
    # Hoisted: the same token is used for repo creation and upload below.
    hf_token = os.environ.get("HF_TOKEN")

    print(f"Base model: {BASE_MODEL}")
    print(f"LoRA adapter: {LORA_ADAPTER}")
    print(f"Output repo: {OUTPUT_REPO}")

    print("\n=== Loading base model ===")
    # Load everything onto a single GPU so merge_and_unload never touches
    # accelerate-offloaded (CPU/disk) weights during the in-place merge.
    base = Mistral3ForConditionalGeneration.from_pretrained(
        BASE_MODEL,
        dtype=torch.bfloat16,  # transformers v5: `torch_dtype` is deprecated in favor of `dtype`
        device_map="cuda:0",  # single GPU, no offloading
        trust_remote_code=True,
        low_cpu_mem_usage=True,
    )
    print(f"Base model loaded: {base.__class__.__name__}")

    print("\n=== Loading LoRA adapter ===")
    model = PeftModel.from_pretrained(base, LORA_ADAPTER)
    print("LoRA adapter loaded")

    print("\n=== Merging weights ===")
    # Folds the LoRA deltas into the base weights and drops the adapter wrappers.
    merged = model.merge_and_unload()
    print("Merge complete")

    print("\n=== Moving to CPU for save ===")
    # Serialize from CPU so GPU memory is released before safetensors
    # materializes the shard buffers.
    merged = merged.cpu()
    torch.cuda.empty_cache()

    print("\n=== Saving merged model ===")
    merged.save_pretrained("./merged-model", safe_serialization=True)
    print("Model saved locally")

    print("\n=== Saving tokenizer ===")
    # Tokenizer comes from the base model; the adapter does not change vocab.
    tok = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)
    tok.save_pretrained("./merged-model")
    print("Tokenizer saved")

    print(f"\n=== Pushing to Hub: {OUTPUT_REPO} ===")
    api = HfApi()
    api.create_repo(OUTPUT_REPO, exist_ok=True, private=True, token=hf_token)
    api.upload_folder(
        folder_path="./merged-model",
        repo_id=OUTPUT_REPO,
        repo_type="model",
        token=hf_token,
    )
    print("\n=== DONE ===")
    print(f"Merged model available at: https://huggingface.co/{OUTPUT_REPO}")


if __name__ == "__main__":
    main()