# scripts/merge_ministral.py
# NemoVonNirgend
# Upload merge_ministral.py with huggingface_hub
# commit 925878c (verified)
# /// script
# dependencies = [
# "torch",
# "transformers>=5.0.0rc0",
# "peft",
# "accelerate",
# "huggingface_hub",
# "safetensors",
# "mistral-common>=1.8.6",
# ]
# ///
"""
Merge Ministral 14B LoRA adapter with the official Mistral base model.
Run on HuggingFace Jobs with: hf jobs uv run --flavor a10g-large --timeout 2h --secrets HF_TOKEN merge_ministral.py
"""
import torch
import os
from peft import PeftModel
from transformers import Mistral3ForConditionalGeneration, AutoTokenizer
from huggingface_hub import HfApi
# ---- configuration --------------------------------------------------------
BASE_MODEL = "mistralai/Ministral-3-14B-Base-2512"
LORA_ADAPTER = "RoleModel/ministral-14b-reasoning-merged"
# Destination repo is overridable via the OUTPUT_REPO env var.
OUTPUT_REPO = os.environ.get("OUTPUT_REPO", "RoleModel/ministral-14b-merged-official")
# Local staging directory for the merged checkpoint before upload.
LOCAL_DIR = "./merged-model"


def main() -> None:
    """Merge the LoRA adapter into the base model, save locally, push to the Hub.

    Side effects: downloads the base model and adapter, writes the merged
    checkpoint + tokenizer to LOCAL_DIR, creates/updates the private repo
    OUTPUT_REPO on the Hugging Face Hub. Requires HF_TOKEN in the environment
    for the Hub push and a single CUDA GPU for the merge.
    """
    hf_token = os.environ.get("HF_TOKEN")

    print(f"Base model: {BASE_MODEL}")
    print(f"LoRA adapter: {LORA_ADAPTER}")
    print(f"Output repo: {OUTPUT_REPO}")

    print("\n=== Loading base model ===")
    # Pin everything to one GPU (no accelerate offloading): merge_and_unload
    # needs all adapter/base weights co-resident to fold the deltas in.
    base = Mistral3ForConditionalGeneration.from_pretrained(
        BASE_MODEL,
        # transformers 5.x renamed `torch_dtype` -> `dtype`; the script pins
        # transformers>=5.0.0rc0, so use the new keyword.
        dtype=torch.bfloat16,
        device_map="cuda:0",  # single GPU, no offloading
        trust_remote_code=True,
        low_cpu_mem_usage=True,
    )
    print(f"Base model loaded: {base.__class__.__name__}")

    print("\n=== Loading LoRA adapter ===")
    model = PeftModel.from_pretrained(base, LORA_ADAPTER)
    print("LoRA adapter loaded")

    print("\n=== Merging weights ===")
    # Folds the LoRA deltas into the base weights and strips the PEFT wrappers.
    merged = model.merge_and_unload()
    print("Merge complete")

    print("\n=== Moving to CPU for save ===")
    # Save from CPU so serialization does not need GPU headroom.
    merged = merged.cpu()
    torch.cuda.empty_cache()

    print("\n=== Saving merged model ===")
    merged.save_pretrained(LOCAL_DIR, safe_serialization=True)
    print("Model saved locally")

    print("\n=== Saving tokenizer ===")
    # Ship the base model's tokenizer alongside the merged weights so the
    # output repo is directly loadable.
    tok = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)
    tok.save_pretrained(LOCAL_DIR)
    print("Tokenizer saved")

    print(f"\n=== Pushing to Hub: {OUTPUT_REPO} ===")
    api = HfApi()
    api.create_repo(OUTPUT_REPO, exist_ok=True, private=True, token=hf_token)
    api.upload_folder(
        folder_path=LOCAL_DIR,
        repo_id=OUTPUT_REPO,
        repo_type="model",
        token=hf_token,
    )

    print("\n=== DONE ===")
    print(f"Merged model available at: https://huggingface.co/{OUTPUT_REPO}")


if __name__ == "__main__":
    main()