NemoVonNirgend committed on
Commit
3bc0e0e
·
verified ·
1 Parent(s): b0911c1

Upload merge_ministral.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. merge_ministral.py +37 -22
merge_ministral.py CHANGED
@@ -1,54 +1,69 @@
1
  # /// script
2
- # dependencies = ["torch", "transformers>=5.0.0rc0", "peft", "accelerate", "huggingface_hub", "safetensors", "mistral-common>=1.8.6"]
 
 
 
 
 
 
 
 
3
  # ///
4
 
 
 
 
 
 
5
  import torch
6
  import os
7
  from peft import PeftModel
8
- from transformers import AutoModel, AutoTokenizer
9
  from huggingface_hub import HfApi
10
 
11
- print("=== Ministral 14B LoRA Merge ===")
12
- print("Base: mistralai/Ministral-3-14B-Base-2512")
13
- print("LoRA: RoleModel/ministral-14b-reasoning-merged")
14
- print("Output: RoleModel/ministral-14b-merged-official")
 
 
 
15
 
16
- print("[1/5] Loading base model...")
17
- # Use AutoModel since this is a vision-language model (Pixtral/Mistral3)
18
- base = AutoModel.from_pretrained(
19
- "mistralai/Ministral-3-14B-Base-2512",
20
  torch_dtype=torch.bfloat16,
21
  device_map="auto",
22
  trust_remote_code=True
23
  )
24
- print(f"Base model class: {base.__class__.__name__}")
25
 
26
- print("[2/5] Loading LoRA adapter...")
27
- model = PeftModel.from_pretrained(base, "RoleModel/ministral-14b-reasoning-merged")
28
  print("LoRA adapter loaded")
29
 
30
- print("[3/5] Merging weights...")
31
  merged = model.merge_and_unload()
32
  print("Merge complete")
33
 
34
- print("[4/5] Saving merged model...")
35
  merged.save_pretrained("./merged-model", safe_serialization=True)
36
  print("Model saved locally")
37
 
38
- print("[4b/5] Saving tokenizer...")
39
- tok = AutoTokenizer.from_pretrained("mistralai/Ministral-3-14B-Base-2512", trust_remote_code=True)
40
  tok.save_pretrained("./merged-model")
41
  print("Tokenizer saved")
42
 
43
- print("[5/5] Pushing to Hub...")
44
  api = HfApi()
45
- api.create_repo("RoleModel/ministral-14b-merged-official", exist_ok=True, private=True, token=os.environ.get("HF_TOKEN"))
46
  api.upload_folder(
47
  folder_path="./merged-model",
48
- repo_id="RoleModel/ministral-14b-merged-official",
49
  repo_type="model",
50
  token=os.environ.get("HF_TOKEN")
51
  )
52
 
53
- print("=== DONE ===")
54
- print("Merged model: https://huggingface.co/RoleModel/ministral-14b-merged-official")
 
# /// script
# dependencies = [
#     "torch",
#     "transformers>=5.0.0rc0",
#     "peft",
#     "accelerate",
#     "huggingface_hub",
#     "safetensors",
#     "mistral-common>=1.8.6",
# ]
# ///

"""Merge the Ministral 14B LoRA adapter into the official Mistral base model.

Loads the base checkpoint, applies the PEFT adapter on top, merges the LoRA
weights into the base weights, saves the merged model and tokenizer locally,
then uploads the folder to a private Hub repo.

Run on HuggingFace Jobs with:
    hf jobs uv run --flavor a10g-large --timeout 2h --secrets HF_TOKEN merge_ministral.py

Environment:
    HF_TOKEN     -- required; token used to create and push the output repo.
    OUTPUT_REPO  -- optional; overrides the destination repo id.
"""

import os

import torch
from huggingface_hub import HfApi
from peft import PeftModel
from transformers import AutoTokenizer, Mistral3ForConditionalGeneration

BASE_MODEL = "mistralai/Ministral-3-14B-Base-2512"
LORA_ADAPTER = "RoleModel/ministral-14b-reasoning-merged"
OUTPUT_REPO = os.environ.get("OUTPUT_REPO", "RoleModel/ministral-14b-merged-official")
LOCAL_DIR = "./merged-model"

# Fail fast: every Hub call below needs the token, and a missing token would
# otherwise only surface after the (slow) model load and merge.
HF_TOKEN = os.environ.get("HF_TOKEN")
if not HF_TOKEN:
    raise SystemExit("HF_TOKEN environment variable is not set")

print(f"Base model: {BASE_MODEL}")
print(f"LoRA adapter: {LORA_ADAPTER}")
print(f"Output repo: {OUTPUT_REPO}")

print("\n=== Loading base model ===")
# transformers v5 (pinned above) uses `dtype=`; the old `torch_dtype=` alias
# was deprecated in 4.x and removed in v5.
base = Mistral3ForConditionalGeneration.from_pretrained(
    BASE_MODEL,
    dtype=torch.bfloat16,
    device_map="auto",
    trust_remote_code=True,
)
print(f"Base model loaded: {base.__class__.__name__}")

print("\n=== Loading LoRA adapter ===")
model = PeftModel.from_pretrained(base, LORA_ADAPTER)
print("LoRA adapter loaded")

print("\n=== Merging weights ===")
merged = model.merge_and_unload()
print("Merge complete")

print("\n=== Saving merged model ===")
merged.save_pretrained(LOCAL_DIR, safe_serialization=True)
print("Model saved locally")

print("\n=== Saving tokenizer ===")
# Ship the base model's tokenizer so the merged repo is self-contained.
tok = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)
tok.save_pretrained(LOCAL_DIR)
print("Tokenizer saved")

print(f"\n=== Pushing to Hub: {OUTPUT_REPO} ===")
api = HfApi()
api.create_repo(OUTPUT_REPO, exist_ok=True, private=True, token=HF_TOKEN)
api.upload_folder(
    folder_path=LOCAL_DIR,
    repo_id=OUTPUT_REPO,
    repo_type="model",
    token=HF_TOKEN,
)

print("\n=== DONE ===")
print(f"Merged model available at: https://huggingface.co/{OUTPUT_REPO}")