Upload merge_ministral.py with huggingface_hub
Browse files- merge_ministral.py +8 -2
merge_ministral.py
CHANGED
|
@@ -30,11 +30,13 @@ print(f"LoRA adapter: {LORA_ADAPTER}")
|
|
| 30 |
print(f"Output repo: {OUTPUT_REPO}")
|
| 31 |
|
| 32 |
print("\n=== Loading base model ===")
|
|
|
|
| 33 |
base = Mistral3ForConditionalGeneration.from_pretrained(
|
| 34 |
BASE_MODEL,
|
| 35 |
torch_dtype=torch.bfloat16,
|
| 36 |
-
device_map="auto",
|
| 37 |
-
trust_remote_code=True
|
|
|
|
| 38 |
)
|
| 39 |
print(f"Base model loaded: {base.__class__.__name__}")
|
| 40 |
|
|
@@ -46,6 +48,10 @@ print("\n=== Merging weights ===")
|
|
| 46 |
merged = model.merge_and_unload()
|
| 47 |
print("Merge complete")
|
| 48 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 49 |
print("\n=== Saving merged model ===")
|
| 50 |
merged.save_pretrained("./merged-model", safe_serialization=True)
|
| 51 |
print("Model saved locally")
|
|
|
|
| 30 |
print(f"Output repo: {OUTPUT_REPO}")
|
| 31 |
|
| 32 |
print("\n=== Loading base model ===")
|
| 33 |
+
# Load directly onto a single GPU (no accelerate offloading) for the merge; moved to CPU later only for saving
|
| 34 |
base = Mistral3ForConditionalGeneration.from_pretrained(
|
| 35 |
BASE_MODEL,
|
| 36 |
torch_dtype=torch.bfloat16,
|
| 37 |
+
device_map="cuda:0", # Single GPU, no offloading
|
| 38 |
+
trust_remote_code=True,
|
| 39 |
+
low_cpu_mem_usage=True,
|
| 40 |
)
|
| 41 |
print(f"Base model loaded: {base.__class__.__name__}")
|
| 42 |
|
|
|
|
| 48 |
merged = model.merge_and_unload()
|
| 49 |
print("Merge complete")
|
| 50 |
|
| 51 |
+
print("\n=== Moving to CPU for save ===")
|
| 52 |
+
merged = merged.cpu()
|
| 53 |
+
torch.cuda.empty_cache()
|
| 54 |
+
|
| 55 |
print("\n=== Saving merged model ===")
|
| 56 |
merged.save_pretrained("./merged-model", safe_serialization=True)
|
| 57 |
print("Model saved locally")
|