import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
from huggingface_hub import HfApi, login

BASE_MODEL = "microsoft/Phi-3-mini-4k-instruct"
ADAPTER = "BlueBoi32/testphimodel"
MERGED_PATH = "merged-model"
TARGET_REPO = "BlueBoi32/phi3-merged"

def merge_model():
    # Authenticate with the Hugging Face Hub; the token comes from the
    # environment so it never has to be hard-coded in the script.
    hf_token = os.environ.get("HF_TOKEN")
    if hf_token is None:
        raise ValueError("HF_TOKEN environment variable not found.")
    login(hf_token)

    # Load the base model on CPU in fp16; no GPU is needed for a weight merge.
    print("Loading base model...")
    base = AutoModelForCausalLM.from_pretrained(
        BASE_MODEL,
        torch_dtype=torch.float16,
        device_map="cpu",
    )

    print("Loading LoRA adapter...")
    model = PeftModel.from_pretrained(base, ADAPTER)

    print("Merging LoRA → Base model...")
    # merge_and_unload() folds the adapter weights into the base weights and
    # returns a plain transformers model, so the result no longer needs peft.
    model = model.merge_and_unload()

    print("Saving merged model locally...")
    model.save_pretrained(MERGED_PATH)
    tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)
    tokenizer.save_pretrained(MERGED_PATH)

    print("Uploading merged model to HF Hub...")
    api = HfApi()
    # Create the target repo first; upload_folder fails if it does not exist.
    api.create_repo(repo_id=TARGET_REPO, token=hf_token, exist_ok=True)
    api.upload_folder(
        folder_path=MERGED_PATH,
        repo_id=TARGET_REPO,
        token=hf_token,
    )

    print("✅ Upload complete! Model is now at:", TARGET_REPO)
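

# Minimal sanity-check sketch (not part of the original script; the function
# name and prompt are assumptions). Once the upload finishes, the merged model
# should load straight from TARGET_REPO with plain transformers, no peft
# required. Assumes the repo is readable with the same HF_TOKEN used above.
def verify_merged_model():
    # float32 on CPU keeps generate() safe; fp16 ops are not universally
    # supported on CPU across torch versions.
    model = AutoModelForCausalLM.from_pretrained(
        TARGET_REPO,
        torch_dtype=torch.float32,
        device_map="cpu",
    )
    tokenizer = AutoTokenizer.from_pretrained(TARGET_REPO)
    inputs = tokenizer("Hello!", return_tensors="pt")
    output = model.generate(**inputs, max_new_tokens=20)
    print(tokenizer.decode(output[0], skip_special_tokens=True))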


if __name__ == "__main__":
    merge_model()