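# Merge a fine-tuned LoRA adapter into its base model, convert the merged
# weights to GGUF, quantize them to Q4_K_M with llama.cpp, and upload the
# result to the Hugging Face Hub.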
import os
import shutil
import subprocess

from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer
from huggingface_hub import HfApi, create_repo
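
# Hub IDs and local paths. The adapter must have been trained on this exact
# base model for the merge below to be valid.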
BASE_MODEL_ID = "meta-llama/Llama-3.2-3B-Instruct"
ADAPTER_ID = "sunkencity/survival-expert-llama-3b"
OUTPUT_REPO = "sunkencity/survival-expert-3b-gguf"
MERGED_DIR = "merged_model"
GGUF_FILE = "survival-expert-llama-3b.Q4_K_M.gguf"
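
# torch_dtype="auto" keeps the checkpoint's native dtype and device_map="auto"
# places layers across available devices. Note: meta-llama repos are gated, so
# your HF account needs approved access for this download to succeed.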
print(f"Loading base model: {BASE_MODEL_ID}")
base_model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL_ID,
    device_map="auto",
    torch_dtype="auto",
    trust_remote_code=True
)
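
# Wrap the base model with the trained LoRA adapter weights.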
print(f"Loading adapter: {ADAPTER_ID}")
model = PeftModel.from_pretrained(base_model, ADAPTER_ID)
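
# merge_and_unload() folds the low-rank adapter deltas into the base weights
# and returns a plain transformers model, which the GGUF converter expects.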
print("Merging adapter into base model...")
model = model.merge_and_unload()
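
# Persist the merged weights and tokenizer as a standard HF model directory;
# convert_hf_to_gguf.py reads this layout directly.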
print(f"Saving merged model to {MERGED_DIR}...")
model.save_pretrained(MERGED_DIR)
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL_ID)
tokenizer.save_pretrained(MERGED_DIR)
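
# llama.cpp provides both the HF-to-GGUF converter and the quantizer.
# Start from a clean checkout so a stale clone cannot break the steps below.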
print("Cloning llama.cpp...")
if os.path.exists("llama.cpp"):
    shutil.rmtree("llama.cpp")  # portable replacement for an unchecked `rm -rf`
subprocess.run(["git", "clone", "https://github.com/ggerganov/llama.cpp"], check=True)
print("Installing llama.cpp requirements...")
subprocess.run(["pip", "install", "-r", "llama.cpp/requirements.txt"], check=True)
print("Converting to GGUF (FP16)...")
subprocess.run([
    "python", "llama.cpp/convert_hf_to_gguf.py",
    MERGED_DIR,
    "--outfile", "merged_fp16.gguf",
    "--outtype", "f16"
], check=True)
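
# llama.cpp builds with CMake; the llama-quantize binary lands in build/bin/.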
print("Building llama-quantize with CMake...")
os.makedirs("llama.cpp/build", exist_ok=True)
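
# Configure the build tree.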
subprocess.run(["cmake", "-B", "llama.cpp/build", "-S", "llama.cpp"], check=True)
subprocess.run(["cmake", "--build", "llama.cpp/build", "--config", "Release", "-j"], check=True)
print("Quantizing to Q4_K_M...")
quantize_bin = "llama.cpp/build/bin/llama-quantize"

subprocess.run([
    quantize_bin,
    "merged_fp16.gguf",
    GGUF_FILE,
    "Q4_K_M"
], check=True)
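
# exist_ok=True makes the script safe to re-run if the repo already exists.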
print(f"Creating repo {OUTPUT_REPO}...")
api = HfApi()
create_repo(OUTPUT_REPO, repo_type="model", exist_ok=True)
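
# Uploading needs write credentials: run `huggingface-cli login` first or set
# the HF_TOKEN environment variable.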
print(f"Uploading {GGUF_FILE}...")
api.upload_file(
    path_or_fileobj=GGUF_FILE,
    path_in_repo=GGUF_FILE,
    repo_id=OUTPUT_REPO,
    repo_type="model"
)
print("Done! GGUF available at:", f"https://huggingface.co/{OUTPUT_REPO}")