# training-scripts/convert_survival_gguf.py
# /// script
# dependencies = ["peft", "transformers", "torch", "huggingface_hub", "sentencepiece", "cmake"]
# ///
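# The block above is PEP 723 inline script metadata, so the script can be run with a
# PEP 723-aware runner (e.g. `uv run convert_survival_gguf.py`). Note that `cmake`
# here is the PyPI package that ships the cmake binary used for the build step below.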
import os
import shutil
import subprocess
import sys
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer
from huggingface_hub import HfApi, create_repo
# Configuration
BASE_MODEL_ID = "meta-llama/Llama-3.2-3B-Instruct"
ADAPTER_ID = "sunkencity/survival-expert-llama-3b"
OUTPUT_REPO = "sunkencity/survival-expert-3b-gguf"
MERGED_DIR = "merged_model"
GGUF_FILE = "survival-expert-llama-3b.Q4_K_M.gguf"
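# Note: meta-llama repos are gated on the Hub, so loading BASE_MODEL_ID requires an
# authenticated session that has been granted access to the model.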
print(f"Loading base model: {BASE_MODEL_ID}")
base_model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL_ID,
    device_map="auto",
    torch_dtype="auto",
    trust_remote_code=True,
)
print(f"Loading adapter: {ADAPTER_ID}")
model = PeftModel.from_pretrained(base_model, ADAPTER_ID)
print("Merging model...")
model = model.merge_and_unload()
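# merge_and_unload() folds the LoRA deltas into the base weights and returns a plain
# transformers model, which is what the GGUF converter below expects.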
print(f"Saving merged model to {MERGED_DIR}...")
model.save_pretrained(MERGED_DIR)
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL_ID)
tokenizer.save_pretrained(MERGED_DIR)
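# The GGUF converter reads tokenizer files from the model directory, so they must
# sit next to the merged weights.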
print("Cloning llama.cpp...")
if os.path.exists("llama.cpp"):
    shutil.rmtree("llama.cpp")  # remove any stale checkout so the clone below starts clean
subprocess.run(["git", "clone", "https://github.com/ggerganov/llama.cpp"], check=True)
print("Installing llama.cpp requirements...")
subprocess.run(["pip", "install", "-r", "llama.cpp/requirements.txt"], check=True)
print("Converting to GGUF (FP16)...")
# Convert to FP16 GGUF using the python script
subprocess.run([
    sys.executable, "llama.cpp/convert_hf_to_gguf.py",
    MERGED_DIR,
    "--outfile", "merged_fp16.gguf",
    "--outtype", "f16",
], check=True)
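# merged_fp16.gguf is only an intermediate artifact; the quantized Q4_K_M file
# produced below is what actually gets uploaded.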
print("Building llama-quantize with CMake...")
# Create build directory
os.makedirs("llama.cpp/build", exist_ok=True)
# Run cmake configuration
subprocess.run(["cmake", "-B", "llama.cpp/build", "-S", "llama.cpp"], check=True)
# Build the project
subprocess.run(["cmake", "--build", "llama.cpp/build", "--config", "Release", "-j"], check=True)
print("Quantizing to Q4_K_M...")
# The binary usually lands in llama.cpp/build/bin/; multi-config generators
# (e.g. MSVC) nest it one level deeper under Release/.
quantize_bin = "llama.cpp/build/bin/llama-quantize"
if not os.path.exists(quantize_bin):
    quantize_bin = "llama.cpp/build/bin/Release/llama-quantize"
subprocess.run([
    quantize_bin,
    "merged_fp16.gguf",
    GGUF_FILE,
    "Q4_K_M",
], check=True)
print(f"Creating repo {OUTPUT_REPO}...")
api = HfApi()
create_repo(OUTPUT_REPO, repo_type="model", exist_ok=True)
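# HfApi authenticates with the token from `huggingface-cli login` or the HF_TOKEN
# environment variable; create_repo and upload_file both use it.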
print(f"Uploading {GGUF_FILE}...")
api.upload_file(
    path_or_fileobj=GGUF_FILE,
    path_in_repo=GGUF_FILE,
    repo_id=OUTPUT_REPO,
    repo_type="model",
)
print("Done! GGUF available at:", f"https://huggingface.co/{OUTPUT_REPO}")