# /// script
# dependencies = ["peft", "transformers", "torch", "huggingface_hub", "sentencepiece", "cmake"]
# ///
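# PEP 723 inline metadata (above): a runner such as `uv run` will create an
# isolated environment with these dependencies before executing the script.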

import os
import shutil
import subprocess
import sys
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer
from huggingface_hub import HfApi, create_repo

# Configuration
BASE_MODEL_ID = "meta-llama/Llama-3.2-3B-Instruct"
ADAPTER_ID = "sunkencity/survival-expert-llama-3b"
OUTPUT_REPO = "sunkencity/survival-expert-3b-gguf"
MERGED_DIR = "merged_model"
GGUF_FILE = "survival-expert-llama-3b.Q4_K_M.gguf"
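
# Q4_K_M is llama.cpp's 4-bit "k-quant" (medium) preset, a common size/quality
# trade-off for local inference; Q5_K_M or Q8_0 keep more fidelity at larger sizes.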

print(f"Loading base model: {BASE_MODEL_ID}")
base_model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL_ID,
    device_map="auto",
    torch_dtype="auto",
    trust_remote_code=True
)

print(f"Loading adapter: {ADAPTER_ID}")
model = PeftModel.from_pretrained(base_model, ADAPTER_ID)

print("Merging model...")
model = model.merge_and_unload()

print(f"Saving merged model to {MERGED_DIR}...")
model.save_pretrained(MERGED_DIR)
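# The GGUF converter reads tokenizer files from the model directory, so the
# tokenizer must be saved alongside the merged weights.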
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL_ID)
tokenizer.save_pretrained(MERGED_DIR)

print("Cloning llama.cpp...")
if os.path.exists("llama.cpp"):
    subprocess.run(["rm", "-rf", "llama.cpp"])
subprocess.run(["git", "clone", "https://github.com/ggerganov/llama.cpp"], check=True)

print("Installing llama.cpp requirements...")
subprocess.run(["pip", "install", "-r", "llama.cpp/requirements.txt"], check=True)

print("Converting to GGUF (FP16)...")
# Convert the merged HF checkpoint to an FP16 GGUF with llama.cpp's converter,
# using the current interpreter so the freshly installed requirements are visible
subprocess.run([
    sys.executable, "llama.cpp/convert_hf_to_gguf.py",
    MERGED_DIR,
    "--outfile", "merged_fp16.gguf",
    "--outtype", "f16"
], check=True)

print("Building llama-quantize with CMake...")
# Create build directory
os.makedirs("llama.cpp/build", exist_ok=True)

# Run cmake configuration
subprocess.run(["cmake", "-B", "llama.cpp/build", "-S", "llama.cpp"], check=True)

# Build the project
subprocess.run(["cmake", "--build", "llama.cpp/build", "--config", "Release", "-j"], check=True)

print("Quantizing to Q4_K_M...")
# The binary is usually in llama.cpp/build/bin/llama-quantize
quantize_bin = "llama.cpp/build/bin/llama-quantize"
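# Note: multi-config generators (e.g. Visual Studio) put it in
# llama.cpp/build/bin/Release/ instead.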

subprocess.run([
    quantize_bin,
    "merged_fp16.gguf",
    GGUF_FILE,
    "Q4_K_M"
], check=True)
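
# Quick sanity check on the result (a 3B model at Q4_K_M is typically ~2 GB)
print(f"Quantized size: {os.path.getsize(GGUF_FILE) / 1e9:.2f} GB")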

print(f"Creating repo {OUTPUT_REPO}...")
api = HfApi()
create_repo(OUTPUT_REPO, repo_type="model", exist_ok=True)

print(f"Uploading {GGUF_FILE}...")
api.upload_file(
    path_or_fileobj=GGUF_FILE,
    path_in_repo=GGUF_FILE,
    repo_id=OUTPUT_REPO,
    repo_type="model"
)

print("Done! GGUF available at:", f"https://huggingface.co/{OUTPUT_REPO}")