Upload convert_to_gguf.py with huggingface_hub

Browse files — convert_to_gguf.py (+15, -0)

convert_to_gguf.py (CHANGED)

@@ -68,6 +68,21 @@ print(f" ✅ Merged model saved to {merged_dir}")
|
|
  68 |
  69 | # Step 3: Install llama.cpp for conversion
  70 | print("\n📥 Step 3: Setting up llama.cpp for GGUF conversion...")
  71 | print(" Cloning llama.cpp repository...")
  72 | subprocess.run(
  73 |     ["git", "clone", "https://github.com/ggerganov/llama.cpp.git", "/tmp/llama.cpp"],
|
|
|
|
  68 |
  69 | # Step 3: Install llama.cpp for conversion
  70 | print("\n📥 Step 3: Setting up llama.cpp for GGUF conversion...")
  71 | +
  72 | + # Install build tools (needed for quantization)
  73 | + print(" Installing build tools...")
  74 | + subprocess.run(
  75 | +     ["apt-get", "update", "-qq"],
  76 | +     check=True,
  77 | +     capture_output=True
  78 | + )
  79 | + subprocess.run(
  80 | +     ["apt-get", "install", "-y", "-qq", "build-essential"],
  81 | +     check=True,
  82 | +     capture_output=True
  83 | + )
  84 | + print(" ✅ Build tools installed")
  85 | +
  86 | print(" Cloning llama.cpp repository...")
  87 | subprocess.run(
  88 |     ["git", "clone", "https://github.com/ggerganov/llama.cpp.git", "/tmp/llama.cpp"],