nathens committed
Commit 59c9a78 · verified · Parent: f3c2b7f

Upload convert_to_gguf.py with huggingface_hub

Files changed (1):
  convert_to_gguf.py  +2 -2
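The commit title names huggingface_hub as the upload mechanism. For context, a minimal sketch of such an upload via the library's HfApi.upload_file; the repo id below is a placeholder, not taken from this commit:

from huggingface_hub import HfApi

# Upload the conversion script to a Hub repo (repo_id is a placeholder).
api = HfApi()
api.upload_file(
    path_or_fileobj="convert_to_gguf.py",
    path_in_repo="convert_to_gguf.py",
    repo_id="nathens/example-repo",  # placeholder, not from the commit
    commit_message="Upload convert_to_gguf.py with huggingface_hub",
)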
convert_to_gguf.py CHANGED

@@ -59,9 +59,9 @@ nproc_result = subprocess.run(["nproc"], capture_output=True, text=True, check=True)
 nproc = nproc_result.stdout.strip()
 print(f"Building llama.cpp with {nproc} cores using CMake...")
 
-# Use CMake to build
+# Use CMake to build (CPU only - CUDA not needed for conversion/quantization)
 os.makedirs("llama.cpp/build", exist_ok=True)
-subprocess.run(["cmake", "-B", "llama.cpp/build", "-S", "llama.cpp", "-DGGML_CUDA=ON"], check=True)
+subprocess.run(["cmake", "-B", "llama.cpp/build", "-S", "llama.cpp"], check=True)
 subprocess.run(["cmake", "--build", "llama.cpp/build", "--config", "Release", "-j", nproc], check=True)
 
 # Step 5: Convert to GGUF format
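The change only drops -DGGML_CUDA=ON from the configure step: the GGUF conversion and quantization that follow run on the CPU, so a CUDA-enabled build buys nothing here. A sketch of what Step 5 plausibly runs, assuming the stock llama.cpp tooling (convert_hf_to_gguf.py and the llama-quantize binary); the model paths and quantization type are placeholders:

import subprocess

# Convert the Hugging Face checkpoint to a GGUF file (paths are placeholders).
subprocess.run(
    ["python", "llama.cpp/convert_hf_to_gguf.py", "model_dir",
     "--outfile", "model-f16.gguf", "--outtype", "f16"],
    check=True,
)

# Quantize the f16 GGUF; Q4_K_M is one common choice, shown as an example.
subprocess.run(
    ["llama.cpp/build/bin/llama-quantize",
     "model-f16.gguf", "model-Q4_K_M.gguf", "Q4_K_M"],
    check=True,
)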