| import subprocess |
| import os |
| import sys |
| import shutil |
| from huggingface_hub import HfApi |
|
|
|
|
# Number of parallel build jobs, kept as a string for CLI/env use.
# NOTE(review): PARALLEL is not referenced anywhere in the visible code —
# confirm it is consumed elsewhere before relying on it.
PARALLEL = "8"
# Hugging Face access token; None when the HF_TOKEN env var is unset.
# NOTE(review): fetched here but never passed to HfApi below — verify auth.
HF_TOKEN = os.environ.get("HF_TOKEN")
# Target Hugging Face repository id (e.g. "user/repo"); None when unset.
REPO_ID = os.environ.get("REPO_ID")
# Working directory where the build output tree is assembled before upload.
TEMP_DIR = "/tmp/build"
|
|
|
|
def upload(folder_path, repo_id, repo_type="model", token=None):
    """Upload a local folder to a Hugging Face Hub repository.

    Args:
        folder_path: Local directory whose contents are uploaded.
        repo_id: Target repository id, e.g. "user/repo".
        repo_type: Hub repository type ("model", "dataset", or "space").
        token: Optional explicit access token. Falls back to the module-level
            HF_TOKEN (the HF_TOKEN env var), then to huggingface_hub's own
            cached credentials when both are None.

    Raises:
        ValueError: if repo_id is empty/None (e.g. REPO_ID env var unset).
        huggingface_hub errors: on authentication or network failure.
    """
    if not repo_id:
        raise ValueError("repo_id must be set (is the REPO_ID env var defined?)")
    # Bug fix: the module reads HF_TOKEN but never handed it to HfApi, so
    # uploads silently relied on cached credentials. Pass it explicitly.
    api = HfApi(token=token or HF_TOKEN)
    # NOTE(review): commit message says "llama.cpp" but this script builds
    # Ollama — confirm the intended message before changing it.
    api.upload_folder(
        folder_path=folder_path,
        repo_id=repo_id,
        repo_type=repo_type,
        commit_message="Update llama.cpp",
    )
|
|
|
|
def compile_ollama(install_path):
    """Clone the Ollama repository, build it, and lay out the result.

    Produces the file tree:
        install_path/bin/ollama       -- the compiled binary
        install_path/lib/ollama/...   -- runner files copied from dist/.../runners/

    Args:
        install_path: Destination root for the built file tree.

    Returns:
        True on success, False if a build step (git/go) fails.

    Exits:
        Calls sys.exit(1) on any unexpected (non-subprocess) error, matching
        the original script's behavior.
    """
    repo_dir = "/tmp/ollama_source"

    bin_dir = os.path.join(install_path, "bin")
    lib_dir = os.path.join(install_path, "lib/ollama")
    # makedirs creates intermediate directories, so install_path itself does
    # not need a separate makedirs call.
    os.makedirs(bin_dir, exist_ok=True)
    os.makedirs(lib_dir, exist_ok=True)

    try:
        # Start from a clean checkout so stale sources never leak into a build.
        if os.path.exists(repo_dir):
            shutil.rmtree(repo_dir)
        subprocess.run(
            ["git", "clone", "https://github.com/ollama/ollama", repo_dir],
            check=True,
        )

        # cgo is required for the native (GPU runner) code paths.
        env = os.environ.copy()
        env["CGO_ENABLED"] = "1"

        # Bug fix: use cwd= instead of os.chdir() so the process-wide working
        # directory is not silently changed for the rest of the program.
        print("Compiling CUDA runners (this may take a while)...")
        subprocess.run(["go", "generate", "./..."], cwd=repo_dir, env=env, check=True)

        print("Building Ollama binary...")
        subprocess.run(
            ["go", "build", "-o", os.path.join(bin_dir, "ollama"), "."],
            cwd=repo_dir,
            env=env,
            check=True,
        )

        # Flatten every file found under a ".../dist/.../runners/..." path
        # into lib/ollama.
        for root, _dirs, files in os.walk(repo_dir):
            if "dist" in root and "runners" in root:
                for name in files:
                    shutil.copy2(os.path.join(root, name), lib_dir)

        print(f"Build complete. Filetree organized at: {install_path}")
        return True

    except subprocess.CalledProcessError as e:
        # Bug fix: the original had a second, unreachable except clause for
        # this same exception type; a single handler replaces both.
        print(f"Build failed: {e}")
        return False
    except Exception as e:
        print(f"An unexpected error occurred: {e}")
        sys.exit(1)
|
|
|
|
if __name__ == "__main__":
    # Guard the entry point so importing this module does not trigger a build.
    # Bug fix: the original uploaded the (possibly partial) tree even when
    # compile_ollama() reported failure; only upload on success.
    if compile_ollama(TEMP_DIR):
        upload(TEMP_DIR, REPO_ID)
    else:
        sys.exit(1)
|
|