MLX
Safetensors
English
llama
llama-3.2
lora
instruction-tuned
coding
ai-assistant
belweave
kai
local-ai
macbook
4-bit precision
Instructions to use belweave/kai-0 with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- MLX
How to use belweave/kai-0 with MLX:
# Download the model from the Hub
pip install huggingface_hub[hf_xet]
huggingface-cli download --local-dir kai-0 belweave/kai-0
- Notebooks
- Google Colab
- Kaggle
- Local Apps
- LM Studio
#!/usr/bin/env python3
"""
Upload Kai-0 to HuggingFace Hub
==============================

Usage:
    export HF_TOKEN=your_token_here
    python3 upload.py --repo-id belweave/kai-0

Or login first:
    huggingface-cli login
    python3 upload.py --repo-id belweave/kai-0
"""
import argparse
import os
from pathlib import Path

from huggingface_hub import HfApi, create_repo
def upload_model(repo_id: str, local_dir: Path, private: bool = False):
    """Upload the model folder to the HuggingFace Hub.

    Args:
        repo_id: Target Hub repository ID, e.g. "belweave/kai-0".
        local_dir: Local directory whose contents are uploaded.
        private: If True, the repo is created as private.

    Returns early (without raising) when authentication is missing or the
    repo cannot be created, printing guidance to stdout instead.
    """
    api = HfApi()

    # Check auth. If HF_TOKEN is set, huggingface_hub picks it up from the
    # environment automatically; otherwise fall back to cached CLI credentials
    # and probe them with whoami().
    token = os.environ.get("HF_TOKEN")
    if not token:
        try:
            api.whoami()
            print("✓ Already authenticated with HuggingFace")
        except Exception:
            print("✗ Not authenticated. Run one of these:")
            print("   export HF_TOKEN=your_token")
            print("   OR")
            print("   huggingface-cli login")
            return

    # Create repo if it doesn't exist (exist_ok=True makes this idempotent).
    try:
        create_repo(repo_id, repo_type="model", private=private, exist_ok=True)
        print(f"✓ Repo ready: https://huggingface.co/{repo_id}")
    except Exception as e:
        print(f"⚠️ Repo creation issue: {e}")
        return

    # Upload files
    print(f"📤 Uploading files from {local_dir}...")
    api.upload_folder(
        folder_path=str(local_dir),
        repo_id=repo_id,
        repo_type="model",
    )

    # Plain string (no f-prefix) — the original used an f-string with no
    # placeholders. NOTE(review): the status glyphs above reconstruct mojibake
    # ("β", "β οΈ", "π") from the garbled source — confirm intended emoji.
    print("\n🎉 Upload complete!")
    print(f"   URL: https://huggingface.co/{repo_id}")
    print(f"   Clone: git clone https://huggingface.co/{repo_id}")
def main():
    """Entry point: parse command-line options and run the upload."""
    arg_parser = argparse.ArgumentParser(
        description="Upload Kai-0 to HuggingFace Hub"
    )
    # Table-driven option registration keeps flag/help pairs in one place.
    for flag, kwargs in (
        ("--repo-id", {"default": "belweave/kai-0", "help": "HF Hub repo ID"}),
        ("--dir", {"default": ".", "help": "Local directory to upload"}),
        ("--private", {"action": "store_true", "help": "Make repo private"}),
    ):
        arg_parser.add_argument(flag, **kwargs)
    opts = arg_parser.parse_args()
    upload_model(opts.repo_id, Path(opts.dir), private=opts.private)


if __name__ == "__main__":
    main()