qsnich-it@MacBook-Air-khxng-Qsnich-IT Finetune % python3 -m venv mlx_env
qsnich-it@MacBook-Air-khxng-Qsnich-IT Finetune % source mlx_env/bin/activate
(mlx_env) qsnich-it@MacBook-Air-khxng-Qsnich-IT Finetune % git clone https://github.com/ggerganov/llama.cpp.git
(mlx_env) qsnich-it@MacBook-Air-khxng-Qsnich-IT Finetune % pip install -r llama.cpp/requirements.txt
(mlx_env) qsnich-it@MacBook-Air-khxng-Qsnich-IT Finetune % pip install torch sentencepiece
(mlx_env) qsnich-it@MacBook-Air-khxng-Qsnich-IT Finetune % pip install -U mlx-lm
(mlx_env) qsnich-it@MacBook-Air-khxng-Qsnich-IT Finetune % python -m mlx_lm lora \
  --model google/gemma-3-4b-it \
  --data ./data \
  --train \
  --iters 200 \
  --batch-size 1 \
  --learning-rate 1e-5 \
  --mask-prompt \
  --num-layers 8 \
  --adapter-path ./adapters
(mlx_env) qsnich-it@MacBook-Air-khxng-Qsnich-IT Finetune % mlx_lm.fuse \
  --model google/gemma-3-4b-it \
  --adapter-path adapters \
  --save-path ./gemma3-fused-f16
(mlx_env) qsnich-it@MacBook-Air-khxng-Qsnich-IT Finetune % python llama.cpp/convert_hf_to_gguf.py ./gemma3-fused-f16 \
  --outfile gemma3-custom.gguf \
  --outtype f16
(mlx_env) qsnich-it@MacBook-Air-khxng-Qsnich-IT Finetune % nano Modelfile2
(mlx_env) qsnich-it@MacBook-Air-khxng-Qsnich-IT Finetune % ollama create gemma3-jarvic -f Modelfile2