# Ollama Modelfile to use this fine-tuned model with Ollama.
# TODO (Ryan Parker, 7-May-2026): this Modelfile currently doesn't work.
# When using safetensors, the base model must be downloaded first, and
# Ollama raises an error stating that MLX is required for quantization.
# Note that the base model can be downloaded like this:
# uv tool run --from huggingface-hub -- hf download LiquidAI/LFM2.5-1.2B-Instruct
# When using GGUF, Ollama successfully creates the model, but then raises
# an error that LoRA is not supported.
# safetensors version
# Uses the adapter_model.safetensors trained in this repository.
# Usage:
# ollama create lfm-coder --file Modelfile --quantize q4_K_M --experimental
# FROM hf.co/LiquidAI/LFM2.5-1.2B-Instruct
# ADAPTER adapter_model.safetensors
# GGUF version
# Usage:
# ollama create lfm-coder --file Modelfile
# Base model: the Q4_K_M-quantized GGUF build of LFM2.5-1.2B-Instruct,
# pulled directly from Hugging Face via Ollama's hf.co/ scheme.
FROM hf.co/LiquidAI/LFM2.5-1.2B-Instruct-GGUF:Q4_K_M
# LoRA adapter (F16 GGUF) fine-tuned for coding, also pulled from Hugging Face.
# NOTE(review): per the TODO above, Ollama currently errors with
# "LoRA is not supported" for this adapter — confirm before relying on it.
ADAPTER hf.co/rparkr/LFM2.5-1.2B-Instruct-Coding-F16-GGUF
# Recommended settings from: https://huggingface.co/LiquidAI/LFM2.5-1.2B-Instruct#:~:text=Generation%20parameters%3A
# Low temperature for near-deterministic generations.
PARAMETER temperature 0.1
# Sample only from the 50 highest-probability tokens at each step.
PARAMETER top_k 50
# Mild penalty to discourage token repetition.
PARAMETER repeat_penalty 1.05
# Context window in tokens; assumes the model supports a 128k-token
# context — TODO confirm against the model card before shipping.
PARAMETER num_ctx 128000