{
  "adapter_path": "adapters-lfm",
  "batch_size": 1,
  "chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% elif message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + '\n' }}{% endif %}{% endfor %}",
  "config": "lora_config_lfm.yaml",
  "data": "datasets",
  "fine_tune_type": "lora",
  "grad_accumulation_steps": 1,
  "grad_checkpoint": false,
  "iters": 1000,
  "learning_rate": 1e-05,
  "lora": {
    "rank": 8,
    "scale": 10.0,
    "dropout": 0.0,
    "keys": [
      "self_attn.q_proj",
      "self_attn.v_proj"
    ],
    "num_layers": 12
  },
  "lora_parameters": {
    "rank": 8,
    "dropout": 0.0,
    "scale": 20.0
  },
  "lr_schedule": null,
  "mask_prompt": false,
  "max_seq_length": 2048,
  "model": "LiquidAI/LFM2.5-350M-MLX-4bit",
  "num_layers": 16,
  "optimizer": "adam",
  "optimizer_config": {
    "adam": {},
    "adamw": {},
    "muon": {},
    "sgd": {},
    "adafactor": {}
  },
  "project_name": null,
  "report_to": null,
  "resume_adapter_file": null,
  "save_every": 100,
  "seed": 0,
  "steps_per_eval": 1000,
  "steps_per_report": 200,
  "test": false,
  "test_batches": 500,
  "train": true,
  "val_batches": 10
}