{
  "model_name_or_path": "LiquidAI/LFM2-2.6B",
  "use_lora": true,
  "lora_r": 64,
  "lora_alpha": 128,
  "lora_dropout": 0.05,
  "data_path": "./kokoro_processed_data",
  "max_seq_length": 2048,
  "response_template": "### Response:",
  "output_dir": "./lfm_trl_finetuned",
  "num_train_epochs": 3,
  "per_device_train_batch_size": 4,
  "per_device_eval_batch_size": 4,
  "gradient_accumulation_steps": 4,
  "learning_rate": 2e-4,
  "warmup_ratio": 0.1,
  "logging_steps": 10,
  "save_steps": 100,
  "eval_steps": 100,
  "bf16": true,
  "tf32": true,
  "seed": 42
}