{ "adapter_path": "adapters", "alpha": 32, "batch_size": 4, "config": "scripts/lora_config.yaml", "data": "./data", "fine_tune_type": "lora", "grad_accumulation_steps": 1, "grad_checkpoint": false, "iters": 1000, "learning_rate": 0.0001, "lora_layers": 16, "lora_parameters": { "rank": 8, "dropout": 0.0, "scale": 20.0 }, "lr_schedule": null, "mask_prompt": false, "max_seq_length": 2048, "model": "./base_weights", "num_layers": 16, "optimizer": "adam", "optimizer_config": { "adam": {}, "adamw": {}, "muon": {}, "sgd": {}, "adafactor": {} }, "project_name": null, "rank": 16, "report_to": null, "resume_adapter_file": null, "save_every": 200, "seed": 0, "steps_per_eval": 200, "steps_per_report": 10, "test": false, "test_batches": 500, "train": true, "val_batches": 25 }