model_family: phi
LoRA:
  r: 0
  alpha: 32
  dropout: 0.05
dataset: Harry
data_path: /data/user/whz/machine_unlearning/our_bench/data/Harry/finetune.json
split: full
batch_size: 4
gradient_accumulation_steps: 8
num_epochs: 10
lr: 3.0e-05
bf16: true
save_dir: /data/user/whz/machine_unlearning/our_bench/save_model/Harry/finetune_phi_B4_G8_E10_lr3e-5_2
weight_decay: 0.01
seed: 42
ds_size: null