use_gpu: True
device: 0
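# early stopping: a patience of 0 disables it, so training runs the full round budget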
early_stop:
  patience: 0
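
# curly-brace placeholders below are meant to be filled in by the launching
# script before this config is parsed.
# federate: standalone mode runs the whole federation in a single process
# (no distributed workers); the resulting model is written to the save path.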
federate:
  mode: standalone
  client_num: {num_clients}
  total_round_num: {total_rounds}
  save_to: "{save_name}"
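
# data: dataset root, type, train/val/test split ratios, and the splitter
# that partitions the data across clients.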
data:
  root: {data_root}
  type: "{data_type}"
  splits: {data_splits}
  splitter: "{data_splitter}"
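
# llm: tokenizer/chat length limits plus parameter-efficient fine-tuning with
# a LoRA adapter (rank 8, alpha 16, dropout 0.05) via the peft package.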
llm:
  tok_len: 1000
  chat:
    max_len: 2000
  adapter:
    use: True
    args:
      - adapter_package: peft
        adapter_method: lora
        r: 8
        lora_alpha: 16
        lora_dropout: 0.05
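
# dataloader: a single sample per step keeps memory usage low for large models.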
dataloader:
  batch_size: 1
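
# model: the base LLM to fine-tune.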
model:
  type: "{model_type}"
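
# train: local updates are counted in batches (batch_or_epoch: batch);
# is_enable_half trains in half precision.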
train:
  local_update_steps: {local_update_steps}
  batch_or_epoch: batch
  optimizer:
    lr: 0.0003
    weight_decay: 0.0
  is_enable_half: True
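
# criterion: standard cross-entropy over predicted tokens.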
criterion:
  type: CrossEntropyLoss
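
# trainer: the LLM-specific trainer implementation.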
trainer:
  type: llmtrainer
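
# eval: evaluate every 50 rounds on the loss metric only; the best round is
# selected by average test loss, and FLOP counting is skipped.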
eval:
  freq: 50
  metrics: ["loss"]
  count_flops: False
  best_res_update_round_wise_key: test_avg_loss