File size: 855 Bytes
1d48909
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
---
# Federated LLM fine-tuning configuration template (FederatedScope-style).
# Fields wrapped in curly braces are placeholders that must be substituted
# (e.g. via Python str.format) BEFORE this file is parsed as YAML.

use_gpu: true
device: 0

# patience: 0 — early stopping is effectively disabled.
early_stop:
  patience: 0

federate:
  mode: standalone
  client_num: {num_clients}
  total_round_num: {total_rounds}
  save_to: "{save_name}"

data:
  root: {data_root}
  type: "{data_type}"
  splits: {data_splits}
  splitter: "{data_splitter}"

llm:
  tok_len: 1000
  chat:
    max_len: 2000
  # Parameter-efficient fine-tuning: only the LoRA adapter weights are trained.
  adapter:
    use: true
    args:
      - adapter_package: peft
        adapter_method: lora
        r: 8
        lora_alpha: 16
        lora_dropout: 0.05

dataloader:
  batch_size: 1

model:
  type: "{model_type}"

train:
  local_update_steps: {local_update_steps}
  batch_or_epoch: batch
  optimizer:
    lr: 0.0003
    weight_decay: 0.0
  # Half-precision training to reduce GPU memory use.
  is_enable_half: true

criterion:
  type: CrossEntropyLoss

trainer:
  type: llmtrainer

eval:
  freq: 50
  metrics: ["loss"]
  count_flops: false
  best_res_update_round_wise_key: test_avg_loss