Annie0430 committed on
Commit
88100b0
·
verified ·
1 Parent(s): 31ba87d

Upload demo_model/server/training_config/training_config.yaml with huggingface_hub

Browse files
demo_model/server/training_config/training_config.yaml ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
{
  "use_gpu": true,
  "device": 0,
  "early_stop": {
    "patience": 0
  },
  "federate": {
    "mode": "standalone",
    "client_num": 2,
    "total_round_num": 3,
    "save_to": "llama3.ckpt"
  },
  "data": {
    "root": "https://huggingface.co/datasets/Annie0430/test_fileIO",
    "type": "custom_llm",
    "splits": [
      0.98,
      0.01,
      0.01
    ],
    "splitter": "iid"
  },
  "llm": {
    "tok_len": 1000,
    "chat": {
      "max_len": 2000
    },
    "adapter": {
      "use": true,
      "args": [
        {
          "adapter_package": "peft",
          "adapter_method": "lora",
          "r": 8,
          "lora_alpha": 16,
          "lora_dropout": 0.05
        }
      ]
    }
  },
  "dataloader": {
    "batch_size": 1
  },
  "model": {
    "type": "meta-llama/Llama-3.1-8B-Instruct@huggingface_llm"
  },
  "train": {
    "local_update_steps": 3,
    "batch_or_epoch": "batch",
    "optimizer": {
      "lr": 0.0003,
      "weight_decay": 0.0
    },
    "is_enable_half": true
  },
  "criterion": {
    "type": "CrossEntropyLoss"
  },
  "trainer": {
    "type": "llmtrainer"
  },
  "eval": {
    "freq": 50,
    "metrics": [
      "loss"
    ],
    "count_flops": false,
    "best_res_update_round_wise_key": "test_avg_loss"
  }
}